From 86699c4ee59ff58dc69f8432812af57e4c099d6f Mon Sep 17 00:00:00 2001 From: Abhinav Pandey Date: Mon, 6 Jan 2025 16:31:19 +0530 Subject: [PATCH 1/5] Update styling in ALL files --- apps/api/R/auth.R | 57 +- apps/api/R/available-models.R | 6 +- apps/api/R/entrypoint.R | 18 +- apps/api/R/formats.R | 45 +- apps/api/R/general.R | 20 +- apps/api/R/get.file.R | 24 +- apps/api/R/inputs.R | 113 +- apps/api/R/models.R | 42 +- apps/api/R/pfts.R | 51 +- apps/api/R/runs.R | 180 +- apps/api/R/sites.R | 35 +- apps/api/R/submit.workflow.R | 132 +- apps/api/R/workflows.R | 138 +- apps/api/tests/alltests.R | 2 +- apps/api/tests/test.auth.R | 2 +- apps/api/tests/test.formats.R | 2 +- apps/api/tests/test.models.R | 2 +- apps/api/tests/test.pfts.R | 2 +- apps/api/tests/test.ping.R | 2 +- apps/api/tests/test.sites.R | 2 +- apps/api/tests/test.workflows.R | 4 +- base/all/R/pecan_version.R | 36 +- base/all/data-raw/record_versions.R | 10 +- base/all/data/pecan_releases.R | 4 +- base/all/data/pecan_version_history.R | 10 +- base/all/tests/testthat/test-pecan_version.R | 24 +- base/all/tests/testthat/test.workflow.R | 6 +- base/db/R/assign.treatments.R | 10 +- base/db/R/check.lists.R | 6 +- base/db/R/clone_pft.R | 29 +- base/db/R/convert_input.R | 1370 +++++----- base/db/R/covariate.functions.R | 85 +- base/db/R/db_merge_into.R | 4 +- base/db/R/dbfiles.R | 45 +- base/db/R/derive.trait.R | 20 +- base/db/R/derive.traits.R | 53 +- base/db/R/fetch.stats2se.R | 4 +- base/db/R/get.trait.data.R | 87 +- base/db/R/get.trait.data.pft.R | 577 ++-- base/db/R/get_postgres_envvars.R | 11 +- base/db/R/input.name.check.R | 4 +- base/db/R/insert.format.vars.R | 99 +- base/db/R/insert_table.R | 18 +- base/db/R/pft.add.spp.R | 65 +- base/db/R/query.data.R | 44 +- base/db/R/query.dplyr.R | 101 +- base/db/R/query.file.path.R | 11 +- base/db/R/query.format.vars.R | 106 +- base/db/R/query.pft.R | 58 +- base/db/R/query.prior.R | 104 +- base/db/R/query.site.R | 7 +- base/db/R/query.trait.data.R | 143 +- base/db/R/query.traits.R | 26 +- base/db/R/query.yields.R | 37 +- base/db/R/query_pfts.R | 12 +- base/db/R/search_references.R | 3 +- base/db/R/symmetric_setdiff.R | 28 +- base/db/R/take.samples.R | 6 +- base/db/R/try2sqlite.R | 34 +- base/db/R/utils_db.R | 160 +- base/db/inst/PFT_builder/PFT_builder.R | 37 +- .../import-ForestGEO/import_species_list.R | 162 +- base/db/inst/import-try/02_citations.R | 4 +- base/db/inst/import-try/91.global.subset.R | 14 +- .../inst/import-try/92.data.specific.subset.R | 28 +- base/db/inst/import-try/93.create.try.sites.R | 78 +- base/db/inst/import-try/94.match.species.R | 54 +- base/db/inst/import-try/95.citations.R | 16 +- base/db/inst/import-try/96.load.data.R | 27 +- base/db/inst/import-try/common.R | 10 +- base/db/inst/import-try/helpers.species.R | 125 +- base/db/tests/testthat.R | 13 +- base/db/tests/testthat/helper-db-setup.R | 21 +- .../tests/testthat/test-get.trait.data.pft.R | 55 +- base/db/tests/testthat/test-query.traits.R | 59 +- .../tests/testthat/test.assign.treatments.R | 4 +- base/db/tests/testthat/test.check.lists.R | 8 +- base/db/tests/testthat/test.contents_sanity.R | 65 +- base/db/tests/testthat/test.convert_input.R | 18 +- .../tests/testthat/test.covariate.functions.R | 4 +- base/db/tests/testthat/test.db.utils.R | 4 +- base/db/tests/testthat/test.dbfiles.R | 107 +- base/db/tests/testthat/test.derive.traits.R | 47 +- base/db/tests/testthat/test.insert.R | 21 +- base/db/tests/testthat/test.met_inputs.R | 4 +- base/db/tests/testthat/test.query.base.R | 14 +- 
base/db/tests/testthat/test.query.data.R | 10 +- base/db/tests/testthat/test.query.dplyr.R | 43 +- base/db/tests/testthat/test.query.file.path.R | 10 +- base/db/tests/testthat/test.query.priors.R | 8 +- base/db/tests/testthat/test.query.site.R | 6 +- base/db/tests/testthat/test.query.yields.R | 18 +- base/db/tests/testthat/test.stamp.R | 6 +- .../tests/testthat/test.symmetric-setdiff.R | 16 +- base/db/tests/testthat/test.take.samples.R | 16 +- base/db/tests/testthat/test.utils_db.R | 4 +- base/logger/R/logger.R | 54 +- base/logger/R/logifnot.R | 6 +- base/logger/tests/testthat/test.logger.R | 51 +- base/logger/tests/testthat/test.logifnot.R | 36 +- base/qaqc/tests/testthat.R | 2 +- base/qaqc/tests/testthat/test-taylorplot.R | 23 +- .../testthat/test.cull_database_entries.R | 14 +- .../test.find_formats_without_inputs.R | 16 +- .../test.find_inputs_without_formats.R | 14 +- .../testthat/test.get_table_column_names.R | 8 +- .../tests/testthat/test.write_out_table.R | 6 +- base/remote/R/check_qsub_status.R | 10 +- base/remote/R/kill.tunnel.R | 2 +- base/remote/R/merge_job_files.R | 18 +- base/remote/R/open.tunnel.R | 24 +- base/remote/R/qsub_get_jobid.R | 3 +- base/remote/R/qsub_parallel.R | 134 +- base/remote/R/rabbitmq.R | 14 +- base/remote/R/remote.copy.from.R | 11 +- base/remote/R/remote.copy.to.R | 9 +- base/remote/R/remote.execute.R.R | 35 +- base/remote/R/remote.execute.cmd.R | 14 +- base/remote/R/setup_modellauncher.R | 3 +- base/remote/R/start.model.runs.R | 8 +- base/remote/R/start_qsub.R | 3 +- base/remote/R/start_rabbitmq.R | 2 +- base/remote/R/test_remote.R | 4 +- base/remote/tests/testthat.R | 1 - base/remote/tests/testthat/test-fqdn.R | 3 +- .../tests/testthat/test.check_model_run.R | 6 +- base/remote/tests/testthat/test.kill.tunnel.R | 6 +- base/remote/tests/testthat/test.localhost.R | 2 +- .../tests/testthat/test.qsub_get_jobid.R | 6 +- base/remote/tests/testthat/test.rabbitmq.R | 64 +- base/remote/tests/testthat/test.remote.R | 4 +- .../tests/testthat/test.remote.copy.from.R | 12 +- .../tests/testthat/test.remote.copy.to.R | 8 +- .../tests/testthat/test.remote.execute.R.R | 1 - .../tests/testthat/test.remote.execute.cmd.R | 3 +- base/remote/tests/testthat/test.start_qsub.R | 10 +- .../tests/testthat/test.start_rabbitmq.R | 18 +- .../remote/tests/testthat/test.start_serial.R | 12 +- base/settings/R/MultiSettings.R | 16 +- base/settings/R/addSecrets.R | 9 +- base/settings/R/check.all.settings.R | 570 ++-- base/settings/R/clean.settings.R | 2 +- base/settings/R/fix.deprecated.settings.R | 27 +- base/settings/R/get_args.R | 4 +- base/settings/R/known_bety_migrations.R | 127 +- base/settings/R/listToXml.R | 4 +- base/settings/R/loadPath_sitePFT.R | 7 +- base/settings/R/papply.R | 24 +- base/settings/R/pft_site_linker.R | 44 +- base/settings/R/prepare.settings.R | 1 - base/settings/R/site_pft_link_settings.R | 30 +- base/settings/R/update.settings.R | 81 +- base/settings/R/write.settings.R | 10 +- base/settings/examples/examples.papply.R | 19 +- base/settings/tests/testthat.R | 2 +- .../tests/testthat/helper-get.test.settings.R | 23 +- .../tests/testthat/test.MultiSettings.class.R | 21 +- .../tests/testthat/test.Safelist.class.R | 28 +- .../settings/tests/testthat/test.addSecrets.R | 26 +- .../tests/testthat/test.check.all.settings.R | 60 +- .../tests/testthat/test.check.bety.version.R | 40 +- .../tests/testthat/test.clean.settings.R | 7 +- .../test.createMultisiteMultiSettings.R | 22 +- base/settings/tests/testthat/test.get_args.R | 6 +- .../tests/testthat/test.loadPath_sitePFT.R 
| 4 +- .../tests/testthat/test.pft_site_linker.R | 6 +- .../tests/testthat/test.read.settings.R | 61 +- .../testthat/test.site_pft_link_settings.R | 6 +- .../tests/testthat/test.write.settings.R | 13 +- base/utils/R/Defunct.R | 2 +- base/utils/R/cf2date.R | 24 +- base/utils/R/clear.scratch.R | 11 +- base/utils/R/datasets.R | 3 +- base/utils/R/days_in_year.R | 10 +- base/utils/R/distn.stats.R | 24 +- base/utils/R/download.url.R | 7 +- base/utils/R/full.path.R | 3 +- base/utils/R/get.ensemble.inputs.R | 15 +- base/utils/R/help.R | 6 +- base/utils/R/listToArgString.R | 54 +- base/utils/R/mail.R | 19 +- base/utils/R/match_file.R | 6 +- base/utils/R/mcmc.list2init.R | 68 +- base/utils/R/n_leap_day.R | 9 +- base/utils/R/need_packages.R | 2 +- base/utils/R/r2bugs.distributions.R | 42 +- base/utils/R/read.output.R | 9 +- base/utils/R/read_web_config.R | 17 +- base/utils/R/remove.config.R | 15 +- base/utils/R/seconds_in_year.R | 12 +- base/utils/R/status.R | 24 +- base/utils/R/timezone_hour.R | 6 +- base/utils/R/to_nc.R | 68 +- base/utils/R/transformstats.R | 20 +- base/utils/R/ud_convert.R | 8 +- base/utils/R/unit_is_parseable.R | 18 +- base/utils/R/utils.R | 241 +- base/utils/R/write.config.utils.R | 5 +- base/utils/data/standard_vars.R | 6 +- base/utils/inst/LBNL_remote_test.R | 48 +- base/utils/scripts/metutils.R | 1039 ++++---- base/utils/scripts/time.constants.R | 20 +- base/utils/tests/testthat/helper.R | 14 +- base/utils/tests/testthat/test-read.output.R | 132 +- base/utils/tests/testthat/test-status.R | 92 +- base/utils/tests/testthat/test-ud_convert.R | 7 +- .../tests/testthat/test-unit_is_parseable.R | 1 - base/utils/tests/testthat/test.cf2date.R | 3 +- .../utils/tests/testthat/test.clear.scratch.R | 8 +- base/utils/tests/testthat/test.days_in_year.R | 2 +- base/utils/tests/testthat/test.distn.stats.R | 34 +- base/utils/tests/testthat/test.download.url.R | 14 +- .../tests/testthat/test.get.ensemble.inputs.R | 4 +- .../tests/testthat/test.listToArgString.R | 6 +- base/utils/tests/testthat/test.load_local.R | 2 +- base/utils/tests/testthat/test.n_leap_day.R | 5 +- .../utils/tests/testthat/test.need_packages.R | 5 +- .../testthat/test.r2bugs.distributions.R | 18 +- .../tests/testthat/test.seconds_in_year.R | 2 +- base/utils/tests/testthat/test.sendmail.R | 20 +- .../utils/tests/testthat/test.timezone_hour.R | 6 +- .../tests/testthat/test.trait.dictionary.R | 6 +- .../testthat/test.units_are_equivalent.R | 2 +- base/utils/tests/testthat/test.utils.R | 180 +- base/visualization/R/add_icon.R | 36 +- base/visualization/R/ciEnvelope.R | 8 +- base/visualization/R/map.output.R | 15 +- base/visualization/R/plot_netcdf.R | 62 +- base/visualization/R/plots.R | 162 +- .../R/visually.weighted.watercolor.plots.R | 132 +- .../tests/testthat/test.add_icon.R | 2 +- .../tests/testthat/test.plot_data.R | 4 +- .../tests/testthat/test.plot_netcdf.R | 6 +- base/visualization/tests/testthat/test.viz.R | 2 - base/workflow/R/create_execute_test_xml.R | 106 +- base/workflow/R/do_conversions.R | 69 +- base/workflow/R/run.write.configs.R | 149 +- base/workflow/R/runModule.get.trait.data.R | 14 +- base/workflow/R/runModule.run.write.configs.R | 15 +- base/workflow/R/start_model_runs.R | 174 +- base/workflow/inst/permutation_tests.R | 32 +- .../testthat/test-runModule.get.trait.data.R | 11 +- .../tests/testthat/test.do_conversions.R | 10 +- .../tests/testthat/test.start_model_runs.R | 1 - contrib/browndog/PEcAn#DALEC_convert.R | 54 +- contrib/browndog/PEcAn#LINKAGES_convert.R | 6 +- 
contrib/browndog/PEcAn#Sipnet_convert.R | 6 +- contrib/browndog/PEcAn#Site_convert.R | 32 +- contrib/browndog/PEcAn.R | 20 +- docker/depends/pecan.depends.R | 35 +- models/basgra/R/read_restart.BASGRA.R | 132 +- models/basgra/R/run_BASGRA.R | 843 +++--- models/basgra/R/write.config.BASGRA.R | 516 ++-- models/basgra/R/write_restart.BASGRA.R | 88 +- .../basgra/tests/testthat/test.run_BASGRA.R | 571 ++-- .../basgra/tests/testthat/test.write.config.R | 186 +- models/biocro/R/call_biocro.R | 2 - models/biocro/R/close_nc_if_open.R | 7 +- models/biocro/R/get_biocro_defaults.R | 25 +- models/biocro/R/met2model.BIOCRO.R | 84 +- models/biocro/R/model2netcdf.BIOCRO.R | 82 +- models/biocro/R/read.biocro.config.R | 18 +- models/biocro/R/run.biocro.R | 139 +- models/biocro/R/write.configs.BIOCRO.R | 138 +- models/biocro/inst/workflow.R | 18 +- models/biocro/tests/testthat.R | 2 +- models/biocro/tests/testthat/helper.R | 24 +- .../biocro/tests/testthat/test-call_biocro.R | 162 +- .../biocro/tests/testthat/test-run.biocro.R | 5 +- models/biocro/tests/testthat/test.cf2biocro.R | 11 +- models/biocro/tests/testthat/test.met2model.R | 12 +- .../tests/testthat/test.model2netcdf.BIOCRO.R | 37 +- .../testthat/test.write.configs.BIOCRO.R | 93 +- models/cable/R/met2model.CABLE.R | 11 +- models/cable/R/model2netcdf.CABLE.R | 11 +- models/cable/R/read_restart.CABLE.R | 21 +- models/cable/R/write.config.CABLE.R | 37 +- models/cable/R/write_restart.CABLE.R | 22 +- models/cable/tests/testthat.R | 4 +- models/cable/tests/testthat/test.met2model.R | 12 +- models/clm45/R/met2model.CLM45.R | 207 +- models/clm45/R/model2netcdf.CLM45.R | 835 +++--- models/clm45/R/write.configs.CLM45.R | 86 +- models/clm45/tests/testthat.R | 2 +- models/clm45/tests/testthat/test.met2model.R | 12 +- models/dalec/R/met2model.DALEC.R | 128 +- models/dalec/R/model2netcdf.DALEC.R | 170 +- models/dalec/R/write.configs.dalec.R | 85 +- models/dalec/inst/DALEC_priors.R | 192 +- models/dalec/tests/testthat/test.met2model.R | 3 +- models/dvmdostem/R/globals.R | 2 +- models/dvmdostem/R/model2netcdf.dvmdostem.R | 181 +- .../R/pecan2dvmdostem_variable_mapping.R | 46 +- models/dvmdostem/R/write.config.dvmdostem.R | 673 ++--- .../testthat/test-model2netcdf.dvmdostem.R | 9 +- .../dvmdostem/tests/testthat/test.met2model.R | 3 +- models/ed/R/SAS.ED2.R | 628 ++--- models/ed/R/SDA.helpers.ED2.R | 150 +- models/ed/R/check_ed2in.R | 6 +- models/ed/R/check_ed_metheader.R | 16 +- models/ed/R/check_veg.R | 31 +- models/ed/R/create_veg.R | 4 +- models/ed/R/data.R | 2 +- models/ed/R/download_edi.R | 6 +- models/ed/R/ed_varlookup.R | 158 +- models/ed/R/example_veg.R | 6 +- models/ed/R/get_met_dates.R | 6 +- models/ed/R/met2model.ED2.R | 115 +- models/ed/R/model2netcdf.ED2.R | 2105 ++++++++------- models/ed/R/modify_ed2in.R | 51 +- models/ed/R/other.helpers.ED2.R | 26 +- models/ed/R/parse.history.R | 18 +- models/ed/R/read_ed2in.R | 30 +- models/ed/R/read_ed_metheader.R | 20 +- models/ed/R/read_ed_veg.R | 13 +- models/ed/R/read_restart.ED2.R | 195 +- models/ed/R/run_ed_singularity.R | 12 +- models/ed/R/veg2model.ED2.R | 173 +- models/ed/R/write.configs.ed.R | 185 +- models/ed/R/write_ed2in.R | 24 +- models/ed/R/write_ed_metheader.R | 2 +- models/ed/R/write_ed_veg.R | 14 +- models/ed/R/write_restart.ED2.R | 295 +-- models/ed/data-raw/sysdata.R | 9 +- models/ed/inst/pecan.ed2.diag.plots.R | 1328 +++++----- models/ed/inst/pecan.ed2.diagnostics.R | 376 +-- models/ed/inst/plot.hdf5.R | 196 +- models/ed/inst/rewrite.config.R | 33 +- models/ed/inst/test.read.restart.R | 16 +- 
models/ed/inst/test.timeutils.R | 21 +- models/ed/inst/test.write.restart.ED2.R | 32 +- models/ed/inst/timeutils.R | 1059 ++++---- models/ed/scripts/get.model.output.ed.R | 81 +- models/ed/scripts/read.output.ed.R | 66 +- models/ed/scripts/write.configs.rscript.R | 140 +- models/ed/tests/test_ed_integration.R | 5 +- models/ed/tests/testthat.R | 2 +- models/ed/tests/testthat/test-put_ET_files.R | 20 +- models/ed/tests/testthat/test-read_ET_files.R | 19 +- models/ed/tests/testthat/test-read_S_files.R | 2 +- models/ed/tests/testthat/test.met2model.R | 9 +- .../ed/tests/testthat/test.model2netcdf.ED2.R | 72 +- .../ed/tests/testthat/test.no_storage_resp.R | 1 - .../ed/tests/testthat/test.write.configs.ed.R | 68 +- models/fates/R/met2model.FATES.R | 177 +- models/fates/R/model2netcdf.FATES.R | 188 +- models/fates/R/recurse.create.R | 7 +- models/fates/R/write.configs.FATES.R | 1835 +++++++------ models/fates/inst/model2netcdf_test.R | 12 +- models/fates/tests/testthat.R | 2 +- models/fates/tests/testthat/test.met2model.R | 3 +- models/gday/R/met2model.GDAY.R | 70 +- models/gday/R/model2netcdf.GDAY.R | 73 +- models/gday/R/write.config.GDAY.R | 167 +- models/gday/tests/testthat.R | 2 +- models/gday/tests/testthat/test.met2model.R | 3 +- models/jules/R/model2netcdf.JULES.R | 99 +- models/jules/R/write.config.JULES.R | 316 +-- models/jules/tests/testthat.R | 2 +- models/jules/tests/testthat/test.met2model.R | 3 +- models/ldndc/R/met2model.LDNDC.R | 256 +- models/ldndc/R/model2netcdf.LDNDC.R | 311 ++- models/ldndc/R/write.config.LDNDC.R | 2309 ++++++++--------- models/ldndc/tests/testthat.R | 2 +- models/ldndc/tests/testthat/test.met2model.R | 3 +- models/linkages/R/met2model.LINKAGES.R | 49 +- models/linkages/R/model2netcdf.LINKAGES.R | 106 +- models/linkages/R/read_restart.LINKAGES.R | 65 +- models/linkages/R/sample.IC.LINKAGES.R | 28 +- models/linkages/R/spinup.LINKAGES.R | 20 +- models/linkages/R/split_inputs.LINKAGES.R | 2 - models/linkages/R/write.config.LINKAGES.R | 306 +-- models/linkages/R/write_restart.LINKAGES.R | 347 +-- models/linkages/inst/LINKAGES.lyford.SDA.R | 84 +- models/linkages/inst/MIP_workflow_addn.R | 92 +- .../inst/output.visualization.LINKAGES.R | 73 +- models/linkages/inst/remove_files.R | 22 +- models/linkages/inst/setup_da.R | 102 +- .../tests/testthat/test.met2model.LINKAGES.R | 21 +- models/lpjguess/R/met2model.LPJGUESS.R | 57 +- models/lpjguess/R/model2netcdf.LPJGUESS.R | 74 +- models/lpjguess/R/readStateBinary.LPJGUESS.R | 1005 ++++--- models/lpjguess/R/write.config.LPJGUESS.R | 227 +- models/lpjguess/tests/testthat.R | 2 +- .../lpjguess/tests/testthat/test.met2model.R | 3 +- models/maat/R/met2model.MAAT.R | 184 +- models/maat/R/model2netcdf.MAAT.R | 241 +- models/maat/R/write.config.MAAT.R | 236 +- .../inst/generate_maat_met_drivers_ngee.R | 113 +- .../generate_maat_met_drivers_ngee_pa-bar.R | 111 +- models/maat/tests/testthat.R | 2 +- models/maat/tests/testthat/test.met2model.R | 3 +- models/maespa/R/met2model.MAESPA.R | 59 +- models/maespa/R/model2netcdf.MAESPA.R | 60 +- models/maespa/R/write.config.MAESPA.R | 68 +- models/maespa/tests/testthat.R | 2 +- models/maespa/tests/testthat/test.met2model.R | 3 +- models/preles/R/runPRELES.jobsh.R | 196 +- models/preles/R/write.config.PRELES.R | 30 +- models/preles/tests/testthat/test.met2model.R | 3 +- models/sibcasa/R/datasets.R | 4 +- models/sibcasa/R/met2model.SIBCASA.R | 1 - models/sibcasa/R/model2netcdf.SIBCASA.R | 2 - models/sibcasa/R/write.config.SIBCASA.R | 3 - models/sibcasa/data/sibcasa_output_vars.R | 1 - 
models/sibcasa/tests/testthat.R | 2 +- models/sibcasa/tests/testthat/test-configs.R | 11 +- models/sipnet/R/met2model.SIPNET.R | 231 +- models/sipnet/R/model2netcdf.SIPNET.R | 288 +- models/sipnet/R/read_restart.SIPNET.R | 159 +- models/sipnet/R/sample.IC.SIPNET.R | 68 +- models/sipnet/R/split_inputs.SIPNET.R | 46 +- models/sipnet/R/veg2model.SIPNET.R | 15 +- models/sipnet/R/write.configs.SIPNET.R | 361 +-- models/sipnet/R/write_restart.SIPNET.R | 85 +- models/sipnet/inst/SIPNET.lyford.SDA.R | 28 +- models/sipnet/tests/testthat.R | 2 +- models/sipnet/tests/testthat/test.met2model.R | 3 +- models/stics/R/met2model.STICS.R | 190 +- models/stics/R/model2netcdf.STICS.R | 113 +- models/stics/R/write.config.STICS.R | 1279 +++++---- models/stics/tests/testthat/test.met2model.R | 3 +- models/template/R/met2model.MODEL.R | 7 +- models/template/R/model2netcdf.MODEL.R | 7 +- models/template/R/read_restart.ModelName.R | 11 +- models/template/R/write.config.MODEL.R | 30 +- models/template/R/write_restart.ModelName.R | 14 +- models/template/tests/testthat.R | 2 +- .../template/tests/testthat/test.met2model.R | 3 +- .../DART/Kodiak/models/ED2/utils/adjValue.R | 31 +- .../Kodiak/models/ED2/utils/createInput.R | 88 +- .../Kodiak/models/ED2/utils/createTransit.R | 50 +- .../DART/Kodiak/models/ED2/utils/readValue.R | 27 +- .../DART/Kodiak/models/ED2/work/adjValue.R | 35 +- .../DART/Kodiak/models/ED2/work/createInput.R | 88 +- .../Kodiak/models/ED2/work/createTransit.R | 50 +- .../DART/Kodiak/models/ED2/work/readValue.R | 29 +- modules/DART/R/ObsSeq.R | 69 +- modules/DART/R/adjValue.R | 4 +- modules/DART/R/createInput.R | 10 +- modules/DART/R/date2month.R | 4 +- modules/DART/R/filter_ics.R | 4 +- modules/allometry/R/AllomAve.R | 165 +- modules/allometry/R/allom.BayesFit.R | 259 +- modules/allometry/R/allom.predict.R | 89 +- modules/allometry/R/query.allom.data.R | 47 +- modules/allometry/R/read.allom.data.R | 158 +- modules/allometry/inst/AllomDriver.R | 25 +- modules/allometry/inst/JenkinsPFTs.R | 207 +- modules/allometry/inst/workflow.allometry.R | 244 +- .../allometry/tests/testthat/test_AllomAve.R | 52 +- modules/allometry/tests/testthat/test_coefs.R | 36 +- modules/assim.batch/R/autoburnin.R | 48 +- modules/assim.batch/R/gelman_diag.R | 35 +- modules/assim.batch/R/get.da.data.R | 65 +- modules/assim.batch/R/get.da.data.growth.R | 82 +- modules/assim.batch/R/helpers.R | 1 - modules/assim.batch/R/hier.mcmc.R | 240 +- modules/assim.batch/R/load.L2Ameriflux.cf.R | 5 +- modules/assim.batch/R/minimize.GP.R | 218 +- modules/assim.batch/R/pda.bayesian.tools.R | 217 +- .../assim.batch/R/pda.bayestools.helpers.R | 151 +- modules/assim.batch/R/pda.define.llik.R | 174 +- modules/assim.batch/R/pda.emulator.R | 468 ++-- modules/assim.batch/R/pda.emulator.ms.R | 402 +-- .../assim.batch/R/pda.generate.externals.R | 164 +- modules/assim.batch/R/pda.get.model.output.R | 119 +- modules/assim.batch/R/pda.load.data.R | 92 +- modules/assim.batch/R/pda.mcmc.R | 235 +- modules/assim.batch/R/pda.mcmc.bs.R | 278 +- modules/assim.batch/R/pda.mcmc.recover.R | 130 +- modules/assim.batch/R/pda.neff.R | 168 +- modules/assim.batch/R/pda.postprocess.R | 310 ++- modules/assim.batch/R/pda.utils.R | 592 +++-- modules/assim.batch/R/plot.da.R | 136 +- .../tests/testthat/test.autoburnin.R | 26 +- .../tests/testthat/test.bt_prior.R | 44 +- modules/assim.sequential/R/Adjustment.R | 69 +- modules/assim.sequential/R/Analysis_sda.R | 832 +++--- .../assim.sequential/R/Analysis_sda_block.R | 487 ++-- .../R/Analysis_sda_multiSite.R | 427 +-- 
.../assim.sequential/R/Create_Site_PFT_CSV.R | 186 +- modules/assim.sequential/R/GEF_Helper.R | 145 +- modules/assim.sequential/R/Helper.functions.R | 96 +- modules/assim.sequential/R/Localization.R | 46 +- .../R/Multi_Site_Constructors.R | 214 +- modules/assim.sequential/R/Nimble_codes.R | 239 +- modules/assim.sequential/R/Prep_OBS_SDA.R | 331 +-- .../assim.sequential/R/SDA_OBS_Assembler.R | 247 +- modules/assim.sequential/R/assess.params.R | 229 +- modules/assim.sequential/R/build_X.R | 64 +- .../assim.sequential/R/downscale_function.R | 134 +- .../R/downscale_function_hrly.R | 65 +- .../assim.sequential/R/get_ensemble_weights.R | 125 +- modules/assim.sequential/R/hop_test.R | 151 +- .../assim.sequential/R/load_data_paleon_sda.R | 347 +-- modules/assim.sequential/R/matrix_operation.R | 36 +- modules/assim.sequential/R/metSplit.R | 34 +- .../R/met_filtering_helpers.R | 59 +- .../assim.sequential/R/sample.parameters.R | 33 +- modules/assim.sequential/R/sda.enkf.R | 1933 +++++++------- .../assim.sequential/R/sda.enkf_MultiSite.R | 1149 ++++---- .../assim.sequential/R/sda.enkf_refactored.R | 698 ++--- modules/assim.sequential/R/sda_matchparam.R | 33 +- modules/assim.sequential/R/sda_plotting.R | 1308 +++++----- modules/assim.sequential/R/sda_weights_site.R | 102 +- .../MultiSite-Exs/SDA/Create_Multi_settings.R | 193 +- .../inst/MultiSite-Exs/SDA/Multisite-4sites.R | 120 +- .../inst/MultiSite-Exs/SDA/Weights.R | 287 +- .../inst/MultiSite-Exs/SDA/Weights_RDS.R | 106 +- .../inst/MultiSite-Exs/SDA/Weights_Site.R | 178 +- .../inst/NEFI/US_Harvard/download_Harvard.R | 82 +- .../NEFI/US_Harvard/download_Harvard_met.R | 59 +- .../US_Harvard/download_soilmoist_harvard.R | 76 +- .../inst/NEFI/US_Los/download_Los.R | 23 +- .../inst/NEFI/US_Los/download_Los_met.R | 23 +- .../inst/NEFI/US_Potato/download_Potato.R | 132 +- .../inst/NEFI/US_Syv/download_Syv.R | 45 +- .../inst/NEFI/US_Syv/download_Syv_met.R | 22 +- .../inst/NEFI/US_Syv/download_soilmoist_Syv.R | 25 +- .../inst/NEFI/US_WCr/download_WCr.R | 50 +- .../inst/NEFI/US_WCr/download_WCr_met.R | 43 +- .../inst/NEFI/US_WCr/download_soilmoist_WCr.R | 47 +- .../inst/NEFI/US_WLEF/download_WLEF.R | 40 +- .../inst/NEFI/US_WLEF/download_WLEF_met.R | 31 +- .../NEFI/US_WLEF/download_soilmoist_WLEF.R | 25 +- modules/assim.sequential/inst/NEFI/email.R | 32 +- .../assim.sequential/inst/NEFI/email_graphs.R | 88 +- .../inst/NEFI/forecast.graphs.R | 227 +- .../inst/NEFI/generate.gefs.xml.R | 57 +- .../inst/NEFI/graph_SDA_fluxtowers.R | 439 ++-- .../inst/NEFI/graph_fluxtowers.R | 685 ++--- .../inst/RemoteLauncher/Remote_sync.R | 57 +- .../inst/RemoteLauncher/SDA_launcher.R | 60 +- modules/assim.sequential/inst/SDA_runner.R | 104 +- .../inst/WillowCreek/NoDataWorkflow.R | 230 +- .../inst/WillowCreek/SDA_Workflow.R | 525 ++-- .../assim.sequential/inst/WillowCreek/Utils.R | 64 +- .../inst/WillowCreek/download_WCr.R | 62 +- .../inst/WillowCreek/download_soilmoist_WCr.R | 37 +- .../inst/WillowCreek/gapfill_WCr.R | 89 +- .../inst/WillowCreek/prep.data.assim.R | 141 +- .../inst/WillowCreek/workflow.template.R | 343 +-- modules/assim.sequential/inst/alr_test.R | 257 +- modules/assim.sequential/inst/covariates.R | 74 +- .../inst/ensemble_adj_visualization.R | 354 +-- .../fluxnet_sandbox/ApplyingWeights_sps_v4.R | 326 ++- .../fluxnet_sandbox/FLUX_plotting_Hamze.R | 107 +- .../plot_sda_results_by_site.R | 6 +- .../hf_landscape/04_ForwardOnlyForecast.R | 73 +- .../05B_SDA_Workflow_NA.reanalysis.R | 128 +- .../05C_SDA_Workflow_NA.forecast.R | 137 +- 
.../inst/hf_landscape/05_SDA_Workflow_NA.R | 195 +- .../inst/hf_landscape/07_SMAP.R | 231 +- .../inst/hf_landscape/PEcAn2EFI.R | 114 +- .../inst/metfiltering.exploration.R | 129 +- modules/assim.sequential/inst/paleon_sda.R | 121 +- .../SDA_Workflow_LAI.R | 508 ++-- .../SDA_Workflow_NA.R | 256 +- modules/assim.sequential/inst/sda.particle.R | 142 +- modules/assim.sequential/inst/sda.rewind.R | 179 +- .../inst/sda.tobit.simulation.R | 631 ++--- .../bmorrison/Multi_Site_Constructors.R | 221 +- .../bmorrison/Multisite_SDA_Bailey.R | 283 +- .../bmorrison/Multisite_SDA_Bailey_AGB_LAI.R | 305 ++- .../Multisite_SDA_Bailey_AGB_LAI_2_sites.R | 262 +- ...isite_SDA_Bailey_AGB_LAI_2_sites_with_NA.R | 332 +-- .../Multisite_SDA_Bailey_LAI_8_days.R | 136 +- .../bmorrison/Multisite_SDA_Shawn.R | 156 +- .../bmorrison/ecoregion_lai_CONUS.R | 253 +- .../bmorrison/ecoregion_lai_agb_trends.R | 223 +- .../bmorrison/extract_500_site_data.R | 274 +- .../bmorrison/extract_50_sitegroup_data.R | 267 +- .../bmorrison/extract_50_sitegroup_data_2.R | 267 +- .../sda_backup/bmorrison/general_sda_setup.R | 187 +- .../bmorrison/general_sda_setup_2.R | 187 +- .../inst/sda_backup/bmorrison/pft_selection.R | 195 +- .../sda_backup/bmorrison/pft_site_locations.R | 139 +- .../bmorrison/register_site_group.R | 37 +- .../bmorrison/site_selection/pick_sda_sites.R | 352 ++- .../upscaling_sda_output/AGB_rf_model.R | 221 +- .../check_site_id_mixup.R | 111 +- .../rf_agb_model_2021_CODE.R | 184 +- .../sserbin/R_scripts_2/Multisite-3sites.R | 121 +- .../sserbin/R_scripts_2/Multisite-4sites.R | 121 +- .../sserbin/R_scripts_2/Multisite_SDA_BNL.R | 132 +- .../R_scripts_2/Multisite_SDA_BNL_updated.R | 142 +- .../R_scripts_2/workflow_doconversions.R | 22 +- .../sserbin/R_scripts_2/workflow_metprocess.R | 28 +- .../sserbin/Rscripts/multi_site_LAI_SDA_BNL.R | 202 +- .../sserbin/Rscripts/single_site_SDA_BNL.R | 171 +- .../inst/sda_backup/sserbin/workflow.R | 24 +- .../inst/sda_backup/sserbin/workflow_2.R | 24 +- .../inst/workflow.variance.partitioning.R | 291 ++- .../tests/testthat/test_aggregation.R | 50 +- .../tests/testthat/test_rescaling.R | 115 +- .../benchmark/R/align_by_first_observation.R | 69 +- modules/benchmark/R/align_data.R | 116 +- modules/benchmark/R/align_data_to_data_pft.R | 220 +- modules/benchmark/R/align_pft.R | 80 +- modules/benchmark/R/bm_settings.R | 53 +- modules/benchmark/R/calc_benchmark.R | 253 +- modules/benchmark/R/calc_metrics.R | 37 +- modules/benchmark/R/check_if_legal_table.R | 104 +- modules/benchmark/R/check_if_list_of_pfts.R | 10 +- modules/benchmark/R/check_if_species_list.R | 27 +- modules/benchmark/R/create_BRR.R | 52 +- modules/benchmark/R/define_benchmark.R | 167 +- modules/benchmark/R/format_wide2long.R | 145 +- .../benchmark/R/get_species_list_standard.R | 19 +- modules/benchmark/R/load_csv.R | 30 +- modules/benchmark/R/load_data.R | 119 +- modules/benchmark/R/load_netcdf.R | 38 +- modules/benchmark/R/load_rds.R | 10 +- modules/benchmark/R/load_tab.R | 38 +- modules/benchmark/R/match_timestep.R | 7 +- .../benchmark/R/mean_over_larger_timestep.R | 4 +- modules/benchmark/R/metric_AME.R | 4 +- modules/benchmark/R/metric_Frechet.R | 2 +- modules/benchmark/R/metric_MAE.R | 6 +- modules/benchmark/R/metric_MSE.R | 4 +- modules/benchmark/R/metric_PPMC.R | 2 +- modules/benchmark/R/metric_R2.R | 15 +- modules/benchmark/R/metric_RAE.R | 2 +- modules/benchmark/R/metric_RMSE.R | 4 +- modules/benchmark/R/metric_cor.R | 4 +- modules/benchmark/R/metric_lmDiag_plot.R | 67 +- modules/benchmark/R/metric_residual_plot.R | 
18 +- modules/benchmark/R/metric_run.R | 33 +- modules/benchmark/R/metric_scatter_plot.R | 19 +- modules/benchmark/R/metric_timeseries_plot.R | 20 +- modules/benchmark/R/pecan_bench.R | 69 +- .../scripts/benchmark.workflow.FATES_BCI.R | 56 +- .../inst/scripts/benchmark.workflow.R | 7 +- modules/benchmark/tests/testthat.R | 2 +- .../benchmark/tests/testthat/test-align_pft.R | 32 +- modules/data.atmosphere/R/ERA5_met_process.R | 213 +- .../data.atmosphere/R/GEFS_helper_functions.R | 868 ++++--- modules/data.atmosphere/R/align_met.R | 445 ++-- modules/data.atmosphere/R/check_met_input.R | 8 +- modules/data.atmosphere/R/closest_xy.R | 33 +- modules/data.atmosphere/R/data.R | 33 +- modules/data.atmosphere/R/debias.met.R | 154 +- .../data.atmosphere/R/debias_met_regression.R | 873 ++++--- .../data.atmosphere/R/download.Ameriflux.R | 78 +- .../data.atmosphere/R/download.AmerifluxLBL.R | 85 +- .../R/download.CRUNCEP_Global.R | 79 +- modules/data.atmosphere/R/download.ERA5.R | 136 +- modules/data.atmosphere/R/download.FACE.R | 38 +- .../data.atmosphere/R/download.Fluxnet2015.R | 106 +- .../R/download.FluxnetLaThuile.R | 79 +- modules/data.atmosphere/R/download.GFDL.R | 73 +- modules/data.atmosphere/R/download.GLDAS.R | 136 +- .../data.atmosphere/R/download.Geostreams.R | 108 +- modules/data.atmosphere/R/download.ICOS.R | 114 +- modules/data.atmosphere/R/download.MACA.R | 211 +- modules/data.atmosphere/R/download.MERRA.R | 81 +- .../data.atmosphere/R/download.MsTMIP_NARR.R | 95 +- modules/data.atmosphere/R/download.NARR.R | 65 +- .../data.atmosphere/R/download.NARR_site.R | 30 +- modules/data.atmosphere/R/download.NEONmet.R | 478 ++-- modules/data.atmosphere/R/download.NLDAS.R | 129 +- .../data.atmosphere/R/download.NOAA_GEFS.R | 107 +- modules/data.atmosphere/R/download.PalEON.R | 61 +- .../data.atmosphere/R/download.PalEON_ENS.R | 63 +- modules/data.atmosphere/R/download.US_WCr.R | 124 +- modules/data.atmosphere/R/download.US_Wlef.R | 113 +- .../R/download.raw.met.module.R | 163 +- .../R/download_noaa_gefs_efi.R | 152 +- .../R/downscaling_helper_functions.R | 73 +- modules/data.atmosphere/R/extract.nc.R | 81 +- modules/data.atmosphere/R/extract.nc.module.R | 59 +- modules/data.atmosphere/R/extract.success.R | 15 +- modules/data.atmosphere/R/extract_ERA5.R | 252 +- .../data.atmosphere/R/extract_local_CMIP5.R | 476 ++-- .../data.atmosphere/R/extract_local_NLDAS.R | 220 +- .../R/get_cf_variables_table.R | 5 +- .../data.atmosphere/R/half_hour_downscale.R | 267 +- modules/data.atmosphere/R/lightME.R | 48 +- modules/data.atmosphere/R/load.cfmet.R | 46 +- .../data.atmosphere/R/merge.met.variable.R | 161 +- modules/data.atmosphere/R/met.process.R | 317 +-- modules/data.atmosphere/R/met.process.stage.R | 25 +- modules/data.atmosphere/R/met2CF.ALMA.R | 520 ++-- modules/data.atmosphere/R/met2CF.Ameriflux.R | 207 +- .../data.atmosphere/R/met2CF.AmerifluxLBL.R | 76 +- modules/data.atmosphere/R/met2CF.FACE.R | 135 +- modules/data.atmosphere/R/met2CF.Geostreams.R | 94 +- modules/data.atmosphere/R/met2CF.ICOS.R | 17 +- modules/data.atmosphere/R/met2CF.NARR.R | 69 +- modules/data.atmosphere/R/met2CF.csv.R | 447 ++-- modules/data.atmosphere/R/met2cf.ERA5.R | 243 +- modules/data.atmosphere/R/met2cf.module.R | 102 +- modules/data.atmosphere/R/met2cf.nc.R | 3 +- modules/data.atmosphere/R/met2model.module.R | 87 +- ...met_temporal_downscale.Gaussian_ensemble.R | 282 +- .../data.atmosphere/R/metgapfill.NOAA_GEFS.R | 176 +- modules/data.atmosphere/R/metgapfill.R | 304 ++- modules/data.atmosphere/R/metgapfill.module.R | 
60 +- modules/data.atmosphere/R/metutils.R | 28 +- modules/data.atmosphere/R/nc_merge.R | 348 +-- .../data.atmosphere/R/noaa_gefs_efi_helper.R | 53 +- .../R/pecan_standard_met_table.R | 38 +- modules/data.atmosphere/R/permute.nc.R | 56 +- modules/data.atmosphere/R/read.register.R | 40 +- modules/data.atmosphere/R/site.lst.R | 6 +- modules/data.atmosphere/R/solar_angle.R | 40 +- modules/data.atmosphere/R/spin.met.R | 101 +- modules/data.atmosphere/R/split_wind.R | 95 +- .../R/tdm_generate_subdaily_models.R | 337 +-- .../data.atmosphere/R/tdm_lm_ensemble_sims.R | 402 +-- modules/data.atmosphere/R/tdm_model_train.R | 211 +- .../R/tdm_predict_subdaily_met.R | 477 ++-- modules/data.atmosphere/R/tdm_save_betas.R | 31 +- modules/data.atmosphere/R/tdm_save_model.R | 8 +- modules/data.atmosphere/R/tdm_subdaily_pred.R | 68 +- .../R/tdm_temporal_downscale_functions.R | 455 ++-- .../data.atmosphere/R/temporal.downscaling.R | 101 +- modules/data.atmosphere/R/upscale_met.R | 80 +- .../data.atmosphere/data/FLUXNET.sitemap.R | 8 +- .../inst/ERA5/ERA5_NA_download.R | 71 +- .../inst/ERA5/ERA5_db_register.R | 24 +- .../test.download.AmerifluxLBL.R | 16 +- .../integrationTests/test.download.CRUNCEP.R | 19 +- .../integrationTests/test.download.ERA5.R | 18 +- .../inst/scripts/DUKE_FACE_MET.v2.R | 419 +-- .../inst/scripts/ExtractNOAAstation.R | 232 +- .../data.atmosphere/inst/scripts/GetLSHRG.R | 13 +- .../data.atmosphere/inst/scripts/MetMerge.R | 185 +- .../inst/scripts/NACP2DRIVER.R | 267 +- .../data.atmosphere/inst/scripts/NARRdriver.R | 12 +- .../inst/scripts/NARRinventory.R | 86 +- .../inst/scripts/ORNL_FACE_MET.v2.R | 241 +- .../data.atmosphere/inst/scripts/PRISMlapse.R | 229 +- .../inst/scripts/WeathGenPPT.R | 61 +- .../inst/scripts/checkMetHDF.R | 8 +- .../inst/scripts/extract2driver.R | 519 ++-- .../inst/scripts/extract2driver.grid.R | 308 ++- .../inst/scripts/flux2lsm.v2.R | 421 ++- modules/data.atmosphere/inst/scripts/lapse.R | 52 +- .../inst/scripts/load.ameriflux.R | 25 +- .../data.atmosphere/inst/scripts/lsmMerge.R | 411 ++- .../inst/scripts/met.workflow.NARR.R | 103 +- .../inst/scripts/met.workflow.R | 17 +- .../inst/scripts/met.workflow.gen.R | 99 +- .../inst/scripts/met.workflow.params.NARR.R | 58 +- .../inst/scripts/met.workflow.params.R | 26 +- .../inst/scripts/met_ensemble_testing.R | 10 +- .../inst/scripts/ncep/met2csv.R | 184 +- .../inst/scripts/runtest-download.NOAA_GEFS.R | 44 +- modules/data.atmosphere/tests/test.NARR.R | 31 +- modules/data.atmosphere/tests/testthat.R | 2 +- .../data.atmosphere/tests/testthat/helper.R | 42 +- .../tests/testthat/test.cf-downscaling.R | 62 +- .../tests/testthat/test.check_met_file.R | 11 +- .../testthat/test.download.AmerifluxLBL.R | 147 +- .../tests/testthat/test.download.CRUNCEP.R | 18 +- .../tests/testthat/test.download.GFDLR.R | 18 +- .../tests/testthat/test.download.MERRA.R | 10 +- .../tests/testthat/test.download.NARR.R | 3 +- .../testthat/test.download.raw.met.module.R | 45 +- .../tests/testthat/test.load.cfmet.R | 86 +- .../tests/testthat/test.met.process.R | 37 +- .../tests/testthat/test.met2CF.ALMA.R | 54 +- .../tests/testthat/test.met2CF.csv.R | 82 +- .../tests/testthat/test.metutils.R | 23 +- .../tests/testthat/test.upscale_met.R | 21 +- modules/data.land/R/DataONE_doi_download.R | 96 +- modules/data.land/R/IC_BADM_Utilities.R | 252 +- modules/data.land/R/ISCN_extract.R | 8 +- modules/data.land/R/InventoryGrowthFusion.R | 456 ++-- .../R/InventoryGrowthFusionDiagnostics.R | 139 +- modules/data.land/R/Read_Tuscon.R | 18 +- 
modules/data.land/R/Soilgrids_SoilC_prep.R | 35 +- .../R/buildJAGSdata_InventoryRings.R | 51 +- modules/data.land/R/cohort2pool.R | 113 +- modules/data.land/R/data.R | 22 +- modules/data.land/R/dataone_download.R | 52 +- modules/data.land/R/download.SM_CDS.R | 136 +- .../data.land/R/download_NEON_soilmoisture.R | 99 +- modules/data.land/R/ens.veg.module.R | 73 +- modules/data.land/R/extract_FIA.R | 126 +- modules/data.land/R/extract_NEON_veg.R | 193 +- modules/data.land/R/extract_SM_CDS.R | 96 +- modules/data.land/R/extract_soil_nc.R | 469 ++-- modules/data.land/R/extract_veg.R | 332 +-- modules/data.land/R/fia2ED.R | 310 ++- modules/data.land/R/fuse_plot_treering.R | 98 +- modules/data.land/R/gSSURGO_Query.R | 107 +- modules/data.land/R/get.soil.R | 16 +- modules/data.land/R/get_veg_module.R | 41 +- modules/data.land/R/gis.functions.R | 111 +- modules/data.land/R/ic_process.R | 231 +- modules/data.land/R/land.utils.R | 10 +- modules/data.land/R/load_veg.R | 61 +- modules/data.land/R/matchInventoryRings.R | 19 +- modules/data.land/R/match_pft.R | 90 +- modules/data.land/R/match_species_id.R | 117 +- modules/data.land/R/partition_roots.R | 32 +- modules/data.land/R/plot2AGB.R | 88 +- modules/data.land/R/pool_ic_list2netcdf.R | 68 +- modules/data.land/R/pool_ic_netcdf2list.R | 16 +- modules/data.land/R/prepare_pools.R | 141 +- modules/data.land/R/put_veg_module.R | 91 +- modules/data.land/R/read.plot.R | 10 +- modules/data.land/R/read.velmex.R | 3 +- modules/data.land/R/sample_ic.R | 327 ++- modules/data.land/R/soil2netcdf.R | 102 +- modules/data.land/R/soil_process.R | 98 +- modules/data.land/R/soil_utils.R | 313 +-- .../data.land/R/soilgrids_soc_extraction.R | 195 +- modules/data.land/R/write_ic.R | 67 +- modules/data.land/R/write_veg.R | 16 +- .../data-raw/build_soil_texture_variables.R | 290 ++- modules/data.land/data-raw/load_BADM.R | 4 +- modules/data.land/inst/BCI_AGB.R | 62 +- modules/data.land/inst/CanopyCover2NLCD.R | 172 +- modules/data.land/inst/LoadFLUXNETsites.R | 272 +- modules/data.land/inst/LoadPalEONsites.R | 383 ++- .../inst/Multi_Site_IC_Process_Script.R | 115 +- modules/data.land/inst/StatsModel_FIA.R | 114 +- modules/data.land/inst/diametergrow.R | 859 +++--- modules/data.land/inst/download_vswc.R | 74 +- modules/data.land/inst/meta2format.EML.R | 22 +- modules/data.land/inst/treecore_StateSpace.R | 8 +- modules/data.land/tests/testthat.R | 2 +- .../tests/testthat/test-PFT_consistency.R | 31 +- .../tests/testthat/test-match_species_id.R | 29 +- .../tests/testthat/test-soil_params.R | 31 +- .../data.mining/inst/ModelResidualSpectra.R | 70 +- modules/data.mining/inst/NACPspecPost.R | 339 +-- .../data.mining/inst/NACPspectral.Keenan.R | 107 +- modules/data.mining/inst/NACPspectral.R | 86 +- modules/data.mining/inst/NACPspectral.v2.R | 393 +-- modules/data.mining/inst/NullSpectra.R | 46 +- modules/data.mining/inst/NullSpectra.v2.R | 47 +- modules/data.mining/inst/NullSpectraPost.R | 126 +- modules/data.mining/inst/NullSpectraPost.v2.R | 55 +- modules/data.mining/inst/ResidSpectra.R | 100 +- modules/data.mining/inst/SpectralSlice.R | 82 +- modules/data.mining/inst/bigmv.R | 29 +- modules/data.remote/R/GEDI_AGB_prep.R | 329 +-- modules/data.remote/R/LandTrendr.AGB.R | 249 +- modules/data.remote/R/Landtrendr_AGB_prep.R | 232 +- modules/data.remote/R/MODIS_LAI_prep.R | 204 +- modules/data.remote/R/MODIS_LC_prep.R | 191 +- modules/data.remote/R/NASA_DAAC_download.R | 71 +- modules/data.remote/R/NLCD.R | 203 +- .../R/Prep_AGB_IC_from_2010_global.R | 33 +- 
modules/data.remote/R/SMAP_SMP_prep.R | 100 +- modules/data.remote/R/call_MODIS.R | 422 ++- modules/data.remote/R/download.thredds.R | 111 +- .../data.remote/R/extract_phenology_MODIS.R | 153 +- modules/data.remote/R/regrid.R | 28 +- modules/data.remote/R/remote_process.R | 738 +++--- .../data.remote/inst/FieldvsSMAP_compare.R | 706 ++--- .../data.remote/inst/SMAPvsModel_Comparison.R | 359 +-- modules/data.remote/inst/download_SMAP.R | 53 +- .../inst/download_SMAP_gee2pecan.R | 139 +- .../inst/scripts/Bayesian_curve_fitting.R | 546 ++-- .../inst/scripts/ChEAS_FIA_03142014.R | 876 +++---- .../inst/scripts/ChEAS_FIA_04042014.R | 250 +- .../inst/scripts/Download_OSU_AGB_data_v5.R | 106 +- .../inst/scripts/Restrict_to_growing_season.R | 74 +- .../inst/scripts/figure_generator.R | 45 +- .../data.remote/inst/scripts/old/ChEAS_FIA.R | 593 ++--- .../inst/scripts/old/ChEAS_FIA_02102014.R | 763 +++--- .../inst/scripts/old/ChEAS_FIA_03102014.R | 807 +++--- .../inst/scripts/old/ChEAS_FIA_03112014.R | 799 +++--- .../inst/scripts/old/ChEAS_FIA_03132014.R | 859 +++--- .../LMfit_and_disturbance_chronosequence.R | 387 +-- .../inst/scripts/old/Park_Falls_Golf.R | 358 ++- .../inst/scripts/old/Park_Falls_Runway.R | 195 +- modules/data.remote/inst/scripts/old/ROI_sd.R | 18 +- .../inst/scripts/old/Raster_snapshots.R | 38 +- .../inst/scripts/old/build_params_table.R | 35 +- .../inst/scripts/old/build_polygons.R | 17 +- .../inst/scripts/old/curve_fitting_07282013.R | 280 +- .../inst/scripts/old/curve_fitting_07292013.R | 288 +- .../inst/scripts/old/extract_UNDERC_points.R | 275 +- .../inst/scripts/old/extract_WLEF_buff7.R | 185 +- .../old/extract_WLEF_using_ChEAS_FIA.R | 332 +-- .../old/extract_palsar_metadata_function.R | 273 +- .../scripts/old/extract_polygons_function.R | 275 +- .../old/extract_polygons_function_v05092013.R | 74 +- .../old/extract_polygons_function_v05282013.R | 236 +- .../old/extract_polygons_function_v06252013.R | 230 +- .../old/extract_polygons_function_v06262013.R | 271 +- .../data.remote/inst/scripts/old/ll.monod.R | 76 +- .../inst/scripts/old/ll.monod_v07292013.R | 120 +- .../inst/scripts/old/read_binary.R | 8 +- .../inst/scripts/palsar_extractor.R | 304 +-- .../data.remote/inst/scripts/palsar_plotter.R | 640 ++--- .../data.remote/inst/scripts/spp_cleanup.R | 121 +- modules/emulator/R/GaussProcess.R | 167 +- modules/emulator/R/arate.R | 2 +- modules/emulator/R/calcSpatialCov.R | 4 +- modules/emulator/R/calcSpatialCov.list.R | 20 +- modules/emulator/R/calcSpatialCov.matrix.R | 10 +- modules/emulator/R/distance.R | 2 +- modules/emulator/R/distance.matrix.R | 6 +- modules/emulator/R/distance12.matrix.R | 6 +- modules/emulator/R/gp_mle.R | 22 +- modules/emulator/R/groupid.R | 5 +- modules/emulator/R/jump.R | 6 +- modules/emulator/R/ldinvgamma.R | 19 +- modules/emulator/R/lhc.R | 27 +- modules/emulator/R/nderiv.R | 4 +- modules/emulator/R/p.R | 4 +- modules/emulator/R/p.jump.R | 8 +- modules/emulator/R/plot.jump.R | 29 +- modules/emulator/R/predict.GP.R | 41 +- modules/emulator/R/predict.density.R | 16 +- modules/emulator/R/summarize.GP.R | 28 +- modules/emulator/R/update.jump.R | 20 +- modules/emulator/R/zzz.R | 30 +- modules/meta.analysis/R/approx.posterior.R | 88 +- modules/meta.analysis/R/jagify.R | 62 +- modules/meta.analysis/R/meta.analysis.R | 60 +- .../meta.analysis/R/meta.analysis.summary.R | 48 +- .../R/meta.analysis.write.model.R | 9 +- modules/meta.analysis/R/rename_jags_columns.R | 26 +- modules/meta.analysis/R/run.meta.analysis.R | 143 +- modules/meta.analysis/R/single.MA.R | 59 
+- modules/meta.analysis/R/zzz.R | 15 +- .../tests/testthat/test.approx.posterior.R | 10 +- .../tests/testthat/test.jagify.R | 32 +- .../tests/testthat/test.run.meta.analysis.R | 14 +- modules/photosynthesis/R/Licor.QC.R | 102 +- modules/photosynthesis/R/fitA.R | 261 +- modules/photosynthesis/R/plots.R | 105 +- modules/photosynthesis/code/BB_model.R | 427 +-- modules/photosynthesis/code/C3_photomodel.R | 549 ++-- .../photosynthesis/code/C3_photomodel_XF.R | 244 +- .../code/FBB_cluster_processing.R | 202 +- modules/photosynthesis/code/FBB_compare.R | 825 +++--- modules/photosynthesis/code/FBB_functions.R | 172 +- modules/photosynthesis/code/FBB_main.R | 202 +- modules/photosynthesis/code/FBB_mcmc.R | 450 ++-- .../code/FBB_multi_chain_analysis.R | 623 ++--- modules/photosynthesis/code/FBB_param_corr.R | 94 +- modules/photosynthesis/code/FBB_setup.R | 24 +- modules/photosynthesis/code/FBB_summary.R | 54 +- .../Process_LiCor_GE_ACi_Data.R | 354 +-- .../photo.processing.functions.R | 428 +-- modules/photosynthesis/code/test.fitA.R | 97 +- modules/priors/R/plots.R | 100 +- modules/priors/R/priors.R | 103 +- modules/priors/tests/testthat/test.priors.R | 55 +- modules/rtm/R/bayestools.R | 122 +- modules/rtm/R/cbind.spectra.R | 2 +- modules/rtm/R/check.convergence.R | 18 +- modules/rtm/R/defparam.R | 8 +- modules/rtm/R/edr.wrapper.R | 47 +- modules/rtm/R/fortran.datamodule.R | 54 +- modules/rtm/R/generate-rsr.R | 62 +- modules/rtm/R/generate.noise.R | 6 +- modules/rtm/R/gpm.R | 2 +- modules/rtm/R/helpers.R | 2 +- modules/rtm/R/invert.auto.R | 184 +- modules/rtm/R/invert.custom.R | 135 +- modules/rtm/R/invert.lsq.R | 12 +- modules/rtm/R/neff.R | 40 +- modules/rtm/R/params2edr.R | 31 +- modules/rtm/R/plot.spectra.R | 8 +- modules/rtm/R/print.spectra.R | 2 +- modules/rtm/R/print_results_summary.R | 4 +- modules/rtm/R/prior.R | 28 +- modules/rtm/R/pro2s.R | 14 +- modules/rtm/R/process-output.R | 38 +- modules/rtm/R/process.specresponse.R | 36 +- modules/rtm/R/prosail.R | 34 +- modules/rtm/R/prospect.R | 28 +- modules/rtm/R/resample.R | 20 +- modules/rtm/R/sail.R | 22 +- modules/rtm/R/select.spectra.R | 3 +- modules/rtm/R/setup_edr.R | 15 +- modules/rtm/R/spectra.R | 3 +- modules/rtm/R/spectral-response.R | 22 +- modules/rtm/R/truncnorm.R | 10 +- modules/rtm/inst/scripts/analyze_results.R | 2 +- .../rtm/inst/scripts/benchmark_inversion.R | 28 +- modules/rtm/inst/scripts/edr_test.R | 82 +- modules/rtm/inst/scripts/effectivesize.R | 23 +- modules/rtm/inst/scripts/invert_quadratic.R | 28 +- modules/rtm/inst/scripts/like_scale_compare.R | 31 +- modules/rtm/inst/scripts/read_dataspec.R | 6 +- modules/rtm/tests/testthat.R | 2 +- modules/rtm/tests/testthat/test.2s.R | 17 +- modules/rtm/tests/testthat/test.foursail.R | 24 +- modules/rtm/tests/testthat/test.gpm.R | 3 +- .../tests/testthat/test.invert_bayestools.R | 73 +- .../rtm/tests/testthat/test.invert_simple.R | 26 +- modules/rtm/tests/testthat/test.prospect.R | 22 +- modules/rtm/tests/testthat/test.resample.R | 8 +- modules/rtm/tests/testthat/test.sail.R | 8 +- modules/rtm/tests/testthat/test.spectra.R | 4 +- modules/rtm/vignettes/edr.sensitivity.R | 116 +- modules/rtm/vignettes/invert.edr.R | 119 +- modules/rtm/vignettes/test.edr.R | 94 +- modules/uncertainty/R/ensemble.R | 412 +-- modules/uncertainty/R/flux_uncertainty.R | 164 +- modules/uncertainty/R/get.parameter.samples.R | 168 +- modules/uncertainty/R/get.results.R | 146 +- modules/uncertainty/R/plots.R | 170 +- modules/uncertainty/R/run.ensemble.analysis.R | 275 +- 
.../uncertainty/R/run.sensitivity.analysis.R | 315 +-- modules/uncertainty/R/sensitivity.R | 312 ++- modules/uncertainty/R/variance.R | 55 +- .../inst/abbreviated_workflow_SIPNET.R | 20 +- modules/uncertainty/tests/testthat.R | 2 +- .../tests/testthat/test.plot.sensitivity.R | 5 +- .../test.plot.variance.decomposition.R | 11 +- .../testthat/test.sensitivity.analysis.R | 21 +- scripts/EFI_metprocess.R | 88 +- scripts/EFI_workflow.R | 150 +- scripts/HARV_metdownload_efi.R | 67 +- scripts/Rfcn.R | 3 +- scripts/check_with_errors.R | 230 +- scripts/confirm_deps.R | 1 - scripts/dependencies.R | 34 +- scripts/efi_data_process.R | 291 ++- scripts/format.sh | 56 + scripts/generate_dependencies.R | 30 +- scripts/install_shiny_deps.R | 17 +- scripts/quickbuild.R | 12 +- scripts/workflow.bm.R | 68 +- scripts/workflow.pda.R | 113 +- scripts/workflow.treering.R | 24 +- scripts/workflow.wcr.assim.R | 72 +- shiny/BenchmarkReport/server.R | 18 +- shiny/BenchmarkReport/ui.R | 37 +- shiny/Data-Ingest/Data_Ingest_Functions.R | 4 +- shiny/Data-Ingest/app.R | 179 +- shiny/Data-Ingest/helper.R | 12 +- .../Data-Ingest/modules/d1_download_module.R | 109 +- shiny/Data-Ingest/modules/dbFiles_module.R | 52 +- shiny/Data-Ingest/modules/formats_module.R | 110 +- shiny/Data-Ingest/modules/inputs_module.R | 117 +- .../Data-Ingest/modules/local_upload_module.R | 108 +- .../server_files/create_input_record_svr.R | 28 +- .../server_files/d1_download_svr.R | 92 +- .../server_files/dbfiles_record_svr.R | 17 +- .../server_files/formats_record_svr.R | 17 +- .../server_files/ingest_workflow_svr.R | 289 ++- .../server_files/input_record_svr.R | 55 +- .../server_files/local_upload_svr.R | 69 +- .../ui_files/create_input_record_ui.R | 52 +- shiny/Data-Ingest/ui_files/d1_download_ui.R | 39 +- .../Data-Ingest/ui_files/dbfiles_record_ui.R | 46 +- .../Data-Ingest/ui_files/formats_record_ui.R | 84 +- shiny/Data-Ingest/ui_files/homepage_ui.R | 5 +- .../Data-Ingest/ui_files/ingest_workflow_ui.R | 421 +-- shiny/Data-Ingest/ui_files/input_record_ui.R | 275 +- .../ui_files/local_file_upload_ui.R | 62 +- shiny/Data-Ingest/ui_files/sidebar_ui.R | 20 +- shiny/Data-Ingest/ui_utils.R | 1 - shiny/Elicitation/server.R | 184 +- shiny/Elicitation/ui.R | 16 +- .../animated_WCr_graphs.R | 181 +- shiny/ForecastingDashboard/wcr.graphs.R | 69 +- shiny/Pecan.depend/SERVER.R | 577 ++-- shiny/Pecan.depend/UI.R | 251 +- shiny/Pecan.depend/global.R | 68 +- shiny/SDAdashboard/Utilities/Math_Utility.R | 64 +- shiny/SDAdashboard/Utilities/uihelp.R | 37 +- shiny/SDAdashboard/server.R | 867 ++++--- shiny/SDAdashboard/ui.R | 68 +- shiny/ViewMet/server.R | 339 +-- shiny/ViewMet/ui.R | 60 +- shiny/dbsync/app.R | 469 ++-- shiny/global-sensitivity/load_ensemble.R | 115 +- shiny/global-sensitivity/plotEnsemble.R | 91 +- shiny/global-sensitivity/server.R | 160 +- shiny/global-sensitivity/ui.R | 52 +- shiny/workflowPlot/helper.R | 28 +- shiny/workflowPlot/server.R | 174 +- .../server_files/benchmarking_server.R | 931 ++++--- .../server_files/history_server.R | 194 +- .../server_files/model_data_plots_server.R | 348 +-- .../server_files/model_plots_server.R | 245 +- .../server_files/pdf_viewer_server.R | 63 +- .../server_files/select_data_server.R | 223 +- .../server_files/sidebar_server.R | 508 ++-- shiny/workflowPlot/ui.R | 172 +- .../ui_files/benchmarking_ScoresPlots_UI.R | 86 +- .../ui_files/benchmarking_settings_UI.R | 11 +- .../workflowPlot/ui_files/documentation_UI.R | 50 +- .../ui_files/model_data_plots_UI.R | 30 +- shiny/workflowPlot/ui_files/model_plots_UI.R | 32 
+- shiny/workflowPlot/ui_files/select_data_UI.R | 4 +- shiny/workflowPlot/ui_files/sidebar_UI.R | 41 +- shiny/workflowPlot/ui_utils.R | 2 +- shiny/workflowPlot/workflowPlot_fcns.R | 75 +- tests/interactive-workflow.R | 61 +- tests/testpfts.R | 35 +- web/plot.netcdf.R | 23 +- web/workflow.R | 24 +-
 1098 files changed, 70014 insertions(+), 64164 deletions(-)
 create mode 100755 scripts/format.sh

diff --git a/apps/api/R/auth.R b/apps/api/R/auth.R
index 1458b511f84..8c21513a21a 100644
--- a/apps/api/R/auth.R
+++ b/apps/api/R/auth.R
@@ -7,14 +7,14 @@ library(dplyr)
 #* @return Encrypted password
 #* @author Tezan Sahu
 get_crypt_pass <- function(username, password, secretkey = NA) {
-  secretkey <- if(is.na(secretkey)) "notasecret" else secretkey
+  secretkey <- if (is.na(secretkey)) "notasecret" else secretkey
   dig <- secretkey
   salt <- username
   for (i in 1:10) {
     dig <- digest::digest(
-      paste(dig, salt, password, secretkey, sep="--"),
-      algo="sha1",
-      serialize=FALSE
+      paste(dig, salt, password, secretkey, sep = "--"),
+      algo = "sha1",
+      serialize = FALSE
     )
   }
   return(dig)
@@ -28,16 +28,17 @@ get_crypt_pass <- function(username, password, secretkey = NA) {
 #* @return TRUE if encrypted password is correct, else FALSE
 #* @author Tezan Sahu
 validate_crypt_pass <- function(username, crypt_pass) {
-
   res <- tbl(global_db_pool, "users") %>%
-    filter(login == username,
-           crypted_password == crypt_pass) %>%
+    filter(
+      login == username,
+      crypted_password == crypt_pass
+    ) %>%
     collect()

   if (nrow(res) == 1) {
     return(res$id)
   }
-
+
   return(NA)
 }
@@ -49,12 +50,12 @@ validate_api_key <- function(api_key) {
   res <- tbl(global_db_pool, "users") %>%
     filter(apikey == api_key) %>%
     collect()
-
+
   if (nrow(res) == 1) {
     return(res)
   }
-
-  return(NA)
+
+  return(NA)
 }

 #* Filter to authenticate a user calling the PEcAn API
@@ -65,39 +66,37 @@ authenticate_user <- function(req, res) {
   # Fix CORS issues
   res$setHeader("Access-Control-Allow-Origin", "*")
-
+
   # If the API endpoint that do not require authentication
   if (
     Sys.getenv("AUTH_REQ") == FALSE ||
-    grepl("swagger", req$PATH_INFO, ignore.case = TRUE) ||
-    grepl("openapi.json", req$PATH_INFO, fixed = TRUE) ||
-    grepl("/api/ping", req$PATH_INFO, ignore.case = TRUE) ||
-    grepl("/api/status", req$PATH_INFO, ignore.case = TRUE))
-  {
+      grepl("swagger", req$PATH_INFO, ignore.case = TRUE) ||
+      grepl("openapi.json", req$PATH_INFO, fixed = TRUE) ||
+      grepl("/api/ping", req$PATH_INFO, ignore.case = TRUE) ||
+      grepl("/api/status", req$PATH_INFO, ignore.case = TRUE)) {
     req$user$userid <- NA
     req$user$username <- ""
     return(plumber::forward())
   }
-
+
   if (!is.null(req$HTTP_AUTHORIZATION)) {
-    # HTTP_AUTHORIZATION is of the form "Basic ",
+    # HTTP_AUTHORIZATION is of the form "Basic ",
     # where the is contains :
     auth_details <- strsplit(rawToChar(jsonlite::base64_dec(strsplit(req$HTTP_AUTHORIZATION, " +")[[1]][2])), ":")[[1]]
     username <- auth_details[1]
     password <- auth_details[2]
     crypt_pass <- get_crypt_pass(username, password)
-
+
     userid <- validate_crypt_pass(username, crypt_pass)
-
-    if(! is.na(userid)){
+
+    if (!is.na(userid)) {
      req$user$userid <- userid
       req$user$username <- username
       return(plumber::forward())
     }
-
   }
-
-  if(!is.null(req$HTTP_X_API_KEY)) {
+
+  if (!is.null(req$HTTP_X_API_KEY)) {
     key <- req$HTTP_X_API_KEY
     # HTTP_X_API_KEY is of the form "api_key"
     user <- validate_api_key(key)
@@ -105,14 +104,14 @@ authenticate_user <- function(req, res) {
     username <- user$login
     print(userid)
     print(username)
-
-    if(! is.na(userid)){
+
+    if (!is.na(userid)) {
       req$user$userid <- userid
       req$user$username <- username
       return(plumber::forward())
     }
   }
-
+
   res$status <- 401 # Unauthorized
-  return(list(error="Authentication required"))
+  return(list(error = "Authentication required"))
 }
diff --git a/apps/api/R/available-models.R b/apps/api/R/available-models.R
index aa93e9db7ad..96748215ef5 100644
--- a/apps/api/R/available-models.R
+++ b/apps/api/R/available-models.R
@@ -29,8 +29,10 @@ availableModels <- function(machine_name = "docker", machine_id = NA) {
     dplyr::select(modeltype_id = id, modeltype = name)

   modelfiles %>%
-    dplyr::select(dbfile_id = id, file_name, file_path,
-                  model_id = container_id) %>%
+    dplyr::select(
+      dbfile_id = id, file_name, file_path,
+      model_id = container_id
+    ) %>%
     dplyr::inner_join(models, c("model_id" = "id")) %>%
     dplyr::inner_join(modeltypes, "modeltype_id") %>%
     dplyr::collect()
diff --git a/apps/api/R/entrypoint.R b/apps/api/R/entrypoint.R
index 5f1d8a3fb94..04cfe2b358f 100755
--- a/apps/api/R/entrypoint.R
+++ b/apps/api/R/entrypoint.R
@@ -1,6 +1,6 @@
 #!/usr/bin/env Rscript

-#* This is the entry point to the PEcAn API.
+#* This is the entry point to the PEcAn API.
 #* All API endpoints (& filters) are mounted here
 #* @author Tezan Sahu

@@ -8,21 +8,21 @@ source("auth.R")
 source("general.R")

 # Set up the global database pool
-#.bety_params <- PEcAn.DB::get_postgres_envvars(
+# .bety_params <- PEcAn.DB::get_postgres_envvars(
 #   host = "localhost",
 #   dbname = "bety",
 #   user = "bety",
 #   password = "bety",
 #   driver = "Postgres"
-#)
+# )
 #
-#.bety_params$driver <- NULL
-#.bety_params$drv <- RPostgres::Postgres()
-#global_db_pool <- do.call(pool::dbPool, .bety_params)
+# .bety_params$driver <- NULL
+# .bety_params$drv <- RPostgres::Postgres()
+# global_db_pool <- do.call(pool::dbPool, .bety_params)
 global_db_pool <- PEcAn.DB::betyConnect()

 # redirect to trailing slash
-plumber::options_plumber(trailingSlash=TRUE)
+plumber::options_plumber(trailingSlash = TRUE)

 # root router
 root <- plumber::Plumber$new()
@@ -31,7 +31,7 @@ root$setSerializer(plumber::serializer_unboxed_json())
 # Filter for authenticating users trying to hit the API endpoints
 root$filter("require-auth", authenticate_user)

-# The /api/ping & /api/status are standalone API endpoints
+# The /api/ping & /api/status are standalone API endpoints
 # implemented using handle() because of restrictions of plumber
 # to mount multiple endpoints on the same path (or subpath)
 root$handle("GET", "/api/ping", ping)
@@ -77,4 +77,4 @@ root$setDebug(TRUE)

 # The API server is bound to 0.0.0.0 on port 8000
 # The Swagger UI for the API draws its source from the pecanapi-spec.yml file
-root$run(host="0.0.0.0", port=8000)
+root$run(host = "0.0.0.0", port = 8000)
diff --git a/apps/api/R/formats.R b/apps/api/R/formats.R
index d805fc48e7c..390927774c8 100644
--- a/apps/api/R/formats.R
+++ b/apps/api/R/formats.R
@@ -5,40 +5,38 @@ library(dplyr)
 #' @return Format details
 #' @author Tezan Sahu
 #* @get /
-getFormat <- function(format_id, res){
-
+getFormat <- function(format_id, res) {
   Format <- tbl(global_db_pool, "formats") %>%
     select(format_id = id, name, notes, header, mimetype_id) %>%
     filter(format_id == !!format_id)
-
+
   Format <- tbl(global_db_pool, "mimetypes") %>%
     select(mimetype_id = id, mimetype = type_string) %>%
     inner_join(Format, by = "mimetype_id") %>%
     select(-mimetype_id)
-
+
   qry_res <- Format %>% collect()
-
+
   if (nrow(qry_res) == 0) {
     res$status <- 404
-    return(list(error="Format not found"))
-  }
-  else {
+    return(list(error = "Format not found"))
+  } else {
     # Convert the response from tibble to list
     response <- list()
-    for(colname in colnames(qry_res)){
+    for (colname in colnames(qry_res)) {
       response[colname] <- qry_res[colname]
     }
-
+
     format_vars <- tbl(global_db_pool, "formats_variables") %>%
       select(name, unit, format_id, variable_id) %>%
       filter(format_id == !!format_id)
     format_vars <- tbl(global_db_pool, "variables") %>%
       select(variable_id = id, description, units) %>%
-      inner_join(format_vars, by="variable_id") %>%
+      inner_join(format_vars, by = "variable_id") %>%
       mutate(unit = ifelse(unit %in% "", units, unit)) %>%
       select(-variable_id, -format_id, -units) %>%
       collect()
-
+
     response$format_variables <- format_vars
     return(response)
   }
@@ -53,28 +51,27 @@ getFormat <- function(format_id, res){
 #' @return Formats subset matching the model search string
 #' @author Tezan Sahu
 #* @get /
-searchFormats <- function(format_name="", mimetype="", ignore_case=TRUE, res){
+searchFormats <- function(format_name = "", mimetype = "", ignore_case = TRUE, res) {
   format_name <- URLdecode(format_name)
   mimetype <- URLdecode(mimetype)
-
+
   Formats <- tbl(global_db_pool, "formats") %>%
-    select(format_id = id, format_name=name, mimetype_id) %>%
-    filter(grepl(!!format_name, format_name, ignore.case=ignore_case))
-
+    select(format_id = id, format_name = name, mimetype_id) %>%
+    filter(grepl(!!format_name, format_name, ignore.case = ignore_case))
+
   Formats <- tbl(global_db_pool, "mimetypes") %>%
     select(mimetype_id = id, mimetype = type_string) %>%
     inner_join(Formats, by = "mimetype_id") %>%
-    filter(grepl(!!mimetype, mimetype, ignore.case=ignore_case)) %>%
+    filter(grepl(!!mimetype, mimetype, ignore.case = ignore_case)) %>%
     select(-mimetype_id) %>%
     arrange(format_id)
-
+
   qry_res <- Formats %>% collect()
-
+
   if (nrow(qry_res) == 0) {
     res$status <- 404
-    return(list(error="Format(s) not found"))
-  }
-  else {
-    return(list(formats=qry_res, count = nrow(qry_res)))
+    return(list(error = "Format(s) not found"))
+  } else {
+    return(list(formats = qry_res, count = nrow(qry_res)))
   }
 }
diff --git a/apps/api/R/general.R b/apps/api/R/general.R
index 5f5c9ec36b2..66d12883987 100644
--- a/apps/api/R/general.R
+++ b/apps/api/R/general.R
@@ -2,8 +2,8 @@
 #* If successful connection to API server is established, this function will return the "pong" message
 #* @return Mapping containing response as "pong"
 #* @author Tezan Sahu
-ping <- function(req){
-  res <- list(request="ping", response="pong")
+ping <- function(req) {
+  res <- list(request = "ping", response = "pong")
   res
 }

@@ -12,18 +12,18 @@ ping <- function(req){
 #* @author Tezan Sahu
 status <- function() {
   ## helper function to obtain environment variables
-  get_env_var = function (item, default = "unknown") {
-    value = Sys.getenv(item)
+  get_env_var <- function(item, default = "unknown") {
+    value <- Sys.getenv(item)
     if (value == "") default else value
   }
-
+
   res <- list(host_details = PEcAn.DB::dbHostInfo(global_db_pool))
-  res$host_details$authentication_required = get_env_var("AUTH_REQ")
-
+  res$host_details$authentication_required <- get_env_var("AUTH_REQ")
+
   res$pecan_details <- list(
-    version = get_env_var("PECAN_VERSION"),
-    branch = get_env_var("PECAN_GIT_BRANCH"),
+    version = get_env_var("PECAN_VERSION"),
+    branch = get_env_var("PECAN_GIT_BRANCH"),
     gitsha1 = get_env_var("PECAN_GIT_CHECKSUM")
   )
   return(res)
-}
\ No newline at end of file
+}
diff --git a/apps/api/R/get.file.R b/apps/api/R/get.file.R
index 1d1fdeda9c3..ccf48cdf7f0 100644
--- a/apps/api/R/get.file.R
+++ b/apps/api/R/get.file.R
-9,34 +9,34 @@ library(dplyr) #' @author Tezan Sehu get.file <- function(filepath, userid) { # Check if the file path is valid - if(! file.exists(filepath)){ + if (!file.exists(filepath)) { return(list(status = "Error", message = "File not found")) } - + # Check if the workflow for run after obtaining absolute path is owned by the user or not parent_dir <- normalizePath(dirname(filepath)) - run_id <- substr(parent_dir, stringi::stri_locate_last(parent_dir, regex="/")[1] + 1, stringr::str_length(parent_dir)) - - if(Sys.getenv("AUTH_REQ") == TRUE) { + run_id <- substr(parent_dir, stringi::stri_locate_last(parent_dir, regex = "/")[1] + 1, stringr::str_length(parent_dir)) + if (Sys.getenv("AUTH_REQ") == TRUE) { Run <- tbl(global_db_pool, "runs") %>% filter(id == !!run_id) Run <- tbl(global_db_pool, "ensembles") %>% - select(ensemble_id=id, workflow_id) %>% - full_join(Run, by="ensemble_id") %>% + select(ensemble_id = id, workflow_id) %>% + full_join(Run, by = "ensemble_id") %>% filter(id == !!run_id) user_id <- tbl(global_db_pool, "workflows") %>% - select(workflow_id=id, user_id) %>% full_join(Run, by="workflow_id") %>% + select(workflow_id = id, user_id) %>% + full_join(Run, by = "workflow_id") %>% filter(id == !!run_id) %>% pull(user_id) - - if(! user_id == userid) { + + if (!user_id == userid) { return(list(status = "Error", message = "Access forbidden")) } } - + # Read the data in binary form & return it - bin <- readBin(filepath,'raw', n = file.info(filepath)$size) + bin <- readBin(filepath, "raw", n = file.info(filepath)$size) return(list(file_contents = bin)) } diff --git a/apps/api/R/inputs.R b/apps/api/R/inputs.R index fd28a3a324a..d1412a57366 100644 --- a/apps/api/R/inputs.R +++ b/apps/api/R/inputs.R @@ -8,78 +8,77 @@ library(dplyr) #' @return Information about Inputs based on model & site #' @author Tezan Sahu #* @get / -searchInputs <- function(req, model_id=NA, site_id=NA, format_id=NA, host_id=NA, offset=0, limit=50, res){ - if (! limit %in% c(10, 20, 50, 100, 500)) { +searchInputs <- function(req, model_id = NA, site_id = NA, format_id = NA, host_id = NA, offset = 0, limit = 50, res) { + if (!limit %in% c(10, 20, 50, 100, 500)) { res$status <- 400 return(list(error = "Invalid value for parameter")) } - + inputs <- tbl(global_db_pool, "inputs") %>% - select(input_name=name, id, site_id, format_id, start_date, end_date) - + select(input_name = name, id, site_id, format_id, start_date, end_date) + inputs <- tbl(global_db_pool, "dbfiles") %>% - select(file_name, file_path, container_type, id=container_id, machine_id) %>% + select(file_name, file_path, container_type, id = container_id, machine_id) %>% inner_join(inputs, by = "id") %>% - filter(container_type == 'Input') %>% + filter(container_type == "Input") %>% select(-container_type) - + inputs <- tbl(global_db_pool, "machines") %>% - select(hostname, machine_id=id) %>% - inner_join(inputs, by='machine_id') - + select(hostname, machine_id = id) %>% + inner_join(inputs, by = "machine_id") + inputs <- tbl(global_db_pool, "formats") %>% select(format_id = id, format_name = name, mimetype_id) %>% - inner_join(inputs, by='format_id') - + inner_join(inputs, by = "format_id") + inputs <- tbl(global_db_pool, "mimetypes") %>% select(mimetype_id = id, mimetype = type_string) %>% - inner_join(inputs, by='mimetype_id') %>% + inner_join(inputs, by = "mimetype_id") %>% select(-mimetype_id) - + inputs <- tbl(global_db_pool, "sites") %>% select(site_id = id, sitename) %>% - inner_join(inputs, by='site_id') - - if(! 
is.na(model_id)) { + inner_join(inputs, by = "site_id") + + if (!is.na(model_id)) { inputs <- tbl(global_db_pool, "modeltypes_formats") %>% select(tag, modeltype_id, format_id, input) %>% - inner_join(inputs, by='format_id') %>% + inner_join(inputs, by = "format_id") %>% filter(input) %>% select(-input) - + inputs <- tbl(global_db_pool, "models") %>% select(model_id = id, modeltype_id, model_name, revision) %>% - inner_join(inputs, by='modeltype_id') %>% + inner_join(inputs, by = "modeltype_id") %>% filter(model_id == !!model_id) %>% select(-modeltype_id, -model_id) } - - if(! is.na(site_id)) { + + if (!is.na(site_id)) { inputs <- inputs %>% filter(site_id == !!site_id) } - - if(! is.na(format_id)) { + + if (!is.na(format_id)) { inputs <- inputs %>% filter(format_id == !!format_id) } - - if(! is.na(host_id)) { + + if (!is.na(host_id)) { inputs <- inputs %>% filter(machine_id == !!host_id) } - + qry_res <- inputs %>% select(-site_id, -format_id, -machine_id) %>% distinct() %>% arrange(id) %>% collect() - + if (nrow(qry_res) == 0 || as.numeric(offset) >= nrow(qry_res)) { res$status <- 404 - return(list(error="Input(s) not found")) - } - else { + return(list(error = "Input(s) not found")) + } else { has_next <- FALSE has_prev <- FALSE if (nrow(qry_res) > (as.numeric(offset) + as.numeric(limit))) { @@ -88,13 +87,13 @@ searchInputs <- function(req, model_id=NA, site_id=NA, format_id=NA, host_id=NA, if (as.numeric(offset) != 0) { has_prev <- TRUE } - + qry_res <- qry_res[(as.numeric(offset) + 1):min((as.numeric(offset) + as.numeric(limit)), nrow(qry_res)), ] - + result <- list(inputs = qry_res) result$count <- nrow(qry_res) - if(has_next){ - if(grepl("offset=", req$QUERY_STRING, fixed = TRUE)){ + if (has_next) { + if (grepl("offset=", req$QUERY_STRING, fixed = TRUE)) { result$next_page <- paste0( req$rook.url_scheme, "://", req$HTTP_HOST, @@ -102,11 +101,10 @@ searchInputs <- function(req, model_id=NA, site_id=NA, format_id=NA, host_id=NA, req$PATH_INFO, substr(req$QUERY_STRING, 0, stringr::str_locate(req$QUERY_STRING, "offset=")[[2]]), (as.numeric(limit) + as.numeric(offset)), - "&limit=", + "&limit=", limit ) - } - else { + } else { result$next_page <- paste0( req$rook.url_scheme, "://", req$HTTP_HOST, @@ -115,24 +113,24 @@ searchInputs <- function(req, model_id=NA, site_id=NA, format_id=NA, host_id=NA, substr(req$QUERY_STRING, 0, stringr::str_locate(req$QUERY_STRING, "limit=")[[2]] - 6), "offset=", (as.numeric(limit) + as.numeric(offset)), - "&limit=", + "&limit=", limit ) } } - if(has_prev) { + if (has_prev) { result$prev_page <- paste0( req$rook.url_scheme, "://", req$HTTP_HOST, "/api/inputs", - req$PATH_INFO, + req$PATH_INFO, substr(req$QUERY_STRING, 0, stringr::str_locate(req$QUERY_STRING, "offset=")[[2]]), max(0, (as.numeric(offset) - as.numeric(limit))), - "&limit=", + "&limit=", limit ) } - + return(result) } } @@ -147,47 +145,46 @@ searchInputs <- function(req, model_id=NA, site_id=NA, format_id=NA, host_id=NA, #' @author Tezan Sahu #* @serializer contentType list(type="application/octet-stream") #* @get / -downloadInput <- function(input_id, filename="", req, res){ +downloadInput <- function(input_id, filename = "", req, res) { db_hostid <- PEcAn.DB::dbHostInfo(global_db_pool)$hostid - + # This is just for temporary testing due to the existing issue in dbHostInfo() db_hostid <- ifelse(db_hostid == 99, 99000000001, db_hostid) - + input <- tbl(global_db_pool, "dbfiles") %>% select(file_name, file_path, container_id, machine_id, container_type) %>% filter(machine_id == !!db_hostid) %>% 
filter(container_type == "Input") %>% filter(container_id == !!input_id) %>% collect() - + if (nrow(input) == 0) { res$status <- 404 return() - } - else { + } else { # Generate the full file path using the file_path & file_name filepath <- paste0(input$file_path, "/", input$file_name) - + # If the id points to a directory, check if 'filename' within this directory has been specified - if(dir.exists(filepath)) { + if (dir.exists(filepath)) { # If no filename is provided, return 400 Bad Request error - if(filename == "") { + if (filename == "") { res$status <- 400 return() } - + # Append the filename to the filepath filepath <- paste0(filepath, filename) } - + # If the file doesn't exist, return 404 error - if(! file.exists(filepath)){ + if (!file.exists(filepath)) { res$status <- 404 return() } - + # Read the data in binary form & return it - bin <- readBin(filepath,'raw', n = file.info(filepath)$size) + bin <- readBin(filepath, "raw", n = file.info(filepath)$size) return(bin) } } diff --git a/apps/api/R/models.R b/apps/api/R/models.R index 1a74b54210e..4bacb64f06d 100644 --- a/apps/api/R/models.R +++ b/apps/api/R/models.R @@ -5,32 +5,31 @@ library(dplyr) #' @return Model details #' @author Tezan Sahu #* @get / -getModel <- function(model_id, res){ - +getModel <- function(model_id, res) { Model <- tbl(global_db_pool, "models") %>% select(model_id = id, model_name, revision, modeltype_id) %>% filter(model_id == !!model_id) - + Model <- tbl(global_db_pool, "modeltypes") %>% select(modeltype_id = id, model_type = name) %>% inner_join(Model, by = "modeltype_id") - + qry_res <- Model %>% collect() - + if (nrow(qry_res) == 0) { res$status <- 404 - return(list(error="Model not found")) - } - else { + return(list(error = "Model not found")) + } else { # Convert the response from tibble to list response <- list() - for(colname in colnames(qry_res)){ + for (colname in colnames(qry_res)) { response[colname] <- qry_res[colname] } - - inputs_req <- tbl(global_db_pool, "modeltypes_formats") %>% - filter(modeltype_id == bit64::as.integer64(qry_res$modeltype_id)) %>% - select(input=tag, required) %>% collect() + + inputs_req <- tbl(global_db_pool, "modeltypes_formats") %>% + filter(modeltype_id == bit64::as.integer64(qry_res$modeltype_id)) %>% + select(input = tag, required) %>% + collect() response$inputs <- jsonlite::fromJSON(gsub('(\")', '"', jsonlite::toJSON(inputs_req))) return(response) } @@ -45,23 +44,22 @@ getModel <- function(model_id, res){ #' @return Model subset matching the model search string #' @author Tezan Sahu #* @get / -searchModels <- function(model_name="", revision="", ignore_case=TRUE, res){ +searchModels <- function(model_name = "", revision = "", ignore_case = TRUE, res) { model_name <- URLdecode(model_name) revision <- URLdecode(revision) - + Models <- tbl(global_db_pool, "models") %>% select(model_id = id, model_name, revision) %>% - filter(grepl(!!model_name, model_name, ignore.case=ignore_case)) %>% - filter(grepl(!!revision, revision, ignore.case=ignore_case)) %>% + filter(grepl(!!model_name, model_name, ignore.case = ignore_case)) %>% + filter(grepl(!!revision, revision, ignore.case = ignore_case)) %>% arrange(model_id) - + qry_res <- Models %>% collect() if (nrow(qry_res) == 0) { res$status <- 404 - return(list(error="Model(s) not found")) - } - else { - return(list(models=qry_res, count = nrow(qry_res))) + return(list(error = "Model(s) not found")) + } else { + return(list(models = qry_res, count = nrow(qry_res))) } } diff --git a/apps/api/R/pfts.R b/apps/api/R/pfts.R index 
732340759aa..68fa0ad6d33 100644 --- a/apps/api/R/pfts.R +++ b/apps/api/R/pfts.R @@ -5,31 +5,29 @@ library(dplyr) #' @return PFT details #' @author Tezan Sahu #* @get / -getPfts <- function(pft_id, res){ - +getPfts <- function(pft_id, res) { pft <- tbl(global_db_pool, "pfts") %>% select(pft_id = id, pft_name = name, definition, pft_type, modeltype_id) %>% filter(pft_id == !!pft_id) - + pft <- tbl(global_db_pool, "modeltypes") %>% select(modeltype_id = id, model_type = name) %>% inner_join(pft, by = "modeltype_id") - - qry_res <- pft %>% - select(-modeltype_id) %>% + + qry_res <- pft %>% + select(-modeltype_id) %>% collect() - + if (nrow(qry_res) == 0) { res$status <- 404 - return(list(error="PFT not found")) - } - else { + return(list(error = "PFT not found")) + } else { # Convert the response from tibble to list response <- list() - for(colname in colnames(qry_res)){ + for (colname in colnames(qry_res)) { response[colname] <- qry_res[colname] } - + return(response) } } @@ -44,36 +42,35 @@ getPfts <- function(pft_id, res){ #' @return PFT subset matching the searc criteria #' @author Tezan Sahu #* @get / -searchPfts <- function(pft_name="", pft_type="", model_type="", ignore_case=TRUE, res){ +searchPfts <- function(pft_name = "", pft_type = "", model_type = "", ignore_case = TRUE, res) { pft_name <- URLdecode(pft_name) pft_type <- URLdecode(pft_type) model_type <- URLdecode(model_type) - - if(! pft_type %in% c("", "plant", "cultivar")){ + + if (!pft_type %in% c("", "plant", "cultivar")) { res$status <- 400 return(list(error = "Invalid pft_type")) } - + pfts <- tbl(global_db_pool, "pfts") %>% select(pft_id = id, pft_name = name, pft_type, modeltype_id) - + pfts <- tbl(global_db_pool, "modeltypes") %>% select(modeltype_id = id, model_type = name) %>% inner_join(pfts, by = "modeltype_id") - - qry_res <- pfts %>% - filter(grepl(!!pft_name, pft_name, ignore.case=ignore_case)) %>% - filter(grepl(!!pft_type, pft_type, ignore.case=ignore_case)) %>% - filter(grepl(!!model_type, model_type, ignore.case=ignore_case)) %>% + + qry_res <- pfts %>% + filter(grepl(!!pft_name, pft_name, ignore.case = ignore_case)) %>% + filter(grepl(!!pft_type, pft_type, ignore.case = ignore_case)) %>% + filter(grepl(!!model_type, model_type, ignore.case = ignore_case)) %>% select(-modeltype_id) %>% arrange(pft_id) %>% collect() - + if (nrow(qry_res) == 0) { res$status <- 404 - return(list(error="PFT(s) not found")) - } - else { - return(list(pfts=qry_res, count = nrow(qry_res))) + return(list(error = "PFT(s) not found")) + } else { + return(list(pfts = qry_res, count = nrow(qry_res))) } } diff --git a/apps/api/R/runs.R b/apps/api/R/runs.R index 3898f771f7e..8aa1378c52d 100644 --- a/apps/api/R/runs.R +++ b/apps/api/R/runs.R @@ -8,33 +8,32 @@ source("get.file.R") #' @return List of runs (belonging to a particuar workflow) #' @author Tezan Sahu #* @get / -getRuns <- function(req, workflow_id=NA, offset=0, limit=50, res){ - if (! limit %in% c(10, 20, 50, 100, 500)) { +getRuns <- function(req, workflow_id = NA, offset = 0, limit = 50, res) { + if (!limit %in% c(10, 20, 50, 100, 500)) { res$status <- 400 return(list(error = "Invalid value for parameter")) } Runs <- tbl(global_db_pool, "runs") %>% select(id, model_id, site_id, parameter_list, ensemble_id, start_time, finish_time) - + Runs <- tbl(global_db_pool, "ensembles") %>% - select(runtype, ensemble_id=id, workflow_id) %>% - full_join(Runs, by="ensemble_id") - - if(! 
is.na(workflow_id)){ + select(runtype, ensemble_id = id, workflow_id) %>% + full_join(Runs, by = "ensemble_id") + + if (!is.na(workflow_id)) { Runs <- Runs %>% filter(workflow_id == !!workflow_id) } - - qry_res <- Runs %>% + + qry_res <- Runs %>% arrange(id) %>% collect() - + if (nrow(qry_res) == 0 || as.numeric(offset) >= nrow(qry_res)) { res$status <- 404 - return(list(error="Run(s) not found")) - } - else { + return(list(error = "Run(s) not found")) + } else { has_next <- FALSE has_prev <- FALSE if (nrow(qry_res) > (as.numeric(offset) + as.numeric(limit))) { @@ -46,8 +45,8 @@ getRuns <- function(req, workflow_id=NA, offset=0, limit=50, res){ qry_res <- qry_res[(as.numeric(offset) + 1):min((as.numeric(offset) + as.numeric(limit)), nrow(qry_res)), ] result <- list(runs = qry_res) result$count <- nrow(qry_res) - if(has_next){ - if(grepl("offset=", req$QUERY_STRING, fixed = TRUE)){ + if (has_next) { + if (grepl("offset=", req$QUERY_STRING, fixed = TRUE)) { result$next_page <- paste0( req$rook.url_scheme, "://", req$HTTP_HOST, @@ -55,11 +54,10 @@ getRuns <- function(req, workflow_id=NA, offset=0, limit=50, res){ req$PATH_INFO, substr(req$QUERY_STRING, 0, stringr::str_locate(req$QUERY_STRING, "offset=")[[2]]), (as.numeric(limit) + as.numeric(offset)), - "&limit=", + "&limit=", limit ) - } - else { + } else { result$next_page <- paste0( req$rook.url_scheme, "://", req$HTTP_HOST, @@ -68,24 +66,24 @@ getRuns <- function(req, workflow_id=NA, offset=0, limit=50, res){ substr(req$QUERY_STRING, 0, stringr::str_locate(req$QUERY_STRING, "limit=")[[2]] - 6), "offset=", (as.numeric(limit) + as.numeric(offset)), - "&limit=", + "&limit=", limit ) } } - if(has_prev) { + if (has_prev) { result$prev_page <- paste0( req$rook.url_scheme, "://", req$HTTP_HOST, "/api/runs", - req$PATH_INFO, + req$PATH_INFO, substr(req$QUERY_STRING, 0, stringr::str_locate(req$QUERY_STRING, "offset=")[[2]]), max(0, (as.numeric(offset) - as.numeric(limit))), - "&limit=", + "&limit=", limit ) } - + return(result) } } @@ -97,57 +95,55 @@ getRuns <- function(req, workflow_id=NA, offset=0, limit=50, res){ #' @return Details of requested run #' @author Tezan Sahu #* @get / -getRunDetails <- function(req, run_id, res){ - +getRunDetails <- function(req, run_id, res) { Runs <- tbl(global_db_pool, "runs") %>% select(-outdir, -outprefix, -setting, -created_at, -updated_at) - + Runs <- tbl(global_db_pool, "ensembles") %>% - select(runtype, ensemble_id=id, workflow_id) %>% - full_join(Runs, by="ensemble_id") %>% + select(runtype, ensemble_id = id, workflow_id) %>% + full_join(Runs, by = "ensemble_id") %>% filter(id == !!run_id) - + qry_res <- Runs %>% collect() - - if(Sys.getenv("AUTH_REQ") == TRUE){ + + if (Sys.getenv("AUTH_REQ") == TRUE) { user_id <- tbl(global_db_pool, "workflows") %>% - select(workflow_id=id, user_id) %>% full_join(Runs, by="workflow_id") %>% + select(workflow_id = id, user_id) %>% + full_join(Runs, by = "workflow_id") %>% filter(id == !!run_id) %>% pull(user_id) } - + if (nrow(qry_res) == 0) { res$status <- 404 - return(list(error="Run with specified ID was not found")) - } - else { - - if(Sys.getenv("AUTH_REQ") == TRUE) { + return(list(error = "Run with specified ID was not found")) + } else { + if (Sys.getenv("AUTH_REQ") == TRUE) { # If user id of requested run does not match the caller of the API, return 403 Access forbidden - if(is.na(user_id) || user_id != req$user$userid){ + if (is.na(user_id) || user_id != req$user$userid) { res$status <- 403 - return(list(error="Access forbidden")) + return(list(error = "Access 
forbidden")) } } - + # Convert the response from tibble to list response <- list() - for(colname in colnames(qry_res)){ + for (colname in colnames(qry_res)) { response[colname] <- qry_res[colname] } - + # If inputs exist on the host, add them to the response indir <- paste0(Sys.getenv("DATA_DIR", "/data/"), "workflows/PEcAn_", response$workflow_id, "/run/", run_id) - if(dir.exists(indir)){ + if (dir.exists(indir)) { response$inputs <- getRunInputs(indir) } - + # If outputs exist on the host, add them to the response outdir <- paste0(Sys.getenv("DATA_DIR", "/data/"), "workflows/PEcAn_", response$workflow_id, "/out/", run_id) - if(dir.exists(outdir)){ + if (dir.exists(outdir)) { response$outputs <- getRunOutputs(outdir) } - + return(response) } } @@ -161,26 +157,25 @@ getRunDetails <- function(req, run_id, res){ #' @author Tezan Sahu #* @serializer contentType list(type="application/octet-stream") #* @get //input/ -getRunInputFile <- function(req, run_id, filename, res){ - +getRunInputFile <- function(req, run_id, filename, res) { Run <- tbl(global_db_pool, "runs") %>% filter(id == !!run_id) - + workflow_id <- tbl(global_db_pool, "ensembles") %>% - select(ensemble_id=id, workflow_id) %>% - full_join(Run, by="ensemble_id") %>% + select(ensemble_id = id, workflow_id) %>% + full_join(Run, by = "ensemble_id") %>% filter(id == !!run_id) %>% pull(workflow_id) - - inputpath <- paste0( Sys.getenv("DATA_DIR", "/data/"), "workflows/PEcAn_", workflow_id, "/run/", run_id, "/", filename) + + inputpath <- paste0(Sys.getenv("DATA_DIR", "/data/"), "workflows/PEcAn_", workflow_id, "/run/", run_id, "/", filename) result <- get.file(inputpath, req$user$userid) - if(is.null(result$file_contents)){ - if(result$status == "Error" && result$message == "Access forbidden") { + if (is.null(result$file_contents)) { + if (result$status == "Error" && result$message == "Access forbidden") { res$status <- 403 return() } - if(result$status == "Error" && result$message == "File not found") { + if (result$status == "Error" && result$message == "File not found") { res$status <- 404 return() } @@ -197,26 +192,25 @@ getRunInputFile <- function(req, run_id, filename, res){ #' @author Tezan Sahu #* @serializer contentType list(type="application/octet-stream") #* @get //output/ -getRunOutputFile <- function(req, run_id, filename, res){ - +getRunOutputFile <- function(req, run_id, filename, res) { Run <- tbl(global_db_pool, "runs") %>% filter(id == !!run_id) - + workflow_id <- tbl(global_db_pool, "ensembles") %>% - select(ensemble_id=id, workflow_id) %>% - full_join(Run, by="ensemble_id") %>% + select(ensemble_id = id, workflow_id) %>% + full_join(Run, by = "ensemble_id") %>% filter(id == !!run_id) %>% pull(workflow_id) - + outputpath <- paste0(Sys.getenv("DATA_DIR", "/data/"), "workflows/PEcAn_", workflow_id, "/out/", run_id, "/", filename) - + result <- get.file(outputpath, req$user$userid) - if(is.null(result$file_contents)){ - if(result$status == "Error" && result$message == "Access forbidden") { + if (is.null(result$file_contents)) { + if (result$status == "Error" && result$message == "Access forbidden") { res$status <- 403 return() } - if(result$status == "Error" && result$message == "File not found") { + if (result$status == "Error" && result$message == "File not found") { res$status <- 404 return() } @@ -238,43 +232,43 @@ getRunOutputFile <- function(req, run_id, filename, res){ #* @get //graph// #* @serializer contentType list(type='image/png') -plotResults <- function(req, run_id, year, y_var, x_var="time", width=800, 
height=600, res) { +plotResults <- function(req, run_id, year, y_var, x_var = "time", width = 800, height = 600, res) { # Get workflow_id for the run Run <- tbl(global_db_pool, "runs") %>% filter(id == !!run_id) - + workflow_id <- tbl(global_db_pool, "ensembles") %>% - select(ensemble_id=id, workflow_id) %>% - full_join(Run, by="ensemble_id") %>% + select(ensemble_id = id, workflow_id) %>% + full_join(Run, by = "ensemble_id") %>% filter(id == !!run_id) %>% pull(workflow_id) - - if(Sys.getenv("AUTH_REQ") == TRUE){ + + if (Sys.getenv("AUTH_REQ") == TRUE) { user_id <- tbl(global_db_pool, "workflows") %>% select(id, user_id) %>% filter(id == !!workflow_id) %>% pull(user_id) } - + # Check if the data file exists on the host datafile <- paste0(Sys.getenv("DATA_DIR", "/data/"), "workflows/PEcAn_", workflow_id, "/out/", run_id, "/", year, ".nc") - if(! file.exists(datafile)){ + if (!file.exists(datafile)) { res$status <- 404 return() } - - if(Sys.getenv("AUTH_REQ") == TRUE) { + + if (Sys.getenv("AUTH_REQ") == TRUE) { # If user id of requested run does not match the caller of the API, return 403 Access forbidden - if(is.na(user_id) || user_id != req$user$userid){ + if (is.na(user_id) || user_id != req$user$userid) { res$status <- 403 - return(list(error="Access forbidden")) + return(list(error = "Access forbidden")) } } - + # Plot & return filename <- paste0(Sys.getenv("DATA_DIR", "/data/"), "workflows/temp", stringi::stri_rand_strings(1, 10), ".png") - PEcAn.visualization::plot_netcdf(datafile, y_var, x_var, as.integer(width), as.integer(height), year=year, filename=filename) - img_bin <- readBin(filename,'raw',n = file.info(filename)$size) + PEcAn.visualization::plot_netcdf(datafile, y_var, x_var, as.integer(width), as.integer(height), year = year, filename = filename) + img_bin <- readBin(filename, "raw", n = file.info(filename)$size) file.remove(filename) return(img_bin) } @@ -287,9 +281,9 @@ plotResults <- function(req, run_id, year, y_var, x_var="time", width=800, heigh #' @return Input details of the run #' @author Tezan Sahu -getRunInputs <- function(indir){ +getRunInputs <- function(indir) { inputs <- list() - if(file.exists(paste0(indir, "/README.txt"))){ + if (file.exists(paste0(indir, "/README.txt"))) { inputs$info <- "README.txt" } all_files <- list.files(indir) @@ -304,26 +298,26 @@ getRunInputs <- function(indir){ #' @return Output details of the run #' @author Tezan Sahu -getRunOutputs <- function(outdir){ +getRunOutputs <- function(outdir) { outputs <- list() - if(file.exists(paste0(outdir, "/logfile.txt"))){ + if (file.exists(paste0(outdir, "/logfile.txt"))) { outputs$logfile <- "logfile.txt" } - - if(file.exists(paste0(outdir, "/README.txt"))){ + + if (file.exists(paste0(outdir, "/README.txt"))) { outputs$info <- "README.txt" } - - year_files <- list.files(outdir, pattern="*.nc$") + + year_files <- list.files(outdir, pattern = "*.nc$") years <- stringr::str_replace_all(year_files, ".nc", "") years_data <- c() outputs$years <- list() - for(year in years){ + for (year in years) { var_lines <- readLines(paste0(outdir, "/", year, ".nc.var")) keys <- stringr::word(var_lines, 1) values <- stringr::word(var_lines, 2, -1) vars <- list() - for(i in 1:length(keys)){ + for (i in 1:length(keys)) { vars[keys[i]] <- values[i] } years_data <- c(years_data, list(list( @@ -331,7 +325,7 @@ getRunOutputs <- function(outdir){ variables = vars ))) } - for(i in 1:length(years)){ + for (i in 1:length(years)) { outputs$years[years[i]] <- years_data[i] } return(outputs) diff --git a/apps/api/R/sites.R 
b/apps/api/R/sites.R index 09b6abba4b2..250de24d1f6 100644 --- a/apps/api/R/sites.R +++ b/apps/api/R/sites.R @@ -5,23 +5,21 @@ library(dplyr) #' @return Site details #' @author Tezan Sahu #* @get / -getSite <- function(site_id, res){ - +getSite <- function(site_id, res) { site <- tbl(global_db_pool, "sites") %>% select(-created_at, -updated_at, -user_id, -geometry) %>% filter(id == !!site_id) - - + + qry_res <- site %>% collect() - + if (nrow(qry_res) == 0) { res$status <- 404 - return(list(error="Site not found")) - } - else { + return(list(error = "Site not found")) + } else { # Convert the response from tibble to list response <- list() - for(colname in colnames(qry_res)){ + for (colname in colnames(qry_res)) { response[colname] <- qry_res[colname] } return(response) @@ -36,22 +34,21 @@ getSite <- function(site_id, res){ #' @return Site subset matching the site search string #' @author Tezan Sahu #* @get / -searchSite <- function(sitename="", ignore_case=TRUE, res){ +searchSite <- function(sitename = "", ignore_case = TRUE, res) { sitename <- URLdecode(sitename) - + sites <- tbl(global_db_pool, "sites") %>% select(id, sitename) %>% - filter(grepl(!!sitename, sitename, ignore.case=ignore_case)) %>% + filter(grepl(!!sitename, sitename, ignore.case = ignore_case)) %>% arrange(id) - - + + qry_res <- sites %>% collect() - + if (nrow(qry_res) == 0) { res$status <- 404 - return(list(error="Site(s) not found")) - } - else { - return(list(sites=qry_res, count = nrow(qry_res))) + return(list(error = "Site(s) not found")) + } else { + return(list(sites = qry_res, count = nrow(qry_res))) } } diff --git a/apps/api/R/submit.workflow.R b/apps/api/R/submit.workflow.R index 34d34b3853d..de8b499d77e 100644 --- a/apps/api/R/submit.workflow.R +++ b/apps/api/R/submit.workflow.R @@ -5,11 +5,10 @@ library(dplyr) #* @param userDetails List containing userid & username #* @return ID & status of the submitted workflow #* @author Tezan Sahu -submit.workflow.xml <- function(workflowXmlString, userDetails){ - +submit.workflow.xml <- function(workflowXmlString, userDetails) { workflowXml <- XML::xmlParseString(stringr::str_replace(workflowXmlString, "\n", "")) workflowList <- XML::xmlToList(workflowXml) - + return(submit.workflow.list(workflowList, userDetails)) } @@ -20,10 +19,9 @@ submit.workflow.xml <- function(workflowXmlString, userDetails){ #* @param userDetails List containing userid & username #* @return ID & status of the submitted workflow #* @author Tezan Sahu -submit.workflow.json <- function(workflowJsonString, userDetails){ - +submit.workflow.json <- function(workflowJsonString, userDetails) { workflowList <- jsonlite::fromJSON(workflowJsonString) - + return(submit.workflow.list(workflowList, userDetails)) } @@ -35,7 +33,6 @@ submit.workflow.json <- function(workflowJsonString, userDetails){ #* @return ID & status of the submitted workflow #* @author Tezan Sahu submit.workflow.list <- function(workflowList, userDetails) { - # Set database details workflowList$database <- list( bety = PEcAn.DB::get_postgres_envvars( @@ -48,15 +45,18 @@ submit.workflow.list <- function(workflowList, userDetails) { ) if (is.null(workflowList$model$id)) { - return(list(status = "Error", - error = "Must provide model ID.")) + return(list( + status = "Error", + error = "Must provide model ID." 
+ )) } # Get model revision and type for the RabbitMQ queue model_info <- dplyr::tbl(global_db_pool, "models") %>% dplyr::filter(id == !!workflowList$model$id) %>% dplyr::inner_join(dplyr::tbl(global_db_pool, "modeltypes"), - by = c("modeltype_id" = "id")) %>% + by = c("modeltype_id" = "id") + ) %>% dplyr::collect() if (nrow(model_info) < 1) { @@ -82,16 +82,16 @@ submit.workflow.list <- function(workflowList, userDetails) { queue = paste0(model_type, "_", model_revision) ) ) - workflowList$host$name <- if(hostInfo$hostname == "") "localhost" else hostInfo$hostname + workflowList$host$name <- if (hostInfo$hostname == "") "localhost" else hostInfo$hostname # Fix the info workflowList$info$notes <- workflowList$info$notes - if(is.null(workflowList$info$userid)){ + if (is.null(workflowList$info$userid)) { workflowList$info$userid <- userDetails$userid } - if(is.null(workflowList$info$username)){ + if (is.null(workflowList$info$username)) { workflowList$info$username <- userDetails$username } - if(is.null(workflowList$info$date)){ + if (is.null(workflowList$info$date)) { workflowList$info$date <- Sys.time() } @@ -104,36 +104,35 @@ submit.workflow.list <- function(workflowList, userDetails) { insert.attribute(workflowList) # Fix the output directory - outdir <- paste0(Sys.getenv("DATA_DIR", "/data/"), "workflows/PEcAn_", - workflow_id_str) + outdir <- paste0( + Sys.getenv("DATA_DIR", "/data/"), "workflows/PEcAn_", + workflow_id_str + ) workflowList$outdir <- outdir - + # Create output diretory - dir.create(outdir, recursive=TRUE) + dir.create(outdir, recursive = TRUE) # Modify the `dbfiles` path & create the directory if needed workflowList$run$dbfiles <- Sys.getenv("DBFILES_DIR", "/data/dbfiles/") - if(! dir.exists(workflowList$run$dbfiles)){ + if (!dir.exists(workflowList$run$dbfiles)) { dir.create(workflowList$run$dbfiles, recursive = TRUE) } - + # Convert settings list to XML & save it into outdir workflowXml <- PEcAn.settings::listToXml(workflowList, "pecan") XML::saveXML(workflowXml, paste0(outdir, "/pecan.xml")) res <- file.copy("/work/workflow.R", outdir) - + # Post workflow to RabbitMQ message <- list(folder = outdir, workflowid = workflow_id_str) res <- PEcAn.remote::rabbitmq_post_message(workflowList$host$rabbitmq$uri, "pecan", message) - - if(res$routed){ + + if (res$routed) { return(list(workflow_id = workflow_id_str, status = "Submitted successfully")) - } - else{ + } else { return(list(status = "Error", message = "Could not submit to RabbitMQ")) } - - } ################################################################################################# @@ -142,15 +141,14 @@ submit.workflow.list <- function(workflowList, userDetails) { #* @param workflowList List containing the workflow details #* @return ID of the submitted workflow #* @author Tezan Sahu -insert.workflow <- function(workflowList){ - +insert.workflow <- function(workflowList) { model_id <- workflowList$model$id - if(is.null(model_id)){ + if (is.null(model_id)) { model_id <- PEcAn.DB::get.id("models", c("model_name", "revision"), c(workflowList$model$type, workflowList$model$revision), global_db_pool) } - + start_time <- Sys.time() - + workflow_df <- tibble::tibble( "site_id" = bit64::as.integer64(workflowList$run$site$id), "model_id" = bit64::as.integer64(model_id), @@ -161,8 +159,8 @@ insert.workflow <- function(workflowList){ "advanced_edit" = FALSE, "started_at" = start_time ) - - if (! 
is.na(workflowList$info$userid)){ + + if (!is.na(workflowList$info$userid)) { workflow_df <- workflow_df %>% tibble::add_column("user_id" = bit64::as.integer64(workflowList$info$userid)) } @@ -170,8 +168,8 @@ insert.workflow <- function(workflowList){ # NOTE: Have to "checkout" a connection from the pool here to work with # dbSendStatement and friends. We make sure to return the connection when the # function exits (successfully or not). - #con <- pool::poolCheckout(global_db_pool) - #on.exit(pool::poolReturn(con), add = TRUE) + # con <- pool::poolCheckout(global_db_pool) + # on.exit(pool::poolReturn(con), add = TRUE) con <- global_db_pool insert_query <- glue::glue( @@ -192,7 +190,8 @@ insert.workflow <- function(workflowList){ ) PEcAn.DB::db.query( - "UPDATE workflows SET folder = $1 WHERE id = $2", con, values = list( + "UPDATE workflows SET folder = $1 WHERE id = $2", con, + values = list( file.path("data", "workflows", paste0("PEcAn_", format(workflow_id, scientific = FALSE))), workflow_id ) @@ -206,64 +205,67 @@ insert.workflow <- function(workflowList){ #* Insert the workflow into attributes table #* @param workflowList List containing the workflow details #* @author Tezan Sahu -insert.attribute <- function(workflowList){ - +insert.attribute <- function(workflowList) { # Create an array of PFTs pfts <- c() - for(i in seq(length(workflowList$pfts))){ + for (i in seq(length(workflowList$pfts))) { pfts <- c(pfts, workflowList$pfts[i]$pft$name) } # Obtain the model_id model_id <- workflowList$model$id - if(is.null(model_id)){ + if (is.null(model_id)) { model_id <- PEcAn.DB::get.id("models", c("model_name", "revision"), c(workflowList$model$type, workflowList$model$revision), global_db_pool) } - + # Fill in the properties properties <- list( start = as.POSIXct(workflowList$run$start.date), end = as.POSIXct(workflowList$run$end.date), pfts = pfts, - runs = if(is.null(workflowList$ensemble$size)) 1 else workflowList$ensemble$size, + runs = if (is.null(workflowList$ensemble$size)) 1 else workflowList$ensemble$size, modelid = model_id, siteid = bit64::as.integer64(workflowList$run$site$id), sitename = dplyr::tbl(global_db_pool, "sites") %>% filter(id == bit64::as.integer64(workflowList$run$site$id)) %>% pull(sitename), - #sitegroupid <- - lat = if(is.null(workflowList$run$site$lat)) "" else workflowList$run$site$lat, - lon = if(is.null(workflowList$run$site$lon)) "" else workflowList$run$site$lon, - email = if(is.na(workflowList$info$userid) || workflowList$info$userid == -1) "" else - dplyr::tbl(global_db_pool, "users") %>% filter(id == bit64::as.integer64(workflowList$info$userid)) %>% pull(email), - notes = if(is.null(workflowList$info$notes)) "" else workflowList$info$notes, + # sitegroupid <- + lat = if (is.null(workflowList$run$site$lat)) "" else workflowList$run$site$lat, + lon = if (is.null(workflowList$run$site$lon)) "" else workflowList$run$site$lon, + email = if (is.na(workflowList$info$userid) || workflowList$info$userid == -1) { + "" + } else { + dplyr::tbl(global_db_pool, "users") %>% + filter(id == bit64::as.integer64(workflowList$info$userid)) %>% + pull(email) + }, + notes = if (is.null(workflowList$info$notes)) "" else workflowList$info$notes, variables = workflowList$ensemble$variable ) - - if(! is.null(workflowList$run$inputs$met$id)) { + + if (!is.null(workflowList$run$inputs$met$id)) { properties$input_met <- workflowList$run$inputs$met$id - } - else if(! 
is.null(workflowList$run$inputs$met$source)) { + } else if (!is.null(workflowList$run$inputs$met$source)) { properties$input_met <- workflowList$run$inputs$met$source } - - if(! is.null(workflowList$ensemble$parameters$method)) properties$parm_method <- workflowList$ensemble$parameters$method - if(! is.null(workflowList$sensitivity.analysis$quantiles)){ + + if (!is.null(workflowList$ensemble$parameters$method)) properties$parm_method <- workflowList$ensemble$parameters$method + if (!is.null(workflowList$sensitivity.analysis$quantiles)) { sensitivity <- c() - for(i in seq(length(workflowList$sensitivity.analysis$quantiles))){ + for (i in seq(length(workflowList$sensitivity.analysis$quantiles))) { sensitivity <- c(sensitivity, workflowList$sensitivity.analysis$quantiles[i]$sigma) } - properties$sensitivity <- paste0(sensitivity, collapse=",") + properties$sensitivity <- paste0(sensitivity, collapse = ",") } # More variables can be added later - + # Insert properties into attributes table value_json <- as.character(jsonlite::toJSON(properties, auto_unbox = TRUE)) - + # con <- pool::poolCheckout(global_db_pool) # on.exit(pool::poolReturn(con), add = TRUE) con <- global_db_pool - res <- DBI::dbSendStatement(con, - "INSERT INTO attributes (container_type, container_id, value) VALUES ($1, $2, $3)", - list("workflows", bit64::as.integer64(workflowList$workflow$id), value_json)) - - + res <- DBI::dbSendStatement( + con, + "INSERT INTO attributes (container_type, container_id, value) VALUES ($1, $2, $3)", + list("workflows", bit64::as.integer64(workflowList$workflow$id), value_json) + ) } diff --git a/apps/api/R/workflows.R b/apps/api/R/workflows.R index 302ff56fa6a..5ea612a9e69 100644 --- a/apps/api/R/workflows.R +++ b/apps/api/R/workflows.R @@ -9,32 +9,31 @@ source("submit.workflow.R") #' @return List of workflows (using a particular model & site, if specified) #' @author Tezan Sahu #* @get / -getWorkflows <- function(req, model_id=NA, site_id=NA, offset=0, limit=50, res){ - if (! 
limit %in% c(10, 20, 50, 100, 500)) { +getWorkflows <- function(req, model_id = NA, site_id = NA, offset = 0, limit = 50, res) { + if (!limit %in% c(10, 20, 50, 100, 500)) { res$status <- 400 return(list(error = "Invalid value for parameter")) } - + Workflow <- tbl(global_db_pool, "workflows") %>% select(-created_at, -updated_at, -params, -advanced_edit, -notes) - + if (!is.na(model_id)) { Workflow <- Workflow %>% filter(model_id == !!model_id) } - + if (!is.na(site_id)) { Workflow <- Workflow %>% filter(site_id == !!site_id) } - + qry_res <- Workflow %>% collect() if (nrow(qry_res) == 0 || as.numeric(offset) >= nrow(qry_res)) { res$status <- 404 - return(list(error="Workflows not found")) - } - else { + return(list(error = "Workflows not found")) + } else { has_next <- FALSE has_prev <- FALSE if (nrow(qry_res) > (as.numeric(offset) + as.numeric(limit))) { @@ -43,13 +42,13 @@ getWorkflows <- function(req, model_id=NA, site_id=NA, offset=0, limit=50, res){ if (as.numeric(offset) != 0) { has_prev <- TRUE } - + qry_res <- qry_res[(as.numeric(offset) + 1):min((as.numeric(offset) + as.numeric(limit)), nrow(qry_res)), ] - + result <- list(workflows = qry_res) result$count <- nrow(qry_res) - if(has_next){ - if(grepl("offset=", req$QUERY_STRING, fixed = TRUE)){ + if (has_next) { + if (grepl("offset=", req$QUERY_STRING, fixed = TRUE)) { result$next_page <- paste0( req$rook.url_scheme, "://", req$HTTP_HOST, @@ -57,11 +56,10 @@ getWorkflows <- function(req, model_id=NA, site_id=NA, offset=0, limit=50, res){ req$PATH_INFO, substr(req$QUERY_STRING, 0, stringr::str_locate(req$QUERY_STRING, "offset=")[[2]]), (as.numeric(limit) + as.numeric(offset)), - "&limit=", + "&limit=", limit ) - } - else { + } else { result$next_page <- paste0( req$rook.url_scheme, "://", req$HTTP_HOST, @@ -70,24 +68,24 @@ getWorkflows <- function(req, model_id=NA, site_id=NA, offset=0, limit=50, res){ substr(req$QUERY_STRING, 0, stringr::str_locate(req$QUERY_STRING, "limit=")[[2]] - 6), "offset=", (as.numeric(limit) + as.numeric(offset)), - "&limit=", + "&limit=", limit ) } } - if(has_prev) { + if (has_prev) { result$prev_page <- paste0( req$rook.url_scheme, "://", req$HTTP_HOST, "/api/workflows", - req$PATH_INFO, + req$PATH_INFO, substr(req$QUERY_STRING, 0, stringr::str_locate(req$QUERY_STRING, "offset=")[[2]]), max(0, (as.numeric(offset) - as.numeric(limit))), - "&limit=", + "&limit=", limit ) } - + return(result) } } @@ -99,19 +97,17 @@ getWorkflows <- function(req, model_id=NA, site_id=NA, offset=0, limit=50, res){ #' @return ID & status of the submitted workflow #' @author Tezan Sahu #* @post / -submitWorkflow <- function(req, res){ - if(req$HTTP_CONTENT_TYPE == "application/xml") { +submitWorkflow <- function(req, res) { + if (req$HTTP_CONTENT_TYPE == "application/xml") { submission_res <- submit.workflow.xml(req$postBody, req$user) - } - else if(req$HTTP_CONTENT_TYPE == "application/json") { + } else if (req$HTTP_CONTENT_TYPE == "application/json") { submission_res <- submit.workflow.json(req$postBody, req$user) - } - else{ + } else { res$status <- 415 return(paste("Unsupported request content type:", req$HTTP_CONTENT_TYPE)) } - - if(submission_res$status == "Error"){ + + if (submission_res$status == "Error") { res$status <- 400 return(submission_res) } @@ -126,48 +122,46 @@ submitWorkflow <- function(req, res){ #' @return Details of requested workflow #' @author Tezan Sahu #* @get / -getWorkflowDetails <- function(id, req, res){ +getWorkflowDetails <- function(id, req, res) { Workflow <- tbl(global_db_pool, "workflows") 
%>% select(id, model_id, site_id, folder, hostname, user_id) - + Workflow <- tbl(global_db_pool, "attributes") %>% select(id = container_id, properties = value) %>% full_join(Workflow, by = "id") %>% filter(id == !!id) - + qry_res <- Workflow %>% collect() - + if (nrow(qry_res) == 0) { res$status <- 404 - return(list(error="Workflow with specified ID was not found")) - } - else { - if(is.na(qry_res$properties)){ + return(list(error = "Workflow with specified ID was not found")) + } else { + if (is.na(qry_res$properties)) { res <- list( - id = id, - folder=qry_res$folder, - hostname=qry_res$hostname, - user_id=qry_res$user_id, + id = id, + folder = qry_res$folder, + hostname = qry_res$hostname, + user_id = qry_res$user_id, properties = list(modelid = qry_res$model_id, siteid = qry_res$site_id) ) - } - else{ + } else { res <- list( - id = id, - folder=qry_res$folder, - hostname=qry_res$hostname, - user_id=qry_res$user_id, + id = id, + folder = qry_res$folder, + hostname = qry_res$hostname, + user_id = qry_res$user_id, properties = jsonlite::parse_json(qry_res$properties[[1]]) ) } - + # Add the files for the workflow if they exist on disk filesdir <- paste0(Sys.getenv("DATA_DIR", "/data/"), "workflows/PEcAn_", id) - if(dir.exists(filesdir)){ + if (dir.exists(filesdir)) { all_files <- list.files(filesdir) res$files <- all_files[!all_files %in% c("out", "rabbitmq.out", "pft", "run", "STATUS")] } - + return(res) } } @@ -179,29 +173,28 @@ getWorkflowDetails <- function(id, req, res){ #' @return Details of requested workflow #' @author Tezan Sahu #* @get //status -getWorkflowStatus <- function(req, id, res){ +getWorkflowStatus <- function(req, id, res) { Workflow <- tbl(global_db_pool, "workflows") %>% select(id, user_id) %>% filter(id == !!id) - + qry_res <- Workflow %>% collect() - + if (nrow(qry_res) == 0) { res$status <- 404 - return(list(error="Workflow with specified ID was not found on this host")) - } - else { + return(list(error = "Workflow with specified ID was not found on this host")) + } else { # Check if the STATUS file exists on the host statusfile <- paste0(Sys.getenv("DATA_DIR", "/data/"), "workflows/PEcAn_", qry_res$id, "/STATUS") - if(! file.exists(statusfile)){ + if (!file.exists(statusfile)) { res$status <- 404 - return(list(error="Workflow with specified ID was not found on this host")) + return(list(error = "Workflow with specified ID was not found on this host")) } - + wf_status <- readLines(statusfile) wf_status <- stringr::str_replace_all(wf_status, "\t", " ") - return(list(workflow_id=id, status=wf_status)) + return(list(workflow_id = id, status = wf_status)) } } @@ -213,34 +206,33 @@ getWorkflowStatus <- function(req, id, res){ #' @author Tezan Sahu #* @serializer contentType list(type="application/octet-stream") #* @get //file/ -getWorkflowFile <- function(req, id, filename, res){ +getWorkflowFile <- function(req, id, filename, res) { Workflow <- tbl(global_db_pool, "workflows") %>% - select(id, user_id) %>% + select(id, user_id) %>% filter(id == !!id) - + qry_res <- Workflow %>% collect() - + if (nrow(qry_res) == 0) { res$status <- 404 return() - } - else { + } else { # Check if the requested file exists on the host filepath <- paste0(Sys.getenv("DATA_DIR", "/data/"), "workflows/PEcAn_", id, "/", filename) - if(! 
file.exists(filepath)){ + if (!file.exists(filepath)) { res$status <- 404 return() } - - if(Sys.getenv("AUTH_REQ") == TRUE){ - if(qry_res$user_id != req$user$userid) { + + if (Sys.getenv("AUTH_REQ") == TRUE) { + if (qry_res$user_id != req$user$userid) { res$status <- 403 return() - } + } } - + # Read the data in binary form & return it - bin <- readBin(filepath,'raw', n = file.info(filepath)$size) + bin <- readBin(filepath, "raw", n = file.info(filepath)$size) return(bin) } } diff --git a/apps/api/tests/alltests.R b/apps/api/tests/alltests.R index ffd3c10d4f6..6d0fee68482 100755 --- a/apps/api/tests/alltests.R +++ b/apps/api/tests/alltests.R @@ -1,3 +1,3 @@ #!/usr/bin/env Rscript -testthat::test_dir("./") \ No newline at end of file +testthat::test_dir("./") diff --git a/apps/api/tests/test.auth.R b/apps/api/tests/test.auth.R index 4ced4f5b05a..eeaadbb2157 100644 --- a/apps/api/tests/test.auth.R +++ b/apps/api/tests/test.auth.R @@ -21,4 +21,4 @@ test_that("Not using username & password returns Status 401", { "http://pecan.localhost/api/models/", ) expect_equal(res$status, 401) -}) \ No newline at end of file +}) diff --git a/apps/api/tests/test.formats.R b/apps/api/tests/test.formats.R index 10ed4c4ceb8..4cbf99d6cb4 100644 --- a/apps/api/tests/test.formats.R +++ b/apps/api/tests/test.formats.R @@ -30,4 +30,4 @@ test_that("Calling /api/formats/{format_id} with invalid parameters returns Stat httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) -}) \ No newline at end of file +}) diff --git a/apps/api/tests/test.models.R b/apps/api/tests/test.models.R index d3dcff2b6ec..610140109a9 100644 --- a/apps/api/tests/test.models.R +++ b/apps/api/tests/test.models.R @@ -30,4 +30,4 @@ test_that("Calling /api/models/{model_id} with invalid parameters returns Status httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) -}) \ No newline at end of file +}) diff --git a/apps/api/tests/test.pfts.R b/apps/api/tests/test.pfts.R index 7931c324938..1d1940dac00 100644 --- a/apps/api/tests/test.pfts.R +++ b/apps/api/tests/test.pfts.R @@ -30,4 +30,4 @@ test_that("Calling /api/pfts/{pft_id} with invalid parameters returns Status 404 httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) -}) \ No newline at end of file +}) diff --git a/apps/api/tests/test.ping.R b/apps/api/tests/test.ping.R index fc8ec0bf97f..bad05198ab1 100644 --- a/apps/api/tests/test.ping.R +++ b/apps/api/tests/test.ping.R @@ -3,4 +3,4 @@ context("Testing the /api/ping endpoint") test_that("Calling /api/ping returns Status 200", { res <- httr::GET("http://pecan.localhost/api/ping") expect_equal(res$status, 200) -}) \ No newline at end of file +}) diff --git a/apps/api/tests/test.sites.R b/apps/api/tests/test.sites.R index 7c75eb3ca6a..7fb802d54d4 100644 --- a/apps/api/tests/test.sites.R +++ b/apps/api/tests/test.sites.R @@ -30,4 +30,4 @@ test_that("Calling /api/sites/{site_id} with invalid parameters returns Status 4 httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) -}) \ No newline at end of file +}) diff --git a/apps/api/tests/test.workflows.R b/apps/api/tests/test.workflows.R index c17f16b598e..8544313f561 100644 --- a/apps/api/tests/test.workflows.R +++ b/apps/api/tests/test.workflows.R @@ -54,7 +54,7 @@ test_that("Submitting JSON workflow to /api/workflows/ returns Status 201", { "http://pecan.localhost/api/workflows/", httr::authenticate("carya", "illinois"), body = json_workflow, - encode='json' + encode = "json" ) expect_equal(res$status, 201) }) @@ -89,4 +89,4 @@ 
test_that("Calling /api/workflows/{id}/file/{filename} with invalid parameters r httr::authenticate("carya", "illinois") ) expect_equal(res$status, 404) -}) \ No newline at end of file +}) diff --git a/base/all/R/pecan_version.R b/base/all/R/pecan_version.R index 7d686af6d1f..8c057ae19f8 100644 --- a/base/all/R/pecan_version.R +++ b/base/all/R/pecan_version.R @@ -53,13 +53,13 @@ pecan_version <- function(version = max(PEcAn.all::pecan_releases$version), cols_to_return <- c(cols_to_return, "source") all_pkgs <- sessioninfo::package_info(pkgs = "installed", dependencies = FALSE) - our_pkgs <- all_pkgs[grepl("PEcAn", all_pkgs$package),] + our_pkgs <- all_pkgs[grepl("PEcAn", all_pkgs$package), ] # Why do we need this when `pkgs = "installed"` usually shows loaded too? # Because there are times a package is loaded but not installed # (e.g. notably during R CMD check) all_loaded <- sessioninfo::package_info(pkgs = "loaded", dependencies = FALSE) - our_loaded <- all_loaded[grepl("PEcAn", all_loaded$package),] + our_loaded <- all_loaded[grepl("PEcAn", all_loaded$package), ] # TODO: consider using package_info's callouts of packages where loaded and # installed versions mismatch -- it's a more elegant version of what we @@ -71,10 +71,10 @@ pecan_version <- function(version = max(PEcAn.all::pecan_releases$version), by.x = c("package", "ondiskversion", "source"), by.y = c("package", "loadedversion", "source"), all = TRUE, - sort = TRUE) + sort = TRUE + ) colnames(our_pkgs) <- c("package", "installed", "source") our_pkgs$installed <- package_version(our_pkgs$installed) - } else { all_pkgs <- as.data.frame(utils::installed.packages()) our_pkgs <- all_pkgs[ @@ -88,21 +88,24 @@ pecan_version <- function(version = max(PEcAn.all::pecan_releases$version), our_loaded <- sess[grepl("PEcAn", names(sess))] our_loaded <- data.frame( package = names(our_loaded), - installed = sapply(our_loaded, `[[`, "Version")) + installed = sapply(our_loaded, `[[`, "Version") + ) our_loaded$installed <- package_version(our_loaded$installed) our_pkgs <- merge(our_pkgs, our_loaded, all = TRUE, sort = TRUE) - our_pkgs <- our_pkgs[!duplicated(our_pkgs),] + our_pkgs <- our_pkgs[!duplicated(our_pkgs), ] } want_hash <- !is.na(our_pkgs$installed) our_pkgs$build_hash[want_hash] <- sapply( our_pkgs$package[want_hash], - get_buildhash) + get_buildhash + ) res <- merge( x = our_pkgs, y = PEcAn.all::pecan_version_history, - all = TRUE) + all = TRUE + ) res <- drop_na_version_rows(res[, cols_to_return]) rownames(res) <- res$package class(res) <- c("pecan_version_report", class(res)) @@ -134,25 +137,30 @@ get_buildhash <- function(pkg) { # (Just to help it display more compactly) #' @export print.pecan_version_report <- function(x, ...) { - dots <- list(...) 
- if (is.null(dots$row.names)) { dots$row.names <- FALSE } - if (is.null(dots$right)) { dots$right <- FALSE } + if (is.null(dots$row.names)) { + dots$row.names <- FALSE + } + if (is.null(dots$right)) { + dots$right <- FALSE + } xx <- as.data.frame(x) # only print hash for dev versions # (typically x.y.z.9000, but we'll use anything with a 4th version component) - skip_hash <- is.na(xx$installed[,4]) | is.na(xx$build_hash) + skip_hash <- is.na(xx$installed[, 4]) | is.na(xx$build_hash) xx$build_hash[skip_hash] <- "" xx$build_hash <- sub(".{4}\\+mod$", "+mod", xx$build_hash) xx$installed <- paste0( xx$installed, - sub("(.+)", " (\\1)", xx$build_hash)) + sub("(.+)", " (\\1)", xx$build_hash) + ) xx$build_hash <- NULL if (!is.null(xx$source)) { xx$source <- paste0( strtrim(xx$source, 17), - ifelse(nchar(xx$source, type="width") <= 17, "", "...")) + ifelse(nchar(xx$source, type = "width") <= 17, "", "...") + ) } dots$x <- xx do.call("print", dots) diff --git a/base/all/data-raw/record_versions.R b/base/all/data-raw/record_versions.R index 0253c7cc380..6875b7ca19e 100755 --- a/base/all/data-raw/record_versions.R +++ b/base/all/data-raw/record_versions.R @@ -68,9 +68,9 @@ if (!all(c("./base", "./models", "./modules") %in% list.dirs())) { } tag <- args[[1]] -date = as.Date(args[[2]]) -version = package_version(args[[3]]) - +date <- as.Date(args[[2]]) +version <- package_version(args[[3]]) + save_result <- FALSE if (length(args) == 4 && args[[4]] == "save") { save_result <- TRUE @@ -112,11 +112,11 @@ load(release_file, envir = old_rel) pecan_releases <- data.frame(tag = tag, date = date, version = version) |> merge(old_rel$pecan_releases, all = TRUE) |> (\(.) .[order(.$date), ])() -if(save_result) { +if (save_result) { save(pecan_releases, file = release_file, version = 2) } -if(!save_result) { +if (!save_result) { print("PEcAn release info:") print(pecan_releases) print("PEcAn package versions:") diff --git a/base/all/data/pecan_releases.R b/base/all/data/pecan_releases.R index b763821f46a..3c5fc675144 100644 --- a/base/all/data/pecan_releases.R +++ b/base/all/data/pecan_releases.R @@ -1,7 +1,7 @@ - pecan_releases <- utils::read.csv( "pecan_releases.csv", - colClasses = c(tag = "character", date = "Date", version = "character")) + colClasses = c(tag = "character", date = "Date", version = "character") +) pecan_releases$version <- package_version(pecan_releases$version) diff --git a/base/all/data/pecan_version_history.R b/base/all/data/pecan_version_history.R index efff2cdf1ac..465ae4faf68 100644 --- a/base/all/data/pecan_version_history.R +++ b/base/all/data/pecan_version_history.R @@ -1,4 +1,3 @@ - # Read and format a list of pecan versions # The local() wrapper is to avoid adding objects to the package data: @@ -9,7 +8,8 @@ pecan_version_history <- local({ pvh <- utils::read.csv( "pecan_version_history.csv", colClasses = "character", - check.names = FALSE) + check.names = FALSE + ) # We'd like to parse strictly to catch invalid versions (probably typos). # But we _need_ to allow NAs... 
and in R < 4.4, package_version did not @@ -17,7 +17,8 @@ pecan_version_history <- local({ strict <- TRUE na_version <- try( package_version(NA_character_, strict = strict), - silent = TRUE) + silent = TRUE + ) if (inherits(na_version, "try-error")) { strict <- FALSE } @@ -26,7 +27,8 @@ pecan_version_history <- local({ if (col != "package") { pvh[[col]] <- package_version( pvh[[col]], - strict = strict) + strict = strict + ) } } diff --git a/base/all/tests/testthat/test-pecan_version.R b/base/all/tests/testthat/test-pecan_version.R index b66172709a2..74ba482af4a 100644 --- a/base/all/tests/testthat/test-pecan_version.R +++ b/base/all/tests/testthat/test-pecan_version.R @@ -1,5 +1,4 @@ test_that("pecan_version", { - # defunct packages only shown when requesting versions that contained them expect_true("PEcAn.dalec" %in% pecan_version("v1.3.5")$package) expect_false("PEcAn.dalec" %in% pecan_version("v1.7.2")$package) @@ -54,34 +53,34 @@ test_that("pecan_version", { expect_true( any( noargs[noargs$package == "PEcAn.all", ]$installed == - packageVersion("PEcAn.all") + packageVersion("PEcAn.all") ) ) expect_true( any( noargs[noargs$package == "PEcAn.all", expected_tag] == - PEcAn.all::pecan_version_history[ - PEcAn.all::pecan_version_history$package == "PEcAn.all", - expected_tag - ] + PEcAn.all::pecan_version_history[ + PEcAn.all::pecan_version_history$package == "PEcAn.all", + expected_tag + ] ) ) }) test_that("pecan_version without sessioninfo", { - with_sessinfo <- pecan_version() # make pecan_version think the sessioninfo package is unavailable - mockery::stub(pecan_version, 'requireNamespace', FALSE) + mockery::stub(pecan_version, "requireNamespace", FALSE) without_sessinfo <- pecan_version() expect_length(with_sessinfo, 5) expect_length(without_sessinfo, 4) expect_equal( with_sessinfo[, colnames(with_sessinfo) != "source"], - without_sessinfo) + without_sessinfo + ) }) # TODO: Would be nice to add a check here that will notice if the list of PEcAn @@ -100,13 +99,14 @@ test_that("printing", { v0.0 = package_version("1.2.3"), installed = package_version("1.2.3.9000"), build_hash = "01234567ab", - source = "13 characters"), + source = "13 characters" + ), class = c("pecan_version_report", "data.frame") ) long_ver <- ver - long_ver$build_hash = "01234567ab+mod" - long_ver$source = "twenty-two characters" + long_ver$build_hash <- "01234567ab+mod" + long_ver$source <- "twenty-two characters" # hash truncated to fit "+mod" if present expect_output(print(ver), "01234567ab", fixed = TRUE) diff --git a/base/all/tests/testthat/test.workflow.R b/base/all/tests/testthat/test.workflow.R index 85efe3437c8..bb25dd5ab14 100644 --- a/base/all/tests/testthat/test.workflow.R +++ b/base/all/tests/testthat/test.workflow.R @@ -1,9 +1,9 @@ # TODO This is an integration test (#1125) -#context("tests of overall workflow") +# context("tests of overall workflow") -#settings.file <- system.file("inst/extdata/test.settings.xml", package = "PEcAn.utils") -#settings <- read.settings(settings.file) +# settings.file <- system.file("inst/extdata/test.settings.xml", package = "PEcAn.utils") +# settings <- read.settings(settings.file) # settings$pfts <- get.trait.data(settings$pfts, settings$model$type, settings$database$dbfiles, settings$database$bety, settings$meta.analysis$update) # run.meta.analysis(settings$pfts, settings$meta.analysis$iter, settings$meta.analysis$random.effects$on, settings$meta.analysis$threshold, settings$database$dbfiles, settings$database$bety) diff --git a/base/db/R/assign.treatments.R 
b/base/db/R/assign.treatments.R index 3779d327676..3782f4d1381 100644 --- a/base/db/R/assign.treatments.R +++ b/base/db/R/assign.treatments.R @@ -10,7 +10,7 @@ ##' @return dataframe with sequential treatments ##' @export ##' @author David LeBauer, Carl Davidson, Alexey Shiklomanov -assign.treatments <- function(data){ +assign.treatments <- function(data) { data$trt_id[which(data$control == 1)] <- "control" sites <- unique(data$site_id) # Site IDs may be returned as `integer64`, which the `for` loop @@ -19,9 +19,9 @@ assign.treatments <- function(data){ for (si in seq_along(sites)) { ss <- sites[[si]] site.i <- data$site_id == ss - #if only one treatment, it's control + # if only one treatment, it's control if (length(unique(data$trt_id[site.i])) == 1) data$trt_id[site.i] <- "control" - if (!"control" %in% data$trt_id[site.i]){ + if (!"control" %in% data$trt_id[site.i]) { PEcAn.logger::logger.severe(paste0( "No control treatment set for site_id ", unique(data$site_id[site.i]), " and citation id ", unique(data$citation_id[site.i]), ".\n", @@ -32,7 +32,7 @@ assign.treatments <- function(data){ return(data) } -drop.columns <- function(data, columns){ +drop.columns <- function(data, columns) { return(data[, which(!colnames(data) %in% columns)]) } -##=============================================================================# +## =============================================================================# diff --git a/base/db/R/check.lists.R b/base/db/R/check.lists.R index cc131ca59ed..a9352167d75 100644 --- a/base/db/R/check.lists.R +++ b/base/db/R/check.lists.R @@ -13,10 +13,10 @@ check.lists <- function(x, y, filename = "species.csv") { if (nrow(x) != nrow(y)) { return(FALSE) } - if(filename == "species.csv"){ - cols <- c('id', 'genus', 'species', 'scientificname') + if (filename == "species.csv") { + cols <- c("id", "genus", "species", "scientificname") } else if (filename == "cultivars.csv") { - cols <- c('id', 'specie_id', 'species_name', 'cultivar_name') + cols <- c("id", "specie_id", "species_name", "cultivar_name") } else { return(FALSE) } diff --git a/base/db/R/clone_pft.R b/base/db/R/clone_pft.R index 04fa8e09392..4e17dcb88f0 100644 --- a/base/db/R/clone_pft.R +++ b/base/db/R/clone_pft.R @@ -21,8 +21,7 @@ clone_pft <- function(parent.pft.name, new.pft.name, new.pft.definition, - settings){ - + settings) { if (new.pft.name == parent.pft.name) { PEcAn.logger::logger.severe("new.pft.name must not be the same as parent.pft.name") } @@ -35,7 +34,7 @@ clone_pft <- function(parent.pft.name, on.exit(db.close(con), add = TRUE) parent.pft <- (dplyr::tbl(con, "pfts") - %>% dplyr::filter(.data$name == !!parent.pft.name) + %>% dplyr::filter(.data$name == !!parent.pft.name) %>% dplyr::collect()) if (nrow(parent.pft) == 0) { @@ -43,11 +42,12 @@ clone_pft <- function(parent.pft.name, } new.pft <- (parent.pft - %>% dplyr::select(-"id", -"created_at", -"updated_at") + %>% dplyr::select(-"id", -"created_at", -"updated_at") %>% dplyr::mutate( name = !!new.pft.name, definition = !!new.pft.definition, - parent_id = !!parent.pft$id)) + parent_id = !!parent.pft$id + )) ## create new pft DBI::dbWriteTable( @@ -55,10 +55,11 @@ clone_pft <- function(parent.pft.name, name = "pfts", value = as.data.frame(new.pft), append = TRUE, - row.names = FALSE) + row.names = FALSE + ) new.pft$id <- (dplyr::tbl(con, "pfts") - %>% dplyr::filter(.data$name == !!new.pft.name) + %>% dplyr::filter(.data$name == !!new.pft.name) %>% dplyr::pull("id")) @@ -72,33 +73,35 @@ clone_pft <- function(parent.pft.name, member_tbl <- 
"pfts_species" } new_members <- (dplyr::tbl(con, member_tbl) - %>% dplyr::filter(.data$pft_id == !!parent.pft$id) + %>% dplyr::filter(.data$pft_id == !!parent.pft$id) %>% dplyr::mutate(pft_id = !!new.pft$id) %>% dplyr::distinct() %>% dplyr::collect()) - if(nrow(new_members) > 0){ + if (nrow(new_members) > 0) { DBI::dbWriteTable( conn = con, name = member_tbl, value = as.data.frame(new_members), append = TRUE, - row.names = FALSE) + row.names = FALSE + ) } new_priors <- (dplyr::tbl(con, "pfts_priors") - %>% dplyr::filter(.data$pft_id == !!parent.pft$id) + %>% dplyr::filter(.data$pft_id == !!parent.pft$id) %>% dplyr::mutate(pft_id = !!new.pft$id) %>% dplyr::distinct() %>% dplyr::collect()) - if(nrow(new_priors) > 0){ + if (nrow(new_priors) > 0) { DBI::dbWriteTable( conn = con, name = "pfts_priors", value = as.data.frame(new_priors), append = TRUE, - row.names = FALSE) + row.names = FALSE + ) } return(new.pft$id) diff --git a/base/db/R/convert_input.R b/base/db/R/convert_input.R index 1ff74a13014..04e45359294 100644 --- a/base/db/R/convert_input.R +++ b/base/db/R/convert_input.R @@ -3,29 +3,29 @@ ##' \code{convert_input} is a relatively generic function that applies the function \code{fcn} and inserts a record of it into the database. It is primarily designed for converting meteorological data between formats and can be used on observed data, forecasts, and ensembles of forecasts. ##' To minimize downloading and storing duplicate data, it first checks to see if a given file is already in the ##' database before applying \code{fcn}. -##' +##' ##' @section Executing the function: ##' convert_input executes the function fcn in package pkg via PEcAn.remote::remote.execute.R. All additional arguments passed to ##' convert_input (...) are in turn passed along to fcn as arguments. In addition, several named arguments to convert_input are passed ##' along to fcn. The command to execute fcn is built as a string. -##' +##' ##' @section Database files: ##' There are two kinds of database records (in different tables) that represent a given data file in the file system. An input file -##' contains information about the contents of the data file. A dbfile contains machine spacific information for a given input file, -##' such as the file path. Because duplicates of data files for a given input can be on multiple different machines, there can be more +##' contains information about the contents of the data file. A dbfile contains machine spacific information for a given input file, +##' such as the file path. Because duplicates of data files for a given input can be on multiple different machines, there can be more ##' than one dbfile for a given input file. -##' +##' ##' @section Time-span appending: -##' By default, convert_input tries to optimize the download of most data products by only downloading the years of data not present on +##' By default, convert_input tries to optimize the download of most data products by only downloading the years of data not present on ##' the current machine. (For example, if files for 2004-2008 exist for a given data product exist on this machine and the user requests ##' 2006-2010, the function will only download data for 2009 and 2010). In year-long data files, each year exists as a separate file. ##' The database input file contains records of the bounds of the range stored by those years. The data optimization can be turned off ##' by overriding the default values for exact.dates and allow.conflicting.dates. 
-##' +##' ##' @section Forecast data: ##' If the flag forecast is TRUE, convert_input treats data as if it were forecast data. Forecast data do not undergo time span ##' appending. -##' +##' ##' @section Ensembles: ##' convert_input has the capability to handle ensembles of met data. If ensemble = an integer > 1, convert_input checks the database ##' for records of all ensemble members, and calls fcn if at least one is missing. convert_input assumes that fcn will return records @@ -86,717 +86,767 @@ convert_input <- forecast = FALSE, ensemble = FALSE, ensemble_name = NULL, - dbparms=NULL, - ... - ) { - - input.args <- list(...) - - PEcAn.logger::logger.debug(paste("convert_inputs", fcn, input.id, host$name, outfolder, formatname, - mimetype, site.id, start_date, end_date)) - - Rbinary <- ifelse(is.null(host$Rbinary),"R",host$Rbinary) - - n <- nchar(outfolder) - if (substr(outfolder, n, n) != "/") { - outfolder <- paste0(outfolder, "/") - } - - outname <- utils::tail(unlist(strsplit(outfolder, "/")), n = 1) - - PEcAn.logger::logger.info(paste("start CHECK convert_inputs", fcn, input.id, host$name, outfolder, - formatname, mimetype, site.id, start_date, end_date, forecast, ensemble)) - - - ##----------------------------------------------------------------------------------------------------------------## - # Forecast data sets require their own set of database checking operations, making the following else if and else irrelevant - # for such data. Forecast data are different if their start date (and if they are part of an ensemble, their ensemble id) are - # different. - if (forecast) { - #if the data is an ensemble, ensemble will be set equal to the number of ensemble members. - #However, if the data is not an ensemble, ensemble will be equal to FALSE. In order to treat ensemble and - #non-ensemble members together in one piece of code, we set ensemble=1 if it's FALSE. - if (!is.integer(ensemble)) {ensemble = as.integer(1) } - - # Convert dates to Date objects and strip all time zones - # (DB values are timezone-free) - start_date <- lubridate::force_tz(lubridate::as_datetime(start_date), "UTC") - end_date <- lubridate::force_tz(lubridate::as_datetime(end_date), "UTC") - - # Each ensemble member is associated with its own input file. Therefore, each of these need to be lists. - existing.dbfile <- list() - existing.input <- list() - - existing_records <- list(input.id = NULL, dbfile.id = NULL) # Empty vectors are null - files.to.delete <- list() - - for (i in seq_len(ensemble)) { - # In dbfile.input.check, pattern searches the file path for the specified regular expression. filename_pattern - # contains a portion of the file name which can uniquely identify a particular data product at a particular - # site and time, regardless of similarities in formatname, mimetype, etc. Because sitenames can contain - # regular expression specific special characters and site-specific identification is already present through - # site.id, a regex placeholder is used instead. - - # This regular expression identifies a particular ensemble from input$name - # It can recognize the following components: - # name of the data product, followed by a dot, and optionally a site name followed by a dot, then - # the ensemble number/name, followed either by a dot or a termination of the string. - # This last dot/string terminator matcher is required so that the regex does not recognize 12, 13, etc. as 1. - # Example: NOAA_GEFS.Willow Creek(US-WCr).3. 
- # Met product name: NOAA_GEFS, site name: Willow Creek(US-WCr), ensemble number: 3 - # The regular expression does not have to match the entire file name. - - # pattern is the name of the data product - filename_pattern = paste0(pattern, "\\.([^.]*\\.)?") #double backslash for regex - - # Specify ensemble name/number and add termination sequence to ensure each number is recognized uniquely (e.g. - # 12 is not recognized as 1). - if (!is.null(ensemble_name)) { - filename_pattern = paste0(filename_pattern, ensemble_name, "($|\\.)") - } else if (ensemble > 1) { - filename_pattern = paste0(filename_pattern, i, "($|\\.)") - } - - existing.dbfile[[i]] <- dbfile.input.check(siteid = site.id, - mimetype = mimetype, - formatname = formatname, - parentid = input.id, - startdate = start_date, - enddate = end_date, - con = con, - hostname = host$name, - exact.dates = TRUE, - pattern = filename_pattern) - - if(nrow(existing.dbfile[[i]]) > 0) { - existing.input[[i]] <- db.query(paste0("SELECT * FROM inputs WHERE id=", existing.dbfile[[i]]$container_id),con) - - # Date/time processing for existing input - existing.input[[i]]$start_date <- lubridate::force_tz(lubridate::as_datetime(existing.input[[i]]$start_date), "UTC") - existing.input[[i]]$end_date <- lubridate::force_tz(lubridate::as_datetime(existing.input[[i]]$end_date), "UTC") - - ## Obtain machine information - #Grab machine info of file that exists - existing.machine <- db.query(paste0("SELECT * from machines where id = '", - existing.dbfile[[i]]$machine_id, "'"), con) - - #Grab machine info of host machine - machine.host <- ifelse(host$name == "localhost", PEcAn.remote::fqdn(), host$name) - machine <- db.query(paste0("SELECT * from machines where hostname = '", - machine.host, "'"), con) - - - # If the files aren't on the machine, we have to download them, so "overwrite" is meaningless. - if (existing.machine$id == machine$id) { - if (overwrite) { #If the files are on the current machine, check to see if we should overwrite them. - #Collect files for deletion, and store them in a list. - #Actually setting up the deletion will be done after the loop. - #c() concantanes the elements in a list, not just vectors. - files.to.delete <- c(files.to.delete, as.list(PEcAn.remote::remote.execute.R( paste0("list.files('", - existing.dbfile[[i]]$file_path, - "', full.names=TRUE)"), - host, user = NA, verbose = TRUE,R = Rbinary, scratchdir = outfolder))) - } else { # If we're not overriding, we can just use the files that are already here. - existing_records$input.id = c(existing_records$input.id, existing.input[[i]]$id) - existing_records$dbfile.id = c(existing_records$dbfile.id, existing.dbfile[[i]]$id) - } - } else { # Else to "existing.machine$id == machine$id" - insert.new.file <- TRUE - } - - } else { # Else to "nrow(existing.dbfile[[i]]) > 0)" - existing.input[[i]] <- data.frame() # We don't want there to be a "gap" in existing input which would cause the lists to not be parellel. - # Empty data frames are screened for when input/dbfile are processed below. - } - } # -- End for loop -- - - # Set up files to be deleted. The deletion will actually happen after the function finishes execution. In case there - # are any errors, this practice will make sure that the old files are preserved. - if (length(files.to.delete) > 0) { # Each list item is a file to delete. 
- file.deletion.commands <- .get.file.deletion.commands(unlist(files.to.delete)) - - PEcAn.remote::remote.execute.R( file.deletion.commands$move.to.tmp, - host, user = NA, - verbose = TRUE,R = Rbinary, scratchdir = outfolder) - - successful <- FALSE - on.exit( - if (exists("successful") && successful) { - PEcAn.logger::logger.info( - "Conversion successful, with overwrite=TRUE. Deleting old files.") - PEcAn.remote::remote.execute.R( - file.deletion.commands$delete.tmp, - host, user = NA, - verbose = TRUE, R = Rbinary, scratchdir = outfolder) - } else { - PEcAn.logger::logger.info("Conversion failed. Replacing old files.") - PEcAn.remote::remote.execute.R( - file.deletion.commands$replace.from.tmp, - host, user = NA, - verbose = TRUE, R = Rbinary, scratchdir = outfolder) - }, - add = TRUE - ) # Close on.exit - } - - # If all of the files for an existing ensemble exist, we'll just use those files. Otherwise, we'll need to run the function to - # fill in the gaps. (The function should be smart enough not to overwrite previous files unless overwrite == TRUE). If overwrite is TRUE, - # then exisitng_records$input.id will have length 0, and this if statement won't be entered. - if (length(existing_records$input.id) == ensemble) { - if (ensemble == 1) { # Used to give a little more precise of an info message. - PEcAn.logger::logger.info("File with forecast data in the given range already exists on this machine.") - } else { - PEcAn.logger::logger.info("Files for all ensemble members for this forecast already exist on this machine.") - } - - return(existing_records) + dbparms = NULL, + ...) { + input.args <- list(...) + + PEcAn.logger::logger.debug(paste( + "convert_inputs", fcn, input.id, host$name, outfolder, formatname, + mimetype, site.id, start_date, end_date + )) + + Rbinary <- ifelse(is.null(host$Rbinary), "R", host$Rbinary) + + n <- nchar(outfolder) + if (substr(outfolder, n, n) != "/") { + outfolder <- paste0(outfolder, "/") } - ##----------------------------------------- End of forecast section --------------------------------## - - } else if (exact.dates) { - - # Find Existing input with exact dates. - - #--- This is for met2model part - if(!is.null(input.args$dbfile.id)){ - existing.dbfile <- dbfile.input.check(siteid = site.id, - mimetype = mimetype, - formatname = formatname, - parentid = input.id, - startdate = start_date, - enddate = end_date, - con = con, - hostname = host$name, - exact.dates = TRUE, - pattern = pattern - ) - if ("id" %in% colnames(existing.dbfile)) { - existing.dbfile <- existing.dbfile %>% - dplyr::filter(.data$id==input.args$dbfile.id) - } - }else{ - existing.dbfile <- dbfile.input.check(siteid = site.id, - mimetype = mimetype, - formatname = formatname, - parentid = input.id, - startdate = start_date, - enddate = end_date, - con = con, - hostname = host$name, - exact.dates = TRUE, - pattern = pattern - ) + + outname <- utils::tail(unlist(strsplit(outfolder, "/")), n = 1) + + PEcAn.logger::logger.info(paste( + "start CHECK convert_inputs", fcn, input.id, host$name, outfolder, + formatname, mimetype, site.id, start_date, end_date, forecast, ensemble + )) + + + ## ----------------------------------------------------------------------------------------------------------------## + # Forecast data sets require their own set of database checking operations, making the following else if and else irrelevant + # for such data. Forecast data are different if their start date (and if they are part of an ensemble, their ensemble id) are + # different. 
+ if (forecast) { + # if the data is an ensemble, ensemble will be set equal to the number of ensemble members. + # However, if the data is not an ensemble, ensemble will be equal to FALSE. In order to treat ensemble and + # non-ensemble members together in one piece of code, we set ensemble=1 if it's FALSE. + if (!is.integer(ensemble)) { + ensemble <- as.integer(1) } - - PEcAn.logger::logger.debug("File id =", existing.dbfile$id, - " File name =", existing.dbfile$file_name, - " File path =", existing.dbfile$file_path, - " Input id =", existing.dbfile$container_id, - digits = 10) - - PEcAn.logger::logger.info("end CHECK for existing input record") - - - if (nrow(existing.dbfile) > 0) { - - existing.input <- db.query(paste0("SELECT * FROM inputs WHERE id=", existing.dbfile[["container_id"]]),con) - + # Convert dates to Date objects and strip all time zones # (DB values are timezone-free) - start_date <- lubridate::force_tz(lubridate::as_date(start_date), "UTC") - end_date <- lubridate::force_tz(lubridate::as_date(end_date), "UTC") - - existing.input$start_date <- lubridate::force_tz(lubridate::as_date(existing.input$start_date), "UTC") - existing.input$end_date <- lubridate::force_tz(lubridate::as_date(existing.input$end_date), "UTC") - - ## Do overwrite if set to TRUE - if(overwrite){ - # collect files to flag for deletion - files.to.delete <- PEcAn.remote::remote.execute.R( paste0("list.files('", - existing.dbfile[["file_path"]], - "', full.names=TRUE)"), - host, user = NA, verbose = TRUE,R = Rbinary, scratchdir = outfolder) - - file.deletion.commands <- .get.file.deletion.commands(files.to.delete) - - PEcAn.remote::remote.execute.R( file.deletion.commands$move.to.tmp, - host, user = NA, - verbose = TRUE,R = Rbinary, scratchdir = outfolder) - - - # Schedule files to be replaced or deleted on exiting the function + start_date <- lubridate::force_tz(lubridate::as_datetime(start_date), "UTC") + end_date <- lubridate::force_tz(lubridate::as_datetime(end_date), "UTC") + + # Each ensemble member is associated with its own input file. Therefore, each of these need to be lists. + existing.dbfile <- list() + existing.input <- list() + + existing_records <- list(input.id = NULL, dbfile.id = NULL) # Empty vectors are null + files.to.delete <- list() + + for (i in seq_len(ensemble)) { + # In dbfile.input.check, pattern searches the file path for the specified regular expression. filename_pattern + # contains a portion of the file name which can uniquely identify a particular data product at a particular + # site and time, regardless of similarities in formatname, mimetype, etc. Because sitenames can contain + # regular expression specific special characters and site-specific identification is already present through + # site.id, a regex placeholder is used instead. + + # This regular expression identifies a particular ensemble from input$name + # It can recognize the following components: + # name of the data product, followed by a dot, and optionally a site name followed by a dot, then + # the ensemble number/name, followed either by a dot or a termination of the string. + # This last dot/string terminator matcher is required so that the regex does not recognize 12, 13, etc. as 1. + # Example: NOAA_GEFS.Willow Creek(US-WCr).3. + # Met product name: NOAA_GEFS, site name: Willow Creek(US-WCr), ensemble number: 3 + # The regular expression does not have to match the entire file name. 
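[Editor's aside, not part of the patch: a quick sanity check of the ensemble file-name regex assembled just below, using the example name given in the comments above. The product name and file names here are hypothetical; the point is that the trailing "($|\.)" terminator keeps member 1 from matching the "13" in another member's name.]

pattern <- "NOAA_GEFS" # hypothetical data-product name
member3 <- paste0(pattern, "\\.([^.]*\\.)?", 3, "($|\\.)")
member1 <- paste0(pattern, "\\.([^.]*\\.)?", 1, "($|\\.)")
grepl(member3, "NOAA_GEFS.Willow Creek(US-WCr).3.2020-07-01")  # TRUE: member 3 is recognized
grepl(member1, "NOAA_GEFS.Willow Creek(US-WCr).13.2020-07-01") # FALSE: "13" is not mistaken for member 1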
+ + # pattern is the name of the data product + filename_pattern <- paste0(pattern, "\\.([^.]*\\.)?") # double backslash for regex + + # Specify ensemble name/number and add termination sequence to ensure each number is recognized uniquely (e.g. + # 12 is not recognized as 1). + if (!is.null(ensemble_name)) { + filename_pattern <- paste0(filename_pattern, ensemble_name, "($|\\.)") + } else if (ensemble > 1) { + filename_pattern <- paste0(filename_pattern, i, "($|\\.)") + } + + existing.dbfile[[i]] <- dbfile.input.check( + siteid = site.id, + mimetype = mimetype, + formatname = formatname, + parentid = input.id, + startdate = start_date, + enddate = end_date, + con = con, + hostname = host$name, + exact.dates = TRUE, + pattern = filename_pattern + ) + + if (nrow(existing.dbfile[[i]]) > 0) { + existing.input[[i]] <- db.query(paste0("SELECT * FROM inputs WHERE id=", existing.dbfile[[i]]$container_id), con) + + # Date/time processing for existing input + existing.input[[i]]$start_date <- lubridate::force_tz(lubridate::as_datetime(existing.input[[i]]$start_date), "UTC") + existing.input[[i]]$end_date <- lubridate::force_tz(lubridate::as_datetime(existing.input[[i]]$end_date), "UTC") + + ## Obtain machine information + # Grab machine info of file that exists + existing.machine <- db.query(paste0( + "SELECT * from machines where id = '", + existing.dbfile[[i]]$machine_id, "'" + ), con) + + # Grab machine info of host machine + machine.host <- ifelse(host$name == "localhost", PEcAn.remote::fqdn(), host$name) + machine <- db.query(paste0( + "SELECT * from machines where hostname = '", + machine.host, "'" + ), con) + + + # If the files aren't on the machine, we have to download them, so "overwrite" is meaningless. + if (existing.machine$id == machine$id) { + if (overwrite) { # If the files are on the current machine, check to see if we should overwrite them. + # Collect files for deletion, and store them in a list. + # Actually setting up the deletion will be done after the loop. + # c() concantanes the elements in a list, not just vectors. + files.to.delete <- c(files.to.delete, as.list(PEcAn.remote::remote.execute.R( + paste0( + "list.files('", + existing.dbfile[[i]]$file_path, + "', full.names=TRUE)" + ), + host, + user = NA, verbose = TRUE, R = Rbinary, scratchdir = outfolder + ))) + } else { # If we're not overriding, we can just use the files that are already here. + existing_records$input.id <- c(existing_records$input.id, existing.input[[i]]$id) + existing_records$dbfile.id <- c(existing_records$dbfile.id, existing.dbfile[[i]]$id) + } + } else { # Else to "existing.machine$id == machine$id" + insert.new.file <- TRUE + } + } else { # Else to "nrow(existing.dbfile[[i]]) > 0)" + existing.input[[i]] <- data.frame() # We don't want there to be a "gap" in existing input which would cause the lists to not be parellel. + # Empty data frames are screened for when input/dbfile are processed below. + } + } # -- End for loop -- + + # Set up files to be deleted. The deletion will actually happen after the function finishes execution. In case there + # are any errors, this practice will make sure that the old files are preserved. + if (length(files.to.delete) > 0) { # Each list item is a file to delete. 
+ file.deletion.commands <- .get.file.deletion.commands(unlist(files.to.delete)) + + PEcAn.remote::remote.execute.R(file.deletion.commands$move.to.tmp, + host, + user = NA, + verbose = TRUE, R = Rbinary, scratchdir = outfolder + ) + successful <- FALSE on.exit( if (exists("successful") && successful) { - PEcAn.logger::logger.info("Conversion successful, with overwrite=TRUE. Deleting old files.") + PEcAn.logger::logger.info( + "Conversion successful, with overwrite=TRUE. Deleting old files." + ) PEcAn.remote::remote.execute.R( file.deletion.commands$delete.tmp, - host, user = NA, - verbose = TRUE, R = Rbinary, scratchdir = outfolder) + host, + user = NA, + verbose = TRUE, R = Rbinary, scratchdir = outfolder + ) } else { PEcAn.logger::logger.info("Conversion failed. Replacing old files.") PEcAn.remote::remote.execute.R( file.deletion.commands$replace.from.tmp, - host, user = NA, - verbose = TRUE, R = Rbinary, scratchdir = outfolder) + host, + user = NA, + verbose = TRUE, R = Rbinary, scratchdir = outfolder + ) }, add = TRUE ) # Close on.exit } - - - - #Grab machine info of file that exists - existing.machine <- db.query(paste0("SELECT * from machines where id = '", - existing.dbfile$machine_id, "'"), con) - - #Grab machine info of host machine - machine.host <- ifelse(host$name == "localhost", PEcAn.remote::fqdn(), host$name) - machine <- db.query(paste0("SELECT * from machines where hostname = '", - machine.host, "'"), con) - - if (existing.machine$id != machine$id) { - - PEcAn.logger::logger.info("Valid Input record found that spans desired dates, but valid files do not exist on this machine.") - PEcAn.logger::logger.info("Downloading all years of Valid input to ensure consistency") - insert.new.file <- TRUE - start_date <- existing.input$start_date - end_date <- existing.input$end_date - - } else { - # There's an existing input that spans desired start/end dates with files on this machine - PEcAn.logger::logger.info("Skipping this input conversion because files are already available.") - return(list(input.id = existing.input$id, dbfile.id = existing.dbfile$id)) + + # If all of the files for an existing ensemble exist, we'll just use those files. Otherwise, we'll need to run the function to + # fill in the gaps. (The function should be smart enough not to overwrite previous files unless overwrite == TRUE). If overwrite is TRUE, + # then exisitng_records$input.id will have length 0, and this if statement won't be entered. + if (length(existing_records$input.id) == ensemble) { + if (ensemble == 1) { # Used to give a little more precise of an info message. + PEcAn.logger::logger.info("File with forecast data in the given range already exists on this machine.") + } else { + PEcAn.logger::logger.info("Files for all ensemble members for this forecast already exist on this machine.") + } + + return(existing_records) } - - - } else { - # No existing record found. Should be good to go with regular conversion. 
- } - - ##-------------------------end of exact.dates chunk------------------------------------# - - } else { - - #existing file for ensembles takes an advantage of the pattern argument - if (!is.null(ensemble) && ensemble) { - return.all <-TRUE - - }else{ - return.all <- FALSE - } - existing.dbfile <- dbfile.input.check(siteid = site.id, - mimetype = mimetype, - formatname = formatname, - parentid = input.id, - startdate = start_date, - enddate = end_date, - con = con, - hostname = host$name, - pattern = pattern, - return.all = return.all - ) - - - - PEcAn.logger::logger.debug("File id =", existing.dbfile$id, - " File name =", existing.dbfile$file_name, - " File path =", existing.dbfile$file_path, - " Input id =", existing.dbfile$container_id, - digits = 10) - - PEcAn.logger::logger.info("end CHECK for existing input record.") - - if (nrow(existing.dbfile) > 0) { + ## ----------------------------------------- End of forecast section --------------------------------## + } else if (exact.dates) { + # Find Existing input with exact dates. - if (!is.null(ensemble) && ensemble) { - - existing.input <- existing.dbfile[["container_id"]] %>% - unique() %>% - purrr::map_dfr(function(one.cont.id) { - db.query(paste0("SELECT * FROM inputs WHERE id=", one.cont.id), con) - }) - - } else{ - existing.input <- - db.query(paste0("SELECT * FROM inputs WHERE id=", existing.dbfile[["container_id"]]), con) + #--- This is for met2model part + if (!is.null(input.args$dbfile.id)) { + existing.dbfile <- dbfile.input.check( + siteid = site.id, + mimetype = mimetype, + formatname = formatname, + parentid = input.id, + startdate = start_date, + enddate = end_date, + con = con, + hostname = host$name, + exact.dates = TRUE, + pattern = pattern + ) + if ("id" %in% colnames(existing.dbfile)) { + existing.dbfile <- existing.dbfile %>% + dplyr::filter(.data$id == input.args$dbfile.id) + } + } else { + existing.dbfile <- dbfile.input.check( + siteid = site.id, + mimetype = mimetype, + formatname = formatname, + parentid = input.id, + startdate = start_date, + enddate = end_date, + con = con, + hostname = host$name, + exact.dates = TRUE, + pattern = pattern + ) } - - - # Convert dates to Date objects and strip all time zones - # (DB values are timezone-free) - start_date <- lubridate::force_tz(lubridate::as_date(start_date), "UTC") - end_date <- lubridate::force_tz(lubridate::as_date(end_date), "UTC") - - existing.input$start_date <- lubridate::force_tz(lubridate::as_date(existing.input$start_date), "UTC") - existing.input$end_date <- lubridate::force_tz(lubridate::as_date(existing.input$end_date), "UTC") - - if (overwrite) { - # collect files to flag for deletion - - files.to.delete <- PEcAn.remote::remote.execute.R( paste0("list.files('", - existing.dbfile[["file_path"]], - "', full.names=TRUE)"), - host, user = NA, verbose = TRUE,R = Rbinary, scratchdir = outfolder) - - file.deletion.commands <- .get.file.deletion.commands(files.to.delete) - - PEcAn.remote::remote.execute.R( file.deletion.commands$move.to.tmp, - host, user = NA, - verbose = TRUE,R = Rbinary, scratchdir = outfolder) - - # Schedule files to be replaced or deleted on exiting the function - successful <- FALSE - on.exit( - if (exists("successful") && successful) { - PEcAn.logger::logger.info( - "Conversion successful, with overwrite=TRUE. 
Deleting old files.") - PEcAn.remote::remote.execute.R( - file.deletion.commands$delete.tmp, - host, user = NA, - verbose = TRUE, R = Rbinary, scratchdir = outfolder) - } else { - PEcAn.logger::logger.info( - "Conversion failed. Replacing old files.") - PEcAn.remote::remote.execute.R( - file.deletion.commands$replace.from.tmp, - host, user = NA, - verbose = TRUE, R = Rbinary, scratchdir = outfolder) - }, - add = TRUE - ) # close on.exit - - } else if ((start_date >= existing.input$start_date) && - (end_date <= existing.input$end_date)) { - - #Grab machine info of file that exists - existing.machine <- db.query(paste0("SELECT * from machines where id = '", - existing.dbfile$machine_id, "'"), con) - - #Grab machine info of + + PEcAn.logger::logger.debug("File id =", existing.dbfile$id, + " File name =", existing.dbfile$file_name, + " File path =", existing.dbfile$file_path, + " Input id =", existing.dbfile$container_id, + digits = 10 + ) + + PEcAn.logger::logger.info("end CHECK for existing input record") + + + if (nrow(existing.dbfile) > 0) { + existing.input <- db.query(paste0("SELECT * FROM inputs WHERE id=", existing.dbfile[["container_id"]]), con) + + # Convert dates to Date objects and strip all time zones + # (DB values are timezone-free) + start_date <- lubridate::force_tz(lubridate::as_date(start_date), "UTC") + end_date <- lubridate::force_tz(lubridate::as_date(end_date), "UTC") + + existing.input$start_date <- lubridate::force_tz(lubridate::as_date(existing.input$start_date), "UTC") + existing.input$end_date <- lubridate::force_tz(lubridate::as_date(existing.input$end_date), "UTC") + + ## Do overwrite if set to TRUE + if (overwrite) { + # collect files to flag for deletion + files.to.delete <- PEcAn.remote::remote.execute.R( + paste0( + "list.files('", + existing.dbfile[["file_path"]], + "', full.names=TRUE)" + ), + host, + user = NA, verbose = TRUE, R = Rbinary, scratchdir = outfolder + ) + + file.deletion.commands <- .get.file.deletion.commands(files.to.delete) + + PEcAn.remote::remote.execute.R(file.deletion.commands$move.to.tmp, + host, + user = NA, + verbose = TRUE, R = Rbinary, scratchdir = outfolder + ) + + + # Schedule files to be replaced or deleted on exiting the function + successful <- FALSE + on.exit( + if (exists("successful") && successful) { + PEcAn.logger::logger.info("Conversion successful, with overwrite=TRUE. Deleting old files.") + PEcAn.remote::remote.execute.R( + file.deletion.commands$delete.tmp, + host, + user = NA, + verbose = TRUE, R = Rbinary, scratchdir = outfolder + ) + } else { + PEcAn.logger::logger.info("Conversion failed. 
Replacing old files.") + PEcAn.remote::remote.execute.R( + file.deletion.commands$replace.from.tmp, + host, + user = NA, + verbose = TRUE, R = Rbinary, scratchdir = outfolder + ) + }, + add = TRUE + ) # Close on.exit + } + + + + # Grab machine info of file that exists + existing.machine <- db.query(paste0( + "SELECT * from machines where id = '", + existing.dbfile$machine_id, "'" + ), con) + + # Grab machine info of host machine machine.host <- ifelse(host$name == "localhost", PEcAn.remote::fqdn(), host$name) - machine <- db.query(paste0("SELECT * from machines where hostname = '", - machine.host, "'"), con) - - if(existing.machine$id != machine$id){ + machine <- db.query(paste0( + "SELECT * from machines where hostname = '", + machine.host, "'" + ), con) + + if (existing.machine$id != machine$id) { PEcAn.logger::logger.info("Valid Input record found that spans desired dates, but valid files do not exist on this machine.") PEcAn.logger::logger.info("Downloading all years of Valid input to ensure consistency") insert.new.file <- TRUE start_date <- existing.input$start_date - end_date <- existing.input$end_date + end_date <- existing.input$end_date } else { - # There's an existing input that spans desired start/end dates with files on this machine + # There's an existing input that spans desired start/end dates with files on this machine PEcAn.logger::logger.info("Skipping this input conversion because files are already available.") return(list(input.id = existing.input$id, dbfile.id = existing.dbfile$id)) } - } else { - # Start/end dates need to be updated so that the input spans a continuous - # timeframe - start_date <- min(start_date, existing.input$start_date) - end_date <- max(end_date, existing.input$end_date) - PEcAn.logger::logger.info( - paste0( - "Changed start/end dates to '", - start_date, - "'/'", - end_date, - "' ", - " so that existing input can be updated while maintaining continuous time span." - ) - ) - - # There might be existing files for some years (but not all; checked that above) - # fcn should be smart enough not overwrite the existing ones, and hopefully won't - # waste time working on them either At the end, if convert_inputs was successful - # we'll need to update its start/end dates . - } + # No existing record found. Should be good to go with regular conversion. + } + + ## -------------------------end of exact.dates chunk------------------------------------# } else { - # No existing record found. Should be good to go. 
- } - } - - #---------------------------------------------------------------------------------------------------------------# - # Get machine information - - machine.host <- ifelse(host$name == "localhost", PEcAn.remote::fqdn(), host$name) - machine <- db.query(paste0("SELECT * from machines where hostname = '", - machine.host, "'"), con) - - if (nrow(machine) == 0) { - PEcAn.logger::logger.error("machine not found", host$name) - return(NULL) - } - - if (missing(input.id) || is.na(input.id) || is.null(input.id)) { - input <- dbfile <- NULL - } else { - input <- db.query(paste("SELECT * from inputs where id =", input.id), con) - if (nrow(input) == 0) { - PEcAn.logger::logger.error("input not found", input.id) - return(NULL) - } - - if(!is.null(input.args$dbfile.id)){ - dbfile <- - db.query( - paste( - "SELECT * from dbfiles where id=",input.args$dbfile.id," and container_id =", - input.id, - " and container_type = 'Input' and machine_id =", - machine$id - ), - con - ) - }else{ - dbfile <- - db.query( - paste( - "SELECT * from dbfiles where container_id =", - input.id, - " and container_type = 'Input' and machine_id =", - machine$id - ), - con - ) + # existing file for ensembles takes an advantage of the pattern argument + if (!is.null(ensemble) && ensemble) { + return.all <- TRUE + } else { + return.all <- FALSE + } + existing.dbfile <- dbfile.input.check( + siteid = site.id, + mimetype = mimetype, + formatname = formatname, + parentid = input.id, + startdate = start_date, + enddate = end_date, + con = con, + hostname = host$name, + pattern = pattern, + return.all = return.all + ) + + + + PEcAn.logger::logger.debug("File id =", existing.dbfile$id, + " File name =", existing.dbfile$file_name, + " File path =", existing.dbfile$file_path, + " Input id =", existing.dbfile$container_id, + digits = 10 + ) + + PEcAn.logger::logger.info("end CHECK for existing input record.") + + if (nrow(existing.dbfile) > 0) { + if (!is.null(ensemble) && ensemble) { + existing.input <- existing.dbfile[["container_id"]] %>% + unique() %>% + purrr::map_dfr(function(one.cont.id) { + db.query(paste0("SELECT * FROM inputs WHERE id=", one.cont.id), con) + }) + } else { + existing.input <- + db.query(paste0("SELECT * FROM inputs WHERE id=", existing.dbfile[["container_id"]]), con) + } + + + # Convert dates to Date objects and strip all time zones + # (DB values are timezone-free) + start_date <- lubridate::force_tz(lubridate::as_date(start_date), "UTC") + end_date <- lubridate::force_tz(lubridate::as_date(end_date), "UTC") + + existing.input$start_date <- lubridate::force_tz(lubridate::as_date(existing.input$start_date), "UTC") + existing.input$end_date <- lubridate::force_tz(lubridate::as_date(existing.input$end_date), "UTC") + + if (overwrite) { + # collect files to flag for deletion + + files.to.delete <- PEcAn.remote::remote.execute.R( + paste0( + "list.files('", + existing.dbfile[["file_path"]], + "', full.names=TRUE)" + ), + host, + user = NA, verbose = TRUE, R = Rbinary, scratchdir = outfolder + ) + + file.deletion.commands <- .get.file.deletion.commands(files.to.delete) + + PEcAn.remote::remote.execute.R(file.deletion.commands$move.to.tmp, + host, + user = NA, + verbose = TRUE, R = Rbinary, scratchdir = outfolder + ) + + # Schedule files to be replaced or deleted on exiting the function + successful <- FALSE + on.exit( + if (exists("successful") && successful) { + PEcAn.logger::logger.info( + "Conversion successful, with overwrite=TRUE. Deleting old files." 
+ ) + PEcAn.remote::remote.execute.R( + file.deletion.commands$delete.tmp, + host, + user = NA, + verbose = TRUE, R = Rbinary, scratchdir = outfolder + ) + } else { + PEcAn.logger::logger.info( + "Conversion failed. Replacing old files." + ) + PEcAn.remote::remote.execute.R( + file.deletion.commands$replace.from.tmp, + host, + user = NA, + verbose = TRUE, R = Rbinary, scratchdir = outfolder + ) + }, + add = TRUE + ) # close on.exit + } else if ((start_date >= existing.input$start_date) && + (end_date <= existing.input$end_date)) { + # Grab machine info of file that exists + existing.machine <- db.query(paste0( + "SELECT * from machines where id = '", + existing.dbfile$machine_id, "'" + ), con) + + # Grab machine info of + machine.host <- ifelse(host$name == "localhost", PEcAn.remote::fqdn(), host$name) + machine <- db.query(paste0( + "SELECT * from machines where hostname = '", + machine.host, "'" + ), con) + + if (existing.machine$id != machine$id) { + PEcAn.logger::logger.info("Valid Input record found that spans desired dates, but valid files do not exist on this machine.") + PEcAn.logger::logger.info("Downloading all years of Valid input to ensure consistency") + insert.new.file <- TRUE + start_date <- existing.input$start_date + end_date <- existing.input$end_date + } else { + # There's an existing input that spans desired start/end dates with files on this machine + PEcAn.logger::logger.info("Skipping this input conversion because files are already available.") + return(list(input.id = existing.input$id, dbfile.id = existing.dbfile$id)) + } + } else { + # Start/end dates need to be updated so that the input spans a continuous + # timeframe + start_date <- min(start_date, existing.input$start_date) + end_date <- max(end_date, existing.input$end_date) + PEcAn.logger::logger.info( + paste0( + "Changed start/end dates to '", + start_date, + "'/'", + end_date, + "' ", + " so that existing input can be updated while maintaining continuous time span." + ) + ) + + # There might be existing files for some years (but not all; checked that above) + # fcn should be smart enough not overwrite the existing ones, and hopefully won't + # waste time working on them either At the end, if convert_inputs was successful + # we'll need to update its start/end dates . + } + } else { + # No existing record found. Should be good to go. 
+ } } - - - if (nrow(dbfile) == 0) { - PEcAn.logger::logger.error("dbfile not found", input.id) + #---------------------------------------------------------------------------------------------------------------# + # Get machine information + + machine.host <- ifelse(host$name == "localhost", PEcAn.remote::fqdn(), host$name) + machine <- db.query(paste0( + "SELECT * from machines where hostname = '", + machine.host, "'" + ), con) + + if (nrow(machine) == 0) { + PEcAn.logger::logger.error("machine not found", host$name) return(NULL) } - if (nrow(dbfile) > 1) { - PEcAn.logger::logger.warn("multiple dbfile records, using last", dbfile) - dbfile <- dbfile[nrow(dbfile), ] - } - } - - #--------------------------------------------------------------------------------------------------# - # Perform Conversion - - conversion <- "local.remote" #default - - if (conversion == "local.remote") { - # perform conversion on local or remote host - - fcn.args <- input.args - fcn.args$overwrite <- overwrite - fcn.args$in.path <- dbfile$file_path - fcn.args$in.prefix <- dbfile$file_name - fcn.args$outfolder <- outfolder - fcn.args$start_date <- start_date - fcn.args$end_date <- end_date - fcn.args$dbparms <- dbparms - - if (forecast && !is.null(input.id) && !is.na(input.id)) { # for downstream code adapted to handle forecast file conventions - fcn.args$year.fragment = TRUE # such as met2model conversions; arguments will be extraneous otherwise. + + if (missing(input.id) || is.na(input.id) || is.null(input.id)) { + input <- dbfile <- NULL + } else { + input <- db.query(paste("SELECT * from inputs where id =", input.id), con) + if (nrow(input) == 0) { + PEcAn.logger::logger.error("input not found", input.id) + return(NULL) + } + + if (!is.null(input.args$dbfile.id)) { + dbfile <- + db.query( + paste( + "SELECT * from dbfiles where id=", input.args$dbfile.id, " and container_id =", + input.id, + " and container_type = 'Input' and machine_id =", + machine$id + ), + con + ) + } else { + dbfile <- + db.query( + paste( + "SELECT * from dbfiles where container_id =", + input.id, + " and container_type = 'Input' and machine_id =", + machine$id + ), + con + ) + } + + + + if (nrow(dbfile) == 0) { + PEcAn.logger::logger.error("dbfile not found", input.id) + return(NULL) + } + if (nrow(dbfile) > 1) { + PEcAn.logger::logger.warn("multiple dbfile records, using last", dbfile) + dbfile <- dbfile[nrow(dbfile), ] + } } - - arg.string <- PEcAn.utils::listToArgString(fcn.args) - PEcAn.logger::logger.debug("input.args converted to string") - - if (!missing(format.vars)) { - arg.string <- paste0(arg.string, ", format=", paste0(list(format.vars))) + + #--------------------------------------------------------------------------------------------------# + # Perform Conversion + + conversion <- "local.remote" # default + + if (conversion == "local.remote") { + # perform conversion on local or remote host + + fcn.args <- input.args + fcn.args$overwrite <- overwrite + fcn.args$in.path <- dbfile$file_path + fcn.args$in.prefix <- dbfile$file_name + fcn.args$outfolder <- outfolder + fcn.args$start_date <- start_date + fcn.args$end_date <- end_date + fcn.args$dbparms <- dbparms + + if (forecast && !is.null(input.id) && !is.na(input.id)) { # for downstream code adapted to handle forecast file conventions + fcn.args$year.fragment <- TRUE # such as met2model conversions; arguments will be extraneous otherwise. 
+ } + + arg.string <- PEcAn.utils::listToArgString(fcn.args) + PEcAn.logger::logger.debug("input.args converted to string") + + if (!missing(format.vars)) { + arg.string <- paste0(arg.string, ", format=", paste0(list(format.vars))) + } + + cmdFcn <- paste0(pkg, "::", fcn, "(", arg.string, ")") + PEcAn.logger::logger.debug(paste0("convert_input executing the following function:\n", cmdFcn)) + + + result <- + PEcAn.remote::remote.execute.R( + script = cmdFcn, + host, + user = NA, + verbose = TRUE, + R = Rbinary, + scratchdir = outfolder + ) + + + # Wraps the result in a list. This way, everything returned by fcn will be a list, and all of the + # code below can process everything as if it were a list without worrying about data types. + if (is.data.frame(result)) { + result <- list(result) + } } - - cmdFcn <- paste0(pkg, "::", fcn, "(", arg.string, ")") - PEcAn.logger::logger.debug(paste0("convert_input executing the following function:\n", cmdFcn)) - - - result <- - PEcAn.remote::remote.execute.R( - script = cmdFcn, - host, - user = NA, - verbose = TRUE, - R = Rbinary, - scratchdir = outfolder - ) + PEcAn.logger::logger.info("RESULTS: convert_input") + PEcAn.logger::logger.info(result) - # Wraps the result in a list. This way, everything returned by fcn will be a list, and all of the - # code below can process everything as if it were a list without worrying about data types. - if (is.data.frame(result)) { - result <- list(result) + if (length(result[[1]]) <= 1) { # result, a list, is guaranteed to have at least one element. However, that element could be an empty data frame. + PEcAn.logger::logger.debug(paste0("Processing data failed, please check validity of args:", arg.string)) + PEcAn.logger::logger.severe(paste0("Unable to process data using this function:", fcn)) } - } - - PEcAn.logger::logger.info("RESULTS: convert_input") - PEcAn.logger::logger.info(result) - - if (length(result[[1]]) <= 1){ # result, a list, is guaranteed to have at least one element. However, that element could be an empty data frame. 
- PEcAn.logger::logger.debug(paste0("Processing data failed, please check validity of args:", arg.string)) - PEcAn.logger::logger.severe(paste0("Unable to process data using this function:",fcn)) - } - - #--------------------------------------------------------------------------------------------------# - # Check if result has empty or missing files - - result_sizes <- purrr::map_dfr( - result, - ~ dplyr::mutate( - ., - file_size = purrr::map_dbl(file, file.size), - missing = is.na(file_size), - empty = file_size == 0 + + #--------------------------------------------------------------------------------------------------# + # Check if result has empty or missing files + + result_sizes <- purrr::map_dfr( + result, + ~ dplyr::mutate( + ., + file_size = purrr::map_dbl(file, file.size), + missing = is.na(file_size), + empty = file_size == 0 + ) ) - ) - - if (any(result_sizes$missing) || any(result_sizes$empty)){ - log_format_df = function(df){ + + if (any(result_sizes$missing) || any(result_sizes$empty)) { + log_format_df <- function(df) { rbind(colnames(df), format(df)) - purrr::reduce( paste, sep=" ") %>% - paste(collapse="\n") + purrr::reduce(paste, sep = " ") %>% + paste(collapse = "\n") + } + + PEcAn.logger::logger.severe( + "Requested Processing produced empty files or Nonexistant files :\n", + log_format_df(result_sizes[, c(1, 8, 9, 10)]), + "\n Table of results printed above.", + wrap = FALSE + ) } - - PEcAn.logger::logger.severe( - "Requested Processing produced empty files or Nonexistant files :\n", - log_format_df(result_sizes[,c(1,8,9,10)]), - "\n Table of results printed above.", - wrap = FALSE) - } - - # Insert into Database - outlist <- unlist(strsplit(outname, "_")) - - # Wrap in a list for consistant processing later - if (exists("existing.input") && is.data.frame(existing.input)) { - existing.input <- list(existing.input) - } - - if (exists("existing.dbfile") && is.data.frame(existing.dbfile)) { - existing.dbfile <- list(existing.dbfile) - } - - #---------------------------------------------------------------# - # New arrangement of database adding code to deal with ensembles. - if (write) { - - # Setup newinput. This list will contain two variables: a vector of input IDs and a vector of DB IDs for each entry in result. - # This list will be returned. - newinput = list(input.id = NULL, dbfile.id = NULL) #Blank vectors are null. - for(i in 1:length(result)) { # Master for loop - id_not_added <- TRUE - - if (exists("existing.input") && nrow(existing.input[[i]]) > 0 && + + # Insert into Database + outlist <- unlist(strsplit(outname, "_")) + + # Wrap in a list for consistant processing later + if (exists("existing.input") && is.data.frame(existing.input)) { + existing.input <- list(existing.input) + } + + if (exists("existing.dbfile") && is.data.frame(existing.dbfile)) { + existing.dbfile <- list(existing.dbfile) + } + + #---------------------------------------------------------------# + # New arrangement of database adding code to deal with ensembles. + if (write) { + # Setup newinput. This list will contain two variables: a vector of input IDs and a vector of DB IDs for each entry in result. + # This list will be returned. + newinput <- list(input.id = NULL, dbfile.id = NULL) # Blank vectors are null. 
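[Editor's aside, not part of the patch: for orientation, the list assembled by the loop below ends up as two parallel vectors, one inputs-table id and one dbfiles-table id per entry in result. A sketch of its shape for a three-member ensemble, with hypothetical ids:]

str(list(
  input.id  = c(1000000001, 1000000002, 1000000003), # one inputs record per ensemble member (hypothetical ids)
  dbfile.id = c(2000000001, 2000000002, 2000000003)  # matching dbfiles records (hypothetical ids)
))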
+ for (i in 1:length(result)) { # Master for loop + id_not_added <- TRUE + + if (exists("existing.input") && nrow(existing.input[[i]]) > 0 && (existing.input[[i]]$start_date != start_date || existing.input[[i]]$end_date != end_date)) { - - # Updating record with new dates - db.query(paste0("UPDATE inputs SET start_date='", start_date, "', end_date='", - end_date, "' WHERE id=", existing.input[[i]]$id), - con) - id_not_added = FALSE - - # The overall structure of this loop has been set up so that exactly one input.id and one dbfile.id will be written to newinput every interation. - newinput$input.id = c(newinput$input.id, existing.input[[i]]$id) - newinput$dbfile.id = c(newinput$dbfile.id, existing.dbfile[[i]]$id) - } - - if (overwrite) { - # A bit hacky, but need to make sure that all fields are updated to expected - # values (i.e., what they'd be if convert_input was creating a new record) - if (exists("existing.input") && nrow(existing.input[[i]]) > 0) { - db.query(paste0("UPDATE inputs SET name='", basename(dirname(result[[i]]$file[1])), - "' WHERE id=", existing.input[[i]]$id), con) - + # Updating record with new dates + db.query( + paste0( + "UPDATE inputs SET start_date='", start_date, "', end_date='", + end_date, "' WHERE id=", existing.input[[i]]$id + ), + con + ) + id_not_added <- FALSE + + # The overall structure of this loop has been set up so that exactly one input.id and one dbfile.id will be written to newinput every interation. + newinput$input.id <- c(newinput$input.id, existing.input[[i]]$id) + newinput$dbfile.id <- c(newinput$dbfile.id, existing.dbfile[[i]]$id) } - - if (exists("existing.dbfile") && nrow(existing.dbfile[[i]]) > 0) { - db.query(paste0("UPDATE dbfiles SET file_path='", dirname(result[[i]]$file[1]), - "', ", "file_name='", result[[i]]$dbfile.name[1], - "' WHERE id=", existing.dbfile[[i]]$id), con) - + + if (overwrite) { + # A bit hacky, but need to make sure that all fields are updated to expected + # values (i.e., what they'd be if convert_input was creating a new record) + if (exists("existing.input") && nrow(existing.input[[i]]) > 0) { + db.query(paste0( + "UPDATE inputs SET name='", basename(dirname(result[[i]]$file[1])), + "' WHERE id=", existing.input[[i]]$id + ), con) + } + + if (exists("existing.dbfile") && nrow(existing.dbfile[[i]]) > 0) { + db.query(paste0( + "UPDATE dbfiles SET file_path='", dirname(result[[i]]$file[1]), + "', ", "file_name='", result[[i]]$dbfile.name[1], + "' WHERE id=", existing.dbfile[[i]]$id + ), con) + } } - } - - # If there is no ensemble then for each record there should be one parent - #But when you have ensembles, all of the members have one parent !! - if (is.numeric(ensemble)){ - parent.id <- ifelse(is.null(input[i]), NA, input[1]$id) - }else{ - parent.id <- ifelse(is.null(input[i]), NA, input[i]$id) - } - - - - if ("newsite" %in% names(input.args) && !is.null(input.args[["newsite"]])) { - site.id <- input.args$newsite - } - - if (insert.new.file && id_not_added) { - dbfile.id <- dbfile.insert(in.path = dirname(result[[i]]$file[1]), - in.prefix = result[[i]]$dbfile.name[1], - 'Input', existing.input[[i]]$id, - con, reuse=TRUE, hostname = machine$hostname) - newinput$input.id <- c(newinput$input.id, existing.input[[i]]$id) - newinput$dbfile.id <- c(newinput$dbfile.id, dbfile.id) - } else if (id_not_added) { - - # This is to tell input.insert if we are wrting ensembles - # Why does it need it ? 
bc it checks for inputs with the same time period, site and machine - # and if it returns somethings it does not insert anymore, but for ensembles it needs to bypass this condition - if (!is.null(ensemble) | is.null(ensemble_name)){ - ens.flag <- TRUE - }else{ - ens.flag <- FALSE - } - - new_entry <- dbfile.input.insert(in.path = dirname(result[[i]]$file[1]), - in.prefix = result[[i]]$dbfile.name[1], - siteid = site.id, - startdate = start_date, - enddate = end_date, - mimetype, - formatname, - parentid = parent.id, - con = con, - hostname = machine$hostname, - allow.conflicting.dates = allow.conflicting.dates, - ens=ens.flag - ) - - - newinput$input.id <- c(newinput$input.id, new_entry$input.id) - newinput$dbfile.id <- c(newinput$dbfile.id, new_entry$dbfile.id) - } - - } #End for loop - - successful <- TRUE - return(newinput) - } else { - PEcAn.logger::logger.warn("Input was not added to the database") - successful <- TRUE - return(NULL) - } -} # convert_input + + # If there is no ensemble then for each record there should be one parent + # But when you have ensembles, all of the members have one parent !! + if (is.numeric(ensemble)) { + parent.id <- ifelse(is.null(input[i]), NA, input[1]$id) + } else { + parent.id <- ifelse(is.null(input[i]), NA, input[i]$id) + } + + + + if ("newsite" %in% names(input.args) && !is.null(input.args[["newsite"]])) { + site.id <- input.args$newsite + } + + if (insert.new.file && id_not_added) { + dbfile.id <- dbfile.insert( + in.path = dirname(result[[i]]$file[1]), + in.prefix = result[[i]]$dbfile.name[1], + "Input", existing.input[[i]]$id, + con, reuse = TRUE, hostname = machine$hostname + ) + newinput$input.id <- c(newinput$input.id, existing.input[[i]]$id) + newinput$dbfile.id <- c(newinput$dbfile.id, dbfile.id) + } else if (id_not_added) { + # This is to tell input.insert if we are wrting ensembles + # Why does it need it ? 
bc it checks for inputs with the same time period, site and machine + # and if it returns somethings it does not insert anymore, but for ensembles it needs to bypass this condition + if (!is.null(ensemble) | is.null(ensemble_name)) { + ens.flag <- TRUE + } else { + ens.flag <- FALSE + } + + new_entry <- dbfile.input.insert( + in.path = dirname(result[[i]]$file[1]), + in.prefix = result[[i]]$dbfile.name[1], + siteid = site.id, + startdate = start_date, + enddate = end_date, + mimetype, + formatname, + parentid = parent.id, + con = con, + hostname = machine$hostname, + allow.conflicting.dates = allow.conflicting.dates, + ens = ens.flag + ) + + + newinput$input.id <- c(newinput$input.id, new_entry$input.id) + newinput$dbfile.id <- c(newinput$dbfile.id, new_entry$dbfile.id) + } + } # End for loop + + successful <- TRUE + return(newinput) + } else { + PEcAn.logger::logger.warn("Input was not added to the database") + successful <- TRUE + return(NULL) + } + } # convert_input .get.file.deletion.commands <- function(files.to.delete) { - if(length(files.to.delete) > 0) { - tmp.dirs <- file.path(unique(dirname(files.to.delete)), 'tmp') - tmp.paths <- file.path(dirname(files.to.delete), 'tmp', basename(files.to.delete)) - - tmp.dirs.string <- paste0("c(", paste(paste0("'", tmp.dirs, "'"), collapse=', '), ")") - tmp.path.string <- paste0("c(", paste(paste0("'", tmp.paths, "'"), collapse=', '), ")") - original.path.string <- paste0("c(", paste(paste0("'", files.to.delete, "'"), collapse=', '), ")") - + if (length(files.to.delete) > 0) { + tmp.dirs <- file.path(unique(dirname(files.to.delete)), "tmp") + tmp.paths <- file.path(dirname(files.to.delete), "tmp", basename(files.to.delete)) + + tmp.dirs.string <- paste0("c(", paste(paste0("'", tmp.dirs, "'"), collapse = ", "), ")") + tmp.path.string <- paste0("c(", paste(paste0("'", tmp.paths, "'"), collapse = ", "), ")") + original.path.string <- paste0("c(", paste(paste0("'", files.to.delete, "'"), collapse = ", "), ")") + move.to.tmp <- paste0( "dir.create(", tmp.dirs.string, ", recursive=TRUE, showWarnings=FALSE); ", "file.rename(from=", original.path.string, ", to=", tmp.path.string, ")" - ) - + ) + replace.from.tmp <- paste0( "file.rename(from=", tmp.path.string, ", to=", original.path.string, ");", "unlink(", tmp.dirs.string, ", recursive=TRUE)" ) - + delete.tmp <- paste0( "unlink(", tmp.dirs.string, ", recursive=TRUE)" ) - return(list(move.to.tmp=move.to.tmp, replace.from.tmp=replace.from.tmp, delete.tmp=delete.tmp)) + return(list(move.to.tmp = move.to.tmp, replace.from.tmp = replace.from.tmp, delete.tmp = delete.tmp)) } else { return(NULL) } diff --git a/base/db/R/covariate.functions.R b/base/db/R/covariate.functions.R index c40e3ca5a05..7499cf001e1 100644 --- a/base/db/R/covariate.functions.R +++ b/base/db/R/covariate.functions.R @@ -1,6 +1,6 @@ ######################## COVARIATE FUNCTIONS ################################# -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# ##' Append covariate data as a column within a table ##' ##' \code{append.covariate} appends a data frame of covariates as a new column in a data frame @@ -16,23 +16,23 @@ ##' ##' @author Carl Davidson, Ryan Kelly ##' @export -##--------------------------------------------------------------------------------------------------# -append.covariate <- function(data, column.name, covariates.data){ +## 
--------------------------------------------------------------------------------------------------# +append.covariate <- function(data, column.name, covariates.data) { # Keep only the highest-priority covariate for each trait covariates.data <- covariates.data[!duplicated(covariates.data$trait_id), ] - + # Select columns to keep, and rename the covariate column - covariates.data <- covariates.data[, c('trait_id', 'level')] - names(covariates.data) <- c('id', column.name) - + covariates.data <- covariates.data[, c("trait_id", "level")] + names(covariates.data) <- c("id", column.name) + # Merge on trait ID merged <- merge(covariates.data, data, all = TRUE, by = "id") return(merged) } -##==================================================================================================# +## ==================================================================================================# -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# ##' Queries covariates from database for a given vector of trait id's ##' ##' @param trait.ids list of trait ids @@ -40,17 +40,19 @@ append.covariate <- function(data, column.name, covariates.data){ ##' @param ... extra arguments ##' ##' @author David LeBauer -query.covariates <- function(trait.ids, con = NULL, ...){ - covariate.query <- paste("select covariates.trait_id, covariates.level,variables.name", - "from covariates left join variables on variables.id = covariates.variable_id", - "where trait_id in (", PEcAn.utils::vecpaste(trait.ids), ")") +query.covariates <- function(trait.ids, con = NULL, ...) { + covariate.query <- paste( + "select covariates.trait_id, covariates.level,variables.name", + "from covariates left join variables on variables.id = covariates.variable_id", + "where trait_id in (", PEcAn.utils::vecpaste(trait.ids), ")" + ) covariates <- db.query(query = covariate.query, con = con) return(covariates) } -##==================================================================================================# +## ==================================================================================================# -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# ##' Apply Arrhenius scaling to 25 degC for temperature-dependent traits ##' ##' @param data data frame of data to scale, as returned by query.data() @@ -61,36 +63,37 @@ query.covariates <- function(trait.ids, con = NULL, ...){ ##' @param new.temp the reference temperature for the scaled traits. Curerntly 25 degC ##' @param missing.temp the temperature assumed for traits with no covariate found. 
Curerntly 25 degC ##' @author Carl Davidson, David LeBauer, Ryan Kelly -arrhenius.scaling.traits <- function(data, covariates, temp.covariates, new.temp = 25, missing.temp = 25){ +arrhenius.scaling.traits <- function(data, covariates, temp.covariates, new.temp = 25, missing.temp = 25) { # Select covariates that match temp.covariates - covariates <- covariates[covariates$name %in% temp.covariates,] - - if(nrow(covariates)>0) { + covariates <- covariates[covariates$name %in% temp.covariates, ] + + if (nrow(covariates) > 0) { # Sort covariates in order of priority - covariates <- do.call(rbind, - lapply(temp.covariates, function(temp.covariate) covariates[covariates$name == temp.covariate, ]) + covariates <- do.call( + rbind, + lapply(temp.covariates, function(temp.covariate) covariates[covariates$name == temp.covariate, ]) ) - - data <- append.covariate(data, 'temp', covariates) - + + data <- append.covariate(data, "temp", covariates) + # Assign default value for traits with no covariates data$temp[is.na(data$temp)] <- missing.temp - + # Scale traits - data$mean <- PEcAn.utils::arrhenius.scaling(observed.value = data$mean, old.temp = data$temp, new.temp=new.temp) - data$stat <- PEcAn.utils::arrhenius.scaling(observed.value = data$stat, old.temp = data$temp, new.temp=new.temp) - - #remove temporary covariate column. - data<-data[,colnames(data)!='temp'] + data$mean <- PEcAn.utils::arrhenius.scaling(observed.value = data$mean, old.temp = data$temp, new.temp = new.temp) + data$stat <- PEcAn.utils::arrhenius.scaling(observed.value = data$stat, old.temp = data$temp, new.temp = new.temp) + + # remove temporary covariate column. + data <- data[, colnames(data) != "temp"] } else { data <- NULL } return(data) } -##==================================================================================================# +## ==================================================================================================# -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# ##' Function to filter out upper canopy leaves ##' ##' @name filter_sunleaf_traits @@ -99,17 +102,19 @@ arrhenius.scaling.traits <- function(data, covariates, temp.covariates, new.temp ##' @param covariates covariate data ##' ##' @author David LeBauer -filter_sunleaf_traits <- function(data, covariates){ - if(length(covariates)>0) { - data <- append.covariate(data = data, column.name = 'canopy_layer', - covariates.data = covariates[covariates$name == 'canopy_layer',]) - data <- data[data$canopy_layer >= 0.66 | is.na(data$canopy_layer),] - +filter_sunleaf_traits <- function(data, covariates) { + if (length(covariates) > 0) { + data <- append.covariate( + data = data, column.name = "canopy_layer", + covariates.data = covariates[covariates$name == "canopy_layer", ] + ) + data <- data[data$canopy_layer >= 0.66 | is.na(data$canopy_layer), ] + # remove temporary covariate column - data <- data[,colnames(data)!='canopy_layer'] + data <- data[, colnames(data) != "canopy_layer"] } else { data <- NULL } return(data) } -##==================================================================================================# \ No newline at end of file +## ==================================================================================================# diff --git a/base/db/R/db_merge_into.R b/base/db/R/db_merge_into.R index a00718e5ac3..28edb7d07dc 100644 --- a/base/db/R/db_merge_into.R +++ 
b/base/db/R/db_merge_into.R @@ -7,8 +7,8 @@ #' @export #' @examples #' irisdb <- DBI::dbConnect(RSQLite::SQLite(), ":memory:") -#' dplyr::copy_to(irisdb, iris[1:10,], name = "iris", overwrite = TRUE) -#' db_merge_into(iris[1:12,], "iris", irisdb) +#' dplyr::copy_to(irisdb, iris[1:10, ], name = "iris", overwrite = TRUE) +#' db_merge_into(iris[1:12, ], "iris", irisdb) #' dplyr::tbl(irisdb, "iris") %>% dplyr::count() db_merge_into <- function(values, table, con, by = NULL, drop = FALSE, ...) { values_fixed <- match_dbcols(values, table, con, drop = FALSE) diff --git a/base/db/R/dbfiles.R b/base/db/R/dbfiles.R index 1cab9e069e7..7d67f3bd172 100644 --- a/base/db/R/dbfiles.R +++ b/base/db/R/dbfiles.R @@ -241,12 +241,12 @@ dbfile.input.check <- function(siteid, startdate = NULL, enddate = NULL, mimetyp formatid <- get.id(table = "formats", colnames = c("mimetype_id", "name"), values = c(mimetypeid, formatname), con = con) if (is.null(formatid)) { - return (invisible(data.frame())) + return(invisible(data.frame())) } # setup parent part of query if specified parent <- "" - + if (!is.na(parentid)) { parent <- paste0(" AND parent_id=", parentid) } @@ -277,12 +277,12 @@ dbfile.input.check <- function(siteid, startdate = NULL, enddate = NULL, mimetyp } } else { # not exact dates inputs <- db.query( - query = paste0( - "SELECT * FROM inputs WHERE site_id=", siteid, - " AND format_id=", formatid, - parent - ), - con = con + query = paste0( + "SELECT * FROM inputs WHERE site_id=", siteid, + " AND format_id=", formatid, + parent + ), + con = con ) } @@ -329,7 +329,6 @@ dbfile.input.check <- function(siteid, startdate = NULL, enddate = NULL, mimetyp return(dbfile) } else if (length(inputs$id) == 0) { - # need this third case here because prent check above can return an empty inputs return(data.frame()) } else { @@ -449,7 +448,7 @@ dbfile.posterior.check <- function(pft, mimetype, formatname, con, hostname = PE # find appropriate pft pftid <- get.id(table = "pfts", values = "name", colnames = pft, con = con) if (is.null(pftid)) { - return (invisible(data.frame())) + return(invisible(data.frame())) } # find appropriate format @@ -460,7 +459,7 @@ dbfile.posterior.check <- function(pft, mimetype, formatname, con, hostname = PE formatid <- get.id(table = "formats", colnames = c("mimetype_id", "name"), values = c(mimetypeid, formatname), con = con) if (is.null(formatid)) { - return (invisible(data.frame())) + return(invisible(data.frame())) } # find appropriate posterior @@ -472,7 +471,7 @@ dbfile.posterior.check <- function(pft, mimetype, formatname, con, hostname = PE con = con )[["id"]] if (is.null(posteriorid)) { - return (invisible(data.frame())) + return(invisible(data.frame())) } invisible(dbfile.check(type = "Posterior", container.id = posteriorid, con = con, hostname = hostname)) @@ -662,7 +661,7 @@ dbfile.id <- function(type, file, con, hostname = PEcAn.remote::fqdn()) { hostid <- db.query(query = paste0("SELECT id FROM machines WHERE hostname='", hostname, "'"), con = con)[["id"]] if (is.null(hostid)) { PEcAn.logger::logger.warn("hostid not found in database") - return (invisible(NA)) + return(invisible(NA)) } # find file @@ -718,8 +717,6 @@ dbfile.id <- function(type, file, con, hostname = PEcAn.remote::fqdn()) { dbfile.move <- function(old.dir, new.dir, file.type, siteid = NULL, register = FALSE) { - - # create nulls for file movement and error info error <- 0 files.sym <- 0 @@ -774,10 +771,11 @@ dbfile.move <- function(old.dir, new.dir, file.type, siteid = NULL, register = F con <- db.open( params 
= list( driver = "Postgres", - dbname = "bety", - host = "psql-pecan.bu.edu", - user = "bety", - password = "bety") + dbname = "bety", + host = "psql-pecan.bu.edu", + user = "bety", + password = "bety" + ) ) # get matching dbfiles from BETY @@ -790,7 +788,6 @@ dbfile.move <- function(old.dir, new.dir, file.type, siteid = NULL, register = F # if there are matching db files if (dim(dbfiles)[1] > 0) { - # Check to make sure files line up if (dim(dbfiles)[1] != length(full.old.file)) { PEcAn.logger::logger.warn("Files to be moved don't match up with BETY files, only moving the files that match") @@ -831,7 +828,6 @@ dbfile.move <- function(old.dir, new.dir, file.type, siteid = NULL, register = F # if there are files that are in the folder but not in BETY, we can either register them or not if (dim(dbfiles)[1] == 0 | files.changed > 0) { - # Recheck what files are in the directory since others may have been moved above old.files <- list.files(path = old.dir, pattern = file.pattern) @@ -853,7 +849,6 @@ dbfile.move <- function(old.dir, new.dir, file.type, siteid = NULL, register = F if (error == 0 & register == TRUE) { - # Record how many files are being registered to BETY files.reg <- length(full.old.file) @@ -864,12 +859,10 @@ dbfile.move <- function(old.dir, new.dir, file.type, siteid = NULL, register = F if (file.type == "nc") { mimetype <- "application/x-netcdf" formatname <- "CF Meteorology application" - } - else if (file.type == "clim") { + } else if (file.type == "clim") { mimetype <- "text/csv" formatname <- "Sipnet.climna" - } - else { + } else { PEcAn.logger::logger.error("File Type is currently not supported") } diff --git a/base/db/R/derive.trait.R b/base/db/R/derive.trait.R index 01264b25e5e..e06bdb25d40 100644 --- a/base/db/R/derive.trait.R +++ b/base/db/R/derive.trait.R @@ -20,17 +20,19 @@ ##' @examples ##' input <- list(x = data.frame(mean = 1, stat = 1, n = 1)) ##' derive.trait(FUN = identity, input = input, var.name = 'x') -derive.trait <- function(FUN, ..., input = list(...), var.name = NA, sample.size = 10^6){ - if(any(lapply(input, nrow) > 1)){ +derive.trait <- function(FUN, ..., input = list(...), var.name = NA, sample.size = 10^6) { + if (any(lapply(input, nrow) > 1)) { return(NULL) } - input.samples <- lapply(input, take.samples, sample.size=sample.size) + input.samples <- lapply(input, take.samples, sample.size = sample.size) output.samples <- do.call(FUN, input.samples) - output <- input[[1]] - output$mean <- mean(output.samples) - output$stat <- ifelse(length(output.samples) > 1, stats::sd(output.samples), NA) - output$n <- min(sapply(input, function(trait){trait$n})) - output$vname <- ifelse(is.na(var.name), output$vname, var.name) + output <- input[[1]] + output$mean <- mean(output.samples) + output$stat <- ifelse(length(output.samples) > 1, stats::sd(output.samples), NA) + output$n <- min(sapply(input, function(trait) { + trait$n + })) + output$vname <- ifelse(is.na(var.name), output$vname, var.name) return(output) } -##==================================================================================================# \ No newline at end of file +## ==================================================================================================# diff --git a/base/db/R/derive.traits.R b/base/db/R/derive.traits.R index cb8265149ab..fb2bfe186d6 100644 --- a/base/db/R/derive.traits.R +++ b/base/db/R/derive.traits.R @@ -13,33 +13,42 @@ ##' this specifies the columns that identify a unique data point ##' @return a copy of the first input trait with modified mean, stat, 
and n derive.traits <- function(FUN, ..., input = list(...), - match.columns = c('citation_id', 'site_id', 'specie_id'), - var.name = NA, sample.size = 10^6){ - if(length(input) == 1){ + match.columns = c("citation_id", "site_id", "specie_id"), + var.name = NA, sample.size = 10^6) { + if (length(input) == 1) { input <- input[[1]] - #KLUDGE: modified to handle empty datasets - for(i in (0:nrow(input))[-1]){ - input[i,] <- derive.trait(FUN, input[i,], sample.size=sample.size) + # KLUDGE: modified to handle empty datasets + for (i in (0:nrow(input))[-1]) { + input[i, ] <- derive.trait(FUN, input[i, ], sample.size = sample.size) } return(input) - } - else if(length(match.columns) > 0){ - #function works recursively to reduce the number of match columns + } else if (length(match.columns) > 0) { + # function works recursively to reduce the number of match columns match.column <- match.columns[[1]] - #find unique values within the column that intersect among all input datasets - columns <- lapply(input, function(data){data[[match.column]]}) + # find unique values within the column that intersect among all input datasets + columns <- lapply(input, function(data) { + data[[match.column]] + }) intersection <- Reduce(intersect, columns) - - #run derive.traits() on subsets of input that contain those unique values - derived.traits<-lapply(intersection, - function(id){ - filtered.input <- lapply(input, - function(data){data[data[[match.column]] == id,]}) - derive.traits(FUN, input=filtered.input, - match.columns=match.columns[-1], - var.name=var.name, - sample.size=sample.size) - }) + + # run derive.traits() on subsets of input that contain those unique values + derived.traits <- lapply( + intersection, + function(id) { + filtered.input <- lapply( + input, + function(data) { + data[data[[match.column]] == id, ] + } + ) + derive.traits(FUN, + input = filtered.input, + match.columns = match.columns[-1], + var.name = var.name, + sample.size = sample.size + ) + } + ) derived.traits <- derived.traits[!is.null(derived.traits)] derived.traits <- do.call(rbind, derived.traits) return(derived.traits) diff --git a/base/db/R/fetch.stats2se.R b/base/db/R/fetch.stats2se.R index d0bad8ae465..e65674402b3 100644 --- a/base/db/R/fetch.stats2se.R +++ b/base/db/R/fetch.stats2se.R @@ -9,7 +9,7 @@ ##' @return dataframe with trait data ##' @seealso used in \code{\link{query.trait.data}}; \code{\link{transformstats}} performs transformation calculations ##' @author -fetch.stats2se <- function(connection, query){ +fetch.stats2se <- function(connection, query) { transformed <- PEcAn.utils::transformstats(db.query(query = query, con = connection)) return(transformed) -} \ No newline at end of file +} diff --git a/base/db/R/get.trait.data.R b/base/db/R/get.trait.data.R index b307da08c0b..3ca360e3b7c 100644 --- a/base/db/R/get.trait.data.R +++ b/base/db/R/get.trait.data.R @@ -6,7 +6,7 @@ ##' - `settings$database$bety` ##' - `settings$database$dbfiles` ##' - `settings$meta.analysis$update` -##' +##' ##' @param pfts the list of pfts to get traits for ##' @param modeltype type of model that is used, this is is used to distinguish ##' between different PFTs with the same name. 
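# Usage sketch for get.trait.data(): `settings` is assumed to be a standard PEcAn
# settings list (e.g. from PEcAn.settings::read.settings()); the file name and the
# settings$model$type field are hypothetical, while the database/dbfiles/update
# fields are the ones documented above. This is an illustrative call, not a
# prescribed workflow.
# settings <- PEcAn.settings::read.settings("pecan.xml")
pft.results <- get.trait.data(
  pfts        = settings$pfts,
  modeltype   = settings$model$type,
  dbfiles     = settings$database$dbfiles,
  database    = settings$database$bety,
  forceupdate = settings$meta.analysis$update
)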
@@ -32,46 +32,47 @@ get.trait.data <- forceupdate, write = FALSE, trait.names = NULL) { - - if (!is.list(pfts)) { - PEcAn.logger::logger.severe('pfts must be a list') - } - # Check that all PFTs have associated outdir entries - pft_outdirs <- lapply(pfts, '[[', 'outdir') - if (any(sapply(pft_outdirs, is.null))) { - PEcAn.logger::logger.severe('At least one pft in settings is missing its "outdir"') - } - - dbcon <- db.open(database) - on.exit(db.close(dbcon), add = TRUE) - - if (is.null(trait.names)) { - PEcAn.logger::logger.debug(paste0( - "`trait.names` is NULL, so retrieving all traits ", - "that have at least one prior for these PFTs." - )) - pft_names <- vapply(pfts, "[[", character(1), "name") - pft_ids <- query_pfts(dbcon, pft_names, modeltype, strict = TRUE)[["id"]] - # NOTE: Use `format` here to avoid implicit (incorrect) coercion - # to double by `lapply`. This works fine if we switch to - # `query_priors`, but haven't done so yet because that requires - # prepared statements and therefore requires the Postgres driver. - all_priors_list <- lapply(format(pft_ids, scientific = FALSE), query.priors, - con = dbcon, trstr = trait.names) - trait.names <- unique(unlist(lapply(all_priors_list, rownames))) - # Eventually, can replace with this: - # all_priors <- query_priors(pfts, params = database) - # trait.names <- unique(all_priors[["name"]]) + if (!is.list(pfts)) { + PEcAn.logger::logger.severe("pfts must be a list") + } + # Check that all PFTs have associated outdir entries + pft_outdirs <- lapply(pfts, "[[", "outdir") + if (any(sapply(pft_outdirs, is.null))) { + PEcAn.logger::logger.severe('At least one pft in settings is missing its "outdir"') + } + + dbcon <- db.open(database) + on.exit(db.close(dbcon), add = TRUE) + + if (is.null(trait.names)) { + PEcAn.logger::logger.debug(paste0( + "`trait.names` is NULL, so retrieving all traits ", + "that have at least one prior for these PFTs." + )) + pft_names <- vapply(pfts, "[[", character(1), "name") + pft_ids <- query_pfts(dbcon, pft_names, modeltype, strict = TRUE)[["id"]] + # NOTE: Use `format` here to avoid implicit (incorrect) coercion + # to double by `lapply`. This works fine if we switch to + # `query_priors`, but haven't done so yet because that requires + # prepared statements and therefore requires the Postgres driver. 
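# Illustration of the scientific-notation pitfall the NOTE above refers to
# (the id value here is hypothetical, not taken from the database):
#   as.character(1e10)                 # "1e+10"  -- digits lost when pasted into SQL
#   format(1e10, scientific = FALSE)   # "10000000000"
# Formatting the ids first keeps every digit when they are passed to query.priors().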
+ all_priors_list <- lapply(format(pft_ids, scientific = FALSE), query.priors, + con = dbcon, trstr = trait.names + ) + trait.names <- unique(unlist(lapply(all_priors_list, rownames))) + # Eventually, can replace with this: + # all_priors <- query_priors(pfts, params = database) + # trait.names <- unique(all_priors[["name"]]) + } + + # process all pfts + result <- lapply(pfts, get.trait.data.pft, + modeltype = modeltype, + dbfiles = dbfiles, + dbcon = dbcon, + write = write, + forceupdate = forceupdate, + trait.names = trait.names + ) + + invisible(result) } - - # process all pfts - result <- lapply(pfts, get.trait.data.pft, - modeltype = modeltype, - dbfiles = dbfiles, - dbcon = dbcon, - write = write, - forceupdate = forceupdate, - trait.names = trait.names) - - invisible(result) -} \ No newline at end of file diff --git a/base/db/R/get.trait.data.pft.R b/base/db/R/get.trait.data.pft.R index a2af9c5215a..f1d146de279 100644 --- a/base/db/R/get.trait.data.pft.R +++ b/base/db/R/get.trait.data.pft.R @@ -20,336 +20,349 @@ get.trait.data.pft <- trait.names, forceupdate = FALSE, write = FALSE) { - - - # Create directory if necessary - if (!file.exists(pft$outdir) && !dir.create(pft$outdir, recursive = TRUE)) { - PEcAn.logger::logger.error(paste0("Couldn't create PFT output directory: ", pft$outdir)) - } + # Create directory if necessary + if (!file.exists(pft$outdir) && !dir.create(pft$outdir, recursive = TRUE)) { + PEcAn.logger::logger.error(paste0("Couldn't create PFT output directory: ", pft$outdir)) + } - ## Remove old files. Clean up. - old.files <- list.files(path = pft$outdir, full.names = TRUE, include.dirs = FALSE) - file.remove(old.files) + ## Remove old files. Clean up. + old.files <- list.files(path = pft$outdir, full.names = TRUE, include.dirs = FALSE) + file.remove(old.files) - # find appropriate pft - pftres <- query_pfts(dbcon, pft[["name"]], modeltype) - pfttype <- pftres[["pft_type"]] - pftid <- pftres[["id"]] + # find appropriate pft + pftres <- query_pfts(dbcon, pft[["name"]], modeltype) + pfttype <- pftres[["pft_type"]] + pftid <- pftres[["id"]] - if (nrow(pftres) > 1) { - PEcAn.logger::logger.severe( - "Multiple PFTs named", pft[["name"]], "found,", - "with ids", PEcAn.utils::vecpaste(pftres[["id"]]), ".", - "Specify modeltype to fix this.") - } + if (nrow(pftres) > 1) { + PEcAn.logger::logger.severe( + "Multiple PFTs named", pft[["name"]], "found,", + "with ids", PEcAn.utils::vecpaste(pftres[["id"]]), ".", + "Specify modeltype to fix this." + ) + } - if (nrow(pftres) == 0) { - PEcAn.logger::logger.severe("Could not find pft", pft[["name"]]) - return(NA) - } + if (nrow(pftres) == 0) { + PEcAn.logger::logger.severe("Could not find pft", pft[["name"]]) + return(NA) + } - # get the member species/cultivars, we need to check if anything changed - if (pfttype == "plant") { - pft_member_filename = "species.csv" - pft_members <- PEcAn.DB::query.pft_species(pft$name, modeltype, dbcon) - } else if (pfttype == "cultivar") { - pft_member_filename = "cultivars.csv" - pft_members <- PEcAn.DB::query.pft_cultivars(pft$name, modeltype, dbcon) - } else { - PEcAn.logger::logger.severe("Unknown pft type! 
Expected 'plant' or 'cultivar', got", pfttype) - } + # get the member species/cultivars, we need to check if anything changed + if (pfttype == "plant") { + pft_member_filename <- "species.csv" + pft_members <- PEcAn.DB::query.pft_species(pft$name, modeltype, dbcon) + } else if (pfttype == "cultivar") { + pft_member_filename <- "cultivars.csv" + pft_members <- PEcAn.DB::query.pft_cultivars(pft$name, modeltype, dbcon) + } else { + PEcAn.logger::logger.severe("Unknown pft type! Expected 'plant' or 'cultivar', got", pfttype) + } - # ANS: Need to do this conversion for the check against existing - # membership later on. Otherwise, `NA` from the CSV is interpreted - # as different from `""` returned here, even though they are really - # the same thing. - pft_members <- pft_members %>% - dplyr::mutate_if(is.character, ~dplyr::na_if(., "")) + # ANS: Need to do this conversion for the check against existing + # membership later on. Otherwise, `NA` from the CSV is interpreted + # as different from `""` returned here, even though they are really + # the same thing. + pft_members <- pft_members %>% + dplyr::mutate_if(is.character, ~ dplyr::na_if(., "")) - # get the priors - prior.distns <- PEcAn.DB::query.priors( - pft = pftid, - trstr = PEcAn.utils::vecpaste(trait.names), - con = dbcon) - prior.distns <- prior.distns[which(!rownames(prior.distns) %in% names(pft$constants)),] - traits <- rownames(prior.distns) + # get the priors + prior.distns <- PEcAn.DB::query.priors( + pft = pftid, + trstr = PEcAn.utils::vecpaste(trait.names), + con = dbcon + ) + prior.distns <- prior.distns[which(!rownames(prior.distns) %in% names(pft$constants)), ] + traits <- rownames(prior.distns) - # get the trait data (don't bother sampling derived traits until after update check) - trait.data.check <- PEcAn.DB::query.traits(ids = pft_members$id, priors = traits, con = dbcon, update.check.only = TRUE, ids_are_cultivars = (pfttype=="cultivar")) - traits <- names(trait.data.check) + # get the trait data (don't bother sampling derived traits until after update check) + trait.data.check <- PEcAn.DB::query.traits(ids = pft_members$id, priors = traits, con = dbcon, update.check.only = TRUE, ids_are_cultivars = (pfttype == "cultivar")) + traits <- names(trait.data.check) - # Set forceupdate FALSE if it's a string (backwards compatible with 'AUTO' flag used in the past) - forceupdate <- isTRUE(as.logical(forceupdate)) + # Set forceupdate FALSE if it's a string (backwards compatible with 'AUTO' flag used in the past) + forceupdate <- isTRUE(as.logical(forceupdate)) - # check to see if we need to update - if (!forceupdate) { - if (is.null(pft$posteriorid)) { - recent_posterior <- dplyr::tbl(dbcon, "posteriors") %>% - dplyr::filter(.data$pft_id == !!pftid) %>% - dplyr::collect() - if (length(recent_posterior) > 0) { - pft$posteriorid <- dplyr::tbl(dbcon, "posteriors") %>% + # check to see if we need to update + if (!forceupdate) { + if (is.null(pft$posteriorid)) { + recent_posterior <- dplyr::tbl(dbcon, "posteriors") %>% dplyr::filter(.data$pft_id == !!pftid) %>% - dplyr::arrange(dplyr::desc(.data$created_at)) %>% - utils::head(1) %>% - dplyr::pull("id") - } else { - PEcAn.logger::logger.info("No previous posterior found. 
Forcing update") + dplyr::collect() + if (length(recent_posterior) > 0) { + pft$posteriorid <- dplyr::tbl(dbcon, "posteriors") %>% + dplyr::filter(.data$pft_id == !!pftid) %>% + dplyr::arrange(dplyr::desc(.data$created_at)) %>% + utils::head(1) %>% + dplyr::pull("id") + } else { + PEcAn.logger::logger.info("No previous posterior found. Forcing update") + } } - } - if (!is.null(pft$posteriorid)) { - files <- dbfile.check(type = "Posterior", container.id = pft$posteriorid, con = dbcon, - return.all = TRUE) - need_files <- c( - trait_data = "trait.data.Rdata", - priors = "prior.distns.Rdata", - pft_membership = pft_member_filename - ) - ids <- match(need_files, files$file_name) - names(ids) <- names(need_files) - if (any(is.na(ids))) { - missing_files <- need_files[is.na(ids)] - PEcAn.logger::logger.info(paste0( - "Forcing meta-analysis update because ", - "the following files are missing from the posterior: ", - paste0(shQuote(missing_files), collapse = ", ") - )) - PEcAn.logger::logger.debug( - "\n `dbfile.check` returned the following output:\n", - PEcAn.logger::print2string(files), - wrap = FALSE + if (!is.null(pft$posteriorid)) { + files <- dbfile.check( + type = "Posterior", container.id = pft$posteriorid, con = dbcon, + return.all = TRUE ) - } else { - PEcAn.logger::logger.debug( - "All posterior files are present. Performing additional checks ", - "to determine if meta-analysis needs to be updated." + need_files <- c( + trait_data = "trait.data.Rdata", + priors = "prior.distns.Rdata", + pft_membership = pft_member_filename ) - # check if all files exist - need_paths <- file.path(files$file_path[ids], need_files) - names(need_paths) <- names(need_files) - files_exist <- file.exists(need_paths) - foundallfiles <- all(files_exist) - if (!foundallfiles) { - PEcAn.logger::logger.warn( - "The following files are in database but not found on disk: ", - paste(shQuote(need_files[!files_exist]), collapse = ", "), ". ", - "Re-running meta-analysis." + ids <- match(need_files, files$file_name) + names(ids) <- names(need_files) + if (any(is.na(ids))) { + missing_files <- need_files[is.na(ids)] + PEcAn.logger::logger.info(paste0( + "Forcing meta-analysis update because ", + "the following files are missing from the posterior: ", + paste0(shQuote(missing_files), collapse = ", ") + )) + PEcAn.logger::logger.debug( + "\n `dbfile.check` returned the following output:\n", + PEcAn.logger::print2string(files), + wrap = FALSE ) } else { - # Check if PFT membership has changed - PEcAn.logger::logger.debug("Checking if PFT membership has changed.") - if (pfttype == "plant") { - # Columns are: id, genus, species, scientificname - colClass = c("double", "character", "character", "character") - } else if (pfttype == "cultivar") { - # Columns are: id, specie_id, genus, species, scientificname, cultivar - colClass = c("double", "double", "character", "character", "character", "character") - } - existing_membership <- utils::read.csv( - need_paths[["pft_membership"]], - # Need this so NA values are formatted consistently - colClasses = colClass, - stringsAsFactors = FALSE, - na.strings = c("", "NA") - ) - diff_membership <- symmetric_setdiff( - existing_membership, - pft_members, - xname = "existing", - yname = "current" - ) - if (nrow(diff_membership) > 0) { - PEcAn.logger::logger.error( - "\n PFT membership has changed. 
\n", - "Difference is:\n", - PEcAn.logger::print2string(diff_membership), - wrap = FALSE - ) - foundallfiles <- FALSE - } - - # Check if priors have changed - PEcAn.logger::logger.debug("Checking if priors have changed") - existing_prior <- PEcAn.utils::load_local(need_paths[["priors"]])[["prior.distns"]] - diff_prior <- symmetric_setdiff( - dplyr::as_tibble(prior.distns, rownames = "trait"), - dplyr::as_tibble(existing_prior, rownames = "trait") + PEcAn.logger::logger.debug( + "All posterior files are present. Performing additional checks ", + "to determine if meta-analysis needs to be updated." ) - if (nrow(diff_prior) > 0) { - PEcAn.logger::logger.error( - "\n Prior has changed. \n", - "Difference is:\n", - PEcAn.logger::print2string(diff_prior), - wrap = FALSE - ) - foundallfiles <- FALSE - } - - # Check if trait data have changed - PEcAn.logger::logger.debug("Checking if trait data have changed") - existing_trait_data <- PEcAn.utils::load_local( - need_paths[["trait_data"]] - )[["trait.data"]] - if (length(trait.data.check) != length(existing_trait_data)) { + # check if all files exist + need_paths <- file.path(files$file_path[ids], need_files) + names(need_paths) <- names(need_files) + files_exist <- file.exists(need_paths) + foundallfiles <- all(files_exist) + if (!foundallfiles) { PEcAn.logger::logger.warn( - "Lengths of new and existing `trait.data` differ. ", + "The following files are in database but not found on disk: ", + paste(shQuote(need_files[!files_exist]), collapse = ", "), ". ", "Re-running meta-analysis." ) - foundallfiles <- FALSE - } else if (length(trait.data.check) == 0) { - PEcAn.logger::logger.warn("New and existing trait data are both empty. Skipping this check.") } else { - current_traits <- dplyr::bind_rows(trait.data.check, .id = "trait") %>% - dplyr::select(-mean, -"stat") - existing_traits <- dplyr::bind_rows(existing_trait_data, .id = "trait") %>% - dplyr::select(-mean, -"stat") - diff_traits <- symmetric_setdiff(current_traits, existing_traits) - if (nrow(diff_traits) > 0) { - diff_summary <- diff_traits %>% - dplyr::count(source, .data$trait) + # Check if PFT membership has changed + PEcAn.logger::logger.debug("Checking if PFT membership has changed.") + if (pfttype == "plant") { + # Columns are: id, genus, species, scientificname + colClass <- c("double", "character", "character", "character") + } else if (pfttype == "cultivar") { + # Columns are: id, specie_id, genus, species, scientificname, cultivar + colClass <- c("double", "double", "character", "character", "character", "character") + } + existing_membership <- utils::read.csv( + need_paths[["pft_membership"]], + # Need this so NA values are formatted consistently + colClasses = colClass, + stringsAsFactors = FALSE, + na.strings = c("", "NA") + ) + diff_membership <- symmetric_setdiff( + existing_membership, + pft_members, + xname = "existing", + yname = "current" + ) + if (nrow(diff_membership) > 0) { PEcAn.logger::logger.error( - "\n Prior has changed. \n", - "Here are the number of differing trait records by trait:\n", - PEcAn.logger::print2string(diff_summary), + "\n PFT membership has changed. 
\n", + "Difference is:\n", + PEcAn.logger::print2string(diff_membership), wrap = FALSE ) foundallfiles <- FALSE } - } - } + # Check if priors have changed + PEcAn.logger::logger.debug("Checking if priors have changed") + existing_prior <- PEcAn.utils::load_local(need_paths[["priors"]])[["prior.distns"]] + diff_prior <- symmetric_setdiff( + dplyr::as_tibble(prior.distns, rownames = "trait"), + dplyr::as_tibble(existing_prior, rownames = "trait") + ) + if (nrow(diff_prior) > 0) { + PEcAn.logger::logger.error( + "\n Prior has changed. \n", + "Difference is:\n", + PEcAn.logger::print2string(diff_prior), + wrap = FALSE + ) + foundallfiles <- FALSE + } - if (foundallfiles) { - PEcAn.logger::logger.info( - "Reusing existing files from posterior", pft$posteriorid, - "for PFT", shQuote(pft$name) - ) - for (id in seq_len(nrow(files))) { - file.copy(from = file.path(files[[id, "file_path"]], files[[id, "file_name"]]), - to = file.path(pft$outdir, files[[id, "file_name"]])) + # Check if trait data have changed + PEcAn.logger::logger.debug("Checking if trait data have changed") + existing_trait_data <- PEcAn.utils::load_local( + need_paths[["trait_data"]] + )[["trait.data"]] + if (length(trait.data.check) != length(existing_trait_data)) { + PEcAn.logger::logger.warn( + "Lengths of new and existing `trait.data` differ. ", + "Re-running meta-analysis." + ) + foundallfiles <- FALSE + } else if (length(trait.data.check) == 0) { + PEcAn.logger::logger.warn("New and existing trait data are both empty. Skipping this check.") + } else { + current_traits <- dplyr::bind_rows(trait.data.check, .id = "trait") %>% + dplyr::select(-mean, -"stat") + existing_traits <- dplyr::bind_rows(existing_trait_data, .id = "trait") %>% + dplyr::select(-mean, -"stat") + diff_traits <- symmetric_setdiff(current_traits, existing_traits) + if (nrow(diff_traits) > 0) { + diff_summary <- diff_traits %>% + dplyr::count(source, .data$trait) + PEcAn.logger::logger.error( + "\n Prior has changed. \n", + "Here are the number of differing trait records by trait:\n", + PEcAn.logger::print2string(diff_summary), + wrap = FALSE + ) + foundallfiles <- FALSE + } + } } - done <- TRUE - # May need to symlink the generic post.distns.Rdata to a specific post.distns.*.Rdata file. - if (length(list.files(pft$outdir, "post.distns.Rdata")) == 0) { - all.files <- list.files(pft$outdir) - post.distn.file <- all.files[grep("post\\.distns\\..*\\.Rdata", all.files)] - if (length(post.distn.file) > 1) - PEcAn.logger::logger.severe( - "get.trait.data.pft() doesn't know how to ", - "handle multiple `post.distns.*.Rdata` files.", - "Found the following files: ", - paste(shQuote(post.distn.file), collapse = ", ") - ) - else if (length(post.distn.file) == 1) { - # Found exactly one post.distns.*.Rdata file. Use it. - link_input <- file.path(pft[["outdir"]], post.distn.file) - link_target <- file.path(pft[["outdir"]], "post.distns.Rdata") - PEcAn.logger::logger.debug( - "Found exactly one posterior distribution file: ", - shQuote(link_input), - ". Symlinking it to PFT output directory: ", - shQuote(link_target) - ) - file.symlink(from = link_input, to = link_target) - } else { - PEcAn.logger::logger.error( - "No previous posterior distribution file found. ", - "Most likely, trait data were retrieved, but meta-analysis ", - "was not run. Meta-analysis will be run." 
+ if (foundallfiles) { + PEcAn.logger::logger.info( + "Reusing existing files from posterior", pft$posteriorid, + "for PFT", shQuote(pft$name) + ) + for (id in seq_len(nrow(files))) { + file.copy( + from = file.path(files[[id, "file_path"]], files[[id, "file_name"]]), + to = file.path(pft$outdir, files[[id, "file_name"]]) ) - done <- FALSE + } + + done <- TRUE + + # May need to symlink the generic post.distns.Rdata to a specific post.distns.*.Rdata file. + if (length(list.files(pft$outdir, "post.distns.Rdata")) == 0) { + all.files <- list.files(pft$outdir) + post.distn.file <- all.files[grep("post\\.distns\\..*\\.Rdata", all.files)] + if (length(post.distn.file) > 1) { + PEcAn.logger::logger.severe( + "get.trait.data.pft() doesn't know how to ", + "handle multiple `post.distns.*.Rdata` files.", + "Found the following files: ", + paste(shQuote(post.distn.file), collapse = ", ") + ) + } else if (length(post.distn.file) == 1) { + # Found exactly one post.distns.*.Rdata file. Use it. + link_input <- file.path(pft[["outdir"]], post.distn.file) + link_target <- file.path(pft[["outdir"]], "post.distns.Rdata") + PEcAn.logger::logger.debug( + "Found exactly one posterior distribution file: ", + shQuote(link_input), + ". Symlinking it to PFT output directory: ", + shQuote(link_target) + ) + file.symlink(from = link_input, to = link_target) + } else { + PEcAn.logger::logger.error( + "No previous posterior distribution file found. ", + "Most likely, trait data were retrieved, but meta-analysis ", + "was not run. Meta-analysis will be run." + ) + done <- FALSE + } + } + if (done) { + return(pft) } } - if (done) return(pft) } } } - } - - # get the trait data (including sampling of derived traits, if any) - trait.data <- query.traits(pft_members$id, traits, con = dbcon, - update.check.only = FALSE, - ids_are_cultivars = (pfttype == "cultivar")) - traits <- names(trait.data) - if (length(trait.data) > 0) { - trait_counts <- trait.data %>% - dplyr::bind_rows(.id = "trait") %>% - dplyr::count(.data$trait) - - PEcAn.logger::logger.info( - "\n Number of observations per trait for PFT ", shQuote(pft[["name"]]), ":\n", - PEcAn.logger::print2string(trait_counts, n = Inf, na.print = ""), - wrap = FALSE - ) - } else { - PEcAn.logger::logger.warn( - "None of the requested traits were found for PFT ", - format(pft_members[["id"]], scientific = FALSE) + # get the trait data (including sampling of derived traits, if any) + trait.data <- query.traits(pft_members$id, traits, + con = dbcon, + update.check.only = FALSE, + ids_are_cultivars = (pfttype == "cultivar") ) - } + traits <- names(trait.data) - # get list of existing files so they get ignored saving - old.files <- list.files(path = pft$outdir) + if (length(trait.data) > 0) { + trait_counts <- trait.data %>% + dplyr::bind_rows(.id = "trait") %>% + dplyr::count(.data$trait) - # create a new posterior - insert_result <- db.query( - paste0("INSERT INTO posteriors (pft_id) VALUES (", pftid, ") RETURNING id"), - con = dbcon) - pft$posteriorid <- insert_result[["id"]] + PEcAn.logger::logger.info( + "\n Number of observations per trait for PFT ", shQuote(pft[["name"]]), ":\n", + PEcAn.logger::print2string(trait_counts, n = Inf, na.print = ""), + wrap = FALSE + ) + } else { + PEcAn.logger::logger.warn( + "None of the requested traits were found for PFT ", + format(pft_members[["id"]], scientific = FALSE) + ) + } - # create path where to store files - pathname <- file.path(dbfiles, "posterior", pft$posteriorid) - dir.create(pathname, showWarnings = FALSE, recursive = TRUE) 
+ # get list of existing files so they get ignored saving + old.files <- list.files(path = pft$outdir) - ## 1. get species/cultivar list based on pft - utils::write.csv(pft_members, file.path(pft$outdir, pft_member_filename), - row.names = FALSE) + # create a new posterior + insert_result <- db.query( + paste0("INSERT INTO posteriors (pft_id) VALUES (", pftid, ") RETURNING id"), + con = dbcon + ) + pft$posteriorid <- insert_result[["id"]] - ## save priors - save(prior.distns, file = file.path(pft$outdir, "prior.distns.Rdata")) - utils::write.csv(prior.distns, file.path(pft$outdir, "prior.distns.csv"), - row.names = TRUE) + # create path where to store files + pathname <- file.path(dbfiles, "posterior", pft$posteriorid) + dir.create(pathname, showWarnings = FALSE, recursive = TRUE) - ## 3. display info to the console - PEcAn.logger::logger.info( - "\n Summary of prior distributions for PFT ", shQuote(pft$name), ":\n", - PEcAn.logger::print2string(prior.distns), - wrap = FALSE - ) + ## 1. get species/cultivar list based on pft + utils::write.csv(pft_members, file.path(pft$outdir, pft_member_filename), + row.names = FALSE + ) - ## traits = variables with prior distributions for this pft - trait.data.file <- file.path(pft$outdir, "trait.data.Rdata") - save(trait.data, file = trait.data.file) - utils::write.csv( - dplyr::bind_rows(trait.data), - file.path(pft$outdir, "trait.data.csv"), - row.names = FALSE - ) + ## save priors + save(prior.distns, file = file.path(pft$outdir, "prior.distns.Rdata")) + utils::write.csv(prior.distns, file.path(pft$outdir, "prior.distns.csv"), + row.names = TRUE + ) - ### save and store in database all results except those that were there already - if(isTRUE(write)) { - store_files_all <- list.files(path = pft[["outdir"]]) - store_files <- setdiff(store_files_all, old.files) - PEcAn.logger::logger.debug( - "The following posterior files found in PFT outdir ", - "(", shQuote(pft[["outdir"]]), ") will be registered in BETY ", - "under posterior ID ", format(pft[["posteriorid"]], scientific = FALSE), ": ", - paste(shQuote(store_files), collapse = ", "), ". ", - "The following files (if any) will not be registered because they already existed: ", - paste(shQuote(intersect(store_files, old.files)), collapse = ", "), + ## 3. display info to the console + PEcAn.logger::logger.info( + "\n Summary of prior distributions for PFT ", shQuote(pft$name), ":\n", + PEcAn.logger::print2string(prior.distns), wrap = FALSE ) - for (file in store_files) { - filename <- file.path(pathname, file) - file.copy(file.path(pft$outdir, file), filename) - dbfile.insert(in.path = pathname, in.prefix = file, - type = "Posterior", id = pft[["posteriorid"]], - con = dbcon) + + ## traits = variables with prior distributions for this pft + trait.data.file <- file.path(pft$outdir, "trait.data.Rdata") + save(trait.data, file = trait.data.file) + utils::write.csv( + dplyr::bind_rows(trait.data), + file.path(pft$outdir, "trait.data.csv"), + row.names = FALSE + ) + + ### save and store in database all results except those that were there already + if (isTRUE(write)) { + store_files_all <- list.files(path = pft[["outdir"]]) + store_files <- setdiff(store_files_all, old.files) + PEcAn.logger::logger.debug( + "The following posterior files found in PFT outdir ", + "(", shQuote(pft[["outdir"]]), ") will be registered in BETY ", + "under posterior ID ", format(pft[["posteriorid"]], scientific = FALSE), ": ", + paste(shQuote(store_files), collapse = ", "), ". 
", + "The following files (if any) will not be registered because they already existed: ", + paste(shQuote(intersect(store_files, old.files)), collapse = ", "), + wrap = FALSE + ) + for (file in store_files) { + filename <- file.path(pathname, file) + file.copy(file.path(pft$outdir, file), filename) + dbfile.insert( + in.path = pathname, in.prefix = file, + type = "Posterior", id = pft[["posteriorid"]], + con = dbcon + ) + } } - } - return(pft) -} + return(pft) + } diff --git a/base/db/R/get_postgres_envvars.R b/base/db/R/get_postgres_envvars.R index 7d5773b21b4..f490203c520 100644 --- a/base/db/R/get_postgres_envvars.R +++ b/base/db/R/get_postgres_envvars.R @@ -19,11 +19,11 @@ #' @return list of connection parameters suitable for passing on to `db.open` #' #' @examples -#' host <- Sys.getenv("PGHOST") # to restore environment after demo +#' host <- Sys.getenv("PGHOST") # to restore environment after demo #' -#' Sys.unsetenv("PGHOST") -#' get_postgres_envvars()$host # NULL -#' get_postgres_envvars(host = "default", port = 5432)$host # "default" +#' Sys.unsetenv("PGHOST") +#' get_postgres_envvars()$host # NULL +#' get_postgres_envvars(host = "default", port = 5432)$host # "default" #' # defaults are ignored for a variable that exists #' Sys.setenv(PGHOST = "localhost") @@ -62,7 +62,8 @@ get_postgres_envvars <- function(...) { gsslib = "PGGSSLIB", connect_timeout = "PGCONNECT_TIMEOUT", client_encoding = "PGCLIENTENCODING", - target_session_attrs = "PGTARGETSESSIONATTRS") + target_session_attrs = "PGTARGETSESSIONATTRS" + ) vals <- Sys.getenv(pg_vars) names(vals) <- names(pg_vars) diff --git a/base/db/R/input.name.check.R b/base/db/R/input.name.check.R index 71eb20e8686..b72e6110bcf 100644 --- a/base/db/R/input.name.check.R +++ b/base/db/R/input.name.check.R @@ -1,3 +1,3 @@ -input.name.check <- function(inputname, con){ - db.query(query = paste0("SELECT id FROM inputs WHERE name = '", inputname, "'"), con = con)[['id']] +input.name.check <- function(inputname, con) { + db.query(query = paste0("SELECT id FROM inputs WHERE name = '", inputname, "'"), con = con)[["id"]] } diff --git a/base/db/R/insert.format.vars.R b/base/db/R/insert.format.vars.R index 4934a438392..fa53b5a96f6 100644 --- a/base/db/R/insert.format.vars.R +++ b/base/db/R/insert.format.vars.R @@ -27,7 +27,8 @@ #' name = c("NPP", NA, "YEAR"), #' unit = c("g C m-2 yr-1", NA, NA), #' storage_type = c(NA, NA, "%Y"), -#' column_number = c(2, NA, 4)) +#' column_number = c(2, NA, 4) +#' ) #' #' insert.format.vars( #' con = con, @@ -36,82 +37,88 @@ #' notes = "NPP from Harvard Forest.", #' header = FALSE, #' skip = 0, -#' formats_variables = formats_variables_tibble) +#' formats_variables = formats_variables_tibble +#' ) #' } -insert.format.vars <- function(con, format_name, mimetype_id, notes = NULL, header = TRUE, skip = 0, formats_variables = NULL, suppress = TRUE){ - +insert.format.vars <- function(con, format_name, mimetype_id, notes = NULL, header = TRUE, skip = 0, formats_variables = NULL, suppress = TRUE) { # Test if name is a character string - if(!is.character(format_name)){ + if (!is.character(format_name)) { PEcAn.logger::logger.error( "Name must be a character string" ) } # Test if format name already exists - name_test <- dplyr::tbl(con, "formats") %>% dplyr::select("id", "name") %>% dplyr::filter(.data$name %in% !!format_name) %>% dplyr::collect() + name_test <- dplyr::tbl(con, "formats") %>% + dplyr::select("id", "name") %>% + dplyr::filter(.data$name %in% !!format_name) %>% + dplyr::collect() name_test_df <- 
as.data.frame(name_test) - if(!is.null(name_test_df[1,1])){ + if (!is.null(name_test_df[1, 1])) { PEcAn.logger::logger.error( "Name already exists" ) } - #Test if skip is an integer - if(!is.character(skip)){ - PEcAn.logger::logger.error( - "Skip must be of type character" + # Test if skip is an integer + if (!is.character(skip)) { + PEcAn.logger::logger.error( + "Skip must be of type character" ) } # Test if header is a Boolean - if(!is.logical(header)){ + if (!is.logical(header)) { PEcAn.logger::logger.error( "Header must be of type Boolean" ) } # Test if notes are a character string - if(!is.character(notes)&!is.null(notes)){ + if (!is.character(notes) & !is.null(notes)) { PEcAn.logger::logger.error( "Notes must be of type character" ) } ######## Formats-Variables tests ############### - if(!is.null(formats_variables)){ - for(i in 1:nrow(formats_variables)){ - if(!is.numeric(formats_variables[[i,"variable_id"]])){ + if (!is.null(formats_variables)) { + for (i in 1:nrow(formats_variables)) { + if (!is.numeric(formats_variables[[i, "variable_id"]])) { PEcAn.logger::logger.error( "variable_id must be an integer" ) } - if(suppress == FALSE){ + if (suppress == FALSE) { ## Test if variable_id already exists ## - var_id_test <- dplyr::tbl(con, "variables") %>% dplyr::select("id") %>% dplyr::filter(.data$id %in% !!formats_variables[[i, "variable_id"]]) %>% dplyr::collect(.data$id) - if(!is.null(var_id_test[1,1])){ + var_id_test <- dplyr::tbl(con, "variables") %>% + dplyr::select("id") %>% + dplyr::filter(.data$id %in% !!formats_variables[[i, "variable_id"]]) %>% + dplyr::collect(.data$id) + if (!is.null(var_id_test[1, 1])) { PEcAn.logger::logger.error( "variable_id already exists" ) } } - if(!is.character(formats_variables[[i, "name"]])&!is.na(formats_variables[[i, "name"]])){ + if (!is.character(formats_variables[[i, "name"]]) & !is.na(formats_variables[[i, "name"]])) { PEcAn.logger::logger.error( "Variable name must be of type character or NA" ) } - if(!is.character(formats_variables[[i, "unit"]])&!is.na(formats_variables[[i, "unit"]])){ + if (!is.character(formats_variables[[i, "unit"]]) & !is.na(formats_variables[[i, "unit"]])) { PEcAn.logger::logger.error( "Units must be of type character or NA" ) } - if(!is.character(formats_variables[[i, "storage_type"]])&!is.na(formats_variables[[i, "storage_type"]])){ + if (!is.character(formats_variables[[i, "storage_type"]]) & !is.na(formats_variables[[i, "storage_type"]])) { PEcAn.logger::logger.error( "storage_type must be of type character or NA" ) } - if(!is.numeric(formats_variables[[i, "column_number"]])&!is.na(formats_variables[[i, "column_number"]])){ + if (!is.numeric(formats_variables[[i, "column_number"]]) & !is.na(formats_variables[[i, "column_number"]])) { PEcAn.logger::logger.error( "column_number must be of type numeric or NA" ) @@ -122,18 +129,21 @@ insert.format.vars <- function(con, format_name, mimetype_id, notes = NULL, head formats_variables[is.na(formats_variables)] <- "" ### udunit tests ### - for(i in 1:nrow(formats_variables)){ - u1 <- formats_variables[1,"unit"] - u2 <- dplyr::tbl(con, "variables") %>% dplyr::select("id", units) %>% dplyr::filter(.data$id %in% !!formats_variables[[1, "variable_id"]]) %>% dplyr::pull("units") - - if(!PEcAn.utils::unit_is_parseable(u1)){ + for (i in 1:nrow(formats_variables)) { + u1 <- formats_variables[1, "unit"] + u2 <- dplyr::tbl(con, "variables") %>% + dplyr::select("id", units) %>% + dplyr::filter(.data$id %in% !!formats_variables[[1, "variable_id"]]) %>% + dplyr::pull("units") + + if 
(!PEcAn.utils::unit_is_parseable(u1)) { PEcAn.logger::logger.error( - "Units '", u1, "' not parseable.", + "Units '", u1, "' not parseable.", "Please provide a unit that is parseable by the udunits library." ) } # Grab the bety units and - if(!units::ud_are_convertible(u1, u2)){ + if (!units::ud_are_convertible(u1, u2)) { PEcAn.logger::logger.error( "Units are not convertable." ) @@ -141,20 +151,20 @@ insert.format.vars <- function(con, format_name, mimetype_id, notes = NULL, head } } - formats_df <- tibble::tibble( - header = as.character(header), - skip = skip, - mimetype_id = mimetype_id, - notes = notes, - name = format_name, - stringsAsFactors = FALSE - ) + formats_df <- tibble::tibble( + header = as.character(header), + skip = skip, + mimetype_id = mimetype_id, + notes = notes, + name = format_name, + stringsAsFactors = FALSE + ) - ## Insert format record - inserted_formats <- db_merge_into(formats_df, "formats", con = con, by = c("name", "mimetype_id")) ## Make sure to include a 'by' argument - format_id <- dplyr::pull(inserted_formats, "id") + ## Insert format record + inserted_formats <- db_merge_into(formats_df, "formats", con = con, by = c("name", "mimetype_id")) ## Make sure to include a 'by' argument + format_id <- dplyr::pull(inserted_formats, "id") - if(!is.null(formats_variables)){ + if (!is.null(formats_variables)) { ## Insert format_id into n <- nrow(formats_variables) format_id_df <- matrix(data = format_id, nrow = n, ncol = 1) @@ -164,8 +174,7 @@ insert.format.vars <- function(con, format_name, mimetype_id, notes = NULL, head formats_variables_input <- cbind(format_id_df, formats_variables) ## Insert Format-Variable record - inserted_formats_variables <- db_merge_into(formats_variables_input, "formats_variables", con = con, by = c("variable_id")) + inserted_formats_variables <- db_merge_into(formats_variables_input, "formats_variables", con = con, by = c("variable_id")) } - return(format_id) - + return(format_id) } diff --git a/base/db/R/insert_table.R b/base/db/R/insert_table.R index 4e10999e329..434a8c5376e 100644 --- a/base/db/R/insert_table.R +++ b/base/db/R/insert_table.R @@ -1,13 +1,13 @@ #' Insert R data frame into SQL database #' -#' First, subset to matching columns. Then, make sure the local and SQL column -#' classes match, coercing local to SQL as necessary (or throwing an error). -#' Then, build an SQL string for the insert statement. Finally, insert into the +#' First, subset to matching columns. Then, make sure the local and SQL column +#' classes match, coercing local to SQL as necessary (or throwing an error). +#' Then, build an SQL string for the insert statement. Finally, insert into the #' database. #' #' @param values `data.frame` of values to write to SQL database #' @param table Name of target SQL table, as character -#' @param coerce_col_class logical, whether or not to coerce local data columns +#' @param coerce_col_class logical, whether or not to coerce local data columns #' to SQL classes. Default = `TRUE.` #' @param drop logical. If `TRUE` (default), drop columns not found in SQL table. 
#' @inheritParams db.query @@ -15,8 +15,8 @@ #' @export #' @examples #' irisdb <- DBI::dbConnect(RSQLite::SQLite(), ":memory:") -#' dplyr::copy_to(irisdb, iris[1,], name = "iris", overwrite = TRUE) -#' insert_table(iris[-1,], "iris", irisdb) +#' dplyr::copy_to(irisdb, iris[1, ], name = "iris", overwrite = TRUE) +#' insert_table(iris[-1, ], "iris", irisdb) #' dplyr::tbl(irisdb, "iris") insert_table <- function(values, table, con, coerce_col_class = TRUE, drop = TRUE) { values_fixed <- match_dbcols(values, table, con, coerce_col_class, drop = TRUE) @@ -42,9 +42,11 @@ match_dbcols <- function(values, table, con, coerce_col_class = TRUE, drop = TRU ) values_sub <- values[, use_cols] # Load one row to get column types - sql_row <- dplyr::tbl(con, table) %>% utils::head(1) %>% dplyr::collect() + sql_row <- dplyr::tbl(con, table) %>% + utils::head(1) %>% + dplyr::collect() sql_types <- purrr::map(sql_row, class) %>% - purrr::map_chr(1) + purrr::map_chr(1) sql_types <- sql_types[use_cols] values_types <- purrr::map(values_sub, class) %>% purrr::map_chr(1) type_mismatch <- sql_types != values_types diff --git a/base/db/R/pft.add.spp.R b/base/db/R/pft.add.spp.R index 0193aae9c5a..de3e852fa7a 100644 --- a/base/db/R/pft.add.spp.R +++ b/base/db/R/pft.add.spp.R @@ -10,106 +10,103 @@ ##' @param con Database connection object. ##' @param ... optional arguements for connecting to database (e.g. password, user name, database) ##' @return Function does not return a value but does print out diagnostic statements. -##' @details -##' The Symbols object are +##' @details +##' The Symbols object are ##' @author Michael C. Dietze, Dongchen Zhang pft.add.spp <- function(pft, acronym = NULL, ID = NULL, test = TRUE, con = NULL, ...) { - ## establish database connection # default points to psql-pecan.bu.edu. if (is.null(con)) { con <- PEcAn.DB::db.open(...) on.exit(PEcAn.DB::db.close(con), add = TRUE) } - - #detect if we input Symbol or IDs - if(!is.null(acronym)){ + + # detect if we input Symbol or IDs + if (!is.null(acronym)) { Species_elements <- acronym print("Input is Symbol!") - }else if(!is.null(ID)){ + } else if (!is.null(ID)) { Species_elements <- ID print("Input is ID!") - }else{ + } else { print("No IDs or Symbols imported!, Please check the data input!") return(0) } - + ## look up pfts.id based on pfts.name pft_qry <- glue::glue_sql(paste0("select * from pfts where name = '", pft, "'"), .con = con) my.pft <- PEcAn.DB::db.query(con = con, query = pft_qry) - + ## if pfts.name does not exist, stop and send error - if(nrow(my.pft) > 1){ + if (nrow(my.pft) > 1) { print("More than one pft matched!!! Please check your pft name and make sure it is unique to all other pfts!!!") - - #find similar pfts that might match and return it + + # find similar pfts that might match and return it similar_pft_query <- glue::glue_sql(paste0("select * from pfts where name like \'%", pft, "%\'"), .con = con) similar_pfts <- PEcAn.DB::db.query(con = con, query = similar_pft_query) print("similar pfts are returned, please check that!!!") return(similar_pfts) - }else if(nrow(my.pft) == 0){ + } else if (nrow(my.pft) == 0) { print("No pft founded that matched the name!!! Please check your pft name!!!") return(0) } - - #initialize inputid to store IDs + + # initialize inputid to store IDs inputid <- c() - - #initialize bad to store any problematic items. + + # initialize bad to store any problematic items. 
bad_species <- c() bad_pft_species <- c() - + ## loop over acronyms or IDs for (acro in Species_elements) { ## look up species based on acronyms. (can be either Symbols or IDs) - if(!is.null(ID)){ + if (!is.null(ID)) { species_qry <- glue::glue_sql(paste0("select * from species where \"id\" = '", acro, "'"), .con = con) - }else{ + } else { species_qry <- glue::glue_sql(paste0("select * from species where \"Symbol\" = '", acro, "'"), .con = con) } my.species <- PEcAn.DB::db.query(con = con, query = species_qry) - #if species not matched with bety records + # if species not matched with bety records if (nrow(my.species) != 1) { print(c("ACRONYM not matched", acro)) print(my.species) bad_species <- c(bad_species, acro) next } - + ## look up pfts_species.specie_id to check for duplicates - species_pft_qry <- glue::glue_sql(paste0("select * from pfts_species where pft_id = '", my.pft$id,"' and specie_id = '",my.species$id ,"'"), .con = con) + species_pft_qry <- glue::glue_sql(paste0("select * from pfts_species where pft_id = '", my.pft$id, "' and specie_id = '", my.species$id, "'"), .con = con) pft_species <- PEcAn.DB::db.query(con = con, query = species_pft_qry) - - #if record already exists + + # if record already exists if (nrow(pft_species) > 0) { print(c("Species already exists for PFT", acro)) print(pft_species) bad_pft_species <- c(bad_pft_species, paste0("Specie_ID: ", my.species$id, " . pft_ID: ", my.pft$id, ".")) next } - + ## give list of species print(c("ADDING", acro)) # print(my.species) print(my.species) if (test) { - print('TEST ONLY') - if(nrow(pft_species) == 0 && nrow(my.species) == 1 && nrow(my.pft) == 1){ + print("TEST ONLY") + if (nrow(pft_species) == 0 && nrow(my.species) == 1 && nrow(my.pft) == 1) { print("pft exists and unique; specie exists and unique; pft_species does not exists; Therefore, it's ready to go!!!") print(acro) } next } - + ## if a species is not already in the pft, add cmd <- paste0( "INSERT INTO pfts_species ", - "(pft_id, specie_id) VALUES ('",my.pft$id, "', '", my.species$id, "') RETURNING id" + "(pft_id, specie_id) VALUES ('", my.pft$id, "', '", my.species$id, "') RETURNING id" ) # This is the id that we just registered inputid <- c(PEcAn.DB::db.query(query = cmd, con = con), inputid) - - } ## end loop over acronyms + } ## end loop over acronyms return(list(input_ID = inputid, bad_species = bad_species, bad_pft_species = bad_pft_species)) - } # pft.add.spp diff --git a/base/db/R/query.data.R b/base/db/R/query.data.R index 73ca64a7b81..911b22fdc00 100644 --- a/base/db/R/query.data.R +++ b/base/db/R/query.data.R @@ -11,37 +11,43 @@ ##' @param store.unconverted determines whether or not a copy of the mean and stat fields are returned with _unconverted appended to the column names ##' @seealso used in \code{\link{query.trait.data}}; \code{\link{fetch.stats2se}}; \code{\link{transformstats}} performs transformation calculations ##' @author David LeBauer, Carl Davidson -query.data <- function( - trait, - spstr, - con, - extra.columns = paste( - "ST_X(ST_CENTROID(sites.geometry)) AS lon,", - "ST_Y(ST_CENTROID(sites.geometry)) AS lat, "), - store.unconverted = FALSE, - ids_are_cultivars = FALSE, - ...) { - id_type <- if (ids_are_cultivars) {"cultivar_id"} else {"specie_id"} +query.data <- function(trait, + spstr, + con, + extra.columns = paste( + "ST_X(ST_CENTROID(sites.geometry)) AS lon,", + "ST_Y(ST_CENTROID(sites.geometry)) AS lat, " + ), + store.unconverted = FALSE, + ids_are_cultivars = FALSE, + ...) 
{ + id_type <- if (ids_are_cultivars) { + "cultivar_id" + } else { + "specie_id" + } query <- paste("select traits.id, traits.citation_id, traits.site_id, traits.treatment_id, treatments.name, traits.date, traits.time, traits.cultivar_id, traits.specie_id, traits.mean, traits.statname, traits.stat, traits.n, variables.name as vname, extract(month from traits.date) as month,", - extra.columns, - "treatments.control, sites.greenhouse + extra.columns, + "treatments.control, sites.greenhouse from traits left join treatments on (traits.treatment_id = treatments.id) left join sites on (traits.site_id = sites.id) left join variables on (traits.variable_id = variables.id) - where ", id_type, " in (", spstr,") - and variables.name in ('", trait,"');", sep = "") + where ", id_type, " in (", spstr, ") + and variables.name in ('", trait, "');", + sep = "" + ) result <- fetch.stats2se(connection = con, query = query) - - if(store.unconverted) { + + if (store.unconverted) { result$mean_unconverted <- result$mean result$stat_unconverted <- result$stat } - + return(result) -} \ No newline at end of file +} diff --git a/base/db/R/query.dplyr.R b/base/db/R/query.dplyr.R index d1f0a1a401a..cc5a0adf85f 100644 --- a/base/db/R/query.dplyr.R +++ b/base/db/R/query.dplyr.R @@ -11,24 +11,26 @@ betyConnect <- function(php.config = "../../web/config.php") { } ## helper function - getphp = function (item, default = "") { - value = php_params[[item]] + getphp <- function(item, default = "") { + value <- php_params[[item]] if (is.null(value)) default else value } ## fill in all data from environment variables - dbparams <- get_postgres_envvars(host = getphp("db_bety_hostname", "localhost"), - port = getphp("db_bety_port", "5432"), - dbname = getphp("db_bety_database", "bety"), - user = getphp("db_bety_username", "bety"), - password = getphp("db_bety_password", "bety")) + dbparams <- get_postgres_envvars( + host = getphp("db_bety_hostname", "localhost"), + port = getphp("db_bety_port", "5432"), + dbname = getphp("db_bety_database", "bety"), + user = getphp("db_bety_username", "bety"), + password = getphp("db_bety_password", "bety") + ) ## force driver to be postgres (only value supported by db.open) dbparams[["driver"]] <- "Postgres" ## Database connection db.open(dbparams) -} # betyConnect +} # betyConnect #' Convert number to scientific notation pretty expression #' @param l Number to convert to scientific notation @@ -45,7 +47,7 @@ fancy_scientific <- function(l) { l <- gsub("0e\\+00", "0", l) # return this as an expression return(parse(text = l)) -} # fancy_scientific +} # fancy_scientific #' Count rows of a data frame @@ -53,7 +55,7 @@ fancy_scientific <- function(l) { #' @export dplyr.count <- function(df) { return(dplyr::collect(dplyr::tally(df))[["n"]]) -} # dplyr.count +} # dplyr.count #' Convert netcdf number of days to a datetime @@ -61,11 +63,11 @@ dplyr.count <- function(df) { #' @param unit string containing CF-style time unit including origin (e.g. 
"days since 2010-01-01") #' @export ncdays2date <- function(time, unit) { - date <- lubridate::parse_date_time(unit, c("ymd_HMS", "ymd_H", "ymd")) - days <- PEcAn.utils::ud_convert(time, unit, paste("days since ", date)) + date <- lubridate::parse_date_time(unit, c("ymd_HMS", "ymd_H", "ymd")) + days <- PEcAn.utils::ud_convert(time, unit, paste("days since ", date)) seconds <- PEcAn.utils::ud_convert(days, "days", "seconds") return(as.POSIXct.numeric(seconds, origin = date, tz = "UTC")) -} # ncdays2date +} # ncdays2date #' Database host information @@ -83,21 +85,25 @@ dbHostInfo <- function(bety) { if (is.na(nrow(machine)) || nrow(machine) == 0) { - return(list(hostid = hostid, - hostname = "", - start = 1e+09 * hostid, - end = 1e+09 * (hostid + 1) - 1, - sync_url = "", - sync_contact = "")) + return(list( + hostid = hostid, + hostname = "", + start = 1e+09 * hostid, + end = 1e+09 * (hostid + 1) - 1, + sync_url = "", + sync_contact = "" + )) } else { - return(list(hostid = hostid, - hostname = machine$hostname, - start = machine$sync_start, - end = machine$sync_end, - sync_url = machine$sync_url, - sync_contact = machine$sync_contact)) + return(list( + hostid = hostid, + hostname = machine$hostname, + start = machine$sync_start, + end = machine$sync_end, + sync_url = machine$sync_url, + sync_contact = machine$sync_contact + )) } -} # dbHostInfo +} # dbHostInfo #' list of workflows that exist @@ -107,15 +113,17 @@ dbHostInfo <- function(bety) { workflows <- function(bety, ensemble = FALSE) { hostinfo <- dbHostInfo(bety) if (ensemble) { - query <- paste("SELECT ensembles.id AS ensemble_id, ensembles.workflow_id, workflows.folder", - "FROM ensembles, workflows WHERE runtype = 'ensemble'") + query <- paste( + "SELECT ensembles.id AS ensemble_id, ensembles.workflow_id, workflows.folder", + "FROM ensembles, workflows WHERE runtype = 'ensemble'" + ) } else { query <- "SELECT id AS workflow_id, folder FROM workflows" } dplyr::tbl(bety, dbplyr::sql(query)) %>% dplyr::filter(.data$workflow_id >= !!hostinfo$start & .data$workflow_id <= !!hostinfo$end) %>% return() -} # workflows +} # workflows #' Get single workflow by workflow_id @@ -125,7 +133,7 @@ workflows <- function(bety, ensemble = FALSE) { workflow <- function(bety, workflow_id) { workflows(bety) %>% dplyr::filter(.data$workflow_id == !!workflow_id) -} # workflow +} # workflow #' Get table of runs corresponding to a workflow @@ -143,7 +151,7 @@ runs <- function(bety, workflow_id) { dplyr::inner_join(Ensembles, by = "ensemble_id") dplyr::select(Runs, -"workflow_id", -"ensemble_id") %>% return() -} # runs +} # runs #' Get vector of workflow IDs @@ -165,7 +173,7 @@ get_workflow_ids <- function(bety, query, all.ids = FALSE) { sort(decreasing = TRUE) } return(ids) -} # get_workflow_ids +} # get_workflow_ids #' Get data frame of users and IDs #' @inheritParams dbHostInfo @@ -176,7 +184,7 @@ get_users <- function(bety) { out <- dplyr::tbl(bety, dbplyr::sql(query)) %>% dplyr::filter(.data$id >= hostinfo$start & .data$id <= hostinfo$end) return(out) -} # get_workflow_ids +} # get_workflow_ids #' Get vector of run IDs for a given workflow ID @@ -188,11 +196,11 @@ get_run_ids <- function(bety, workflow_id) { if (workflow_id != "") { runs <- runs(bety, workflow_id) if (dplyr.count(runs) > 0) { - run_ids <- dplyr::pull(runs, "run_id") %>% sort() + run_ids <- dplyr::pull(runs, "run_id") %>% sort() } } return(run_ids) -} # get_run_ids +} # get_run_ids #' Get vector of variable names for a particular workflow and run ID @@ -224,11 +232,11 @@ get_var_names <- 
function(bety, workflow_id, run_id, remove_pool = TRUE) { var_names <- "No variables found" } if (remove_pool) { - var_names <- var_names[!grepl("pool", var_names, ignore.case = TRUE)] ## Ignore 'poolnames' and 'carbon pools' variables + var_names <- var_names[!grepl("pool", var_names, ignore.case = TRUE)] ## Ignore 'poolnames' and 'carbon pools' variables } } return(var_names) -} # get_var_names +} # get_var_names #' Get vector of variable names for a particular workflow and run ID #' @inheritParams get_var_names @@ -240,7 +248,7 @@ var_names_all <- function(bety, workflow_id, run_id) { # Get variables for a particular workflow and run id var_names <- get_var_names(bety, workflow_id, run_id) # Remove variables which should not be shown to the user - removeVarNames <- c('Year','FracJulianDay') + removeVarNames <- c("Year", "FracJulianDay") var_names <- var_names[!var_names %in% removeVarNames] return(var_names) } # var_names_all @@ -263,7 +271,7 @@ load_data_single_run <- function(bety, workflow_id, run_id) { var_names <- var_names_all(bety, workflow_id, run_id) # lat/lon often cause trouble (like with JULES) but aren't needed for this basic plotting var_names <- setdiff(var_names, c("lat", "latitude", "lon", "longitude")) - outputfolder <- file.path(workflow$folder, 'out', run_id) + outputfolder <- file.path(workflow$folder, "out", run_id) out <- PEcAn.utils::read.output(runid = run_id, outdir = outputfolder, variables = var_names, dataframe = TRUE) ncfile <- list.files(path = outputfolder, pattern = "\\.nc$", full.names = TRUE)[1] nc <- ncdf4::nc_open(ncfile) @@ -273,18 +281,17 @@ load_data_single_run <- function(bety, workflow_id, run_id) { globalDF$workflow_id <- workflow_id globalDF$run_id <- run_id globalDF$xlab <- "Time" - globalDF$ylab <- unlist(sapply(globalDF$var_name, function(x){ - if(!is.null(nc$var[[x]]$units)){ + globalDF$ylab <- unlist(sapply(globalDF$var_name, function(x) { + if (!is.null(nc$var[[x]]$units)) { return(nc$var[[x]]$units) - }else{ + } else { return("") } - } )) - globalDF$title <- unlist(lapply(globalDF$var_name, function(x){ + })) + globalDF$title <- unlist(lapply(globalDF$var_name, function(x) { long_name <- names(which(var_names == x)) ifelse(length(long_name) > 0, long_name, x) - } - )) + })) return(globalDF) -} #load_data_single_run +} # load_data_single_run diff --git a/base/db/R/query.file.path.R b/base/db/R/query.file.path.R index ceb79f99685..e307a528ff3 100644 --- a/base/db/R/query.file.path.R +++ b/base/db/R/query.file.path.R @@ -6,9 +6,9 @@ ##' @export query.file.path ##' ##' @author Betsy Cowdery -query.file.path <- function(input.id, host_name, con){ +query.file.path <- function(input.id, host_name, con) { machine.host <- PEcAn.DB::default_hostname(host_name) - machine <- db.query(query = paste0("SELECT * from machines where hostname = '",machine.host,"';"), con = con) + machine <- db.query(query = paste0("SELECT * from machines where hostname = '", machine.host, "';"), con = con) dbfile <- db.query( query = paste( "SELECT file_name,file_path from dbfiles where container_id =", input.id, @@ -16,9 +16,9 @@ query.file.path <- function(input.id, host_name, con){ ), con = con ) - path <- file.path(dbfile$file_path,dbfile$file_name) - cmd <- paste0("file.exists( '",path,"')") - PEcAn.remote::remote.execute.R(script = cmd, host = machine.host, verbose=TRUE) + path <- file.path(dbfile$file_path, dbfile$file_name) + cmd <- paste0("file.exists( '", path, "')") + PEcAn.remote::remote.execute.R(script = cmd, host = machine.host, verbose = TRUE) # Check - 
to be determined later # if(file.exists(path)){ # return(path) @@ -27,4 +27,3 @@ query.file.path <- function(input.id, host_name, con){ # } return(path) } - diff --git a/base/db/R/query.format.vars.R b/base/db/R/query.format.vars.R index 6f426c134bd..cd01a5d6cad 100644 --- a/base/db/R/query.format.vars.R +++ b/base/db/R/query.format.vars.R @@ -7,9 +7,8 @@ ##' ##' @author Betsy Cowdery, Ankur Desai, Istem Fer ##' -query.format.vars <- function(bety, input.id=NA, format.id=NA, var.ids=NA) { - - if ((is.null(input.id)||is.na(input.id)) & (is.null(format.id)||is.na(format.id))){ +query.format.vars <- function(bety, input.id = NA, format.id = NA, var.ids = NA) { + if ((is.null(input.id) || is.na(input.id)) & (is.null(format.id) || is.na(format.id))) { PEcAn.logger::logger.error("Must specify input id or format id") } @@ -24,11 +23,11 @@ query.format.vars <- function(bety, input.id=NA, format.id=NA, var.ids=NA) { if (is.na(format.id)) { f <- PEcAn.DB::db.query( - query = paste("SELECT * from formats as f join inputs as i on f.id = i.format_id where i.id = ", input.id), - con = bety - ) + query = paste("SELECT * from formats as f join inputs as i on f.id = i.format_id where i.id = ", input.id), + con = bety + ) site.id <- PEcAn.DB::db.query(query = paste("SELECT site_id from inputs where id =", input.id), con = bety) - if (is.data.frame(site.id) && nrow(site.id)>0) { + if (is.data.frame(site.id) && nrow(site.id) > 0) { site.id <- site.id$site_id site.info <- PEcAn.DB::db.query( @@ -47,7 +46,7 @@ query.format.vars <- function(bety, input.id=NA, format.id=NA, var.ids=NA) { } mimetype <- PEcAn.DB::db.query(query = paste("SELECT * from mimetypes where id = ", f$mimetype_id), con = bety)[["type_string"]] - f$mimetype <- utils::tail(unlist(strsplit(mimetype, "/")),1) + f$mimetype <- utils::tail(unlist(strsplit(mimetype, "/")), 1) # get variable names and units of input data fv <- PEcAn.DB::db.query( @@ -57,28 +56,27 @@ query.format.vars <- function(bety, input.id=NA, format.id=NA, var.ids=NA) { con = bety ) - if(all(!is.na(var.ids))){ + if (all(!is.na(var.ids))) { # Need to subset the formats table fv <- fv %>% dplyr::filter(.data$variable_id %in% !!var.ids | .data$storage_type != "") - if(dim(fv)[1] == 0){ + if (dim(fv)[1] == 0) { PEcAn.logger::logger.error("None of your requested variables are available") } - } - if (nrow(fv)>0) { + if (nrow(fv) > 0) { colnames(fv) <- c("variable_id", "input_name", "input_units", "storage_type", "column_number") fv$variable_id <- as.numeric(fv$variable_id) n <- dim(fv)[1] # get bety names and units - vars <- as.data.frame(matrix(NA, ncol=2, nrow=n)) + vars <- as.data.frame(matrix(NA, ncol = 2, nrow = n)) colnames(vars) <- c("bety_name", "bety_units") # fv and vars need to go together from now on, # otherwise when there are more than one of the same variable_id it confuses merge vars_bety <- cbind(fv, vars) - for(i in 1:n){ + for (i in 1:n) { vars_bety[i, (ncol(vars_bety) - 1):ncol(vars_bety)] <- as.matrix(PEcAn.DB::db.query( query = paste("SELECT name, units from variables where id = ", fv$variable_id[i]), @@ -96,65 +94,61 @@ query.format.vars <- function(bety, input.id=NA, format.id=NA, var.ids=NA) { # Fill in CF vars # This will ultimately be useful when looking at met variables where CF != Bety - #Fill in MstMIP vars - #All PEcAn output is in MstMIP variables + # Fill in MstMIP vars + # All PEcAn output is in MstMIP variables vars_full <- bety2pecan(vars_bety) header <- as.numeric(f$header) - skip <- ifelse(is.na(as.numeric(f$skip)),0,as.numeric(f$skip)) + skip <- 
ifelse(is.na(as.numeric(f$skip)), 0, as.numeric(f$skip)) # Right now I'm making the inappropriate assumption that storage type will be # empty unless it's a time variable. # This is because I haven't come up for a good way to test that a character string is a date format st <- vars_full$storage_type - time.row <- which(nchar(st)>1 & substr(st, 1,1) == "%") - if(length(time.row) == 0) time.row <- NULL + time.row <- which(nchar(st) > 1 & substr(st, 1, 1) == "%") + if (length(time.row) == 0) time.row <- NULL # Final format list - format <- list(file_name = f$name, - mimetype = f$mimetype, - vars = vars_full, - skip = skip, - header = header, - na.strings=c("-9999","-6999","9999","NA"), # This shouldn't be hardcoded in, but not specified in format table ? - time.row = time.row, - site = site.id, - lat = site.lat, - lon = site.lon, - time_zone = site.time_zone + format <- list( + file_name = f$name, + mimetype = f$mimetype, + vars = vars_full, + skip = skip, + header = header, + na.strings = c("-9999", "-6999", "9999", "NA"), # This shouldn't be hardcoded in, but not specified in format table ? + time.row = time.row, + site = site.id, + lat = site.lat, + lon = site.lon, + time_zone = site.time_zone ) # Check that all bety units are convertible. If not, throw a warning. for (i in 1:length(format$vars$bety_units)) { - - if (format$vars$storage_type[i] != "") { #units with storage type are a special case + if (format$vars$storage_type[i] != "") { # units with storage type are a special case # This would be a good place to put a test for valid sotrage types. Currently not implemented. - } else if (units::ud_are_convertible(format$vars$input_units[i], format$vars$pecan_units[i]) == FALSE) { - if (PEcAn.utils::misc.are.convertible(format$vars$input_units[i], format$vars$pecan_units[i]) == FALSE) { - PEcAn.logger::logger.warn("Units not convertible for",format$vars$input_name[i], "with units of",format$vars$input_units[i], ". Please make sure the varible has units that can be converted to", format$vars$pecan_units[i]) + PEcAn.logger::logger.warn("Units not convertible for", format$vars$input_name[i], "with units of", format$vars$input_units[i], ". Please make sure the varible has units that can be converted to", format$vars$pecan_units[i]) } - } } - - } else { - format <- list(file_name = f$name, - mimetype = f$mimetype, - na.strings=c("-9999","-6999","9999"), # This shouldn't be hardcoded in, but not specified in format table ? - time.row = NULL, - site = site.id, - lat = site.lat, - lon = site.lon, - time_zone = site.time_zone + format <- list( + file_name = f$name, + mimetype = f$mimetype, + na.strings = c("-9999", "-6999", "9999"), # This shouldn't be hardcoded in, but not specified in format table ? 
+ time.row = NULL, + site = site.id, + lat = site.lat, + lon = site.lon, + time_zone = site.time_zone ) } - if (length(unique(format$vars$pecan_name))!=length(format$vars$pecan_name)) { + if (length(unique(format$vars$pecan_name)) != length(format$vars$pecan_name)) { unique_cols <- match(unique(format$vars$pecan_name), format$vars$pecan_name) PEcAn.logger::logger.warn( "There are duplicate columns in format record", format$file_name, @@ -173,11 +167,11 @@ query.format.vars <- function(bety, input.id=NA, format.id=NA, var.ids=NA) { ##' ##' @author Betsy Cowdery -bety2pecan <- function(vars_bety){ - +bety2pecan <- function(vars_bety) { # This needs to be moved to lazy load - bety_mstmip <- utils::read.csv(system.file("bety_mstmip_lookup.csv", package= "PEcAn.DB"), - header = T, stringsAsFactors = FALSE) + bety_mstmip <- utils::read.csv(system.file("bety_mstmip_lookup.csv", package = "PEcAn.DB"), + header = T, stringsAsFactors = FALSE + ) vars_full <- merge(vars_bety, bety_mstmip, by = "bety_name", all.x = TRUE) @@ -189,7 +183,7 @@ bety2pecan <- function(vars_bety){ dups <- unique(vars_full$pecan_name[duplicated(vars_full$pecan_name)]) - if("NEE" %in% dups){ + if ("NEE" %in% dups) { # This is a hack specific to Ameriflux! # It ultimately needs to be generalized, perhaps in a better version of # bety2pecan that doesn't use a lookup table @@ -200,14 +194,14 @@ bety2pecan <- function(vars_bety){ # The variable that is not NEE in bety (assuming it's FC) is discarded. keep <- which(vars_full$bety_name[which(vars_full$pecan_name == "NEE")] == "NEE") - if(length(keep) == 1){ + if (length(keep) == 1) { discard <- vars_full$bety_name[which(vars_full$pecan_name == "NEE")][-keep] - vars_full <- vars_full[!(vars_full$bety_name %in% discard),] + vars_full <- vars_full[!(vars_full$bety_name %in% discard), ] dups <- unique(vars_full$pecan_name[duplicated(vars_full$pecan_name)]) } } - if(length(dups) > 0){ - PEcAn.logger::logger.warn(paste("The variable(s)", paste(dups, collapse = ", "),"are duplicated. + if (length(dups) > 0) { + PEcAn.logger::logger.warn(paste("The variable(s)", paste(dups, collapse = ", "), "are duplicated. 
Currently we cannot support data with duplicate column names.")) } return(vars_full) diff --git a/base/db/R/query.pft.R b/base/db/R/query.pft.R index 1c45f215622..1b330b15578 100644 --- a/base/db/R/query.pft.R +++ b/base/db/R/query.pft.R @@ -16,27 +16,31 @@ query.pft_species <- function(pft, modeltype = NULL, con) { # create pft subquery if (is.null(modeltype)) { - query <- paste0("select species.id, species.genus, species.species, species.scientificname", - " from species, pfts, pfts_species", - " where species.id=pfts_species.specie_id", - " and pfts.id=pfts_species.pft_id", - " and pfts.pft_type='plant'", - " and pfts.name='", pft, "'") + query <- paste0( + "select species.id, species.genus, species.species, species.scientificname", + " from species, pfts, pfts_species", + " where species.id=pfts_species.specie_id", + " and pfts.id=pfts_species.pft_id", + " and pfts.pft_type='plant'", + " and pfts.name='", pft, "'" + ) } else { - query <- paste0("select species.id, species.genus, species.species, species.scientificname", - " from species, pfts, pfts_species, modeltypes", - " where species.id=pfts_species.specie_id", - " and pfts.id=pfts_species.pft_id", - " and pfts.pft_type='plant'", - " and pfts.name='", pft, "'", - " and pfts.modeltype_id=modeltypes.id", - " and modeltypes.name='", modeltype, "'") + query <- paste0( + "select species.id, species.genus, species.species, species.scientificname", + " from species, pfts, pfts_species, modeltypes", + " where species.id=pfts_species.specie_id", + " and pfts.id=pfts_species.pft_id", + " and pfts.pft_type='plant'", + " and pfts.name='", pft, "'", + " and pfts.modeltype_id=modeltypes.id", + " and modeltypes.name='", modeltype, "'" + ) } species <- db.query(query = query, con = con) invisible(species) } -#==================================================================================================# +# ==================================================================================================# ##' Select cultivars associated with a PFT ##' @@ -54,39 +58,43 @@ query.pft_species <- function(pft, modeltype = NULL, con) { ##' and the species it comes from ##' @export query.pft_cultivars <- function(pft, modeltype = NULL, con) { - pft_tbl <- (dplyr::tbl(con, "pfts") - %>% dplyr::filter(.data$name == !!pft, .data$pft_type == "cultivar")) + %>% dplyr::filter(.data$name == !!pft, .data$pft_type == "cultivar")) if (!is.null(modeltype)) { pft_tbl <- (pft_tbl - %>% dplyr::inner_join( + %>% dplyr::inner_join( dplyr::tbl(con, "modeltypes"), by = c("modeltype_id" = "id"), - suffix = c("", ".mt")) + suffix = c("", ".mt") + ) %>% dplyr::filter(.data$name.mt == !!modeltype)) } (pft_tbl - %>% dplyr::inner_join( + %>% dplyr::inner_join( dplyr::tbl(con, "cultivars_pfts"), by = c("id" = "pft_id"), - suffix = c("", ".cvpft")) + suffix = c("", ".cvpft") + ) %>% dplyr::inner_join( dplyr::tbl(con, "cultivars"), by = c("cultivar_id" = "id"), - suffix = c("", ".cv")) + suffix = c("", ".cv") + ) %>% dplyr::inner_join( dplyr::tbl(con, "species"), - by=c("specie_id" = "id"), - suffix=c("", ".sp")) + by = c("specie_id" = "id"), + suffix = c("", ".sp") + ) %>% dplyr::select( id = "cultivar_id", "specie_id", "genus", "species", "scientificname", - cultivar = "name.cv") + cultivar = "name.cv" + ) %>% dplyr::collect()) } diff --git a/base/db/R/query.prior.R b/base/db/R/query.prior.R index 6c078dd8aa4..6d105ebe983 100644 --- a/base/db/R/query.prior.R +++ b/base/db/R/query.prior.R @@ -4,7 +4,7 @@ ##' ##' @details If neither `con` nor `...` are provided, this will try 
to ##' connect to BETY using a `settings` object in the current -##' environment. +##' environment. ##' ##' @param pft ID number of the PFT in the database ##' @param trstr String of traits to query priors for. If passed as a @@ -22,19 +22,19 @@ ##' con <- db.open(...) ##' query.priors("ebifarm.pavi", c("SLA", "Vcmax", "leaf_width"), con = con) ##' } -query.priors <- function(pft, trstr = NULL, con = NULL, ...){ - +query.priors <- function(pft, trstr = NULL, con = NULL, ...) { if (inherits(pft, "integer64")) { # Convert to character with correct representation pft <- format(pft, scientific = FALSE) } - + if (is.null(con)) { params <- list(...) if (!length(params)) { PEcAn.logger::logger.severe( "No connection (`con`) specified and no connection parameters given in `...`.", - "Unable to connect to database.") + "Unable to connect to database." + ) } con <- db.open(params) on.exit(db.close(con), add = TRUE) @@ -42,11 +42,12 @@ query.priors <- function(pft, trstr = NULL, con = NULL, ...){ query.text <- paste( "SELECT variables.name, distn, parama, paramb, n", - "FROM priors", - "JOIN variables ON priors.variable_id = variables.id", - "JOIN pfts_priors ON pfts_priors.prior_id = priors.id", - "JOIN pfts ON pfts.id = pfts_priors.pft_id", - "WHERE pfts.id = ", format(pft, scientific = FALSE)) + "FROM priors", + "JOIN variables ON priors.variable_id = variables.id", + "JOIN pfts_priors ON pfts_priors.prior_id = priors.id", + "JOIN pfts ON pfts.id = pfts_priors.pft_id", + "WHERE pfts.id = ", format(pft, scientific = FALSE) + ) if (!is.null(trstr) && trstr != "''") { if (length(trstr) > 1) { @@ -63,14 +64,13 @@ query.priors <- function(pft, trstr = NULL, con = NULL, ...){ priors <- db.query(query = query.text, con = con) - if(nrow(priors) <= 0){ + if (nrow(priors) <= 0) { warning(paste("No priors found for pft(s): ", pft)) - priors <- priors[, which(colnames(priors)!='name')] + priors <- priors[, which(colnames(priors) != "name")] return(priors) - } - else { + } else { rownames(priors) <- priors$name - priors <- priors[, which(colnames(priors)!='name')] + priors <- priors[, which(colnames(priors) != "name")] return(priors) } } @@ -82,7 +82,7 @@ query.priors <- function(pft, trstr = NULL, con = NULL, ...){ #' BETY `pfts` table). You cannot pass both this and `pft_ids`. #' @param traits Character vector of trait names (`name` column of #' BETY `traits` table). If `NULL` (default), return information for -#' all traits available for that PFT. +#' all traits available for that PFT. #' @param pft_ids Numeric vector of PFT IDs (`id` column of BETY #' `pfts` table). You cannot pass both this and `pft_names`. #' @param expand (Logical) If `TRUE` (default), search every trait-PFT @@ -95,40 +95,48 @@ query.priors <- function(pft, trstr = NULL, con = NULL, ...){ #' PFTs and traits. #' @examples #' \dontrun{ -#' con <- db.open(...) +#' con <- db.open(...) #' -#' # No trait provided, so return all available traits -#' pdat <- query_priors( -#' c("temperate.Early_Hardwood", "temperate.North_Mid_Hardwood", -#' "temperate.Late_Hardwood"), -#' con = con -#' ) +#' # No trait provided, so return all available traits +#' pdat <- query_priors( +#' c( +#' "temperate.Early_Hardwood", "temperate.North_Mid_Hardwood", +#' "temperate.Late_Hardwood" +#' ), +#' con = con +#' ) #' -#' # Traits provided, so restrict to only those traits. Note that -#' # because `expand = TRUE`, this will search for these traits for -#' # every PFT. 
-#' pdat2 <- query_priors( -#' c("Optics.Temperate_Early_Hardwood", -#' "Optics.Temperate_Mid_Hardwood", -#' "Optics.Temperate_Late_Hardwood"), -#' c("leaf_reflect_vis", "leaf_reflect_nir"), -#' con = con -#' ) +#' # Traits provided, so restrict to only those traits. Note that +#' # because `expand = TRUE`, this will search for these traits for +#' # every PFT. +#' pdat2 <- query_priors( +#' c( +#' "Optics.Temperate_Early_Hardwood", +#' "Optics.Temperate_Mid_Hardwood", +#' "Optics.Temperate_Late_Hardwood" +#' ), +#' c("leaf_reflect_vis", "leaf_reflect_nir"), +#' con = con +#' ) #' -#' # With `expand = FALSE`, search the first trait for the first PFT, -#' # the second trait for the second PFT, etc. Note that this means -#' # PFT and trait input vectors must be the same length. -#' pdat2 <- query_priors( -#' c("Optics.Temperate_Early_Hardwood", -#' "Optics.Temperate_Early_Hardwood", -#' "Optics.Temperate_Mid_Hardwood", -#' "Optics.Temperate_Late_Hardwood"), -#' c("leaf_reflect_vis", -#' "leaf_reflect_nir", -#' "leaf_reflect_vis", -#' "leaf_reflect_nir"), -#' con = con -#' ) +#' # With `expand = FALSE`, search the first trait for the first PFT, +#' # the second trait for the second PFT, etc. Note that this means +#' # PFT and trait input vectors must be the same length. +#' pdat2 <- query_priors( +#' c( +#' "Optics.Temperate_Early_Hardwood", +#' "Optics.Temperate_Early_Hardwood", +#' "Optics.Temperate_Mid_Hardwood", +#' "Optics.Temperate_Late_Hardwood" +#' ), +#' c( +#' "leaf_reflect_vis", +#' "leaf_reflect_nir", +#' "leaf_reflect_vis", +#' "leaf_reflect_nir" +#' ), +#' con = con +#' ) #' } #' @export query_priors <- function(pft_names = NULL, traits = NULL, pft_ids = NULL, @@ -137,7 +145,7 @@ query_priors <- function(pft_names = NULL, traits = NULL, pft_ids = NULL, PEcAn.logger::logger.severe( "Provide either `pft_names` or `pft_ids`, not both." ) - } + } if (is.null(pft_names)) { # Assume PFT ID where_stmt <- "WHERE pfts.id = $1" diff --git a/base/db/R/query.site.R b/base/db/R/query.site.R index 666a43b8a7d..3cfa456c915 100644 --- a/base/db/R/query.site.R +++ b/base/db/R/query.site.R @@ -6,7 +6,7 @@ ##' ##' @author Betsy Cowdery ##' -query.site <- function(site.id,con){ +query.site <- function(site.id, con) { site <- db.query( query = paste( "SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) @@ -14,8 +14,9 @@ query.site <- function(site.id,con){ ), con = con ) - if (nrow(site)==0) { - PEcAn.logger::logger.error("Site not found"); return(NULL) + if (nrow(site) == 0) { + PEcAn.logger::logger.error("Site not found") + return(NULL) } if (!(is.na(site$lon)) && !(is.na(site$lat))) { return(site) diff --git a/base/db/R/query.trait.data.R b/base/db/R/query.trait.data.R index e55b781ab9b..c14ca4cdc23 100644 --- a/base/db/R/query.trait.data.R +++ b/base/db/R/query.trait.data.R @@ -21,115 +21,130 @@ ##' query.trait.data("Vcmax", "938", con = con) ##' } ##' @author David LeBauer, Carl Davidson, Shawn Serbin -query.trait.data <- function(trait, spstr, con = NULL, update.check.only = FALSE, ids_are_cultivars = FALSE, ...){ - - if(is.list(con)){ +query.trait.data <- function(trait, spstr, con = NULL, update.check.only = FALSE, ids_are_cultivars = FALSE, ...) 
{ + if (is.list(con)) { PEcAn.logger::logger.warn("WEB QUERY OF DATABASE NOT IMPLEMENTED") return(NULL) } - + # print trait info - if(!update.check.only) { + if (!update.check.only) { PEcAn.logger::logger.info("---------------------------------------------------------") PEcAn.logger::logger.info(trait) } - + ### Query the data from the database for trait X. data <- query.data(trait = trait, spstr = spstr, con = con, store.unconverted = TRUE, ids_are_cultivars = ids_are_cultivars) - + ### Query associated covariates from database for trait X. covariates <- query.covariates(trait.ids = data$id, con = con) - canopy.layer.covs <- covariates[covariates$name == 'canopy_layer', ] - + canopy.layer.covs <- covariates[covariates$name == "canopy_layer", ] + ### Set small sample size for derived traits if update-checking only. Otherwise use default. - if(update.check.only) { + if (update.check.only) { sample.size <- 10 } else { - sample.size <- 10^6 ## Same default as derive.trait(), derive.traits(), and take.samples() + sample.size <- 10^6 ## Same default as derive.trait(), derive.traits(), and take.samples() } - - if(trait == 'Vcmax') { + + if (trait == "Vcmax") { ######################### VCMAX ############################ ### Apply Arrhenius scaling to convert Vcmax at measurement temp to that at 25 degC (ref temp). - data <- arrhenius.scaling.traits(data = data, covariates = covariates, temp.covariates = c('leafT', 'airT','T')) - + data <- arrhenius.scaling.traits(data = data, covariates = covariates, temp.covariates = c("leafT", "airT", "T")) + ### Keep only top of canopy/sunlit leaf samples based on covariate. - if(nrow(canopy.layer.covs) > 0) data <- filter_sunleaf_traits(data = data, covariates = canopy.layer.covs) - + if (nrow(canopy.layer.covs) > 0) data <- filter_sunleaf_traits(data = data, covariates = canopy.layer.covs) + ## select only summer data for Panicum virgatum - ##TODO fix following hack to select only summer data - if (spstr == "'938'"){ - data <- subset(data, subset = data$month %in% c(0,5,6,7)) + ## TODO fix following hack to select only summer data + if (spstr == "'938'") { + data <- subset(data, subset = data$month %in% c(0, 5, 6, 7)) } - - } else if (trait == 'SLA') { + } else if (trait == "SLA") { ######################### SLA ############################ ## convert LMA to SLA - data <- rbind(data, - derive.traits(function(lma){1/lma}, - query.data('LMA', spstr, con=con, store.unconverted=TRUE, - ids_are_cultivars=ids_are_cultivars), - sample.size=sample.size)) - + data <- rbind( + data, + derive.traits( + function(lma) { + 1 / lma + }, + query.data("LMA", spstr, + con = con, store.unconverted = TRUE, + ids_are_cultivars = ids_are_cultivars + ), + sample.size = sample.size + ) + ) + ### Keep only top of canopy/sunlit leaf samples based on covariate. 
- if(nrow(canopy.layer.covs) > 0) data <- filter_sunleaf_traits(data = data, covariates = canopy.layer.covs) - + if (nrow(canopy.layer.covs) > 0) data <- filter_sunleaf_traits(data = data, covariates = canopy.layer.covs) + ## select only summer data for Panicum virgatum - ##TODO fix following hack to select only summer data - if (spstr == "'938'"){ - data <- subset(data, subset = data$month %in% c(0,5,6,7,8,NA)) + ## TODO fix following hack to select only summer data + if (spstr == "'938'") { + data <- subset(data, subset = data$month %in% c(0, 5, 6, 7, 8, NA)) } - - } else if (trait == 'leaf_turnover_rate'){ + } else if (trait == "leaf_turnover_rate") { ######################### LEAF TURNOVER ############################ ## convert Longevity to Turnover - data <- rbind(data, - derive.traits(function(leaf.longevity){ 1 / leaf.longevity }, - query.data('Leaf Longevity', spstr, con = con, store.unconverted = TRUE, - ids_are_cultivars = ids_are_cultivars), - sample.size = sample.size)) - - } else if (trait == 'root_respiration_rate') { + data <- rbind( + data, + derive.traits( + function(leaf.longevity) { + 1 / leaf.longevity + }, + query.data("Leaf Longevity", spstr, + con = con, store.unconverted = TRUE, + ids_are_cultivars = ids_are_cultivars + ), + sample.size = sample.size + ) + ) + } else if (trait == "root_respiration_rate") { ######################### ROOT RESPIRATION ############################ ## Apply Arrhenius scaling to convert root respiration at measurement temp ## to that at 25 degC (ref temp). - data <- arrhenius.scaling.traits(data = data, covariates = covariates, temp.covariates = c('rootT', 'airT','soilT')) - - } else if (trait == 'leaf_respiration_rate_m2') { + data <- arrhenius.scaling.traits(data = data, covariates = covariates, temp.covariates = c("rootT", "airT", "soilT")) + } else if (trait == "leaf_respiration_rate_m2") { ######################### LEAF RESPIRATION ############################ ## Apply Arrhenius scaling to convert leaf respiration at measurement temp ## to that at 25 degC (ref temp). - data <- arrhenius.scaling.traits(data = data, covariates = covariates, temp.covariates = c('leafT', 'airT','T')) - - } else if (trait == 'stem_respiration_rate') { + data <- arrhenius.scaling.traits(data = data, covariates = covariates, temp.covariates = c("leafT", "airT", "T")) + } else if (trait == "stem_respiration_rate") { ######################### STEM RESPIRATION ############################ ## Apply Arrhenius scaling to convert stem respiration at measurement temp ## to that at 25 degC (ref temp). 
- data <- arrhenius.scaling.traits(data = data, covariates = covariates, temp.covariates = c('stemT', 'airT','T')) - - } else if (trait == 'c2n_leaf') { + data <- arrhenius.scaling.traits(data = data, covariates = covariates, temp.covariates = c("stemT", "airT", "T")) + } else if (trait == "c2n_leaf") { ######################### LEAF C:N ############################ - - data <- rbind(data, - derive.traits(function(leafN){ 48 / leafN }, - query.data('leafN', spstr, con = con, store.unconverted = TRUE, - ids_are_cultivars = ids_are_cultivars), - sample.size = sample.size)) - - } else if (trait == 'fineroot2leaf') { + + data <- rbind( + data, + derive.traits( + function(leafN) { + 48 / leafN + }, + query.data("leafN", spstr, + con = con, store.unconverted = TRUE, + ids_are_cultivars = ids_are_cultivars + ), + sample.size = sample.size + ) + ) + } else if (trait == "fineroot2leaf") { ######################### FINE ROOT ALLOCATION ############################ ## FRC_LC is the ratio of fine root carbon to leaf carbon - data <- rbind(data, query.data(trait = 'FRC_LC', spstr = spstr, con = con, store.unconverted = TRUE, ids_are_cultivars = ids_are_cultivars)) + data <- rbind(data, query.data(trait = "FRC_LC", spstr = spstr, con = con, store.unconverted = TRUE, ids_are_cultivars = ids_are_cultivars)) } result <- data - + ## if result is empty, stop run - + if (nrow(result) == 0) { return(NA) warning(paste("there is no data for", trait)) } else { - ## Do we really want to print each trait table?? Seems like a lot of ## info to send to console. Maybe just print summary stats? ## print(result) @@ -140,4 +155,4 @@ query.trait.data <- function(trait, spstr, con = NULL, update.check.only = FALSE # print list of traits queried and number by outdoor/glasshouse return(result) } -} \ No newline at end of file +} diff --git a/base/db/R/query.traits.R b/base/db/R/query.traits.R index 0d7f54815a1..56abe3f5bb6 100644 --- a/base/db/R/query.traits.R +++ b/base/db/R/query.traits.R @@ -22,8 +22,6 @@ query.traits <- function(ids, priors, con, update.check.only = FALSE, ids_are_cultivars = FALSE) { - - if (!inherits(con, "DBIConnection")) { PEcAn.logger::logger.severe("'con' is not a database connection") } @@ -32,28 +30,34 @@ query.traits <- function(ids, priors, con, return(list()) } - id_type = rlang::sym(if (ids_are_cultivars) {"cultivar_id"} else {"specie_id"}) + id_type <- rlang::sym(if (ids_are_cultivars) { + "cultivar_id" + } else { + "specie_id" + }) traits <- (dplyr::tbl(con, "traits") - %>% dplyr::inner_join(dplyr::tbl(con, "variables"), by = c("variable_id" = "id")) - %>% dplyr::filter( - (!!id_type %in% ids), - (.data$name %in% !!priors)) - %>% dplyr::distinct(.data$name) - %>% dplyr::collect()) + %>% dplyr::inner_join(dplyr::tbl(con, "variables"), by = c("variable_id" = "id")) + %>% dplyr::filter( + (!!id_type %in% ids), + (.data$name %in% !!priors) + ) + %>% dplyr::distinct(.data$name) + %>% dplyr::collect()) if (nrow(traits) == 0) { return(list()) } ### Grab trait data - trait.data <- lapply(traits$name, function(trait){ + trait.data <- lapply(traits$name, function(trait) { query.trait.data( trait = trait, spstr = PEcAn.utils::vecpaste(ids), con = con, update.check.only = update.check.only, - ids_are_cultivars = ids_are_cultivars) + ids_are_cultivars = ids_are_cultivars + ) }) names(trait.data) <- traits$name diff --git a/base/db/R/query.yields.R b/base/db/R/query.yields.R index dfaef5956dc..a3122296256 100644 --- a/base/db/R/query.yields.R +++ b/base/db/R/query.yields.R @@ -10,13 +10,16 @@ ##' @param 
... extra arguments ##' @seealso used in \code{\link{query.trait.data}}; \code{\link{fetch.stats2se}}; \code{\link{transformstats}} performs transformation calculations ##' @author -query.yields <- function(trait = 'yield', spstr, extra.columns = '', con = NULL, - ids_are_cultivars = FALSE, ...){ - - member_column <- if (ids_are_cultivars) {"cultivar_id"} else {"specie_id"} - - if(!is.null(extra.columns)) { - if(!is.character(extra.columns) || length(extra.columns) != 1) { +query.yields <- function(trait = "yield", spstr, extra.columns = "", con = NULL, + ids_are_cultivars = FALSE, ...) { + member_column <- if (ids_are_cultivars) { + "cultivar_id" + } else { + "specie_id" + } + + if (!is.null(extra.columns)) { + if (!is.character(extra.columns) || length(extra.columns) != 1) { PEcAn.logger::logger.severe("`extra.columns` must be a string") } } @@ -27,16 +30,22 @@ query.yields <- function(trait = 'yield', spstr, extra.columns = '', con = NULL, yields.mean, yields.statname, yields.stat, yields.n, variables.name as vname, month(yields.date) as month,", - if(extra.columns != '') { paste(extra.columns, ",", sep = "") } else {""}, - "treatments.control, sites.greenhouse + if (extra.columns != "") { + paste(extra.columns, ",", sep = "") + } else { + "" + }, + "treatments.control, sites.greenhouse from yields left join treatments on (yields.treatment_id = treatments.id) left join sites on (yields.site_id = sites.id) left join variables on (yields.variable_id = variables.id) - where ", member_column, " in (", spstr,");", sep = "") - if(!trait == 'yield'){ - query <- gsub(";", paste(" and variables.name in ('", trait,"');", sep = ""), query) + where ", member_column, " in (", spstr, ");", + sep = "" + ) + if (!trait == "yield") { + query <- gsub(";", paste(" and variables.name in ('", trait, "');", sep = ""), query) } - + return(fetch.stats2se(connection = con, query = query)) -} \ No newline at end of file +} diff --git a/base/db/R/query_pfts.R b/base/db/R/query_pfts.R index 91bd80acee0..3e1e25bddac 100644 --- a/base/db/R/query_pfts.R +++ b/base/db/R/query_pfts.R @@ -14,14 +14,15 @@ #' @export query_pfts <- function(dbcon, pft_names, modeltype = NULL, strict = FALSE) { pftres <- (dplyr::tbl(dbcon, "pfts") - %>% dplyr::filter(.data$name %in% !!pft_names)) + %>% dplyr::filter(.data$name %in% !!pft_names)) if (!is.null(modeltype)) { pftres <- (pftres %>% dplyr::semi_join( - (dplyr::tbl(dbcon, "modeltypes") %>% dplyr::filter(.data$name == !!modeltype)), - by = c("modeltype_id" = "id"))) + (dplyr::tbl(dbcon, "modeltypes") %>% dplyr::filter(.data$name == !!modeltype)), + by = c("modeltype_id" = "id") + )) } result <- (pftres - %>% dplyr::select("id", "pft_type", "name") + %>% dplyr::select("id", "pft_type", "name") %>% dplyr::collect() # Arrange in order of inputs %>% dplyr::slice(match(.data$name, pft_names))) @@ -35,7 +36,8 @@ query_pfts <- function(dbcon, pft_names, modeltype = NULL, strict = FALSE) { if (strict) { PEcAn.logger::logger.severe( "Strict matching requested, but failed with message:\n", - msg, wrap = FALSE + msg, + wrap = FALSE ) } else { PEcAn.logger::logger.warn(msg, wrap = FALSE) diff --git a/base/db/R/search_references.R b/base/db/R/search_references.R index 1e59df4e2c2..40e4e4b1099 100644 --- a/base/db/R/search_references.R +++ b/base/db/R/search_references.R @@ -37,7 +37,8 @@ search_reference_single <- function(query, limit = 1, min_score = 85) { if (nrow(crdata) < 1) { PEcAn.logger::logger.info( "No matches found. 
", - "Setting title to search string and leaving other fields blank.") + "Setting title to search string and leaving other fields blank." + ) return(tibble::tibble(query = query)) } keep_cols <- c( diff --git a/base/db/R/symmetric_setdiff.R b/base/db/R/symmetric_setdiff.R index 9f8b2f146dd..d24c702b761 100644 --- a/base/db/R/symmetric_setdiff.R +++ b/base/db/R/symmetric_setdiff.R @@ -11,18 +11,24 @@ #' (`xname`) or y (`yname`) #' @export #' @examples -#' xdf <- data.frame(a = c("a", "b", "c"), -#' b = c(1, 2, 3), -#' stringsAsFactors = FALSE) -#' ydf <- data.frame(a = c("a", "b", "d"), -#' b = c(1, 2.5, 3), -#' stringsAsFactors = FALSE) +#' xdf <- data.frame( +#' a = c("a", "b", "c"), +#' b = c(1, 2, 3), +#' stringsAsFactors = FALSE +#' ) +#' ydf <- data.frame( +#' a = c("a", "b", "d"), +#' b = c(1, 2.5, 3), +#' stringsAsFactors = FALSE +#' ) #' symmetric_setdiff(xdf, ydf) symmetric_setdiff <- function(x, y, xname = "x", yname = "y", namecol = "source", simplify_types = TRUE) { - stopifnot(is.data.frame(x), is.data.frame(y), - is.character(xname), is.character(yname), - length(xname) == 1, length(yname) == 1) + stopifnot( + is.data.frame(x), is.data.frame(y), + is.character(xname), is.character(yname), + length(xname) == 1, length(yname) == 1 + ) is_i64 <- c( vapply(x, inherits, logical(1), what = "integer64"), vapply(y, inherits, logical(1), what = "integer64") @@ -43,8 +49,8 @@ symmetric_setdiff <- function(x, y, xname = "x", yname = "y", } } if (simplify_types) { - x <- dplyr::mutate_if(x, ~!is.numeric(.), as.character) - y <- dplyr::mutate_if(y, ~!is.numeric(.), as.character) + x <- dplyr::mutate_if(x, ~ !is.numeric(.), as.character) + y <- dplyr::mutate_if(y, ~ !is.numeric(.), as.character) } namecol <- dplyr::sym(namecol) xy <- dplyr::setdiff(x, y) %>% diff --git a/base/db/R/take.samples.R b/base/db/R/take.samples.R index 06507a2cfba..33b0277e9af 100644 --- a/base/db/R/take.samples.R +++ b/base/db/R/take.samples.R @@ -13,8 +13,8 @@ ##' ## return vector of length \code{sample.size} from N(mean,stat) ##' take.samples(summary = data.frame(mean = 10, stat = 10), sample.size = 10) ##' -take.samples <- function(summary, sample.size = 10^6){ - if(is.na(summary$stat)){ +take.samples <- function(summary, sample.size = 10^6) { + if (is.na(summary$stat)) { ans <- summary$mean } else { set.seed(0) @@ -22,4 +22,4 @@ take.samples <- function(summary, sample.size = 10^6){ } return(ans) } -##=============================================================================# +## =============================================================================# diff --git a/base/db/R/try2sqlite.R b/base/db/R/try2sqlite.R index 2fdfb21ba65..7e86859b5ce 100644 --- a/base/db/R/try2sqlite.R +++ b/base/db/R/try2sqlite.R @@ -1,7 +1,7 @@ #' Convert TRY text file to SQLite database #' -#' The TRY file is huge and unnecessarily long, which makes it difficult to -#' work with. The resulting SQLite database is much smaller on disk, and can be +#' The TRY file is huge and unnecessarily long, which makes it difficult to +#' work with. The resulting SQLite database is much smaller on disk, and can be #' read much faster thanks to lazy evaluation. #' #' The resulting TRY SQLite database contains the following tables: @@ -10,7 +10,7 @@ #' - `datasets` -- Description of datasets and references/citations. Links to `values` through `DatasetID` and `ReferenceID`. #' - `species` -- Species. Links to `values` through `AccSpeciesID`. #' -#' @param try_files Character vector of file names containing TRY data. 
+#' @param try_files Character vector of file names containing TRY data. #' Multiple files are combined with `data.table::rbindlist`. #' @param sqlite_file Target SQLite database file name, as character. #' @export @@ -27,21 +27,21 @@ try2sqlite <- function(try_files, sqlite_file = "try.sqlite") { # Create tables PEcAn.logger::logger.info("Extracting data values table.") data_cols <- c( - "ObsDataID", # TRY row ID -- unique to each observation of a given trait - "ObservationID", # TRY "entity" ID -- identifies a set of trait measurements (e.g. leaf) - "DataID", # Links to data ID - "StdValue", # Standardized, QA-QC'ed value - "UnitName", # Standardized unit - "AccSpeciesID", # Link to 'species' table - "DatasetID", # Link to 'datasets' table. - "ReferenceID", # Link to 'try_references' table. - "ValueKindName", # Type of value, e.g. mean, min, max, etc. - "UncertaintyName", # Kind of uncertainty - "Replicates", # Number of replicates + "ObsDataID", # TRY row ID -- unique to each observation of a given trait + "ObservationID", # TRY "entity" ID -- identifies a set of trait measurements (e.g. leaf) + "DataID", # Links to data ID + "StdValue", # Standardized, QA-QC'ed value + "UnitName", # Standardized unit + "AccSpeciesID", # Link to 'species' table + "DatasetID", # Link to 'datasets' table. + "ReferenceID", # Link to 'try_references' table. + "ValueKindName", # Type of value, e.g. mean, min, max, etc. + "UncertaintyName", # Kind of uncertainty + "Replicates", # Number of replicates "RelUncertaintyPercent", - "OrigValueStr", # Original data, as character string (before QA/QC) - "OrigUnitStr", # Original unit, as character string (before QA/QC) - "OrigUncertaintyStr" # Original uncertainty, as character string (before QA/QC) + "OrigValueStr", # Original data, as character string (before QA/QC) + "OrigUnitStr", # Original unit, as character string (before QA/QC) + "OrigUncertaintyStr" # Original uncertainty, as character string (before QA/QC) ) data_values <- unique(raw_data[, data_cols, with = FALSE]) diff --git a/base/db/R/utils_db.R b/base/db/R/utils_db.R index f59a9c3c2fe..d5c3c9e7d9a 100644 --- a/base/db/R/utils_db.R +++ b/base/db/R/utils_db.R @@ -1,4 +1,3 @@ - .db.utils <- new.env() .db.utils$created <- 0 .db.utils$queries <- 0 @@ -88,7 +87,7 @@ ##' @examples ##' \dontrun{ ##' db.query("SELECT count(id) FROM traits;", params = settings$database$bety) -##' +##' ##' # Prepared statements ##' con <- db.open(settings$database$bety) ##' db.query( @@ -112,7 +111,7 @@ ##' ) ##' } db.query <- function(query, con = NULL, params = NULL, values = NULL) { - if (is.null(con)){ + if (is.null(con)) { if (is.null(params)) { PEcAn.logger::logger.severe("No parameters or connection specified") } @@ -143,9 +142,11 @@ db.query <- function(query, con = NULL, params = NULL, values = NULL) { res <- DBI::dbGetException(con) if (res$errorNum != 0 || (res$errorMsg != "OK" && res$errorMsg != "")) { PEcAn.logger::logger.severe( - paste0("Error executing db query '", query, - "' errorcode=", res$errorNum, - " message='", res$errorMsg, "'") + paste0( + "Error executing db query '", query, + "' errorcode=", res$errorNum, + " message='", res$errorMsg, "'" + ) ) } } @@ -222,7 +223,7 @@ db.open <- function(params) { attr(c, "pecanid") <- id dump.log <- NULL utils::dump.frames(dumpto = "dump.log") - .db.utils$created <- .db.utils$created+1 + .db.utils$created <- .db.utils$created + 1 .db.utils$connections$id <- append(.db.utils$connections$id, id) .db.utils$connections$con <- append(.db.utils$connections$con, c) 
.db.utils$connections$log <- append(.db.utils$connections$log, list(dump.log)) @@ -252,7 +253,7 @@ db.close <- function(con, showWarnings = TRUE) { } else { deleteme <- which(.db.utils$connections$id == id) if (showWarnings && length(deleteme) == 0) { - PEcAn.logger::logger.warn("Connection might have been closed already."); + PEcAn.logger::logger.warn("Connection might have been closed already.") } else { .db.utils$connections$id <- .db.utils$connections$id[-deleteme] .db.utils$connections$con <- .db.utils$connections$con[-deleteme] @@ -301,36 +302,46 @@ db.print.connections <- function() { ##' @author David LeBauer, Rob Kooper db.exists <- function(params, write = TRUE, table = NA) { # open connection - con <- tryCatch({ - invisible(db.open(params)) - }, error = function(e) { - PEcAn.logger::logger.error("Could not connect to database.\n\t", e) - invisible(NULL) - }) + con <- tryCatch( + { + invisible(db.open(params)) + }, + error = function(e) { + PEcAn.logger::logger.error("Could not connect to database.\n\t", e) + invisible(NULL) + } + ) if (is.null(con)) { return(invisible(FALSE)) } else { on.exit(db.close(con), add = TRUE) } - #check table's privilege about read and write permission - user.permission <- tryCatch({ - invisible(db.query( - paste0("SELECT privilege_type FROM information_schema.role_table_grants ", - "WHERE grantee='", params$user, - "' AND table_catalog = '", params$dbname, - "' AND table_name='", table, "'"), - con - )) - }, error = function(e) { - PEcAn.logger::logger.error("Could not query database.\n\t", e) - db.close(con) - invisible(NULL) - }) + # check table's privilege about read and write permission + user.permission <- tryCatch( + { + invisible(db.query( + paste0( + "SELECT privilege_type FROM information_schema.role_table_grants ", + "WHERE grantee='", params$user, + "' AND table_catalog = '", params$dbname, + "' AND table_name='", table, "'" + ), + con + )) + }, + error = function(e) { + PEcAn.logger::logger.error("Could not query database.\n\t", e) + db.close(con) + invisible(NULL) + } + ) # If table is NA, this is just a generic check for database access, # so we're done! - if (is.na(table)) return(invisible(TRUE)) + if (is.na(table)) { + return(invisible(TRUE)) + } # We're enquiring about permissions related to a specific table, so # need to do more here. @@ -344,22 +355,30 @@ db.exists <- function(params, write = TRUE, table = NA) { } # read permission requested, but not granted - if (!read.perm) return(invisible(FALSE)) + if (!read.perm) { + return(invisible(FALSE)) + } # Read permissions granted. Now, does it actually work? 
To test, try # to read a row from the database - read.result <- tryCatch({ - invisible(db.query(query = paste("SELECT * FROM", table, "LIMIT 1"), con = con)) - }, error = function(e) { - PEcAn.logger::logger.error("Could not query database.\n\t", e) - db.close(con) - invisible(NULL) - }) - if (is.null(read.result)) return(invisible(FALSE)) + read.result <- tryCatch( + { + invisible(db.query(query = paste("SELECT * FROM", table, "LIMIT 1"), con = con)) + }, + error = function(e) { + PEcAn.logger::logger.error("Could not query database.\n\t", e) + db.close(con) + invisible(NULL) + } + ) + if (is.null(read.result)) { + return(invisible(FALSE)) + } # get the table's primary key column - get.key <- tryCatch({ - db.query(query = paste("SELECT pg_attribute.attname, format_type(pg_attribute.atttypid, pg_attribute.atttypmod) + get.key <- tryCatch( + { + db.query(query = paste("SELECT pg_attribute.attname, format_type(pg_attribute.atttypid, pg_attribute.atttypmod) FROM pg_index, pg_class, pg_attribute WHERE pg_class.oid = '", table, "'::regclass AND @@ -367,22 +386,30 @@ db.exists <- function(params, write = TRUE, table = NA) { pg_attribute.attrelid = pg_class.oid AND pg_attribute.attnum = any(pg_index.indkey) AND indisprimary"), con = con) - }, error = function(e) { - PEcAn.logger::logger.error("Could not query database.\n\t", e) - db.close(con) - invisible(NULL) - }) - if (is.null(read.result)) return(invisible(FALSE)) + }, + error = function(e) { + PEcAn.logger::logger.error("Could not query database.\n\t", e) + db.close(con) + invisible(NULL) + } + ) + if (is.null(read.result)) { + return(invisible(FALSE)) + } # If write permission not requested, we're done! - if (!write) return(invisible(TRUE)) + if (!write) { + return(invisible(TRUE)) + } # Write permission requested. Was it granted? - if ("INSERT" %in% user_privilege && "UPDATE" %in% user_privilege ) { + if ("INSERT" %in% user_privilege && "UPDATE" %in% user_privilege) { write.perm <- TRUE } # Write permission not granted - if (!write.perm) return(invisible(FALSE)) + if (!write.perm) { + return(invisible(FALSE)) + } # Write permission granted, but does it actually work? 
key <- get.key$attname @@ -396,15 +423,22 @@ db.exists <- function(params, write = TRUE, table = NA) { } } write.value <- read.result[write.coln] - result <- tryCatch({ - db.query(query = paste0("UPDATE ", table, " SET ", write.coln, "='", write.value, - "' WHERE ", key, "=", key.value), - con = con) - invisible(TRUE) - }, error = function(e) { - PEcAn.logger::logger.error("Could not write to database.\n\t", e) - invisible(FALSE) - }) + result <- tryCatch( + { + db.query( + query = paste0( + "UPDATE ", table, " SET ", write.coln, "='", write.value, + "' WHERE ", key, "=", key.value + ), + con = con + ) + invisible(TRUE) + }, + error = function(e) { + PEcAn.logger::logger.error("Could not write to database.\n\t", e) + invisible(FALSE) + } + ) invisible(result) } @@ -449,14 +483,14 @@ db.getShowQueries <- function() { ##' pftid <- get.id("pfts", "name", "salix", con) ##' pftid <- get.id("pfts", c("name", "modeltype_id"), c("ebifarm.salix", 1), con) ##' } -get.id <- function(table, colnames, values, con, create=FALSE, dates=TRUE){ +get.id <- function(table, colnames, values, con, create = FALSE, dates = TRUE) { values <- lapply(values, function(x) ifelse(is.character(x), shQuote(x), x)) - where_clause <- paste(colnames, values , sep = " = ", collapse = " and ") + where_clause <- paste(colnames, values, sep = " = ", collapse = " and ") query <- paste("select id from", table, "where", where_clause, ";") id <- db.query(query = query, con = con)[["id"]] if (is.null(id) && create) { - colinsert <- paste0(colnames, collapse=", ") - valinsert <- paste0(values, collapse=", ") + colinsert <- paste0(colnames, collapse = ", ") + valinsert <- paste0(values, collapse = ", ") PEcAn.logger::logger.info("INSERT INTO ", table, " (", colinsert, ") VALUES (", valinsert, ")") db.query(query = paste0("INSERT INTO ", table, " (", colinsert, ") VALUES (", valinsert, ")"), con = con) id <- db.query(query, con)[["id"]] @@ -472,7 +506,7 @@ get.id <- function(table, colnames, values, con, create=FALSE, dates=TRUE){ ##' @export default_hostname <- function(hostname) { if (hostname == "localhost") { - hostname <- PEcAn.remote::fqdn(); + hostname <- PEcAn.remote::fqdn() } return(hostname) } diff --git a/base/db/inst/PFT_builder/PFT_builder.R b/base/db/inst/PFT_builder/PFT_builder.R index e36f62c456b..e43b6ae3176 100644 --- a/base/db/inst/PFT_builder/PFT_builder.R +++ b/base/db/inst/PFT_builder/PFT_builder.R @@ -6,41 +6,40 @@ PFT_name <- "broadleaf_evergreen_tropical_tree" ## Get PFT id -dbparms.psql <- list(driver="PostgreSQL" , user = "bety", dbname = "bety", password='bety',host='128.197.230.32') +dbparms.psql <- list(driver = "PostgreSQL", user = "bety", dbname = "bety", password = "bety", host = "128.197.230.32") con <- db.open(dbparms.psql) -PFT <- data.table(db.query(paste0("SELECT * from pfts where name = '",PFT_name,"'"), con)) +PFT <- data.table(db.query(paste0("SELECT * from pfts where name = '", PFT_name, "'"), con)) ## GET PFT->species -PFT_species <- data.table(db.query(paste0("SELECT * from pfts_species where pft_id = '",PFT$id,"'"), con)) +PFT_species <- data.table(db.query(paste0("SELECT * from pfts_species where pft_id = '", PFT$id, "'"), con)) ## Get new species list species <- data.table(db.query("SELECT * from species where notes LIKE '%tropical%'", con)) ## note: this won't match all the species that were ALREADY in BETY when ForestGEO was added sppFile <- "ForestGEO_spp_website.csv" -spp <- read.csv(sppFile,stringsAsFactors=FALSE,strip.white = TRUE) -spp <- spp[-duplicated(spp$Species),] +spp <- 
read.csv(sppFile, stringsAsFactors = FALSE, strip.white = TRUE) +spp <- spp[-duplicated(spp$Species), ] species <- data.table(db.query("SELECT * from species", con)) sel <- which(tolower(species$scientificname) %in% tolower(spp$Species)) -species <- species[sel,] +species <- species[sel, ] ## Associate species with PFT -for(i in seq_len(nrow(species))){ - - if( species$id[i] %in% PFT_species$specie_id){ - print(c(i,'ALREADY EXITS')) +for (i in seq_len(nrow(species))) { + if (species$id[i] %in% PFT_species$specie_id) { + print(c(i, "ALREADY EXITS")) } else { print(i) ## insert record - query <- paste0("INSERT INTO pfts_species (pft_id, specie_id) SELECT ", - PFT$id,", ", - species$id[i], - " WHERE NOT EXISTS ( ", - "SELECT pft_id, specie_id FROM pfts_species where pft_id = ",PFT$id, - " and specie_id = ", species$id[i], - " );") + query <- paste0( + "INSERT INTO pfts_species (pft_id, specie_id) SELECT ", + PFT$id, ", ", + species$id[i], + " WHERE NOT EXISTS ( ", + "SELECT pft_id, specie_id FROM pfts_species where pft_id = ", PFT$id, + " and specie_id = ", species$id[i], + " );" + ) db.query(query, con) - - } } db.close(con) diff --git a/base/db/inst/import-ForestGEO/import_species_list.R b/base/db/inst/import-ForestGEO/import_species_list.R index 6204b90486a..d20969ba65f 100644 --- a/base/db/inst/import-ForestGEO/import_species_list.R +++ b/base/db/inst/import-ForestGEO/import_species_list.R @@ -7,23 +7,30 @@ library(RPostgreSQL) ## function from chartr example capwords <- function(s, strict = FALSE) { - cap <- function(s) paste(toupper(substring(s, 1, 1)), - {s <- substring(s, 2); if(strict) tolower(s) else s}, - sep = "", collapse = " " ) + cap <- function(s) { + paste(toupper(substring(s, 1, 1)), + { + s <- substring(s, 2) + if (strict) tolower(s) else s + }, + sep = "", + collapse = " " + ) + } sapply(strsplit(s, split = " "), cap, USE.NAMES = !is.null(names(s))) } -to.leet <- function(x){ - x <- gsub("0","o",x) - x <- gsub("1","i",x) - x <- gsub("2","z",x) - x <- gsub("3","e",x) - x <- gsub("4","a",x) - x <- gsub("5","s",x) - x <- gsub("6","b",x) - x <- gsub("7","l",x) - x <- gsub("8","q",x) - x <- gsub("9","g",x) - return(x) +to.leet <- function(x) { + x <- gsub("0", "o", x) + x <- gsub("1", "i", x) + x <- gsub("2", "z", x) + x <- gsub("3", "e", x) + x <- gsub("4", "a", x) + x <- gsub("5", "s", x) + x <- gsub("6", "b", x) + x <- gsub("7", "l", x) + x <- gsub("8", "q", x) + x <- gsub("9", "g", x) + return(x) } ## Load raw data @@ -31,13 +38,13 @@ to.leet <- function(x){ ## http://ctfs.si.edu/webatlas/neotropicaltree/ ## then saved as CSV. CTFS needs a better API sppFile <- "ForestGEO_spp_website.csv" -spp <- read.csv(sppFile,stringsAsFactors=FALSE,strip.white = TRUE) +spp <- read.csv(sppFile, stringsAsFactors = FALSE, strip.white = TRUE) ## pre-cleaning? 
-spp <- spp[-duplicated(spp$Species),] +spp <- spp[-duplicated(spp$Species), ] ## open database connection & grab current species list -dbparms.psql <- list(driver="PostgreSQL" , user = "bety", dbname = "bety", password='bety',host='128.197.230.32') +dbparms.psql <- list(driver = "PostgreSQL", user = "bety", dbname = "bety", password = "bety", host = "128.197.230.32") con <- db.open(dbparms.psql) bety.species <- data.table(db.query("SELECT * FROM species", con)) bety.species.dt <- data.table(db.query("SELECT DISTINCT id as bety_species_id, scientificname as bety_species, species, genus FROM species", con)) @@ -46,79 +53,80 @@ bety.species.dt[, bety.species.lower := tolower(bety_species)] base.note <- "ForestGEO Neotropical tree http://ctfs.si.edu/webatlas/neotropicaltree/" ## loop over species -for(i in seq_len(nrow(spp))){ - +for (i in seq_len(nrow(spp))) { ## check to see if genus and species exists in database - if(!(tolower(spp$Species[i]) %in% bety.species.dt$bety.species.lower)){ + if (!(tolower(spp$Species[i]) %in% bety.species.dt$bety.species.lower)) { print(i) ## generate binomial - spp$Species[i] <- gsub("'","",spp$Species[i],fixed = TRUE) - binom <- strsplit(spp$Species[i]," ",fixed=TRUE)[[1]] + spp$Species[i] <- gsub("'", "", spp$Species[i], fixed = TRUE) + binom <- strsplit(spp$Species[i], " ", fixed = TRUE)[[1]] genus <- binom[1] - genus <- gsub("(","",genus,fixed=TRUE) - genus <- gsub(")","",genus,fixed=TRUE) - genus <- gsub("/","-",genus,fixed=TRUE) - genus <- gsub(":","-",genus,fixed=TRUE) - genus <- gsub(".","",genus,fixed=TRUE) - genus <- sub("_","-",genus,fixed = TRUE) - genus <- gsub("_","",genus,fixed = TRUE) ## drop later occurrances - genus <- sub("?","UNK",genus,fixed=TRUE) ## switch on ? to UNK - genus <- gsub("?","",genus,fixed = TRUE) ## drop any later ? + genus <- gsub("(", "", genus, fixed = TRUE) + genus <- gsub(")", "", genus, fixed = TRUE) + genus <- gsub("/", "-", genus, fixed = TRUE) + genus <- gsub(":", "-", genus, fixed = TRUE) + genus <- gsub(".", "", genus, fixed = TRUE) + genus <- sub("_", "-", genus, fixed = TRUE) + genus <- gsub("_", "", genus, fixed = TRUE) ## drop later occurrances + genus <- sub("?", "UNK", genus, fixed = TRUE) ## switch on ? to UNK + genus <- gsub("?", "", genus, fixed = TRUE) ## drop any later ? genus <- to.leet(genus) - genus <- capwords(genus,strict=TRUE) - cross <- which(tolower(binom)=="x") - if(length(cross)>0){ - binom <- binom[-cross] ## remove cross from name because violates constraint + genus <- capwords(genus, strict = TRUE) + cross <- which(tolower(binom) == "x") + if (length(cross) > 0) { + binom <- binom[-cross] ## remove cross from name because violates constraint } - species <- paste(binom[-1],collapse = " ") - species <- gsub("(","",species,fixed=TRUE) - species <- gsub(")","",species,fixed=TRUE) - species <- gsub("[","",species,fixed=TRUE) - species <- gsub("]","",species,fixed=TRUE) - species <- gsub(".","",species,fixed=TRUE) - species <- gsub("&","",species,fixed=TRUE) - species <- gsub("=","",species,fixed=TRUE) - species <- gsub("/","-",species,fixed=TRUE) - species <- gsub(":","-",species,fixed=TRUE) - species <- gsub("#","-",species,fixed=TRUE) - species <- gsub(",","-",species,fixed=TRUE) - species <- sub("_","-",species,fixed = TRUE) - species <- gsub("_","",species,fixed = TRUE) ## drop later occurrances - species <- sub("?","UNK",species,fixed=TRUE) ## switch on ? to UNK - species <- gsub("?","",species,fixed = TRUE) ## drop any later ? 
+ species <- paste(binom[-1], collapse = " ") + species <- gsub("(", "", species, fixed = TRUE) + species <- gsub(")", "", species, fixed = TRUE) + species <- gsub("[", "", species, fixed = TRUE) + species <- gsub("]", "", species, fixed = TRUE) + species <- gsub(".", "", species, fixed = TRUE) + species <- gsub("&", "", species, fixed = TRUE) + species <- gsub("=", "", species, fixed = TRUE) + species <- gsub("/", "-", species, fixed = TRUE) + species <- gsub(":", "-", species, fixed = TRUE) + species <- gsub("#", "-", species, fixed = TRUE) + species <- gsub(",", "-", species, fixed = TRUE) + species <- sub("_", "-", species, fixed = TRUE) + species <- gsub("_", "", species, fixed = TRUE) ## drop later occurrances + species <- sub("?", "UNK", species, fixed = TRUE) ## switch on ? to UNK + species <- gsub("?", "", species, fixed = TRUE) ## drop any later ? species <- to.leet(species) - if(nchar(species) == 1) species <- paste0(species,species) ## if length 1, duplicated (shouldn't have to do this) - first <- strsplit(species,"-")[[1]][1] ## also check length of part before hyphen. Again, shouldn't be required - if(!is.na(first) & nchar(first) == 1){ - species <- paste0(first,species) + if (nchar(species) == 1) species <- paste0(species, species) ## if length 1, duplicated (shouldn't have to do this) + first <- strsplit(species, "-")[[1]][1] ## also check length of part before hyphen. Again, shouldn't be required + if (!is.na(first) & nchar(first) == 1) { + species <- paste0(first, species) } - second <- strsplit(species,"-")[[1]][2] ## also check length of part after hyphen. Again, shouldn't be required - if(!is.na(second) & nchar(second) == 1){ - species <- paste0(species,second) + second <- strsplit(species, "-")[[1]][2] ## also check length of part after hyphen. 
Again, shouldn't be required + if (!is.na(second) & nchar(second) == 1) { + species <- paste0(species, second) } authority <- stringi::stri_trans_general(spp$Authority[i], "latin-ascii") - authority <- gsub("'","`",authority) - note <- paste(base.note,"\nTaxanomic Authority:",authority) + authority <- gsub("'", "`", authority) + note <- paste(base.note, "\nTaxanomic Authority:", authority) ## insert record - query <- paste0("INSERT INTO species (genus, species, scientificname, \"Family\", \"GrowthForm\", notes) SELECT '", - paste( - genus, - species, - spp$Species[i], - spp$Family[i], - spp$Growth.Form[i], - note,sep = "','"), - "' WHERE NOT EXISTS ( ", - "SELECT scientificname FROM species where scientificname = '", - spp$Species[i],"'", - " );") - ## Note: the WHERE NOT EXISTS clause provides additional protection against duplication + query <- paste0( + "INSERT INTO species (genus, species, scientificname, \"Family\", \"GrowthForm\", notes) SELECT '", + paste( + genus, + species, + spp$Species[i], + spp$Family[i], + spp$Growth.Form[i], + note, + sep = "','" + ), + "' WHERE NOT EXISTS ( ", + "SELECT scientificname FROM species where scientificname = '", + spp$Species[i], "'", + " );" + ) + ## Note: the WHERE NOT EXISTS clause provides additional protection against duplication db.query(query, con) - } else { - print(c(i,'ALREADY EXITS',spp$Species[i])) + print(c(i, "ALREADY EXITS", spp$Species[i])) } - } diff --git a/base/db/inst/import-try/02_citations.R b/base/db/inst/import-try/02_citations.R index 98c1dc56758..c19bb239c4e 100644 --- a/base/db/inst/import-try/02_citations.R +++ b/base/db/inst/import-try/02_citations.R @@ -34,7 +34,7 @@ refs_proc_file <- file.path(data_dir, "refs_proc.rds") if (file.exists(refs_proc_file)) { refs_proc <- readRDS(refs_proc_file) } else { - logger.setLevel("DEBUG") # To get status messages + logger.setLevel("DEBUG") # To get status messages refs_proc <- reference_dat %>% mutate(cr_df = map(Reference, search_references, min_score = 40)) %>% unnest() @@ -57,7 +57,7 @@ refs_proc2 <- refs_proc %>% mutate( title = if_else(!is.na(title), title, paste0("TRY ReferenceID ", ReferenceID)), author = if_else(!is.na(author), author, "Unknown TRY data (see title)"), - author = substr(author, 0, 254), # Trim author to 255 characters + author = substr(author, 0, 254), # Trim author to 255 characters journal = if_else(!is.na(journal), journal, "Unknown TRY data (see title)"), # Use the Kattge 2007 TRY paper's DOI as a placeholder doi = if_else(!is.na(doi), doi, "10.1111/j.1365-2486.2011.02451.x"), diff --git a/base/db/inst/import-try/91.global.subset.R b/base/db/inst/import-try/91.global.subset.R index 257eb4a9c57..b8e1f4a7226 100644 --- a/base/db/inst/import-try/91.global.subset.R +++ b/base/db/inst/import-try/91.global.subset.R @@ -3,8 +3,8 @@ library(data.table) library(bit64) message("Loading TRY data...") tryfile <- "1584.txt" -try.raw <- fread(tryfile, header=TRUE) -#load("try.RData") # Loads try.raw ("data.table") +try.raw <- fread(tryfile, header = TRUE) +# load("try.RData") # Loads try.raw ("data.table") message("Loaded!") # a. Select only standardized values @@ -16,9 +16,9 @@ try.sub <- try.raw[!is.na(StdValue)] # This accesses a Google Sheet containing TRY-BETY translation (linked), ensuring that this workflow is always up to date with the latest changes on the sheet. 
message("Matching with TRY-BETY translation") gs.url <- "https://docs.google.com/spreadsheets/d/1bQhwSIw4rwiWMw1O3K_zDH-i0Br0BXYJEgY3wb0weVg/pub?gid=1996728948&single=true&output=csv" -try.bety.info <- fread(gs.url, header=TRUE) +try.bety.info <- fread(gs.url, header = TRUE) data.in.bety <- try.bety.info[(!is.na(bety_id)) & (bety_id != ""), DataID] -try.sub <- try.sub[DataID %in% c(data.in.bety, 241, 394)] # 241 -- Measurement Date; 394 -- Measurement Time +try.sub <- try.sub[DataID %in% c(data.in.bety, 241, 394)] # 241 -- Measurement Date; 394 -- Measurement Time keys <- c("DataID", "DataName") setkeyv(try.sub, keys) setkeyv(try.bety.info, keys) @@ -27,10 +27,10 @@ try.sub <- try.bety.info[try.sub] # c. Select only observation IDs that have at least one trait value -- any(type == "t"), by=ObservationID message("Subsetting to only trait-containing entities") setkey(try.sub, ObservationID) -obsid.trait <- try.sub[, has.trait := any(type == "t"), by=ObservationID][has.trait == TRUE, ObservationID] +obsid.trait <- try.sub[, has.trait := any(type == "t"), by = ObservationID][has.trait == TRUE, ObservationID] try.dat <- try.sub[ObservationID %in% obsid.trait] message("Saving try.dat...") -save(try.dat, file="try.1.RData") -message("Done!") \ No newline at end of file +save(try.dat, file = "try.1.RData") +message("Done!") diff --git a/base/db/inst/import-try/92.data.specific.subset.R b/base/db/inst/import-try/92.data.specific.subset.R index 94d5d240ea5..1727d257227 100644 --- a/base/db/inst/import-try/92.data.specific.subset.R +++ b/base/db/inst/import-try/92.data.specific.subset.R @@ -6,22 +6,24 @@ load("try.1.RData") # Identify datasets that contain ValueKinds that are not "Single" valuekinds <- try.dat[, unique(ValueKindName)] vk.notsingle <- valuekinds[!(valuekinds %in% c("Single", ""))] -dataset.kinds <- try.dat[, lapply(vk.notsingle, function(x) any(ValueKindName == x)),by=DatasetID] +dataset.kinds <- try.dat[, lapply(vk.notsingle, function(x) any(ValueKindName == x)), by = DatasetID] setnames(dataset.kinds, paste0("V", 1:length(vk.notsingle)), vk.notsingle) dataset.kinds[, Sum := rowSums(.SD), .SDcols = -1] -print(dataset.kinds[Sum > 0][order(Sum, decreasing=TRUE)]) +print(dataset.kinds[Sum > 0][order(Sum, decreasing = TRUE)]) # Define an additional "keep" column, indicating values that will not be removed -keep.all.ids <- c(59, 60, 241, 394) # Lat, Lon, Date, Time +keep.all.ids <- c(59, 60, 241, 394) # Lat, Lon, Date, Time try.dat[, keep := FALSE] try.dat[DataID %in% keep.all.ids, keep := TRUE] -cols.to.check <- c("DataName", "bety_name", "bety_id", "ObservationID", "ValueKindName", - "StdValue", "UncertaintyName", "Replicates") +cols.to.check <- c( + "DataName", "bety_name", "bety_id", "ObservationID", "ValueKindName", + "StdValue", "UncertaintyName", "Replicates" +) setkey(try.dat, DatasetID) # Subset based on specific cases (set StdValue to NA, then delete them all in one command) # Dataset ID 25 -- Reports lots of summary statistics, but inconsistently -- use only "Best Estimate" -tmp.dat <- try.dat[DatasetID == 25][, cols.to.check, with=F] +tmp.dat <- try.dat[DatasetID == 25][, cols.to.check, with = F] try.dat[DatasetID == 25 & ValueKindName == "Best Estimate", keep := TRUE] # Dataset ID 68 -- Plant longevity values are defined by "Low", "High", "Maximum" @@ -31,30 +33,30 @@ try.dat[DatasetID == 25 & ValueKindName == "Best Estimate", keep := TRUE] # try.dat[DatasetID == 68 & !(ValueKindName %in% vk.notsingle), keep := TRUE] # Dataset ID 4 -- Reports Mean, Maximum, and Minimum for 
traits -- use only the Mean -tmp.dat <- try.dat[DatasetID == 4][, cols.to.check, with=F][, .N, by=list(DataName, ValueKindName)] +tmp.dat <- try.dat[DatasetID == 4][, cols.to.check, with = F][, .N, by = list(DataName, ValueKindName)] try.dat[DatasetID == 4 & ValueKindName == "Mean", keep := TRUE] # Dataset ID 129 reports species means, but no sample sizes -- assume 1? -tmp.dat <- try.dat[DatasetID == 129][, cols.to.check, with=F] +tmp.dat <- try.dat[DatasetID == 129][, cols.to.check, with = F] try.dat[DatasetID == 129, keep := TRUE] # Dataset ID 159 reports some values as single, others as mean, but only one of each, so leave as is -tmp.dat <- try.dat[DatasetID == 159][, cols.to.check, with=F] +tmp.dat <- try.dat[DatasetID == 159][, cols.to.check, with = F] try.dat[DatasetID == 159, keep := TRUE] # Dataset ID 216 reports means with replicates and uncertainties -tmp.dat <- try.dat[DatasetID == 216][, cols.to.check, with=F] +tmp.dat <- try.dat[DatasetID == 216][, cols.to.check, with = F] try.dat[DatasetID == 216, keep := TRUE] # Dataset ID 263 reports only mean values with no sample sizes -- assume 1 -tmp.dat <- try.dat[DatasetID == 263][, cols.to.check, with=F] +tmp.dat <- try.dat[DatasetID == 263][, cols.to.check, with = F] try.dat[DatasetID == 263, keep := TRUE] # Repeat subset on trait-containing observation IDs. # Have to do this again because this script can result in Lat-Lon pairs that don't correspond to any traits setkey(try.dat, ObservationID) -obsid.trait <- try.dat[, has.trait := any(type == "t"), by=ObservationID][has.trait == TRUE, ObservationID] +obsid.trait <- try.dat[, has.trait := any(type == "t"), by = ObservationID][has.trait == TRUE, ObservationID] try.dat <- try.dat[ObservationID %in% obsid.trait] try.dat <- try.dat[keep == TRUE] -save(try.dat, file="try.2.RData") +save(try.dat, file = "try.2.RData") diff --git a/base/db/inst/import-try/93.create.try.sites.R b/base/db/inst/import-try/93.create.try.sites.R index 026550f1edf..45c80275578 100644 --- a/base/db/inst/import-try/93.create.try.sites.R +++ b/base/db/inst/import-try/93.create.try.sites.R @@ -5,92 +5,100 @@ load("try.2.RData") # a. Cast latitudes, longitudes, and data names latlon <- c("Latitude", "Longitude") keep.cols <- c("DataName", "StdValue", "ObservationID") -try.latlon.all <- try.dat[DataName %in% latlon, keep.cols, with=F][!is.na(StdValue)] +try.latlon.all <- try.dat[DataName %in% latlon, keep.cols, with = F][!is.na(StdValue)] try.latlon.cast <- dcast(try.latlon.all, ObservationID ~ DataName, - value.var = "StdValue", - fun.aggregate = mean, na.rm=TRUE) + value.var = "StdValue", + fun.aggregate = mean, na.rm = TRUE +) # b. Assign each unique lat-lon pair a unique location.id -- := .GRP, by=list(Latitude,Longitude) -try.latlon.cast[, latlon.id := .GRP, by=list(Latitude, Longitude)] -latlon.unique <- try.latlon.cast[, .N, by=c(latlon, "latlon.id")][,c(latlon, "latlon.id"),with=F] +try.latlon.cast[, latlon.id := .GRP, by = list(Latitude, Longitude)] +latlon.unique <- try.latlon.cast[, .N, by = c(latlon, "latlon.id")][, c(latlon, "latlon.id"), with = F] # c. 
Determie sites using cluster analysis, and create unique site.id -radius <- 0.05 # Search radius, in degrees -- 0.4 corresponds to about 31 km at 45 N +radius <- 0.05 # Search radius, in degrees -- 0.4 corresponds to about 31 km at 45 N hc.latlon <- hclust(dist(latlon.unique)) clusters.latlon <- cutree(hc.latlon, h = radius) latlon.unique[, try.site.id := paste0("TRY_SITE_", clusters.latlon)] # This computes the site centroid, which is just the mean latitude and longitude for all points in the site -latlon.unique[, c("site.Latitude", "site.Longitude") := list(mean(Latitude, na.rm=TRUE), - mean(Longitude, na.rm=TRUE)), by=try.site.id] +latlon.unique[, c("site.Latitude", "site.Longitude") := list( + mean(Latitude, na.rm = TRUE), + mean(Longitude, na.rm = TRUE) +), by = try.site.id] latlon.unique <- latlon.unique[!(is.na(Latitude) | is.na(Longitude))] # Merge back into try.latlon.cast setkey(latlon.unique, latlon.id) setkey(try.latlon.cast, latlon.id) -try.latlon.merge <- try.latlon.cast[latlon.unique[,list(latlon.id, try.site.id, - site.Latitude, site.Longitude)]] +try.latlon.merge <- try.latlon.cast[latlon.unique[, list( + latlon.id, try.site.id, + site.Latitude, site.Longitude +)]] # d. Append location.id and site.id to full data set by merging on ObservationID setkey(try.latlon.merge, ObservationID) setkey(try.dat, ObservationID) -try.dat <- try.dat[try.latlon.merge[,list(ObservationID, latlon.id, try.site.id, - site.Latitude, site.Longitude)]] +try.dat <- try.dat[try.latlon.merge[, list( + ObservationID, latlon.id, try.site.id, + site.Latitude, site.Longitude +)]] # e. Create data.table for site creation: # sitename = "TRY_SITE_" # notes = "TRY_DATASETS = " # geometry = ST_GeomFromText('POINT(lat, lon)', 4263) -try.sites <- try.dat[, paste("TRY_DATASETS =", paste(unique(DatasetID), collapse=" ")), - by=list(try.site.id, site.Latitude, site.Longitude)] +try.sites <- try.dat[, paste("TRY_DATASETS =", paste(unique(DatasetID), collapse = " ")), + by = list(try.site.id, site.Latitude, site.Longitude) +] setnames(try.sites, "V1", "notes") try.sites[, bety.site.id := as.character(NA)] bety.site.index <- which(names(try.sites) == "bety.site.id") -# f. Loop over rows... -radius.query.string <- 'SELECT id, sitename, ST_Y(ST_Centroid(geometry)) AS lat, ST_X(ST_Centroid(geometry)) AS lon, ST_Distance(ST_Centroid(geometry), ST_SetSRID(ST_MakePoint(%2$f, %1$f), 4326)) as distance FROM sites WHERE ST_Distance(ST_Centroid(geometry), ST_SetSRID(ST_MakePoint(%2$f, %1$f), 4326)) <= %3$f' +# f. Loop over rows... +radius.query.string <- "SELECT id, sitename, ST_Y(ST_Centroid(geometry)) AS lat, ST_X(ST_Centroid(geometry)) AS lon, ST_Distance(ST_Centroid(geometry), ST_SetSRID(ST_MakePoint(%2$f, %1$f), 4326)) as distance FROM sites WHERE ST_Distance(ST_Centroid(geometry), ST_SetSRID(ST_MakePoint(%2$f, %1$f), 4326)) <= %3$f" insert.query.string <- "INSERT INTO sites(sitename,notes,geometry,user_id) VALUES('%s','%s',ST_Force3D(ST_SetSRID(ST_MakePoint(%f, %f), 4326)),'%s' ) RETURNING id;" message("Looping over sites and adding to BETY") -pb <- txtProgressBar(0, nrow(try.sites), style=3) -for(r in 1:nrow(try.sites)){ +pb <- txtProgressBar(0, nrow(try.sites), style = 3) +for (r in 1:nrow(try.sites)) { # Check site centroid against BETY. 
site.lat <- try.sites[r, site.Latitude] site.lon <- try.sites[r, site.Longitude] search.df <- try(db.query(sprintf(radius.query.string, site.lat, site.lon, radius), con)) - if(inherits(search.df, "try-error")){ + if (inherits(search.df, "try-error")) { warning("Error querying database.") next } - if(nrow(search.df) > 0){ - search.df <- search.df[order(search.df$distance),] + if (nrow(search.df) > 0) { + search.df <- search.df[order(search.df$distance), ] rownames(search.df) <- 1:nrow(search.df) newsite <- FALSE ## Select closest existing site automatically, if it's within the radius. - bety.site.id <- as.character(search.df[1,"id"]) + bety.site.id <- as.character(search.df[1, "id"]) ## TODO: Alternative is to select sites manually at each step, as implemented below. ## Print site options and allow user to select site -# search.df$site.lat <- site.lat -# search.df$site.lon <- site.lon -# print(search.df) -# user.input <- readline("Select site row number or type 'n' to create a new site: ") -# if(tolower(user.input) == "n"){ -# newsite <- TRUE -# } else { -# user.choice <- as.numeric(user.input) -# bety.site.id <- as.character(search.df[user.choice, "id"]) -# } + # search.df$site.lat <- site.lat + # search.df$site.lon <- site.lon + # print(search.df) + # user.input <- readline("Select site row number or type 'n' to create a new site: ") + # if(tolower(user.input) == "n"){ + # newsite <- TRUE + # } else { + # user.choice <- as.numeric(user.input) + # bety.site.id <- as.character(search.df[user.choice, "id"]) + # } } else { newsite <- TRUE } - if(newsite){ + if (newsite) { ## Create new site from centroid sitename <- try.sites[r, try.site.id] notes <- try.sites[r, notes] bety.side.id <- db.query(sprintf(insert.query.string, sitename, notes, site.lon, site.lat, user_id), con)$id } # Append "site_id" to try.sites - set(try.sites, i=r, j=bety.site.index, value=bety.site.id) + set(try.sites, i = r, j = bety.site.index, value = bety.site.id) setTxtProgressBar(pb, r) } @@ -100,6 +108,6 @@ setkey(try.sites, try.site.id) try.dat <- try.dat[try.sites[, list(try.site.id, bety.site.id)]] print(try.dat[sample(1:nrow(try.dat), 20), list(ObservationID, try.site.id, bety.site.id)]) -save(try.dat, file="try.3.RData", compress=TRUE) +save(try.dat, file = "try.3.RData", compress = TRUE) # TODO: In the future, change centroid to bounding box containing all sites? diff --git a/base/db/inst/import-try/94.match.species.R b/base/db/inst/import-try/94.match.species.R index 454796a23f2..b0c52423e24 100644 --- a/base/db/inst/import-try/94.match.species.R +++ b/base/db/inst/import-try/94.match.species.R @@ -4,7 +4,7 @@ load("try.3.RData") # Get unique species list from BETY # NOTE: This is set to PSQL-PEcAn because my clone of BETY was missing all the species -dbparms.psql <- list(driver="PostgreSQL" , user = "bety", dbname = "bety", password='bety', host='psql-pecan.bu.edu') +dbparms.psql <- list(driver = "PostgreSQL", user = "bety", dbname = "bety", password = "bety", host = "psql-pecan.bu.edu") con.psql <- db.open(dbparms.psql) bety.species.dt <- data.table(db.query("SELECT DISTINCT id as bety_species_id, scientificname as bety_species FROM species", con.psql)) bety.species.dt[, bety.species.lower := tolower(bety_species)] @@ -12,8 +12,10 @@ db.close(con.psql) # a. 
Get unique species list from TRY try.species.unique <- try.dat[, unique(AccSpeciesName)] -try.species.dt <- data.table(try.species = try.species.unique, - try.species.lower = tolower(encodeString(try.species.unique))) +try.species.dt <- data.table( + try.species = try.species.unique, + try.species.lower = tolower(encodeString(try.species.unique)) +) try.species.dt[, try.species.lower := gsub(" sp$", " spp.", try.species.lower)] @@ -29,11 +31,13 @@ try.unmatched <- match.species.dt[is.na(bety_species_id)] n.unmatched <- nrow(try.unmatched) bety.index.match <- list() message("Partial pattern match using grep...") -pb <- txtProgressBar(0, n.unmatched, style=3) -for(i in 1:n.unmatched){ - match.ind <- grep(sprintf(".*%s.*", try.unmatched[i,bety.species.lower]), - bety.species.dt[,bety_species], ignore.case=TRUE, perl=TRUE) - if(length(match.ind) != 0){ +pb <- txtProgressBar(0, n.unmatched, style = 3) +for (i in 1:n.unmatched) { + match.ind <- grep(sprintf(".*%s.*", try.unmatched[i, bety.species.lower]), + bety.species.dt[, bety_species], + ignore.case = TRUE, perl = TRUE + ) + if (length(match.ind) != 0) { bety.index.match[[i]] <- match.ind } setTxtProgressBar(pb, i) @@ -45,37 +49,43 @@ bety.index.nmatches <- sapply(bety.index.match, length) single.match <- which(bety.index.nmatches == 1) single.match.inds <- unlist(bety.index.match[single.match]) setkey(match.species.dt, bety.species.lower) -match.species.dt[try.unmatched[single.match,bety.species.lower], - c("bety_species_id", "bety_species") := - bety.species.dt[single.match.inds, list(bety_species_id, - bety.species.lower)]] +match.species.dt[ + try.unmatched[single.match, bety.species.lower], + c("bety_species_id", "bety_species") := + bety.species.dt[single.match.inds, list( + bety_species_id, + bety.species.lower + )] +] # Interactively sort out multiple matches multiple.matches <- which(bety.index.nmatches > 1) -for(i in seq_along(multiple.matches)){ +for (i in seq_along(multiple.matches)) { m <- multiple.matches[i] try.sp <- try.unmatched[m, bety.species.lower] bety.sp <- bety.species.dt[bety.index.match[[m]], bety.species.lower] print(paste("TRY species:", try.sp)) - print(paste("BETY species:", paste(seq_along(bety.sp), bety.sp, collapse="; ", sep=" "))) + print(paste("BETY species:", paste(seq_along(bety.sp), bety.sp, collapse = "; ", sep = " "))) user.choice <- readline("Select a species number (or enter 'n' for 'neither'):") - if(user.choice == 'n'){ + if (user.choice == "n") { next } else { user.choice <- as.numeric(user.choice) bety.index <- bety.index.match[[m]][user.choice] } - match.species.dt[try.unmatched[bety.index, bety.species.lower], - c("bety_species_id", "bety_species") := - bety.species.dt[bety.index, list(bety_species_id, bety.species.lower)]] + match.species.dt[ + try.unmatched[bety.index, bety.species.lower], + c("bety_species_id", "bety_species") := + bety.species.dt[bety.index, list(bety_species_id, bety.species.lower)] + ] } # b. 
Loop over TRY unmatched species list and add to BETY try.unmatched.final <- match.species.dt[is.na(bety_species_id)] sp.insert.query <- "INSERT INTO species(scientificname, notes) VALUES('%s', 'TRY_SPECIES') RETURNING id" message("Looping over unmatched species and adding to BETY") -pb <- txtProgressBar(0, nrow(try.unmatched.final), style=3) -for(i in 1:nrow(try.unmatched.final)) { +pb <- txtProgressBar(0, nrow(try.unmatched.final), style = 3) +for (i in 1:nrow(try.unmatched.final)) { sp <- try.unmatched.final[i, encodeString(try.species)] sp <- fixquote(sp) sp.bety.id <- db.query(sprintf(sp.insert.query, sp), con)$id @@ -91,6 +101,6 @@ try.dat <- match.species.dt[try.dat] try.dat[, try.species.lower := NULL] setnames(try.dat, "try.species", "AccSpeciesName") -save(try.dat, file = "try.4.RData", compress=TRUE) +save(try.dat, file = "try.4.RData", compress = TRUE) -# TODO: Grab species characteristics from TRY and add them here (or in another script) \ No newline at end of file +# TODO: Grab species characteristics from TRY and add them here (or in another script) diff --git a/base/db/inst/import-try/95.citations.R b/base/db/inst/import-try/95.citations.R index 331d8f373bf..1a47148c0a2 100644 --- a/base/db/inst/import-try/95.citations.R +++ b/base/db/inst/import-try/95.citations.R @@ -5,28 +5,28 @@ library(rcrossref) library(stringr) # a. Get unique citation list from TRY. -refs <- try.dat[, .N, by=Reference][,N := NULL] +refs <- try.dat[, .N, by = Reference][, N := NULL] refs[, bety.citation.id := character(nrow(refs))] # b. Get data for each citation via rcrossref (author, year, title, journal, doi) check.query <- "SELECT id FROM citations WHERE doi LIKE '%s'" insert.query <- "INSERT INTO citations(author, year, title, journal, doi) VALUES('%s', %d, '%s', '%s', '%s') RETURNING id" message("Looping over citations and adding to BETY") -pb <- txtProgressBar(0, nrow(refs), style=3) -for(i in 1:nrow(refs)){ +pb <- txtProgressBar(0, nrow(refs), style = 3) +for (i in 1:nrow(refs)) { r <- refs[i, encodeString(Reference)] - if(nchar(r) < 20){ + if (nchar(r) < 20) { # Unpublished data -- skip. # refs[i, DOI := "unpublished"] next } else { - result <- cr_works(query = r)$data[1,] + result <- cr_works(query = r)$data[1, ] doi <- fixquote(result$DOI) # i. Check if DOI already in BETY. check.df <- db.query(sprintf(check.query, doi), con) - if(nrow(check.df) == 0){ + if (nrow(check.df) == 0) { # ii. If no, add. - author <- fixquote(paste0(result$author[[1]][1,], collapse=", ")) + author <- fixquote(paste0(result$author[[1]][1, ], collapse = ", ")) year <- year(result$deposited) title <- fixquote(result$title) journal <- fixquote(result$container.title) @@ -44,4 +44,4 @@ setkey(refs, Reference) setkey(try.dat, Reference) try.dat <- refs[try.dat] -save(try.dat, file="try.5.RData", compress=TRUE) +save(try.dat, file = "try.5.RData", compress = TRUE) diff --git a/base/db/inst/import-try/96.load.data.R b/base/db/inst/import-try/96.load.data.R index 4570282a4a0..d5e49bb14b3 100644 --- a/base/db/inst/import-try/96.load.data.R +++ b/base/db/inst/import-try/96.load.data.R @@ -4,7 +4,7 @@ load("try.5.RData") library(stringr) setkey(try.dat, ObservationID) -try.entities <- try.dat[, .GRP, by=ObservationID] +try.entities <- try.dat[, .GRP, by = ObservationID] try.entities[, c("bety.entity.id", "bety.trait.id") := character(nrow(try.entities))] # Get units from BETY @@ -17,12 +17,12 @@ setnames(try.dat, "id", "bety_id") # a. Loop over entities... 
add.entity.query <- "INSERT INTO entities(name, notes) VALUES('%s', '%s') RETURNING id" -insert.trait <- function(vals){ +insert.trait <- function(vals) { vals.na <- which(is.na(vals)) vals.sub <- vals[-vals.na] n <- names(vals.sub) lhs <- paste(n, collapse = ",") - rhs <- paste(vals.sub, collapse=",") + rhs <- paste(vals.sub, collapse = ",") query.string <- sprintf("INSERT INTO traits(%s) VALUES(%s) RETURNING id", lhs, rhs) id <- db.query(query.string, con)$id return(id) @@ -34,24 +34,25 @@ string <- function(x) { } message("Looping over entities and adding values to BETY") -pb <- txtProgressBar(0, nrow(try.entities), style=3) -for(i in 1:nrow(try.entities)){ +pb <- txtProgressBar(0, nrow(try.entities), style = 3) +for (i in 1:nrow(try.entities)) { # i. Add entity to entities table entity <- try.entities[i, ObservationID] entity.name <- fixquote(paste0("TRY_OBSERVATION_", entity)) check <- db.query(sprintf("SELECT id FROM entities WHERE name LIKE '%s'", entity.name), con) - if(length(check) > 0) next - try.sub <- try.dat[ObservationID == entity & type == "t"] # Select only traits -- for now, ignore covariates - if(nrow(try.sub) == 0) next + if (length(check) > 0) next + try.sub <- try.dat[ObservationID == entity & type == "t"] # Select only traits -- for now, ignore covariates + if (nrow(try.sub) == 0) next entity.notes <- fixquote(try.sub[, paste(unique(DatasetID), - unique(Dataset), - unique(ObservationID), - collapse = " ; ")]) + unique(Dataset), + unique(ObservationID), + collapse = " ; " + )]) entity.id <- db.query(sprintf(add.entity.query, entity.name, entity.notes), con)$id # ii. Store entity_id. try.entities[, bety.entity.id := entity.id] # iii. Loop over rows... - for(j in 1:nrow(try.sub)){ + for (j in 1:nrow(try.sub)) { vals <- list( site_id = try.sub[j, bety.site.id], specie_id = try.sub[j, bety.species.id], @@ -69,4 +70,4 @@ for(i in 1:nrow(try.entities)){ setTxtProgressBar(pb, i) } -save(try.entities, file="try.entities.RData") +save(try.entities, file = "try.entities.RData") diff --git a/base/db/inst/import-try/common.R b/base/db/inst/import-try/common.R index d213931a7ac..e432d54b855 100644 --- a/base/db/inst/import-try/common.R +++ b/base/db/inst/import-try/common.R @@ -7,13 +7,13 @@ suppressMessages({ library(PEcAn.DB) library(RPostgreSQL) }) - + # Database parameters -user_id <- "1000000013" # Alexey Shiklomanov -dbparms <- list(driver="PostgreSQL" , user = "ashiklom", dbname = "bety_ashiklom") +user_id <- "1000000013" # Alexey Shiklomanov +dbparms <- list(driver = "PostgreSQL", user = "ashiklom", dbname = "bety_ashiklom") con <- db.open(dbparms) -fixquote <- function(x){ +fixquote <- function(x) { x <- stringr::str_trim(gsub("'", "''", x)) return(x) -} \ No newline at end of file +} diff --git a/base/db/inst/import-try/helpers.species.R b/base/db/inst/import-try/helpers.species.R index b203bc5f7ee..1606f553a70 100644 --- a/base/db/inst/import-try/helpers.species.R +++ b/base/db/inst/import-try/helpers.species.R @@ -2,70 +2,79 @@ library(taxize) library(data.table) -get.plants.sources <- function(){ +get.plants.sources <- function() { library(taxize) - common.plants <- c("acer rubrum", "zea mays", "tsuga canadensis") - dat <- gnr_resolve(common.plants) - plants.sources <- unique(dat$data_source_title) - all.sources <- gnr_datasources() - source.ids <- all.sources[all.sources$title %in% plants.sources, "id"] - return(source.ids) + common.plants <- c("acer rubrum", "zea mays", "tsuga canadensis") + dat <- gnr_resolve(common.plants) + plants.sources <- 
unique(dat$data_source_title) + all.sources <- gnr_datasources() + source.ids <- all.sources[all.sources$title %in% plants.sources, "id"] + return(source.ids) } all.sources <- get.plants.sources() -fix.species <- function(dat){ +fix.species <- function(dat) { library(taxize) library(data.table) - species.old <- tolower(dat[,unique(species_scientific)]) - usda <- 150 - all.sources <- all.sources - species.matched.usda <- gnr_resolve(names = species.old, - canonical = TRUE, - preferred_data_sources = 150, - best_match_only = TRUE) - species.matched <- data.table(species.matched.usda) - species.matched[, submitted_name := tolower(submitted_name)] - missing.ind <- which(!sapply(species.old, function(x) - any(agrep(x, species.matched[,submitted_name], - ignore.case = TRUE)))) - missing.usda <- names(missing.ind) - if(!all(is.na(missing.usda))){ - print("Missing from USDA:") - print(missing.usda) - print("Attempting all sources") - species.matched.all <- gnr_resolve(names = missing.usda, - canonical = TRUE, - best_match_only = TRUE, - preferred_data_sources = all.sources) - species.matched.all <- data.table(species.matched.all) - setkey(species.matched.all, submitted_name) - species.matched.all <- unique(species.matched.all) - print(species.matched.all) - species.matched.all[, submitted_name := tolower(submitted_name)] - missing.ind2 <- which(!sapply(missing.usda, function(x) - any(agrep(x, species.matched.all[,submitted_name], - ignore.case = TRUE)))) - missing.all <- names(missing.ind2) - if(!all(is.na(missing.all))){ - error.message <- paste("Still couldn't find:", - paste(missing.all, collapse=", "), - "\n Resolve manually and rerun") - warning(error.message) - return(missing.all) - } - species.matched <- rbindlist(list(species.matched, species.matched.all)) - } - print("All species found!") - setkey(species.matched, submitted_name) - dat2 <- copy(dat) - inds <- sapply(species.matched[,submitted_name], - function(x) grep(x, dat2[,species_scientific], ignore.case=TRUE)) - for(sp in names(inds)){ - set(dat2, i=inds[[sp]], j="species_scientific", value=species.matched[sp,matched_name2]) + species.old <- tolower(dat[, unique(species_scientific)]) + usda <- 150 + all.sources <- all.sources + species.matched.usda <- gnr_resolve( + names = species.old, + canonical = TRUE, + preferred_data_sources = 150, + best_match_only = TRUE + ) + species.matched <- data.table(species.matched.usda) + species.matched[, submitted_name := tolower(submitted_name)] + missing.ind <- which(!sapply(species.old, function(x) { + any(agrep(x, species.matched[, submitted_name], + ignore.case = TRUE + )) + })) + missing.usda <- names(missing.ind) + if (!all(is.na(missing.usda))) { + print("Missing from USDA:") + print(missing.usda) + print("Attempting all sources") + species.matched.all <- gnr_resolve( + names = missing.usda, + canonical = TRUE, + best_match_only = TRUE, + preferred_data_sources = all.sources + ) + species.matched.all <- data.table(species.matched.all) + setkey(species.matched.all, submitted_name) + species.matched.all <- unique(species.matched.all) + print(species.matched.all) + species.matched.all[, submitted_name := tolower(submitted_name)] + missing.ind2 <- which(!sapply(missing.usda, function(x) { + any(agrep(x, species.matched.all[, submitted_name], + ignore.case = TRUE + )) + })) + missing.all <- names(missing.ind2) + if (!all(is.na(missing.all))) { + error.message <- paste( + "Still couldn't find:", + paste(missing.all, collapse = ", "), + "\n Resolve manually and rerun" + ) + warning(error.message) 
+ return(missing.all) } - return(dat2) + species.matched <- rbindlist(list(species.matched, species.matched.all)) + } + print("All species found!") + setkey(species.matched, submitted_name) + dat2 <- copy(dat) + inds <- sapply( + species.matched[, submitted_name], + function(x) grep(x, dat2[, species_scientific], ignore.case = TRUE) + ) + for (sp in names(inds)) { + set(dat2, i = inds[[sp]], j = "species_scientific", value = species.matched[sp, matched_name2]) + } + return(dat2) } - - - diff --git a/base/db/tests/testthat.R b/base/db/tests/testthat.R index 03f6b840cbf..a336616abda 100644 --- a/base/db/tests/testthat.R +++ b/base/db/tests/testthat.R @@ -3,13 +3,14 @@ library(testthat) library(PEcAn.DB) library(RPostgreSQL) dbparms <- get_postgres_envvars( - host = "localhost", - driver = "PostgreSQL", - user = "bety", - dbname = "bety", - password = "bety") + host = "localhost", + driver = "PostgreSQL", + user = "bety", + dbname = "bety", + password = "bety" +) -if(db.exists(dbparms)){ +if (db.exists(dbparms)) { con <- db.open(dbparms) PEcAn.logger::logger.setQuitOnSevere(FALSE) test_check("PEcAn.DB") diff --git a/base/db/tests/testthat/helper-db-setup.R b/base/db/tests/testthat/helper-db-setup.R index 0ca845db609..7e293676e27 100644 --- a/base/db/tests/testthat/helper-db-setup.R +++ b/base/db/tests/testthat/helper-db-setup.R @@ -27,18 +27,22 @@ get_db_params <- function() { host = "localhost", user = "bety", password = "bety", - driver = "Postgres")) + driver = "Postgres" + )) } else { if (PEcAn.remote::fqdn() == "pecan2.bu.edu") { - return(list(host = "psql-pecan.bu.edu", driver = "PostgreSQL", - dbname = "bety", user = "bety", password = "bety")) + return(list( + host = "psql-pecan.bu.edu", driver = "PostgreSQL", + dbname = "bety", user = "bety", password = "bety" + )) } else { return(get_postgres_envvars( host = "localhost", driver = "Postgres", user = "bety", dbname = "bety", - password = "bety")) + password = "bety" + )) } } } @@ -47,10 +51,13 @@ check_db_test <- function() { con <- tryCatch( db.open(params = get_db_params()), error = function(e) { - message("Failed to open connection with the following error:\n", - conditionMessage(e)) + message( + "Failed to open connection with the following error:\n", + conditionMessage(e) + ) return(NULL) - }) + } + ) if (is.null(con)) { testthat::skip("Can't get a valid test connection right now. Skipping test. 
") diff --git a/base/db/tests/testthat/test-get.trait.data.pft.R b/base/db/tests/testthat/test-get.trait.data.pft.R index 5005d556302..8241649f331 100644 --- a/base/db/tests/testthat/test-get.trait.data.pft.R +++ b/base/db/tests/testthat/test-get.trait.data.pft.R @@ -15,19 +15,20 @@ teardown({ get_pft <- function(pftname) { get.trait.data.pft( - pft = list(name = pftname, outdir = outdir), - trait.names = "SLA", - dbfiles = dbdir, - modeltype = NULL, - dbcon = con) + pft = list(name = pftname, outdir = outdir), + trait.names = "SLA", + dbfiles = dbdir, + modeltype = NULL, + dbcon = con + ) } -test_that("reference species and cultivar PFTs write traits properly",{ +test_that("reference species and cultivar PFTs write traits properly", { skip("Disabled until Travis bety contains Pavi_alamo and Pavi_all (#1958)") pavi_sp <- get_pft("pavi") expect_equal(pavi_sp$name, "pavi") - sp_csv = file.path(dbdir, "posterior", pavi_sp$posteriorid, "species.csv") - sp_trt = file.path(dbdir, "posterior", pavi_sp$posteriorid, "trait.data.csv") + sp_csv <- file.path(dbdir, "posterior", pavi_sp$posteriorid, "species.csv") + sp_trt <- file.path(dbdir, "posterior", pavi_sp$posteriorid, "trait.data.csv") expect_true(file.exists(sp_csv)) expect_true(file.exists(sp_trt)) expect_gt(file.info(sp_csv)$size, 40) # i.e. longer than the 40-char header @@ -35,8 +36,8 @@ test_that("reference species and cultivar PFTs write traits properly",{ pavi_cv <- get_pft("Pavi_alamo") expect_equal(pavi_cv$name, "Pavi_alamo") - cv_csv = file.path(dbdir, "posterior", pavi_cv$posteriorid, "cultivars.csv") - cv_trt = file.path(dbdir, "posterior", pavi_cv$posteriorid, "trait.data.csv") + cv_csv <- file.path(dbdir, "posterior", pavi_cv$posteriorid, "cultivars.csv") + cv_trt <- file.path(dbdir, "posterior", pavi_cv$posteriorid, "trait.data.csv") expect_true(file.exists(cv_csv)) expect_true(file.exists(cv_trt)) expect_gt(file.info(cv_csv)$size, 63) # cultivar.csv headers are longer @@ -44,8 +45,8 @@ test_that("reference species and cultivar PFTs write traits properly",{ pavi_allcv <- get_pft("Pavi_all") expect_equal(pavi_allcv$name, "Pavi_all") - allcv_csv = file.path(dbdir, "posterior", pavi_allcv$posteriorid, "cultivars.csv") - allcv_trt = file.path(dbdir, "posterior", pavi_allcv$posteriorid, "trait.data.csv") + allcv_csv <- file.path(dbdir, "posterior", pavi_allcv$posteriorid, "cultivars.csv") + allcv_trt <- file.path(dbdir, "posterior", pavi_allcv$posteriorid, "trait.data.csv") expect_true(file.exists(allcv_csv)) expect_true(file.exists(allcv_trt)) expect_gt(file.info(allcv_csv)$size, 63) @@ -56,7 +57,7 @@ test_that("reference species and cultivar PFTs write traits properly",{ expect_gt(file.info(allcv_trt)$size, file.info(cv_trt)$size) }) -test_that("error cases complain",{ +test_that("error cases complain", { expect_error(get_pft("NOTAPFT"), "Could not find pft") expect_error(get_pft("soil"), "Multiple PFTs named soil") }) @@ -67,15 +68,23 @@ test_that("PFT with no trait data (SIPNET soil) works.", { dplyr::count() %>% dplyr::pull() skip_if_not(soil_pft == 1, "`soil.ALL` PFT not present in BETY.") - sipnet_soil <- get.trait.data(list(pft = list(name = "soil.ALL", - outdir = outdir)), - modeltype = "SIPNET", - dbfiles = dbdir, - database = get_db_params(), - forceupdate = FALSE) + sipnet_soil <- get.trait.data( + list(pft = list( + name = "soil.ALL", + outdir = outdir + )), + modeltype = "SIPNET", + dbfiles = dbdir, + database = get_db_params(), + forceupdate = FALSE + ) # Remove new record - DBI::dbExecute(con, "DELETE FROM dbfiles 
WHERE container_type = 'Posterior' AND container_id = $1", - list(sipnet_soil[[1]][["posteriorid"]])) - DBI::dbExecute(con, "DELETE FROM posteriors WHERE id = $1", - list(sipnet_soil[[1]][["posteriorid"]])) + DBI::dbExecute( + con, "DELETE FROM dbfiles WHERE container_type = 'Posterior' AND container_id = $1", + list(sipnet_soil[[1]][["posteriorid"]]) + ) + DBI::dbExecute( + con, "DELETE FROM posteriors WHERE id = $1", + list(sipnet_soil[[1]][["posteriorid"]]) + ) }) diff --git a/base/db/tests/testthat/test-query.traits.R b/base/db/tests/testthat/test-query.traits.R index 353d64aa130..6ac77f338db 100644 --- a/base/db/tests/testthat/test-query.traits.R +++ b/base/db/tests/testthat/test-query.traits.R @@ -6,27 +6,37 @@ teardown({ db.close(con) }) -round3 <- function(x){ round(stats::median(x, na.rm = TRUE), digits = 3) } +round3 <- function(x) { + round(stats::median(x, na.rm = TRUE), digits = 3) +} test_that("prints medians and returns a list", { msgs <- capture.output( - {res <- query.traits( - ids=938, # Switchgrass - priors=c("SLA", "Vcmax", "not_a_trait"), - con=con)}, - type = "message") + { + res <- query.traits( + ids = 938, # Switchgrass + priors = c("SLA", "Vcmax", "not_a_trait"), + con = con + ) + }, + type = "message" + ) expect_length(res, 2) expect_s3_class(res$SLA, "data.frame") expect_s3_class(res$Vcmax, "data.frame") cv_msgs <- capture.output( - {cv_res <- query.traits( - ids=10, # Switchgrass cultivar 'Cave-In-Rock' - priors=c("SLA", "LAI", "not_a_trait"), - con=con, - ids_are_cultivars = TRUE)}, - type = "message") + { + cv_res <- query.traits( + ids = 10, # Switchgrass cultivar 'Cave-In-Rock' + priors = c("SLA", "LAI", "not_a_trait"), + con = con, + ids_are_cultivars = TRUE + ) + }, + type = "message" + ) expect_length(cv_res, 2) expect_s3_class(cv_res$SLA, "data.frame") expect_s3_class(cv_res$LAI, "data.frame") @@ -36,33 +46,38 @@ test_that("prints medians and returns a list", { msgs, paste("Median SLA :", round3(res$SLA$mean)), fixed = TRUE, - all = FALSE) + all = FALSE + ) expect_match( msgs, paste("Median Vcmax :", round3(res$Vcmax$mean)), fixed = TRUE, - all = FALSE) + all = FALSE + ) expect_match( cv_msgs, paste("Median SLA :", round3(cv_res$SLA$mean)), fixed = TRUE, - all = FALSE) + all = FALSE + ) expect_match( cv_msgs, paste("Median LAI :", round3(cv_res$LAI$mean)), fixed = TRUE, - all = FALSE) + all = FALSE + ) }) test_that("returns empty list if no trait data found", { - expect_equal(query.traits(ids=1, priors="not_a_trait", con=con), list()) + expect_equal(query.traits(ids = 1, priors = "not_a_trait", con = con), list()) }) test_that("connection is required", { expect_error( query.traits(ids = 938, priors = "SLA"), - '"con" is missing') + '"con" is missing' + ) }) # Test `query_traits` function, which has a slightly different API @@ -87,9 +102,11 @@ test_that("query_traits works as expected", { }) test_that("query_traits expand argument works as expected", { - pft <- c("Optics.Temperate_Early_Hardwood", - "Optics.Temperate_Mid_Hardwood", - "Optics.Temperate_Late_Hardwood") + pft <- c( + "Optics.Temperate_Early_Hardwood", + "Optics.Temperate_Mid_Hardwood", + "Optics.Temperate_Late_Hardwood" + ) trait <- c("leaf_reflect_vis", "leaf_reflect_nir") pdat2 <- query_priors(pft, trait, con = con) expect_equal(nrow(pdat2), length(pft) * length(trait)) diff --git a/base/db/tests/testthat/test.assign.treatments.R b/base/db/tests/testthat/test.assign.treatments.R index a51c40f4804..4b7a38fe187 100644 --- a/base/db/tests/testthat/test.assign.treatments.R +++ 
b/base/db/tests/testthat/test.assign.treatments.R @@ -5,7 +5,7 @@ test_that("`assign.treatments` correctly assigns control treatment", { control = c(1, 0, 0, 1, 0, 0), trt_id = NA ) - + updated_data <- assign.treatments(data) expect_equal(updated_data$trt_id, c("control", NA, NA, "control", "control", "control")) }) @@ -30,4 +30,4 @@ test_that("`drop.columns` able to drop specified columns from data", { updated_data <- drop.columns(data, c("name", "not_a_column")) expect_equal(colnames(updated_data), c("id", "value")) -}) \ No newline at end of file +}) diff --git a/base/db/tests/testthat/test.check.lists.R b/base/db/tests/testthat/test.check.lists.R index e434c29a2e7..b4c3071eb35 100644 --- a/base/db/tests/testthat/test.check.lists.R +++ b/base/db/tests/testthat/test.check.lists.R @@ -5,7 +5,7 @@ test_that("`check.lists` returns false for appropriate cases", { # for unequal number of rows expect_false(check.lists(x, y)) - # for wrong filename passed + # for wrong filename passed expect_false(check.lists(x, y, filename = "wrong.csv")) # if x and y are actually unequal @@ -14,10 +14,10 @@ test_that("`check.lists` returns false for appropriate cases", { }) test_that("`check.lists` able to correctly work for matching data frames to lists read from csv files", { - withr::with_tempfile("tf", fileext = ".csv",{ + withr::with_tempfile("tf", fileext = ".csv", { x <- data.frame(id = c(1, 2, 3)) - y <- data.frame(id = c(1, 2, 3)) + y <- data.frame(id = c(1, 2, 3)) write.csv(y, file = tf) expect_true(check.lists(x, read.csv(tf), filename = "species.csv")) }) -}) \ No newline at end of file +}) diff --git a/base/db/tests/testthat/test.contents_sanity.R b/base/db/tests/testthat/test.contents_sanity.R index 9a11d2bb7ae..aa86c941355 100644 --- a/base/db/tests/testthat/test.contents_sanity.R +++ b/base/db/tests/testthat/test.contents_sanity.R @@ -1,16 +1,16 @@ context("Basic Sanity tests for PEcAn functions that query BETYdb") -test_that("append.covariates appends managements to yields",{ +test_that("append.covariates appends managements to yields", { con <- check_db_test() test.traits <- db.query("select * from traits where id in (select trait_id from covariates) limit 10;", con = con) tmpcov <- query.covariates(test.traits$id, con = con) - covariates <- tmpcov[!duplicated(tmpcov$trait_id),] + covariates <- tmpcov[!duplicated(tmpcov$trait_id), ] append.test <- append.covariate(data = test.traits, column.name = "level", covariates.data = covariates) expect_true(nrow(append.test) >= nrow(covariates)) # expect_true(nrow(append.test) >= nrow(test.traits)) try(db.close(con)) }) -test_that("query.data works",{ +test_that("query.data works", { con <- check_db_test() # expect_true(nrow(query.data("SLA", "938")) > 0) expect_equal(nrow(query.data("xyz", "-1", con = con)), 0) @@ -20,54 +20,59 @@ test_that("query.data works",{ context("test that expected tables exist") # modeltypes -expected_tables <- c("citations", "citations_sites", "citations_treatments", - "covariates", "cultivars", "dbfiles", "ensembles", "entities", - "formats", "formats_variables", "inputs", "inputs_runs", - "likelihoods", "machines", "managements", "managements_treatments", - "methods", "mimetypes", "models", "pfts", "pfts_priors", "pfts_species", - "posteriors", "priors", "runs", "schema_migrations", - "sessions", "sites", "species", "traits", "treatments", "users", - "variables", "workflows", "yields") -for (t in expected_tables){ - test_that(paste(t, "table exists and has >= 1 columns"),{ +expected_tables <- c( + "citations", 
"citations_sites", "citations_treatments", + "covariates", "cultivars", "dbfiles", "ensembles", "entities", + "formats", "formats_variables", "inputs", "inputs_runs", + "likelihoods", "machines", "managements", "managements_treatments", + "methods", "mimetypes", "models", "pfts", "pfts_priors", "pfts_species", + "posteriors", "priors", "runs", "schema_migrations", + "sessions", "sites", "species", "traits", "treatments", "users", + "variables", "workflows", "yields" +) +for (t in expected_tables) { + test_that(paste(t, "table exists and has >= 1 columns"), { tmp <- NULL con <- check_db_test() suppressWarnings(tmp <- db.query(paste("select * from", t, "limit 1"), con = con)) - #RyK added suppressWarnings for sites.geometry having unrecognized field type + # RyK added suppressWarnings for sites.geometry having unrecognized field type expect_false(is.null(tmp)) try(db.close(con)) }) -} +} ## the following suite could be more comprehensive, and only focus on fields used by PEcAn -test_that("database has a workflows table with appropriate columns",{ +test_that("database has a workflows table with appropriate columns", { con <- check_db_test() ## regression test for redmine #1128 workflows <- db.query("select * from workflows;", con = con) - if(nrow(workflows) >= 1){ - expect_true(all(c("id", "folder", "started_at", "finished_at", "created_at", - "updated_at", "site_id", "model_id", "hostname", "params", "advanced_edit", - "start_date", "end_date") %in% colnames(workflows))) + if (nrow(workflows) >= 1) { + expect_true(all(c( + "id", "folder", "started_at", "finished_at", "created_at", + "updated_at", "site_id", "model_id", "hostname", "params", "advanced_edit", + "start_date", "end_date" + ) %in% colnames(workflows))) } try(db.close(con)) - }) ## the following suite could be more comprehensive, and only focus on fields used by PEcAn -test_that("sites have id and geometry column",{ +test_that("sites have id and geometry column", { con <- check_db_test() ## regression test for redmine #1128 - sites <- suppressWarnings(db.query("select * from sites limit 1;", con = con)) - #RyK added suppressWarnings for geometry having unrecognized field type - expect_true(all(c("id", "city", "state", "country", "mat", "map", "soil", "som", - "notes", "soilnotes", "created_at", "updated_at", "sitename", - "greenhouse", "user_id", "sand_pct", "clay_pct", - "geometry","time_zone") - %in% colnames(sites))) + sites <- suppressWarnings(db.query("select * from sites limit 1;", con = con)) + # RyK added suppressWarnings for geometry having unrecognized field type + expect_true(all(c( + "id", "city", "state", "country", "mat", "map", "soil", "som", + "notes", "soilnotes", "created_at", "updated_at", "sitename", + "greenhouse", "user_id", "sand_pct", "clay_pct", + "geometry", "time_zone" + ) + %in% colnames(sites))) try(db.close(con)) }) -test_that("query.covariates returns expected data.frame",{ +test_that("query.covariates returns expected data.frame", { con <- check_db_test() ids <- 1:10 test.query <- query.covariates(ids, con = con) diff --git a/base/db/tests/testthat/test.convert_input.R b/base/db/tests/testthat/test.convert_input.R index c2e7f49c1e9..cc42480e55b 100644 --- a/base/db/tests/testthat/test.convert_input.R +++ b/base/db/tests/testthat/test.convert_input.R @@ -1,10 +1,10 @@ test_that("`convert_input()` able to call the respective download function for a data item with the correct arguments", { mocked_res <- mockery::mock(list(c("A", "B"))) - mockery::stub(convert_input, 'dbfile.input.check', data.frame()) 
- mockery::stub(convert_input, 'db.query', data.frame(id = 1)) - mockery::stub(convert_input, 'PEcAn.remote::remote.execute.R', mocked_res) - mockery::stub(convert_input, 'purrr::map_dfr', data.frame(missing = c(FALSE), empty = c(FALSE))) + mockery::stub(convert_input, "dbfile.input.check", data.frame()) + mockery::stub(convert_input, "db.query", data.frame(id = 1)) + mockery::stub(convert_input, "PEcAn.remote::remote.execute.R", mocked_res) + mockery::stub(convert_input, "purrr::map_dfr", data.frame(missing = c(FALSE), empty = c(FALSE))) convert_input( input.id = NA, @@ -14,18 +14,18 @@ test_that("`convert_input()` able to call the respective download function for a site.id = 1, start_date = "2011-01-01", end_date = "2011-12-31", - pkg = 'PEcAn.data.atmosphere', - fcn = 'download.AmerifluxLBL', + pkg = "PEcAn.data.atmosphere", + fcn = "download.AmerifluxLBL", con = NULL, host = data.frame(name = "localhost"), write = FALSE, lat.in = 40, lon.in = -88 ) - + args <- mockery::mock_args(mocked_res) expect_equal( - args[[1]]$script, + args[[1]]$script, "PEcAn.data.atmosphere::download.AmerifluxLBL(lat.in=40, lon.in=-88, overwrite=FALSE, outfolder='test/', start_date='2011-01-01', end_date='2011-12-31')" ) }) @@ -35,4 +35,4 @@ test_that("`.get.file.deletion.commands()` able to return correct file deletion expect_equal(res$move.to.tmp, "dir.create(c('./tmp'), recursive=TRUE, showWarnings=FALSE); file.rename(from=c('test'), to=c('./tmp/test'))") expect_equal(res$delete.tmp, "unlink(c('./tmp'), recursive=TRUE)") expect_equal(res$replace.from.tmp, "file.rename(from=c('./tmp/test'), to=c('test'));unlink(c('./tmp'), recursive=TRUE)") -}) \ No newline at end of file +}) diff --git a/base/db/tests/testthat/test.covariate.functions.R b/base/db/tests/testthat/test.covariate.functions.R index 27fccd4eabe..0495f2eeb70 100644 --- a/base/db/tests/testthat/test.covariate.functions.R +++ b/base/db/tests/testthat/test.covariate.functions.R @@ -4,7 +4,7 @@ test_that("`append.covariate` able to append new column for covariates in given name = c("a", "b", "c", "d") ) covariates.data <- data.frame( - trait_id = c( 1, 2, 3, 4, 4), + trait_id = c(1, 2, 3, 4, 4), level = c("A", "B", "C", "D", "E"), name = c("a", "b", "c", "d", "e") ) @@ -29,4 +29,4 @@ test_that("`filter_sunleaf_traits`able to filter out upper canopy leaves", { # temporary column gets removed expect_equal(colnames(updated_data), c("id", "name")) -}) \ No newline at end of file +}) diff --git a/base/db/tests/testthat/test.db.utils.R b/base/db/tests/testthat/test.db.utils.R index 7d8839357e2..5a0533872e5 100644 --- a/base/db/tests/testthat/test.db.utils.R +++ b/base/db/tests/testthat/test.db.utils.R @@ -4,8 +4,8 @@ test_that("get.id works on some tables, and with different inputs", { con <- check_db_test() pftid <- get.id("pfts", "name", "salix", con) expect_true(is.numeric(pftid)) - - pftname <- 'ebifarm.salix' + + pftname <- "ebifarm.salix" modeltypeid <- 1 pftid <- get.id("pfts", c("name", "modeltype_id"), c(pftname, modeltypeid), con) pft <- db.query(paste0("select name, modeltype_id from pfts where id = ", pftid), con) diff --git a/base/db/tests/testthat/test.dbfiles.R b/base/db/tests/testthat/test.dbfiles.R index 4880a074cd8..9b028aa3f55 100644 --- a/base/db/tests/testthat/test.dbfiles.R +++ b/base/db/tests/testthat/test.dbfiles.R @@ -1,25 +1,24 @@ test_that("`dbfile.input.insert()` able to create correct sql queries to insert a file into dbfiles table", { - mocked_res <- mockery::mock(data.frame(), 1, data.frame(id = 2023)) - 
mockery::stub(dbfile.input.insert, 'get.id', 1) - mockery::stub(dbfile.input.insert, 'db.query', mocked_res) + mockery::stub(dbfile.input.insert, "get.id", 1) + mockery::stub(dbfile.input.insert, "db.query", mocked_res) mockery::stub( - dbfile.input.insert, - 'dbfile.check', - data.frame(id = 101, file_name = 'test-file', file_path = 'trait.data.Rdata') + dbfile.input.insert, + "dbfile.check", + data.frame(id = 101, file_name = "test-file", file_path = "trait.data.Rdata") ) - res <- dbfile.input.insert( - in.path = 'trait.data.Rdata', - in.prefix = 'test-file', - siteid = 'test-site', - startdate = '2021-01-01', - enddate = '2022-01-01', - mimetype = 'application/x-RData', - formatname = 'traits', + res <- dbfile.input.insert( + in.path = "trait.data.Rdata", + in.prefix = "test-file", + siteid = "test-site", + startdate = "2021-01-01", + enddate = "2022-01-01", + mimetype = "application/x-RData", + formatname = "traits", con = NULL ) - + expect_equal(res$dbfile.id, 101) expect_equal(res$input.id, 2023) args <- mockery::mock_args(mocked_res) @@ -31,7 +30,7 @@ test_that("`dbfile.input.insert()` able to create correct sql queries to insert args[[1]]$query ) ) - + # parent == "" and startdate not NULL expect_true( grepl( @@ -39,7 +38,7 @@ test_that("`dbfile.input.insert()` able to create correct sql queries to insert args[[2]]$query ) ) - + # startdate not NULL expect_true( grepl( @@ -50,45 +49,43 @@ test_that("`dbfile.input.insert()` able to create correct sql queries to insert }) test_that("`dbfile.input.check()` able to form the right query to check the dbfiles table to see if a file exists as an input", { - mocked_res <- mockery::mock(NULL) - mockery::stub(dbfile.input.check, 'get.id', 1) - mockery::stub(dbfile.input.check, 'db.query', mocked_res) + mockery::stub(dbfile.input.check, "get.id", 1) + mockery::stub(dbfile.input.check, "db.query", mocked_res) - dbfile.input.check('US-Akn', '2021-01-01', '2022-01-01', 'application/x-RData', 'traits', con = NULL) + dbfile.input.check("US-Akn", "2021-01-01", "2022-01-01", "application/x-RData", "traits", con = NULL) args <- mockery::mock_args(mocked_res) expect_true( grepl( "WHERE site_id=US-Akn AND format_id=1", - args[[1]]$query + args[[1]]$query ) ) }) test_that("`dbfile.posterior.insert()` able to make a correct query to insert a file into dbfiles table as a posterior", { mocked_res <- mockery::mock(NULL, NULL, data.frame(id = 10)) - mockery::stub(dbfile.posterior.insert, 'get.id', 1) - mockery::stub(dbfile.posterior.insert, 'dbfile.insert', 1010) - mockery::stub(dbfile.posterior.insert, 'db.query', mocked_res) + mockery::stub(dbfile.posterior.insert, "get.id", 1) + mockery::stub(dbfile.posterior.insert, "dbfile.insert", 1010) + mockery::stub(dbfile.posterior.insert, "db.query", mocked_res) - dbfile.posterior.insert('trait.data.Rdata', 'test-pft', 'application/x-RData', 'traits', con = NULL) + dbfile.posterior.insert("trait.data.Rdata", "test-pft", "application/x-RData", "traits", con = NULL) args <- mockery::mock_args(mocked_res) expect_true(grepl("INSERT INTO posteriors \\(pft_id, format_id\\) VALUES \\(1, 1\\)", args[[2]]$query)) - }) test_that("`dbfile.posterior.check()` able to form the correct query to retrieve correct posterior id to run further checks", { mocked_res <- mockery::mock(data.frame(id = 2020)) - mockery::stub(dbfile.posterior.check, 'get.id', 1) - mockery::stub(dbfile.posterior.check, 'db.query', mocked_res) - mockery::stub(dbfile.posterior.check, 'dbfile.check', data.frame(id = 1, filename = 'test_1', pathname = 'path_1')) 
+ mockery::stub(dbfile.posterior.check, "get.id", 1) + mockery::stub(dbfile.posterior.check, "db.query", mocked_res) + mockery::stub(dbfile.posterior.check, "dbfile.check", data.frame(id = 1, filename = "test_1", pathname = "path_1")) - dbfile.posterior.check('testpft', 'application/x-RData', 'traits', con = NULL) + dbfile.posterior.check("testpft", "application/x-RData", "traits", con = NULL) args <- mockery::mock_args(mocked_res) expect_true( grepl( - "SELECT id FROM posteriors WHERE pft_id=1 AND format_id=1", + "SELECT id FROM posteriors WHERE pft_id=1 AND format_id=1", args[[1]]$query ) ) @@ -96,55 +93,55 @@ test_that("`dbfile.posterior.check()` able to form the correct query to retrieve test_that("`dbfile.insert()` able to add correct parameter values to the insert database query and return a file id", { mocked_res <- mockery::mock(data.frame(), data.frame(id = 2020)) - mockery::stub(dbfile.insert, 'get.id', 1) - mockery::stub(dbfile.insert, 'db.query', mocked_res) - - res <- dbfile.insert(in.path = '/test/file/path', in.prefix = 'testfile.txt', 'Input', 7, con = NULL) + mockery::stub(dbfile.insert, "get.id", 1) + mockery::stub(dbfile.insert, "db.query", mocked_res) + + res <- dbfile.insert(in.path = "/test/file/path", in.prefix = "testfile.txt", "Input", 7, con = NULL) args <- mockery::mock_args(mocked_res) expect_equal(res, 2020) expect_true(grepl("VALUES \\('Input', 7, 'testfile.txt', '/test/file/path', 1\\) RETURNING id", args[[2]]$query)) }) test_that("`dbfile.check()` able to return the most recent entries from `dbfiles` table associated with a container and machine", { - mockery::stub(dbfile.check, 'get.id', 1) + mockery::stub(dbfile.check, "get.id", 1) mockery::stub( - dbfile.check, - 'dplyr::tbl', + dbfile.check, + "dplyr::tbl", data.frame( - container_type = c('Input', 'Input', 'Model'), + container_type = c("Input", "Input", "Model"), container_id = c(7, 7, 7), - machine_id = c(1, 1, 2), - updated_at = c(20201112, 20210101, 20210102), - id = c(2, 3, 4), - filename = c('test_1', 'test_2', 'test_3'), - pathname = c('path_1', 'path_2', 'path_3') + machine_id = c(1, 1, 2), + updated_at = c(20201112, 20210101, 20210102), + id = c(2, 3, 4), + filename = c("test_1", "test_2", "test_3"), + pathname = c("path_1", "path_2", "path_3") ) ) res <- dbfile.check("Input", 7, con = NULL) - + expect_equal( - res, - data.frame(container_type = 'Input', container_id = 7, machine_id = 1, updated_at = 20210101, id = 3, filename = 'test_2', pathname = 'path_2') + res, + data.frame(container_type = "Input", container_id = 7, machine_id = 1, updated_at = 20210101, id = 3, filename = "test_2", pathname = "path_2") ) }) test_that("`dbfile.file()` able to return a correctly formed file path from entries in the `dbfiles` table for a particular container and machine", { - mockery::stub(dbfile.file, 'dbfile.check', data.frame(file_path = 'test/dir/path', file_name = 'test_file')) - expect_equal(dbfile.file('Input', 7, con = NULL), file.path('test/dir/path/test_file')) + mockery::stub(dbfile.file, "dbfile.check", data.frame(file_path = "test/dir/path", file_name = "test_file")) + expect_equal(dbfile.file("Input", 7, con = NULL), file.path("test/dir/path/test_file")) }) test_that("`dbfile.id()` able to construct a correct database query to get id for a dbfile given the container type and filepath", { mocked_res <- mockery::mock(data.frame(id = 1), data.frame(container_id = 2020)) - mockery::stub(dbfile.id, 'db.query', mocked_res) + mockery::stub(dbfile.id, "db.query", mocked_res) - res <- 
dbfile.id('Model', '/usr/local/bin/sipnet', con = NULL) + res <- dbfile.id("Model", "/usr/local/bin/sipnet", con = NULL) args <- mockery::mock_args(mocked_res) - + expect_equal(res, 2020) expect_true( grepl( - "WHERE container_type='Model' AND file_path='/usr/local/bin' AND file_name='sipnet' AND machine_id=1", + "WHERE container_type='Model' AND file_path='/usr/local/bin' AND file_name='sipnet' AND machine_id=1", args[[2]]$query ) ) -}) \ No newline at end of file +}) diff --git a/base/db/tests/testthat/test.derive.traits.R b/base/db/tests/testthat/test.derive.traits.R index b389b401f31..464b2e44c46 100644 --- a/base/db/tests/testthat/test.derive.traits.R +++ b/base/db/tests/testthat/test.derive.traits.R @@ -1,40 +1,45 @@ -test_that("derive.traits works",{ +test_that("derive.traits works", { set.seed(0) input <- list(x = data.frame(mean = 1, stat = 1, n = 1)) - test.derived <- derive.trait(FUN = function(x){1/x}, - input = input, - var.name = 'x') - expect_equal(test.derived, - structure(list(mean = 0.0944129994366609, stat = 687.395104414576, n = 1, vname = "x"), .Names = c("mean", "stat", "n", "vname"), row.names = c(NA, -1L), class = "data.frame")) + test.derived <- derive.trait( + FUN = function(x) { + 1 / x + }, + input = input, + var.name = "x" + ) + expect_equal( + test.derived, + structure(list(mean = 0.0944129994366609, stat = 687.395104414576, n = 1, vname = "x"), .Names = c("mean", "stat", "n", "vname"), row.names = c(NA, -1L), class = "data.frame") + ) }) -test_that("arrhenius.scaling.traits works",{ +test_that("arrhenius.scaling.traits works", { set.seed(0) con <- check_db_test() test.traits <- db.query("select * from traits where variable_id = 4 and id in (select trait_id from covariates where variable_id in (81, 86)) and mean > 0 limit 50;", con = con) test.cov <- query.covariates(test.traits$id, con = con) - test.traits.5C <- arrhenius.scaling.traits(data = test.traits, covariates = test.cov, temp.covariates = 'leafT', new.temp = 5) - test.traits.4C <- arrhenius.scaling.traits(data = test.traits, covariates = test.cov, temp.covariates = 'leafT', new.temp = 4) - - # Values scaled to 5 degC should be greater than those scaled to 4 degC (some will be equal, + test.traits.5C <- arrhenius.scaling.traits(data = test.traits, covariates = test.cov, temp.covariates = "leafT", new.temp = 5) + test.traits.4C <- arrhenius.scaling.traits(data = test.traits, covariates = test.cov, temp.covariates = "leafT", new.temp = 4) + + # Values scaled to 5 degC should be greater than those scaled to 4 degC (some will be equal, # if they don't have covariates available). 
expect_true(all(test.traits.5C$mean >= test.traits.4C$mean)) - test.traits.leafT <- arrhenius.scaling.traits(data = test.traits, covariates = test.cov, temp.covariates = 'leafT', new.temp = 25) - test.traits.leafTairT <- arrhenius.scaling.traits(data = test.traits, covariates = test.cov, temp.covariates = c('leafT','airT'), new.temp = 25) + test.traits.leafT <- arrhenius.scaling.traits(data = test.traits, covariates = test.cov, temp.covariates = "leafT", new.temp = 25) + test.traits.leafTairT <- arrhenius.scaling.traits(data = test.traits, covariates = test.cov, temp.covariates = c("leafT", "airT"), new.temp = 25) # Traits that have a leafT covariate should have scaled the same in both cases, since leafT # is listed first in the latter, and so takes precedence - id.check <- test.cov$trait_id[test.cov$name=='leafT'] - expect_true(all(test.traits.leafT$mean [test.traits.leafT$id %in% id.check] == - test.traits.leafTairT$mean [test.traits.leafTairT$id %in% id.check])) + id.check <- test.cov$trait_id[test.cov$name == "leafT"] + expect_true(all(test.traits.leafT$mean[test.traits.leafT$id %in% id.check] == + test.traits.leafTairT$mean[test.traits.leafTairT$id %in% id.check])) # But the results should differ for any trait that has airT, but not leafT (unless - # airT is equal to the default of 25, in which case no scaling will have been done in + # airT is equal to the default of 25, in which case no scaling will have been done in # either case - id.check <- setdiff(test.cov$trait_id[test.cov$name=='airT' & test.cov$level!=25], id.check) - expect_true(all(test.traits.leafT$mean [test.traits.leafT$id %in% id.check] != - test.traits.leafTairT$mean [test.traits.leafTairT$id %in% id.check])) + id.check <- setdiff(test.cov$trait_id[test.cov$name == "airT" & test.cov$level != 25], id.check) + expect_true(all(test.traits.leafT$mean[test.traits.leafT$id %in% id.check] != + test.traits.leafTairT$mean[test.traits.leafTairT$id %in% id.check])) try(db.close(con)) - }) diff --git a/base/db/tests/testthat/test.insert.R b/base/db/tests/testthat/test.insert.R index 168bb4f4ae6..a17b1b092be 100644 --- a/base/db/tests/testthat/test.insert.R +++ b/base/db/tests/testthat/test.insert.R @@ -11,20 +11,20 @@ test_that( dplyr::mutate( Species = as.character(Species) ) - dplyr::copy_to(irisdb, iris[1,], "iris", overwrite = TRUE) + dplyr::copy_to(irisdb, iris[1, ], "iris", overwrite = TRUE) # Add extra column to see if it's successfully ignored iris2 <- dplyr::mutate(iris, extracol = seq_len(nrow(iris))) - iris_insert <- iris2[2:10,] + iris_insert <- iris2[2:10, ] .insert <- insert_table(iris_insert, "iris", irisdb) test_that( "Subset of iris was inserted into database", { iris_insert_test <- dplyr::tbl(irisdb, "iris") %>% dplyr::collect() - expect_equal(iris[1:10,], iris_insert_test) + expect_equal(iris[1:10, ], iris_insert_test) } ) - iris_merge <- iris2[5:12,] + iris_merge <- iris2[5:12, ] out_merge <- db_merge_into(iris_merge, "iris", irisdb) %>% dplyr::collect() iris_merge_nrow <- dplyr::tbl(irisdb, "iris") %>% @@ -33,7 +33,7 @@ test_that( test_that( "Only subset of iris data were merged", { - expect_equal(out_merge, iris2[5:12,]) + expect_equal(out_merge, iris2[5:12, ]) out_merge2 <- db_merge_into(iris_merge, "iris", irisdb) %>% dplyr::collect() expect_equal(out_merge, out_merge2) @@ -46,12 +46,13 @@ test_that( expect_true("extracol" %in% colnames(out_merge)) } ) - }) + } +) test_that("`match_colnames()` returns intersection of column names of a dataframe to a table", { - mockery::stub(match_colnames, 'dplyr::tbl', 
data.frame(id = 1, name = 'test', value = 1)) + mockery::stub(match_colnames, "dplyr::tbl", data.frame(id = 1, name = "test", value = 1)) expect_equal( - match_colnames(values = data.frame(id = 1, name = 'test'), table = 'test', con = 1), - c('id', 'name') + match_colnames(values = data.frame(id = 1, name = "test"), table = "test", con = 1), + c("id", "name") ) -}) \ No newline at end of file +}) diff --git a/base/db/tests/testthat/test.met_inputs.R b/base/db/tests/testthat/test.met_inputs.R index 49d75b7b379..70e7a169af1 100644 --- a/base/db/tests/testthat/test.met_inputs.R +++ b/base/db/tests/testthat/test.met_inputs.R @@ -1,10 +1,10 @@ test_that("`met_inputs()` able to correctly place input parameters in the database query to retrieve available met inputs", { mocked_res <- mockery::mock(0) - mockery::stub(met_inputs, 'db.query', mocked_res) + mockery::stub(met_inputs, "db.query", mocked_res) met_inputs(dbcon = NULL, site_id = 100, model_id = 200, hostname = "pecan") args <- mockery::mock_args(mocked_res) expect_true( grepl("inputs.site_id = \\$1.*machines.hostname = \\$2.*models.id = \\$3", args[[1]][[1]]) ) -}) \ No newline at end of file +}) diff --git a/base/db/tests/testthat/test.query.base.R b/base/db/tests/testthat/test.query.base.R index 301d10b7c90..33f18ffab4c 100644 --- a/base/db/tests/testthat/test.query.base.R +++ b/base/db/tests/testthat/test.query.base.R @@ -1,10 +1,10 @@ context("test db.query") -test_that("db.query can execute a trivial SQL statement and return results",{ - con <- check_db_test() - ans <- db.query("select count(*) from traits;", con = con) - expect_is(ans, "data.frame") - expect_true(is.numeric(ans[,1])) - expect_true(length(ans) == 1) - try(db.close(con)) +test_that("db.query can execute a trivial SQL statement and return results", { + con <- check_db_test() + ans <- db.query("select count(*) from traits;", con = con) + expect_is(ans, "data.frame") + expect_true(is.numeric(ans[, 1])) + expect_true(length(ans) == 1) + try(db.close(con)) }) diff --git a/base/db/tests/testthat/test.query.data.R b/base/db/tests/testthat/test.query.data.R index 87cafba5787..43d6a0475f4 100644 --- a/base/db/tests/testthat/test.query.data.R +++ b/base/db/tests/testthat/test.query.data.R @@ -1,6 +1,6 @@ test_that("`query.data()` able to correctly form the query and return result in SE", { - mocked_function <- mockery::mock(data.frame(Y=rep(1,5), stat=rep(1,5), n=rep(4,5), mean = rep(3,5), statname=c('SD', 'MSE', 'LSD', 'HSD', 'MSD'))) - mockery::stub(query.data, 'db.query', mocked_function, 2) + mocked_function <- mockery::mock(data.frame(Y = rep(1, 5), stat = rep(1, 5), n = rep(4, 5), mean = rep(3, 5), statname = c("SD", "MSE", "LSD", "HSD", "MSD"))) + mockery::stub(query.data, "db.query", mocked_function, 2) result <- query.data(con = 1, trait = "test_trait", spstr = "test_spstr", store.unconverted = TRUE) args <- mockery::mock_args(mocked_function) expect_true( @@ -10,11 +10,11 @@ test_that("`query.data()` able to correctly form the query and return result in "ST_Y\\(ST_CENTROID\\(sites\\.geometry\\)\\) AS lat,.*", "where specie_id in \\(test_spstr\\).*", "variables.name in \\('test_trait'\\);" - ), + ), args[[1]]$query ) ) expect_equal(result$mean_unconverted, result$mean) expect_equal(result$stat_unconverted, result$stat) - expect_equal(result$statname, rep('SE', 5)) -}) \ No newline at end of file + expect_equal(result$statname, rep("SE", 5)) +}) diff --git a/base/db/tests/testthat/test.query.dplyr.R b/base/db/tests/testthat/test.query.dplyr.R index 
cc3e6436eea..ed5af073919 100644 --- a/base/db/tests/testthat/test.query.dplyr.R +++ b/base/db/tests/testthat/test.query.dplyr.R @@ -10,7 +10,6 @@ test_that("`fancy_scientific()` converts numbers to scientific expressions with }) test_that("`dplyr.count()` returns the correct count of rows in a dataframe", { - df <- data.frame( x = c(1, 2, 3, 2, 1, 3), y = c("a", "b", "a", "b", "a", "b") @@ -24,10 +23,10 @@ test_that("`dplyr.count()` returns the correct count of rows in a dataframe", { }) test_that("`dbHostInfo()` able to return correct host information", { - mockery::stub(dbHostInfo, 'db.query', data.frame(floor = 10)) + mockery::stub(dbHostInfo, "db.query", data.frame(floor = 10)) mockery::stub( - dbHostInfo, - 'dplyr::tbl', + dbHostInfo, + "dplyr::tbl", data.frame( data.frame( sync_host_id = c(10, 11), @@ -38,7 +37,7 @@ test_that("`dbHostInfo()` able to return correct host information", { sync_contact = c("test_contact_1", "test_contact_2") ) ) - ) + ) result <- dbHostInfo(bety = 1) expect_equal(result$hostid, 10) expect_equal(result$hostname, "test_host_1") @@ -50,8 +49,8 @@ test_that("`dbHostInfo()` able to return correct host information", { test_that("`workflows()` able to correctly return a list of workflows", { mockery::stub( - workflows, - 'dbHostInfo', + workflows, + "dbHostInfo", list( hostid = 10, hostname = "test_host_1", @@ -61,15 +60,15 @@ test_that("`workflows()` able to correctly return a list of workflows", { sync_contact = "test_contact_1" ) ) - mockery::stub(workflows, 'dplyr::tbl', data.frame(workflow_id = c(1, 2, 3, 4, 5, 6))) + mockery::stub(workflows, "dplyr::tbl", data.frame(workflow_id = c(1, 2, 3, 4, 5, 6))) result <- workflows(bety = 1, ensemble = TRUE) expect_equal(result, data.frame(workflow_id = c(3, 4, 5, 6))) }) test_that("`workflow()` able to get a workflow data by id", { mockery::stub( - workflow, - 'workflows', + workflow, + "workflows", data.frame(workflow_id = c(1, 2, 3, 4, 5, 6), workflow_name = c("A", "B", "C", "D", "E", "F")) ) result <- workflow(bety = 1, workflow_id = 3) @@ -78,8 +77,8 @@ test_that("`workflow()` able to get a workflow data by id", { test_that("`runs()` is able to get table of runs for a corresponding workflow", { mockery::stub( - runs, - 'workflow', + runs, + "workflow", data.frame( workflow_id = c(1, 1), folder = c("test_folder_1", "test_folder_2") @@ -95,7 +94,7 @@ test_that("`runs()` is able to get table of runs for a corresponding workflow", ensemble_id = c(1, 1, 2) ) ) - mockery::stub(runs, 'dplyr::tbl', mocked_res) + mockery::stub(runs, "dplyr::tbl", mocked_res) result <- runs(bety = 1, workflow_id = 1) expect_equal(result$run_id, c(1, 1, 2, 2, 3, 3)) expect_equal(result$folder, c("test_folder_1", "test_folder_2", "test_folder_1", "test_folder_2", "test_folder_1", "test_folder_2")) @@ -104,7 +103,7 @@ test_that("`runs()` is able to get table of runs for a corresponding workflow", test_that("`get_workflow_ids()` able to get a vector of unique workflow IDs", { mockery::stub( get_workflow_ids, - 'workflows', + "workflows", data.frame( workflow_id = c(1, 2, 2, 3, 4, 4), workflow_name = c("A", "B", "C", "D", "E", "F") @@ -115,10 +114,10 @@ test_that("`get_workflow_ids()` able to get a vector of unique workflow IDs", { }) test_that("`get_users()` ", { - mockery::stub(get_users, 'dplyr::tbl', data.frame(id = c(20200101, 20200102, 20240103))) + mockery::stub(get_users, "dplyr::tbl", data.frame(id = c(20200101, 20200102, 20240103))) mockery::stub( - get_users, - 'dbHostInfo', + get_users, + "dbHostInfo", data.frame( start = 20190201, 
end = 20230101 @@ -131,7 +130,7 @@ test_that("`get_users()` ", { test_that("`get_run_ids()` able to get vector of run ids (in sorted order) for a given workflow ID", { mockery::stub( get_run_ids, - 'runs', + "runs", data.frame( run_id = c(3, 1, 2), folder = c("test_folder_1", "test_folder_2", "test_folder_3") @@ -142,13 +141,13 @@ test_that("`get_run_ids()` able to get vector of run ids (in sorted order) for a expect_equal(result, c(1, 2, 3)) # if no run ids are found - mockery::stub(get_run_ids, 'runs', data.frame()) + mockery::stub(get_run_ids, "runs", data.frame()) result <- get_run_ids(bety = 1, workflow_id = 1) expect_equal(result, c("No runs found")) }) test_that("`var_names_all()` able get vector of variable names for a particular workflow and run ID removing variables not to be shown to user", { - mockery::stub(var_names_all, 'get_var_names', c('A', 'B', 'C', 'Year','FracJulianDay')) + mockery::stub(var_names_all, "get_var_names", c("A", "B", "C", "Year", "FracJulianDay")) result <- var_names_all(bety = 1, workflow_id = 1, run_id = 1) - expect_equal(result, c('A', 'B', 'C')) -}) \ No newline at end of file + expect_equal(result, c("A", "B", "C")) +}) diff --git a/base/db/tests/testthat/test.query.file.path.R b/base/db/tests/testthat/test.query.file.path.R index 1da98a019e6..e87dbf66995 100644 --- a/base/db/tests/testthat/test.query.file.path.R +++ b/base/db/tests/testthat/test.query.file.path.R @@ -1,8 +1,8 @@ test_that("`query.file.path()`", { # mock responses for subsequent calls to db.query - mocked_res <- mockery::mock(data.frame(id = '20210101'), data.frame(file_name = 'test_file', file_path = 'test_path')) - mockery::stub(query.file.path, 'db.query', mocked_res) - mockery::stub(query.file.path, 'PEcAn.remote::remote.execute.R', TRUE) + mocked_res <- mockery::mock(data.frame(id = "20210101"), data.frame(file_name = "test_file", file_path = "test_path")) + mockery::stub(query.file.path, "db.query", mocked_res) + mockery::stub(query.file.path, "PEcAn.remote::remote.execute.R", TRUE) res <- query.file.path(input.id = 1, host_name = "pecan", con = 1) args <- mockery::mock_args(mocked_res) expect_true( @@ -17,5 +17,5 @@ test_that("`query.file.path()`", { args[[2]]$query ) ) - expect_equal(res, 'test_path/test_file') -}) \ No newline at end of file + expect_equal(res, "test_path/test_file") +}) diff --git a/base/db/tests/testthat/test.query.priors.R b/base/db/tests/testthat/test.query.priors.R index 4f0311a9e8f..11be03b63bf 100644 --- a/base/db/tests/testthat/test.query.priors.R +++ b/base/db/tests/testthat/test.query.priors.R @@ -1,13 +1,13 @@ -test_that("`query.priors()` correctly forms the query based on the parameters passed and returns priors",{ +test_that("`query.priors()` correctly forms the query based on the parameters passed and returns priors", { mocked_function <- mockery::mock(data.frame(name = c("A", "B"), value = c(0.1, 0.2))) - mockery::stub(query.priors, 'db.query', mocked_function) + mockery::stub(query.priors, "db.query", mocked_function) priors <- query.priors("ebifarm.pavi", c("SLA"), con = 1) expect_equal(priors, c(0.1, 0.2)) args <- mockery::mock_args(mocked_function) expect_true( grepl( - "WHERE pfts.id = ebifarm.pavi AND variables.name IN .* SLA", + "WHERE pfts.id = ebifarm.pavi AND variables.name IN .* SLA", args[[1]]$query ) ) -}) \ No newline at end of file +}) diff --git a/base/db/tests/testthat/test.query.site.R b/base/db/tests/testthat/test.query.site.R index ec61c210581..2658ec2b89d 100644 --- a/base/db/tests/testthat/test.query.site.R +++ 
b/base/db/tests/testthat/test.query.site.R @@ -1,14 +1,14 @@ test_that("`query.site()` correctly forms the query and returns the site", { mock_site_data <- data.frame(id = c(1), lon = c(1), lat = c(1)) mocked_function <- mockery::mock(mock_site_data) - mockery::stub(query.site, 'db.query', mocked_function) + mockery::stub(query.site, "db.query", mocked_function) site <- query.site(1, con = 1) expect_equal(site, mock_site_data) args <- mockery::mock_args(mocked_function) expect_true( grepl( - "WHERE id = 1", + "WHERE id = 1", args[[1]]$query ) ) -}) \ No newline at end of file +}) diff --git a/base/db/tests/testthat/test.query.yields.R b/base/db/tests/testthat/test.query.yields.R index c8a7905d51f..3fdb6f42ffd 100644 --- a/base/db/tests/testthat/test.query.yields.R +++ b/base/db/tests/testthat/test.query.yields.R @@ -1,6 +1,6 @@ test_that("`query.yields()` able to form the query correctly for trait set to 'yield' and with no extra columns", { - mocked_function <- mockery::mock(data.frame(Y=rep(1,5), stat=rep(1,5), n=rep(4,5), mean = rep(3,5), statname=c('SD', 'MSE', 'LSD', 'HSD', 'MSD'))) - mockery::stub(query.yields, 'db.query', mocked_function, 2) + mocked_function <- mockery::mock(data.frame(Y = rep(1, 5), stat = rep(1, 5), n = rep(4, 5), mean = rep(3, 5), statname = c("SD", "MSE", "LSD", "HSD", "MSD"))) + mockery::stub(query.yields, "db.query", mocked_function, 2) result <- query.yields(spstr = "test_spstr", con = 1) args <- mockery::mock_args(mocked_function) @@ -9,7 +9,7 @@ test_that("`query.yields()` able to form the query correctly for trait set to 'y paste0( "month\\(yields.date\\) as month,treatments.control.*", "where specie_id in \\(test_spstr\\);" - ), + ), args[[1]]$query ) ) @@ -21,15 +21,15 @@ test_that("`query.yields()` throws an error if extra columns is not a string", { "`extra.columns` must be a string" ) expect_error( - query.yields(spstr = "test_spstr", con = 1, extra.columns = c("a","b")), + query.yields(spstr = "test_spstr", con = 1, extra.columns = c("a", "b")), "`extra.columns` must be a string" ) }) -test_that("`query.yields()` able to form the query correctly for trait not equal to 'yield' and with extra columns",{ - mocked_function <- mockery::mock(data.frame(Y=rep(1,5), stat=rep(1,5), n=rep(4,5), mean = rep(3,5), statname=c('SD', 'MSE', 'LSD', 'HSD', 'MSD'))) - mockery::stub(query.yields, 'db.query', mocked_function, 2) - result <- query.yields(trait = 'test_trait', spstr = "test_spstr", extra.columns = 'test_col', con = 1) +test_that("`query.yields()` able to form the query correctly for trait not equal to 'yield' and with extra columns", { + mocked_function <- mockery::mock(data.frame(Y = rep(1, 5), stat = rep(1, 5), n = rep(4, 5), mean = rep(3, 5), statname = c("SD", "MSE", "LSD", "HSD", "MSD"))) + mockery::stub(query.yields, "db.query", mocked_function, 2) + result <- query.yields(trait = "test_trait", spstr = "test_spstr", extra.columns = "test_col", con = 1) args <- mockery::mock_args(mocked_function) expect_true( grepl( @@ -40,4 +40,4 @@ test_that("`query.yields()` able to form the query correctly for trait not equal args[[1]]$query ) ) -}) \ No newline at end of file +}) diff --git a/base/db/tests/testthat/test.stamp.R b/base/db/tests/testthat/test.stamp.R index c566e073ee4..83a0d9af52b 100644 --- a/base/db/tests/testthat/test.stamp.R +++ b/base/db/tests/testthat/test.stamp.R @@ -1,6 +1,6 @@ test_that("`stamp_started()` able to correctly update the query for run_id passed", { mock_function <- mockery::mock() - mockery::stub(stamp_started, 
'PEcAn.DB::db.query', mock_function) + mockery::stub(stamp_started, "PEcAn.DB::db.query", mock_function) stamp_started(1, 1) args <- mockery::mock_args(mock_function) expect_true(grepl("started_at .* WHERE id = 1", args[[1]]$query)) @@ -8,8 +8,8 @@ test_that("`stamp_started()` able to correctly update the query for run_id passe test_that("`stamp_finished()` able to correctly update the query for run_id passed", { mock_function <- mockery::mock() - mockery::stub(stamp_finished, 'PEcAn.DB::db.query', mock_function) + mockery::stub(stamp_finished, "PEcAn.DB::db.query", mock_function) stamp_finished(1, 1) args <- mockery::mock_args(mock_function) expect_true(grepl("finished_at .* WHERE id = 1", args[[1]]$query)) -}) \ No newline at end of file +}) diff --git a/base/db/tests/testthat/test.symmetric-setdiff.R b/base/db/tests/testthat/test.symmetric-setdiff.R index d4fd8a5fec5..7cf81fa3552 100644 --- a/base/db/tests/testthat/test.symmetric-setdiff.R +++ b/base/db/tests/testthat/test.symmetric-setdiff.R @@ -8,9 +8,10 @@ test_that("Symmetric setdiff works", { dplyr::select(-mean, -stat) %>% dplyr::collect() y <- x %>% - dplyr::mutate_if(~inherits(., "difftime"), as.character) + dplyr::mutate_if(~ inherits(., "difftime"), as.character) msg <- paste(capture.output(xydiff <- symmetric_setdiff(x, y), type = "message"), - collapse = "\n") + collapse = "\n" + ) expect_match(msg, "Detected at least one `integer64` column") expect_equal(nrow(xydiff), 0) }) @@ -18,22 +19,23 @@ test_that("Symmetric setdiff works", { test_that("Unequal dfs compare unequal", { expect_error( symmetric_setdiff(data.frame(a = 1L), data.frame(b = 1L)), - "Cols in `?x`? but not `?y") + "Cols in `?x`? but not `?y" + ) d <- symmetric_setdiff(data.frame(a = 1:3L), data.frame(a = 1:4L)) expect_length(d$a, 1L) expect_equal(d$a, 4L) expect_equal(d$source, "y") - }) test_that("symmetric inputs give same output", { - x <- data.frame(a=1:3L, b=LETTERS[1:3L]) - y <- data.frame(a=2:5L, b=LETTERS[2:5L]) + x <- data.frame(a = 1:3L, b = LETTERS[1:3L]) + y <- data.frame(a = 2:5L, b = LETTERS[2:5L]) xy <- symmetric_setdiff(x, y) yx <- symmetric_setdiff(y, x) purrr::walk2(xy, yx, expect_setequal) expect_equal( # left input aways labeled x -> xy$source is inverse of yx$source dplyr::select(xy, -source) %>% dplyr::arrange(a), - dplyr::select(yx, -source) %>% dplyr::arrange(a)) + dplyr::select(yx, -source) %>% dplyr::arrange(a) + ) }) diff --git a/base/db/tests/testthat/test.take.samples.R b/base/db/tests/testthat/test.take.samples.R index dee2e479b44..2732d90a707 100644 --- a/base/db/tests/testthat/test.take.samples.R +++ b/base/db/tests/testthat/test.take.samples.R @@ -1,15 +1,17 @@ test_that("`take.samples` returns mean when stat is NA", { - summary = list(mean = 10, stat = NA) - expect_equal(take.samples(summary = summary), summary$mean) + summary <- list(mean = 10, stat = NA) + expect_equal(take.samples(summary = summary), summary$mean) }) test_that("`take.samples` returns a vector of length sample.size for given summary stats", { - summary = list(mean = 10, stat = 10) - sample.size = 10 + summary <- list(mean = 10, stat = 10) + sample.size <- 10 expect_equal(length(take.samples(summary = summary, sample.size = sample.size)), sample.size) # Testing for exact return values for a simple example - test.sample <- take.samples(summary = data.frame(mean = 1, stat = 1), - sample.size = 2) + test.sample <- take.samples( + summary = data.frame(mean = 1, stat = 1), + sample.size = 2 + ) expect_equal(test.sample, c(2.26295428488079, 0.673766639294351)) -}) 
\ No newline at end of file +}) diff --git a/base/db/tests/testthat/test.utils_db.R b/base/db/tests/testthat/test.utils_db.R index b859baea575..93fe34952e3 100644 --- a/base/db/tests/testthat/test.utils_db.R +++ b/base/db/tests/testthat/test.utils_db.R @@ -2,7 +2,7 @@ # PEcAn.logger::logger.setUseConsole(TRUE, FALSE) # on.exit(PEcAn.logger::logger.setUseConsole(TRUE, TRUE), add = TRUE) # expect_output( -# db.print.connections(), +# db.print.connections(), # paste0( # ".* Created 0 connections and executed 0 queries .* ", # "Created 0 connections and executed 0 queries.*", @@ -23,4 +23,4 @@ test_that("`default_hostname()` fixes hostname if the host is localhost", { # if not localhost expect_equal(default_hostname("pecan"), "pecan") -}) \ No newline at end of file +}) diff --git a/base/logger/R/logger.R b/base/logger/R/logger.R index ee0fc9c8e62..052e351eed1 100644 --- a/base/logger/R/logger.R +++ b/base/logger/R/logger.R @@ -4,12 +4,13 @@ .utils.logger$stderr <- TRUE .utils.logger$quit <- FALSE .utils.logger$level <- 0 -.utils.logger$width <- ifelse(getOption("width") < 10, - getOption("width"), - getOption("width") - 5) +.utils.logger$width <- ifelse(getOption("width") < 10, + getOption("width"), + getOption("width") - 5 +) ##' Prints a debug message. -##' +##' ##' This function will print a debug message. ##' ##' @param msg the message that should be printed. @@ -26,7 +27,7 @@ logger.debug <- function(msg, ...) { ##' Prints an informational message. -##' +##' ##' This function will print an informational message. ##' ##' @param msg the message that should be printed. @@ -43,7 +44,7 @@ logger.info <- function(msg, ...) { ##' Prints a warning message. -##' +##' ##' This function will print a warning message. ##' ##' @param msg the message that should be printed. @@ -60,7 +61,7 @@ logger.warn <- function(msg, ...) { ##' Prints an error message. -##' +##' ##' This function will print an error message. ##' ##' @param msg the message that should be printed. @@ -77,10 +78,10 @@ logger.error <- function(msg, ...) { ##' Prints an severe message and stops execution. -##' +##' ##' This function will print a message and stop execution of the code. This ##' should only be used if the application should terminate. -##' +##' ##' set \code{\link{logger.setQuitOnSevere}(FALSE)} to avoid terminating ##' the session. This is set by default to TRUE if interactive or running ##' inside Rstudio. @@ -96,13 +97,13 @@ logger.error <- function(msg, ...) { ##' } logger.severe <- function(msg, ..., wrap = TRUE) { logger.message("SEVERE", msg, ...) - + # run option error <- getOption("error") if (!is.null(error)) { eval(error) } - + # quit if not interactive, otherwise use stop if (.utils.logger$quit) { quit(save = "no", status = 1) @@ -113,7 +114,7 @@ logger.severe <- function(msg, ..., wrap = TRUE) { ##' Prints a message at a certain log level. -##' +##' ##' This function will print a message. This is the function that is responsible for ##' the actual printing of the message. 
##' @@ -138,24 +139,25 @@ logger.message <- function(level, msg, ..., wrap = TRUE) { if (length(func) == 0) { func <- "console" } - + stamp.text <- sprintf("%s %-6s [%s] :", Sys.time(), level, func) long.msg <- stringi::stri_trans_general(paste(c(msg, ...), collapse = " "), "latin-ascii") if (nchar(long.msg) > 20 && wrap) { - new.msg <- paste("\n", strwrap(long.msg, width = .utils.logger$width, - indent = 2, exdent = 2), collapse = " ") + new.msg <- paste("\n", strwrap(long.msg, + width = .utils.logger$width, + indent = 2, exdent = 2 + ), collapse = " ") } else { new.msg <- long.msg } text <- paste(stamp.text, new.msg, "\n") - + if (.utils.logger$console) { if (.utils.logger$stderr) { cat(text, file = stderr()) } else { cat(text, file = stdout()) } - } if (!is.na(.utils.logger$filename)) { cat(text, file = .utils.logger$filename, append = TRUE) @@ -165,7 +167,7 @@ logger.message <- function(level, msg, ..., wrap = TRUE) { ##' Configure logging level. -##' +##' ##' This will configure the logger level. This allows to turn DEBUG, INFO, ##' WARN and ERROR messages on and off. ##' @@ -192,8 +194,8 @@ logger.setLevel <- function(level) { ## ERROR = 40 ## ALL = 99 ## -##@return level the level of the message -##@author Rob Kooper +## @return level the level of the message +## @author Rob Kooper logger.getLevelNumber <- function(level) { if (toupper(level) == "ALL") { return(0) @@ -217,7 +219,7 @@ logger.getLevelNumber <- function(level) { ##' Get configured logging level. -##' +##' ##' This will return the current level configured of the logging messages ##' ##' @return level the level of the message (ALL, DEBUG, INFO, WARN, ERROR, OFF) @@ -247,7 +249,7 @@ logger.getLevel <- function() { ##' Configure logging to console. -##' +##' ##' Should the logging to be printed to the console or not. ##' ##' @param console set to true to print logging to console. @@ -265,7 +267,7 @@ logger.setUseConsole <- function(console, stderr = TRUE) { ##' Configure logging output filename. -##' +##' ##' The name of the file where the logging information should be written to. ##' ##' @param filename the file to send the log messages to (or NA to not write to file) @@ -281,9 +283,9 @@ logger.setOutputFile <- function(filename) { ##' Configure whether severe should quit. -##' +##' ##' The default is for a non-interactive session to quit. Setting this to false is -##' especially useful for running tests when placed in \code{inst/tests/test..R}, +##' especially useful for running tests when placed in \code{inst/tests/test..R}, ##' but is not passed from \code{tests/run.all.R}. ##' ##' @param severeQuits should R quit on a severe error. @@ -299,7 +301,7 @@ logger.setQuitOnSevere <- function(severeQuits) { ##' Configure the number of chars per line -##' +##' ##' The default is for 60 chars per line. Setting this to any value will ##' wrap the line when printing a message at that many chars. ##' diff --git a/base/logger/R/logifnot.R b/base/logger/R/logifnot.R index 13faa7d084b..566e1c7a118 100644 --- a/base/logger/R/logifnot.R +++ b/base/logger/R/logifnot.R @@ -18,9 +18,11 @@ #' warnifnot("I would prefer it if you used lists.", is.list(a), is.list(b)) #' errorifnot("You should definitely use lists.", is.list(a), is.list(b)) #' try({ -#' severeifnot("I cannot deal with the fact that something is not a list.", +#' severeifnot( +#' "I cannot deal with the fact that something is not a list.", #' is.list(a), -#' is.list(b)) +#' is.list(b) +#' ) #' }) #' @export severeifnot <- function(msg, ...) 
{ diff --git a/base/logger/tests/testthat/test.logger.R b/base/logger/tests/testthat/test.logger.R index bc6f354dda9..5f9a1691102 100644 --- a/base/logger/tests/testthat/test.logger.R +++ b/base/logger/tests/testthat/test.logger.R @@ -1,7 +1,6 @@ - context("Testing Logger Settings") -test_that("`logger.getLevelNumber` returns correct level number",{ +test_that("`logger.getLevelNumber` returns correct level number", { expect_equal(logger.getLevelNumber("all"), 0) expect_equal(logger.getLevelNumber("debug"), 10) expect_equal(logger.getLevelNumber("info"), 20) @@ -9,33 +8,37 @@ test_that("`logger.getLevelNumber` returns correct level number",{ expect_equal(logger.getLevelNumber("error"), 40) expect_equal(logger.getLevelNumber("severe"), 40) expect_equal(logger.getLevelNumber("off"), 60) - + old_settings <- logger.setLevel("ERROR") on.exit(logger.setLevel(old_settings), add = TRUE) expect_equal(logger.getLevelNumber("INVALID"), 20) }) -test_that("`setWidth` works for different specified number of chars per line",{ +test_that("`setWidth` works for different specified number of chars per line", { logger.setUseConsole(TRUE, FALSE) on.exit(logger.setUseConsole(TRUE, TRUE), add = TRUE) - - expect_output(logger.info("A long error message that helps us understand what the error in the function is"), - "INFO \\[.*\\] : \\n A long error message that helps us understand what the error in the \\n function is ") - + + expect_output( + logger.info("A long error message that helps us understand what the error in the function is"), + "INFO \\[.*\\] : \\n A long error message that helps us understand what the error in the \\n function is " + ) + old_width <- .utils.logger$width logger.setWidth(10) on.exit(logger.setWidth(old_width), add = TRUE) - expect_output(logger.info("A long error message that helps us understand what the error in the function is"), - "INFO \\[.*\\] : \\n A long \\n error \\n message \\n that \\n helps \\n us \\n understand \\n what \\n the \\n error \\n in the \\n function \\n is ") - + expect_output( + logger.info("A long error message that helps us understand what the error in the function is"), + "INFO \\[.*\\] : \\n A long \\n error \\n message \\n that \\n helps \\n us \\n understand \\n what \\n the \\n error \\n in the \\n function \\n is " + ) + logger.setWidth(30) - expect_output(logger.info("A long error message that helps us understand what the error in the function is"), - "INFO \\[.*\\] : \\n A long error message that \\n helps us understand what \\n the error in the function \\n is ") - + expect_output( + logger.info("A long error message that helps us understand what the error in the function is"), + "INFO \\[.*\\] : \\n A long error message that \\n helps us understand what \\n the error in the function \\n is " + ) }) -test_that("logger prints right messages, responds correctly to logger.setLevel",{ - +test_that("logger prints right messages, responds correctly to logger.setLevel", { logger.setUseConsole(TRUE, FALSE) on.exit(logger.setUseConsole(TRUE, TRUE), add = TRUE) @@ -43,29 +46,29 @@ test_that("logger prints right messages, responds correctly to logger.setLevel", on.exit(logger.setLevel(old_settings), add = TRUE) expect_equal(logger.getLevel(), "ALL") expect_output(logger.debug("message"), "DEBUG \\[.*\\] : message") - expect_output(logger.info("message"), "INFO \\[.*\\] : message") - expect_output(logger.warn("message"), "WARN \\[.*\\] : message") + expect_output(logger.info("message"), "INFO \\[.*\\] : message") + expect_output(logger.warn("message"), "WARN 
\\[.*\\] : message") expect_output(logger.error("message"), "ERROR \\[.*\\] : message") logger.setLevel("DEBUG") expect_equal(logger.getLevel(), "DEBUG") expect_output(logger.debug("message"), "DEBUG \\[.*\\] : message") - expect_output(logger.info("message"), "INFO \\[.*\\] : message") - expect_output(logger.warn("message"), "WARN \\[.*\\] : message") + expect_output(logger.info("message"), "INFO \\[.*\\] : message") + expect_output(logger.warn("message"), "WARN \\[.*\\] : message") expect_output(logger.error("message"), "ERROR \\[.*\\] : message") logger.setLevel("INFO") expect_equal(logger.getLevel(), "INFO") expect_silent(logger.debug("message")) - expect_output(logger.info("message"), "INFO \\[.*\\] : message") - expect_output(logger.warn("message"), "WARN \\[.*\\] : message") + expect_output(logger.info("message"), "INFO \\[.*\\] : message") + expect_output(logger.warn("message"), "WARN \\[.*\\] : message") expect_output(logger.error("message"), "ERROR \\[.*\\] : message") logger.setLevel("WARN") expect_equal(logger.getLevel(), "WARN") expect_silent(logger.debug("message")) expect_silent(logger.info("message")) - expect_output(logger.warn("message"), "WARN \\[.*\\] : message") + expect_output(logger.warn("message"), "WARN \\[.*\\] : message") expect_output(logger.error("message"), "ERROR \\[.*\\] : message") logger.setLevel("ERROR") @@ -118,4 +121,4 @@ test_that("`logger.message` able to redirect logging information to file set by readLines(tf) }) expect_true(grepl(".*WARN \\[.*\\] : message", f)) -}) \ No newline at end of file +}) diff --git a/base/logger/tests/testthat/test.logifnot.R b/base/logger/tests/testthat/test.logifnot.R index c8daa1a3de0..0a5e9a9fb52 100644 --- a/base/logger/tests/testthat/test.logifnot.R +++ b/base/logger/tests/testthat/test.logifnot.R @@ -1,25 +1,25 @@ -test_that("`is_definitely_true` handles invalid conditionals passed",{ +test_that("`is_definitely_true` handles invalid conditionals passed", { expect_equal(is_definitely_true(NULL), FALSE) expect_equal(is_definitely_true(""), FALSE) expect_equal(is_definitely_true("pecan"), FALSE) }) -test_that("`is_definitely_true` handles single conditional statement correctly",{ +test_that("`is_definitely_true` handles single conditional statement correctly", { test_list <- list(1:10) - expect_equal(is_definitely_true("pecan"=="carya"), FALSE) + expect_equal(is_definitely_true("pecan" == "carya"), FALSE) expect_equal(is_definitely_true(is.list(test_list)), TRUE) - expect_equal(is_definitely_true("pecan"=="pecan" && "pecan"!="bety" && is.list(test_list)), TRUE) - expect_equal(is_definitely_true("pecan"=="pecan" || ("pecan"=="bety" && is.list(test_list))), TRUE) + expect_equal(is_definitely_true("pecan" == "pecan" && "pecan" != "bety" && is.list(test_list)), TRUE) + expect_equal(is_definitely_true("pecan" == "pecan" || ("pecan" == "bety" && is.list(test_list))), TRUE) }) -test_that("`check_conditions` handles multiple conditional statements correctly",{ +test_that("`check_conditions` handles multiple conditional statements correctly", { test_list <- list(1:10) - expect_equal(check_conditions(FALSE && TRUE, "pecan"=="pecan"), FALSE) - expect_equal(check_conditions("pecan"=="pecan", TRUE || FALSE && is.list(test_list)), TRUE) + expect_equal(check_conditions(FALSE && TRUE, "pecan" == "pecan"), FALSE) + expect_equal(check_conditions("pecan" == "pecan", TRUE || FALSE && is.list(test_list)), TRUE) }) -test_that( "logifnot prints right message based on the conditions passed, responds correctly to logger.setLevel",{ 
+test_that("logifnot prints right message based on the conditions passed, responds correctly to logger.setLevel", { logger.setUseConsole(TRUE, FALSE) on.exit(logger.setUseConsole(TRUE, TRUE), add = TRUE) @@ -28,7 +28,7 @@ test_that( "logifnot prints right message based on the conditions passed, respon expect_output(debugifnot("message", FALSE), "DEBUG \\[.*\\] : message") expect_output(infoifnot("message", FALSE), "INFO \\[.*\\] : message") - expect_output(warnifnot("message", FALSE), "WARN \\[.*\\] : message") + expect_output(warnifnot("message", FALSE), "WARN \\[.*\\] : message") expect_output(errorifnot("message", FALSE), "ERROR \\[.*\\] : message") expect_silent(debugifnot("message", TRUE)) expect_silent(infoifnot("message", TRUE)) @@ -37,20 +37,20 @@ test_that( "logifnot prints right message based on the conditions passed, respon logger.setLevel("DEBUG") expect_output(debugifnot("message", FALSE), "DEBUG \\[.*\\] : message") - expect_output(infoifnot("message", FALSE), "INFO \\[.*\\] : message") - expect_output(warnifnot("message", FALSE), "WARN \\[.*\\] : message") + expect_output(infoifnot("message", FALSE), "INFO \\[.*\\] : message") + expect_output(warnifnot("message", FALSE), "WARN \\[.*\\] : message") expect_output(errorifnot("message", FALSE), "ERROR \\[.*\\] : message") logger.setLevel("INFO") expect_silent(debugifnot("message", FALSE)) - expect_output(infoifnot("message", FALSE), "INFO \\[.*\\] : message") - expect_output(warnifnot("message", FALSE), "WARN \\[.*\\] : message") + expect_output(infoifnot("message", FALSE), "INFO \\[.*\\] : message") + expect_output(warnifnot("message", FALSE), "WARN \\[.*\\] : message") expect_output(errorifnot("message", FALSE), "ERROR \\[.*\\] : message") logger.setLevel("WARN") expect_silent(debugifnot("message", FALSE)) expect_silent(infoifnot("message", FALSE)) - expect_output(warnifnot("message", FALSE), "WARN \\[.*\\] : message") + expect_output(warnifnot("message", FALSE), "WARN \\[.*\\] : message") expect_output(errorifnot("message", FALSE), "ERROR \\[.*\\] : message") logger.setLevel("ERROR") @@ -64,12 +64,8 @@ test_that( "logifnot prints right message based on the conditions passed, respon expect_silent(infoifnot("message", FALSE)) expect_silent(warnifnot("message", FALSE)) expect_silent(errorifnot("message", FALSE)) - + logger.setQuitOnSevere(FALSE) on.exit(logger.setQuitOnSevere(TRUE), add = TRUE) expect_error(severeifnot("message", FALSE), "message") }) - - - - diff --git a/base/qaqc/tests/testthat.R b/base/qaqc/tests/testthat.R index bf5de661ff5..78434ec615c 100644 --- a/base/qaqc/tests/testthat.R +++ b/base/qaqc/tests/testthat.R @@ -2,4 +2,4 @@ library(testthat) library(PEcAn.qaqc) PEcAn.logger::logger.setQuitOnSevere(FALSE) -#test_check("PEcAn.qaqc") +# test_check("PEcAn.qaqc") diff --git a/base/qaqc/tests/testthat/test-taylorplot.R b/base/qaqc/tests/testthat/test-taylorplot.R index 7acc7853bcc..38a3e6045e1 100644 --- a/base/qaqc/tests/testthat/test-taylorplot.R +++ b/base/qaqc/tests/testthat/test-taylorplot.R @@ -1,14 +1,15 @@ test_that("taylor diagram", { + set.seed(1) + testdata <- data.frame( + site = c(1, 1, 1, 2, 2, 3), + date = c(2001, 2001, 2002, 2003, 2004, 2005), + obs = rnorm(6, 10, 2), + model1 = rnorm(6, 10, 3) + 2, + model2 = rnorm(6, 11, 3) + 2 + ) - set.seed(1) - testdata <- data.frame( - site = c(1, 1, 1, 2, 2, 3), - date = c(2001, 2001, 2002, 2003, 2004, 2005), - obs = rnorm(6, 10, 2), - model1 = rnorm(6, 10, 3) + 2, - model2 = rnorm(6, 11, 3) + 2) - - vdiffr::expect_doppelganger( - "taylor diagram", - 
function() new.taylor(testdata, siteid = 1:3, runid = 1:2)) + vdiffr::expect_doppelganger( + "taylor diagram", + function() new.taylor(testdata, siteid = 1:3, runid = 1:2) + ) }) diff --git a/base/qaqc/tests/testthat/test.cull_database_entries.R b/base/qaqc/tests/testthat/test.cull_database_entries.R index 0b81d294fa6..2da120ce909 100644 --- a/base/qaqc/tests/testthat/test.cull_database_entries.R +++ b/base/qaqc/tests/testthat/test.cull_database_entries.R @@ -1,25 +1,25 @@ -test_that("`cull_database_entries()` gives errors for faulty inputs",{ +test_that("`cull_database_entries()` gives errors for faulty inputs", { expect_error( - cull_database_entries(outdir = 'test'), + cull_database_entries(outdir = "test"), "If a table object hasn't been provided, a file_name must be set." ) expect_error( - cull_database_entries(table = 'test_table', file_name = 'test_file', outdir = 'test'), + cull_database_entries(table = "test_table", file_name = "test_file", outdir = "test"), "table and file_name cannot both be provided." ) expect_error( - cull_database_entries(table = 'test_table', outdir = 'test'), + cull_database_entries(table = "test_table", outdir = "test"), "Please provide a table_name" ) }) test_that("`cull_database_entries()` able to correctly add logs to the output file", { withr::with_dir(tempdir(), { - mockery::stub(cull_database_entries, 'PEcAn.DB::db.query', 'test_log') + mockery::stub(cull_database_entries, "PEcAn.DB::db.query", "test_log") dir <- getwd() - cull_database_entries(table = data.frame(id = 1), table_name = 'test', con = 1, outdir = dir) + cull_database_entries(table = data.frame(id = 1), table_name = "test", con = 1, outdir = dir) expect_true(file.exists(paste0(dir, "/deletion_log_of_test"))) file_data <- readLines(paste0(dir, "/deletion_log_of_test")) expect_equal(grepl("test_log", file_data), c(TRUE, TRUE)) }) -}) \ No newline at end of file +}) diff --git a/base/qaqc/tests/testthat/test.find_formats_without_inputs.R b/base/qaqc/tests/testthat/test.find_formats_without_inputs.R index f61af2368b5..b3b12a534ff 100644 --- a/base/qaqc/tests/testthat/test.find_formats_without_inputs.R +++ b/base/qaqc/tests/testthat/test.find_formats_without_inputs.R @@ -1,14 +1,14 @@ -test_that("`find_formats_without_inputs()` able to find formats with no input record",{ - format_command_mock <- data.frame(user_id = '2020', created_at = '2001-01-01', updated_at = '2010-01-01') - input_command_mock <- data.frame(format_id = '2000', user_id = '2021', created_at = '2002-01-02', updated_at = '2012-01-02') +test_that("`find_formats_without_inputs()` able to find formats with no input record", { + format_command_mock <- data.frame(user_id = "2020", created_at = "2001-01-01", updated_at = "2010-01-01") + input_command_mock <- data.frame(format_id = "2000", user_id = "2021", created_at = "2002-01-02", updated_at = "2012-01-02") mocked_res <- mockery::mock(input_command_mock, format_command_mock) - mockery::stub(find_formats_without_inputs, 'dplyr::tbl', mocked_res) + mockery::stub(find_formats_without_inputs, "dplyr::tbl", mocked_res) res <- find_formats_without_inputs( - con = NULL, user_id_code = '2020', created_after = '2000-01-01', updated_after = '2009-01-01', created_before = '2002-01-01', updated_before = '2011-01-01' + con = NULL, user_id_code = "2020", created_after = "2000-01-01", updated_after = "2009-01-01", created_before = "2002-01-01", updated_before = "2011-01-01" ) expect_equal( - res, - data.frame(id = '2020', created_at = '2001-01-01', updated_at = '2010-01-01', table_name = 
"formats") + res, + data.frame(id = "2020", created_at = "2001-01-01", updated_at = "2010-01-01", table_name = "formats") ) -}) \ No newline at end of file +}) diff --git a/base/qaqc/tests/testthat/test.find_inputs_without_formats.R b/base/qaqc/tests/testthat/test.find_inputs_without_formats.R index 75c4b86d36b..53235968db9 100644 --- a/base/qaqc/tests/testthat/test.find_inputs_without_formats.R +++ b/base/qaqc/tests/testthat/test.find_inputs_without_formats.R @@ -1,13 +1,13 @@ test_that("`find_inputs_without_formats()` able to find inputs with no format records", { - input_command_mock <- data.frame(format_id = '2020', user_id = '2020', created_at = '2001-01-01', updated_at = '2010-01-01') - format_command_mock <- data.frame(user_id = '2021', created_at = '2002-01-02', updated_at = '2012-01-02') + input_command_mock <- data.frame(format_id = "2020", user_id = "2020", created_at = "2001-01-01", updated_at = "2010-01-01") + format_command_mock <- data.frame(user_id = "2021", created_at = "2002-01-02", updated_at = "2012-01-02") mocked_res <- mockery::mock(input_command_mock, format_command_mock) - mockery::stub(find_inputs_without_formats, 'dplyr::tbl', mocked_res) + mockery::stub(find_inputs_without_formats, "dplyr::tbl", mocked_res) res <- find_inputs_without_formats( - con = NULL, user_id = '2020', created_after = '2000-01-01', updated_after = '2009-01-01', created_before = '2002-01-01', updated_before = '2011-01-01' + con = NULL, user_id = "2020", created_after = "2000-01-01", updated_after = "2009-01-01", created_before = "2002-01-01", updated_before = "2011-01-01" ) expect_equal( - res, - data.frame(id = '2020', user_id = '2020',created_at = '2001-01-01', updated_at = '2010-01-01', table_name = "inputs") + res, + data.frame(id = "2020", user_id = "2020", created_at = "2001-01-01", updated_at = "2010-01-01", table_name = "inputs") ) -}) \ No newline at end of file +}) diff --git a/base/qaqc/tests/testthat/test.get_table_column_names.R b/base/qaqc/tests/testthat/test.get_table_column_names.R index 8a313365692..65048d3b055 100644 --- a/base/qaqc/tests/testthat/test.get_table_column_names.R +++ b/base/qaqc/tests/testthat/test.get_table_column_names.R @@ -1,8 +1,8 @@ -test_that("`get_table_column_names()` able to return the column names of a table as a list",{ +test_that("`get_table_column_names()` able to return the column names of a table as a list", { mocked_res <- mockery::mock(data.frame(head1 = 1, head2 = 2)) - mockery::stub(get_table_column_names, 'PEcAn.DB::db.query', mocked_res) - res <- get_table_column_names(table = data.frame(table_name = 'test_table'), con = 1) + mockery::stub(get_table_column_names, "PEcAn.DB::db.query", mocked_res) + res <- get_table_column_names(table = data.frame(table_name = "test_table"), con = 1) args <- mockery::mock_args(mocked_res) expect_equal(args[[1]][[1]], "SELECT * from test_table LIMIT 1") expect_equal(res, list(test_table = c("head1", "head2"))) -}) \ No newline at end of file +}) diff --git a/base/qaqc/tests/testthat/test.write_out_table.R b/base/qaqc/tests/testthat/test.write_out_table.R index fb4cedd6ed5..8ca6a000d51 100644 --- a/base/qaqc/tests/testthat/test.write_out_table.R +++ b/base/qaqc/tests/testthat/test.write_out_table.R @@ -1,11 +1,11 @@ -test_that("`write_out_table()` able to create and update output file with relevant data",{ +test_that("`write_out_table()` able to create and update output file with relevant data", { withr::with_dir(tempdir(), { dir <- getwd() write_out_table( - table = data.frame(id = 1, table_name = 'test'), 
table_name = 'test', relevant_table_columns = c(), outdir = dir + table = data.frame(id = 1, table_name = "test"), table_name = "test", relevant_table_columns = c(), outdir = dir ) expect_true(file.exists(paste0(dir, "/query_of_test"))) file_data <- readLines(paste0(dir, "/query_of_test")) expect_equal(grepl("test", file_data), c(FALSE, TRUE)) }) -}) \ No newline at end of file +}) diff --git a/base/remote/R/check_qsub_status.R b/base/remote/R/check_qsub_status.R index 76d744de845..c74eb77da73 100644 --- a/base/remote/R/check_qsub_status.R +++ b/base/remote/R/check_qsub_status.R @@ -8,8 +8,10 @@ #' @export qsub_run_finished <- function(run, host, qstat) { if (is.na(run)) { - PEcAn.logger::logger.warn("Job", run, "encountered an error during submission.", - "NOTE that the job will be stamped as 'finished' in BETY.") + PEcAn.logger::logger.warn( + "Job", run, "encountered an error during submission.", + "NOTE that the job will be stamped as 'finished' in BETY." + ) return(FALSE) } run_id_string <- format(run, scientific = FALSE) @@ -18,8 +20,8 @@ qsub_run_finished <- function(run, host, qstat) { # Need to use `system` to allow commands with pipes out <- system(check, intern = TRUE, ignore.stdout = FALSE, ignore.stderr = FALSE, wait = TRUE) } else { - # This uses `system2` under the hood, but that's OK because the entire - # command is passed as a single quoted argument, so the pipes are + # This uses `system2` under the hood, but that's OK because the entire + # command is passed as a single quoted argument, so the pipes are # preserved. out <- remote.execute.cmd(host = host, cmd = check, stderr = TRUE) } diff --git a/base/remote/R/kill.tunnel.R b/base/remote/R/kill.tunnel.R index 199d4054dc3..54020250c57 100644 --- a/base/remote/R/kill.tunnel.R +++ b/base/remote/R/kill.tunnel.R @@ -5,7 +5,7 @@ #' @param data Kill tunnel to data? #' @export #' @author Rob Kooper -kill.tunnel <- function(settings,exe=TRUE,data=TRUE) { +kill.tunnel <- function(settings, exe = TRUE, data = TRUE) { if (exe && !is.null(settings$host$tunnel)) { pidfile <- file.path(dirname(settings$host$tunnel), "pid") pid <- readLines(pidfile) diff --git a/base/remote/R/merge_job_files.R b/base/remote/R/merge_job_files.R index 6634b398bce..0e16eadbc72 100644 --- a/base/remote/R/merge_job_files.R +++ b/base/remote/R/merge_job_files.R @@ -8,15 +8,15 @@ #' @export #' @author Dongchen Zhang #' -merge_job_files <- function(settings, jobs_per_file = 10, outdir = NULL){ +merge_job_files <- function(settings, jobs_per_file = 10, outdir = NULL) { # default outdir - if(is.null(outdir)){ + if (is.null(outdir)) { outdir <- file.path(settings$rundir, "merged_jobs") } # create folder or delete previous job files. - if(dir.exists(outdir)){ + if (dir.exists(outdir)) { unlink(list.files(outdir, recursive = T, full.names = T)) - }else{ + } else { dir.create(outdir) } # merge job files. 
@@ -27,14 +27,14 @@ merge_job_files <- function(settings, jobs_per_file = 10, outdir = NULL){ while (i < length(job_file_paths)) { jobs <- c() for (j in 1:jobs_per_file) { - if((i+j) > length(job_file_paths)){ + if ((i + j) > length(job_file_paths)) { break } - jobs <- c(jobs, readLines(job_file_paths[i+j])) + jobs <- c(jobs, readLines(job_file_paths[i + j])) } - writeLines(jobs, con = file.path(outdir, paste0("job_", i,".sh"))) - Sys.chmod(file.path(outdir, paste0("job_", i,".sh"))) - files <- c(files, file.path(outdir, paste0("job_", i,".sh"))) + writeLines(jobs, con = file.path(outdir, paste0("job_", i, ".sh"))) + Sys.chmod(file.path(outdir, paste0("job_", i, ".sh"))) + files <- c(files, file.path(outdir, paste0("job_", i, ".sh"))) i <- i + jobs_per_file } files diff --git a/base/remote/R/open.tunnel.R b/base/remote/R/open.tunnel.R index 79acbeb506c..978aca6e158 100644 --- a/base/remote/R/open.tunnel.R +++ b/base/remote/R/open.tunnel.R @@ -10,26 +10,25 @@ #' @return numeric giving ssh PID if configured, otherwise logical with TRUE = success #' @export open_tunnel <- function(remote_host, user = NULL, password = NULL, tunnel_dir = "~/.pecan/tunnel/", - wait.time = 15, tunnel_script = '~/pecan/web/sshtunnel.sh'){ - + wait.time = 15, tunnel_script = "~/pecan/web/sshtunnel.sh") { ## make sure local tunnel directory exists dir.create(tunnel_dir) ## get username if not provided - if(is.null(user)){ + if (is.null(user)) { user <- readline("Username:: ") } ## get password if not provided - if(is.null(password)){ + if (is.null(password)) { password <- getPass::getPass() } - sshTunnel <- file.path(tunnel_dir, "tunnel") - sshPID <- file.path(tunnel_dir, "pid") + sshTunnel <- file.path(tunnel_dir, "tunnel") + sshPID <- file.path(tunnel_dir, "pid") sshPassFile <- file.path(tunnel_dir, "password") - if(file.exists(sshTunnel)){ + if (file.exists(sshTunnel)) { PEcAn.logger::logger.warn("Tunnel already exists. 
If tunnel is not working try calling kill.tunnel then reopen") return(TRUE) } @@ -38,13 +37,13 @@ open_tunnel <- function(remote_host, user = NULL, password = NULL, tunnel_dir = PEcAn.logger::logger.warn(sshPassFile) write(password, file = sshPassFile) -# start <- system(paste0("ssh -nN -o ControlMaster=yes -o ControlPath=",sshTunnel," -l ",user," ",remote_host),wait = FALSE,input = password) -# Sys.sleep(5) -# end <- system2("send",password) + # start <- system(paste0("ssh -nN -o ControlMaster=yes -o ControlPath=",sshTunnel," -l ",user," ",remote_host),wait = FALSE,input = password) + # Sys.sleep(5) + # end <- system2("send",password) - stat <- system(paste(tunnel_script, remote_host, user, tunnel_dir), wait=FALSE) + stat <- system(paste(tunnel_script, remote_host, user, tunnel_dir), wait = FALSE) - ##wait for tunnel to connect + ## wait for tunnel to connect Sys.sleep(wait.time) if (file.exists(sshPassFile)) { @@ -59,5 +58,4 @@ open_tunnel <- function(remote_host, user = NULL, password = NULL, tunnel_dir = } else { return(TRUE) } - } diff --git a/base/remote/R/qsub_get_jobid.R b/base/remote/R/qsub_get_jobid.R index 3a7de77edda..cfb0df7b720 100644 --- a/base/remote/R/qsub_get_jobid.R +++ b/base/remote/R/qsub_get_jobid.R @@ -18,8 +18,7 @@ qsub_get_jobid <- function(out, qsub.jobid, stop.on.error) { } jobid <- NA } else { - jobid <- sub(qsub.jobid, '\\1', out) + jobid <- sub(qsub.jobid, "\\1", out) } return(jobid) } - diff --git a/base/remote/R/qsub_parallel.R b/base/remote/R/qsub_parallel.R index 064672f11a1..b427068c93f 100644 --- a/base/remote/R/qsub_parallel.R +++ b/base/remote/R/qsub_parallel.R @@ -8,18 +8,18 @@ #' @export #' @examples #' \dontrun{ -#' qsub_parallel(settings) +#' qsub_parallel(settings) #' } #' @author Dongchen Zhang -#' +#' #' @importFrom foreach %dopar% #' @importFrom dplyr %>% qsub_parallel <- function(settings, files = NULL, prefix = "sipnet.out", sleep = 10, hybrid = TRUE) { - if("try-error" %in% class(try(find.package("doSNOW"), silent = T))){ + if ("try-error" %in% class(try(find.package("doSNOW"), silent = T))) { PEcAn.logger::logger.info("Package doSNOW is not installed! Please install it and rerun the function!") return(0) } - #declare variables within foreach section + # declare variables within foreach section run <- NULL folder <- NULL run_list <- readLines(con = file.path(settings$rundir, "runs.txt")) @@ -31,15 +31,15 @@ qsub_parallel <- function(settings, files = NULL, prefix = "sipnet.out", sleep = cores <- parallel::detectCores() cl <- parallel::makeCluster(cores) doSNOW::registerDoSNOW(cl) - #progress bar - pb <- utils::txtProgressBar(min=1, max=ifelse(is.null(files), length(run_list), length(files)), style=3) + # progress bar + pb <- utils::txtProgressBar(min = 1, max = ifelse(is.null(files), length(run_list), length(files)), style = 3) progress <- function(n) utils::setTxtProgressBar(pb, n) - opts <- list(progress=progress) + opts <- list(progress = progress) PEcAn.logger::logger.info("Submitting jobs!") # if we want to submit jobs separately. 
- if(is.null(files)){ + if (is.null(files)) { if (is_qsub) { - jobids <- foreach::foreach(run = run_list, .packages="Kendall", .options.snow=opts, settings = rep(settings, length(run_list))) %dopar% { + jobids <- foreach::foreach(run = run_list, .packages = "Kendall", .options.snow = opts, settings = rep(settings, length(run_list))) %dopar% { run_id_string <- format(run, scientific = FALSE) qsub <- settings$host$qsub qsub <- gsub("@NAME@", paste0("PEcAn-", run_id_string), qsub) @@ -48,32 +48,33 @@ qsub_parallel <- function(settings, files = NULL, prefix = "sipnet.out", sleep = qsub <- strsplit(qsub, " (?=([^\"']*\"[^\"']*\")*[^\"']*$)", perl = TRUE) # start the actual model run cmd <- qsub[[1]] - if(PEcAn.remote::is.localhost(settings$host)){ + if (PEcAn.remote::is.localhost(settings$host)) { out <- system2(cmd, file.path(settings$host$rundir, run_id_string, "job.sh"), stdout = TRUE, stderr = TRUE) - }else{ + } else { out <- PEcAn.remote::remote.execute.cmd(settings$host, cmd, file.path(settings$host$rundir, run_id_string, "job.sh"), stderr = TRUE) } jobid <- PEcAn.remote::qsub_get_jobid( out = out[length(out)], qsub.jobid = settings$host$qsub.jobid, - stop.on.error = TRUE) + stop.on.error = TRUE + ) return(jobid) } } else if (is_rabbitmq) { - out <- foreach::foreach(run = run_list, .packages="Kendall", .options.snow=opts, settings = rep(settings, length(run_list))) %dopar% { + out <- foreach::foreach(run = run_list, .packages = "Kendall", .options.snow = opts, settings = rep(settings, length(run_list))) %dopar% { run_id_string <- format(run, scientific = FALSE) PEcAn.remote::start_rabbitmq(file.path(settings$host$rundir, run_id_string), settings$host$rabbitmq$uri, settings$host$rabbitmq$queue) } } - }else{ + } else { # if we want to submit merged job files. std_out <- file.path(settings$host$outdir, "merged_stdout") - if(!dir.exists(std_out)){ + if (!dir.exists(std_out)) { dir.create(std_out) - }else{ + } else { unlink(list.files(std_out, recursive = T, full.names = T)) } - jobids <- foreach::foreach(file = files, .packages="Kendall", .options.snow=opts, settings = rep(settings, length(files))) %dopar% { + jobids <- foreach::foreach(file = files, .packages = "Kendall", .options.snow = opts, settings = rep(settings, length(files))) %dopar% { qsub <- settings$host$qsub base_name <- basename(file) num <- gsub("\\D", "", base_name) @@ -83,36 +84,38 @@ qsub_parallel <- function(settings, files = NULL, prefix = "sipnet.out", sleep = qsub <- gsub("@STDERR@", file.path(std_out, paste0("stderr", num, ".log")), qsub) qsub <- strsplit(qsub, " (?=([^\"']*\"[^\"']*\")*[^\"']*$)", perl = TRUE) cmd <- qsub[[1]] - if(PEcAn.remote::is.localhost(settings$host)){ + if (PEcAn.remote::is.localhost(settings$host)) { out <- system2(cmd, file, stdout = TRUE, stderr = TRUE) - }else{ + } else { out <- PEcAn.remote::remote.execute.cmd(settings$host, cmd, file, stderr = TRUE) } jobid <- PEcAn.remote::qsub_get_jobid( out = out[length(out)], qsub.jobid = settings$host$qsub.jobid, - stop.on.error = TRUE) + stop.on.error = TRUE + ) return(jobid) } } PEcAn.logger::logger.info("Jobs submitted!") - #check if jobs are completed + # check if jobs are completed PEcAn.logger::logger.info("Checking the qsub jobs status!") PEcAn.logger::logger.info(paste0("Checking the file ", prefix)) ## setup progressbar folders <- file.path(settings$host$outdir, run_list) L_folder <- length(folders) pb <- utils::txtProgressBar(min = 0, max = L_folder, style = 3) - #here we not only detect if the target files are generated. 
-  #we also detect if the jobs are still existed on the server.
+  # here we not only detect if the target files are generated.
+  # we also detect whether the jobs still exist on the server.
   if (is_rabbitmq) {
     while ((L_folder - length(folders)) < L_folder) {
       Sys.sleep(sleep)
-      completed_folders <- foreach::foreach(folder = folders) %dopar% {
-        if(file.exists(file.path(folder, prefix))){
-          return(folder)
-        }
-      } %>% unlist()
+      completed_folders <- foreach::foreach(folder = folders) %dopar%
+        {
+          if (file.exists(file.path(folder, prefix))) {
+            return(folder)
+          }
+        } %>% unlist()
       folders <- folders[which(!folders %in% completed_folders)]
       pbi <- L_folder - length(folders)
       utils::setTxtProgressBar(pb, pbi)
@@ -121,53 +124,60 @@ qsub_parallel <- function(settings, files = NULL, prefix = "sipnet.out", sleep =
     L_jobid <- length(jobids)
     pb1 <- utils::txtProgressBar(min = 0, max = L_jobid, style = 3)
     if (hybrid) {
-      while ((L_folder - length(folders)) < L_folder &
-             (L_jobid - length(jobids)) < L_jobid) {
+      while ((L_folder - length(folders)) < L_folder &
+        (L_jobid - length(jobids)) < L_jobid) {
         Sys.sleep(sleep)
-        completed_folders <- foreach::foreach(folder = folders) %dopar% {
-          if(file.exists(file.path(folder, prefix))){
-            return(folder)
-          }
-        } %>% unlist()
+        completed_folders <- foreach::foreach(folder = folders) %dopar%
+          {
+            if (file.exists(file.path(folder, prefix))) {
+              return(folder)
+            }
+          } %>% unlist()
         folders <- folders[which(!folders %in% completed_folders)]
-
-        #or we can try detect if the jobs are still on the server.
-        #specify the host and qstat arguments for the future_map function.
+
+        # or we can try to detect if the jobs are still on the server.
+        # specify the host and qstat arguments for the future_map function.
         host <- settings$host
         qstat <- host$qstat
-        completed_jobs <- jobids %>% furrr::future_map(function(id) {
-          if (PEcAn.remote::qsub_run_finished(
-            run = id,
-            host = host,
-            qstat = qstat)) {
-            return(id)
-          }
-        }) %>% unlist()
+        completed_jobs <- jobids %>%
+          furrr::future_map(function(id) {
+            if (PEcAn.remote::qsub_run_finished(
+              run = id,
+              host = host,
+              qstat = qstat
+            )) {
+              return(id)
+            }
+          }) %>%
+          unlist()
         jobids <- jobids[which(!jobids %in% completed_jobs)]
-
-        #compare two progresses and set the maximum progress for the progress bar.
+
+        # compare the two progress counts and set the maximum progress for the progress bar.
         pbi <- L_folder - length(folders)
         utils::setTxtProgressBar(pb, pbi)
       }
     } else {
-      #special case that only detect the job ids on the server.
+      # special case that only detects the job ids on the server.
       while ((L_jobid - length(jobids)) < L_jobid) {
-        #detect if the jobs are still on the server.
-        #specify the host and qstat arguments for the future_map function.
+        # detect if the jobs are still on the server.
+        # specify the host and qstat arguments for the future_map function.
        Sys.sleep(sleep)
        host <- settings$host
        qstat <- host$qstat
-        completed_jobs <- jobids %>% furrr::future_map(function(id) {
-          if (PEcAn.remote::qsub_run_finished(
-            run = id,
-            host = host,
-            qstat = qstat)) {
-            return(id)
-          }
-        }) %>% unlist()
+        completed_jobs <- jobids %>%
+          furrr::future_map(function(id) {
+            if (PEcAn.remote::qsub_run_finished(
+              run = id,
+              host = host,
+              qstat = qstat
+            )) {
+              return(id)
+            }
+          }) %>%
+          unlist()
        jobids <- jobids[which(!jobids %in% completed_jobs)]
-
-        #compare two progresses and set the maximum progress for the progress bar.
+
+        # compare the two progress counts and set the maximum progress for the progress bar.
pbi1 <- L_jobid - length(jobids) utils::setTxtProgressBar(pb1, pbi1) } @@ -176,4 +186,4 @@ qsub_parallel <- function(settings, files = NULL, prefix = "sipnet.out", sleep = close(pb) parallel::stopCluster(cl) PEcAn.logger::logger.info("Completed!") -} \ No newline at end of file +} diff --git a/base/remote/R/rabbitmq.R b/base/remote/R/rabbitmq.R index 102ec52fb7a..3c995316047 100644 --- a/base/remote/R/rabbitmq.R +++ b/base/remote/R/rabbitmq.R @@ -8,7 +8,7 @@ #' @param port the port for rabbitmq managment interface #' @return a list that contains the url to the mangement interface, username #' password and vhost. -rabbitmq_parse_uri <- function(uri, prefix="", port=15672) { +rabbitmq_parse_uri <- function(uri, prefix = "", port = 15672) { # save username/password if (!grepl("@", uri, fixed = TRUE)) { PEcAn.logger::logger.info("rabbitmq uri is not recognized, missing username and password, assuming guest/guest") @@ -43,7 +43,7 @@ rabbitmq_parse_uri <- function(uri, prefix="", port=15672) { url <- urltools::url_compose(url_split) - return(list(url=url, vhost=vhost, username=upw[[1]], password=upw[[2]])) + return(list(url = url, vhost = vhost, username = upw[[1]], password = upw[[2]])) } #' Send a message to RabbitMQ rest API. @@ -148,7 +148,7 @@ rabbitmq_create_queue <- function(url, auth, vhost, queue, auto_delete = FALSE, #' @return the result of the post if message was send, or NA if it failed. #' @author Alexey Shiklomanov, Rob Kooper #' @export -rabbitmq_post_message <- function(uri, queue, message, prefix="", port=15672) { +rabbitmq_post_message <- function(uri, queue, message, prefix = "", port = 15672) { # parse rabbitmq URI rabbitmq <- rabbitmq_parse_uri(uri, prefix, port) if (length(rabbitmq) != 4) { @@ -187,7 +187,7 @@ rabbitmq_post_message <- function(uri, queue, message, prefix="", port=15672) { #' @return NA if no message was retrieved, or a list of the messages payload. 
#' @author Alexey Shiklomanov, Rob Kooper #' @export -rabbitmq_get_message <- function(uri, queue, count=1, prefix="", port=15672) { +rabbitmq_get_message <- function(uri, queue, count = 1, prefix = "", port = 15672) { # parse rabbitmq URI rabbitmq <- rabbitmq_parse_uri(uri, prefix, port) if (length(rabbitmq) != 4) { @@ -216,7 +216,11 @@ rabbitmq_get_message <- function(uri, queue, count=1, prefix="", port=15672) { if (length(result) == 1 && result == "") { return(c()) } else { - return(lapply(result, function(x) { tryCatch(jsonlite::fromJSON(x$payload), error=function(e) { x$payload }) })) + return(lapply(result, function(x) { + tryCatch(jsonlite::fromJSON(x$payload), error = function(e) { + x$payload + }) + })) } } } diff --git a/base/remote/R/remote.copy.from.R b/base/remote/R/remote.copy.from.R index 2fad9044ad7..99da5567d31 100644 --- a/base/remote/R/remote.copy.from.R +++ b/base/remote/R/remote.copy.from.R @@ -16,8 +16,8 @@ #' @export #' @examples #' \dontrun{ -#' host <- list(name='geo.bu.edu', user='kooper', tunnel='/tmp/geo.tunnel') -#' remote.copy.from(host, '/tmp/kooper', '/tmp/geo.tmp', delete=TRUE) +#' host <- list(name = "geo.bu.edu", user = "kooper", tunnel = "/tmp/geo.tunnel") +#' remote.copy.from(host, "/tmp/kooper", "/tmp/geo.tmp", delete = TRUE) #' } remote.copy.from <- function(host, src, dst, options = NULL, delete = FALSE, stderr = FALSE) { args <- c("-az", "-q", options) @@ -28,15 +28,16 @@ remote.copy.from <- function(host, src, dst, options = NULL, delete = FALSE, std args <- c(args, src, dst) } else { tunnel <- host$tunnel - if(!is.null(host$data_tunnel)) tunnel <- host$data_tunnel + if (!is.null(host$data_tunnel)) tunnel <- host$data_tunnel hostname <- host$name - if(!is.null(host$data_hostname)) hostname <- host$data_hostname + if (!is.null(host$data_hostname)) hostname <- host$data_hostname if (!is.null(tunnel)) { if (!file.exists(tunnel)) { PEcAn.logger::logger.severe("Could not find tunnel", tunnel) } args <- c(args, "-e", paste0("ssh -o ControlPath=\"", tunnel, "\"", - collapse = "")) + collapse = "" + )) args <- c(args, paste0(hostname, ":", src), dst) } else if (!is.null(host$user)) { args <- c(args, paste0(host$user, "@", hostname, ":", src), dst) diff --git a/base/remote/R/remote.copy.to.R b/base/remote/R/remote.copy.to.R index 6f3e2d870bc..ed021703d57 100644 --- a/base/remote/R/remote.copy.to.R +++ b/base/remote/R/remote.copy.to.R @@ -14,8 +14,8 @@ #' @export #' @examples #' \dontrun{ -#' host <- list(name='geo.bu.edu', user='kooper', tunnel='/tmp/geo.tunnel') -#' remote.copy.to(host, '/tmp/kooper', '/tmp/kooper', delete=TRUE) +#' host <- list(name = "geo.bu.edu", user = "kooper", tunnel = "/tmp/geo.tunnel") +#' remote.copy.to(host, "/tmp/kooper", "/tmp/kooper", delete = TRUE) #' } remote.copy.to <- function(host, src, dst, options = NULL, delete = FALSE, stderr = FALSE) { args <- c("-a", "-q", options) @@ -44,7 +44,8 @@ remote.copy.to <- function(host, src, dst, options = NULL, delete = FALSE, stder PEcAn.logger::logger.severe("Could not find tunnel", tunnel) } args <- c(args, "-e", paste0("ssh -o ControlPath=\"", tunnel, "\"", - collapse = "")) + collapse = "" + )) args <- c(args, src, paste0(hostname, ":", dst)) } else if (!is.null(host$user)) { args <- c(args, src, paste0(host$user, "@", hostname, ":", dst)) @@ -53,7 +54,7 @@ remote.copy.to <- function(host, src, dst, options = NULL, delete = FALSE, stder } } PEcAn.logger::logger.debug("rsync", shQuote(args)) - out <- + out <- system2("rsync", shQuote(args), stdout = "", stderr = as.logical(stderr)) 
if (out != 0) { PEcAn.logger::logger.severe(paste0("rsync status: ", out)) diff --git a/base/remote/R/remote.execute.R.R b/base/remote/R/remote.execute.R.R index 1ea80b8d031..5e417358f8b 100644 --- a/base/remote/R/remote.execute.R.R +++ b/base/remote/R/remote.execute.R.R @@ -16,7 +16,7 @@ #' @export #' @examples #' \dontrun{ -#' remote.execute.R('list.files()', host='localhost', verbose=FALSE) +#' remote.execute.R("list.files()", host = "localhost", verbose = FALSE) #' } remote.execute.R <- function(script, host = "localhost", user = NA, verbose = FALSE, R = "R", scratchdir = tempdir()) { @@ -25,14 +25,17 @@ remote.execute.R <- function(script, host = "localhost", user = NA, verbose = FA } dir.create(scratchdir, showWarnings = FALSE, recursive = TRUE) uuid <- paste0("pecan-", paste(sample(c(letters[1:6], 0:9), 30, replace = TRUE), - collapse = "")) + collapse = "" + )) tmpfile <- file.path(scratchdir, uuid) - input <- c(paste0("remotefunc <- function() {", script, "}"), - "remoteout <- remotefunc()", - "print(remoteout)", - paste0("fp <- file('", tmpfile, "', 'w')"), - paste0("ign <- serialize(remoteout, fp)"), - "close(fp)") + input <- c( + paste0("remotefunc <- function() {", script, "}"), + "remoteout <- remotefunc()", + "print(remoteout)", + paste0("fp <- file('", tmpfile, "', 'w')"), + paste0("ign <- serialize(remoteout, fp)"), + "close(fp)" + ) verbose <- ifelse(as.logical(verbose), "", FALSE) if (is.localhost(host)) { if (R == "R") { @@ -41,8 +44,10 @@ remote.execute.R <- function(script, host = "localhost", user = NA, verbose = FA R <- Rbinary } } - result <- try(system2(R, "--no-save","--no-restore", stdout = verbose, stderr = verbose, - input = input)) + result <- try(system2(R, "--no-save", "--no-restore", + stdout = verbose, stderr = verbose, + input = input + )) PEcAn.logger::logger.debug(result) if (!file.exists(tmpfile)) { fp <- file(tmpfile, "w") @@ -56,7 +61,6 @@ remote.execute.R <- function(script, host = "localhost", user = NA, verbose = FA file.remove(tmpfile) PEcAn.logger::logger.debug(result) return(invisible(result)) - } else { remote <- c(host$name) if (!is.null(host$tunnel)) { @@ -68,8 +72,10 @@ remote.execute.R <- function(script, host = "localhost", user = NA, verbose = FA remote <- c("-l", host$user, remote) } PEcAn.logger::logger.debug(paste(c("ssh", "-T", remote, R), collapse = " ")) - result <- system2("ssh", c("-T", remote, R, "--no-save","--no-restore"), stdout = verbose, - stderr = verbose, input = input) + result <- system2("ssh", c("-T", remote, R, "--no-save", "--no-restore"), + stdout = verbose, + stderr = verbose, input = input + ) remote.copy.from(host, tmpfile, uuid) remote.execute.cmd(host, "rm", c("-f", tmpfile)) # load result @@ -79,7 +85,4 @@ remote.execute.R <- function(script, host = "localhost", user = NA, verbose = FA file.remove(uuid) return(invisible(result)) } - - } # remote.execute.R - diff --git a/base/remote/R/remote.execute.cmd.R b/base/remote/R/remote.execute.cmd.R index 65670ea416b..d5988463bb1 100644 --- a/base/remote/R/remote.execute.cmd.R +++ b/base/remote/R/remote.execute.cmd.R @@ -14,23 +14,23 @@ #' @export #' @examples #' \dontrun{ -#' host <- list(name='geo.bu.edu', user='kooper', tunnel='/tmp/geo.tunnel') -#' print(remote.execute.cmd(host, 'ls', c('-l', '/'), stderr=TRUE)) +#' host <- list(name = "geo.bu.edu", user = "kooper", tunnel = "/tmp/geo.tunnel") +#' print(remote.execute.cmd(host, "ls", c("-l", "/"), stderr = TRUE)) #' } remote.execute.cmd <- function(host, cmd, args = character(), stderr = FALSE) { - 
if(is.null(host)) { + if (is.null(host)) { PEcAn.logger::logger.severe("`host` cannot be `NULL` for remote execution") } - + if (is.character(host)) { host <- list(name = host) } if (is.localhost(host)) { - PEcAn.logger::logger.debug(paste(c(cmd, args), collapse = ' ')) + PEcAn.logger::logger.debug(paste(c(cmd, args), collapse = " ")) system2(cmd, args, stdout = TRUE, stderr = as.logical(stderr)) } else { - if(is.null(host$name) || host$name == "") { + if (is.null(host$name) || host$name == "") { PEcAn.logger::logger.severe("`name`parameter in `host` object cannot be `NULL` or empty for remote execution") } remote <- host$name @@ -45,4 +45,4 @@ remote.execute.cmd <- function(host, cmd, args = character(), stderr = FALSE) { PEcAn.logger::logger.debug(paste(c("ssh", "-T", remote, cmd, args), collapse = " ")) system2("ssh", c("-T", remote, cmd, args), stdout = TRUE, stderr = as.logical(stderr)) } -} # remote.execute.cmd \ No newline at end of file +} # remote.execute.cmd diff --git a/base/remote/R/setup_modellauncher.R b/base/remote/R/setup_modellauncher.R index 1140ea94f3f..0a6120638c6 100644 --- a/base/remote/R/setup_modellauncher.R +++ b/base/remote/R/setup_modellauncher.R @@ -16,7 +16,8 @@ setup_modellauncher <- function(run, rundir, host_rundir, mpirun, binary) { jobfile <- file(file.path(run_id_dir, "joblist.txt"), "w") writeLines(c("#!/bin/bash", paste(mpirun, binary, file.path(host_rundir, run_string, "joblist.txt"))), - con = launcherfile) + con = launcherfile + ) # making sure we can run the file. Sys.chmod(launcherfile, "755") # or 744 for letting just the owner # writing the job diff --git a/base/remote/R/start.model.runs.R b/base/remote/R/start.model.runs.R index b3ee6e38290..fe1014db648 100644 --- a/base/remote/R/start.model.runs.R +++ b/base/remote/R/start.model.runs.R @@ -14,11 +14,11 @@ ##' @author Shawn Serbin, Rob Kooper, David LeBauer, Alexey Shiklomanov ##' start.model.runs <- function(settings, write = TRUE, stop.on.error = TRUE) { - .Defunct("PEcAn.workflow::start_model_runs") + .Defunct("PEcAn.workflow::start_model_runs") } ##' @export ##' @rdname start.model.runs -runModule.start.model.runs <- function(settings,stop.on.error=TRUE) { - .Defunct("PEcAn.workflow::runModule_start_model_runs") -} \ No newline at end of file +runModule.start.model.runs <- function(settings, stop.on.error = TRUE) { + .Defunct("PEcAn.workflow::runModule_start_model_runs") +} diff --git a/base/remote/R/start_qsub.R b/base/remote/R/start_qsub.R index 1283cd7fbc5..5daa6a76eab 100644 --- a/base/remote/R/start_qsub.R +++ b/base/remote/R/start_qsub.R @@ -16,9 +16,8 @@ start_qsub <- function(run, qsub_string, rundir, host, host_rundir, host_outdir, stdout_log, stderr_log, job_script, qsub_extra = NULL) { - run_id_string <- format(run, scientific = FALSE) - + qsub <- gsub("@NAME@", paste0("PEcAn-", run_id_string), qsub_string) qsub <- gsub("@STDOUT@", file.path(host_outdir, run_id_string, stdout_log), qsub) qsub <- gsub("@STDERR@", file.path(host_outdir, run_id_string, stderr_log), qsub) diff --git a/base/remote/R/start_rabbitmq.R b/base/remote/R/start_rabbitmq.R index 4e565aca855..4de8980fcc6 100644 --- a/base/remote/R/start_rabbitmq.R +++ b/base/remote/R/start_rabbitmq.R @@ -7,7 +7,7 @@ #' (see [rabbitmq_post_message()]). 
#' @export start_rabbitmq <- function(folder, rabbitmq_uri, rabbitmq_queue) { - message <- list("folder"=folder) + message <- list("folder" = folder) prefix <- Sys.getenv("RABBITMQ_PREFIX", "") port <- Sys.getenv("RABBITMQ_PORT", "15672") out <- rabbitmq_post_message(rabbitmq_uri, rabbitmq_queue, message, prefix, port) diff --git a/base/remote/R/test_remote.R b/base/remote/R/test_remote.R index 7370144863a..36bafaa60a5 100644 --- a/base/remote/R/test_remote.R +++ b/base/remote/R/test_remote.R @@ -1,7 +1,7 @@ #' Test remote execution #' #' @inheritParams remote.execute.cmd -#' +#' #' @param ... additional arguments. #' #' @return `TRUE` is remote execution is successful. @@ -33,7 +33,7 @@ test_remote <- function(host, stderr = TRUE, ...) { if (length(out) > 0 && out == test_string) { return(TRUE) } else { - msg <- paste("Error in remote execution. Here is the remote output:\n", paste(out, collapse = '\n')) + msg <- paste("Error in remote execution. Here is the remote output:\n", paste(out, collapse = "\n")) if (stderr) { PEcAn.logger::logger.severe(msg) } else { diff --git a/base/remote/tests/testthat.R b/base/remote/tests/testthat.R index b7315d4ad32..7fbb77367ec 100644 --- a/base/remote/tests/testthat.R +++ b/base/remote/tests/testthat.R @@ -2,4 +2,3 @@ library(PEcAn.remote) library(testthat) test_check("PEcAn.remote") - diff --git a/base/remote/tests/testthat/test-fqdn.R b/base/remote/tests/testthat/test-fqdn.R index 44985046f2d..b648cc38367 100644 --- a/base/remote/tests/testthat/test-fqdn.R +++ b/base/remote/tests/testthat/test-fqdn.R @@ -3,11 +3,10 @@ test_that("fqdn() returns exactly one result", { }) test_that("`fqdn()` returns expected `FQDN` value", { - withr::with_envvar(c(FQDN = "pecan_host"), { expect_equal(fqdn(), "pecan_host") }) withr::with_envvar(c(FQDN = ""), { expect_equal(fqdn(), as.character(Sys.info()["nodename"])) }) -}) \ No newline at end of file +}) diff --git a/base/remote/tests/testthat/test.check_model_run.R b/base/remote/tests/testthat/test.check_model_run.R index 592c2209e7f..eea6e1b2fd3 100644 --- a/base/remote/tests/testthat/test.check_model_run.R +++ b/base/remote/tests/testthat/test.check_model_run.R @@ -1,10 +1,10 @@ -test_that("`check_model_run()` gives correct output for the passed `out` value",{ +test_that("`check_model_run()` gives correct output for the passed `out` value", { # failure expect_error( - check_model_run(c("ERROR IN MODEL RUN")), + check_model_run(c("ERROR IN MODEL RUN")), "Model run aborted with the following error:\nERROR IN MODEL RUN" ) # success expect_equal(check_model_run(c("SUCCESS")), TRUE) -}) \ No newline at end of file +}) diff --git a/base/remote/tests/testthat/test.kill.tunnel.R b/base/remote/tests/testthat/test.kill.tunnel.R index c75c81f1ebe..20727a03985 100644 --- a/base/remote/tests/testthat/test.kill.tunnel.R +++ b/base/remote/tests/testthat/test.kill.tunnel.R @@ -1,7 +1,7 @@ test_that("`kill.tunnel()` able to read the correct files and log the correct messages to kill tunnel for exe and data", { withr::with_dir(tempdir(), { - mockery::stub(kill.tunnel, 'tools::pskill', TRUE) - mockery::stub(kill.tunnel, 'dirname', getwd()) + mockery::stub(kill.tunnel, "tools::pskill", TRUE) + mockery::stub(kill.tunnel, "dirname", getwd()) # Kill tunnel to executable settings <- list(host = list(tunnel = getwd())) @@ -17,4 +17,4 @@ test_that("`kill.tunnel()` able to read the correct files and log the correct me writeLines("3456", file_path) expect_output(kill.tunnel(settings), "Killing tunnel with PID 3456") }) -}) \ No newline at end 
of file +}) diff --git a/base/remote/tests/testthat/test.localhost.R b/base/remote/tests/testthat/test.localhost.R index 801dc007f9f..b04d42bd239 100644 --- a/base/remote/tests/testthat/test.localhost.R +++ b/base/remote/tests/testthat/test.localhost.R @@ -1,4 +1,4 @@ -test_that('is.localhost works', { +test_that("is.localhost works", { expect_true(is.localhost("localhost")) expect_true(is.localhost(fqdn())) expect_true(is.localhost(list(name = fqdn()))) diff --git a/base/remote/tests/testthat/test.qsub_get_jobid.R b/base/remote/tests/testthat/test.qsub_get_jobid.R index 6621a4eb1bd..8a4ac04416a 100644 --- a/base/remote/tests/testthat/test.qsub_get_jobid.R +++ b/base/remote/tests/testthat/test.qsub_get_jobid.R @@ -1,10 +1,8 @@ test_that("qsub_get_jobid returns the correct job ID", { - out_with_jobid <- "Job ID: 1234" out_no_jobid <- "Job ID: " qsub.jobid <- "Job ID: (\\d+)" - + expect_equal(qsub_get_jobid(out_with_jobid, qsub.jobid, FALSE), "1234") expect_equal(qsub_get_jobid(out_no_jobid, qsub.jobid, FALSE), NA) - -}) \ No newline at end of file +}) diff --git a/base/remote/tests/testthat/test.rabbitmq.R b/base/remote/tests/testthat/test.rabbitmq.R index 2d597a85e03..def9ce4f89e 100644 --- a/base/remote/tests/testthat/test.rabbitmq.R +++ b/base/remote/tests/testthat/test.rabbitmq.R @@ -8,9 +8,9 @@ test_that("`rabbitmq_parse_uri()` able to parse the rabbitmq uri to smaller vari }) test_that("`rabbitmq_send_message()` able to return content if the status code is between 200 and 299", { - mockery::stub(rabbitmq_send_message, 'httr::GET', data.frame(status_code = 200)) - mockery::stub(rabbitmq_send_message, 'httr::content', "test") - res <- rabbitmq_send_message(url = 'test/', auth = 'test', body = 'test', action = "GET") + mockery::stub(rabbitmq_send_message, "httr::GET", data.frame(status_code = 200)) + mockery::stub(rabbitmq_send_message, "httr::content", "test") + res <- rabbitmq_send_message(url = "test/", auth = "test", body = "test", action = "GET") expect_equal(res, "test") }) @@ -20,60 +20,60 @@ test_that("`rabbitmq_send_message()` throws error where it should", { # errors if the action specified is unknown expect_output( - rabbitmq_send_message(url = 'test/', auth = 'test', body = 'test', action = "TEST"), + rabbitmq_send_message(url = "test/", auth = "test", body = "test", action = "TEST"), "uknown action TEST" ) # errors if the status code is 401 (username/password may be incorrect) - mockery::stub(rabbitmq_send_message, 'httr::GET', data.frame(status_code = 401)) + mockery::stub(rabbitmq_send_message, "httr::GET", data.frame(status_code = 401)) expect_output( - rabbitmq_send_message(url = 'test/', auth = 'test', body = 'test', action = "GET"), + rabbitmq_send_message(url = "test/", auth = "test", body = "test", action = "GET"), "error sending message to rabbitmq" ) # errors if the status code is outside of 200-299 and not 401 - mockery::stub(rabbitmq_send_message, 'httr::GET', data.frame(status_code = 501)) - mockery::stub(rabbitmq_send_message, 'httr::content', "test") + mockery::stub(rabbitmq_send_message, "httr::GET", data.frame(status_code = 501)) + mockery::stub(rabbitmq_send_message, "httr::content", "test") expect_output( - rabbitmq_send_message(url = 'test/', auth = 'test', body = 'test', action = "GET"), + rabbitmq_send_message(url = "test/", auth = "test", body = "test", action = "GET"), "error sending message to rabbitmq \\[ 501 \\]" ) }) test_that("`rabbitmq_create_queue()` able to take care of condition if the queue already exists or not while creating a queue", { - 
mocked_res <- mockery::mock(NA, 'test') - mockery::stub(rabbitmq_create_queue, 'rabbitmq_send_message', mocked_res) - res <- rabbitmq_create_queue(url = 'test', auth = 'test', vhost = 'test', queue = 'test') + mocked_res <- mockery::mock(NA, "test") + mockery::stub(rabbitmq_create_queue, "rabbitmq_send_message", mocked_res) + res <- rabbitmq_create_queue(url = "test", auth = "test", vhost = "test", queue = "test") args <- mockery::mock_args(mocked_res) expect_equal(res, TRUE) - expect_equal(args[[1]][[4]], 'GET') - expect_equal(args[[2]][[4]], 'PUT') + expect_equal(args[[1]][[4]], "GET") + expect_equal(args[[2]][[4]], "PUT") }) test_that("`rabbitmq_post_message()` passes the right params to send message to rabbitmq", { - mocked_res <- mockery::mock('test') - mockery::stub(rabbitmq_post_message, 'rabbitmq_send_message', mocked_res) - mockery::stub(rabbitmq_post_message, 'rabbitmq_create_queue', TRUE) - res <- rabbitmq_post_message(uri = 'amqp://guest:guest@localhost:15672/myvhost', queue = 'test_queue', message = 'test_message') + mocked_res <- mockery::mock("test") + mockery::stub(rabbitmq_post_message, "rabbitmq_send_message", mocked_res) + mockery::stub(rabbitmq_post_message, "rabbitmq_create_queue", TRUE) + res <- rabbitmq_post_message(uri = "amqp://guest:guest@localhost:15672/myvhost", queue = "test_queue", message = "test_message") args <- mockery::mock_args(mocked_res) - expect_equal(res, 'test') - expect_equal(args[[1]][[1]], 'http://localhost:15672/api/exchanges/myvhost//publish') + expect_equal(res, "test") + expect_equal(args[[1]][[1]], "http://localhost:15672/api/exchanges/myvhost//publish") expect_equal(args[[1]][[3]]$properties$delivery_mode, 2) - expect_equal(args[[1]][[3]]$routing_key, 'test_queue') - expect_equal(args[[1]][[3]]$payload, jsonlite::toJSON('test_message', auto_unbox = TRUE)) - expect_equal(args[[1]][[3]]$payload_encoding, 'string') - expect_equal(args[[1]][[4]], 'POST') + expect_equal(args[[1]][[3]]$routing_key, "test_queue") + expect_equal(args[[1]][[3]]$payload, jsonlite::toJSON("test_message", auto_unbox = TRUE)) + expect_equal(args[[1]][[3]]$payload_encoding, "string") + expect_equal(args[[1]][[4]], "POST") }) test_that("`rabbitmq_get_message()` passes the right params to send message to rabbitmq", { mocked_res <- mockery::mock(NA) - mockery::stub(rabbitmq_get_message, 'rabbitmq_send_message', mocked_res) - mockery::stub(rabbitmq_get_message, 'rabbitmq_create_queue', TRUE) - res <- rabbitmq_get_message(uri = 'amqp://guest:guest@localhost:15672/myvhost', queue = 'test_queue') + mockery::stub(rabbitmq_get_message, "rabbitmq_send_message", mocked_res) + mockery::stub(rabbitmq_get_message, "rabbitmq_create_queue", TRUE) + res <- rabbitmq_get_message(uri = "amqp://guest:guest@localhost:15672/myvhost", queue = "test_queue") args <- mockery::mock_args(mocked_res) - expect_equal(args[[1]][[1]], 'http://localhost:15672/api/queues/myvhost/test_queue/get') + expect_equal(args[[1]][[1]], "http://localhost:15672/api/queues/myvhost/test_queue/get") expect_equal(args[[1]][[3]]$count, 1) - expect_equal(args[[1]][[3]]$ackmode, 'ack_requeue_false') - expect_equal(args[[1]][[3]]$encoding, 'auto') - expect_equal(args[[1]][[4]], 'POST') -}) \ No newline at end of file + expect_equal(args[[1]][[3]]$ackmode, "ack_requeue_false") + expect_equal(args[[1]][[3]]$encoding, "auto") + expect_equal(args[[1]][[4]], "POST") +}) diff --git a/base/remote/tests/testthat/test.remote.R b/base/remote/tests/testthat/test.remote.R index 61a62345c46..f2d565ab817 100644 --- 
a/base/remote/tests/testthat/test.remote.R +++ b/base/remote/tests/testthat/test.remote.R @@ -3,9 +3,9 @@ library(PEcAn.remote) library(testthat) good_host <- list(name = "localhost") -bad_host <- list(name = 'bigbadwolf') +bad_host <- list(name = "bigbadwolf") test_that("test_remote identifies good and bad hosts", { expect_true(test_remote(good_host)) expect_error(test_remote(bad_host)) expect_false(test_remote(bad_host, stderr = FALSE)) -}) \ No newline at end of file +}) diff --git a/base/remote/tests/testthat/test.remote.copy.from.R b/base/remote/tests/testthat/test.remote.copy.from.R index 635b4ce5d84..79f048040a0 100644 --- a/base/remote/tests/testthat/test.remote.copy.from.R +++ b/base/remote/tests/testthat/test.remote.copy.from.R @@ -1,12 +1,12 @@ test_that("`remote.copy.from()` constructs the correct system command to be executed for doing the copy", { mocked_res <- mockery::mock(0) - mockery::stub(remote.copy.from, 'system2', mocked_res) - mockery::stub(remote.copy.from, 'file.exists', TRUE) - remote.copy.from(host = data.frame(name = 'pecan', tunnel = 'test_tunnel'), src = 'tmp/', dst = 'tmp/', delete = TRUE) + mockery::stub(remote.copy.from, "system2", mocked_res) + mockery::stub(remote.copy.from, "file.exists", TRUE) + remote.copy.from(host = data.frame(name = "pecan", tunnel = "test_tunnel"), src = "tmp/", dst = "tmp/", delete = TRUE) args <- mockery::mock_args(mocked_res) - expect_equal(args[[1]][[1]], 'rsync') + expect_equal(args[[1]][[1]], "rsync") expect_equal( - args[[1]][[2]], + args[[1]][[2]], shQuote(c("-az", "-q", "--delete", "-e", "ssh -o ControlPath=\"test_tunnel\"", "pecan:tmp/", "tmp/")) ) -}) \ No newline at end of file +}) diff --git a/base/remote/tests/testthat/test.remote.copy.to.R b/base/remote/tests/testthat/test.remote.copy.to.R index 3524ef4af9a..177e3f6f6fd 100644 --- a/base/remote/tests/testthat/test.remote.copy.to.R +++ b/base/remote/tests/testthat/test.remote.copy.to.R @@ -1,5 +1,5 @@ -test_that("`remote.copy.to()` raises errors for false inputs",{ +test_that("`remote.copy.to()` raises errors for false inputs", { expect_error(remote.copy.to(NULL, "path/to/local/file", "path/to/remote/file"), "`host` object passed to the function is NULL : Try passing a valid host object") - expect_error(remote.copy.to(host = list(name = "pecan",user="test_user", tunnel="test_tunnel"), "path/to/local/file", "path/to/remote/file"), "Could not find tunnel") - expect_error(remote.copy.to(host = list(name = "",user="test_user", tunnel="test_tunnel"), "path/to/local/file", "path/to/remote/file"), "`name` parameter in the `host` object is NULL or empty : Try passing a valid host object") -}) \ No newline at end of file + expect_error(remote.copy.to(host = list(name = "pecan", user = "test_user", tunnel = "test_tunnel"), "path/to/local/file", "path/to/remote/file"), "Could not find tunnel") + expect_error(remote.copy.to(host = list(name = "", user = "test_user", tunnel = "test_tunnel"), "path/to/local/file", "path/to/remote/file"), "`name` parameter in the `host` object is NULL or empty : Try passing a valid host object") +}) diff --git a/base/remote/tests/testthat/test.remote.execute.R.R b/base/remote/tests/testthat/test.remote.execute.R.R index f7c63eb6d8c..2a8fd017f98 100644 --- a/base/remote/tests/testthat/test.remote.execute.R.R +++ b/base/remote/tests/testthat/test.remote.execute.R.R @@ -14,4 +14,3 @@ result <- remote.execute.R(script = code, host = host) test_that("Remote execute R works as expected", { expect_identical(result, eval(parse(text = code))) }) - diff --git 
a/base/remote/tests/testthat/test.remote.execute.cmd.R b/base/remote/tests/testthat/test.remote.execute.cmd.R index 30e2aa30106..f3f75272f42 100644 --- a/base/remote/tests/testthat/test.remote.execute.cmd.R +++ b/base/remote/tests/testthat/test.remote.execute.cmd.R @@ -1,4 +1,3 @@ - host <- list(name = "localhost") echo_string <- "pecan" out <- remote.execute.cmd(host = host, cmd = "echo", args = echo_string) @@ -11,4 +10,4 @@ test_that("`remote.execute.cmd()` works correctly for incomplete inputs", { expect_error(remote.execute.cmd(NULL, "echo", ""), "`host` cannot be `NULL` for remote execution") expect_error(remote.execute.cmd(list(name = "geo.bu.edu", tunnel = "path/to/non/existent/tunnel"), "echo", ""), "Could not find tunnel") expect_error(remote.execute.cmd("", "echo", ""), "`name`parameter in `host` object cannot be `NULL` or empty for remote execution") -}) \ No newline at end of file +}) diff --git a/base/remote/tests/testthat/test.start_qsub.R b/base/remote/tests/testthat/test.start_qsub.R index dc54a6a97d9..94cf10876d3 100644 --- a/base/remote/tests/testthat/test.start_qsub.R +++ b/base/remote/tests/testthat/test.start_qsub.R @@ -1,11 +1,11 @@ test_that("`start_qsub()` able to correctly make the command to be executed remotely to start qsub runs", { mocked_res <- mockery::mock(0) - mockery::stub(start_qsub, 'remote.execute.cmd', mocked_res) + mockery::stub(start_qsub, "remote.execute.cmd", mocked_res) res <- start_qsub(1, "qsub -N @NAME@ -o @STDOUT@ -e @STDERR@", "test_rundir", "pecan", "test_host_rundir", "test_host_outdir", "test_stdout_log", "test_stderr_log", "test_job_script") args <- mockery::mock_args(mocked_res) - expect_equal(args[[1]][[1]], 'pecan') - expect_equal(args[[1]][[2]], c('qsub', '-N', 'PEcAn-1', '-o', 'test_host_outdir/1/test_stdout_log', '-e', 'test_host_outdir/1/test_stderr_log')) - expect_equal(args[[1]][[3]][[1]], 'test_host_rundir/1/test_job_script') + expect_equal(args[[1]][[1]], "pecan") + expect_equal(args[[1]][[2]], c("qsub", "-N", "PEcAn-1", "-o", "test_host_outdir/1/test_stdout_log", "-e", "test_host_outdir/1/test_stderr_log")) + expect_equal(args[[1]][[3]][[1]], "test_host_rundir/1/test_job_script") expect_equal(args[[1]]$stderr, TRUE) expect_equal(res, 0) -}) \ No newline at end of file +}) diff --git a/base/remote/tests/testthat/test.start_rabbitmq.R b/base/remote/tests/testthat/test.start_rabbitmq.R index 85156447903..e9dc8ce3f27 100644 --- a/base/remote/tests/testthat/test.start_rabbitmq.R +++ b/base/remote/tests/testthat/test.start_rabbitmq.R @@ -1,13 +1,13 @@ test_that("`start_rabbitmq()` able to correctly read the environment varibles and send desired values to rabbitmq_post_message", { - withr::with_envvar(c("RABBITMQ_PREFIX" = "prefix", "RABBITMQ_PORT" = "3000"),{ + withr::with_envvar(c("RABBITMQ_PREFIX" = "prefix", "RABBITMQ_PORT" = "3000"), { mocked_res <- mockery::mock(TRUE) - mockery::stub(start_rabbitmq, 'rabbitmq_post_message', mocked_res) - res <- start_rabbitmq('test_folder', 'test_uri', 'test_queue') + mockery::stub(start_rabbitmq, "rabbitmq_post_message", mocked_res) + res <- start_rabbitmq("test_folder", "test_uri", "test_queue") args <- mockery::mock_args(mocked_res) - expect_equal(args[[1]][[1]], 'test_uri') - expect_equal(args[[1]][[2]], 'test_queue') - expect_equal(args[[1]][[3]], list(folder = 'test_folder')) - expect_equal(args[[1]][[4]], 'prefix') - expect_equal(args[[1]][[5]], '3000') + expect_equal(args[[1]][[1]], "test_uri") + expect_equal(args[[1]][[2]], "test_queue") + expect_equal(args[[1]][[3]], list(folder = 
"test_folder")) + expect_equal(args[[1]][[4]], "prefix") + expect_equal(args[[1]][[5]], "3000") }) -}) \ No newline at end of file +}) diff --git a/base/remote/tests/testthat/test.start_serial.R b/base/remote/tests/testthat/test.start_serial.R index 8f0e558515b..4854eb1eba3 100644 --- a/base/remote/tests/testthat/test.start_serial.R +++ b/base/remote/tests/testthat/test.start_serial.R @@ -1,9 +1,9 @@ -test_that("`start_serial()` able to pass desired parameters to execute command remotely to start model execution in serial mode",{ +test_that("`start_serial()` able to pass desired parameters to execute command remotely to start model execution in serial mode", { mocked_res <- mockery::mock(TRUE) - mockery::stub(start_serial, 'remote.execute.cmd', mocked_res) - res <- start_serial('test_run', 'pecan', 'test_rundir', 'test_host_rundir', 'test_job_script') + mockery::stub(start_serial, "remote.execute.cmd", mocked_res) + res <- start_serial("test_run", "pecan", "test_rundir", "test_host_rundir", "test_job_script") args <- mockery::mock_args(mocked_res) - expect_equal(args[[1]][[1]], 'pecan') - expect_equal(args[[1]][[2]], 'test_host_rundir/test_run/test_job_script') + expect_equal(args[[1]][[1]], "pecan") + expect_equal(args[[1]][[2]], "test_host_rundir/test_run/test_job_script") expect_equal(res, TRUE) -}) \ No newline at end of file +}) diff --git a/base/settings/R/MultiSettings.R b/base/settings/R/MultiSettings.R index b61d6af9bea..dc7c88a81b9 100644 --- a/base/settings/R/MultiSettings.R +++ b/base/settings/R/MultiSettings.R @@ -18,7 +18,8 @@ MultiSettings <- function(...) { if (!all(sapply(result, is.Settings))) { stop( "MultiSettings can only be made from Setting,", - " MultiSettings, or a list of Settings") + " MultiSettings, or a list of Settings" + ) } if (length(result) > 0 && is.null(names(result))) { @@ -109,9 +110,11 @@ is.MultiSettings <- function(x) { replicatedFirstElement <- replicate( length(x), firstElement, - simplify = FALSE) + simplify = FALSE + ) return(isTRUE( - all.equal(replicatedFirstElement, x, check.attributes = FALSE))) + all.equal(replicatedFirstElement, x, check.attributes = FALSE) + )) } # .allListElementsEqual #' @export @@ -139,10 +142,10 @@ names.MultiSettings <- function(x) { } #' function that can retrieve or update the names of multi-settings. -#' +#' #' @param multiSettings object for which to retrieve or set the names. #' @param settingNames names to be set for the multi-settings object. -#' +#' #' @export settingNames <- function(multiSettings, settingNames) { if (missing(settingNames)) { @@ -160,7 +163,8 @@ print.MultiSettings <- function(x, printAll = FALSE, ...) { } else { print( paste0("A MultiSettings object containing ", length(x), " Settings."), - ...) + ... + ) } } diff --git a/base/settings/R/addSecrets.R b/base/settings/R/addSecrets.R index 65f7bc6f415..60d51d647a0 100644 --- a/base/settings/R/addSecrets.R +++ b/base/settings/R/addSecrets.R @@ -16,11 +16,12 @@ addSecrets <- function(settings, force = FALSE) { return(invisible(settings)) } - if (!force - && !is.null(settings$settings.info$secrets.added) - && settings$settings.info$secrets.added == TRUE) { + if (!force && + !is.null(settings$settings.info$secrets.added) && + settings$settings.info$secrets.added == TRUE) { PEcAn.logger::logger.info( - "Secret settings have been added already. Skipping.") + "Secret settings have been added already. Skipping." 
+ ) return(invisible(settings)) } else { PEcAn.logger::logger.info("Adding secret settings...") diff --git a/base/settings/R/check.all.settings.R b/base/settings/R/check.all.settings.R index bc19f96d2e9..539d8c16e64 100644 --- a/base/settings/R/check.all.settings.R +++ b/base/settings/R/check.all.settings.R @@ -3,26 +3,30 @@ #' @param settings settings file #' @export check.inputs check.inputs <- function(settings) { - if (is.null(settings$model$type)) return(settings) - + if (is.null(settings$model$type)) { + return(settings) + } + # don't know how to check inputs if (is.null(settings$database$bety)) { PEcAn.logger::logger.info("No database connection, can't check inputs.") return(settings) } - + # get list of inputs associated with model type dbcon <- PEcAn.DB::db.open(settings$database$bety) on.exit(PEcAn.DB::db.close(dbcon), add = TRUE) - + inputs <- PEcAn.DB::db.query( paste0( "SELECT tag, format_id, required FROM modeltypes, modeltypes_formats ", "WHERE modeltypes_formats.modeltype_id = modeltypes.id ", "AND modeltypes.name='", settings$model$type, "' ", - "AND modeltypes_formats.input"), - con = dbcon) - + "AND modeltypes_formats.input" + ), + con = dbcon + ) + # check list of inputs allinputs <- names(settings$run$inputs) if (nrow(inputs) > 0) { @@ -30,7 +34,7 @@ check.inputs <- function(settings) { tag <- inputs$tag[i] hostname <- settings$host$name allinputs <- allinputs[allinputs != tag] - + # check if tag exists if (is.null(settings$run$inputs[[tag]])) { if (inputs$required[i]) { @@ -40,33 +44,36 @@ check.inputs <- function(settings) { } next } - + # check if exists if ("id" %in% names(settings$run$inputs[[tag]])) { id <- settings$run$inputs[[tag]][["id"]] file <- PEcAn.DB::dbfile.file("Input", id, dbcon, hostname) if (is.na(file)) { PEcAn.logger::logger.error( - "No file found for", tag, " and id", id, "on host", hostname) + "No file found for", tag, " and id", id, "on host", hostname + ) } else { if (is.null(settings$run$inputs[[tag]][["path"]])) { settings$run$inputs[[tag]]["path"] <- file } else if (file != settings$run$inputs[[tag]][["path"]]) { PEcAn.logger::logger.warn( - "Input file and id do not match for ", tag) + "Input file and id do not match for ", tag + ) } } } else if ("path" %in% names(settings$run$inputs[[tag]])) { # can we find the file so we can set the tag.id - #adding for to loop over ensemble member filepaths + # adding for to loop over ensemble member filepaths id <- list() path <- settings$run$inputs[[tag]][["path"]] - for (j in 1:length(path)){ + for (j in 1:length(path)) { id[j] <- PEcAn.DB::dbfile.id( "Input", path[[j]], dbcon, - hostname) + hostname + ) } if (any(!is.na(id))) { settings$run$inputs[[tag]][["id"]] <- id @@ -78,14 +85,17 @@ check.inputs <- function(settings) { formats <- PEcAn.DB::db.query( paste0( "SELECT format_id FROM inputs WHERE id=", - settings$run$inputs[[tag]][["id"]]), - con = dbcon) + settings$run$inputs[[tag]][["id"]] + ), + con = dbcon + ) if (nrow(formats) >= 1) { if (formats[1, "format_id"] != inputs$format_id[i]) { PEcAn.logger::logger.warn( "@Format of input", tag, "does not match specified input:", - formats[1, "format_id"], inputs$format_id[i]) + formats[1, "format_id"], inputs$format_id[i] + ) # zero out path, do_conversions will need to convert specified # input ID to model format settings$run$inputs[[tag]][["path"]] <- NULL @@ -97,13 +107,14 @@ check.inputs <- function(settings) { PEcAn.logger::logger.info("path", settings$run$inputs[[tag]][["path"]]) } } - + if (length(allinputs) > 0) { PEcAn.logger::logger.info( 
"Unused inputs found :", - paste(allinputs, collapse = " ")) + paste(allinputs, collapse = " ") + ) } - + return(settings) } @@ -114,13 +125,17 @@ check.inputs <- function(settings) { #' You'll probably use `settings$database` #' @export check.database check.database <- function(database) { - if (is.null(database)) return(NULL) + if (is.null(database)) { + return(NULL) + } ## check database settings if (is.null(database$driver)) { database$driver <- "PostgreSQL" - PEcAn.logger::logger.info("Database driver unspecified. ", - "Using 'PostgreSQL' (default)") + PEcAn.logger::logger.info( + "Database driver unspecified. ", + "Using 'PostgreSQL' (default)" + ) } is_postgres_like <- database$driver %in% c("PostgreSQL", "Postgres") @@ -167,10 +182,12 @@ check.database <- function(database) { ## The following hack handles *.illinois.* to *.uiuc.* aliases of ebi-forecast if (!is.null(database$host)) { - forcastnames <- c("ebi-forecast.igb.uiuc.edu", - "ebi-forecast.igb.illinois.edu") - if ((database$host %in% forcastnames) - && (Sys.info()["nodename"] %in% forcastnames)) { + forcastnames <- c( + "ebi-forecast.igb.uiuc.edu", + "ebi-forecast.igb.illinois.edu" + ) + if ((database$host %in% forcastnames) && + (Sys.info()["nodename"] %in% forcastnames)) { database$host <- "localhost" } } else if (is.null(database$host)) { @@ -182,12 +199,11 @@ check.database <- function(database) { if (!is.null(database$userid)) { PEcAn.logger::logger.info("'userid' in database section should be 'user'") database$user <- database$userid - } else if (!is.null(database$username)) { PEcAn.logger::logger.info( - "'username' in database section should be 'user'") + "'username' in database section should be 'user'" + ) database$user <- database$username - } else { PEcAn.logger::logger.info("no database user specified, using 'bety'") database$user <- "bety" @@ -205,12 +221,14 @@ check.database <- function(database) { if (!PEcAn.DB::db.exists(params = database, FALSE, table = NA)) { PEcAn.logger::logger.severe( - "Invalid Database Settings : ", unlist(database)) + "Invalid Database Settings : ", unlist(database) + ) } # connected PEcAn.logger::logger.info( - "Successfully connected to database : ", unlist(database)) + "Successfully connected to database : ", unlist(database) + ) # return fixed up database return(database) @@ -223,31 +241,39 @@ check.database <- function(database) { check.bety.version <- function(dbcon) { versions <- PEcAn.DB::db.query( "SELECT version FROM schema_migrations;", - con = dbcon)[["version"]] + con = dbcon + )[["version"]] # there should always be a version 1 - if (! ("1" %in% versions)) { + if (!("1" %in% versions)) { PEcAn.logger::logger.severe( - "No version 1, how did this database get created?") + "No version 1, how did this database get created?" + ) } # check for specific version - if (! ("20140617163304" %in% versions)) { + if (!("20140617163304" %in% versions)) { PEcAn.logger::logger.severe( - "Missing migration 20140617163304, this associates files with models.") + "Missing migration 20140617163304, this associates files with models." + ) } - if (! ("20140708232320" %in% versions)) { + if (!("20140708232320" %in% versions)) { PEcAn.logger::logger.severe( "Missing migration 20140708232320,", - "this introduces geometry column in sites") + "this introduces geometry column in sites" + ) } - if (! 
("20140729045640" %in% versions)) { - PEcAn.logger::logger.severe("Missing migration 20140729045640,", - "this introduces modeltypes table") + if (!("20140729045640" %in% versions)) { + PEcAn.logger::logger.severe( + "Missing migration 20140729045640,", + "this introduces modeltypes table" + ) } - if (! ("20151011190026" %in% versions)) { - PEcAn.logger::logger.severe("Missing migration 20151011190026,", - "this introduces notes and user_id in workflows") + if (!("20151011190026" %in% versions)) { + PEcAn.logger::logger.severe( + "Missing migration 20151011190026,", + "this introduces notes and user_id in workflows" + ) } # check if database is newer @@ -256,7 +282,8 @@ check.bety.version <- function(dbcon) { PEcAn.logger::logger.warn( "Found database migration(s) not known by this release of PEcAn.settings:", unknown_migrations, - "This could result in PEcAn not working as expected.") + "This could result in PEcAn not working as expected." + ) } } @@ -273,9 +300,9 @@ check.bety.version <- function(dbcon) { #' @author Rob Kooper, David LeBauer #' @export check.settings check.settings <- function(settings, force = FALSE) { - if (!force - && !is.null(settings$settings.info$checked) - && settings$settings.info$checked == TRUE) { + if (!force && + !is.null(settings$settings.info$checked) && + settings$settings.info$checked == TRUE) { PEcAn.logger::logger.info("Settings have been checked already. Skipping.") return(invisible(settings)) } else { @@ -292,7 +319,7 @@ check.settings <- function(settings, force = FALSE) { settings <- check.database.settings(settings) - #checking the ensemble tag in settings + # checking the ensemble tag in settings settings <- check.ensemble.settings(settings) if (!is.null(settings$database$bety)) { @@ -317,7 +344,8 @@ check.settings <- function(settings, force = FALSE) { if (is.null(settings$ensemble) && is.null(settings$sensitivity.analysis)) { PEcAn.logger::logger.warn( "No ensemble or sensitivity analysis specified.", - "No models will be executed!") + "No models will be executed!" 
+ ) } settings <- papply(settings, check.run.settings, dbcon = dbcon) @@ -327,31 +355,35 @@ check.settings <- function(settings, force = FALSE) { if (is.null(settings$meta.analysis$iter)) { settings$meta.analysis$iter <- 3000 PEcAn.logger::logger.info( - "Setting meta.analysis iterations to ", settings$meta.analysis$iter) + "Setting meta.analysis iterations to ", settings$meta.analysis$iter + ) } if (is.null(settings$meta.analysis$random.effects)) { - settings$meta.analysis$random.effects <- list() - settings$meta.analysis$random.effects$on <- FALSE + settings$meta.analysis$random.effects <- list() + settings$meta.analysis$random.effects$on <- FALSE settings$meta.analysis$random.effects$use_ghs <- TRUE PEcAn.logger::logger.info( "Setting meta.analysis random effects to ", - settings$meta.analysis$random.effects$on) + settings$meta.analysis$random.effects$on + ) } else if (!is.list(settings$meta.analysis$random.effects)) { # this handles the previous usage # # FALSE # re_check <- as.logical(settings$meta.analysis$random.effects) - settings$meta.analysis$random.effects <- list() + settings$meta.analysis$random.effects <- list() settings$meta.analysis$random.effects$on <- re_check settings$meta.analysis$random.effects$use_ghs <- TRUE } else { # everything is used as defined settings$meta.analysis$random.effects$on <- as.logical( - settings$meta.analysis$random.effects$on) + settings$meta.analysis$random.effects$on + ) if (!is.null(settings$meta.analysis$random.effects$use_ghs)) { settings$meta.analysis$random.effects$use_ghs <- as.logical( - settings$meta.analysis$random.effects$use_ghs) + settings$meta.analysis$random.effects$use_ghs + ) } else { settings$meta.analysis$random.effects$use_ghs <- TRUE } @@ -359,18 +391,21 @@ check.settings <- function(settings, force = FALSE) { if (is.null(settings$meta.analysis$threshold)) { settings$meta.analysis$threshold <- 1.2 PEcAn.logger::logger.info( - "Setting meta.analysis threshold to ", settings$meta.analysis$threshold) + "Setting meta.analysis threshold to ", settings$meta.analysis$threshold + ) } if (is.null(settings$meta.analysis$update)) { settings$meta.analysis$update <- "AUTO" PEcAn.logger::logger.info( "Setting meta.analysis update to only update if no previous", - "meta analysis was found") + "meta analysis was found" + ) } - if ((settings$meta.analysis$update != "AUTO") - && is.na(as.logical(settings$meta.analysis$update))) { + if ((settings$meta.analysis$update != "AUTO") && + is.na(as.logical(settings$meta.analysis$update))) { PEcAn.logger::logger.info( - "meta.analysis update can only be AUTO/TRUE/FALSE, defaulting to FALSE") + "meta.analysis update can only be AUTO/TRUE/FALSE, defaulting to FALSE" + ) settings$meta.analysis$update <- FALSE } } @@ -379,9 +414,11 @@ check.settings <- function(settings, force = FALSE) { ## if run$host is localhost, set to "localhost if (any( - settings$host %in% c( - Sys.info()["nodename"], - gsub("illinois", "uiuc", Sys.info()["nodename"])))) { + settings$host %in% c( + Sys.info()["nodename"], + gsub("illinois", "uiuc", Sys.info()["nodename"]) + ) + )) { settings$host$name <- "localhost" } @@ -390,18 +427,21 @@ check.settings <- function(settings, force = FALSE) { if (is.null(settings$host$qsub)) { settings$host$qsub <- "qsub -V -N @NAME@ -o @STDOUT@ -e @STDERR@ -S /bin/bash" PEcAn.logger::logger.info( - "qsub not specified using default value :", settings$host$qsub) + "qsub not specified using default value :", settings$host$qsub + ) } if (is.null(settings$host$qsub.jobid)) { settings$host$qsub.jobid <- 
".* ([0-9]+).*" PEcAn.logger::logger.info( "qsub.jobid not specified using default value :", - settings$host$qsub.jobid) + settings$host$qsub.jobid + ) } if (is.null(settings$host$qstat)) { settings$host$qstat <- "qstat -j @JOBID@ &> /dev/null || echo DONE" PEcAn.logger::logger.info( - "qstat not specified using default value :", settings$host$qstat) + "qstat not specified using default value :", settings$host$qstat + ) } } @@ -411,17 +451,20 @@ check.settings <- function(settings, force = FALSE) { settings$host$modellauncher$binary <- "modellauncher" PEcAn.logger::logger.info( "binary not specified using default value :", - settings$host$modellauncher$binary) + settings$host$modellauncher$binary + ) } if (is.null(settings$host$modellauncher$qsub.extra)) { PEcAn.logger::logger.severe( - "qsub.extra not specified, can not launch in parallel environment.") + "qsub.extra not specified, can not launch in parallel environment." + ) } if (is.null(settings$host$modellauncher$mpirun)) { settings$host$modellauncher$mpirun <- "mpirun" PEcAn.logger::logger.info( "mpirun not specified using default value :", - settings$host$modellauncher$mpirun) + settings$host$modellauncher$mpirun + ) } } @@ -430,10 +473,12 @@ check.settings <- function(settings, force = FALSE) { if ("prerun" %in% names(settings$model)) { PEcAn.logger::logger.severe( "You have both settings$model$job.sh and settings$model$prerun,", - "please combine.") + "please combine." + ) } PEcAn.logger::logger.info( - "settings$model$job.sh is deprecated use settings$model$prerun instead.") + "settings$model$job.sh is deprecated use settings$model$prerun instead." + ) settings$model$prerun <- settings$model$job.sh settings$model$job.sh <- NULL } @@ -441,10 +486,12 @@ check.settings <- function(settings, force = FALSE) { if ("prerun" %in% names(settings$host)) { PEcAn.logger::logger.severe( "You have both settings$host$job.sh and settings$host$prerun,", - "please combine.") + "please combine." + ) } PEcAn.logger::logger.info( - "settings$host$job.sh is deprecated use settings$host$prerun instead.") + "settings$host$job.sh is deprecated use settings$host$prerun instead." 
+ ) settings$host$prerun <- settings$host$job.sh settings$host$job.sh <- NULL } @@ -458,15 +505,18 @@ check.settings <- function(settings, force = FALSE) { "settings$database$dbfiles pathname", settings$database$dbfiles, "is invalid\n", "placing it in the home directory ", - Sys.getenv("HOME")) + Sys.getenv("HOME") + ) settings$database$dbfiles <- file.path( Sys.getenv("HOME"), - settings$database$dbfiles) + settings$database$dbfiles + ) } settings$database$dbfiles <- normalizePath( settings$database$dbfiles, - mustWork = FALSE) + mustWork = FALSE + ) } dir.create(settings$database$dbfiles, showWarnings = FALSE, recursive = TRUE) @@ -479,8 +529,8 @@ check.settings <- function(settings, force = FALSE) { if (is.null(settings$rundir)) { settings$rundir <- file.path(settings$outdir, "run") } - if (!file.exists(settings$rundir) - && !dir.create(settings$rundir, recursive = TRUE)) { + if (!file.exists(settings$rundir) && + !dir.create(settings$rundir, recursive = TRUE)) { PEcAn.logger::logger.severe("Could not create run folder", settings$rundir) } @@ -488,10 +538,11 @@ check.settings <- function(settings, force = FALSE) { if (is.null(settings$modeloutdir)) { settings$modeloutdir <- file.path(settings$outdir, "out") } - if (!file.exists(settings$modeloutdir) - && !dir.create(settings$modeloutdir, recursive = TRUE)) { + if (!file.exists(settings$modeloutdir) && + !dir.create(settings$modeloutdir, recursive = TRUE)) { PEcAn.logger::logger.severe( - "Could not create model out folder", settings$modeloutdir) + "Could not create model out folder", settings$modeloutdir + ) } # make sure remote folders are specified if need be @@ -499,9 +550,11 @@ check.settings <- function(settings, force = FALSE) { if (is.null(settings$host$folder)) { settings$host$folder <- paste0( PEcAn.remote::remote.execute.cmd("pwd", host = settings$host), - "/pecan_remote") + "/pecan_remote" + ) PEcAn.logger::logger.info( - "Using ", settings$host$folder, "to store output on remote machine") + "Using ", settings$host$folder, "to store output on remote machine" + ) } if (is.null(settings$host$rundir)) { settings$host$rundir <- paste0(settings$host$folder, "/@WORKFLOW@/run") @@ -509,18 +562,22 @@ check.settings <- function(settings, force = FALSE) { settings$host$rundir <- gsub( "@WORKFLOW@", settings$workflow$id, - settings$host$rundir) + settings$host$rundir + ) PEcAn.logger::logger.info( - "Using ", settings$host$rundir, "to store runs on remote machine") + "Using ", settings$host$rundir, "to store runs on remote machine" + ) if (is.null(settings$host$outdir)) { settings$host$outdir <- paste0(settings$host$folder, "/@WORKFLOW@/out") } settings$host$outdir <- gsub( "@WORKFLOW@", settings$workflow$id, - settings$host$outdir) + settings$host$outdir + ) PEcAn.logger::logger.info( - "Using ", settings$host$outdir, "to store output on remote machine") + "Using ", settings$host$outdir, "to store output on remote machine" + ) } else if (settings$host$name == "localhost") { settings$host$rundir <- settings$rundir settings$host$outdir <- settings$modeloutdir @@ -529,14 +586,16 @@ check.settings <- function(settings, force = FALSE) { # check/create the pft folders if (!is.null(settings$pfts) && (length(settings$pfts) > 0)) { for (i in seq_along(settings$pfts)) { - #check if name tag within pft + # check if name tag within pft if (!"name" %in% names(settings$pfts[i]$pft)) { PEcAn.logger::logger.severe( - "No name specified for pft of index: ", i, ", please specify name") + "No name specified for pft of index: ", i, ", please specify name" 
+ ) } if (settings$pfts[i]$pft$name == "") { PEcAn.logger::logger.severe( - "Name specified for pft of index: ", i, " can not be empty.") + "Name specified for pft of index: ", i, " can not be empty." + ) } # check to see if name of each pft in xml file is actually @@ -546,27 +605,33 @@ check.settings <- function(settings, force = FALSE) { x <- PEcAn.DB::db.query( paste0( "SELECT pfts.id FROM pfts", - " WHERE pfts.name = '", settings$pfts[i]$pft$name, "'"), - con = dbcon) + " WHERE pfts.name = '", settings$pfts[i]$pft$name, "'" + ), + con = dbcon + ) } else { x <- PEcAn.DB::db.query( paste0( "SELECT pfts.id FROM pfts, modeltypes", - " WHERE pfts.name = '", settings$pfts[i]$pft$name, "'", + " WHERE pfts.name = '", settings$pfts[i]$pft$name, "'", " AND modeltypes.name='", settings$model$type, "'", - " AND modeltypes.id=pfts.modeltype_id;"), - con = dbcon) + " AND modeltypes.id=pfts.modeltype_id;" + ), + con = dbcon + ) } if (nrow(x) == 0) { PEcAn.logger::logger.severe( "Did not find a pft with name ", settings$pfts[i]$pft$name, - "\nfor model type", settings$model$type) + "\nfor model type", settings$model$type + ) } if (nrow(x) > 1) { PEcAn.logger::logger.warn( "Found multiple entries for pft with name ", settings$pfts[i]$pft$name, - "\nfor model type", settings$model$type) + "\nfor model type", settings$model$type + ) } } @@ -574,14 +639,17 @@ check.settings <- function(settings, force = FALSE) { settings$pfts[i]$pft$outdir <- file.path( settings$outdir, "pft", - settings$pfts[i]$pft$name) + settings$pfts[i]$pft$name + ) PEcAn.logger::logger.info( "Storing pft", settings$pfts[i]$pft$name, - "in", settings$pfts[i]$pft$outdir) + "in", settings$pfts[i]$pft$outdir + ) } else { PEcAn.logger::logger.debug( "Storing pft", settings$pfts[i]$pft$name, - "in", settings$pfts[i]$pft$outdir) + "in", settings$pfts[i]$pft$outdir + ) } out.dir <- settings$pfts[i]$pft$outdir if (!file.exists(out.dir) && !dir.create(out.dir, recursive = TRUE)) { @@ -626,11 +694,13 @@ check.run.settings <- function(settings, dbcon = NULL) { startdate <- lubridate::parse_date_time( settings$run$start.date, "ymd_HMS", - truncated = 3) + truncated = 3 + ) enddate <- lubridate::parse_date_time( settings$run$end.date, "ymd_HMS", - truncated = 3) + truncated = 3 + ) if (startdate >= enddate) { PEcAn.logger::logger.severe("Start date should come before the end date.") } @@ -642,73 +712,86 @@ check.run.settings <- function(settings, dbcon = NULL) { if (is.null(settings$sensitivity.analysis$variable)) { if (is.null(settings$ensemble$variable)) { PEcAn.logger::logger.severe( - "No variable specified to compute sensitivity.analysis for.") + "No variable specified to compute sensitivity.analysis for." + ) } PEcAn.logger::logger.info( "Setting sensitivity.analysis variable to the same as", - "ensemble variable [", settings$ensemble$variable, "]") + "ensemble variable [", settings$ensemble$variable, "]" + ) settings$sensitivity.analysis$variable <- settings$ensemble$variable } if (is.null(settings$sensitivity.analysis$start.year)) { if (!is.null(settings$run$start.date)) { settings$sensitivity.analysis$start.year <- lubridate::year( - settings$run$start.date) + settings$run$start.date + ) PEcAn.logger::logger.info( "No start date passed to sensitivity.analysis - using the run date (", - settings$sensitivity.analysis$start.year, ").") + settings$sensitivity.analysis$start.year, ")." 
+ ) } else if (!is.null(settings$ensemble$start.year)) { settings$sensitivity.analysis$start.year <- settings$ensemble$start.year PEcAn.logger::logger.info( "No start date passed to sensitivity.analysis -", "using the ensemble date (", - settings$sensitivity.analysis$start.year, ").") + settings$sensitivity.analysis$start.year, ")." + ) } else { PEcAn.logger::logger.info( "No start date passed to sensitivity.analysis,", - "and no default available.") + "and no default available." + ) } } if (is.null(settings$sensitivity.analysis$end.year)) { if (!is.null(settings$run$end.date)) { settings$sensitivity.analysis$end.year <- lubridate::year( - settings$run$end.date) + settings$run$end.date + ) PEcAn.logger::logger.info( "No end date passed to sensitivity.analysis - using the run date (", - settings$sensitivity.analysis$end.year, ").") + settings$sensitivity.analysis$end.year, ")." + ) } else if (!is.null(settings$ensemble$end.year)) { settings$sensitivity.analysis$end.year <- settings$ensemble$end.year PEcAn.logger::logger.info( "No end date passed to sensitivity.analysis.", "Using the ensemble date (", - settings$sensitivity.analysis$end.year, ").") + settings$sensitivity.analysis$end.year, ")." + ) } else { PEcAn.logger::logger.info( "No end date passed to sensitivity.analysis,", - "and no default available.") + "and no default available." + ) } } # check start and end dates - if (exists("startdate") - && !is.null(settings$sensitivity.analysis$start.year) - && lubridate::year(startdate) > settings$sensitivity.analysis$start.year) { + if (exists("startdate") && + !is.null(settings$sensitivity.analysis$start.year) && + lubridate::year(startdate) > settings$sensitivity.analysis$start.year) { PEcAn.logger::logger.severe( - "Start year of SA should come after the start.date of the run") + "Start year of SA should come after the start.date of the run" + ) } - if (exists("enddate") - && !is.null(settings$sensitivity.analysis$end.year) - && lubridate::year(enddate) < settings$sensitivity.analysis$end.year) { + if (exists("enddate") && + !is.null(settings$sensitivity.analysis$end.year) && + lubridate::year(enddate) < settings$sensitivity.analysis$end.year) { PEcAn.logger::logger.severe( - "End year of SA should come before the end.date of the run") + "End year of SA should come before the end.date of the run" + ) } if (!is.null(settings$sensitivity.analysis$start.year) && - !is.null(settings$sensitivity.analysis$end.year) && - settings$sensitivity.analysis$start.year > settings$sensitivity.analysis$end.year) { + !is.null(settings$sensitivity.analysis$end.year) && + settings$sensitivity.analysis$start.year > settings$sensitivity.analysis$end.year) { PEcAn.logger::logger.severe( - "Start year of SA should come before the end year of the SA") + "Start year of SA should come before the end year of the SA" + ) } } @@ -724,8 +807,10 @@ check.run.settings <- function(settings, dbcon = NULL) { paste( "SELECT sitename, ST_X(ST_CENTROID(geometry)) AS lon, ", "ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id =", - settings$run$site$id), - con = dbcon) + settings$run$site$id + ), + con = dbcon + ) } else { site <- data.frame(id = settings$run$site$id) if (!is.null(settings$run$site$name)) { @@ -738,8 +823,8 @@ check.run.settings <- function(settings, dbcon = NULL) { site$lon <- settings$run$site$lon } } - if ((!is.null(settings$run$site$met)) - && settings$run$site$met == "NULL") { + if ((!is.null(settings$run$site$met)) && + settings$run$site$met == "NULL") { settings$run$site$met <- NULL } if 
(is.null(settings$run$site$name)) { @@ -749,12 +834,14 @@ check.run.settings <- function(settings, dbcon = NULL) { } else { settings$run$site$name <- site$sitename PEcAn.logger::logger.info( - "Setting site name to ", settings$run$site$name) + "Setting site name to ", settings$run$site$name + ) } } else if (site$sitename != settings$run$site$name) { PEcAn.logger::logger.warn( "Specified site name [", settings$run$site$name, - "] does not match sitename in database [", site$sitename, "]") + "] does not match sitename in database [", site$sitename, "]" + ) } if (is.null(settings$run$site$lat)) { @@ -763,12 +850,14 @@ check.run.settings <- function(settings, dbcon = NULL) { } else { settings$run$site$lat <- as.numeric(site$lat) PEcAn.logger::logger.info( - "Setting site lat to ", settings$run$site$lat) + "Setting site lat to ", settings$run$site$lat + ) } } else if (as.numeric(site$lat) != as.numeric(settings$run$site$lat)) { PEcAn.logger::logger.warn( "Specified site lat [", settings$run$site$lat, - "] does not match lat in database [", site$lat, "]") + "] does not match lat in database [", site$lat, "]" + ) } if (is.null(settings$run$site$lon)) { @@ -777,12 +866,14 @@ check.run.settings <- function(settings, dbcon = NULL) { } else { settings$run$site$lon <- as.numeric(site$lon) PEcAn.logger::logger.info( - "Setting site lon to ", settings$run$site$lon) + "Setting site lon to ", settings$run$site$lon + ) } } else if (as.numeric(site$lon) != as.numeric(settings$run$site$lon)) { PEcAn.logger::logger.warn( "Specified site lon [", settings$run$site$lon, - "] does not match lon in database [", site$lon, "]") + "] does not match lon in database [", site$lon, "]" + ) } } } else { @@ -807,15 +898,18 @@ check.model.settings <- function(settings, dbcon = NULL) { model <- PEcAn.DB::db.query( paste0( "SELECT models.id AS id, models.revision AS revision, ", - "modeltypes.name AS type", + "modeltypes.name AS type", " FROM models, modeltypes WHERE models.id=", settings$model$id, - " AND models.modeltype_id=modeltypes.id;"), - con = dbcon) + " AND models.modeltype_id=modeltypes.id;" + ), + con = dbcon + ) if (nrow(model) == 0) { PEcAn.logger::logger.error( "There is no record of model_id = ", settings$model$id, - "in database") + "in database" + ) } } else { model <- list() @@ -829,19 +923,24 @@ check.model.settings <- function(settings, dbcon = NULL) { "AND models.modeltype_id=modeltypes.id ", ifelse( is.null(settings$model$revision), "", - paste0("AND revision like '%", settings$model$revision, "%' ")), - "ORDER BY models.updated_at"), - con = dbcon) + paste0("AND revision like '%", settings$model$revision, "%' ") + ), + "ORDER BY models.updated_at" + ), + con = dbcon + ) if (nrow(model) > 1) { PEcAn.logger::logger.warn( "multiple records for", settings$model$type, - "returned; using the latest") + "returned; using the latest" + ) row <- which.max(model$updated_at) if (length(row) == 0) row <- nrow(model) model <- model[row, ] } else if (nrow(model) == 0) { PEcAn.logger::logger.warn( - "Model type", settings$model$type, "not in database") + "Model type", settings$model$type, "not in database" + ) } } else { PEcAn.logger::logger.warn("no model settings given") @@ -858,7 +957,8 @@ check.model.settings <- function(settings, dbcon = NULL) { PEcAn.logger::logger.info("Setting model id to ", settings$model$id) } else if (settings$model$id != model$id) { PEcAn.logger::logger.warn( - "Model id specified in settings file does not match database.") + "Model id specified in settings file does not match database." 
+ ) } } else { if (is.null(settings$model$id) || (settings$model$id == "")) { @@ -872,17 +972,20 @@ check.model.settings <- function(settings, dbcon = NULL) { PEcAn.logger::logger.info("Setting model type to ", settings$model$type) } else if (settings$model$type != model$type) { PEcAn.logger::logger.warn( - "Model type specified in settings file does not match database.") + "Model type specified in settings file does not match database." + ) } } if (!is.null(model$revision)) { if (is.null(settings$model$revision) || (settings$model$revision == "")) { settings$model$revision <- model$revision PEcAn.logger::logger.info( - "Setting model revision to ", settings$model$revision) + "Setting model revision to ", settings$model$revision + ) } else if (settings$model$revision != model$revision) { PEcAn.logger::logger.warn( - "Model revision specified in settings file does not match database.") + "Model revision specified in settings file does not match database." + ) } } @@ -892,11 +995,12 @@ check.model.settings <- function(settings, dbcon = NULL) { } # Set model$delete.raw to FALSE by default - if (is.null(settings$model$delete.raw) - || !is.logical(as.logical(settings$model$delete.raw))) { + if (is.null(settings$model$delete.raw) || + !is.logical(as.logical(settings$model$delete.raw))) { PEcAn.logger::logger.info( "Option to delete raw model output not set or not logical.", - "Will keep all model output.") + "Will keep all model output." + ) settings$model$delete.raw <- FALSE } @@ -906,21 +1010,25 @@ check.model.settings <- function(settings, dbcon = NULL) { "Model", settings$model$id, dbcon, - settings$host$name) + settings$host$name + ) if (!is.na(binary)) { if (is.null(settings$model$binary)) { settings$model$binary <- binary PEcAn.logger::logger.info( - "Setting model binary to ", settings$model$binary) + "Setting model binary to ", settings$model$binary + ) } else if (binary != settings$model$binary) { PEcAn.logger::logger.warn( "Specified binary [", settings$model$binary, - "] does not match path in database [", binary, "]") + "] does not match path in database [", binary, "]" + ) } } } else { PEcAn.logger::logger.warn( - "No model binary sepcified in database for model ", settings$model$type) + "No model binary sepcified in database for model ", settings$model$type + ) } } @@ -934,43 +1042,47 @@ check.model.settings <- function(settings, dbcon = NULL) { check.workflow.settings <- function(settings, dbcon = NULL) { # check for workflow defaults fixoutdir <- FALSE - if (!is.null(dbcon) - && settings$database$bety$write - && ("model" %in% names(settings))) { + if (!is.null(dbcon) && + settings$database$bety$write && + ("model" %in% names(settings))) { if (!"workflow" %in% names(settings)) { now <- format(Sys.time(), "%Y-%m-%d %H:%M:%S") if (is.MultiSettings(settings)) { insert_result <- PEcAn.DB::db.query( paste0( "INSERT INTO workflows (", - "folder, model_id, hostname, started_at) ", + "folder, model_id, hostname, started_at) ", "values ('", - settings$outdir, "','", - settings$model$id, "', '", - settings$host$name, "', '", - now, "') RETURNING id"), - con = dbcon) + settings$outdir, "','", + settings$model$id, "', '", + settings$host$name, "', '", + now, "') RETURNING id" + ), + con = dbcon + ) } else { insert_result <- PEcAn.DB::db.query( paste0( "INSERT INTO workflows (", - "folder, site_id, model_id, hostname, start_date, end_date, ", - "started_at) ", + "folder, site_id, model_id, hostname, start_date, end_date, ", + "started_at) ", "values ('", - settings$outdir, "','", - 
settings$run$site$id, "','", - settings$model$id, "', '", - settings$host$name, "', '", - settings$run$start.date, "', '", - settings$run$end.date, "', '", - now, "') RETURNING id"), - con = dbcon) + settings$outdir, "','", + settings$run$site$id, "','", + settings$model$id, "', '", + settings$host$name, "', '", + settings$run$start.date, "', '", + settings$run$end.date, "', '", + now, "') RETURNING id" + ), + con = dbcon + ) } settings$workflow$id <- insert_result[["id"]] fixoutdir <- TRUE } } else { - settings$workflow$id <- format(Sys.time(), "%Y-%m-%d-%H-%M-%S") + settings$workflow$id <- format(Sys.time(), "%Y-%m-%d-%H-%M-%S") } # check/create the pecan folder @@ -981,25 +1093,28 @@ check.workflow.settings <- function(settings, dbcon = NULL) { settings$outdir <- gsub( "@WORKFLOW@", format(settings$workflow$id, scientific = FALSE), - settings$outdir) + settings$outdir + ) # create fully qualified pathname if (substr(settings$outdir, 1, 1) != "/") { settings$outdir <- file.path(getwd(), settings$outdir) } PEcAn.logger::logger.info("output folder =", settings$outdir) - if (!file.exists(settings$outdir) - && !dir.create(settings$outdir, recursive = TRUE)) { + if (!file.exists(settings$outdir) && + !dir.create(settings$outdir, recursive = TRUE)) { PEcAn.logger::logger.severe("Could not create folder", settings$outdir) } - #update workflow + # update workflow if (fixoutdir) { PEcAn.DB::db.query( paste0( "UPDATE workflows SET folder='", PEcAn.utils::full.path(settings$outdir), - "' WHERE id=", settings$workflow$id), - con = dbcon) + "' WHERE id=", settings$workflow$id + ), + con = dbcon + ) } return(settings) @@ -1024,27 +1139,32 @@ check.database.settings <- function(settings) { # should runs be written to database if (is.null(settings$database$bety$write)) { PEcAn.logger::logger.info( - "Writing all runs/configurations to database.") + "Writing all runs/configurations to database." + ) settings$database$bety$write <- TRUE } else { settings$database$bety$write <- as.logical(settings$database$bety$write) if (settings$database$bety$write) { PEcAn.logger::logger.debug( - "Writing all runs/configurations to database.") + "Writing all runs/configurations to database." + ) } else { PEcAn.logger::logger.warn( - "Will not write runs/configurations to database.") + "Will not write runs/configurations to database." + ) } } # check if we can connect to the database with write permissions - if (settings$database$bety$write - && !PEcAn.DB::db.exists( - params = settings$database$bety, - TRUE, - table = "users")) { + if (settings$database$bety$write && + !PEcAn.DB::db.exists( + params = settings$database$bety, + TRUE, + table = "users" + )) { PEcAn.logger::logger.severe( - "Invalid Database Settings : ", unlist(settings$database)) + "Invalid Database Settings : ", unlist(settings$database) + ) } # TODO check userid and userpassword @@ -1057,11 +1177,13 @@ check.database.settings <- function(settings) { check.bety.version(dbcon) } else { PEcAn.logger::logger.warn( - "No BETY database information specified; not using database.") + "No BETY database information specified; not using database." + ) } } else { PEcAn.logger::logger.warn( - "No BETY database information specified; not using database.") + "No BETY database information specified; not using database." 
+ ) } return(settings) } @@ -1070,17 +1192,18 @@ check.database.settings <- function(settings) { #' @param settings settings file #' @export check.ensemble.settings check.ensemble.settings <- function(settings) { - # check ensemble if (!is.null(settings$ensemble)) { if (is.null(settings$ensemble$variable)) { if (is.null(settings$sensitivity.analysis$variable)) { PEcAn.logger::logger.severe( - "No variable specified to compute ensemble for.") + "No variable specified to compute ensemble for." + ) } PEcAn.logger::logger.info( "Setting ensemble variable to the same as sensitivity analysis", - "variable [", settings$sensitivity.analysis$variable, "]") + "variable [", settings$sensitivity.analysis$variable, "]" + ) settings$ensemble$variable <- settings$sensitivity.analysis$variable } @@ -1094,21 +1217,26 @@ check.ensemble.settings <- function(settings) { startdate <- lubridate::parse_date_time( settings$run$start.date, "ymd_HMS", - truncated = 3) + truncated = 3 + ) settings$ensemble$start.year <- lubridate::year( - settings$run$start.date) + settings$run$start.date + ) PEcAn.logger::logger.info( "No start date passed to ensemble - using the run date (", - settings$ensemble$start.year, ").") + settings$ensemble$start.year, ")." + ) } else if (!is.null(settings$sensitivity.analysis$start.year)) { settings$ensemble$start.year <- settings$sensitivity.analysis$start.year PEcAn.logger::logger.info( "No start date passed to ensemble.", "Using the sensitivity.analysis date (", - settings$ensemble$start.year, ").") + settings$ensemble$start.year, ")." + ) } else { PEcAn.logger::logger.info( - "No start date passed to ensemble, and no default available.") + "No start date passed to ensemble, and no default available." + ) } } @@ -1117,54 +1245,62 @@ check.ensemble.settings <- function(settings) { enddate <- lubridate::parse_date_time( settings$run$end.date, "ymd_HMS", - truncated = 3) + truncated = 3 + ) settings$ensemble$end.year <- lubridate::year(settings$run$end.date) PEcAn.logger::logger.info( "No end date passed to ensemble - using the run date (", - settings$ensemble$end.year, ").") + settings$ensemble$end.year, ")." + ) } else if (!is.null(settings$sensitivity.analysis$end.year)) { settings$ensemble$end.year <- settings$sensitivity.analysis$end.year PEcAn.logger::logger.info( "No end date passed to ensemble.", "Using the sensitivity.analysis date (", - settings$ensemble$end.year, ").") + settings$ensemble$end.year, ")." + ) } else { PEcAn.logger::logger.info( - "No end date passed to ensemble, and no default available.") + "No end date passed to ensemble, and no default available." 
+ ) } } # check start and end dates if (exists("startdate") && !is.null(settings$ensemble$start.year) && - lubridate::year(startdate) > settings$ensemble$start.year) { + lubridate::year(startdate) > settings$ensemble$start.year) { PEcAn.logger::logger.severe( - "Start year of ensemble should come after the start.date of the run") + "Start year of ensemble should come after the start.date of the run" + ) } if (exists("enddate") && !is.null(settings$ensemble$end.year) && - lubridate::year(enddate) < settings$ensemble$end.year) { + lubridate::year(enddate) < settings$ensemble$end.year) { PEcAn.logger::logger.severe( - "End year of ensemble should come before the end.date of the run") + "End year of ensemble should come before the end.date of the run" + ) } - if (!is.null(settings$ensemble$start.year) - && !is.null(settings$ensemble$end.year) - && settings$ensemble$start.year > settings$ensemble$end.year) { + if (!is.null(settings$ensemble$start.year) && + !is.null(settings$ensemble$end.year) && + settings$ensemble$start.year > settings$ensemble$end.year) { PEcAn.logger::logger.severe( - "Start year of ensemble should come before the end year of the ensemble") + "Start year of ensemble should come before the end year of the ensemble" + ) } # Old version of pecan xml files which they don't have a sampling space # or it's just sampling space and nothing inside it. - if (is.null(settings$ensemble$samplingspace) - || !is.list(settings$ensemble$samplingspace)) { + if (is.null(settings$ensemble$samplingspace) || + !is.list(settings$ensemble$samplingspace)) { PEcAn.logger::logger.info( - "We are updating the ensemble tag inside the xml file.") + "We are updating the ensemble tag inside the xml file." + ) # I try to put ensemble method in older versions into the parameter space - # If I fail (when no method is defined) I just set it as uniform settings$ensemble$samplingspace$parameters$method <- settings$ensemble$method if (is.null(settings$ensemble$samplingspace$parameters$method)) { settings$ensemble$samplingspace$parameters$method <- "uniform" } - #putting something simple in the met + # putting something simple in the met settings$ensemble$samplingspace$met$method <- "sampling" } } diff --git a/base/settings/R/clean.settings.R b/base/settings/R/clean.settings.R index 68c7db21487..d3bff321f8b 100644 --- a/base/settings/R/clean.settings.R +++ b/base/settings/R/clean.settings.R @@ -12,7 +12,7 @@ #' @author Rob Kooper #' @examples #' \dontrun{ -#' clean.settings('output/PEcAn_1/pecan.xml', 'pecan.xml') +#' clean.settings("output/PEcAn_1/pecan.xml", "pecan.xml") #' } clean.settings <- function( inputfile = "pecan.xml", diff --git a/base/settings/R/fix.deprecated.settings.R b/base/settings/R/fix.deprecated.settings.R index 4fddbcdb0fb..72f2cb0765a 100644 --- a/base/settings/R/fix.deprecated.settings.R +++ b/base/settings/R/fix.deprecated.settings.R @@ -7,11 +7,12 @@ #' @author Ryan Kelly #' @export fix.deprecated.settings fix.deprecated.settings <- function(settings, force = FALSE) { - if (!force - && !is.null(settings$settings.info$deprecated.settings.fixed) - && settings$settings.info$deprecated.settings.fixed == TRUE) { + if (!force && + !is.null(settings$settings.info$deprecated.settings.fixed) && + settings$settings.info$deprecated.settings.fixed == TRUE) { PEcAn.logger::logger.info( - "Deprecated settings have been fixed already. Skipping.") + "Deprecated settings have been fixed already. Skipping." 
+ ) return(invisible(settings)) } else { PEcAn.logger::logger.info("Fixing deprecated settings...") @@ -26,11 +27,13 @@ fix.deprecated.settings <- function(settings, force = FALSE) { if (!is.null(settings$model$jobtemplate)) { PEcAn.logger::logger.severe( "You have both deprecated settings$run$jobtemplate", - "and settings$model$jobtemplate. Use latter only.") + "and settings$model$jobtemplate. Use latter only." + ) } PEcAn.logger::logger.info( "settings$run$jobtemplate is deprecated.", - "use settings$model$jobtemplate instead") + "use settings$model$jobtemplate instead" + ) settings$model$jobtemplate <- settings$run$jobtemplate settings$run$jobtemplate <- NULL } @@ -40,11 +43,13 @@ fix.deprecated.settings <- function(settings, force = FALSE) { if (!is.null(settings$database$dbfiles)) { PEcAn.logger::logger.severe( "You have both deprecated settings$run$dbfiles", - "and settings$database$dbfiles. Use latter only.") + "and settings$database$dbfiles. Use latter only." + ) } PEcAn.logger::logger.info( "settings$run$dbfiles is deprecated.", - "use settings$database$dbfiles instead") + "use settings$database$dbfiles instead" + ) settings$database$dbfiles <- settings$run$dbfiles settings$run$dbfiles <- NULL } @@ -54,10 +59,12 @@ fix.deprecated.settings <- function(settings, force = FALSE) { if (!is.null(settings$host)) { PEcAn.logger::logger.severe( "You have both deprecated settings$run$host and settings$host.", - "Use latter only.") + "Use latter only." + ) } PEcAn.logger::logger.info( - "settings$run$host is deprecated. use settings$host instead") + "settings$run$host is deprecated. use settings$host instead" + ) settings$host <- settings$run$host settings$run$host <- NULL } diff --git a/base/settings/R/get_args.R b/base/settings/R/get_args.R index 277ad5e0e53..6c93f009deb 100644 --- a/base/settings/R/get_args.R +++ b/base/settings/R/get_args.R @@ -7,7 +7,9 @@ #' @export #' #' @examples -#' \dontrun{./web/workflow.R -h} +#' \dontrun{ +#' . / web / workflow.R - h +#' } get_args <- function() { option_list <- list( optparse::make_option( diff --git a/base/settings/R/known_bety_migrations.R b/base/settings/R/known_bety_migrations.R index 21a670ae34c..3c2cb1ace67 100644 --- a/base/settings/R/known_bety_migrations.R +++ b/base/settings/R/known_bety_migrations.R @@ -4,66 +4,67 @@ # # TODO: Would it make sense to move this, and the checks it supports, to PEcAn.DB? 
.known_bety_migrations <- c( - "1", - "20130104205059", - "20130104211901", - "20130104211946", - "20130109205535", - "20130222222929", - "20130425152503", - "20130624001504", - "20130629205658", - "20130707190720", - "20130717162614", - "20130813212131", - "20130829162053", - "20150904184512", - "20130830184559", - "20140418005637", - "20140422155957", - "20140423220457", - "20140506210037", - "20140515205254", - "20140521180349", - "20140604192901", - "20140617163304", - "20140610210928", - "20140621060009", - "20140623004229", - "20140624185610", - "20140708232320", - "20140729045640", - "20151007174432", - "20151011190026", - "20140904220035", - "20140904221818", - "20140909212759", - "20140915153555", - "20141009160121", - "20141208165401", - "20141211220550", - "20150202215147", - "20150202220519", - "20150213162341", - "20150313165132", - "20150521211114", - "20150624220952", - "20150624222656", - "20150625184958", - "20151014182146", - "20160303221049", - "20160412030352", - "20160523165531", - "20160617133217", - "20160711231257", - "20160720182233", - "20160930213737", - "20161003180105", - "20161005181021", - "20161129192658", - "20170118205944", - "20170415183619", - "20170712171513", - "20180510184222", - "20181129000515", - "20200329233137") + "1", + "20130104205059", + "20130104211901", + "20130104211946", + "20130109205535", + "20130222222929", + "20130425152503", + "20130624001504", + "20130629205658", + "20130707190720", + "20130717162614", + "20130813212131", + "20130829162053", + "20150904184512", + "20130830184559", + "20140418005637", + "20140422155957", + "20140423220457", + "20140506210037", + "20140515205254", + "20140521180349", + "20140604192901", + "20140617163304", + "20140610210928", + "20140621060009", + "20140623004229", + "20140624185610", + "20140708232320", + "20140729045640", + "20151007174432", + "20151011190026", + "20140904220035", + "20140904221818", + "20140909212759", + "20140915153555", + "20141009160121", + "20141208165401", + "20141211220550", + "20150202215147", + "20150202220519", + "20150213162341", + "20150313165132", + "20150521211114", + "20150624220952", + "20150624222656", + "20150625184958", + "20151014182146", + "20160303221049", + "20160412030352", + "20160523165531", + "20160617133217", + "20160711231257", + "20160720182233", + "20160930213737", + "20161003180105", + "20161005181021", + "20161129192658", + "20170118205944", + "20170415183619", + "20170712171513", + "20180510184222", + "20181129000515", + "20200329233137" +) diff --git a/base/settings/R/listToXml.R b/base/settings/R/listToXml.R index e354a33e6c4..42880cd08b9 100644 --- a/base/settings/R/listToXml.R +++ b/base/settings/R/listToXml.R @@ -42,7 +42,7 @@ listToXml.default <- function(x, ...) { return(XML::xmlNode(tag, x)) } } - + # create the node if (identical(names(x), c("text", ".attrs"))) { # special case a node with text and attributes @@ -56,7 +56,7 @@ listToXml.default <- function(x, ...) { } } } - + # add attributes to node attrs <- x[[".attrs"]] for (name in names(attrs)) { diff --git a/base/settings/R/loadPath_sitePFT.R b/base/settings/R/loadPath_sitePFT.R index 30a2cbcc239..461d3a518aa 100644 --- a/base/settings/R/loadPath_sitePFT.R +++ b/base/settings/R/loadPath_sitePFT.R @@ -12,17 +12,18 @@ #' the name of the PFT. #' loadPath.sitePFT <- function(settings, Path) { - #finding the file extension. + # finding the file extension. 
ext <- tools::file_ext(Path) if (ext == "csv" || ext == "txt") { # reading in the links links <- utils::read.table(file.path(Path), header = TRUE, sep = ",") - #check to make sure the input file is what we expect it. + # check to make sure the input file is what we expect it. if (nrow(links) == 0 || ncol(links) == 0 || ncol(links) != 2) { PEcAn.logger::logger.severe( "There is a problem with reading the file. Either row number,", - "column number is zero or your file does not have two columns.") + "column number is zero or your file does not have two columns." + ) } return(`colnames<-`(links, c("site", "pft"))) diff --git a/base/settings/R/papply.R b/base/settings/R/papply.R index b76f0753b50..e1acb6b8177 100644 --- a/base/settings/R/papply.R +++ b/base/settings/R/papply.R @@ -48,11 +48,12 @@ papply <- function(settings, fn, ..., stop.on.error = FALSE) { for (i in seq_along(settings)) { PEcAn.logger::logger.debug( "papply executing ", deparse(substitute(fn)), - "on element ", i, " of ", length(settings), ".") + "on element ", i, " of ", length(settings), "." + ) + + tmp <- settings[[i]] + if (all(grepl("settings", names(tmp$run)))) tmp$run <- tmp$run[[i]] - tmp = settings[[i]] - if(all(grepl("settings",names(tmp$run)))) tmp$run = tmp$run[[i]] - result.i <- try(fn(tmp, ...), silent = TRUE) if (!inherits(result.i, "try-error")) { @@ -71,18 +72,21 @@ papply <- function(settings, fn, ..., stop.on.error = FALSE) { PEcAn.logger::logger.error( "papply threw an error for element ", i, " of ", length(settings), ", and is aborting since stop.on.error=TRUE. Message was: '", - as.character(result.i), "'") + as.character(result.i), "'" + ) stop() } else { warning.message.i <- paste0( "papply threw an error for element ", i, " of ", length(settings), ", but is continuing since stop.on.error=FALSE", " (there will be no results for this element, however).", - " Message was: '", as.character(result.i), "'") + " Message was: '", as.character(result.i), "'" + ) PEcAn.logger::logger.warn(warning.message.i) errors <- c( errors, - paste0("Element ", i, ": '", as.character(result.i), "'")) + paste0("Element ", i, ": '", as.character(result.i), "'") + ) } } } @@ -95,7 +99,8 @@ papply <- function(settings, fn, ..., stop.on.error = FALSE) { PEcAn.logger::logger.warn( "papply encountered errors for ", length(errors), " elements, ", "but continued since stop.on.error=FALSE.", - paste(errors, collapse = "; ")) + paste(errors, collapse = "; ") + ) } return(result) @@ -106,6 +111,7 @@ papply <- function(settings, fn, ..., stop.on.error = FALSE) { return(fn(as.Settings(settings), ...)) } else { PEcAn.logger::logger.severe( - "The function", fn, "requires input of type MultiSettings or Settings") + "The function", fn, "requires input of type MultiSettings or Settings" + ) } } # papply diff --git a/base/settings/R/pft_site_linker.R b/base/settings/R/pft_site_linker.R index cda8afdbe84..197d805be6b 100644 --- a/base/settings/R/pft_site_linker.R +++ b/base/settings/R/pft_site_linker.R @@ -16,27 +16,27 @@ #' @export site.pft.linkage #' #' @examples -#'\dontrun{ -#' #setting up the Look up tables -#' site.pft.links <-tribble( -#' ~site, ~pft, -#' "1000025731", "temperate.broadleaf.deciduous1", -#' "1000025731", "temperate.needleleaf.evergreen", -#' "1000000048", "temperate.broadleaf.deciduous2", -#' "772", "temperate.broadleaf.deciduous3", -#' "763", "temperate.broadleaf.deciduous4" +#' \dontrun{ +#' # setting up the Look up tables +#' site.pft.links <- tribble( +#' ~site, ~pft, +#' "1000025731", 
"temperate.broadleaf.deciduous1", +#' "1000025731", "temperate.needleleaf.evergreen", +#' "1000000048", "temperate.broadleaf.deciduous2", +#' "772", "temperate.broadleaf.deciduous3", +#' "763", "temperate.broadleaf.deciduous4" #' ) #' #' # sending a multi- setting xml file to the function -#' site.pft.linkage(settings,site.pft.links) -#'} +#' site.pft.linkage(settings, site.pft.links) +#' } site.pft.linkage <- function(settings, site.pft.links) { - # checking the LUT if (is.null(site.pft.links) || ncol(site.pft.links) != 2) { - PEcAn.logger::logger.severe( - "Your look up table should have two columns of site and pft", - "with site ids under site column and pft names under pft column.") + PEcAn.logger::logger.severe( + "Your look up table should have two columns of site and pft", + "with site ids under site column and pft names under pft column." + ) } # if it's not a multisetting put it still in a list @@ -44,18 +44,19 @@ site.pft.linkage <- function(settings, site.pft.links) { settings <- list(settings) } - #for each site in this setting + # for each site in this setting new.mset <- purrr::map( settings, function(site.setting) { site.pft <- NULL site.id <- (site.setting[["run"]])$site$id - #if no site id was found + # if no site id was found if (is.null(site.id)) { PEcAn.logger::logger.warn( "Since your site xml tag does NOT have a site id,", "we can not assign a PFT to it. The site of this site is", - (site.setting[["run"]])$site$name) + (site.setting[["run"]])$site$name + ) } else { # see if we can find this site id in the LUT if (site.id %in% site.pft.links$site) { @@ -70,12 +71,13 @@ site.pft.linkage <- function(settings, site.pft.links) { } } return(site.setting) - }) + } + ) - #putting it in the right format depending if it's multisetting or not + # putting it in the right format depending if it's multisetting or not if (is.MultiSettings(settings)) { new.mset <- MultiSettings(new.mset) - } else{ + } else { new.mset <- new.mset[[1]] } diff --git a/base/settings/R/prepare.settings.R b/base/settings/R/prepare.settings.R index 74b9562e147..eba26b07020 100644 --- a/base/settings/R/prepare.settings.R +++ b/base/settings/R/prepare.settings.R @@ -12,7 +12,6 @@ #' @export prepare.settings #' prepare.settings <- function(settings, force = FALSE) { - if (is.MultiSettings(settings)) { return(invisible(papply(settings, prepare.settings, force = force))) } diff --git a/base/settings/R/site_pft_link_settings.R b/base/settings/R/site_pft_link_settings.R index 9a2d5ac088c..61d6f371f4f 100644 --- a/base/settings/R/site_pft_link_settings.R +++ b/base/settings/R/site_pft_link_settings.R @@ -10,42 +10,44 @@ #' defined then, it will be used for linking sites with the pfts. #' @importFrom purrr %>% site.pft.link.settings <- function(settings) { - - #lets see if there is the pft.site tag under run>inputs + # lets see if there is the pft.site tag under run>inputs pft.site.info <- settings$run$inputs$pft.site # if it's not specified just let it go ! - if (is.null(pft.site.info)) return(settings) + if (is.null(pft.site.info)) { + return(settings) + } # if there is no input file/id defined or if both defined at the same time. # At the moment I'm gonna make sure that there is just one input. if (length(pft.site.info) != 1) { PEcAn.logger::logger.warn( "In your xml tag for linking site with pfts, you either have no input", - "specified or you have more than one input defined. No change was made!") + "specified or you have more than one input defined. No change was made!" 
+ ) return(settings) } if (!is.null(pft.site.info$path)) { - #lets read in the Look Up Table + # lets read in the Look Up Table LUT <- loadPath.sitePFT(settings, pft.site.info$path) #-- if the pft in the LUT is not defined under the pft tag in the body of # the pecan xml - Then I add that. - def.pfts <- purrr::map_chr(settings[["pfts"]], "name") + def.pfts <- purrr::map_chr(settings[["pfts"]], "name") # Create a simple pft tag for the pfts in LUT that are not in the pft tag - pft.l <- LUT[["pft"]][!(LUT[["pft"]] %in% def.pfts)] %>% - trimws() %>% - unique() + pft.l <- LUT[["pft"]][!(LUT[["pft"]] %in% def.pfts)] %>% + trimws() %>% + unique() - new.pfts <- pft.l %>% - purrr::discard(~.x %in% def.pfts) %>% - purrr::map(~list(name = as.character(.x), constants = 1)) %>% - purrr::set_names("pft") + new.pfts <- pft.l %>% + purrr::discard(~ .x %in% def.pfts) %>% + purrr::map(~ list(name = as.character(.x), constants = 1)) %>% + purrr::set_names("pft") - #add them to the list + # add them to the list settings$pfts <- c(settings$pfts, new.pfts) # doing the real linkage and writing the setting down diff --git a/base/settings/R/update.settings.R b/base/settings/R/update.settings.R index 03b8aea011b..82f84071b5f 100644 --- a/base/settings/R/update.settings.R +++ b/base/settings/R/update.settings.R @@ -10,10 +10,11 @@ #' @author Rob Kooper update.settings <- function(settings, force = FALSE) { - if (!force && !is.null(settings$settings.info$settings.updated) - && settings$settings.info$settings.updated == TRUE) { + if (!force && !is.null(settings$settings.info$settings.updated) && + settings$settings.info$settings.updated == TRUE) { PEcAn.logger::logger.info( - "Deprecated settings have been fixed already. Skipping.") + "Deprecated settings have been fixed already. Skipping." + ) return(invisible(settings)) } else { PEcAn.logger::logger.info("Fixing deprecated settings...") @@ -30,13 +31,15 @@ update.settings <- function(settings, force = FALSE) { if (!is.null(settings$database$dbname)) { if (!is.null(settings$database$bety)) { PEcAn.logger::logger.severe( - "Please remove dbname etc from database configuration.") + "Please remove dbname etc from database configuration." + ) } PEcAn.logger::logger.info( "Database tag has changed, please use to store", "information about accessing the BETY database. See also", - "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html." + ) bety <- list() for (name in names(settings$database)) { @@ -50,7 +53,8 @@ update.settings <- function(settings, force = FALSE) { PEcAn.logger::logger.warn( " is now part of the database settings. For more", "information about the database settings see", - "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html." + ) if (is.null(settings$database$bety$write)) { settings$database$bety$write <- settings$bety$write settings$bety$write <- NULL @@ -64,17 +68,20 @@ update.settings <- function(settings, force = FALSE) { if (!is.null(settings$model$type)) { if (settings$model$model_type != settings$model$type) { PEcAn.logger::logger.severe( - "Please remove model_type from model configuration.") + "Please remove model_type from model configuration." + ) } else { PEcAn.logger::logger.info( - "Please remove model_type from model configuration.") + "Please remove model_type from model configuration." 
+ ) } } PEcAn.logger::logger.info( "Model tag has changed, please use to specify", "type of model. See also", - "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html." + ) settings$model$type <- settings$model$model_type settings$model$model_type <- NULL } @@ -82,17 +89,20 @@ update.settings <- function(settings, force = FALSE) { if (!is.null(settings$model$type)) { if (settings$model$name != settings$model$type) { PEcAn.logger::logger.severe( - "Please remove name from model configuration.") + "Please remove name from model configuration." + ) } else { PEcAn.logger::logger.info( - "Please remove name from model configuration.") + "Please remove name from model configuration." + ) } } PEcAn.logger::logger.info( "Model tag has changed, please use to specify", "type of model. See also", - "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html." + ) settings$model$type <- settings$model$name settings$model$name <- NULL } @@ -102,10 +112,12 @@ update.settings <- function(settings, force = FALSE) { if (!is.null(settings$run$inputs$met)) { if (settings$run$site$met != settings$run$inputs$met) { PEcAn.logger::logger.severe( - "Please remove met from model configuration.") + "Please remove met from model configuration." + ) } else { PEcAn.logger::logger.info( - "Please remove met from model configuration.") + "Please remove met from model configuration." + ) } } if (is.null(settings$run$inputs)) { @@ -114,7 +126,8 @@ update.settings <- function(settings, force = FALSE) { PEcAn.logger::logger.info( "Model tag has changed, please use to specify", "met file for a run. See also", - "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html." + ) settings$run$inputs$met$path <- settings$run$site$met settings$run$site$met <- NULL } @@ -140,12 +153,14 @@ update.settings <- function(settings, force = FALSE) { if (tagid %in% names(settings$run$inputs)) { if ("id" %in% names(settings$run$inputs[[tag]])) { if (settings$run$inputs[[tagid]] - != settings$run$inputs[[tag]][["id"]]) { + != settings$run$inputs[[tag]][["id"]]) { PEcAn.logger::logger.severe( - "Please remove", tagid, "from inputs configuration.") + "Please remove", tagid, "from inputs configuration." + ) } else { PEcAn.logger::logger.info( - "Please remove", tagid, "from inputs configuration.") + "Please remove", tagid, "from inputs configuration." + ) } settings$run$inputs[[tagid]] <- NULL } else { @@ -161,10 +176,12 @@ update.settings <- function(settings, force = FALSE) { if (!is.null(settings$run$inputs$veg)) { if (settings$model$veg != settings$run$inputs$veg) { PEcAn.logger::logger.severe( - "Please remove veg from model configuration.") + "Please remove veg from model configuration." + ) } else { PEcAn.logger::logger.info( - "Please remove veg from model configuration.") + "Please remove veg from model configuration." + ) } } if (is.null(settings$run$inputs)) { @@ -173,7 +190,8 @@ update.settings <- function(settings, force = FALSE) { PEcAn.logger::logger.info( "Model tag has changed, please use to specify", "veg file for a run. See also", - "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html." 
+ ) settings$run$inputs$veg <- settings$model$veg settings$model$veg <- NULL } @@ -181,10 +199,12 @@ update.settings <- function(settings, force = FALSE) { if (!is.null(settings$run$inputs$soil)) { if (settings$model$soil != settings$run$inputs$soil) { PEcAn.logger::logger.severe( - "Please remove soil from model configuration.") + "Please remove soil from model configuration." + ) } else { PEcAn.logger::logger.info( - "Please remove soil from model configuration.") + "Please remove soil from model configuration." + ) } } if (is.null(settings$run$inputs)) { @@ -193,14 +213,16 @@ update.settings <- function(settings, force = FALSE) { PEcAn.logger::logger.info( "Model tag has changed, please use to specify", "soil file for a run. See also", - "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html." + ) settings$run$inputs$soil <- settings$model$soil settings$model$soil <- NULL } if (!is.null(settings$model$psscss)) { if (!is.null(settings$run$inputs$pss)) { PEcAn.logger::logger.info( - "Please remove psscss from model configuration.") + "Please remove psscss from model configuration." + ) } if (is.null(settings$run$inputs)) { settings$run$inputs <- list() @@ -208,7 +230,8 @@ update.settings <- function(settings, force = FALSE) { PEcAn.logger::logger.info( "Model tag has changed, please use to specify", "pss/css/site file for a run. See also", - "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html." + ) settings$run$inputs$pss <- file.path(settings$model$psscss, "foo.pss") settings$run$inputs$css <- file.path(settings$model$psscss, "foo.css") settings$run$inputs$site <- file.path(settings$model$psscss, "foo.site") @@ -217,7 +240,8 @@ update.settings <- function(settings, force = FALSE) { if (!is.null(settings$model$inputs)) { if (!is.null(settings$run$inputs$inputs)) { PEcAn.logger::logger.info( - "Please remove inputs from model configuration.") + "Please remove inputs from model configuration." + ) } if (is.null(settings$run$inputs)) { settings$run$inputs <- list() @@ -225,7 +249,8 @@ update.settings <- function(settings, force = FALSE) { PEcAn.logger::logger.info( "Model tag has changed, please use to specify", "lu/thsums file for a run. See also", - "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html.") + "https://pecanproject.github.io/pecan-documentation/develop/pecanXML.html." 
+ ) settings$run$inputs$lu <- file.path(settings$model$inputs, "glu") settings$run$inputs$thsums <- settings$model$inputs settings$model$soil <- NULL diff --git a/base/settings/R/write.settings.R b/base/settings/R/write.settings.R index 3e9de94063c..ffd67ef22f0 100644 --- a/base/settings/R/write.settings.R +++ b/base/settings/R/write.settings.R @@ -9,15 +9,17 @@ #' @author Betsy Cowdery #' @export write.settings write.settings <- function( - settings, - outputfile, - outputdir = settings$outdir) { + settings, + outputfile, + outputdir = settings$outdir) { pecanfile <- file.path(outputdir, outputfile) if (file.exists(pecanfile)) { PEcAn.logger::logger.warn( paste( "File already exists [", pecanfile, - "] file will be overwritten")) + "] file will be overwritten" + ) + ) } XML::saveXML(listToXml(settings, "pecan"), file = pecanfile) } diff --git a/base/settings/examples/examples.papply.R b/base/settings/examples/examples.papply.R index 7255ad1a849..ea45ee17387 100644 --- a/base/settings/examples/examples.papply.R +++ b/base/settings/examples/examples.papply.R @@ -1,15 +1,15 @@ -f = function(settings, ...) { +f <- function(settings, ...) { # Here's how I envisioned a typical use case within a standard PEcAn function - if(is.MultiSettings(settings)) { + if (is.MultiSettings(settings)) { return(papply(settings, f, ...)) } - + # Don't worry about the beolow, it's just some guts to make the function do something we can see l <- list(...) - for(i in seq_along(l)) { + for (i in seq_along(l)) { ind <- length(settings) + 1 settings[[ind]] <- l[[i]] - if(!is.null(names(l))) { + if (!is.null(names(l))) { names(settings)[ind] <- names(l)[i] } } @@ -17,11 +17,10 @@ f = function(settings, ...) { } # Example -settings1 <- Settings(list(a="aa", b=1:3, c="NA")) -settings2 <- Settings(list(a="A", b=4:5, c=paste)) +settings1 <- Settings(list(a = "aa", b = 1:3, c = "NA")) +settings2 <- Settings(list(a = "A", b = 4:5, c = paste)) multiSettings <- MultiSettings(settings1, settings2) # The fucntion should add element $d = D to either a Settings, or each entry in a MultiSettings -f(settings1, d="D") -print(f(multiSettings, d="D"), TRUE) - +f(settings1, d = "D") +print(f(multiSettings, d = "D"), TRUE) diff --git a/base/settings/tests/testthat.R b/base/settings/tests/testthat.R index c943b3f7592..99a1cdb8d41 100644 --- a/base/settings/tests/testthat.R +++ b/base/settings/tests/testthat.R @@ -4,4 +4,4 @@ library(PEcAn.settings) PEcAn.logger::logger.setQuitOnSevere(FALSE) # tests are disbabled until https://github.com/PecanProject/bety/issues/180 is # resolved. -#test_check("PEcAn.settings") +# test_check("PEcAn.settings") diff --git a/base/settings/tests/testthat/helper-get.test.settings.R b/base/settings/tests/testthat/helper-get.test.settings.R index de1cc43a814..a80b714c519 100644 --- a/base/settings/tests/testthat/helper-get.test.settings.R +++ b/base/settings/tests/testthat/helper-get.test.settings.R @@ -1,13 +1,15 @@ .get.test.settings <- function(outdir = NULL) { settings <- NULL - try({ - if (PEcAn.remote::fqdn() == "pecan2.bu.edu") { - settings <- read.settings("data/testinput.pecan2.bu.edu.xml") - } else { - settings <- read.settings("data/testinput.xml") - } - }, - silent = TRUE) + try( + { + if (PEcAn.remote::fqdn() == "pecan2.bu.edu") { + settings <- read.settings("data/testinput.pecan2.bu.edu.xml") + } else { + settings <- read.settings("data/testinput.xml") + } + }, + silent = TRUE + ) # NB environment variables override values in XML here! 
# This is opposite of usual PEcAn rule that XML values always win, @@ -15,8 +17,9 @@ # don't know the database configuration in advance settings$database$bety <- do.call( PEcAn.DB::get_postgres_envvars, - settings$database$bety) - + settings$database$bety + ) + if (is.null(settings)) { skip("Can't get a valid test Settings right now. Skipping test. ") } diff --git a/base/settings/tests/testthat/test.MultiSettings.class.R b/base/settings/tests/testthat/test.MultiSettings.class.R index 28d2114b2a9..e6f216a5d79 100644 --- a/base/settings/tests/testthat/test.MultiSettings.class.R +++ b/base/settings/tests/testthat/test.MultiSettings.class.R @@ -48,20 +48,24 @@ test_that("MultiSettings extracts work as expected", { expect_equal(multiSettings[["a"]], 1) expect_equivalent( multiSettings[["a", collapse = FALSE]], - replicate(3, 1, FALSE)) + replicate(3, 1, FALSE) + ) # Can't collapse because not equal expect_equivalent(multiSettings$b, list(s1$b, s2$b, s3$b)) expect_equivalent(multiSettings[["b"]], list(s1$b, s2$b, s3$b)) expect_equivalent( multiSettings[["b", collapse = FALSE]], - list(s1$b, s2$b, s3$b)) + list(s1$b, s2$b, s3$b) + ) # Can't collapse because not shared by all expect_equivalent(multiSettings$c, list(s1$c, s2$c, s3$c)) expect_equivalent(multiSettings[["c"]], list(s1$c, s2$c, s3$c)) - expect_equivalent(multiSettings[["c", collapse = FALSE]], - list(s1$c, s2$c, s3$c)) + expect_equivalent( + multiSettings[["c", collapse = FALSE]], + list(s1$c, s2$c, s3$c) + ) # Explicitly prohibited to prevent confusion expect_error(multiSettings["a"]) @@ -146,7 +150,8 @@ test_that("Assigning NULL by name removes setting from each Setting", { test_that("Assigning non-globally applies values sequentially to Settings", { multiSettings <- expected <- multiSettingsTemplate expect_silent( - multiSettings[["x", global = FALSE]] <- seq_along(multiSettings)) + multiSettings[["x", global = FALSE]] <- seq_along(multiSettings) + ) for (i in seq_along(multiSettings)) { expected[[i]]$x <- i expect_identical(multiSettings[[i]], expected[[i]]) @@ -223,9 +228,11 @@ test_that("Assigning non-globally works as expected for a values list containing test_that("Assigning non-globally by name throws error for length mismatch", { multiSettings <- multiSettingsTemplate expect_error( - multiSettings[["x", global = FALSE]] <- rep(1, length(multiSettings) - 1)) + multiSettings[["x", global = FALSE]] <- rep(1, length(multiSettings) - 1) + ) expect_error( - multiSettings[["x", global = FALSE]] <- rep(1, length(multiSettings) + 1)) + multiSettings[["x", global = FALSE]] <- rep(1, length(multiSettings) + 1) + ) }) test_that("Assigning non-globally to a single-element MultiSettings expands it to match length of value", { diff --git a/base/settings/tests/testthat/test.Safelist.class.R b/base/settings/tests/testthat/test.Safelist.class.R index 3ec7e60e7dc..9f4cba11158 100644 --- a/base/settings/tests/testthat/test.Safelist.class.R +++ b/base/settings/tests/testthat/test.Safelist.class.R @@ -1,22 +1,22 @@ context("test SafeList class") test_that("SafeList constructors work as expected", { - l <- list(aa = 1, bb = 2, cc = list(dd = 3, ee = 4)) - s1 <- SafeList(aa = 1, bb = 2, cc = list(dd = 3, ee = 4)) - s2 <- SafeList(l) - s3 <- as.SafeList(l) + l <- list(aa = 1, bb = 2, cc = list(dd = 3, ee = 4)) + s1 <- SafeList(aa = 1, bb = 2, cc = list(dd = 3, ee = 4)) + s2 <- SafeList(l) + s3 <- as.SafeList(l) - for (i in seq_along(l)) { - expect_identical(s1[[i]], l[[i]]) - } - expect_identical(s1, s2) - expect_identical(s1, s3) + for (i in 
seq_along(l)) { + expect_identical(s1[[i]], l[[i]]) + } + expect_identical(s1, s2) + expect_identical(s1, s3) - expect_true(inherits(s1, "list")) - expect_true(inherits(s1, "SafeList")) - expect_true(is.SafeList(s1)) - expect_false(is.SafeList(l)) - expect_equal(length(class(s1)), 2) + expect_true(inherits(s1, "list")) + expect_true(inherits(s1, "SafeList")) + expect_true(is.SafeList(s1)) + expect_false(is.SafeList(l)) + expect_equal(length(class(s1)), 2) }) test_that("SafeList indexing works as expected", { diff --git a/base/settings/tests/testthat/test.addSecrets.R b/base/settings/tests/testthat/test.addSecrets.R index f683f135877..2f1eb2c5262 100644 --- a/base/settings/tests/testthat/test.addSecrets.R +++ b/base/settings/tests/testthat/test.addSecrets.R @@ -1,6 +1,6 @@ test_that("`addSecrets` returns settings without updating them when `~/.pecan.xml` does not exist", { settings <- list() - mockery::stub(addSecrets, 'file.exists', FALSE) + mockery::stub(addSecrets, "file.exists", FALSE) expect_equal(addSecrets(settings), settings) }) @@ -10,7 +10,7 @@ test_that("`addSecrets` returns settings without updating them when force is FAL secrets.added = TRUE ) ) - mockery::stub(addSecrets, 'file.exists', TRUE) + mockery::stub(addSecrets, "file.exists", TRUE) expect_equal(addSecrets(settings, force = FALSE), settings) }) @@ -22,18 +22,18 @@ test_that("`addSecrets` adds secret settings when force is TRUE and secrets have ) mocked_xmlToList_result <- list( - database = list( - section = list( - name = "pecan", - password = "pecan" - ) + database = list( + section = list( + name = "pecan", + password = "pecan" ) ) - mockery::stub(addSecrets, 'file.exists', TRUE) - mockery::stub(addSecrets, 'xmlToList', mocked_xmlToList_result) + ) + mockery::stub(addSecrets, "file.exists", TRUE) + mockery::stub(addSecrets, "xmlToList", mocked_xmlToList_result) updated_settings <- addSecrets(settings, force = TRUE) expect_equal(updated_settings$database$section$name, "pecan") - expect_equal(updated_settings$database$section$password, "pecan") + expect_equal(updated_settings$database$section$password, "pecan") }) test_that("`addSecrets` adds secret settings when force is FALSE and secrets have not been added", { @@ -51,9 +51,9 @@ test_that("`addSecrets` adds secret settings when force is FALSE and secrets hav ) ) ) - mockery::stub(addSecrets, 'file.exists', TRUE) - mockery::stub(addSecrets, 'xmlToList', mocked_xmlToList_result) + mockery::stub(addSecrets, "file.exists", TRUE) + mockery::stub(addSecrets, "xmlToList", mocked_xmlToList_result) updated_settings <- addSecrets(settings, force = FALSE) expect_equal(updated_settings$database$section$name, "pecan") expect_equal(updated_settings$database$section$password, "pecan") -}) \ No newline at end of file +}) diff --git a/base/settings/tests/testthat/test.check.all.settings.R b/base/settings/tests/testthat/test.check.all.settings.R index 9405cafb488..ebb3219828f 100644 --- a/base/settings/tests/testthat/test.check.all.settings.R +++ b/base/settings/tests/testthat/test.check.all.settings.R @@ -1,9 +1,9 @@ test_that("`check.inputs()` able to set dbfile path for inputs", { - mockery::stub(check.inputs, 'PEcAn.DB::db.open', TRUE) - mockery::stub(check.inputs, 'PEcAn.DB::db.close', TRUE) - mockery::stub(check.inputs, 'PEcAn.DB::dbfile.file', "test/path/to/file") + mockery::stub(check.inputs, "PEcAn.DB::db.open", TRUE) + mockery::stub(check.inputs, "PEcAn.DB::db.close", TRUE) + mockery::stub(check.inputs, "PEcAn.DB::dbfile.file", "test/path/to/file") - mocked_query_res = 
mockery::mock( + mocked_query_res <- mockery::mock( data.frame( tag = "test", format_id = 1, @@ -13,7 +13,7 @@ test_that("`check.inputs()` able to set dbfile path for inputs", { format_id = 1 ) ) - mockery::stub(check.inputs, 'PEcAn.DB::db.query', mocked_query_res) + mockery::stub(check.inputs, "PEcAn.DB::db.query", mocked_query_res) settings <- list( database = list( @@ -30,7 +30,7 @@ test_that("`check.inputs()` able to set dbfile path for inputs", { type = "ed" ) ) - + updated_settings <- check.inputs(settings) expect_equal(updated_settings$run$inputs$test$path, "test/path/to/file") }) @@ -67,8 +67,8 @@ test_that("`check.run.settings` able to set sensitivity analysis parameters base test_that("`check.run.settings` able to update run site parameters based on site id passed", { mockery::stub( - check.run.settings, - 'PEcAn.DB::db.query', + check.run.settings, + "PEcAn.DB::db.query", data.frame( sitename = "US-1", lat = 45, @@ -91,8 +91,8 @@ test_that("`check.run.settings` able to update run site parameters based on site test_that("`check.model.settings` able to update model parameters based on passed model id in settings", { mockery::stub( - check.model.settings, - 'PEcAn.DB::db.query', + check.model.settings, + "PEcAn.DB::db.query", data.frame( id = 7, revision = 82, @@ -101,15 +101,15 @@ test_that("`check.model.settings` able to update model parameters based on passe ) ) mockery::stub( - check.model.settings, - 'PEcAn.DB::dbfile.file', + check.model.settings, + "PEcAn.DB::dbfile.file", "/usr/local/bin/ed2.r82" ) settings <- list( model = list( id = 7 ) - ) + ) updated_settings <- check.model.settings(settings, 1) expect_equal(updated_settings$model$id, 7) @@ -121,8 +121,8 @@ test_that("`check.model.settings` able to update model parameters based on passe test_that("`check.model.settings` able to update model parameters based on passed model type in settings", { mockery::stub( - check.model.settings, - 'PEcAn.DB::db.query', + check.model.settings, + "PEcAn.DB::db.query", data.frame( id = 7, revision = 82, @@ -131,15 +131,15 @@ test_that("`check.model.settings` able to update model parameters based on passe ) ) mockery::stub( - check.model.settings, - 'PEcAn.DB::dbfile.file', + check.model.settings, + "PEcAn.DB::dbfile.file", "/usr/local/bin/ed2.r82" ) settings <- list( model = list( type = "ed" ) - ) + ) updated_settings <- check.model.settings(settings, 1) expect_equal(updated_settings$model$id, 7) @@ -150,8 +150,8 @@ test_that("`check.model.settings` able to update model parameters based on passe }) test_that("`check.workflow.settings` able to set workflow defaults in case they are not specified", { - mockery::stub(check.workflow.settings, 'PEcAn.DB::db.query', list(id = 100)) - mockery::stub(check.workflow.settings, 'file.exists', TRUE) + mockery::stub(check.workflow.settings, "PEcAn.DB::db.query", list(id = 100)) + mockery::stub(check.workflow.settings, "file.exists", TRUE) settings <- list( database = list( @@ -169,9 +169,9 @@ test_that("`check.workflow.settings` able to set workflow defaults in case they }) test_that("`check.database` able to set the database object with defaults correctly if nothing specified", { - rdriver <- paste0("R", "PostgreSQL") + rdriver <- paste0("R", "PostgreSQL") withr::with_package(rdriver, { - mockery::stub(check.database, 'PEcAn.DB::db.exists', TRUE) + mockery::stub(check.database, "PEcAn.DB::db.exists", TRUE) database <- list() updated_database <- check.database(database) expect_equal(updated_database$driver, "PostgreSQL") @@ -183,10 +183,10 @@ 
test_that("`check.database` able to set the database object with defaults correc }) test_that("`check.database.settings` able to set bety parameters correctly if they are not specified", { - mockery::stub(check.database.settings, 'PEcAn.DB::db.exists', TRUE) + mockery::stub(check.database.settings, "PEcAn.DB::db.exists", TRUE) mockery::stub( - check.database.settings, - 'check.database', + check.database.settings, + "check.database", list( driver = "PostgreSQL", host = "localhost", @@ -195,9 +195,9 @@ test_that("`check.database.settings` able to set bety parameters correctly if th dbname = "bety" ) ) - mockery::stub(check.database.settings, 'PEcAn.DB::db.open', TRUE) - mockery::stub(check.database.settings, 'PEcAn.DB::db.close', TRUE) - mockery::stub(check.database.settings, 'check.bety.version', TRUE) + mockery::stub(check.database.settings, "PEcAn.DB::db.open", TRUE) + mockery::stub(check.database.settings, "PEcAn.DB::db.close", TRUE) + mockery::stub(check.database.settings, "check.bety.version", TRUE) settings <- list( database = list( @@ -228,7 +228,7 @@ test_that("`check.ensemble.settings` able to update ensemble settings when varia settings <- list( ensemble = list(), sensitivity.analysis = list( - variable = "GPP" + variable = "GPP" ), run = list( start.date = "2000-01-01", @@ -242,4 +242,4 @@ test_that("`check.ensemble.settings` able to update ensemble settings when varia expect_equal(settings$ensemble$end.year, 2003) expect_equal(settings$ensemble$samplingspace$parameters$method, "uniform") expect_equal(settings$ensemble$samplingspace$met$method, "sampling") -}) \ No newline at end of file +}) diff --git a/base/settings/tests/testthat/test.check.bety.version.R b/base/settings/tests/testthat/test.check.bety.version.R index e59158aece3..9c78fb5ec70 100644 --- a/base/settings/tests/testthat/test.check.bety.version.R +++ b/base/settings/tests/testthat/test.check.bety.version.R @@ -1,4 +1,3 @@ - PEcAn.logger::logger.setQuitOnSevere(FALSE) on.exit(PEcAn.logger::logger.setQuitOnSevere(TRUE), add = TRUE) @@ -6,38 +5,38 @@ on.exit(PEcAn.logger::logger.setQuitOnSevere(TRUE), add = TRUE) test_that("`check.bety.version`` gives errors for missing significant versions", { dbcon <- 1 mockery::stub( - check.bety.version, - "PEcAn.DB::db.query", + check.bety.version, + "PEcAn.DB::db.query", list(version = c("2")) ) expect_error( - check.bety.version(dbcon), + check.bety.version(dbcon), "No version 1, how did this database get created?" ) mockery::stub( - check.bety.version, - "PEcAn.DB::db.query", + check.bety.version, + "PEcAn.DB::db.query", list(version = c("1")) ) expect_error( - check.bety.version(dbcon), + check.bety.version(dbcon), "Missing migration 20140617163304, this associates files with models." 
) - + mockery::stub( - check.bety.version, - "PEcAn.DB::db.query", list(version = c("1","20140617163304")) + check.bety.version, + "PEcAn.DB::db.query", list(version = c("1", "20140617163304")) ) expect_error( check.bety.version(dbcon), "Missing migration 20140708232320, this introduces geometry column in sites" ) - + mockery::stub( - check.bety.version, - "PEcAn.DB::db.query", - list(version = c("1","20140617163304","20140708232320")) + check.bety.version, + "PEcAn.DB::db.query", + list(version = c("1", "20140617163304", "20140708232320")) ) expect_error( check.bety.version(dbcon), @@ -45,9 +44,9 @@ test_that("`check.bety.version`` gives errors for missing significant versions", ) mockery::stub( - check.bety.version, - "PEcAn.DB::db.query", - list(version = c("1","20140617163304","20140708232320","20140729045640")) + check.bety.version, + "PEcAn.DB::db.query", + list(version = c("1", "20140617163304", "20140708232320", "20140729045640")) ) expect_error( check.bety.version(dbcon), @@ -55,10 +54,9 @@ test_that("`check.bety.version`` gives errors for missing significant versions", ) mockery::stub( - check.bety.version, - "PEcAn.DB::db.query", - list(version = c("1","20140617163304","20140708232320","20140729045640","20151011190026")) + check.bety.version, + "PEcAn.DB::db.query", + list(version = c("1", "20140617163304", "20140708232320", "20140729045640", "20151011190026")) ) expect_silent(check.bety.version(dbcon)) }) - diff --git a/base/settings/tests/testthat/test.clean.settings.R b/base/settings/tests/testthat/test.clean.settings.R index cf066e8220b..568f5c6f8b5 100644 --- a/base/settings/tests/testthat/test.clean.settings.R +++ b/base/settings/tests/testthat/test.clean.settings.R @@ -2,14 +2,13 @@ PEcAn.logger::logger.setQuitOnSevere(FALSE) on.exit(PEcAn.logger::logger.setQuitOnSevere(TRUE)) test_that("`test.clean.settings` works correctly for invalid and correct inputs", { - # Error if input file is NULL or does not exist expect_error( - clean.settings(inputfile = NULL), + clean.settings(inputfile = NULL), "Could not find input file." ) expect_error( - clean.settings(inputfile = "nonexistent.xml"), + clean.settings(inputfile = "nonexistent.xml"), "Could not find input file." 
) @@ -18,7 +17,7 @@ test_that("`test.clean.settings` works correctly for invalid and correct inputs" clean.settings(inputfile = "data/testinputcleanup.xml", outputfile = tf) test_xml <- readLines(tf) t <- XML::xmlToList(XML::xmlParse(test_xml)) - + # Check for updated settings after cleanup expect_equal(t$outdir, "pecan") expect_equal(t$rundir, NULL) diff --git a/base/settings/tests/testthat/test.createMultisiteMultiSettings.R b/base/settings/tests/testthat/test.createMultisiteMultiSettings.R index a74f1b45c0c..751be5665e1 100644 --- a/base/settings/tests/testthat/test.createMultisiteMultiSettings.R +++ b/base/settings/tests/testthat/test.createMultisiteMultiSettings.R @@ -12,27 +12,27 @@ test_that("`createSitegroupMultiSettings` able to create a MultiSettings object site_id = list("1000025731", "1000025732") ) mockery::stub( - createSitegroupMultiSettings, - 'PEcAn.DB::db.query', + createSitegroupMultiSettings, + "PEcAn.DB::db.query", siteIds ) - + # without specifying nSite multi_site_settings <- createSitegroupMultiSettings( - templateSettings = templateSettings, - sitegroupId = 10000, + templateSettings = templateSettings, + sitegroupId = 10000, params = NULL ) - for(i in seq_along(multi_site_settings)) { + for (i in seq_along(multi_site_settings)) { expect_equal(multi_site_settings[[i]]$run$site$id, siteIds$site_id[[i]]) } expect_equal(length(multi_site_settings), length(siteIds$site_id)) - + # with nSite specified multi_site_settings <- createSitegroupMultiSettings( - templateSettings = templateSettings, - sitegroupId = 10000, + templateSettings = templateSettings, + sitegroupId = 10000, nSite = 1, params = NULL ) @@ -125,11 +125,11 @@ test_that("`setDates` function sets start and end dates correctly", { end.year = NULL ) ) - + startDate <- "2023-01-01" endDate <- "2023-12-31" updated_settings <- setDates(settings, startDate, endDate) - + expect_equal(updated_settings$run$start.date, startDate) expect_equal(updated_settings$run$end.date, endDate) expect_equal(updated_settings$ensemble$start.year, lubridate::year(startDate)) diff --git a/base/settings/tests/testthat/test.get_args.R b/base/settings/tests/testthat/test.get_args.R index a23cb12eae9..e58a595010b 100644 --- a/base/settings/tests/testthat/test.get_args.R +++ b/base/settings/tests/testthat/test.get_args.R @@ -1,7 +1,7 @@ test_that("`get_args` throws an error with missing settings file", { withr::with_envvar(c(PECAN_SETTINGS = "doesnotexists.xml"), { expect_error( - get_args(), + get_args(), "--settings \"doesnotexists.xml\" not a valid file" ) }) @@ -9,10 +9,10 @@ test_that("`get_args` throws an error with missing settings file", { test_that("`get_args` works for existing settings file", { withr::with_envvar(c(PECAN_SETTINGS = "pecan.xml"), { - mockery::stub(get_args, 'file.exists', TRUE) + mockery::stub(get_args, "file.exists", TRUE) args <- get_args() expect_equal(args$settings, "pecan.xml") expect_equal(args$continue, FALSE) expect_equal(args$help, FALSE) }) -}) \ No newline at end of file +}) diff --git a/base/settings/tests/testthat/test.loadPath_sitePFT.R b/base/settings/tests/testthat/test.loadPath_sitePFT.R index 3eeec027b0d..88e3be0c9c8 100644 --- a/base/settings/tests/testthat/test.loadPath_sitePFT.R +++ b/base/settings/tests/testthat/test.loadPath_sitePFT.R @@ -20,7 +20,7 @@ test_that("`loadPath.sitePFT` gives an error for file with number of columns not }) }) -test_that("`loadPath.sitePFT` works for correct format of input file",{ +test_that("`loadPath.sitePFT` works for correct format of input file", { 
withr::with_tempfile("tf", fileext = ".csv", { settings <- list(host = "pecan") df <- data.frame( @@ -32,4 +32,4 @@ test_that("`loadPath.sitePFT` works for correct format of input file",{ links <- utils::read.table(tf, header = TRUE, sep = ",") expect_equal(loadPath.sitePFT(settings, tf), `colnames<-`(links, c("site", "pft"))) }) -}) \ No newline at end of file +}) diff --git a/base/settings/tests/testthat/test.pft_site_linker.R b/base/settings/tests/testthat/test.pft_site_linker.R index 3126bbc8043..60e325f4b1d 100644 --- a/base/settings/tests/testthat/test.pft_site_linker.R +++ b/base/settings/tests/testthat/test.pft_site_linker.R @@ -7,7 +7,7 @@ test_that("`site.pft.linkage` gives error for empty or incomplete lookup-table(L " with site ids under site column and pft names under pft column." ) ) - + LUT <- data.frame(h1 = c("1000025731", "1000025731")) expect_error( @@ -50,10 +50,10 @@ test_that("`site.pft.linkage` able to add site pft name if id is specified and i site = c("1000025731", "1000025732"), pft = c("temperate.broadleaf.deciduous1", "temperate.needleleaf.evergreen") ) - + new_settings <- site.pft.linkage(settings, LUT) expect_equal( new_settings$run$site$site.pft$pft.name, "temperate.broadleaf.deciduous1" ) -}) \ No newline at end of file +}) diff --git a/base/settings/tests/testthat/test.read.settings.R b/base/settings/tests/testthat/test.read.settings.R index 4911192c03f..1d1fe6292f0 100644 --- a/base/settings/tests/testthat/test.read.settings.R +++ b/base/settings/tests/testthat/test.read.settings.R @@ -37,18 +37,19 @@ test_that("read.settings() warns if named input file doesn't exist (but pecan.xm # this returns FALSE in the first call to the mock function, # FALSE in the second call, and TRUE in the third call m <- mockery::mock(FALSE, FALSE, TRUE) - mockery::stub(read.settings, 'file.exists', m) + mockery::stub(read.settings, "file.exists", m) mockery::stub( - read.settings, - 'XML::xmlParse', + read.settings, + "XML::xmlParse", " test - ") + " + ) - #hacky way to check for errors b/c PEcAn.logger errors are non-standard and - #not captured by testthat::expect_message() or expect_error() + # hacky way to check for errors b/c PEcAn.logger errors are non-standard and + # not captured by testthat::expect_message() or expect_error() x <- capture.output( read.settings("blahblahblah.xml"), type = "message" @@ -59,7 +60,7 @@ test_that("read.settings() warns if named input file doesn't exist (but pecan.xm }) test_that("read settings returns error if no settings file found (#1124)", { - withr::with_tempdir({ #in a dir with no pecan.xml + withr::with_tempdir({ # in a dir with no pecan.xml expect_error(read.settings("nofile.xml"), "Could not find a pecan.xml file") }) }) @@ -90,19 +91,20 @@ test_that("check.settings throws error if pft has different type than model", { test_that("check.settings gives sensible defaults", { ## This provides the minimum inputs s1 <- list( - pfts = list( - pft = list(name = "salix", outdir = file.path(testdir, "pft"))), - database = NULL, model = list(type = "BIOCRO"), - run = list( - start.date = lubridate::now(), - end.date = lubridate::days(1) + lubridate::now() - ), - # would create in cwd if not specified - outdir = file.path(testdir, "PEcAn_@WORKFLOW@") - ) + pfts = list( + pft = list(name = "salix", outdir = file.path(testdir, "pft")) + ), + database = NULL, model = list(type = "BIOCRO"), + run = list( + start.date = lubridate::now(), + end.date = lubridate::days(1) + lubridate::now() + ), + # would create in cwd if not specified + outdir = 
file.path(testdir, "PEcAn_@WORKFLOW@") + ) s2 <- check.settings(update.settings(s1)) - expect_true(is.null(s2$database) - || (length(s2$database) == 1 && names(s2$database) == "dbfiles")) + expect_true(is.null(s2$database) || + (length(s2$database) == 1 && names(s2$database) == "dbfiles")) s <- .get.test.settings(testdir) s1$database <- s$database @@ -148,15 +150,19 @@ test_that("check.settings uses run dates if dates not given in ensemble or sensi s <- .get.test.settings(testdir) for (node in c("ensemble", "sensitivity.analysis")) { - s1 <- list(pfts = s$pfts, database = list(bety = s$database$bety), - run = s$run, model = s$model, outdir = s$outdir) + s1 <- list( + pfts = s$pfts, database = list(bety = s$database$bety), + run = s$run, model = s$model, outdir = s$outdir + ) s1[[node]] <- list(variable = "FOO") s2 <- check.settings(update.settings(s1)) expect_equivalent(s2[[node]]$start.year, lubridate::year(s2$run$start.date)) expect_equivalent(s2[[node]]$end.year, lubridate::year(s2$run$end.date)) - s1 <- list(pfts = s$pfts, database = list(bety = s$database$bety), - run = NA, model = s$model) + s1 <- list( + pfts = s$pfts, database = list(bety = s$database$bety), + run = NA, model = s$model + ) s1[[node]] <- list(variable = "FOO", start.year = 1000, end.year = 1000) expect_error(check.settings(update.settings(s1))) @@ -171,8 +177,10 @@ test_that("sensitivity.analysis and ensemble use other's settings if null", { nodes <- c("sensitivity.analysis", "ensemble") for (node1 in nodes) { node2 <- nodes[nodes != node1] - s1 <- list(pfts = s$pfts, database = list(bety = s$database$bety), - run = s$run, model = s$model, outdir = s$outdir) + s1 <- list( + pfts = s$pfts, database = list(bety = s$database$bety), + run = s$run, model = s$model, outdir = s$outdir + ) s1[[node1]] <- list(variable = "FOO", start.year = 2003, end.year = 2004) s1[[node2]] <- list() s2 <- check.settings(update.settings(s1)) @@ -267,7 +275,8 @@ test_that("invalid pathname is placed in home directory", { s1 <- check.settings(update.settings(s)) expect_equal( s1$database$dbfiles, - file.path(Sys.getenv("HOME"), s$database$dbfiles)) + file.path(Sys.getenv("HOME"), s$database$dbfiles) + ) }) test_that("update.settings only runs once unless forced", { diff --git a/base/settings/tests/testthat/test.site_pft_link_settings.R b/base/settings/tests/testthat/test.site_pft_link_settings.R index 061a46122b9..c3dd9244cc8 100644 --- a/base/settings/tests/testthat/test.site_pft_link_settings.R +++ b/base/settings/tests/testthat/test.site_pft_link_settings.R @@ -9,7 +9,7 @@ test_that("`site.pft.link.settings` able to link sites to pfts and update settin ) ) ) - ) + ) df <- data.frame( site = c("1000025731", "1000025732"), pft = c("temperate.broadleaf.deciduous1", "temperate.needleleaf.evergreen") @@ -18,9 +18,9 @@ test_that("`site.pft.link.settings` able to link sites to pfts and update settin updated_settings <- site.pft.link.settings(settings) print(updated_settings) print(length(updated_settings$pfts)) - for(i in 1:length(updated_settings$pfts)) { + for (i in 1:length(updated_settings$pfts)) { expect_equal(updated_settings$pfts[[i]]$name, df$pft[i]) expect_equal(updated_settings$pfts$pft$constants, 1) } }) -}) \ No newline at end of file +}) diff --git a/base/settings/tests/testthat/test.write.settings.R b/base/settings/tests/testthat/test.write.settings.R index fe54129af0b..5a5fcfd4728 100644 --- a/base/settings/tests/testthat/test.write.settings.R +++ b/base/settings/tests/testthat/test.write.settings.R @@ -1,13 +1,14 @@ 
-test_that("`write.settings` able to write a settings file based on input list",{ - withr::with_tempfile("tf", fileext=".xml",{ +test_that("`write.settings` able to write a settings file based on input list", { + withr::with_tempfile("tf", fileext = ".xml", { writeLines( " testdir - ", - con = tf) + ", + con = tf + ) t <- XML::xmlToList(XML::xmlParse(tf)) - mockery::stub(write.settings, 'file.path', tf) + mockery::stub(write.settings, "file.path", tf) expect_equal(write.settings(t, tf), tf) expect_equal(XML::xmlToList(XML::xmlParse(tf)), t) }) -}) \ No newline at end of file +}) diff --git a/base/utils/R/Defunct.R b/base/utils/R/Defunct.R index 4533ce75891..057e6e0b3ab 100644 --- a/base/utils/R/Defunct.R +++ b/base/utils/R/Defunct.R @@ -13,6 +13,6 @@ NULL #' @usage NULL #' @aliases convert.input convert.input-defunct #' @export -convert.input <- function(...){ +convert.input <- function(...) { .Defunct("PEcAn.DB::convert_input", package = NULL) } # convert.input() diff --git a/base/utils/R/cf2date.R b/base/utils/R/cf2date.R index 79380c8fc50..0f44aadefbb 100644 --- a/base/utils/R/cf2date.R +++ b/base/utils/R/cf2date.R @@ -1,14 +1,14 @@ #' Convert CF-style date-time to POSIXct date-time #' -#' @param value Numeric value of CF date-time +#' @param value Numeric value of CF date-time #' @param unit CF style unit (e.g. "days since 2010-01-01") #' @param tz Time zone of result (default = "UTC") #' @return POSIXct datetime -#' +#' #' @export -#' +#' #' @author Alexey Shiklomanov -#' +#' #' @examples #' cf2datetime(5, "days since 1981-01-01") #' cf2datetime(27, "minutes since 1963-01-03 12:00:00 -05:00") @@ -32,9 +32,9 @@ cf2datetime <- function(value, unit, tz = "UTC") { #' @param ... Additional arguments to `as.POSIXct`. A common one is #' `tz` for time-zone (e.g. `tz = "UTC"`). #' @return Numeric value of date-time in target CF unit -#' +#' #' @export -#' +#' #' @examples #' datetime2cf("1990-10-05", "days since 1990-01-01", tz = "UTC") datetime2cf <- function(datetime, unit, ...) { @@ -54,15 +54,15 @@ datetime2cf <- function(datetime, unit, ...) 
{ #' @inheritParams cf2datetime #' @inheritParams datetime2cf #' @return Numeric Julian date -#' +#' #' @export datetime2doy -#' +#' #' @author Alexey Shiklomanov -#' +#' #' @examples #' datetime2doy("2010-01-01") # 1 #' datetime2doy("2010-01-01 12:00:00") # 1.5 -#' cf2doy(0, "days since 2007-01-01") +#' cf2doy(0, "days since 2007-01-01") #' cf2doy(5, "days since 2010-01-01") # 6 #' cf2doy(5, "days since 2010-01-01") # 6 datetime2doy <- function(datetime, tz = "UTC") { @@ -78,7 +78,7 @@ datetime2doy <- function(datetime, tz = "UTC") { #' Convert from CF to DOY #' @rdname datetime2doy #' @export cf2doy -#' +#' #' @author Alexey Shiklomanov -#' +#' cf2doy <- function(value, unit, tz = "UTC") datetime2doy(cf2datetime(value, unit, tz), tz) diff --git a/base/utils/R/clear.scratch.R b/base/utils/R/clear.scratch.R index 924f8e3d149..f62d426fc9d 100644 --- a/base/utils/R/clear.scratch.R +++ b/base/utils/R/clear.scratch.R @@ -1,4 +1,3 @@ - #' Removes previous model run output from worker node local scratch directories on EBI-CLUSTER #' #' @author Shawn Serbin @@ -10,21 +9,19 @@ #' clear.scratch(settings) #' } clear.scratch <- function(settings) { - ### Setup script clear.scratch <- system.file("clear.scratch.sh", package = "PEcAn.utils") - host <- settings$host + host <- settings$host nodes <- paste0("all.q@compute-0-", seq(0, 24, 1), ".local") - + if (any(grep("cluster", host$name))) { for (i in nodes) { print(" ") print(paste("----- Removing output on node: ", i, sep = "")) system(paste0("ssh -T ", settings$host$name, " qlogin -q ", i, " < ", clear.scratch)) print(" ") - } ### End of for loop - + } ### End of for loop } else { print("---- No output to delete. Output host is not EBI-CLUSTER ----") - } ### End of if/else + } ### End of if/else } # clear.scratch diff --git a/base/utils/R/datasets.R b/base/utils/R/datasets.R index 38f3e27a92c..4e8613d4288 100644 --- a/base/utils/R/datasets.R +++ b/base/utils/R/datasets.R @@ -1,4 +1,3 @@ - #' Standardized variable names and units for PEcAn #' #' A lookup table giving standard names, units and descriptions for variables in PEcAn input/output files. @@ -22,6 +21,6 @@ #' \item{dim1,dim2,dim3,dim4}{Dimensions across which is this variable allowed to vary. #' Dimension names are themselves standard vars and must be present in the table with category "Dimension"} #' \item{Description}{Further details. For composite measures, list the variables it is calculated from} -#'} +#' } #' "standard_vars" diff --git a/base/utils/R/days_in_year.R b/base/utils/R/days_in_year.R index ef63c86fc45..b3b2f757eef 100644 --- a/base/utils/R/days_in_year.R +++ b/base/utils/R/days_in_year.R @@ -9,12 +9,12 @@ #' @return integer vector, all either 365 or 366 #' @export #' @examples -#' days_in_year(2010) # Not a leap year -- returns 365 -#' days_in_year(2012) # Leap year -- returns 366 -#' days_in_year(2000:2008) # Function is vectorized over years +#' days_in_year(2010) # Not a leap year -- returns 365 +#' days_in_year(2012) # Leap year -- returns 366 +#' days_in_year(2000:2008) # Function is vectorized over years days_in_year <- function(year, leap_year = TRUE) { if (any(year %% 1 != 0)) { - PEcAn.logger::logger.severe("Year must be integer. Given ", year, '.') + PEcAn.logger::logger.severe("Year must be integer. 
Given ", year, ".") } - ifelse( leap_year & lubridate::leap_year(year), yes = 366, no = 365) + ifelse(leap_year & lubridate::leap_year(year), yes = 366, no = 365) } diff --git a/base/utils/R/distn.stats.R b/base/utils/R/distn.stats.R index f8ee4ddd1df..6e8ff19a60c 100644 --- a/base/utils/R/distn.stats.R +++ b/base/utils/R/distn.stats.R @@ -10,36 +10,36 @@ #' @export #' @author David LeBauer #' @examples -#' distn.stats('norm', 0, 1) +#' distn.stats("norm", 0, 1) distn.stats <- function(distn, a, b) { mean <- sd <- NULL if (distn == "beta") { mean <- a / (a + b) - sd <- sqrt(a * b / ((a + b) ^ 2 * (a + b + 1))) + sd <- sqrt(a * b / ((a + b)^2 * (a + b + 1))) } else if (distn == "exp") { mean <- 1 / a - sd <- 1 / a + sd <- 1 / a } else if (distn == "f") { mean <- b / (b - 2) - sd <- sqrt(2 * b * b * (a + b - 2) / (a * (b - 2) ^ 2 * (b - 4))) + sd <- sqrt(2 * b * b * (a + b - 2) / (a * (b - 2)^2 * (b - 4))) } else if (distn == "gamma") { - mean <- a/b - sd <- sqrt(a / b ^ 2) + mean <- a / b + sd <- sqrt(a / b^2) } else if (distn == "lnorm") { - mean <- exp(a + 0.5 * b ^ 2) - sd <- sqrt(exp(2 * a + b ^ 2) * (exp(b ^ 2) - 1)) + mean <- exp(a + 0.5 * b^2) + sd <- sqrt(exp(2 * a + b^2) * (exp(b^2) - 1)) } else if (distn == "norm") { mean <- a - sd <- b + sd <- b } else if (distn == "t") { mean <- 0 - sd <- sqrt(a / (a - 2)) + sd <- sqrt(a / (a - 2)) } else if (distn == "unif") { mean <- 0.5 * (a + b) - sd <- (b - a) / sqrt(12) + sd <- (b - a) / sqrt(12) } else if (distn == "weibull") { mean <- b * gamma(1 + 1 / a) - sd <- b ^ 2 * (gamma(1 + 2 / a) - (gamma(1 + 1 / a)) ^ 2) + sd <- b^2 * (gamma(1 + 2 / a) - (gamma(1 + 1 / a))^2) } return(c(mean, sd)) } # distn.stats diff --git a/base/utils/R/download.url.R b/base/utils/R/download.url.R index 06561891332..3f3b689c136 100644 --- a/base/utils/R/download.url.R +++ b/base/utils/R/download.url.R @@ -16,7 +16,7 @@ #' #' @examples #' \dontrun{ -#' download.url('http://localhost/', index.html) +#' download.url("http://localhost/", index.html) #' } download.url <- function(url, file, timeout = 600, .opts = list(), retry = TRUE) { count <- 0 @@ -31,8 +31,9 @@ download.url <- function(url, file, timeout = 600, .opts = list(), retry = TRUE) res <- curl::curl_download( url = url, destfile = file, - handle = curl::new_handle(.list = .opts)) - + handle = curl::new_handle(.list = .opts) + ) + res } # download.url diff --git a/base/utils/R/full.path.R b/base/utils/R/full.path.R index 1221a81513a..57c1feb4234 100644 --- a/base/utils/R/full.path.R +++ b/base/utils/R/full.path.R @@ -1,4 +1,3 @@ - #' Creates an absolute path to a folder. #' #' This will take a folder and make it into an absolute folder name. 
It @@ -10,7 +9,7 @@ #' @return absolute path #' @export #' @examples -#' full.path('pecan') +#' full.path("pecan") full.path <- function(folder) { # normalize pathname folder <- normalizePath(folder, mustWork = FALSE) diff --git a/base/utils/R/get.ensemble.inputs.R b/base/utils/R/get.ensemble.inputs.R index 3377dd507ea..6924f13ec7b 100644 --- a/base/utils/R/get.ensemble.inputs.R +++ b/base/utils/R/get.ensemble.inputs.R @@ -12,19 +12,18 @@ #' @return find correct ensemble inputs #' @export -get.ensemble.inputs <- function(settings, ens = 1){ - - ##grab all inputs for this ensemble member +get.ensemble.inputs <- function(settings, ens = 1) { + ## grab all inputs for this ensemble member inputs <- list() input.list <- names(settings$run$inputs) input.table <- table(input.list) - - ##loop over inputs to get the correct inputs for each type - for(i in seq_along(input.table)){ + + ## loop over inputs to get the correct inputs for each type + for (i in seq_along(input.table)) { sel <- which(input.list == names(input.table)[i]) - inputs[[i]] <- settings$run$inputs[[(ens-1) %% input.table[i] + 1]] + inputs[[i]] <- settings$run$inputs[[(ens - 1) %% input.table[i] + 1]] names(inputs)[i] <- names(input.table)[i] } - + return(inputs) } diff --git a/base/utils/R/help.R b/base/utils/R/help.R index 175d8f302ef..83ca93dab04 100644 --- a/base/utils/R/help.R +++ b/base/utils/R/help.R @@ -15,12 +15,12 @@ #' #' 1. acquisition of meteorological inputs #' 2. synthesis of physiological trait data as the posterior distribution of a -#' Bayesian meta-analysis +#' Bayesian meta-analysis #' 3. sampling trait meta-analysis posterior distributions to parameterize -#' ensembles of ED2 and other ecophysiological models +#' ensembles of ED2 and other ecophysiological models #' 4. probabilistic forecasts #' 5. postprocessing to constrain forecasts and model parameters with field, -#' meterological, eddy flux, and spectral data, and +#' meterological, eddy flux, and spectral data, and #' 6. provenance tracking #' #' PECAn integrates available data into ecological forecasts by running diff --git a/base/utils/R/listToArgString.R b/base/utils/R/listToArgString.R index 880c07d0007..6388ce1fa49 100644 --- a/base/utils/R/listToArgString.R +++ b/base/utils/R/listToArgString.R @@ -9,17 +9,17 @@ ## This little utility is used in a few places in data.atmosphere. 
listToArgString <- function(l) { arg.string <- "" - arg.names <- names(l) - + arg.names <- names(l) + for (i in seq_along(l)) { # Quote value if character - val <- .parseArg(l[[i]]) + val <- .parseArg(l[[i]]) name <- ifelse(is.null(arg.names), "", arg.names[i]) - + if (i > 1) { arg.string <- paste0(arg.string, ", ") } - + if (name == "") { arg.string <- paste0(arg.string, val) } else { @@ -34,42 +34,44 @@ listToArgString <- function(l) { return(paste0("'", x, "'")) } else if (is.null(x)) { return("NULL") - } else if(is.data.frame(x)){ + } else if (is.data.frame(x)) { # note that this will treat everything as characters - foo <- sapply(1:ncol(x), function(v) paste(colnames(x)[v], - "=c('" , - paste(x[,v], collapse = "','"), - "')")) + foo <- sapply(1:ncol(x), function(v) { + paste( + colnames(x)[v], + "=c('", + paste(x[, v], collapse = "','"), + "')" + ) + }) foobar <- paste0("data.frame(", paste(foo, collapse = ","), ")") return(foobar) - }else if(is.list(x)){ + } else if (is.list(x)) { # does your list have sublist - has_sub_list <- sapply(x,is.list) - if(any(has_sub_list)){ + has_sub_list <- sapply(x, is.list) + if (any(has_sub_list)) { foo.string <- list() - for(r in seq_along(has_sub_list)){ - if(has_sub_list[r]){ + for (r in seq_along(has_sub_list)) { + if (has_sub_list[r]) { foonames <- names(x[[r]]) foobar <- unlist(x[[r]]) - tmp <-sapply(seq_along(x[[r]]), function(pas) paste0(foonames[pas], "='", foobar[pas], "'")) - foo <- paste0(names(x)[r],"=list(",toString(tmp),")") + tmp <- sapply(seq_along(x[[r]]), function(pas) paste0(foonames[pas], "='", foobar[pas], "'")) + foo <- paste0(names(x)[r], "=list(", toString(tmp), ")") foo.string[[r]] <- foo - }else{ # this doesn't take care of everything - foo.string[[r]] <- paste0(names(x)[r], "='", x[[r]],"'") + } else { # this doesn't take care of everything + foo.string[[r]] <- paste0(names(x)[r], "='", x[[r]], "'") } } - val <- paste0("list(",toString(foo.string),")") + val <- paste0("list(", toString(foo.string), ")") return(val) - - }else{ + } else { foonames <- names(x) foobar <- unlist(x) - tmp <-sapply(seq_along(x), function(pas) paste0(foonames[pas], "='", foobar[pas], "'")) - foo <- paste0("list(",toString(tmp),")") + tmp <- sapply(seq_along(x), function(pas) paste0(foonames[pas], "='", foobar[pas], "'")) + foo <- paste0("list(", toString(tmp), ")") return(foo) } - - }else { + } else { return(x) } } # .parseArg diff --git a/base/utils/R/mail.R b/base/utils/R/mail.R index 35735aab676..4ba215c0bbf 100644 --- a/base/utils/R/mail.R +++ b/base/utils/R/mail.R @@ -1,4 +1,3 @@ - #' Sends email. This assumes the program sendmail is installed. 
#' #' @param from the sender of the mail message @@ -10,7 +9,7 @@ #' @export #' @examples #' \dontrun{ -#' sendmail('bob@@example.com', 'joe@@example.com', 'Hi', 'This is R.') +#' sendmail("bob@@example.com", "joe@@example.com", "Hi", "This is R.") #' } sendmail <- function(from, to, subject, body) { if (is.null(to)) { @@ -21,12 +20,16 @@ sendmail <- function(from, to, subject, body) { } sendmail <- Sys.which("sendmail") mailfile <- tempfile("mail") - cat(paste0("From: ", from, "\n", - "Subject: ", subject, "\n", - "To: ", to, "\n", "\n", - body, "\n"), file = mailfile) - system2(sendmail, c("-f", paste0("\"", from, "\""), - paste0("\"", to, "\""), "<", mailfile)) + cat(paste0( + "From: ", from, "\n", + "Subject: ", subject, "\n", + "To: ", to, "\n", "\n", + body, "\n" + ), file = mailfile) + system2(sendmail, c( + "-f", paste0("\"", from, "\""), + paste0("\"", to, "\""), "<", mailfile + )) unlink(mailfile) } } # sendmail diff --git a/base/utils/R/match_file.R b/base/utils/R/match_file.R index 863cb73328a..a18afc12720 100644 --- a/base/utils/R/match_file.R +++ b/base/utils/R/match_file.R @@ -1,10 +1,10 @@ #' Match a file #' -#' Return a list of files given a full prefix and optional suffix. Optionally, -#' confirm that the right number of files are returned. If the wrong number of +#' Return a list of files given a full prefix and optional suffix. Optionally, +#' confirm that the right number of files are returned. If the wrong number of #' files is returned, throw an error. #' -#' If `path_prefix` points to a directory, then all files inside that directory +#' If `path_prefix` points to a directory, then all files inside that directory #' that match the suffix (if provided) are returned. #' @param path_prefix Full path and file prefix #' @param suffix File suffix, as character (default = `NULL`) diff --git a/base/utils/R/mcmc.list2init.R b/base/utils/R/mcmc.list2init.R index b4571181aa8..3e6f980c226 100644 --- a/base/utils/R/mcmc.list2init.R +++ b/base/utils/R/mcmc.list2init.R @@ -11,67 +11,63 @@ #' mcmc.list2init <- function(dat) { need_packages("coda") - + ## get unique variable names - allname <- strsplit(colnames(dat[[1]]),"[",fixed = TRUE) - firstname <- sapply(allname,function(x){x[1]}) - dims <- lapply(allname,function(x){ - y <- sub(pattern = "]",replacement = "",x[2]) - y <- as.numeric(strsplit(y,",",fixed=TRUE)[[1]]) + allname <- strsplit(colnames(dat[[1]]), "[", fixed = TRUE) + firstname <- sapply(allname, function(x) { + x[1] + }) + dims <- lapply(allname, function(x) { + y <- sub(pattern = "]", replacement = "", x[2]) + y <- as.numeric(strsplit(y, ",", fixed = TRUE)[[1]]) return(y) }) - ind <- t(sapply(dims,function(x){ - if(length(x)==2){ + ind <- t(sapply(dims, function(x) { + if (length(x) == 2) { return(x) - } else { return(c(NA,NA))} + } else { + return(c(NA, NA)) + } })) - + uname <- unique(firstname) - + ## define variables ic <- list() nr <- nrow(dat[[1]]) nc <- coda::nchain(dat) - for(c in seq_len(nc)) ic[[c]] <- list() - - for(v in seq_along(uname)){ - + for (c in seq_len(nc)) ic[[c]] <- list() + + for (v in seq_along(uname)) { ## detect variable type (scalar, vector, matrix) cols <- which(firstname == uname[v]) - if(length(cols) == 1){ + if (length(cols) == 1) { ## SCALAR - for(c in seq_len(nc)){ - ic[[c]][[v]] <- dat[[c]][nr,cols] + for (c in seq_len(nc)) { + ic[[c]][[v]] <- dat[[c]][nr, cols] names(ic[[c]])[v] <- uname[v] } - } else { - dim <- length(dims[[cols[1]]]) - - if(dim == 1){ + + if (dim == 1) { ## VECTOR - for(c in seq_len(nc)){ - ic[[c]][[v]] <- 
dat[[c]][nr,cols] + for (c in seq_len(nc)) { + ic[[c]][[v]] <- dat[[c]][nr, cols] names(ic[[c]])[v] <- uname[v] } - - } else if (dim == 2){ + } else if (dim == 2) { ## MATRIX - for(c in seq_len(nc)){ - ic[[c]][[v]] <- matrix(seq_along(cols),max(ind[cols,1]),max(ind[cols,2])) ## set up matrix for storage - ic[[c]][[v]][ind[cols]] <- dat[[c]][nr,cols] + for (c in seq_len(nc)) { + ic[[c]][[v]] <- matrix(seq_along(cols), max(ind[cols, 1]), max(ind[cols, 2])) ## set up matrix for storage + ic[[c]][[v]][ind[cols]] <- dat[[c]][nr, cols] names(ic[[c]])[v] <- uname[v] } - } else { - PEcAn.logger::logger.severe("dimension not supported",dim,uname[v]) + PEcAn.logger::logger.severe("dimension not supported", dim, uname[v]) } - - } ## end else VECTOR or MATRIX - + } ## end else VECTOR or MATRIX } ## end loop over v - + return(ic) - } ## end mcmc.list2init diff --git a/base/utils/R/n_leap_day.R b/base/utils/R/n_leap_day.R index fffa28c3f09..9cb2be601bc 100644 --- a/base/utils/R/n_leap_day.R +++ b/base/utils/R/n_leap_day.R @@ -5,14 +5,13 @@ #' @param start_date,end_date dates in any format recognized by \code{\link[base]{as.Date}} #' @export n_leap_day <- function(start_date, end_date) { - ## make sure dates are formatted correctly start_date <- as.Date(start_date) - end_date <- as.Date(end_date) - + end_date <- as.Date(end_date) + ## which years are leap years? l_years <- lubridate::leap_year(lubridate::year(start_date):lubridate::year(end_date)) - + ## check for mid-year conditions in start/end if (start_date >= as.Date(paste0(lubridate::year(start_date), "-03-01"))) { l_years[1] <- FALSE @@ -20,7 +19,7 @@ n_leap_day <- function(start_date, end_date) { if (end_date <= as.Date(paste0(lubridate::year(end_date), "-02-28"))) { l_years[length(l_years)] <- FALSE } - + ## count up total number of leap days return(sum(l_years)) } # n_leap_day diff --git a/base/utils/R/need_packages.R b/base/utils/R/need_packages.R index 6076bbee04a..67ad9fd040d 100644 --- a/base/utils/R/need_packages.R +++ b/base/utils/R/need_packages.R @@ -9,7 +9,7 @@ #' @examples #' # Only need ::: because package isn't exported. #' # Inside a package, just call `need_packages` -#' PEcAn.utils:::need_packages("stats", "methods") # Always works +#' PEcAn.utils:::need_packages("stats", "methods") # Always works #' try(PEcAn.utils:::need_packages("notapackage")) need_packages <- function(...) 
{ pkgs <- unlist(list(...), recursive = TRUE) diff --git a/base/utils/R/r2bugs.distributions.R b/base/utils/R/r2bugs.distributions.R index 1ec86414388..e3ca8c46e45 100644 --- a/base/utils/R/r2bugs.distributions.R +++ b/base/utils/R/r2bugs.distributions.R @@ -8,23 +8,24 @@ #' @author David LeBauer, Ben Bolker #' @export #' @examples -#' priors <- data.frame(distn = c('weibull', 'lnorm', 'norm', 'gamma'), -#' parama = c(1, 1, 1, 1), -#' paramb = c(2, 2, 2, 2)) +#' priors <- data.frame( +#' distn = c("weibull", "lnorm", "norm", "gamma"), +#' parama = c(1, 1, 1, 1), +#' paramb = c(2, 2, 2, 2) +#' ) #' r2bugs.distributions(priors) r2bugs.distributions <- function(priors, direction = "r2bugs") { - - priors$distn <- as.character(priors$distn) + priors$distn <- as.character(priors$distn) priors$parama <- as.numeric(priors$parama) priors$paramb <- as.numeric(priors$paramb) ## index dataframe according to distribution - norm <- priors$distn %in% c("norm", "lnorm") # these have same tramsform - weib <- grepl("weib", priors$distn) # matches r and bugs version + norm <- priors$distn %in% c("norm", "lnorm") # these have same tramsform + weib <- grepl("weib", priors$distn) # matches r and bugs version gamma <- priors$distn == "gamma" - chsq <- grepl("chisq", priors$distn) # matches r and bugs version - bin <- priors$distn %in% c("binom", "bin") # matches r and bugs version - nbin <- priors$distn %in% c("nbinom", "negbin") # matches r and bugs version + chsq <- grepl("chisq", priors$distn) # matches r and bugs version + bin <- priors$distn %in% c("binom", "bin") # matches r and bugs version + nbin <- priors$distn %in% c("nbinom", "negbin") # matches r and bugs version ## Check that no rows are categorized into two distributions if (max(rowSums(cbind(norm, weib, gamma, chsq, bin, nbin))) > 1) { @@ -37,11 +38,10 @@ r2bugs.distributions <- function(priors, direction = "r2bugs") { priors$paramb[norm] <- priors$paramb[norm]^exponent if (direction == "r2bugs") { ## Convert R parameter b to BUGS parameter lambda by l = (1/b)^a - priors$paramb[weib] <- (1/priors$paramb[weib]) ^ priors$parama[weib] + priors$paramb[weib] <- (1 / priors$paramb[weib])^priors$parama[weib] } else if (direction == "bugs2r") { ## Convert BUGS parameter lambda to BUGS parameter b by b = l^(-1/a) - priors$paramb[weib] <- priors$paramb[weib] ^ (-1 / priors$parama[weib]) - + priors$paramb[weib] <- priors$paramb[weib]^(-1 / priors$parama[weib]) } ## Reverse parameter order for binomial and negative binomial priors[bin | nbin, c("parama", "paramb")] <- priors[bin | nbin, c("paramb", "parama")] @@ -50,12 +50,12 @@ r2bugs.distributions <- function(priors, direction = "r2bugs") { if (direction == "r2bugs") { priors$distn[weib] <- "weib" priors$distn[chsq] <- "chisqr" - priors$distn[bin] <- "bin" + priors$distn[bin] <- "bin" priors$distn[nbin] <- "negbin" } else if (direction == "bugs2r") { priors$distn[weib] <- "weibull" priors$distn[chsq] <- "chisq" - priors$distn[bin] <- "binom" + priors$distn[bin] <- "binom" priors$distn[nbin] <- "nbinom" } return(priors) @@ -93,10 +93,14 @@ bugs.rdist <- function(prior = data.frame(distn = "norm", parama = 0, paramb = 1 writeLines(model.string, con = "test.bug") j.model <- rjags::jags.model(file = "test.bug", data = list(x = 1)) - mcmc.object <- stats::window(rjags::coda.samples(model = j.model, - variable.names = c("Y"), - n.iter = n.iter, thin = 2), - start = n.iter / 2) + mcmc.object <- stats::window( + rjags::coda.samples( + model = j.model, + variable.names = c("Y"), + n.iter = n.iter, thin = 2 + ), + 
start = n.iter / 2 + ) Y <- as.matrix(mcmc.object)[, "Y"] if (!is.null(n)) { Y <- sample(Y, n) diff --git a/base/utils/R/read.output.R b/base/utils/R/read.output.R index a641b113aa3..d3861651e58 100644 --- a/base/utils/R/read.output.R +++ b/base/utils/R/read.output.R @@ -58,7 +58,6 @@ read.output <- function(runid, outdir, ncfiles = NULL, verbose = FALSE, print_summary = TRUE) { - ## vars in units s-1 to be converted to y-1 ## cflux = c('GPP', 'NPP', 'NEE', 'TotalResp', 'AutoResp', 'HeteroResp', 'DOC_flux', 'Fire_flux') # kgC m-2 s-1 ## wflux = c('Evap', 'TVeg', 'Qs', 'Qsb', 'Rainf') # kgH20 m-2 d-1 @@ -237,7 +236,7 @@ read.output <- function(runid, outdir, # there might be other cases that are not covered here dim.check <- length(dim(newresult)) if (any(pft.ind)) { # means pft.name passed, we want to read pft-specific outputs - if (dim.check == 1){ + if (dim.check == 1) { newresult <- newresult[pft.ind] } else { newresult <- newresult[, pft.ind] @@ -245,7 +244,7 @@ read.output <- function(runid, outdir, } else { # means this variable is available as per-pft, so written as such to standard ncdf files # but we still want to read as total - if (dim.check == 1){ + if (dim.check == 1) { newresult <- sum(newresult) } else { newresult <- apply(newresult, 1, sum) @@ -276,7 +275,9 @@ read.output <- function(runid, outdir, } } - if (!dataframe) return(result) + if (!dataframe) { + return(result) + } # Check if there are variables that have multiple dimensions for # example soil moisture at multiple levels. Currently we don't have diff --git a/base/utils/R/read_web_config.R b/base/utils/R/read_web_config.R index 4b5c5a8f4a3..bf2146a9a43 100644 --- a/base/utils/R/read_web_config.R +++ b/base/utils/R/read_web_config.R @@ -18,12 +18,13 @@ read_web_config <- function(php.config = "../../web/config.php", parse = TRUE, expand = TRUE) { - config <- readLines(php.config) - config <- config[grep("^\\$", config)] ## find lines that begin with $ (variables) + config <- config[grep("^\\$", config)] ## find lines that begin with $ (variables) - rxp <- paste0("^\\$([[:graph:]]+?)[[:space:]]*", - "=[[:space:]]*(.*?);?(?:[[:space:]]*//+.*)?$") + rxp <- paste0( + "^\\$([[:graph:]]+?)[[:space:]]*", + "=[[:space:]]*(.*?);?(?:[[:space:]]*//+.*)?$" + ) rxp_matches <- regexec(rxp, config, perl = TRUE) results <- regmatches(config, rxp_matches) list_names <- vapply(results, `[[`, character(1), 2, USE.NAMES = FALSE) @@ -34,7 +35,8 @@ read_web_config <- function(php.config = "../../web/config.php", if (parse) { # Remove surrounding quotes config_list <- lapply(config_list, gsub, - pattern = "\"(.*?)\"", replacement = "\\1") + pattern = "\"(.*?)\"", replacement = "\\1" + ) # Try to convert numbers to numeric config_list <- lapply( @@ -47,8 +49,9 @@ read_web_config <- function(php.config = "../../web/config.php", # Replace $output_folder with its value, and concatenate strings chr <- vapply(config_list, is.character, logical(1)) config_list[chr] <- lapply(config_list[chr], gsub, - pattern = "\\$output_folder *\\. *", - replacement = config_list[["output_folder"]]) + pattern = "\\$output_folder *\\. 
*", + replacement = config_list[["output_folder"]] + ) } config_list } diff --git a/base/utils/R/remove.config.R b/base/utils/R/remove.config.R index b9407fdb90d..333b106f0e4 100644 --- a/base/utils/R/remove.config.R +++ b/base/utils/R/remove.config.R @@ -1,10 +1,9 @@ remove.config <- function(dir, settings, model) { - - fcn.name <- paste0("remove.config.", model) - if (exists(fcn.name)) { - do.call(fcn.name, args = list(dir, settings)) - } else { - warning(paste(fcn.name, "does not exist")) - warning("This function is not required, but its implementation is recommended") - } + fcn.name <- paste0("remove.config.", model) + if (exists(fcn.name)) { + do.call(fcn.name, args = list(dir, settings)) + } else { + warning(paste(fcn.name, "does not exist")) + warning("This function is not required, but its implementation is recommended") + } } # remove.config diff --git a/base/utils/R/seconds_in_year.R b/base/utils/R/seconds_in_year.R index 03746670dc9..5bc5a5a4474 100644 --- a/base/utils/R/seconds_in_year.R +++ b/base/utils/R/seconds_in_year.R @@ -5,12 +5,12 @@ #' @param leap_year Default = TRUE. If set to FALSE will always return 31536000. #' @param ... additional arguments, all currently ignored #' @examples -#' seconds_in_year(2000) # Leap year -- 366 x 24 x 60 x 60 = 31622400 -#' seconds_in_year(2001) # Regular year -- 365 x 24 x 60 x 60 = 31536000 -#' seconds_in_year(2000:2005) # Vectorized over year +#' seconds_in_year(2000) # Leap year -- 366 x 24 x 60 x 60 = 31622400 +#' seconds_in_year(2001) # Regular year -- 365 x 24 x 60 x 60 = 31536000 +#' seconds_in_year(2000:2005) # Vectorized over year #' @export seconds_in_year <- function(year, leap_year = TRUE, ...) { - diy <- days_in_year(year, leap_year) - siy <- ud_convert(diy, 'days', 'seconds') - return(siy) + diy <- days_in_year(year, leap_year) + siy <- ud_convert(diy, "days", "seconds") + return(siy) } diff --git a/base/utils/R/status.R b/base/utils/R/status.R index 41e340d0ebf..94044274c90 100644 --- a/base/utils/R/status.R +++ b/base/utils/R/status.R @@ -37,7 +37,8 @@ status.start <- function(name, file = NULL) { cat( paste(name, format(Sys.time(), "%F %T"), sep = "\t"), file = file, - append = TRUE) + append = TRUE + ) } #' @describeIn status Record module completion time and status @@ -47,7 +48,8 @@ status.end <- function(status = "DONE", file = NULL) { cat( paste("", format(Sys.time(), "%F %T"), status, "\n", sep = "\t"), file = file, - append = TRUE) + append = TRUE + ) } #' @describeIn status Record that module was skipped @@ -59,9 +61,12 @@ status.skip <- function(name, file = NULL) { name, format(Sys.time(), "%F %T"), "", format(Sys.time(), "%F %T"), - "SKIPPED", "\n", sep = "\t"), + "SKIPPED", "\n", + sep = "\t" + ), file = file, - append = TRUE) + append = TRUE + ) } #' @describeIn status Look up module status from file @@ -74,8 +79,10 @@ status.check <- function(name, file = NULL) { return(0L) } status_data <- utils::read.table( - file, row.names = 1, header = FALSE, - sep = "\t", quote = "", fill = TRUE) + file, + row.names = 1, header = FALSE, + sep = "\t", quote = "", fill = TRUE + ) if (!name %in% row.names(status_data)) { return(0L) } @@ -117,7 +124,8 @@ get_status_path <- function(file) { x = "settings", envir = parent.frame(2), inherits = TRUE, - ifnotfound = list())$outdir + ifnotfound = list() + )$outdir base <- "STATUS" } @@ -127,4 +135,4 @@ get_status_path <- function(file) { # cat treats empty path as "write to stdout" return("") } -} \ No newline at end of file +} diff --git a/base/utils/R/timezone_hour.R 
b/base/utils/R/timezone_hour.R index b61c450d817..6f20fb55c38 100644 --- a/base/utils/R/timezone_hour.R +++ b/base/utils/R/timezone_hour.R @@ -6,7 +6,7 @@ #' @return hours offset of the timezone #' @examples #' \dontrun{ -#' timezone_hour('America/New_York') +#' timezone_hour("America/New_York") #' } #' @export timezone_hour <- function(timezone) { @@ -14,7 +14,7 @@ timezone_hour <- function(timezone) { return(timezone) } else { return(tryCatch(stringi::stri_timezone_info(timezone)$RawOffset, - error=function(e) NaN)) + error = function(e) NaN + )) } } - diff --git a/base/utils/R/to_nc.R b/base/utils/R/to_nc.R index 9ee1d38e047..ea71722c1ae 100644 --- a/base/utils/R/to_nc.R +++ b/base/utils/R/to_nc.R @@ -7,27 +7,27 @@ #' @param vals values of dimension; can be single value or vector #' @return ncdim defined according to standard_vars #' @author Anne Thomas -to_ncdim <- function(dimname,vals){ - dim <- PEcAn.utils::standard_vars[which(PEcAn.utils::standard_vars$Variable.Name == dimname),] - #check dim exists - if(nrow(dim) == 0){ - PEcAn.logger::logger.severe(paste("Dimension",dimname,"not in standard_vars")) +to_ncdim <- function(dimname, vals) { + dim <- PEcAn.utils::standard_vars[which(PEcAn.utils::standard_vars$Variable.Name == dimname), ] + # check dim exists + if (nrow(dim) == 0) { + PEcAn.logger::logger.severe(paste("Dimension", dimname, "not in standard_vars")) } - if(dim$Category != "Dimension"){ - PEcAn.logger::logger.severe(paste(dimname,"not a dimension or is deprecated")) + if (dim$Category != "Dimension") { + PEcAn.logger::logger.severe(paste(dimname, "not a dimension or is deprecated")) } - - if(is.null(vals) || length(vals) == 0){ - PEcAn.logger::logger.severe(paste("Missing vals for dim",dimname,",please check input")) - } #not sure if this check is necessary - - units <- as.character(dim$Units) #if the units are a factor the function fails + + if (is.null(vals) || length(vals) == 0) { + PEcAn.logger::logger.severe(paste("Missing vals for dim", dimname, ",please check input")) + } # not sure if this check is necessary + + units <- as.character(dim$Units) # if the units are a factor the function fails longname <- as.character(dim$Long.name) - - ncdim <- ncdf4::ncdim_def(name = dimname, vals = vals, units = units, longname = longname,-999) - + + ncdim <- ncdf4::ncdim_def(name = dimname, vals = vals, units = units, longname = longname, -999) + return(ncdim) -} #to_ncdim +} # to_ncdim #' Define an NCDF variable @@ -38,24 +38,24 @@ to_ncdim <- function(dimname,vals){ #' @param dims list of previously defined ncdims (function will match subset of dims for this variable in standard_vars; can include other dims--enables lapply.) 
#' @return ncvar defined according to standard_vars #' @author Anne Thomas -to_ncvar <- function(varname,dims){ - nc_var <- PEcAn.utils::standard_vars[which(PEcAn.utils::standard_vars$Variable.Name == varname),] - #check nc_var exists - if(nrow(nc_var)==0){ - PEcAn.logger::logger.severe(paste("Variable",varname,"not in standard_vars")) +to_ncvar <- function(varname, dims) { + nc_var <- PEcAn.utils::standard_vars[which(PEcAn.utils::standard_vars$Variable.Name == varname), ] + # check nc_var exists + if (nrow(nc_var) == 0) { + PEcAn.logger::logger.severe(paste("Variable", varname, "not in standard_vars")) } - - dimset <- nc_var[,c("dim1","dim2","dim3","dim4")] - dim <- dims[which(names(dims) %in% dimset)] #subset list of all dims for this variable - #check that dim isn't 0 - if(length(dim)==0 || is.null(dim)){ - PEcAn.logger::logger.severe(paste("No dimensions were loaded for",varname)) + + dimset <- nc_var[, c("dim1", "dim2", "dim3", "dim4")] + dim <- dims[which(names(dims) %in% dimset)] # subset list of all dims for this variable + # check that dim isn't 0 + if (length(dim) == 0 || is.null(dim)) { + PEcAn.logger::logger.severe(paste("No dimensions were loaded for", varname)) } - - units = as.character(nc_var$Units) #if the units are a factor the function fails + + units <- as.character(nc_var$Units) # if the units are a factor the function fails longname <- as.character(nc_var$Long.name) - - ncvar <- ncdf4::ncvar_def(name = varname, units = units, longname = longname, dim = dim, -999, prec = "double") - + + ncvar <- ncdf4::ncvar_def(name = varname, units = units, longname = longname, dim = dim, -999, prec = "double") + return(ncvar) -} #to_ncvar +} # to_ncvar diff --git a/base/utils/R/transformstats.R b/base/utils/R/transformstats.R index 6c3bf7e4bc3..18baea10a98 100644 --- a/base/utils/R/transformstats.R +++ b/base/utils/R/transformstats.R @@ -1,4 +1,3 @@ - #' Transform misc. 
statistics to SE #' #' Automates transformations of SD, MSE, LSD, 95%CI, HSD, and MSD @@ -12,16 +11,19 @@ #' @author David LeBauer #' @export #' @examples -#' statdf <- data.frame(Y=rep(1,5), -#' stat=rep(1,5), -#' n=rep(4,5), -#' statname=c('SD', 'MSE', 'LSD', 'HSD', 'MSD')) +#' statdf <- data.frame( +#' Y = rep(1, 5), +#' stat = rep(1, 5), +#' n = rep(4, 5), +#' statname = c("SD", "MSE", "LSD", "HSD", "MSD") +#' ) #' transformstats(statdf) transformstats <- function(data) { if (is.factor(data$statname) && !"SE" %in% levels(data$statname)) { data$statname <- factor( data$statname, - levels = c(levels(data$statname), "SE")) + levels = c(levels(data$statname), "SE") + ) } ## Transformation of stats to SE transform SD to SE if (max(c("SD", "sd") %in% data$statname)) { @@ -48,7 +50,7 @@ transformstats <- function(data) { data$stat[lsdi] <- ( data$stat[lsdi] / (stats::qt(0.975, data$n[lsdi]) - * sqrt((2 * data$n[lsdi])))) + * sqrt((2 * data$n[lsdi])))) data$statname[lsdi] <- "SE" } ## Tukey's Honestly Significant Difference (HSD), @@ -56,7 +58,7 @@ transformstats <- function(data) { if ("HSD" %in% data$statname) { hsdi <- which(data$statname == "HSD") n <- data$n[hsdi] - n[is.na(n)] <- 2 ## minimum n that can be used if NA + n[is.na(n)] <- 2 ## minimum n that can be used if NA data$stat[hsdi] <- data$stat[hsdi] / (stats::qtukey(0.975, n, df = 2)) data$statname[hsdi] <- "SE" data$n[hsdi] <- n @@ -69,7 +71,7 @@ transformstats <- function(data) { data$stat[msdi] <- ( data$stat[msdi] * data$n[msdi] - / (stats::qt(0.975, 2 * data$n[msdi] - 2) * sqrt(2))) + / (stats::qt(0.975, 2 * data$n[msdi] - 2) * sqrt(2))) data$statname[msdi] <- "SE" } if (!all(data$statname %in% c("SE", "none"))) { diff --git a/base/utils/R/ud_convert.R b/base/utils/R/ud_convert.R index ee96bf60cd9..f7c5d5b1766 100644 --- a/base/utils/R/ud_convert.R +++ b/base/utils/R/ud_convert.R @@ -13,15 +13,15 @@ #' @export ud_convert <- function(x, u1, u2) { stopifnot(units::ud_are_convertible(u1, u2)) - if(inherits(x, "difftime")) { + if (inherits(x, "difftime")) { x1 <- units::as_units(x) - if(units(x1) != units(units::as_units(u1))) { + if (units(x1) != units(units::as_units(u1))) { warning("Units of `x` don't match `u1`, using '", units::deparse_unit(x1), "' instead") } } else { x1 <- units::set_units(x, value = u1, mode = "standard") } x2 <- units::set_units(x1, value = u2, mode = "standard") - + units::drop_units(x2) - } # ud_convert +} # ud_convert diff --git a/base/utils/R/unit_is_parseable.R b/base/utils/R/unit_is_parseable.R index 7ec67361258..2ed4f4e6b95 100644 --- a/base/utils/R/unit_is_parseable.R +++ b/base/utils/R/unit_is_parseable.R @@ -8,15 +8,17 @@ #' @return TRUE if the units is parseable, FALSE otherwise. 
#' #' @examples -#' unit_is_parseable("g/sec^2") -#' unit_is_parseable("kiglometters") +#' unit_is_parseable("g/sec^2") +#' unit_is_parseable("kiglometters") #' #' @export -unit_is_parseable <- function(unit){ - tryCatch({ - if(units::as_units(unit)) - return(TRUE) - }, - error = function(e) FALSE +unit_is_parseable <- function(unit) { + tryCatch( + { + if (units::as_units(unit)) { + return(TRUE) + } + }, + error = function(e) FALSE ) } # unit_is_parseable diff --git a/base/utils/R/utils.R b/base/utils/R/utils.R index 1bb2174ad35..a7395508197 100644 --- a/base/utils/R/utils.R +++ b/base/utils/R/utils.R @@ -1,4 +1,3 @@ - #--------------------------------------------------------------------------------------------------# # Small, miscellaneous functions for use throughout PEcAn #--------------------------------------------------------------------------------------------------# @@ -20,7 +19,7 @@ #' @author Rob Kooper mstmipvar <- function(name, lat = NULL, lon = NULL, time = NULL, nsoil = NULL, silent = FALSE) { nc_var <- PEcAn.utils::standard_vars[PEcAn.utils::standard_vars$Variable.Name == name, ] - + if (nrow(nc_var) == 0) { if (!silent) { PEcAn.logger::logger.info("Don't know about variable", name, " in standard_vars in PEcAn.utils") @@ -37,29 +36,29 @@ mstmipvar <- function(name, lat = NULL, lon = NULL, time = NULL, nsoil = NULL, s } return(ncdf4::ncvar_def(name, "", list(time), -999, name)) } - + var_dims <- nc_var[paste0("dim", 1:4)] pos_dims <- c("lon", "lat", "time", "nsoil") - - #check for missing dimensions + + # check for missing dimensions no_dims <- pos_dims[!var_dims %in% pos_dims] if (!silent) { if (length(no_dims) > 0) { PEcAn.logger::logger.info("Don't know dimension(s)", no_dims, "for variable", name) } } - - #replace dim names with values + + # replace dim names with values var_dims <- var_dims[var_dims %in% pos_dims] - var_dims <- lapply(var_dims, function(x) eval(str2lang(x))) #converts character values in `var_dims` to corresponding R objects - dims <- var_dims[!sapply(var_dims, is.null)] #get rid of NULL elements - + var_dims <- lapply(var_dims, function(x) eval(str2lang(x))) # converts character values in `var_dims` to corresponding R objects + dims <- var_dims[!sapply(var_dims, is.null)] # get rid of NULL elements + ncvar <- ncdf4::ncvar_def(name, as.character(nc_var$Units), dims, -999) if (nc_var$Long.name != "na") { ncvar$longname <- as.character(nc_var$Long.name) } return(ncvar) -} +} #--------------------------------------------------------------------------------------------------# @@ -153,10 +152,10 @@ vecpaste <- function(x) paste(paste0("'", x, "'"), collapse = ",") #' @return id representing a model run #' @export #' @examples -#' get.run.id('ENS', left.pad.zeros(1, 5)) -#' get.run.id('SA', round(qnorm(-3),3), trait = 'Vcmax') +#' get.run.id("ENS", left.pad.zeros(1, 5)) +#' get.run.id("SA", round(qnorm(-3), 3), trait = "Vcmax") #' @author Carl Davidson, David LeBauer -get.run.id <- function(run.type, index, trait = NULL, pft.name = NULL, site.id=NULL) { +get.run.id <- function(run.type, index, trait = NULL, pft.name = NULL, site.id = NULL) { result <- paste(c(run.type, pft.name, trait, index, site.id), collapse = "-") return(result) } # get.run.id @@ -176,11 +175,11 @@ get.run.id <- function(run.type, index, trait = NULL, pft.name = NULL, site.id=N #' @references M. P. Wand, J. S. Marron and D. Ruppert, 1991. Transformations in Density Estimation. Journal of the American Statistical Association. 
86(414):343-353 \url{http://www.jstor.org/stable/2290569} #' @export zero.bounded.density <- function(x, bw = "SJ", n = 1001) { - y <- log(x) - g <- stats::density(y, bw = bw, n = n) + y <- log(x) + g <- stats::density(y, bw = bw, n = n) xgrid <- exp(g$x) - g$y <- c(0, g$y / xgrid) - g$x <- c(0, xgrid) + g$y <- c(0, g$y / xgrid) + g$x <- c(0, xgrid) return(g) } # zero.bounded.density @@ -197,9 +196,11 @@ zero.bounded.density <- function(x, bw = "SJ", n = 1001) { summarize.result <- function(result) { ans1 <- result %>% dplyr::filter(.data$n == 1) %>% - dplyr::group_by(.data$citation_id, .data$site_id, .data$trt_id, - .data$control, .data$greenhouse, .data$date, .data$time, - .data$cultivar_id, .data$specie_id, .data$name, .data$treatment_id) %>% + dplyr::group_by( + .data$citation_id, .data$site_id, .data$trt_id, + .data$control, .data$greenhouse, .data$date, .data$time, + .data$cultivar_id, .data$specie_id, .data$name, .data$treatment_id + ) %>% dplyr::summarize( # stat must be computed first, before n and mean statname = dplyr::if_else(length(.data$n) == 1, "none", "SE"), stat = stats::sd(.data$mean) / sqrt(length(.data$n)), @@ -257,9 +258,11 @@ get.stats.mcmc <- function(mcmc.summary, sample.size) { #' paste.stats(3.333333, 5.00001, 6.22222, n = 3) #' # [1] "$3.33(5,6.22)$" paste.stats <- function(median, lcl, ucl, n = 2) { - paste0("$", tabnum(median, n), - "(", tabnum(lcl, n), ",", tabnum(ucl, n), ")", - "$") + paste0( + "$", tabnum(median, n), + "(", tabnum(lcl, n), ",", tabnum(ucl, n), ")", + "$" + ) } # paste.stats @@ -274,12 +277,16 @@ paste.stats <- function(median, lcl, ucl, n = 2) { #' @author David LeBauer #' @export #' @examples -#' \dontrun{get.parameter.stat(mcmc.summaries[[1]], 'beta.o')} +#' \dontrun{ +#' get.parameter.stat(mcmc.summaries[[1]], "beta.o") +#' } get.parameter.stat <- function(mcmc.summary, parameter) { - paste.stats(median = mcmc.summary$quantiles[parameter, "50%"], - lcl = mcmc.summary$quantiles[parameter, c("2.5%")], - ucl = mcmc.summary$quantiles[parameter, c("97.5%")], - n = 2) + paste.stats( + median = mcmc.summary$quantiles[parameter, "50%"], + lcl = mcmc.summary$quantiles[parameter, c("2.5%")], + ucl = mcmc.summary$quantiles[parameter, c("97.5%")], + n = 2 + ) } # get.parameter.stat #--------------------------------------------------------------------------------------------------# @@ -298,24 +305,28 @@ get.parameter.stat <- function(mcmc.summary, parameter) { pdf.stats <- function(distn, A, B) { distn <- as.character(distn) mean <- switch(distn, - gamma = A/B, - lnorm = exp(A + 1/2 * B^2), - beta = A/(A + B), - weibull = B * gamma(1 + 1/A), - norm = A, - f = ifelse(B > 2, - B/(B - 2), - mean(stats::rf(10000, A, B)))) + gamma = A / B, + lnorm = exp(A + 1 / 2 * B^2), + beta = A / (A + B), + weibull = B * gamma(1 + 1 / A), + norm = A, + f = ifelse(B > 2, + B / (B - 2), + mean(stats::rf(10000, A, B)) + ) + ) var <- switch(distn, - gamma = A/B^2, - lnorm = exp(2 * A + B ^ 2) * (exp(B ^ 2) - 1), - beta = A * B/((A + B) ^ 2 * (A + B + 1)), - weibull = B ^ 2 * (gamma(1 + 2 / A) - - gamma(1 + 1 / A) ^ 2), - norm = B ^ 2, - f = ifelse(B > 4, - 2 * B^2 * (A + B - 2) / (A * (B - 2) ^ 2 * (B - 4)), - stats::var(stats::rf(1e+05, A, B)))) + gamma = A / B^2, + lnorm = exp(2 * A + B^2) * (exp(B^2) - 1), + beta = A * B / ((A + B)^2 * (A + B + 1)), + weibull = B^2 * (gamma(1 + 2 / A) - + gamma(1 + 1 / A)^2), + norm = B^2, + f = ifelse(B > 4, + 2 * B^2 * (A + B - 2) / (A * (B - 2)^2 * (B - 4)), + stats::var(stats::rf(1e+05, A, B)) + ) + ) qci <- get(paste0("q", distn)) 
ci <- qci(c(0.025, 0.975), A, B) lcl <- ci[1] @@ -338,11 +349,11 @@ pdf.stats <- function(distn, A, B) { #' @examples #' # convert parameter name to a string appropriate for end-use plotting #' \dontrun{ -#' trait.lookup('growth_resp_factor') -#' trait.lookup('growth_resp_factor')$figid +#' trait.lookup("growth_resp_factor") +#' trait.lookup("growth_resp_factor")$figid #' #' # get a list of all traits and units in dictionary -#' trait.lookup()[,c('figid', 'units')] +#' trait.lookup()[, c("figid", "units")] #' } trait.lookup <- function(traits = NULL) { if (is.null(traits)) { @@ -419,11 +430,12 @@ newxtable <- function(x, environment = "table", table.placement = "ht", label = caption = NULL, caption.placement = NULL, align = NULL) { need_packages("xtable") print(xtable::xtable(x, label = label, caption = caption, align = align), - floating.environment = environment, - table.placement = table.placement, - caption.placement = caption.placement, - # sanitize.text.function = function(x) gsub("%", "\\\\%", x), - sanitize.rownames.function = function(x) paste('')) + floating.environment = environment, + table.placement = table.placement, + caption.placement = caption.placement, + # sanitize.text.function = function(x) gsub("%", "\\\\%", x), + sanitize.rownames.function = function(x) paste("") + ) } # newxtable #--------------------------------------------------------------------------------------------------# @@ -499,9 +511,9 @@ temp.settings <- function(settings.txt) { #' @return FALSE if function returns error; else TRUE #' @export #' @examples -#' tryl(1+1) +#' tryl(1 + 1) #' # TRUE -#' tryl(sum('a')) +#' tryl(sum("a")) #' # FALSE #' @author David LeBauer tryl <- function(FUN) { @@ -519,7 +531,9 @@ tryl <- function(FUN) { #' @return FALSE if function returns error; else TRUE #' @export #' @examples -#' \dontrun{require.modelpkg(BioCro)} +#' \dontrun{ +#' require.modelpkg(BioCro) +#' } #' @author David LeBauer load.modelpkg <- function(model) { pecan.modelpkg <- paste0("PEcAn.", model) @@ -527,8 +541,10 @@ load.modelpkg <- function(model) { if (pecan.modelpkg %in% rownames(utils::installed.packages())) { do.call(require, args = list(pecan.modelpkg)) } else { - PEcAn.logger::logger.error("I can't find a package for the ", model, - "model; I expect it to be named ", pecan.modelpkg) + PEcAn.logger::logger.error( + "I can't find a package for the ", model, + "model; I expect it to be named ", pecan.modelpkg + ) } } } # load.modelpkg @@ -545,10 +561,9 @@ load.modelpkg <- function(model) { #' @return val converted values #' @author Istem Fer, Shawn Serbin misc.convert <- function(x, u1, u2) { - - amC <- 12.0107 # atomic mass of carbon + amC <- 12.0107 # atomic mass of carbon mmH2O <- 18.01528 # molar mass of H2O, g/mol - + if (u1 == "umol C m-2 s-1" & u2 == "kg C m-2 s-1") { val <- ud_convert(x, "ug", "kg") * amC } else if (u1 == "kg C m-2 s-1" & u2 == "umol C m-2 s-1") { @@ -562,11 +577,11 @@ misc.convert <- function(x, u1, u2) { } else if (u1 == "kg C m-2" & u2 == "Mg ha-1") { val <- x * ud_convert(1, "kg", "Mg") * ud_convert(1, "m-2", "ha-1") } else { - u1 <- gsub("gC","g*12",u1) - u2 <- gsub("gC","g*12",u2) - val <- ud_convert(x,u1,u2) - - + u1 <- gsub("gC", "g*12", u1) + u2 <- gsub("gC", "g*12", u2) + val <- ud_convert(x, u1, u2) + + # PEcAn.logger::logger.severe(paste("Unknown units", u1, u2)) } return(val) @@ -583,16 +598,19 @@ misc.convert <- function(x, u1, u2) { #' @return logical #' @author Istem Fer, Shawn Serbin misc.are.convertible <- function(u1, u2) { - # make sure the order of 
vectors match - units.from <- c("umol C m-2 s-1", "kg C m-2 s-1", - "mol H2O m-2 s-1", "kg H2O m-2 s-1", - "Mg ha-1", "kg C m-2") - units.to <- c("kg C m-2 s-1", "umol C m-2 s-1", - "kg H2O m-2 s-1", "mol H2O m-2 s-1", - "kg C m-2", "Mg ha-1") - - if(u1 %in% units.from & u2 %in% units.to) { + units.from <- c( + "umol C m-2 s-1", "kg C m-2 s-1", + "mol H2O m-2 s-1", "kg H2O m-2 s-1", + "Mg ha-1", "kg C m-2" + ) + units.to <- c( + "kg C m-2 s-1", "umol C m-2 s-1", + "kg H2O m-2 s-1", "mol H2O m-2 s-1", + "kg C m-2", "Mg ha-1" + ) + + if (u1 %in% units.from & u2 %in% units.to) { if (which(units.from == u1) == which(units.to == u2)) { return(TRUE) } else { @@ -616,17 +634,17 @@ convert.expr <- function(expression) { # split equation to LHS and RHS deri.var <- gsub("=.*$", "", expression) # name of the derived variable deri.eqn <- gsub(".*=", "", expression) # derivation eqn - - non.match <- gregexpr('[^a-zA-Z_.]', deri.eqn) # match characters that are not "a-zA-Z_." + + non.match <- gregexpr("[^a-zA-Z_.]", deri.eqn) # match characters that are not "a-zA-Z_." split.chars <- unlist(regmatches(deri.eqn, non.match)) # where to split at # split the expression to retrieve variable names to be used in read.output - if(length(split.chars)!=0){ - variables <- unlist(strsplit(deri.eqn, paste0("[",noquote(paste0(split.chars, collapse="")),"]"))) + if (length(split.chars) != 0) { + variables <- unlist(strsplit(deri.eqn, paste0("[", noquote(paste0(split.chars, collapse = "")), "]"))) variables <- variables[variables != ""] # Remove empty entries } else { variables <- deri.eqn } - + return(list(variable.drv = deri.var, variable.eqn = list(variables = variables, expression = deri.eqn))) } #--------------------------------------------------------------------------------------------------# @@ -645,11 +663,13 @@ convert.expr <- function(expression) { #' #' @examples #' \dontrun{ -#' download_file("http://lib.stat.cmu.edu/datasets/csb/ch11b.txt","~/test.download.txt") +#' download_file("http://lib.stat.cmu.edu/datasets/csb/ch11b.txt", "~/test.download.txt") #' -#' download_file(" +#' download_file( +#' " #' ftp://ftp.cdc.noaa.gov/Datasets/NARR/monolevel/pres.sfc.2000.nc", -#' "~/pres.sfc.2000.nc") +#' "~/pres.sfc.2000.nc" +#' ) #' } #' #' @export @@ -659,9 +679,9 @@ download_file <- function(url, filename, method) { if (startsWith(url, "ftp://")) { if (missing(method)) method <- getOption("download.ftp.method", default = "auto") if (method == "ncftpget") { - PEcAn.logger::logger.debug(paste0("FTP Method: ",method)) - #system2("ncftpget", c("-c", "url", ">", filename)) - system(paste(method,"-c",url,">",filename,sep=" ")) + PEcAn.logger::logger.debug(paste0("FTP Method: ", method)) + # system2("ncftpget", c("-c", "url", ">", filename)) + system(paste(method, "-c", url, ">", filename, sep = " ")) } else { utils::download.file(url, filename, method) } @@ -686,34 +706,39 @@ download_file <- function(url, filename, method) { #' #' @examples #' \dontrun{ -#' file_url <- paste0("https://thredds.daac.ornl.gov/", -#' "thredds/dodsC/ornldaac/1220", -#' "/mstmip_driver_global_hd_climate_lwdown_1999_v1.nc4") +#' file_url <- paste0( +#' "https://thredds.daac.ornl.gov/", +#' "thredds/dodsC/ornldaac/1220", +#' "/mstmip_driver_global_hd_climate_lwdown_1999_v1.nc4" +#' ) #' dap <- retry.func( #' ncdf4::nc_open(file_url), -#' maxErrors=10, -#' sleep=2) +#' maxErrors = 10, +#' sleep = 2 +#' ) #' } #' #' @export #' @author Shawn Serbin retry.func <- function(expr, isError = function(x) inherits(x, "try-error"), maxErrors = 5, sleep 
= 0) { - attempts = 0 - retval = try(eval(expr)) + attempts <- 0 + retval <- try(eval(expr)) while (isError(retval)) { - attempts = attempts + 1 + attempts <- attempts + 1 if (attempts >= maxErrors) { - msg = sprintf("retry: too many retries [[%s]]", utils::capture.output(utils::str(retval))) + msg <- sprintf("retry: too many retries [[%s]]", utils::capture.output(utils::str(retval))) PEcAn.logger::logger.warn(msg) stop(msg) } else { - msg = sprintf("retry: error in attempt %i/%i [[%s]]", attempts, maxErrors, - utils::capture.output(utils::str(retval))) + msg <- sprintf( + "retry: error in attempt %i/%i [[%s]]", attempts, maxErrors, + utils::capture.output(utils::str(retval)) + ) PEcAn.logger::logger.warn(msg) - #warning(msg) + # warning(msg) } if (sleep > 0) Sys.sleep(sleep) - retval = try(eval(expr)) + retval <- try(eval(expr)) } return(retval) } @@ -732,12 +757,12 @@ retry.func <- function(expr, isError = function(x) inherits(x, "try-error"), max #' rlog <- robustly(log, timeout = 0.3) #' try(rlog("fail")) #' \dontrun{ -#' nc_openr <- robustly(ncdf4::nc_open, n = 10, timeout = 0.5) -#' nc <- nc_openr(url) -#' # ...or just call the function directly -#' nc <- robustly(ncdf4::nc_open, n = 20)(url) -#' # Useful in `purrr` maps -#' many_vars <- purrr::map(varnames, robustly(ncdf4::ncvar_get), nc = nc) +#' nc_openr <- robustly(ncdf4::nc_open, n = 10, timeout = 0.5) +#' nc <- nc_openr(url) +#' # ...or just call the function directly +#' nc <- robustly(ncdf4::nc_open, n = 20)(url) +#' # Useful in `purrr` maps +#' many_vars <- purrr::map(varnames, robustly(ncdf4::ncvar_get), nc = nc) #' } #' @export robustly <- function(.f, n = 10, timeout = 0.2, silent = TRUE) { @@ -746,7 +771,9 @@ robustly <- function(.f, n = 10, timeout = 0.2, silent = TRUE) { attempt <- 1 while (attempt <= n) { result <- try(.f(...), silent = silent) - if (!inherits(result, "try-error")) return(result) + if (!inherits(result, "try-error")) { + return(result) + } attempt <- attempt + 1 if (!silent) PEcAn.logger::logger.info("Trying attempt ", attempt, " of ", n) } diff --git a/base/utils/R/write.config.utils.R b/base/utils/R/write.config.utils.R index 06c2bb4a379..2fd9089cd3b 100644 --- a/base/utils/R/write.config.utils.R +++ b/base/utils/R/write.config.utils.R @@ -1,4 +1,3 @@ - #--------------------------------------------------------------------------------------------------# ### TODO: Generalize this code for all ecosystem models (e.g. ED2.2, SiPNET, etc). 
#--------------------------------------------------------------------------------------------------# @@ -21,7 +20,7 @@ get.quantiles <- function(quantiles.tag) { quantiles <- append(quantiles, 1 - stats::pnorm(sigmas)) } if (length(quantiles) == 0) { - quantiles <- 1 - stats::pnorm(-3:3) #default + quantiles <- 1 - stats::pnorm(-3:3) # default } if (!0.5 %in% quantiles) { quantiles <- append(quantiles, 0.5) @@ -70,7 +69,7 @@ get.sa.samples <- function(samples, quantiles) { sa.samples <- data.frame() for (trait in names(samples)) { for (quantile in quantiles) { - sa.samples[as.character(round(quantile * 100, 3)), trait] <- + sa.samples[as.character(round(quantile * 100, 3)), trait] <- quantile(samples[[trait]], quantile) } } diff --git a/base/utils/data/standard_vars.R b/base/utils/data/standard_vars.R index c324394b849..e06bc401792 100644 --- a/base/utils/data/standard_vars.R +++ b/base/utils/data/standard_vars.R @@ -1,4 +1,4 @@ - standard_vars <- utils::read.csv( - file = "standard_vars.csv", - colClasses = "character") + file = "standard_vars.csv", + colClasses = "character" +) diff --git a/base/utils/inst/LBNL_remote_test.R b/base/utils/inst/LBNL_remote_test.R index 941b6d48bfe..11804ba22bc 100644 --- a/base/utils/inst/LBNL_remote_test.R +++ b/base/utils/inst/LBNL_remote_test.R @@ -6,50 +6,52 @@ setwd("~/git/pecan/web") ## define 'pass' as Google Authenticator current password (string) exe_host <- "lrc-login.lbl.gov" data_host <- "lrc-xfer.lbl.gov" -user <- 'dietze' +user <- "dietze" tunnelDir <- "/tmp/LBNL" -exeDir <- file.path(tunnelDir,"exe") -dataDir <- file.path(tunnelDir,"data") +exeDir <- file.path(tunnelDir, "exe") +dataDir <- file.path(tunnelDir, "data") dir.create(tunnelDir) dir.create(exeDir) dir.create(dataDir) R <- "/global/software/sl-6.x86_64/modules/langs/r/3.2.5/bin/R" -##open connections -write(pass,file.path(exeDir,"password")) -con1 <- system2("./sshtunnel.sh",c(exe_host,user,exeDir,">",file.path(exeDir,"log"),"&")) -file.remove(file.path(exeDir,"password")) +## open connections +write(pass, file.path(exeDir, "password")) +con1 <- system2("./sshtunnel.sh", c(exe_host, user, exeDir, ">", file.path(exeDir, "log"), "&")) +file.remove(file.path(exeDir, "password")) -write(pass,file.path(dataDir,"password")) -con2 <- system2("./sshtunnel.sh",c(data_host,user,dataDir,">",file.path(dataDir,"log"),"&")) -file.remove(file.path(dataDir,"password")) +write(pass, file.path(dataDir, "password")) +con2 <- system2("./sshtunnel.sh", c(data_host, user, dataDir, ">", file.path(dataDir, "log"), "&")) +file.remove(file.path(dataDir, "password")) ## build host list -host <- list(name=exe_host, - data_hostname = data_host, - tunnel = file.path(exeDir,"tunnel"), - data_tunnel = file.path(dataDir,"tunnel"), - Rbinary = R) -settings <- list(host=host) +host <- list( + name = exe_host, + data_hostname = data_host, + tunnel = file.path(exeDir, "tunnel"), + data_tunnel = file.path(dataDir, "tunnel"), + Rbinary = R +) +settings <- list(host = host) ## test remote.copy.to -PEcAn.remote::remote.copy.to(host,"favicon.ico","~/favicon.ico") +PEcAn.remote::remote.copy.to(host, "favicon.ico", "~/favicon.ico") ## test remote.execute.cmd -foo <- PEcAn.remote::remote.execute.cmd(host,"pwd") +foo <- PEcAn.remote::remote.execute.cmd(host, "pwd") print(foo) -PEcAn.remote::remote.execute.cmd(host,"mv",c("/global/home/users/dietze/favicon.ico","/global/home/users/dietze/favicon.jpg")) +PEcAn.remote::remote.execute.cmd(host, "mv", c("/global/home/users/dietze/favicon.ico", 
"/global/home/users/dietze/favicon.jpg")) ## test remote.copy.from -PEcAn.remote::remote.copy.from(host,"~/favicon.jpg","favicon.jpg") +PEcAn.remote::remote.copy.from(host, "~/favicon.jpg", "favicon.jpg") ## test remote.execute.R -b <- PEcAn.remote::remote.execute.R(script = "return(1)",host = host,R=R,verbose=TRUE,scratchdir="/global/scratch/dietze/") +b <- PEcAn.remote::remote.execute.R(script = "return(1)", host = host, R = R, verbose = TRUE, scratchdir = "/global/scratch/dietze/") -c <- PEcAn.remote::remote.execute.R(script = "return(require(PEcAn.data.atmosphere))",host = host,R=R,verbose=TRUE,scratchdir="/global/scratch/dietze/") +c <- PEcAn.remote::remote.execute.R(script = "return(require(PEcAn.data.atmosphere))", host = host, R = R, verbose = TRUE, scratchdir = "/global/scratch/dietze/") -d <- PEcAn.remote::remote.execute.R(script = "return(.libPaths())",host = host,R=R,verbose=TRUE,scratchdir="/global/scratch/dietze/") +d <- PEcAn.remote::remote.execute.R(script = "return(.libPaths())", host = host, R = R, verbose = TRUE, scratchdir = "/global/scratch/dietze/") ## kill tunnels diff --git a/base/utils/scripts/metutils.R b/base/utils/scripts/metutils.R index 0d25fc23506..655f12d3be3 100644 --- a/base/utils/scripts/metutils.R +++ b/base/utils/scripts/metutils.R @@ -1,27 +1,27 @@ #################################################################################################### -#/file -# -# ED2 Meteorology Driver Utilities -# -- v1 -# -- Draft version. -# +# /file +# +# ED2 Meteorology Driver Utilities +# -- v1 +# -- Draft version. +# #################################################################################################### #---------------- Close all devices and delete all variables. -------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics #--------------------------------------------------------------------------------------------------# #---------------- Import command arguments -------------------------------------------------------# -args = commandArgs(trailingOnly = TRUE) # import any needed arguments for the terminal -print(args) #---> Print command arguments in R output. For debugging. +args <- commandArgs(trailingOnly = TRUE) # import any needed arguments for the terminal +print(args) #---> Print command arguments in R output. For debugging. 
######### Define model run location ######### -if (args[1]=="pwd"){ - model_run=paste(getwd(),"/",sep="") # if set as pwd -}else{ - model_run=paste(args[1],"/",sep="") # if full path given +if (args[1] == "pwd") { + model_run <- paste(getwd(), "/", sep = "") # if set as pwd +} else { + model_run <- paste(args[1], "/", sep = "") # if full path given } #--------------------------------------------------------------------------------------------------# @@ -32,66 +32,69 @@ library(XML) #################################################################################################### ###################################### START SCRIPT ################################################ -print('**************************************************') -print('**************** STARTING SCRIPT *****************') -print('**************************************************') -print(' ') +print("**************************************************") +print("**************** STARTING SCRIPT *****************") +print("**************************************************") +print(" ") #---------------- Setup script output(s) ----------------------------------------------------------# # Info: Generate output folder for diagnostic plots. Will create folder if it doesn't already # exist. -output_dir = paste(model_run,"MET_OUTPUT/",sep="") -if (! file.exists(output_dir)) dir.create(output_dir) +output_dir <- paste(model_run, "MET_OUTPUT/", sep = "") +if (!file.exists(output_dir)) dir.create(output_dir) #--------------------------------------------------------------------------------------------------# #---------------- Parse Info From ED2IN File ------------------------------------------------------# -# Info: Parse ED2IN txt file to determine location of ED_MET_DRIVER_HEADER_DB. -# Then parse ED_MET_DRIVER_HEADER for location of met driver hdf5 files. +# Info: Parse ED2IN txt file to determine location of ED_MET_DRIVER_HEADER_DB. +# Then parse ED_MET_DRIVER_HEADER for location of met driver hdf5 files. 
# Location of ED_MET_DRIVER_HEADER -setwd(model_run) # CWD +setwd(model_run) # CWD # ------ OLD METHOD ------ -#system(paste("grep"," ","NL%ED_MET_DRIVER_DB"," ","*ED2IN*"," ","> loc")) -#ED_MET_DRIVER_DB = readLines("loc") +# system(paste("grep"," ","NL%ED_MET_DRIVER_DB"," ","*ED2IN*"," ","> loc")) +# ED_MET_DRIVER_DB = readLines("loc") # ------------------------ # PARSE ED_MET_DRIVER_DB LOCATION -if (args[2]=="-f"){ - ED_MET_DRIVER_DB = system(paste("grep"," ","NL%ED_MET_DRIVER_DB"," ","*ED2IN*"),intern=TRUE) -}else{ - ED_MET_DRIVER_DB = system(paste("grep"," ","NL%ED_MET_DRIVER_DB"," ",args[2]),intern=TRUE) +if (args[2] == "-f") { + ED_MET_DRIVER_DB <- system(paste("grep", " ", "NL%ED_MET_DRIVER_DB", " ", "*ED2IN*"), intern = TRUE) +} else { + ED_MET_DRIVER_DB <- system(paste("grep", " ", "NL%ED_MET_DRIVER_DB", " ", args[2]), intern = TRUE) } # Locate met header info in ED2IN file -indices = gregexpr("'", ED_MET_DRIVER_DB)[[1]] -met_header_loc = substr(ED_MET_DRIVER_DB,indices[1]+1, indices[2]-1) +indices <- gregexpr("'", ED_MET_DRIVER_DB)[[1]] +met_header_loc <- substr(ED_MET_DRIVER_DB, indices[1] + 1, indices[2] - 1) # Location of met driver files -ED_MET_DRIVER=readLines(met_header_loc) -MET_DRIVERS=ED_MET_DRIVER[3] -indices = gregexpr("/", MET_DRIVERS)[[1]] -MET_DRIVER_LOC = substr(MET_DRIVERS,indices[1]+1, indices[length(indices)]-1) +ED_MET_DRIVER <- readLines(met_header_loc) +MET_DRIVERS <- ED_MET_DRIVER[3] +indices <- gregexpr("/", MET_DRIVERS)[[1]] +MET_DRIVER_LOC <- substr(MET_DRIVERS, indices[1] + 1, indices[length(indices)] - 1) # List of Met files -met_files = list.files(path=paste("/",MET_DRIVER_LOC,"/",sep=""),pattern=".h5") +met_files <- list.files(path = paste("/", MET_DRIVER_LOC, "/", sep = ""), pattern = ".h5") # Extract years and months from filenames. Uses grep and regexp -setwd(paste("/",MET_DRIVER_LOC,"/",sep="")) # CWD +setwd(paste("/", MET_DRIVER_LOC, "/", sep = "")) # CWD # Probably should change this so it is system independent. E.g. R internal commands. -yrs = as.numeric(system(paste("ls *.h5 | ","grep"," ","-o"," ",'[1,2][0-9][0-9][0-9]',sep="") -,intern=TRUE)) +yrs <- as.numeric(system(paste("ls *.h5 | ", "grep", " ", "-o", " ", "[1,2][0-9][0-9][0-9]", sep = ""), + intern = TRUE +)) -first.yr =min(unique(yrs)) # first year of met data -last.yr =max(unique(yrs)) # last year of met data -yr.range = (last.yr-first.yr)+1 # number of years +first.yr <- min(unique(yrs)) # first year of met data +last.yr <- max(unique(yrs)) # last year of met data +yr.range <- (last.yr - first.yr) + 1 # number of years # Probably should change this so it is system independent. E.g. R internal commands. 
-months = system(paste("ls *.h5 | ","grep"," ","-o -E"," ", -"'JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC'",sep=""),intern=TRUE) +months <- system(paste("ls *.h5 | ", "grep", " ", "-o -E", " ", + "'JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC'", + sep = "" +), intern = TRUE) #--------------------------------------------------------------------------------------------------# @@ -99,114 +102,126 @@ months = system(paste("ls *.h5 | ","grep"," ","-o -E"," ", # Info: Create vectors of full time-series met data setwd(output_dir) # Change to output directory -print(paste("----- Start year: ",first.yr)) -print(paste("----- End year: ",last.yr)) -print(paste("----- Number of years: ",yr.range)) +print(paste("----- Start year: ", first.yr)) +print(paste("----- End year: ", last.yr)) +print(paste("----- Number of years: ", yr.range)) # initialize output arrays -vgrd = numeric(0) # meridional wind [m/s]. may be empty -vddsf = numeric(0) # visible diffuse downward solar radiation [W/m2] -vbdsf = numeric(0) # visible beam downward solar radiation [W/m2] -ugrd = numeric(0) # zonal wind [m/s] -Tair = numeric(0) # temperature [K]. hdf5 var name "tmp" -sh = numeric(0) # specific humidity [kg_H2O/kg_air] -pres = numeric(0) # pressure [Pa] -prate = numeric(0) # precipitation rate [kg_H2O/m2/s] -nddsf = numeric(0) # near IR diffuse downward solar radiation [W/m2] -nbdsf = numeric(0) # near IR beam downward solar radiation [W/m2] -hgt = numeric(0) # geopotential height [m] -dlwrf = numeric(0) # downward long wave radiation [W/m2] -CO2 = numeric(0) # surface co2 concentration [ppm] - -time = 1 -time_year = numeric(0) -cnt = 1 -month_order <- c("JAN","FEB","MAR","APR","MAY","JUN","JUL","AUG","SEP","OCT","NOV","DEC") +vgrd <- numeric(0) # meridional wind [m/s]. may be empty +vddsf <- numeric(0) # visible diffuse downward solar radiation [W/m2] +vbdsf <- numeric(0) # visible beam downward solar radiation [W/m2] +ugrd <- numeric(0) # zonal wind [m/s] +Tair <- numeric(0) # temperature [K]. hdf5 var name "tmp" +sh <- numeric(0) # specific humidity [kg_H2O/kg_air] +pres <- numeric(0) # pressure [Pa] +prate <- numeric(0) # precipitation rate [kg_H2O/m2/s] +nddsf <- numeric(0) # near IR diffuse downward solar radiation [W/m2] +nbdsf <- numeric(0) # near IR beam downward solar radiation [W/m2] +hgt <- numeric(0) # geopotential height [m] +dlwrf <- numeric(0) # downward long wave radiation [W/m2] +CO2 <- numeric(0) # surface co2 concentration [ppm] + +time <- 1 +time_year <- numeric(0) +cnt <- 1 +month_order <- c("JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC") # Loops over met files -for (i in sort(unique(yrs))){ - - sel <- which(yrs == i) # which year? 
- - for(m in 1:12){ - msel <- sel[which(months[sel] == month_order[m])] # select appropriate month - print(paste("**** PROCESSING: ",met_files[msel])) # output status to screen - met = hdf5load(paste("/",MET_DRIVER_LOC,"/",met_files[msel],sep=""), - load = FALSE,tidy=TRUE) # load met file - - # put data into arrays - if(cnt == 1){ - ugrd[1:length(as.numeric(met$ugrd))]=as.numeric(met$ugrd) - vgrd[1:length(as.numeric(met$vgrd))]=as.numeric(met$vgrd) - vddsf[1:length(as.numeric(met$vddsf))]=as.numeric(met$vddsf) - vbdsf[1:length(as.numeric(met$vbdsf))]=as.numeric(met$vbdsf) - Tair[1:length(as.numeric(met$tmp))]=as.numeric(met$tmp) - sh[1:length(as.numeric(met$sh))]=as.numeric(met$sh) - pres[1:length(as.numeric(met$pres))]=as.numeric(met$pres) - prate[1:length(as.numeric(met$prate))]=as.numeric(met$prate) - nddsf[1:length(as.numeric(met$nddsf))]=as.numeric(met$nddsf) - nbdsf[1:length(as.numeric(met$nbdsf))]=as.numeric(met$nbdsf) - hgt[1:length(as.numeric(met$hgt))]=as.numeric(met$hgt) - dlwrf[1:length(as.numeric(met$dlwrf))]=as.numeric(met$dlwrf) - CO2[1:length(as.numeric(met$co2))]=as.numeric(met$co2) - }else{ - # populate arrays - start = length(ugrd) - ugrd = append(ugrd,as.numeric(met$ugrd),after=start) - - rm(start);start=length(vgrd) - vgrd = append(vgrd,as.numeric(met$vgrd),after=start) - - rm(start);start=length(vddsf) - vddsf = append(vddsf,as.numeric(met$vddsf),after=start) - - rm(start);start=length(vbdsf) - vbdsf = append(vbdsf,as.numeric(met$vbdsf),after=start) - - rm(start);start=length(Tair) - Tair = append(Tair,as.numeric(met$tmp),after=start) - - rm(start);start=length(sh) - sh = append(sh,as.numeric(met$sh),after=start) - - rm(start);start=length(pres) - pres = append(pres,as.numeric(met$pres),after=start) - - rm(start);start=length(prate) - prate = append(prate,as.numeric(met$prate),after=start) - - rm(start);start=length(nddsf) - nddsf = append(nddsf,as.numeric(met$nddsf),after=start) - - rm(start);start=length(nbdsf) - nbdsf = append(nbdsf,as.numeric(met$nbdsf),after=start) - - rm(start);start=length(hgt) - hgt = append(hgt,as.numeric(met$hgt),after=start) - - rm(start);start=length(dlwrf) - dlwrf = append(dlwrf,as.numeric(met$dlwrf),after=start) - - rm(start);start=length(CO2) - CO2 = append(CO2,as.numeric(met$co2),after=start) - } - cnt <- cnt + 1 - rm(met) # remove met data for last year - } - obs = length(Tair) - time_year[time] = data.frame(length=as.numeric(obs)) - time=time+1 +for (i in sort(unique(yrs))) { + sel <- which(yrs == i) # which year? 
+ + for (m in 1:12) { + msel <- sel[which(months[sel] == month_order[m])] # select appropriate month + print(paste("**** PROCESSING: ", met_files[msel])) # output status to screen + met <- hdf5load(paste("/", MET_DRIVER_LOC, "/", met_files[msel], sep = ""), + load = FALSE, tidy = TRUE + ) # load met file + + # put data into arrays + if (cnt == 1) { + ugrd[1:length(as.numeric(met$ugrd))] <- as.numeric(met$ugrd) + vgrd[1:length(as.numeric(met$vgrd))] <- as.numeric(met$vgrd) + vddsf[1:length(as.numeric(met$vddsf))] <- as.numeric(met$vddsf) + vbdsf[1:length(as.numeric(met$vbdsf))] <- as.numeric(met$vbdsf) + Tair[1:length(as.numeric(met$tmp))] <- as.numeric(met$tmp) + sh[1:length(as.numeric(met$sh))] <- as.numeric(met$sh) + pres[1:length(as.numeric(met$pres))] <- as.numeric(met$pres) + prate[1:length(as.numeric(met$prate))] <- as.numeric(met$prate) + nddsf[1:length(as.numeric(met$nddsf))] <- as.numeric(met$nddsf) + nbdsf[1:length(as.numeric(met$nbdsf))] <- as.numeric(met$nbdsf) + hgt[1:length(as.numeric(met$hgt))] <- as.numeric(met$hgt) + dlwrf[1:length(as.numeric(met$dlwrf))] <- as.numeric(met$dlwrf) + CO2[1:length(as.numeric(met$co2))] <- as.numeric(met$co2) + } else { + # populate arrays + start <- length(ugrd) + ugrd <- append(ugrd, as.numeric(met$ugrd), after = start) + + rm(start) + start <- length(vgrd) + vgrd <- append(vgrd, as.numeric(met$vgrd), after = start) + + rm(start) + start <- length(vddsf) + vddsf <- append(vddsf, as.numeric(met$vddsf), after = start) + + rm(start) + start <- length(vbdsf) + vbdsf <- append(vbdsf, as.numeric(met$vbdsf), after = start) + + rm(start) + start <- length(Tair) + Tair <- append(Tair, as.numeric(met$tmp), after = start) + + rm(start) + start <- length(sh) + sh <- append(sh, as.numeric(met$sh), after = start) + + rm(start) + start <- length(pres) + pres <- append(pres, as.numeric(met$pres), after = start) + + rm(start) + start <- length(prate) + prate <- append(prate, as.numeric(met$prate), after = start) + + rm(start) + start <- length(nddsf) + nddsf <- append(nddsf, as.numeric(met$nddsf), after = start) + + rm(start) + start <- length(nbdsf) + nbdsf <- append(nbdsf, as.numeric(met$nbdsf), after = start) + + rm(start) + start <- length(hgt) + hgt <- append(hgt, as.numeric(met$hgt), after = start) + + rm(start) + start <- length(dlwrf) + dlwrf <- append(dlwrf, as.numeric(met$dlwrf), after = start) + + rm(start) + start <- length(CO2) + CO2 <- append(CO2, as.numeric(met$co2), after = start) + } + cnt <- cnt + 1 + rm(met) # remove met data for last year + } + obs <- length(Tair) + time_year[time] <- data.frame(length = as.numeric(obs)) + time <- time + 1 } ######### Calculate cumulative precip by year ####### -cumprcp = numeric(0) -for(j in 1:length(unique(yrs))){ - if(j == 1){ - cumprcp[1:as.numeric(time_year[j])]=cumsum(prate[1:as.numeric(time_year[j])]) - }else{ - start = length(cumprcp) - cumprcp[start:as.numeric(time_year[j])]=cumsum(prate[start:as.numeric(time_year[j])]) - } +cumprcp <- numeric(0) +for (j in 1:length(unique(yrs))) { + if (j == 1) { + cumprcp[1:as.numeric(time_year[j])] <- cumsum(prate[1:as.numeric(time_year[j])]) + } else { + start <- length(cumprcp) + cumprcp[start:as.numeric(time_year[j])] <- cumsum(prate[start:as.numeric(time_year[j])]) + } } #--------------------------------------------------------------------------------------------------# @@ -214,344 +229,404 @@ for(j in 1:length(unique(yrs))){ #---------------- Output --------------------------------------------------------------------------# # Info: Generate output 
diagnostics -if (args[3]==1) { - print("************* OUTPUT STATS ONLY *************") - print("************* !NOT COMPLETED YET! *************") - #TODO: Create statistics here -}else{ - message('') - print("************* OUTPUT STATS & DIAGNOSTIC PLOTS *************") +if (args[3] == 1) { + print("************* OUTPUT STATS ONLY *************") + print("************* !NOT COMPLETED YET! *************") + # TODO: Create statistics here +} else { + message("") + print("************* OUTPUT STATS & DIAGNOSTIC PLOTS *************") message("************* OUTPUT STATS & DIAGNOSTIC PLOTS *************") - #---------------- Generate Diagnostic Plots -----------------------------------------------# - ptcex = 1.0 - axcex = 1.4 - labcex = 1.8 - maincex = 1.5 - xaxis = 1:length(Tair) - years = unique(yrs) - years2 = c(years[2:yr.range],years[yr.range]+1) - - # TODO: Put in a figure loop here rather than being - # explicit with each variable. - vars = list("vgrd","vddsf","vbdsf","ugrd","Tair","sh","pres","prate","cumprcp", - "nddsf","nbdsf","hgt","dlwrf","CO2") - ################## Generate Plots ################# - - ############# vgrd - num=1 - message('') - message("************* Plotting meridional wind [m/s] *************") - print("************* Plotting meridional wind [m/s] *************") - pdf(paste("ED2_",vars[num],"_Met_Drivers.pdf",sep=""),width=14,height=7) - par(mfrow=c(1,1),mar=c(4.5,5.2,1,0.8), mgp=c(3,1,0)) # B, L, T, R - plot(xaxis,vgrd,pch=21,bg="dark grey",col="dark grey",xaxt="n",xlab="Year", - ylab=expression(paste(Wind[meridional]," (",m~s^{-1},")")),cex=ptcex, - cex.lab=labcex,cex.axis=axcex) - axis(1,at=1,label=as.character(years[1]),cex.axis=axcex) # first year start - axis(1,at=as.numeric(time_year),label=as.character(years2),cex.axis=axcex) # remaining years - abline(v=as.numeric(time_year)) - box(lwd=2.2) - dev.off() # Close figure - - # Shrink PDF - pdf = paste("ED2_",vars[num],"_Met_Drivers.pdf",sep="") - png = paste("ED2_",vars[num],"_Met_Drivers.png",sep="") - system(paste("convert -quality 10 ",pdf," ",png,sep="")) - system(paste("convert ",png," ",pdf,sep="")) - - ############# vddsf - num=2 - message('') - message("************* Plotting visible diffuse downward solar radiation [W/m2] *************") - print("************* Plotting visible diffuse downward solar radiation [W/m2] *************") - pdf(paste("ED2_",vars[num],"_Met_Drivers.pdf",sep=""),width=14,height=7) - par(mfrow=c(1,1),mar=c(4.5,5.2,1,0.8), mgp=c(3,1,0)) # B, L, T, R - plot(xaxis,vddsf,pch=21,bg="dark grey",col="dark grey",xaxt="n",xlab="Year", - ylab=expression(paste(Vis.Rad.[diffuse]," (",W~m^{-2},")")),cex=ptcex, - cex.lab=labcex,cex.axis=axcex) - axis(1,at=1,label=as.character(years[1]),cex.axis=axcex) # first year start - axis(1,at=as.numeric(time_year),label=as.character(years2),cex.axis=axcex) # remaining years - abline(v=as.numeric(time_year)) - box(lwd=2.2) - dev.off() # Close figure - - # Shrink PDF - pdf = paste("ED2_",vars[num],"_Met_Drivers.pdf",sep="") - png = paste("ED2_",vars[num],"_Met_Drivers.png",sep="") - system(paste("convert -quality 10 ",pdf," ",png,sep="")) - system(paste("convert ",png," ",pdf,sep="")) - - ############# vbdsf - num=3 - message('') - message("************* Plotting visible beam downward solar radiation [W/m2] *************") - print("************* Plotting visible beam downward solar radiation [W/m2] *************") - pdf(paste("ED2_",vars[num],"_Met_Drivers.pdf",sep=""),width=14,height=7) - par(mfrow=c(1,1),mar=c(4.5,5.2,1,0.8), mgp=c(3,1,0)) # B, L, T, 
R - plot(xaxis,vbdsf,pch=21,bg="dark grey",col="dark grey",xaxt="n",xlab="Year", - ylab=expression(paste(Vis.Rad.[beam]," (",W~m^{-2},")")),cex=ptcex, - cex.lab=labcex,cex.axis=axcex) - axis(1,at=1,label=as.character(years[1]),cex.axis=axcex) # first year start - axis(1,at=as.numeric(time_year),label=as.character(years2),cex.axis=axcex) # remaining years - abline(v=as.numeric(time_year)) - box(lwd=2.2) - dev.off() # Close figure - - # Shrink PDF - pdf = paste("ED2_",vars[num],"_Met_Drivers.pdf",sep="") - png = paste("ED2_",vars[num],"_Met_Drivers.png",sep="") - system(paste("convert -quality 10 ",pdf," ",png,sep="")) - system(paste("convert ",png," ",pdf,sep="")) - - ############# ugrd - num=4 - message('') - message("************* Plotting zonal wind [m/s] *************") - print("************* Plotting zonal wind [m/s] *************") - pdf(paste("ED2_",vars[num],"_Met_Drivers.pdf",sep=""),width=14,height=7) - par(mfrow=c(1,1),mar=c(4.5,5.2,1,0.8), mgp=c(3,1,0)) # B, L, T, R - plot(xaxis,ugrd,pch=21,bg="dark grey",col="dark grey",xaxt="n",xlab="Year", - ylab=expression(paste(Wind[zonal]," (",m~s^{-1},")")),cex=ptcex, - cex.lab=labcex,cex.axis=axcex) - axis(1,at=1,label=as.character(years[1]),cex.axis=axcex) # first year start - axis(1,at=as.numeric(time_year),label=as.character(years2),cex.axis=axcex) # remaining years - abline(v=as.numeric(time_year)) - box(lwd=2.2) - dev.off() # Close figure - - # Shrink PDF - pdf = paste("ED2_",vars[num],"_Met_Drivers.pdf",sep="") - png = paste("ED2_",vars[num],"_Met_Drivers.png",sep="") - system(paste("convert -quality 10 ",pdf," ",png,sep="")) - system(paste("convert ",png," ",pdf,sep="")) - - ############# Tair - num=5 - message('') - message("************* Plotting air temp (degC) *************") - print("************* Plotting air temp (degC) *************") - pdf(paste("ED2_",vars[num],"_Met_Drivers.pdf",sep=""),width=14,height=7) - par(mfrow=c(1,1),mar=c(4.5,5.2,1,0.8), mgp=c(3,1,0)) # B, L, T, R - plot(xaxis,Tair-273.15,pch=21,bg="dark grey",col="dark grey",xaxt="n",xlab="Year", - ylab=expression(paste(T[air]," (",{degree}," C)" )), cex=ptcex,cex.lab=labcex,cex.axis=axcex) - axis(1,at=1,label=as.character(years[1]),cex.axis=axcex) # first year start - axis(1,at=as.numeric(time_year),label=as.character(years2),cex.axis=axcex) # remaining years - abline(v=as.numeric(time_year)) - box(lwd=2.2) - dev.off() # Close figure - - # Shrink PDF - pdf = paste("ED2_",vars[num],"_Met_Drivers.pdf",sep="") - png = paste("ED2_",vars[num],"_Met_Drivers.png",sep="") - system(paste("convert -quality 10 ",pdf," ",png,sep="")) - system(paste("convert ",png," ",pdf,sep="")) - - ############# Specific Humidity - num=6 - message('') - message("************* Plotting Specific Humidity (kg H20/Kg air) *************") - print("************* Plotting Specific Humidity (kg H20/Kg air) *************") - pdf(paste("ED2_",vars[num],"_Met_Drivers.pdf",sep=""),width=14,height=7) - par(mfrow=c(1,1),mar=c(4.5,5.2,1,0.8), mgp=c(3,1,0)) # B, L, T, R - plot(xaxis,sh,pch=21,bg="dark grey",col="dark grey",xaxt="n",xlab="Year", - ylab=expression(paste("Specific Humidity"," (kg H2O / kg air)" )), cex=ptcex,cex.lab=labcex,cex.axis=axcex) - axis(1,at=1,label=as.character(years[1]),cex.axis=axcex) # first year start - axis(1,at=as.numeric(time_year),label=as.character(years2),cex.axis=axcex) # remaining years - abline(v=as.numeric(time_year)) - box(lwd=2.2) - dev.off() # Close figure - - # Shrink PDF - pdf = paste("ED2_",vars[num],"_Met_Drivers.pdf",sep="") - png = 
paste("ED2_",vars[num],"_Met_Drivers.png",sep="") - system(paste("convert -quality 10 ",pdf," ",png,sep="")) - system(paste("convert ",png," ",pdf,sep="")) - - ############# pres - num=7 - message('') - message("************* Plotting Pressure [Pa] *************") - print("************* Plotting Pressure [Pa] *************") - pdf(paste("ED2_",vars[num],"_Met_Drivers.pdf",sep=""),width=14,height=7) - par(mfrow=c(1,1),mar=c(4.5,5.2,1,0.8), mgp=c(3,1,0)) # B, L, T, R - plot(xaxis,pres,pch=21,bg="dark grey",col="dark grey",xaxt="n",xlab="Year", - ylab="Pressure (Pa)", cex=ptcex,cex.lab=labcex,cex.axis=axcex) - axis(1,at=1,label=as.character(years[1]),cex.axis=axcex) # first year start - axis(1,at=as.numeric(time_year),label=as.character(years2),cex.axis=axcex) # remaining years - abline(v=as.numeric(time_year)) - box(lwd=2.2) - dev.off() # Close figure - - # Shrink PDF - pdf = paste("ED2_",vars[num],"_Met_Drivers.pdf",sep="") - png = paste("ED2_",vars[num],"_Met_Drivers.png",sep="") - system(paste("convert -quality 10 ",pdf," ",png,sep="")) - system(paste("convert ",png," ",pdf,sep="")) - - ############# Precip rate - num=8 - message('') - message("************* Plotting Precip Rate (kg H20/m2/s) *************") - print("************* Plotting Precip Rate (kg H20/m2/s) *************") - pdf(paste("ED2_",vars[num],"_Met_Drivers.pdf",sep=""),width=14,height=7) - par(mfrow=c(1,1),mar=c(4.5,5.2,1,0.8), mgp=c(3,1,0)) # B, L, T, R - plot(xaxis,prate,pch=21,bg="dark grey",col="dark grey",xaxt="n",xlab="Year", - ylab=expression(paste("Precip."," (kg H2O ",m^{-2}~s^{-1},")" )), - cex=ptcex,cex.lab=labcex,cex.axis=axcex) - axis(1,at=1,label=as.character(years[1]),cex.axis=axcex) # first year start - axis(1,at=as.numeric(time_year),label=as.character(years2),cex.axis=axcex) # remaining years - abline(v=as.numeric(time_year)) - box(lwd=2.2) - dev.off() # Close figure - - # Shrink PDF - pdf = paste("ED2_",vars[num],"_Met_Drivers.pdf",sep="") - png = paste("ED2_",vars[num],"_Met_Drivers.png",sep="") - system(paste("convert -quality 10 ",pdf," ",png,sep="")) - system(paste("convert ",png," ",pdf,sep="")) - - ############# cumulative precip - num=9 - message('') - message("************* Plotting Cumulative Precip *************") - print("************* Plotting Cumulative Precip *************") - pdf(paste("ED2_",vars[num],"_Met_Drivers.pdf",sep=""),width=14,height=7) - par(mfrow=c(1,1),mar=c(4.5,5.2,1,0.8), mgp=c(3,1,0)) # B, L, T, R - plot(xaxis,cumprcp,pch=21,bg="dark grey",col="dark grey",xaxt="n",xlab="Year", - ylab=expression(paste("Cumulative Precip."," (kg H2O ",m^{-2}~s^{-1},")" )), - cex=ptcex,cex.lab=labcex,cex.axis=axcex) - lines(xaxis,cumprcp,lwd=1.0,col="blue") - polygon(c(xaxis, xaxis[length(xaxis)]), c(cumprcp, cumprcp[1]), col="blue") - axis(1,at=1,label=as.character(years[1]),cex.axis=axcex) # first year start - axis(1,at=as.numeric(time_year),label=as.character(years2),cex.axis=axcex) # remaining years - abline(v=as.numeric(time_year)) - box(lwd=2.2) - dev.off() # Close figure - - # Shrink PDF - pdf = paste("ED2_",vars[num],"_Met_Drivers.pdf",sep="") - png = paste("ED2_",vars[num],"_Met_Drivers.png",sep="") - system(paste("convert -quality 10 ",pdf," ",png,sep="")) - system(paste("convert ",png," ",pdf,sep="")) - - ############# nddsf - num=10 - message('') - message("************* Plotting IR diffuse downward solar radiation [W/m2] *************") - print("************* Plotting IR diffuse downward solar radiation [W/m2] *************") - 
pdf(paste("ED2_",vars[num],"_Met_Drivers.pdf",sep=""),width=14,height=7) - par(mfrow=c(1,1),mar=c(4.5,5.2,1,0.8), mgp=c(3,1,0)) # B, L, T, R - plot(xaxis,nddsf,pch=21,bg="dark grey",col="dark grey",xaxt="n",xlab="Year", - ylab=expression(paste(IR.Rad.[diffuse]," (",W~m^{-2},")")),cex=ptcex, - cex.lab=labcex,cex.axis=axcex) - axis(1,at=1,label=as.character(years[1]),cex.axis=axcex) # first year start - axis(1,at=as.numeric(time_year),label=as.character(years2),cex.axis=axcex) # remaining years - abline(v=as.numeric(time_year)) - box(lwd=2.2) - dev.off() # Close figure - - # Shrink PDF - pdf = paste("ED2_",vars[num],"_Met_Drivers.pdf",sep="") - png = paste("ED2_",vars[num],"_Met_Drivers.png",sep="") - system(paste("convert -quality 10 ",pdf," ",png,sep="")) - system(paste("convert ",png," ",pdf,sep="")) - - ############# nbdsf - num=11 - message('') - message("************* Plotting IR direct beam downward solar radiation [W/m2] *************") - print("************* Plotting IR direct beam downward solar radiation [W/m2] *************") - pdf(paste("ED2_",vars[num],"_Met_Drivers.pdf",sep=""),width=14,height=7) - par(mfrow=c(1,1),mar=c(4.5,5.2,1,0.8), mgp=c(3,1,0)) # B, L, T, R - plot(xaxis,nbdsf,pch=21,bg="dark grey",col="dark grey",xaxt="n",xlab="Year", - ylab=expression(paste(IR.Rad.[beam]," (",W~m^{-2},")")),cex=ptcex, - cex.lab=labcex,cex.axis=axcex) - axis(1,at=1,label=as.character(years[1]),cex.axis=axcex) # first year start - axis(1,at=as.numeric(time_year),label=as.character(years2),cex.axis=axcex) # remaining years - abline(v=as.numeric(time_year)) - box(lwd=2.2) - dev.off() # Close figure - - # Shrink PDF - pdf = paste("ED2_",vars[num],"_Met_Drivers.pdf",sep="") - png = paste("ED2_",vars[num],"_Met_Drivers.png",sep="") - system(paste("convert -quality 10 ",pdf," ",png,sep="")) - system(paste("convert ",png," ",pdf,sep="")) - - ############# hgt - num=12 - message('') - message("************* Plotting Geopotential Height [m] *************") - print("************* Plotting Geopotential Height [m] *************") - pdf(paste("ED2_",vars[num],"_Met_Drivers.pdf",sep=""),width=14,height=7) - par(mfrow=c(1,1),mar=c(4.5,5.2,1,0.8), mgp=c(3,1,0)) # B, L, T, R - plot(xaxis,hgt,pch=21,bg="dark grey",col="dark grey",xaxt="n",xlab="Year", - ylab="Geopotential Height (m)",cex=ptcex, - cex.lab=labcex,cex.axis=axcex) - axis(1,at=1,label=as.character(years[1]),cex.axis=axcex) # first year start - axis(1,at=as.numeric(time_year),label=as.character(years2),cex.axis=axcex) # remaining years - abline(v=as.numeric(time_year)) - box(lwd=2.2) - dev.off() # Close figure - - # Shrink PDF - pdf = paste("ED2_",vars[num],"_Met_Drivers.pdf",sep="") - png = paste("ED2_",vars[num],"_Met_Drivers.png",sep="") - system(paste("convert -quality 10 ",pdf," ",png,sep="")) - system(paste("convert ",png," ",pdf,sep="")) - - ############# dlwrf - num=13 - message('') - message("************* Plotting Downward long wave radiation [W/m2] *************") - print("************* Plotting Downward long wave radiation [W/m2] *************") - pdf(paste("ED2_",vars[num],"_Met_Drivers.pdf",sep=""),width=14,height=7) - par(mfrow=c(1,1),mar=c(4.5,5.2,1,0.8), mgp=c(3,1,0)) # B, L, T, R - plot(xaxis,dlwrf,pch=21,bg="dark grey",col="dark grey",xaxt="n",xlab="Year", - ylab=expression(paste(LW.Rad.[downward]," (",W~m^{-2},")")),cex=ptcex, - cex.lab=labcex,cex.axis=axcex) - axis(1,at=1,label=as.character(years[1]),cex.axis=axcex) # first year start - axis(1,at=as.numeric(time_year),label=as.character(years2),cex.axis=axcex) # remaining 
years - abline(v=as.numeric(time_year)) - box(lwd=2.2) - dev.off() # Close figure - - # Shrink PDF - pdf = paste("ED2_",vars[num],"_Met_Drivers.pdf",sep="") - png = paste("ED2_",vars[num],"_Met_Drivers.png",sep="") - system(paste("convert -quality 10 ",pdf," ",png,sep="")) - system(paste("convert ",png," ",pdf,sep="")) - - ############# CO2 Concentration - num=14 - message('') - message("************* Plotting CO2 Concentration (ppm) *************") - print("************* Plotting CO2 Concentration (ppm) *************") - pdf(paste("ED2_",vars[num],"_Met_Drivers.pdf",sep=""),width=14,height=7) - par(mfrow=c(1,1),mar=c(4.5,5.2,1,0.8), mgp=c(3,1,0)) # B, L, T, R - plot(xaxis,CO2,pch=21,bg="dark grey",col="dark grey",xaxt="n",xlab="Year", - ylab=expression(paste(CO["2"]," Conc. (ppm)",)), - cex=ptcex,cex.lab=labcex,cex.axis=axcex) - axis(1,at=1,label=as.character(years[1]),cex.axis=axcex) # first year start - axis(1,at=as.numeric(time_year),label=as.character(years2),cex.axis=axcex) # remaining years - abline(v=as.numeric(time_year)) - box(lwd=2.2) # Close figure - dev.off() - - # Shrink PDF - pdf = paste("ED2_",vars[num],"_Met_Drivers.pdf",sep="") - png = paste("ED2_",vars[num],"_Met_Drivers.png",sep="") - system(paste("convert -quality 10 ",pdf," ",png,sep="")) - system(paste("convert ",png," ",pdf,sep="")) - - #------------------String Together PDFs and remove individual .pdfs------------------------# - # Info: could string together pngs into single pdf - out_pdf=paste("ED2_Met_Driver_Diagnostics.pdf",sep="") - try(system(paste("rm ",out_pdf)),silent=TRUE) - try(system(paste("gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -sOutputFile=", - out_pdf," *.pdf",sep="")),silent=FALSE) - system("rm *_Drivers.pdf") # Delete individual .pdfs, leave concatentated file - + #---------------- Generate Diagnostic Plots -----------------------------------------------# + ptcex <- 1.0 + axcex <- 1.4 + labcex <- 1.8 + maincex <- 1.5 + xaxis <- 1:length(Tair) + years <- unique(yrs) + years2 <- c(years[2:yr.range], years[yr.range] + 1) + + # TODO: Put in a figure loop here rather than being + # explicit with each variable. 
+ vars <- list( + "vgrd", "vddsf", "vbdsf", "ugrd", "Tair", "sh", "pres", "prate", "cumprcp", + "nddsf", "nbdsf", "hgt", "dlwrf", "CO2" + ) + ################## Generate Plots ################# + + ############# vgrd + num <- 1 + message("") + message("************* Plotting meridional wind [m/s] *************") + print("************* Plotting meridional wind [m/s] *************") + pdf(paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = ""), width = 14, height = 7) + par(mfrow = c(1, 1), mar = c(4.5, 5.2, 1, 0.8), mgp = c(3, 1, 0)) # B, L, T, R + plot(xaxis, vgrd, + pch = 21, bg = "dark grey", col = "dark grey", xaxt = "n", xlab = "Year", + ylab = expression(paste(Wind[meridional], " (", m ~ s^{ + -1 + }, ")")), cex = ptcex, + cex.lab = labcex, cex.axis = axcex + ) + axis(1, at = 1, label = as.character(years[1]), cex.axis = axcex) # first year start + axis(1, at = as.numeric(time_year), label = as.character(years2), cex.axis = axcex) # remaining years + abline(v = as.numeric(time_year)) + box(lwd = 2.2) + dev.off() # Close figure + + # Shrink PDF + pdf <- paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = "") + png <- paste("ED2_", vars[num], "_Met_Drivers.png", sep = "") + system(paste("convert -quality 10 ", pdf, " ", png, sep = "")) + system(paste("convert ", png, " ", pdf, sep = "")) + + ############# vddsf + num <- 2 + message("") + message("************* Plotting visible diffuse downward solar radiation [W/m2] *************") + print("************* Plotting visible diffuse downward solar radiation [W/m2] *************") + pdf(paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = ""), width = 14, height = 7) + par(mfrow = c(1, 1), mar = c(4.5, 5.2, 1, 0.8), mgp = c(3, 1, 0)) # B, L, T, R + plot(xaxis, vddsf, + pch = 21, bg = "dark grey", col = "dark grey", xaxt = "n", xlab = "Year", + ylab = expression(paste(Vis.Rad.[diffuse], " (", W ~ m^{ + -2 + }, ")")), cex = ptcex, + cex.lab = labcex, cex.axis = axcex + ) + axis(1, at = 1, label = as.character(years[1]), cex.axis = axcex) # first year start + axis(1, at = as.numeric(time_year), label = as.character(years2), cex.axis = axcex) # remaining years + abline(v = as.numeric(time_year)) + box(lwd = 2.2) + dev.off() # Close figure + + # Shrink PDF + pdf <- paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = "") + png <- paste("ED2_", vars[num], "_Met_Drivers.png", sep = "") + system(paste("convert -quality 10 ", pdf, " ", png, sep = "")) + system(paste("convert ", png, " ", pdf, sep = "")) + + ############# vbdsf + num <- 3 + message("") + message("************* Plotting visible beam downward solar radiation [W/m2] *************") + print("************* Plotting visible beam downward solar radiation [W/m2] *************") + pdf(paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = ""), width = 14, height = 7) + par(mfrow = c(1, 1), mar = c(4.5, 5.2, 1, 0.8), mgp = c(3, 1, 0)) # B, L, T, R + plot(xaxis, vbdsf, + pch = 21, bg = "dark grey", col = "dark grey", xaxt = "n", xlab = "Year", + ylab = expression(paste(Vis.Rad.[beam], " (", W ~ m^{ + -2 + }, ")")), cex = ptcex, + cex.lab = labcex, cex.axis = axcex + ) + axis(1, at = 1, label = as.character(years[1]), cex.axis = axcex) # first year start + axis(1, at = as.numeric(time_year), label = as.character(years2), cex.axis = axcex) # remaining years + abline(v = as.numeric(time_year)) + box(lwd = 2.2) + dev.off() # Close figure + + # Shrink PDF + pdf <- paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = "") + png <- paste("ED2_", vars[num], "_Met_Drivers.png", sep = "") + system(paste("convert 
-quality 10 ", pdf, " ", png, sep = "")) + system(paste("convert ", png, " ", pdf, sep = "")) + + ############# ugrd + num <- 4 + message("") + message("************* Plotting zonal wind [m/s] *************") + print("************* Plotting zonal wind [m/s] *************") + pdf(paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = ""), width = 14, height = 7) + par(mfrow = c(1, 1), mar = c(4.5, 5.2, 1, 0.8), mgp = c(3, 1, 0)) # B, L, T, R + plot(xaxis, ugrd, + pch = 21, bg = "dark grey", col = "dark grey", xaxt = "n", xlab = "Year", + ylab = expression(paste(Wind[zonal], " (", m ~ s^{ + -1 + }, ")")), cex = ptcex, + cex.lab = labcex, cex.axis = axcex + ) + axis(1, at = 1, label = as.character(years[1]), cex.axis = axcex) # first year start + axis(1, at = as.numeric(time_year), label = as.character(years2), cex.axis = axcex) # remaining years + abline(v = as.numeric(time_year)) + box(lwd = 2.2) + dev.off() # Close figure + + # Shrink PDF + pdf <- paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = "") + png <- paste("ED2_", vars[num], "_Met_Drivers.png", sep = "") + system(paste("convert -quality 10 ", pdf, " ", png, sep = "")) + system(paste("convert ", png, " ", pdf, sep = "")) + + ############# Tair + num <- 5 + message("") + message("************* Plotting air temp (degC) *************") + print("************* Plotting air temp (degC) *************") + pdf(paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = ""), width = 14, height = 7) + par(mfrow = c(1, 1), mar = c(4.5, 5.2, 1, 0.8), mgp = c(3, 1, 0)) # B, L, T, R + plot(xaxis, Tair - 273.15, + pch = 21, bg = "dark grey", col = "dark grey", xaxt = "n", xlab = "Year", + ylab = expression(paste( + T[air], + " (", + { + degree + }, + " C)" + )), cex = ptcex, cex.lab = labcex, cex.axis = axcex + ) + axis(1, at = 1, label = as.character(years[1]), cex.axis = axcex) # first year start + axis(1, at = as.numeric(time_year), label = as.character(years2), cex.axis = axcex) # remaining years + abline(v = as.numeric(time_year)) + box(lwd = 2.2) + dev.off() # Close figure + + # Shrink PDF + pdf <- paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = "") + png <- paste("ED2_", vars[num], "_Met_Drivers.png", sep = "") + system(paste("convert -quality 10 ", pdf, " ", png, sep = "")) + system(paste("convert ", png, " ", pdf, sep = "")) + + ############# Specific Humidity + num <- 6 + message("") + message("************* Plotting Specific Humidity (kg H20/Kg air) *************") + print("************* Plotting Specific Humidity (kg H20/Kg air) *************") + pdf(paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = ""), width = 14, height = 7) + par(mfrow = c(1, 1), mar = c(4.5, 5.2, 1, 0.8), mgp = c(3, 1, 0)) # B, L, T, R + plot(xaxis, sh, + pch = 21, bg = "dark grey", col = "dark grey", xaxt = "n", xlab = "Year", + ylab = expression(paste("Specific Humidity", " (kg H2O / kg air)")), cex = ptcex, cex.lab = labcex, cex.axis = axcex + ) + axis(1, at = 1, label = as.character(years[1]), cex.axis = axcex) # first year start + axis(1, at = as.numeric(time_year), label = as.character(years2), cex.axis = axcex) # remaining years + abline(v = as.numeric(time_year)) + box(lwd = 2.2) + dev.off() # Close figure + + # Shrink PDF + pdf <- paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = "") + png <- paste("ED2_", vars[num], "_Met_Drivers.png", sep = "") + system(paste("convert -quality 10 ", pdf, " ", png, sep = "")) + system(paste("convert ", png, " ", pdf, sep = "")) + + ############# pres + num <- 7 + message("") + message("************* Plotting Pressure [Pa] 
*************") + print("************* Plotting Pressure [Pa] *************") + pdf(paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = ""), width = 14, height = 7) + par(mfrow = c(1, 1), mar = c(4.5, 5.2, 1, 0.8), mgp = c(3, 1, 0)) # B, L, T, R + plot(xaxis, pres, + pch = 21, bg = "dark grey", col = "dark grey", xaxt = "n", xlab = "Year", + ylab = "Pressure (Pa)", cex = ptcex, cex.lab = labcex, cex.axis = axcex + ) + axis(1, at = 1, label = as.character(years[1]), cex.axis = axcex) # first year start + axis(1, at = as.numeric(time_year), label = as.character(years2), cex.axis = axcex) # remaining years + abline(v = as.numeric(time_year)) + box(lwd = 2.2) + dev.off() # Close figure + + # Shrink PDF + pdf <- paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = "") + png <- paste("ED2_", vars[num], "_Met_Drivers.png", sep = "") + system(paste("convert -quality 10 ", pdf, " ", png, sep = "")) + system(paste("convert ", png, " ", pdf, sep = "")) + + ############# Precip rate + num <- 8 + message("") + message("************* Plotting Precip Rate (kg H20/m2/s) *************") + print("************* Plotting Precip Rate (kg H20/m2/s) *************") + pdf(paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = ""), width = 14, height = 7) + par(mfrow = c(1, 1), mar = c(4.5, 5.2, 1, 0.8), mgp = c(3, 1, 0)) # B, L, T, R + plot(xaxis, prate, + pch = 21, bg = "dark grey", col = "dark grey", xaxt = "n", xlab = "Year", + ylab = expression(paste("Precip.", " (kg H2O ", m^{ + -2 + } ~ s^{ + -1 + }, ")")), + cex = ptcex, cex.lab = labcex, cex.axis = axcex + ) + axis(1, at = 1, label = as.character(years[1]), cex.axis = axcex) # first year start + axis(1, at = as.numeric(time_year), label = as.character(years2), cex.axis = axcex) # remaining years + abline(v = as.numeric(time_year)) + box(lwd = 2.2) + dev.off() # Close figure + + # Shrink PDF + pdf <- paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = "") + png <- paste("ED2_", vars[num], "_Met_Drivers.png", sep = "") + system(paste("convert -quality 10 ", pdf, " ", png, sep = "")) + system(paste("convert ", png, " ", pdf, sep = "")) + + ############# cumulative precip + num <- 9 + message("") + message("************* Plotting Cumulative Precip *************") + print("************* Plotting Cumulative Precip *************") + pdf(paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = ""), width = 14, height = 7) + par(mfrow = c(1, 1), mar = c(4.5, 5.2, 1, 0.8), mgp = c(3, 1, 0)) # B, L, T, R + plot(xaxis, cumprcp, + pch = 21, bg = "dark grey", col = "dark grey", xaxt = "n", xlab = "Year", + ylab = expression(paste("Cumulative Precip.", " (kg H2O ", m^{ + -2 + } ~ s^{ + -1 + }, ")")), + cex = ptcex, cex.lab = labcex, cex.axis = axcex + ) + lines(xaxis, cumprcp, lwd = 1.0, col = "blue") + polygon(c(xaxis, xaxis[length(xaxis)]), c(cumprcp, cumprcp[1]), col = "blue") + axis(1, at = 1, label = as.character(years[1]), cex.axis = axcex) # first year start + axis(1, at = as.numeric(time_year), label = as.character(years2), cex.axis = axcex) # remaining years + abline(v = as.numeric(time_year)) + box(lwd = 2.2) + dev.off() # Close figure + + # Shrink PDF + pdf <- paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = "") + png <- paste("ED2_", vars[num], "_Met_Drivers.png", sep = "") + system(paste("convert -quality 10 ", pdf, " ", png, sep = "")) + system(paste("convert ", png, " ", pdf, sep = "")) + + ############# nddsf + num <- 10 + message("") + message("************* Plotting IR diffuse downward solar radiation [W/m2] *************") + print("************* Plotting IR 
diffuse downward solar radiation [W/m2] *************") + pdf(paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = ""), width = 14, height = 7) + par(mfrow = c(1, 1), mar = c(4.5, 5.2, 1, 0.8), mgp = c(3, 1, 0)) # B, L, T, R + plot(xaxis, nddsf, + pch = 21, bg = "dark grey", col = "dark grey", xaxt = "n", xlab = "Year", + ylab = expression(paste(IR.Rad.[diffuse], " (", W ~ m^{ + -2 + }, ")")), cex = ptcex, + cex.lab = labcex, cex.axis = axcex + ) + axis(1, at = 1, label = as.character(years[1]), cex.axis = axcex) # first year start + axis(1, at = as.numeric(time_year), label = as.character(years2), cex.axis = axcex) # remaining years + abline(v = as.numeric(time_year)) + box(lwd = 2.2) + dev.off() # Close figure + + # Shrink PDF + pdf <- paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = "") + png <- paste("ED2_", vars[num], "_Met_Drivers.png", sep = "") + system(paste("convert -quality 10 ", pdf, " ", png, sep = "")) + system(paste("convert ", png, " ", pdf, sep = "")) + + ############# nbdsf + num <- 11 + message("") + message("************* Plotting IR direct beam downward solar radiation [W/m2] *************") + print("************* Plotting IR direct beam downward solar radiation [W/m2] *************") + pdf(paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = ""), width = 14, height = 7) + par(mfrow = c(1, 1), mar = c(4.5, 5.2, 1, 0.8), mgp = c(3, 1, 0)) # B, L, T, R + plot(xaxis, nbdsf, + pch = 21, bg = "dark grey", col = "dark grey", xaxt = "n", xlab = "Year", + ylab = expression(paste(IR.Rad.[beam], " (", W ~ m^{ + -2 + }, ")")), cex = ptcex, + cex.lab = labcex, cex.axis = axcex + ) + axis(1, at = 1, label = as.character(years[1]), cex.axis = axcex) # first year start + axis(1, at = as.numeric(time_year), label = as.character(years2), cex.axis = axcex) # remaining years + abline(v = as.numeric(time_year)) + box(lwd = 2.2) + dev.off() # Close figure + + # Shrink PDF + pdf <- paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = "") + png <- paste("ED2_", vars[num], "_Met_Drivers.png", sep = "") + system(paste("convert -quality 10 ", pdf, " ", png, sep = "")) + system(paste("convert ", png, " ", pdf, sep = "")) + + ############# hgt + num <- 12 + message("") + message("************* Plotting Geopotential Height [m] *************") + print("************* Plotting Geopotential Height [m] *************") + pdf(paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = ""), width = 14, height = 7) + par(mfrow = c(1, 1), mar = c(4.5, 5.2, 1, 0.8), mgp = c(3, 1, 0)) # B, L, T, R + plot(xaxis, hgt, + pch = 21, bg = "dark grey", col = "dark grey", xaxt = "n", xlab = "Year", + ylab = "Geopotential Height (m)", cex = ptcex, + cex.lab = labcex, cex.axis = axcex + ) + axis(1, at = 1, label = as.character(years[1]), cex.axis = axcex) # first year start + axis(1, at = as.numeric(time_year), label = as.character(years2), cex.axis = axcex) # remaining years + abline(v = as.numeric(time_year)) + box(lwd = 2.2) + dev.off() # Close figure + + # Shrink PDF + pdf <- paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = "") + png <- paste("ED2_", vars[num], "_Met_Drivers.png", sep = "") + system(paste("convert -quality 10 ", pdf, " ", png, sep = "")) + system(paste("convert ", png, " ", pdf, sep = "")) + + ############# dlwrf + num <- 13 + message("") + message("************* Plotting Downward long wave radiation [W/m2] *************") + print("************* Plotting Downward long wave radiation [W/m2] *************") + pdf(paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = ""), width = 14, height = 7) + par(mfrow = c(1, 
1), mar = c(4.5, 5.2, 1, 0.8), mgp = c(3, 1, 0)) # B, L, T, R + plot(xaxis, dlwrf, + pch = 21, bg = "dark grey", col = "dark grey", xaxt = "n", xlab = "Year", + ylab = expression(paste(LW.Rad.[downward], " (", W ~ m^{ + -2 + }, ")")), cex = ptcex, + cex.lab = labcex, cex.axis = axcex + ) + axis(1, at = 1, label = as.character(years[1]), cex.axis = axcex) # first year start + axis(1, at = as.numeric(time_year), label = as.character(years2), cex.axis = axcex) # remaining years + abline(v = as.numeric(time_year)) + box(lwd = 2.2) + dev.off() # Close figure + + # Shrink PDF + pdf <- paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = "") + png <- paste("ED2_", vars[num], "_Met_Drivers.png", sep = "") + system(paste("convert -quality 10 ", pdf, " ", png, sep = "")) + system(paste("convert ", png, " ", pdf, sep = "")) + + ############# CO2 Concentration + num <- 14 + message("") + message("************* Plotting CO2 Concentration (ppm) *************") + print("************* Plotting CO2 Concentration (ppm) *************") + pdf(paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = ""), width = 14, height = 7) + par(mfrow = c(1, 1), mar = c(4.5, 5.2, 1, 0.8), mgp = c(3, 1, 0)) # B, L, T, R + plot(xaxis, CO2, + pch = 21, bg = "dark grey", col = "dark grey", xaxt = "n", xlab = "Year", + ylab = expression(paste(CO["2"], " Conc. (ppm)", )), + cex = ptcex, cex.lab = labcex, cex.axis = axcex + ) + axis(1, at = 1, label = as.character(years[1]), cex.axis = axcex) # first year start + axis(1, at = as.numeric(time_year), label = as.character(years2), cex.axis = axcex) # remaining years + abline(v = as.numeric(time_year)) + box(lwd = 2.2) # Close figure + dev.off() + + # Shrink PDF + pdf <- paste("ED2_", vars[num], "_Met_Drivers.pdf", sep = "") + png <- paste("ED2_", vars[num], "_Met_Drivers.png", sep = "") + system(paste("convert -quality 10 ", pdf, " ", png, sep = "")) + system(paste("convert ", png, " ", pdf, sep = "")) + + #------------------String Together PDFs and remove individual .pdfs------------------------# + # Info: could string together pngs into single pdf + out_pdf <- paste("ED2_Met_Driver_Diagnostics.pdf", sep = "") + try(system(paste("rm ", out_pdf)), silent = TRUE) + try(system(paste("gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -sOutputFile=", + out_pdf, " *.pdf", + sep = "" + )), silent = FALSE) + system("rm *_Drivers.pdf") # Delete individual .pdfs, leave concatentated file } # END OF IF/THEN #--------------------------------------------------------------------------------------------------# diff --git a/base/utils/scripts/time.constants.R b/base/utils/scripts/time.constants.R index b6daeb31122..efdfba2b66f 100644 --- a/base/utils/scripts/time.constants.R +++ b/base/utils/scripts/time.constants.R @@ -1,12 +1,12 @@ -#==========================================================================================# +# ==========================================================================================# # Time conversion units # #------------------------------------------------------------------------------------------# -yr.day <<- 365.2425 # # of days in a year [ day/yr] -day.sec <<- 86400. # # of seconds in a day [ s/day] -day.min <<- 1440. # # of minutes in a day [ min/day] -day.hr <<- 24. # # of hours in a day [ hr/day] -hr.sec <<- 3600. # # of seconds in an hour [ s/hr] -hr.min <<- 60. # # of minutes in an hour [ min/hr] -min.sec <<- 60. 
# # of seconds in a minute [ s/min] -yr.sec <<- yr.day * day.sec # # of seconds in a year [ s/yr] -#==========================================================================================# \ No newline at end of file +yr.day <<- 365.2425 # # of days in a year [ day/yr] +day.sec <<- 86400. # # of seconds in a day [ s/day] +day.min <<- 1440. # # of minutes in a day [ min/day] +day.hr <<- 24. # # of hours in a day [ hr/day] +hr.sec <<- 3600. # # of seconds in an hour [ s/hr] +hr.min <<- 60. # # of minutes in an hour [ min/hr] +min.sec <<- 60. # # of seconds in a minute [ s/min] +yr.sec <<- yr.day * day.sec # # of seconds in a year [ s/yr] +# ==========================================================================================# diff --git a/base/utils/tests/testthat/helper.R b/base/utils/tests/testthat/helper.R index f78f1c36324..ab65e21dd27 100644 --- a/base/utils/tests/testthat/helper.R +++ b/base/utils/tests/testthat/helper.R @@ -3,15 +3,18 @@ times_to_netcdf <- function(times, timeunit, outdir, filename) { dims <- list(time = ncdf4::ncdim_def( name = "time", units = timeunit, - vals = times)) + vals = times + )) vars <- list(Y = ncdf4::ncvar_def( name = "Y", - units = "kg", + units = "kg", dim = dims, - missval = NA)) + missval = NA + )) nc <- ncdf4::nc_create( filename = file.path(outdir, filename), - vars = vars) + vars = vars + ) ncdf4::ncatt_put(nc, 0, "description", "strictly for testing") ncdf4::ncvar_put(nc, varid = vars[["Y"]], vals = rnorm(length(times))) ncdf4::nc_close(nc) @@ -35,7 +38,8 @@ example_netcdf <- function(varnames, file_path) { vals = seq_len(n) - 1 )) vars <- lapply(varnames, ncdf4::ncvar_def, - units = "kg", dim = dims, missval = NA) + units = "kg", dim = dims, missval = NA + ) names(vars) <- varnames nc <- ncdf4::nc_create(filename = file_path, vars = vars) on.exit(ncdf4::nc_close(nc), add = TRUE) diff --git a/base/utils/tests/testthat/test-read.output.R b/base/utils/tests/testthat/test-read.output.R index dc70af02b47..2a85b00fc61 100644 --- a/base/utils/tests/testthat/test-read.output.R +++ b/base/utils/tests/testthat/test-read.output.R @@ -1,77 +1,81 @@ context("read.output") -testdir = file.path(tempfile(), "readtest") +testdir <- file.path(tempfile(), "readtest") dir.create(testdir, recursive = TRUE) teardown(unlink(testdir, recursive = TRUE)) test_that("returns a list or dataframe as requested", { - times_to_netcdf(0:364, "days since 2001-01-01", testdir, "2001.nc") - - out_log <- capture.output(type = "message", { - listres <- read.output(runid = "", outdir = testdir, variables = "Y", dataframe = FALSE) - }) - expect_is(listres, "list") - expect_length(listres, 1) - expect_setequal(names(listres), "Y") - expect_length(listres$Y, 365) - - out_log <- capture.output(type = "message", { - dfres <- read.output(runid = "", outdir = testdir, variables = "Y", dataframe = TRUE) - }) - expect_s3_class(dfres, "data.frame") - expect_equal(dim(dfres), c(365, 3)) - expect_setequal(names(dfres), c("posix", "year", "Y")) - expect_s3_class(dfres$posix, "POSIXct") - expect_equal(range(dfres$posix), as.POSIXct(c("2001-01-01", "2001-12-31"), tz="UTC")) - expect_equal(unique(dfres$year), 2001) + times_to_netcdf(0:364, "days since 2001-01-01", testdir, "2001.nc") + + out_log <- capture.output(type = "message", { + listres <- read.output(runid = "", outdir = testdir, variables = "Y", dataframe = FALSE) + }) + expect_is(listres, "list") + expect_length(listres, 1) + expect_setequal(names(listres), "Y") + expect_length(listres$Y, 365) + + out_log <- capture.output(type = 
"message", { + dfres <- read.output(runid = "", outdir = testdir, variables = "Y", dataframe = TRUE) + }) + expect_s3_class(dfres, "data.frame") + expect_equal(dim(dfres), c(365, 3)) + expect_setequal(names(dfres), c("posix", "year", "Y")) + expect_s3_class(dfres$posix, "POSIXct") + expect_equal(range(dfres$posix), as.POSIXct(c("2001-01-01", "2001-12-31"), tz = "UTC")) + expect_equal(unique(dfres$year), 2001) }) test_that("accepts start and end years as string, number, datetime", { - times_to_netcdf(0:364, "days since 2001-01-01", testdir, "2001.nc") - times_to_netcdf(0:364, "days since 2002-01-01", testdir, "2002.nc") - times_to_netcdf(0:364, "days since 2003-01-01", testdir, "2003.nc") - - out_log <- capture.output(type = "message", { - res_all <- read.output(runid = "", outdir = testdir, variables = "Y", dataframe = TRUE) - res_str <- read.output(runid = "", outdir = testdir, variables = "Y", start.year = "2001", end.year = "2003", dataframe = TRUE) - res_num <- read.output(runid = "", outdir = testdir, variables = "Y", start.year = 2001, end.year = 2003, dataframe = TRUE) - res_date <- read.output(runid = "", outdir = testdir, variables = "Y", start.year = as.Date("2001-01-01"), end.year = as.Date("2003-06-23"), dataframe = TRUE) - res_posix <- read.output(runid = "", outdir = testdir, variables = "Y", start.year = as.POSIXct("2001-01-01 08:22:33"), end.year = as.POSIXct("2003-12-16 23:22:53"), dataframe = TRUE) - }) - - expect_equivalent(res_all, res_str) - expect_equivalent(res_all, res_num) - expect_equivalent(res_all, res_date) - expect_equivalent(res_all, res_posix) - - expect_length(res_all$posix, 365*3) - expect_setequal(names(res_all), c("posix", "year", "Y")) - - out_log <- capture.output(type = "message", { - res_start <- read.output(runid = "", outdir = testdir, variables = "Y", - dataframe = TRUE, start.year = 2002) - res_end <- read.output(runid = "", outdir = testdir, variables = "Y", - dataframe = TRUE, end.year = 2002) - }) - - expect_length(res_start[["posix"]], 365 * 2) - expect_length(res_end[["posix"]], 365 * 2) + times_to_netcdf(0:364, "days since 2001-01-01", testdir, "2001.nc") + times_to_netcdf(0:364, "days since 2002-01-01", testdir, "2002.nc") + times_to_netcdf(0:364, "days since 2003-01-01", testdir, "2003.nc") + + out_log <- capture.output(type = "message", { + res_all <- read.output(runid = "", outdir = testdir, variables = "Y", dataframe = TRUE) + res_str <- read.output(runid = "", outdir = testdir, variables = "Y", start.year = "2001", end.year = "2003", dataframe = TRUE) + res_num <- read.output(runid = "", outdir = testdir, variables = "Y", start.year = 2001, end.year = 2003, dataframe = TRUE) + res_date <- read.output(runid = "", outdir = testdir, variables = "Y", start.year = as.Date("2001-01-01"), end.year = as.Date("2003-06-23"), dataframe = TRUE) + res_posix <- read.output(runid = "", outdir = testdir, variables = "Y", start.year = as.POSIXct("2001-01-01 08:22:33"), end.year = as.POSIXct("2003-12-16 23:22:53"), dataframe = TRUE) + }) + + expect_equivalent(res_all, res_str) + expect_equivalent(res_all, res_num) + expect_equivalent(res_all, res_date) + expect_equivalent(res_all, res_posix) + + expect_length(res_all$posix, 365 * 3) + expect_setequal(names(res_all), c("posix", "year", "Y")) + + out_log <- capture.output(type = "message", { + res_start <- read.output( + runid = "", outdir = testdir, variables = "Y", + dataframe = TRUE, start.year = 2002 + ) + res_end <- read.output( + runid = "", outdir = testdir, variables = "Y", + dataframe = TRUE, 
end.year = 2002 + ) + }) + + expect_length(res_start[["posix"]], 365 * 2) + expect_length(res_end[["posix"]], 365 * 2) }) test_that("handles arbitrary time offsets", { - times_to_netcdf(365:730, "days since 2003-01-01", testdir, "2004.nc") - times_to_netcdf( ((0:364)+916) * 24, "hours since 2002-06-30", testdir, "2005.nc") - - out_log <- capture.output(type = "message", { - mixedres = read.output(runid = "", outdir = testdir, variables = "Y", dataframe = TRUE, start.year = 2004, end.year = 2006) - }) - - expect_length(mixedres$posix, 731) - - # Hack: drop `dim` attribute so it doesn't make the next comparison fail - dim(mixedres$posix) = NULL - expect_equal(mixedres$posix, as.POSIXct((0:730)*86400, origin = "2004-01-01", tz = "UTC")) + times_to_netcdf(365:730, "days since 2003-01-01", testdir, "2004.nc") + times_to_netcdf(((0:364) + 916) * 24, "hours since 2002-06-30", testdir, "2005.nc") + + out_log <- capture.output(type = "message", { + mixedres <- read.output(runid = "", outdir = testdir, variables = "Y", dataframe = TRUE, start.year = 2004, end.year = 2006) + }) + + expect_length(mixedres$posix, 731) + + # Hack: drop `dim` attribute so it doesn't make the next comparison fail + dim(mixedres$posix) <- NULL + expect_equal(mixedres$posix, as.POSIXct((0:730) * 86400, origin = "2004-01-01", tz = "UTC")) }) empty_testdir <- tempfile() @@ -81,8 +85,10 @@ teardown(unlink(empty_testdir, recursive = TRUE)) test_that("Correct behavior when no NetCDF files present", { expected <- "no netCDF files of model output present" out_log <- capture.output(type = "message", { - out <- read.output(runid = "", outdir = empty_testdir, - start.year = 2001, end.year = 2002) + out <- read.output( + runid = "", outdir = empty_testdir, + start.year = 2001, end.year = 2002 + ) }) expect_match(out_log, "no netCDF files of model output present", all = FALSE) expect_match(out_log, "No files found. 
Returning all NA", all = FALSE) diff --git a/base/utils/tests/testthat/test-status.R b/base/utils/tests/testthat/test-status.R index 8cb463870d3..59defe5defe 100644 --- a/base/utils/tests/testthat/test-status.R +++ b/base/utils/tests/testthat/test-status.R @@ -1,70 +1,72 @@ context("status") make_testdir <- function() { - td <- tempfile() - dir.create(td) - teardown(unlink(td, recursive = TRUE, force = TRUE)) + td <- tempfile() + dir.create(td) + teardown(unlink(td, recursive = TRUE, force = TRUE)) - td + td } test_that("status functions accept explicit filename", { - d <- make_testdir() - f <- file.path(d, "MY_STATUS") + d <- make_testdir() + f <- file.path(d, "MY_STATUS") - expect_silent(status.start("TRAITS", f)) - expect_silent(status.end("DONE", f)) - expect_silent(status.skip("MET", f)) - expect_silent(status.start("ENSEMBLE", f)) - expect_silent(status.end("ERROR", f)) + expect_silent(status.start("TRAITS", f)) + expect_silent(status.end("DONE", f)) + expect_silent(status.skip("MET", f)) + expect_silent(status.start("ENSEMBLE", f)) + expect_silent(status.end("ERROR", f)) - res <- readLines(f) - expect_length(res, 3) - expect_match(res[[1]], "^TRAITS.*DONE\\s*$") - expect_match(res[[2]], "^MET.*SKIPPED\\s*$") - expect_match(res[[3]], "^ENSEMBLE.*ERROR\\s*$") - expect_equal(status.check("TRAITS", f), 1L) - expect_equal(status.check("MET", f), 0L) - expect_equal(status.check("ENSEMBLE", f), -1L) + res <- readLines(f) + expect_length(res, 3) + expect_match(res[[1]], "^TRAITS.*DONE\\s*$") + expect_match(res[[2]], "^MET.*SKIPPED\\s*$") + expect_match(res[[3]], "^ENSEMBLE.*ERROR\\s*$") + expect_equal(status.check("TRAITS", f), 1L) + expect_equal(status.check("MET", f), 0L) + expect_equal(status.check("ENSEMBLE", f), -1L) }) test_that("status handles file = dir/", { - d <- make_testdir() - status.start("NONE", d) - status.end("DONE", d) - expect_equal(status.check("NONE", file.path(d, "STATUS")), 1L) + d <- make_testdir() + status.start("NONE", d) + status.end("DONE", d) + expect_equal(status.check("NONE", file.path(d, "STATUS")), 1L) }) test_that("status functions read from local settings", { - settings <- list(outdir = make_testdir()) - expect_silent(status.skip("auto")) - expect_match( - readLines(file.path(settings$outdir, "STATUS"))[[1]], - "^auto.*SKIPPED\\s*$") + settings <- list(outdir = make_testdir()) + expect_silent(status.skip("auto")) + expect_match( + readLines(file.path(settings$outdir, "STATUS"))[[1]], + "^auto.*SKIPPED\\s*$" + ) }) test_that("status finds settings defined outside immediate calling scope", { - settings <- list(outdir = make_testdir()) - f <- function(name) { - status.start(name) - status.end() - } - g <- function(name) { - f(name) - } - expect_silent(g("WRAPPED")) - expect_equal( - status.check("WRAPPED", file.path(settings$outdir, "STATUS")), - 1L) + settings <- list(outdir = make_testdir()) + f <- function(name) { + status.start(name) + status.end() + } + g <- function(name) { + f(name) + } + expect_silent(g("WRAPPED")) + expect_equal( + status.check("WRAPPED", file.path(settings$outdir, "STATUS")), + 1L + ) }) test_that("status writes to stdout on bad filename", { - expect_output(status.start("NOFILE"), "NOFILE") - settings <- list(outdir = file.path(make_testdir(), "fake", "path")) - expect_output(status.end(), "\\d{4}-\\d{2}-\\d{2}.*DONE") + expect_output(status.start("NOFILE"), "NOFILE") + settings <- list(outdir = file.path(make_testdir(), "fake", "path")) + expect_output(status.end(), "\\d{4}-\\d{2}-\\d{2}.*DONE") }) test_that("status.check returns 0 
on bad filename", { - expect_equal(status.check("NOFILE"), 0L) - expect_equal(status.check("NOFILE", file.path(make_testdir(), "fake")), 0L) + expect_equal(status.check("NOFILE"), 0L) + expect_equal(status.check("NOFILE", file.path(make_testdir(), "fake")), 0L) }) diff --git a/base/utils/tests/testthat/test-ud_convert.R b/base/utils/tests/testthat/test-ud_convert.R index bd994f1fb6a..82ba6c22d38 100644 --- a/base/utils/tests/testthat/test-ud_convert.R +++ b/base/utils/tests/testthat/test-ud_convert.R @@ -8,7 +8,7 @@ test_that("unit conversions", { }) test_that("unit conversion invariants", { - expect_equal(ud_convert(1, "g", "g"), 1) + expect_equal(ud_convert(1, "g", "g"), 1) expect_equal(ud_convert(0, "g", "kg"), 0) expect_equal(ud_convert(Inf, "g", "kg"), Inf) }) @@ -23,7 +23,6 @@ test_that("output is type numeric and not class \"units\"", { x <- ud_convert(23, "degC", "K") testthat::expect_failure(expect_s3_class(x, "units")) testthat::expect_type(x, "double") - }) test_that("ud_convert() handles difftime", { @@ -34,6 +33,6 @@ test_that("ud_convert() handles difftime", { test_that("ud_convert() warns with wrong input units for difftime", { expect_warning(ud_convert(as.difftime("12:00:00"), u1 = "years", u2 = "minutes")) - #should still error if units are not convertible + # should still error if units are not convertible expect_error(ud_convert(as.difftime("12:00:00"), u1 = "kilograms", u2 = "minutes")) -}) \ No newline at end of file +}) diff --git a/base/utils/tests/testthat/test-unit_is_parseable.R b/base/utils/tests/testthat/test-unit_is_parseable.R index fc6d11f5112..1d5fa8e6efc 100644 --- a/base/utils/tests/testthat/test-unit_is_parseable.R +++ b/base/utils/tests/testthat/test-unit_is_parseable.R @@ -18,4 +18,3 @@ test_that("Non-parseable unit", { # we report it as unparseable. 
expect_false(unit_is_parseable("")) }) - diff --git a/base/utils/tests/testthat/test.cf2date.R b/base/utils/tests/testthat/test.cf2date.R index d45df781b9a..cb59fe08895 100644 --- a/base/utils/tests/testthat/test.cf2date.R +++ b/base/utils/tests/testthat/test.cf2date.R @@ -13,11 +13,10 @@ test_that("`datetime2cf()` able to convert POSIXct date-time to CF-style date-ti }) test_that("`datetime2doy()` and `cf2doy()` able to extract Julian day from POSIXct or CF date-times respectively(cf2doy internally converts CF to POSIXct and calls datetime2doy)", { - # POSIXct date-times expect_equal(datetime2doy("2010-01-01"), 1) expect_equal(datetime2doy("2010-01-01 12:00:00"), 1.5) # CF date-times expect_equal(cf2doy(0, "days since 2007-01-01"), 1) -}) \ No newline at end of file +}) diff --git a/base/utils/tests/testthat/test.clear.scratch.R b/base/utils/tests/testthat/test.clear.scratch.R index 1d4ca0ced1e..3f42425db4f 100644 --- a/base/utils/tests/testthat/test.clear.scratch.R +++ b/base/utils/tests/testthat/test.clear.scratch.R @@ -1,7 +1,7 @@ test_that("`clear.scratch()` able to build the correct system command prompt to remove previous model run output", { mocked_res <- mockery::mock(TRUE) - mockery::stub(clear.scratch, 'system', mocked_res) - mockery::stub(clear.scratch, 'seq', 0) + mockery::stub(clear.scratch, "system", mocked_res) + mockery::stub(clear.scratch, "seq", 0) settings <- list(host = list(name = "cluster")) expect_output( clear.scratch(settings), @@ -10,7 +10,7 @@ test_that("`clear.scratch()` able to build the correct system command prompt to args <- mockery::mock_args(mocked_res) expect_true( grepl( - "ssh -T cluster qlogin -q all.q@compute-0-0.local.*clear.scratch.sh", + "ssh -T cluster qlogin -q all.q@compute-0-0.local.*clear.scratch.sh", args[[1]][[1]] ) ) @@ -21,4 +21,4 @@ test_that("`clear.scratch()` able to build the correct system command prompt to clear.scratch(settings), ".*No output to delete.*" ) -}) \ No newline at end of file +}) diff --git a/base/utils/tests/testthat/test.days_in_year.R b/base/utils/tests/testthat/test.days_in_year.R index 6b55cd20fd8..76266617494 100644 --- a/base/utils/tests/testthat/test.days_in_year.R +++ b/base/utils/tests/testthat/test.days_in_year.R @@ -2,4 +2,4 @@ test_that("`days_in_year()` correctly returns number of days when provided a yea expect_equal(days_in_year(2010), 365) expect_equal(days_in_year(2012), 366) expect_equal(days_in_year(2010:2012), c(365, 365, 366)) -}) \ No newline at end of file +}) diff --git a/base/utils/tests/testthat/test.distn.stats.R b/base/utils/tests/testthat/test.distn.stats.R index 9d4f9b8d322..b06f3d7962b 100644 --- a/base/utils/tests/testthat/test.distn.stats.R +++ b/base/utils/tests/testthat/test.distn.stats.R @@ -1,21 +1,27 @@ context("functions that calculate statistics from parameterized distributions") -test_that("distn.stats works for different distributions",{ - expect_equal(distn.stats("norm", 0, 1), c(0,1)) - expect_equal(distn.stats("norm", -1, 1), c(-1,1)) - expect_equal(distn.stats("beta", 2, 3), c(2/5, sqrt(6/(5^2 * 6)))) - expect_equal(distn.stats("exp", 2), c(1/2, 1/2)) +test_that("distn.stats works for different distributions", { + expect_equal(distn.stats("norm", 0, 1), c(0, 1)) + expect_equal(distn.stats("norm", -1, 1), c(-1, 1)) + expect_equal(distn.stats("beta", 2, 3), c(2 / 5, sqrt(6 / (5^2 * 6)))) + expect_equal(distn.stats("exp", 2), c(1 / 2, 1 / 2)) expect_equal(distn.stats("t", 2), c(0, Inf)) - expect_equal(distn.stats("lnorm", 2, 3), - c(exp(2 + 0.5*3^2), sqrt(exp(2*2 + 
3^2)*(exp(3^2)-1)))) - expect_equal(distn.stats("weibull", 2,3), - c(3 * gamma(1+1/2), 3^2 * (gamma(1 + 2/2) - (gamma(1 + 1/2))^2))) + expect_equal( + distn.stats("lnorm", 2, 3), + c(exp(2 + 0.5 * 3^2), sqrt(exp(2 * 2 + 3^2) * (exp(3^2) - 1))) + ) + expect_equal( + distn.stats("weibull", 2, 3), + c(3 * gamma(1 + 1 / 2), 3^2 * (gamma(1 + 2 / 2) - (gamma(1 + 1 / 2))^2)) + ) }) -test_that("distn.table.stats works for different distributions",{ +test_that("distn.table.stats works for different distributions", { load("data/prior.distns.RData") distn.table.stats(prior.distns) - new.distn.table <- data.frame(distn = c("norm", "exp"), parama = c(1,1), paramb = c(1,1)) - expect_equivalent(distn.table.stats(new.distn.table), - data.frame(mean = c(1,1), sd = c(1,1))) -}) \ No newline at end of file + new.distn.table <- data.frame(distn = c("norm", "exp"), parama = c(1, 1), paramb = c(1, 1)) + expect_equivalent( + distn.table.stats(new.distn.table), + data.frame(mean = c(1, 1), sd = c(1, 1)) + ) +}) diff --git a/base/utils/tests/testthat/test.download.url.R b/base/utils/tests/testthat/test.download.url.R index 3180de253cc..0b4bd12a50f 100644 --- a/base/utils/tests/testthat/test.download.url.R +++ b/base/utils/tests/testthat/test.download.url.R @@ -1,12 +1,12 @@ test_that("`download.url()` able to create the target dir for file download and passes the correct args to curl_download", { withr::with_dir(tempdir(), { mocked_res <- mockery::mock(TRUE) - mockery::stub(download.url, 'url_found', TRUE) - mockery::stub(download.url, 'curl::curl_download', mocked_res) - res <- download.url('http://localhost/', 'test/index.html') - expect_true(file.exists('test')) + mockery::stub(download.url, "url_found", TRUE) + mockery::stub(download.url, "curl::curl_download", mocked_res) + res <- download.url("http://localhost/", "test/index.html") + expect_true(file.exists("test")) args <- mockery::mock_args(mocked_res) - expect_equal(args[[1]]$url, 'http://localhost/') - expect_equal(args[[1]]$destfile, 'test/index.html') + expect_equal(args[[1]]$url, "http://localhost/") + expect_equal(args[[1]]$destfile, "test/index.html") }) -}) \ No newline at end of file +}) diff --git a/base/utils/tests/testthat/test.get.ensemble.inputs.R b/base/utils/tests/testthat/test.get.ensemble.inputs.R index 025486c81ff..de6239c7df4 100644 --- a/base/utils/tests/testthat/test.get.ensemble.inputs.R +++ b/base/utils/tests/testthat/test.get.ensemble.inputs.R @@ -10,7 +10,7 @@ test_that("`get.ensemble.inputs()` able to return desired ensemble inputs from s ) res <- get.ensemble.inputs(settings) expect_equal( - res, + res, list(input1 = c(1, 2, 3), input2 = c(1, 2, 3), input3 = c(1, 2, 3)) ) -}) \ No newline at end of file +}) diff --git a/base/utils/tests/testthat/test.listToArgString.R b/base/utils/tests/testthat/test.listToArgString.R index dfa1f996a92..f4a4d1f72c3 100644 --- a/base/utils/tests/testthat/test.listToArgString.R +++ b/base/utils/tests/testthat/test.listToArgString.R @@ -1,13 +1,13 @@ test_that("`listToArgString()` able to format list of named function args in a comma separated list", { expect_equal( - listToArgString(c(host = 'pecan', settings = 'test', id = 2020)), + listToArgString(c(host = "pecan", settings = "test", id = 2020)), "host='pecan', settings='test', id='2020'" ) }) test_that("`.parseArg()` works for all different types of entries in the list of function args passed to listToArgString", { # character - expect_equal(.parseArg('pecan'), "'pecan'") + expect_equal(.parseArg("pecan"), "'pecan'") # NULL 
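# Illustrative sketch (not part of this patch): the kind of formatting these
# expectations encode, i.e. turning named arguments into a single "name='value'" string.
# `to_arg_string()` is a hypothetical stand-in for demonstration, not the actual
# listToArgString()/.parseArg() code.
to_arg_string <- function(args) {
  # quote each value and join the name='value' pairs with commas
  paste(sprintf("%s='%s'", names(args), unlist(args)), collapse = ", ")
}
# to_arg_string(c(host = "pecan", settings = "test", id = 2020))
# #> "host='pecan', settings='test', id='2020'"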
expect_equal(.parseArg(NULL), "NULL") # list @@ -16,4 +16,4 @@ test_that("`.parseArg()` works for all different types of entries in the list of expect_equal(.parseArg(data.frame(a = 1, b = 2)), "data.frame(a =c(' 1 '),b =c(' 2 '))") # nested list expect_equal(.parseArg(list(a = 1, b = list(c = 3, d = 4))), "list(a='1', b=list(c='3', d='4'))") -}) \ No newline at end of file +}) diff --git a/base/utils/tests/testthat/test.load_local.R b/base/utils/tests/testthat/test.load_local.R index 16f35f848fe..1278eb0efad 100644 --- a/base/utils/tests/testthat/test.load_local.R +++ b/base/utils/tests/testthat/test.load_local.R @@ -7,4 +7,4 @@ test_that("`load_local()` able to load file into a list", { expect_equal(test_list$x, x) expect_equal(test_list$y, y) }) -}) \ No newline at end of file +}) diff --git a/base/utils/tests/testthat/test.n_leap_day.R b/base/utils/tests/testthat/test.n_leap_day.R index 0b6c55a4180..527a354c540 100644 --- a/base/utils/tests/testthat/test.n_leap_day.R +++ b/base/utils/tests/testthat/test.n_leap_day.R @@ -1,9 +1,8 @@ test_that("`n_leap_day()` able to correctly return number of leap days between 2 specified dates", { - # having leap days expect_equal(n_leap_day("2000-01-01", "2003-12-31"), 1) expect_equal(n_leap_day("2000-01-01", "2004-12-31"), 2) - + # no leap days expect_equal(n_leap_day("2001-01-01", "2003-12-31"), 0) -}) \ No newline at end of file +}) diff --git a/base/utils/tests/testthat/test.need_packages.R b/base/utils/tests/testthat/test.need_packages.R index 6ae98f05a30..349a2f63140 100644 --- a/base/utils/tests/testthat/test.need_packages.R +++ b/base/utils/tests/testthat/test.need_packages.R @@ -1,11 +1,10 @@ test_that("`need_packages()` correctly checks if the required packages are installed", { - # normal condition : when packages exist expect_equal(need_packages("stats", "methods"), c("stats", "methods")) # error condition expect_error( - need_packages("notapackage"), + need_packages("notapackage"), "The following packages are required but not installed: `notapackage`" ) -}) \ No newline at end of file +}) diff --git a/base/utils/tests/testthat/test.r2bugs.distributions.R b/base/utils/tests/testthat/test.r2bugs.distributions.R index dcd414681fd..5d44c902e1b 100644 --- a/base/utils/tests/testthat/test.r2bugs.distributions.R +++ b/base/utils/tests/testthat/test.r2bugs.distributions.R @@ -1,7 +1,9 @@ test_that("`r2bugs.distributions()` able to convert R parameterization to BUGS parameterization", { - priors <- data.frame(distn = c('weibull', 'lnorm', 'norm', 'gamma'), - parama = c(1, 1, 1, 1), - paramb = c(2, 2, 2, 2)) + priors <- data.frame( + distn = c("weibull", "lnorm", "norm", "gamma"), + parama = c(1, 1, 1, 1), + paramb = c(2, 2, 2, 2) + ) res <- r2bugs.distributions(priors) expect_equal(res$distn, c("weib", "lnorm", "norm", "gamma")) expect_equal(res$parama, c(1, 1, 1, 1)) @@ -9,11 +11,13 @@ test_that("`r2bugs.distributions()` able to convert R parameterization to BUGS p }) test_that("`bugs2r.distributions()` able to convert BUGS parameterization to R parameterization", { - priors <- data.frame(distn = c('weib', 'lnorm', 'norm', 'gamma'), - parama = c(1, 1, 1, 1), - paramb = c(0.50, 0.25, 0.25, 2.00)) + priors <- data.frame( + distn = c("weib", "lnorm", "norm", "gamma"), + parama = c(1, 1, 1, 1), + paramb = c(0.50, 0.25, 0.25, 2.00) + ) res <- bugs2r.distributions(priors) expect_equal(res$distn, c("weibull", "lnorm", "norm", "gamma")) expect_equal(res$parama, c(1, 1, 1, 1)) expect_equal(res$paramb, c(2, 2, 2, 2)) -}) \ No newline at end of file +}) 
diff --git a/base/utils/tests/testthat/test.seconds_in_year.R b/base/utils/tests/testthat/test.seconds_in_year.R index c7db4e92d6a..7e863325077 100644 --- a/base/utils/tests/testthat/test.seconds_in_year.R +++ b/base/utils/tests/testthat/test.seconds_in_year.R @@ -5,4 +5,4 @@ test_that("`seconds_in_year()` able to return number of seconds in a given year( expect_equal(seconds_in_year(2001), 31536000) # vector of years expect_equal(seconds_in_year(2000:2004), c(31622400, 31536000, 31536000, 31536000, 31622400)) -}) \ No newline at end of file +}) diff --git a/base/utils/tests/testthat/test.sendmail.R b/base/utils/tests/testthat/test.sendmail.R index 990e0d394ae..f74f1bf5b09 100644 --- a/base/utils/tests/testthat/test.sendmail.R +++ b/base/utils/tests/testthat/test.sendmail.R @@ -1,18 +1,18 @@ test_that("`sendmail()` able to create the file with contents to email correctly, also able to build correct command to send the email", { withr::with_tempfile("tf", { mocked_res <- mockery::mock(TRUE) - mockery::stub(sendmail, 'system2', mocked_res) - mockery::stub(sendmail, 'tempfile', tf) - mockery::stub(sendmail, 'unlink', NULL) - sendmail('pecan@@example.com', 'carya@@example.com', 'Hi', 'Message from pecan.') + mockery::stub(sendmail, "system2", mocked_res) + mockery::stub(sendmail, "tempfile", tf) + mockery::stub(sendmail, "unlink", NULL) + sendmail("pecan@@example.com", "carya@@example.com", "Hi", "Message from pecan.") sendmailfile <- readLines(tf) - expect_equal(sendmailfile[1], 'From: pecan@@example.com') - expect_equal(sendmailfile[2], 'Subject: Hi') - expect_equal(sendmailfile[3], 'To: carya@@example.com') - expect_equal(sendmailfile[5], 'Message from pecan.') + expect_equal(sendmailfile[1], "From: pecan@@example.com") + expect_equal(sendmailfile[2], "Subject: Hi") + expect_equal(sendmailfile[3], "To: carya@@example.com") + expect_equal(sendmailfile[5], "Message from pecan.") args <- mockery::mock_args(mocked_res) - expect_equal(args[[1]][[2]][[1]], '-f') + expect_equal(args[[1]][[2]][[1]], "-f") expect_equal(args[[1]][[2]][[2]], '"pecan@@example.com"') expect_equal(args[[1]][[2]][[3]], '"carya@@example.com"') }) -}) \ No newline at end of file +}) diff --git a/base/utils/tests/testthat/test.timezone_hour.R b/base/utils/tests/testthat/test.timezone_hour.R index e1ca8dd44d5..8a9b8e8a6cf 100644 --- a/base/utils/tests/testthat/test.timezone_hour.R +++ b/base/utils/tests/testthat/test.timezone_hour.R @@ -1,7 +1,7 @@ test_that("`timezone_hour()` able to correctly return number of hours offset to UTC for a timezone", { - expect_equal(timezone_hour('US/Pacific'), -8) - expect_equal(timezone_hour('US/Eastern'), -5) + expect_equal(timezone_hour("US/Pacific"), -8) + expect_equal(timezone_hour("US/Eastern"), -5) # for numeric expect_equal(timezone_hour(-8), -8) -}) \ No newline at end of file +}) diff --git a/base/utils/tests/testthat/test.trait.dictionary.R b/base/utils/tests/testthat/test.trait.dictionary.R index 4330d518b65..7e56cece6d4 100644 --- a/base/utils/tests/testthat/test.trait.dictionary.R +++ b/base/utils/tests/testthat/test.trait.dictionary.R @@ -1,9 +1,9 @@ -test_that("trait dictionary loads and has expected columns",{ +test_that("trait dictionary loads and has expected columns", { rm(list = ls()) data(trait.dictionary, package = "PEcAn.utils") expect_true(exists("trait.dictionary")) expect_true(all(c("id", "figid", "units", "model.id") %in% - colnames(trait.dictionary))) + colnames(trait.dictionary))) expect_true(ncol(trait.dictionary) >= 4) # dim = 49 x 4 at time of writing - 
expect_true(nrow(trait.dictionary) >=49) + expect_true(nrow(trait.dictionary) >= 49) }) diff --git a/base/utils/tests/testthat/test.units_are_equivalent.R b/base/utils/tests/testthat/test.units_are_equivalent.R index d5d8b4c4240..a4eab1f7875 100644 --- a/base/utils/tests/testthat/test.units_are_equivalent.R +++ b/base/utils/tests/testthat/test.units_are_equivalent.R @@ -3,4 +3,4 @@ test_that("`units_are_equivalent()` able to identify if the units are equivalent expect_true(units_are_equivalent("m/s", "m s-1")) # Non-equivalent units expect_error(units_are_equivalent("m/s", "m s-2")) -}) \ No newline at end of file +}) diff --git a/base/utils/tests/testthat/test.utils.R b/base/utils/tests/testthat/test.utils.R index 37478cba081..845d8e39853 100644 --- a/base/utils/tests/testthat/test.utils.R +++ b/base/utils/tests/testthat/test.utils.R @@ -1,106 +1,127 @@ context("Other utilities") -test.stats <- data.frame(Y=rep(1,5), - stat=rep(1,5), - n=rep(4,5), - statname=c('SD', 'MSE', 'LSD', 'HSD', 'MSD')) - -test_that("transformstats works",{ - expect_equal(signif(transformstats(test.stats)$stat, 5), - c(0.5, 0.5, 0.12734, 0.071333, 1.1559)) +test.stats <- data.frame( + Y = rep(1, 5), + stat = rep(1, 5), + n = rep(4, 5), + statname = c("SD", "MSE", "LSD", "HSD", "MSD") +) + +test_that("transformstats works", { + expect_equal( + signif(transformstats(test.stats)$stat, 5), + c(0.5, 0.5, 0.12734, 0.071333, 1.1559) + ) expect_true(all(transformstats(test.stats)$statname == "SE")) expect_equal(test.stats$Y, transformstats(test.stats)$Y) - expect_equal(test.stats$n, transformstats(test.stats)$n) + expect_equal(test.stats$n, transformstats(test.stats)$n) expect_false(any(as.character(test.stats$statname) == - as.character(transformstats(test.stats)$statname))) + as.character(transformstats(test.stats)$statname))) }) -test_that('arrhenius scaling works', { - expect_equivalent(signif(arrhenius.scaling(1,2,3),5), - c(1.0403)) - expect_equivalent(signif(arrhenius.scaling(1,3,2),5), - c(0.96129)) +test_that("arrhenius scaling works", { + expect_equivalent( + signif(arrhenius.scaling(1, 2, 3), 5), + c(1.0403) + ) + expect_equivalent( + signif(arrhenius.scaling(1, 3, 2), 5), + c(0.96129) + ) }) -test_that("vecpaste works",{ - +test_that("vecpaste works", { ## vecpaste() - expect_that(vecpaste(c('a','b')), - equals("'a','b'")) - expect_that(vecpaste(1), - equals("'1'")) + expect_that( + vecpaste(c("a", "b")), + equals("'a','b'") + ) + expect_that( + vecpaste(1), + equals("'1'") + ) }) -test_that("left.pad.zeros works",{ +test_that("left.pad.zeros works", { ## left.pad.zeros() - expect_that(left.pad.zeros(1,2), - equals("01")) - expect_that(left.pad.zeros(100,2), - equals("100")) + expect_that( + left.pad.zeros(1, 2), + equals("01") + ) + expect_that( + left.pad.zeros(100, 2), + equals("100") + ) }) -test_that("get.run.id works",{ +test_that("get.run.id works", { expect_equal(get.run.id("a", "b", "c", "d"), "a-d-c-b") expect_equal(get.run.id("a", "b", "c"), "a-c-b") expect_equal(get.run.id("a", "b"), "a-b") expect_equal(get.run.id("ENS", left.pad.zeros(1, 5)), "ENS-00001") - expect_equal(get.run.id("SA", round(pnorm(-3),3), trait = "Vcmax"), "SA-Vcmax-0.001") + expect_equal(get.run.id("SA", round(pnorm(-3), 3), trait = "Vcmax"), "SA-Vcmax-0.001") }) test_that("summarize.result works appropriately", { - ## generate test data - testresult <- data.frame(citation_id = 1, - site_id = 1:10, - trt_id = rep(c('control', 'fert'),5), - control = rep(c(0,1), 5), - greenhouse = c(rep(0,5), rep(1,5)), - date = 1, - time = NA, - 
cultivar_id = 1, - specie_id = 1, - n = 1, - mean = sqrt(1:10), - stat = NA, - statname = NA, - name = NA, - treatment_id = NA + ## generate test data + testresult <- data.frame( + citation_id = 1, + site_id = 1:10, + trt_id = rep(c("control", "fert"), 5), + control = rep(c(0, 1), 5), + greenhouse = c(rep(0, 5), rep(1, 5)), + date = 1, + time = NA, + cultivar_id = 1, + specie_id = 1, + n = 1, + mean = sqrt(1:10), + stat = NA, + statname = NA, + name = NA, + treatment_id = NA ) # check that individual means produced for distinct sites - expect_that(summarize.result(testresult)$mean, equals(testresult$mean)) - + expect_that(summarize.result(testresult)$mean, equals(testresult$mean)) + # check that four means are produced for a single site - testresult2 <- dplyr::mutate(testresult, site_id= 1) - expect_that(nrow(summarize.result(testresult2)), equals(4)) - + testresult2 <- dplyr::mutate(testresult, site_id = 1) + expect_that(nrow(summarize.result(testresult2)), equals(4)) + # check that if stat == NA, SE will be computed testresult3 <- summarize.result(testresult2) expect_true(all(!is.na(testresult3$stat))) expect_equal(testresult3$n, c(3L, 2L, 2L, 3L)) expect_equal(round(testresult3$stat, 3), c(0.359, 0.177, 0.293, 0.206)) expect_equal(round(testresult3$mean, 3), c(1.656, 2.823, 1.707, 2.813)) - + # check that site groups correctly for length(site) > 1 - testresult4 <- rbind.data.frame(testresult2, transform(testresult2, site_id= 2)) + testresult4 <- rbind.data.frame(testresult2, transform(testresult2, site_id = 2)) testresult5 <- summarize.result(testresult4) expect_true(all(!is.na(testresult5$stat))) expect_equal(nrow(testresult5), 8) - }) -test_that("as.sequence works",{ - expect_identical(as.sequence(c("a", "b")), - c(1,2)) - expect_identical(as.sequence(c("a", NA)), - c(1,2)) - expect_equal(as.sequence(c("a", NA), na.rm = FALSE), - c(1,NA)) - expect_equal(as.sequence(c(NA,NA)), c(1,1)) +test_that("as.sequence works", { + expect_identical( + as.sequence(c("a", "b")), + c(1, 2) + ) + expect_identical( + as.sequence(c("a", NA)), + c(1, 2) + ) + expect_equal( + as.sequence(c("a", NA), na.rm = FALSE), + c(1, NA) + ) + expect_equal(as.sequence(c(NA, NA)), c(1, 1)) }) test_that("tryl returns FALSE if error, else true ", { - expect_true(tryl(1+1)) + expect_true(tryl(1 + 1)) expect_true(!tryl(log("a"))) }) @@ -109,7 +130,7 @@ test_that("mstmipvar works with defaults", { }) test_that("mstmipvar works with args specified", { - lat <- + lat <- ncdf4::ncdim_def( name = "lat", longname = "station_latitude", @@ -133,17 +154,19 @@ test_that("mstmipvar works with args specified", { calendar = "standard", unlim = TRUE ) - nsoil <- + nsoil <- ncdf4::ncdim_def( name = "SoilLayerMidpoint", longname = "SoilLayerMidpoint", units = "meters", - vals = c(-1.75,-1.25,-0.9,-0.7,-0.5,-0.3,-0.15,-0.075, 0), + vals = c(-1.75, -1.25, -0.9, -0.7, -0.5, -0.3, -0.15, -0.075, 0), unlim = FALSE ) - - expect_s3_class(mstmipvar("NPP", lat = lat, lon = lon, time = time, nsoil = nsoil), - "ncvar4") + + expect_s3_class( + mstmipvar("NPP", lat = lat, lon = lon, time = time, nsoil = nsoil), + "ncvar4" + ) }) # doesn't work because PEcAn.logger doesn't use message() @@ -167,7 +190,6 @@ test_that("`zero.truncate()` able to truncate vector at zero", { }) test_that("`tabnum()` able to convert positive and negative numbers to `n` significant figures", { - # case where n specified x <- c(-2.345, 6.789) result <- tabnum(x, 2) @@ -192,26 +214,25 @@ test_that("`bibtexify()` able to convert parameters passed to bibtex citation fo 
test_that("`rsync()` able to correctly make the command passed to `system` function", { mocked_res <- mockery::mock(0) - mockery::stub(rsync, 'system', mocked_res) - rsync(args = '-avz', from = 'pecan:test_src', to = 'pecan:test_des') + mockery::stub(rsync, "system", mocked_res) + rsync(args = "-avz", from = "pecan:test_src", to = "pecan:test_des") args <- mockery::mock_args(mocked_res) expect_equal(args[[1]][[1]], "rsync -avz pecan:test_src pecan:test_des") }) test_that("`ssh()` able to correctly make the command passed to `system` function", { mocked_res <- mockery::mock(0) - mockery::stub(ssh, 'system', mocked_res) - ssh(host = 'pecan') + mockery::stub(ssh, "system", mocked_res) + ssh(host = "pecan") args <- mockery::mock_args(mocked_res) expect_equal(args[[1]][[1]], "ssh -T pecan \"\" ") }) test_that("`temp.settings()` able to create a temporary settings file", { - expect_equal(temp.settings(''), '') + expect_equal(temp.settings(""), "") }) test_that("`misc.convert()` able to unit conversions for known and unknown units to the function", { - # units known to misc.convert expect_equal(misc.convert(1, "kg C m-2 s-1", "umol C m-2 s-1"), 83259094) # units not known to misc.convert @@ -246,7 +267,7 @@ test_that("`zero.bounded.density()` returns output containing required component test_that("`pdf.stats()` able to calculate mean, variance statistics, and CI from a known distribution", { expect_equal( - pdf.stats("beta", 1, 2), + pdf.stats("beta", 1, 2), unlist(list(mean = 0.33333333, var = 0.05555556, lcl = 0.01257912, ucl = 0.84188612)) ) }) @@ -258,7 +279,7 @@ test_that("`newxtable()` generates correct xtable object", { test_that("`tryl()` able to check if a function gives an error when called", { # case where function does not give an error - expect_true(tryl(1+1)) + expect_true(tryl(1 + 1)) # case where function gives an error expect_false(tryl(log("a"))) @@ -266,14 +287,14 @@ test_that("`tryl()` able to check if a function gives an error when called", { test_that("`download_file()` able to correctly construct the inputs command to system function", { mocked_res <- mockery::mock(0) - mockery::stub(download_file, 'system', mocked_res) + mockery::stub(download_file, "system", mocked_res) download_file("ftp://testpecan.com", "test", "ncftpget") args <- mockery::mock_args(mocked_res) expect_equal(args[[1]][[1]], "ncftpget -c ftp://testpecan.com > test") }) test_that("`retry.func()` able to retry a function before returning an error", { - defaultW <- getOption("warn") + defaultW <- getOption("warn") options(warn = -1) on.exit(options(warn = defaultW)) expect_error( @@ -282,6 +303,5 @@ test_that("`retry.func()` able to retry a function before returning an error", { ) # case where function does not give an error - expect_equal(retry.func(1+1, maxErrors = 2, sleep = 2), 2) + expect_equal(retry.func(1 + 1, maxErrors = 2, sleep = 2), 2) }) - diff --git a/base/visualization/R/add_icon.R b/base/visualization/R/add_icon.R index b72433c0827..a660a7893a5 100644 --- a/base/visualization/R/add_icon.R +++ b/base/visualization/R/add_icon.R @@ -1,34 +1,38 @@ ##' @name add_icon ##' @title add_icon -##' +##' ##' @param x x-coordinate of logo ##' @param y y-coordinate of logo ##' @param id additional plot identification (URL, database ID, etc) -##' @export +##' @export ##' @author Mike Dietze -##' +##' add_icon <- function(id = NULL, x = 0, y = 0) { - # png and grid are both in Suggests; need to check if available before using - if (!requireNamespace("png", quietly = TRUE) - || !requireNamespace("grid", 
quietly = TRUE)) { + if (!requireNamespace("png", quietly = TRUE) || + !requireNamespace("grid", quietly = TRUE)) { PEcAn.logger::logger.error( - "PEcAn.visualization::add_icon needs packages 'png' and 'grid'") + "PEcAn.visualization::add_icon needs packages 'png' and 'grid'" + ) return(NULL) } icon <- png::readPNG( - system.file("favicon.png", package = "PEcAn.visualization")) + system.file("favicon.png", package = "PEcAn.visualization") + ) dims <- dim(icon) logo <- grid::rasterGrob(icon, grid::unit(x, "npc"), - grid::unit(y, "npc"), - grid::unit(dims[1], "points"), - grid::unit(dims[2], "points"), - just = c("left", "bottom")) + grid::unit(y, "npc"), + grid::unit(dims[1], "points"), + grid::unit(dims[2], "points"), + just = c("left", "bottom") + ) grid::grid.draw(logo) - lab <- grid::textGrob(label = paste("PEcAn", id), - x = grid::unit(x, "npc") + grid::unit(dims[1], "points"), - y = grid::unit(y, "npc"), just = c("left", "bottom")) + lab <- grid::textGrob( + label = paste("PEcAn", id), + x = grid::unit(x, "npc") + grid::unit(dims[1], "points"), + y = grid::unit(y, "npc"), just = c("left", "bottom") + ) grid::grid.draw(lab) -} # add_icon +} # add_icon diff --git a/base/visualization/R/ciEnvelope.R b/base/visualization/R/ciEnvelope.R index a6ae53be5a0..29932d5d149 100644 --- a/base/visualization/R/ciEnvelope.R +++ b/base/visualization/R/ciEnvelope.R @@ -1,5 +1,5 @@ #' plots a confidence interval around an x-y plot (e.g. a timeseries) -#' +#' #' @param x Vector defining CI center #' @param ylo Vector defining bottom of CI envelope #' @param yhi Vector defining top of CI envelope @@ -8,7 +8,7 @@ #' @export #' @author Michael Dietze, David LeBauer ciEnvelope <- function(x, ylo, yhi, ...) { - m <- rbind(x, ylo, yhi) + m <- rbind(x, ylo, yhi) nas <- which(apply(is.na(m), 2, sum) > 0) if (length(nas) > 0) { ## break overall dataset into complete blocks @@ -33,6 +33,8 @@ ciEnvelope <- function(x, ylo, yhi, ...) { ylo <- sub.m[[i]]["ylo", ] yhi <- sub.m[[i]]["yhi", ] graphics::polygon( - cbind(c(x, rev(x), x[1]), c(ylo, rev(yhi), ylo[1])), border = NA, ...) + cbind(c(x, rev(x), x[1]), c(ylo, rev(yhi), ylo[1])), + border = NA, ... 
+ ) } } # ciEnvelope diff --git a/base/visualization/R/map.output.R b/base/visualization/R/map.output.R index d12bff489ee..08561ab0d1f 100644 --- a/base/visualization/R/map.output.R +++ b/base/visualization/R/map.output.R @@ -1,7 +1,7 @@ #' Map Output -#' #' -#' @param table data.table or data.frame with columns lat, lon, followed by variable names +#' +#' @param table data.table or data.frame with columns lat, lon, followed by variable names #' @param variable name of variable to be mapped #' @return plot #' @export @@ -17,15 +17,18 @@ map.output <- function(table, variable) { data = world, ggplot2::aes(x = .data$long, y = .data$lat, group = .data$group), fill = "white", - color = "darkgrey") + + color = "darkgrey" + ) + ggplot2::geom_point( data = table, ggplot2::aes(x = .data$lon, y = .data$lat, color = table[, variable]), - size = 5) + + size = 5 + ) + ggplot2::scale_color_gradientn( - colours = c("red", "orange", "yellow", "green", "blue", "violet")) + + colours = c("red", "orange", "yellow", "green", "blue", "violet") + ) + ggplot2::theme_bw() + ggplot2::xlim(range(pretty(table$lon))) + ggplot2::ylim(range(pretty(table$lat))) return(map) -} # map.output +} # map.output diff --git a/base/visualization/R/plot_netcdf.R b/base/visualization/R/plot_netcdf.R index 451e4d89f7f..4f82b8bfbe7 100644 --- a/base/visualization/R/plot_netcdf.R +++ b/base/visualization/R/plot_netcdf.R @@ -27,15 +27,15 @@ data.fetch <- function(var, nc, fun = mean) { attr(val, "lbl") <- nc$dim$time$units return(val) } - + # some precomputations - indices <- 0:length(nc$dim[["time"]]$vals) + indices <- 0:length(nc$dim[["time"]]$vals) aggrlist <- list(floor(nc$dim[["time"]]$vals)) - + # aggregate the data data <- ncdf4::ncvar_get(nc, var) - val <- stats::aggregate(data[indices], by = aggrlist, FUN = fun)$x - + val <- stats::aggregate(data[indices], by = aggrlist, FUN = fun)$x + # get the label title <- nc$var[[var]]$longname units <- nc$var[[var]]$units @@ -48,10 +48,10 @@ data.fetch <- function(var, nc, fun = mean) { } else { attr(val, "lbl") <- paste(title, "in", units) } - + # done return(val) -} # data.fetch +} # data.fetch # ---------------------------------------------------------------------- # MAIN FUNCTIONS @@ -83,13 +83,13 @@ plot_netcdf <- function(datafile, yvar, xvar = "time", width = 800, height = 600 filename = NULL, year = NULL) { # open netcdf file nc <- ncdf4::nc_open(datafile) - + # compute variables xval_mean <- data.fetch(xvar, nc, mean) yval_mean <- data.fetch(yvar, nc, mean) - yval_max <- data.fetch(yvar, nc, max) - yval_min <- data.fetch(yvar, nc, min) - + yval_max <- data.fetch(yvar, nc, max) + yval_min <- data.fetch(yvar, nc, min) + # setup output if (!is.null(filename)) { if (tolower(filename) == "x11") { @@ -104,7 +104,7 @@ plot_netcdf <- function(datafile, yvar, xvar = "time", width = 800, height = 600 grDevices::tiff(filename = filename, width = width, height = height) } } - + # setup plot (needs to be done before removing of NA since that removes attr as well). 
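# Illustrative sketch (not part of this patch): the aggregation idea behind data.fetch()
# above, shown on synthetic numbers instead of a netCDF file. All object names here
# (toy_time, toy_data, toy_mean, toy_max, toy_min) are invented for the example.
toy_time <- seq(0, 9.75, by = 0.25) # fractional time steps, like nc$dim$time$vals
toy_data <- sin(toy_time)
toy_mean <- stats::aggregate(toy_data, by = list(floor(toy_time)), FUN = mean)$x
toy_max <- stats::aggregate(toy_data, by = list(floor(toy_time)), FUN = max)$x
toy_min <- stats::aggregate(toy_data, by = list(floor(toy_time)), FUN = min)$x
# plot_netcdf() then draws the per-step mean as a red line inside a grey polygon that
# spans the per-step min and max, which is what the plotting code below builds up.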
graphics::plot.new() graphics::title(xlab = attr(xval_mean, "lbl")) @@ -124,12 +124,14 @@ plot_netcdf <- function(datafile, yvar, xvar = "time", width = 800, height = 600 } # done with netcdf file ncdf4::nc_close(nc) - + # remove all NA's - removeme <- unique(c(which(is.na(yval_min)), - which(is.na(xval_mean)), - which(is.na(yval_mean)), - which(is.na(yval_max)))) + removeme <- unique(c( + which(is.na(yval_min)), + which(is.na(xval_mean)), + which(is.na(yval_mean)), + which(is.na(yval_max)) + )) if (length(removeme) > 0) { xval_mean <- xval_mean[-removeme] yval_mean <- yval_mean[-removeme] @@ -137,30 +139,36 @@ plot_netcdf <- function(datafile, yvar, xvar = "time", width = 800, height = 600 yval_min <- yval_min[-removeme] } yvals <- c(yval_max, yval_min) - + # order data based on x values o <- order(xval_mean, yval_mean) - + # plot actual data - graphics::plot.window(xlim = c(min(xval_mean), max(xval_mean)), - ylim = c(min(yvals), max(yvals))) + graphics::plot.window( + xlim = c(min(xval_mean), max(xval_mean)), + ylim = c(min(yvals), max(yvals)) + ) graphics::polygon( c(xval_mean[o], rev(xval_mean[o])), c(yval_max[o], rev(yval_min[o])), col = "gray", - border = "black") + border = "black" + ) graphics::lines(x = xval_mean[o], y = yval_mean[o], col = "red") graphics::points( x = xval_mean[o], y = yval_mean[o], col = "black", pch = ".", - cex = 5) + cex = 5 + ) # legend - graphics::legend("bottomright", col = c(1, "gray"), lwd = c(3, 6), - legend = c("mean", "min/max"), - cex = 1.5) + graphics::legend("bottomright", + col = c(1, "gray"), lwd = c(3, 6), + legend = c("mean", "min/max"), + cex = 1.5 + ) # draw axis and box graphics::axis(1) @@ -173,4 +181,4 @@ plot_netcdf <- function(datafile, yvar, xvar = "time", width = 800, height = 600 if (!is.null(filename) && (tolower(filename) != "x11")) { grDevices::dev.off() } -} # plot_netcdf +} # plot_netcdf diff --git a/base/visualization/R/plots.R b/base/visualization/R/plots.R index 40bebb9510a..4afc84d67f2 100644 --- a/base/visualization/R/plots.R +++ b/base/visualization/R/plots.R @@ -3,53 +3,55 @@ ##' When constructing a histogram, it is common to make all bars the same width. ##' One could also choose to make them all have the same area. ##' These two options have complementary strengths and weaknesses; the equal-width histogram oversmooths in regions of high density, and is poor at identifying sharp peaks; the equal-area histogram oversmooths in regions of low density, and so does not identify outliers. -##' We describe a compromise approach which avoids both of these defects. We regard the histogram as an exploratory device, rather than as an estimate of a density. -##' +##' We describe a compromise approach which avoids both of these defects. We regard the histogram as an exploratory device, rather than as an estimate of a density. 
+##' ##' @param x is a numeric vector (the data) ##' @param a is the scaling factor, default is 5 * IQR ##' @param nbins is the number of bins, default is assigned by the Stuges method -##' @param rx is the range used for the left of the left-most bin to the right of the right-most bin +##' @param rx is the range used for the left of the left-most bin to the right of the right-most bin ##' @param eps used to set artificial bound on min width / max height of bins as described in Denby and Mallows (2009) on page 24 -##' @param xlab is label for the x axis +##' @param xlab is label for the x axis ##' @param plot = TRUE produces the plot, FALSE returns the heights, breaks and counts ##' @param lab.spikes = TRUE labels the % of data in the spikes -##' -##' -##' -##' @return list with two elements, heights of length n and breaks of length n+1 indicating the heights and break points of the histogram bars. +##' +##' +##' +##' @return list with two elements, heights of length n and breaks of length n+1 indicating the heights and break points of the histogram bars. ##' @author Lorraine Denby, Colin Mallows ##' @references Lorraine Denby, Colin Mallows. Journal of Computational and Graphical Statistics. March 1, 2009, 18(1): 21-31. doi:10.1198/jcgs.2009.0002. dhist <- function(x, a = 5 * iqr(x), nbins = grDevices::nclass.Sturges(x), rx = range(x, na.rm = TRUE), eps = 0.15, xlab = "x", plot = TRUE, lab.spikes = TRUE) { - if (is.character(nbins)) { - nbins <- switch(casefold(nbins), - sturges = grDevices::nclass.Sturges(x), - fd = grDevices::nclass.FD(x), - scott = grDevices::nclass.scott(x), - stop("Nclass method not recognized")) + nbins <- switch(casefold(nbins), + sturges = grDevices::nclass.Sturges(x), + fd = grDevices::nclass.FD(x), + scott = grDevices::nclass.scott(x), + stop("Nclass method not recognized") + ) } else { if (is.function(nbins)) { nbins <- nbins(x) } } - + x <- sort(x[!is.na(x)]) if (a == 0) { a <- diff(range(x)) / 1e+08 } if (a != 0 & a != Inf) { - n <- length(x) - h <- (rx[2] + a - rx[1]) / nbins - ybr <- rx[1] + h * (0:nbins) + n <- length(x) + h <- (rx[2] + a - rx[1]) / nbins + ybr <- rx[1] + h * (0:nbins) yupper <- x + (a * seq_len(n)) / n # upper and lower corners in the ecdf ylower <- yupper - a / n - # - cmtx <- cbind(cut(yupper, breaks = ybr), - cut(yupper, breaks = ybr, left.include = TRUE), - cut(ylower, breaks = ybr), - cut(ylower, breaks = ybr, left.include = TRUE)) + # + cmtx <- cbind( + cut(yupper, breaks = ybr), + cut(yupper, breaks = ybr, left.include = TRUE), + cut(ylower, breaks = ybr), + cut(ylower, breaks = ybr, left.include = TRUE) + ) cmtx[1, 3] <- cmtx[1, 4] <- 1 # to replace NAs when default r is used cmtx[n, 1] <- cmtx[n, 2] <- nbins @@ -64,7 +66,7 @@ dhist <- function(x, a = 5 * iqr(x), nbins = grDevices::nclass.Sturges(x), rx = counts <- table(c(1:nbins, cmtx[, 1])) } counts <- counts - 1 - # + # if (length(straddlers) > 0) { for (i in straddlers) { binno <- cmtx[i, 1] @@ -75,39 +77,40 @@ dhist <- function(x, a = 5 * iqr(x), nbins = grDevices::nclass.Sturges(x), rx = } xbr <- ybr xbr[-1] <- ybr[-1] - (a * cumsum(counts)) / n - spike <- eps * diff(rx)/nbins + spike <- eps * diff(rx) / nbins flag.vec <- c(diff(xbr) < spike, FALSE) if (sum(abs(diff(xbr)) <= spike) > 1) { - xbr.new <- xbr + xbr.new <- xbr counts.new <- counts - diff.xbr <- abs(diff(xbr)) - amt.spike <- diff.xbr[length(diff.xbr)] + diff.xbr <- abs(diff(xbr)) + amt.spike <- diff.xbr[length(diff.xbr)] for (i in rev(2:length(diff.xbr))) { if (diff.xbr[i - 1] <= spike & diff.xbr[i] <= spike & 
!is.na(diff.xbr[i])) { - amt.spike <- amt.spike + diff.xbr[i - 1] + amt.spike <- amt.spike + diff.xbr[i - 1] counts.new[i - 1] <- counts.new[i - 1] + counts.new[i] - xbr.new[i] <- NA - counts.new[i] <- NA - flag.vec[i - 1] <- TRUE + xbr.new[i] <- NA + counts.new[i] <- NA + flag.vec[i - 1] <- TRUE } else { amt.spike <- diff.xbr[i - 1] - } + } } flag.vec <- flag.vec[!is.na(xbr.new)] flag.vec <- flag.vec[-length(flag.vec)] - counts <- counts.new[!is.na(counts.new)] - xbr <- xbr.new[!is.na(xbr.new)] - + counts <- counts.new[!is.na(counts.new)] + xbr <- xbr.new[!is.na(xbr.new)] } else { flag.vec <- flag.vec[-length(flag.vec)] } widths <- abs(diff(xbr)) ## N.B. argument 'widths' in barplot must be xbr - heights <- counts/widths + heights <- counts / widths } bin.size <- length(x) / nbins - cut.pt <- unique(c(min(x) - abs(min(x)) / 1000, - stats::approx(seq(length(x)), x, seq_len(nbins - 1) * bin.size, rule = 2)$y, max(x))) + cut.pt <- unique(c( + min(x) - abs(min(x)) / 1000, + stats::approx(seq(length(x)), x, seq_len(nbins - 1) * bin.size, rule = 2)$y, max(x) + )) aa <- graphics::hist(x, breaks = cut.pt, plot = FALSE, probability = TRUE) if (a == Inf) { heights <- aa$counts @@ -116,19 +119,20 @@ dhist <- function(x, a = 5 * iqr(x), nbins = grDevices::nclass.Sturges(x), rx = amt.height <- 3 q75 <- stats::quantile(heights, 0.75) if (sum(flag.vec) != 0) { - amt <- max(heights[!flag.vec]) - ylim.height <- amt * amt.height - ind.h <- flag.vec & heights > ylim.height + amt <- max(heights[!flag.vec]) + ylim.height <- amt * amt.height + ind.h <- flag.vec & heights > ylim.height flag.vec[heights < ylim.height * (amt.height - 1) / amt.height] <- FALSE - heights[ind.h] <- ylim.height + heights[ind.h] <- ylim.height } amt.txt <- 0 end.y <- (-10000) if (plot) { graphics::barplot(heights, abs(diff(xbr)), - space = 0, density = -1, - xlab = xlab, plot = TRUE, - xaxt = "n", yaxt = "n") + space = 0, density = -1, + xlab = xlab, plot = TRUE, + xaxt = "n", yaxt = "n" + ) at <- pretty(xbr) graphics::axis(1, at = at - xbr[1], labels = as.character(at)) if (lab.spikes) { @@ -145,13 +149,17 @@ dhist <- function(x, a = 5 * iqr(x), nbins = grDevices::nclass.Sturges(x), rx = end.y <- xbr[i] - xbr[1] + 3 * graphics::par("cxy")[1] } if (flag.vec[i]) { - txt <- paste0(" ", format(round(counts[i]/sum(counts) * 100)), "%") + txt <- paste0(" ", format(round(counts[i] / sum(counts) * 100)), "%") graphics::par(xpd = TRUE) graphics::text(xbr[i + 1] - xbr[1], - ylim.height - graphics::par("cxy")[2] * (amt.txt -1), txt, adj = 0) + ylim.height - graphics::par("cxy")[2] * (amt.txt - 1), txt, + adj = 0 + ) } } - } else print("no spikes or more than one spike") + } else { + print("no spikes or more than one spike") + } } return(invisible(list(heights = heights, xbr = xbr))) } else { @@ -167,14 +175,14 @@ dhist <- function(x, a = 5 * iqr(x), nbins = grDevices::nclass.Sturges(x), rx = ##' @name iqr ##' @title Interquartile range ##' @param x vector -##' @return numeric vector of length 2, with the 25th and 75th quantiles of input vector x +##' @return numeric vector of length 2, with the 25th and 75th quantiles of input vector x iqr <- function(x) { return(diff(stats::quantile(x, c(0.25, 0.75), na.rm = TRUE))) } # iqr -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# ##' Add data to an existing plot or create a new one ##' ##' Used to add raw data or summary statistics to the plot of 
a distribution. @@ -182,13 +190,13 @@ iqr <- function(x) { ##' If SE estimates are available, the SE will be plotted ##' @name plot_data ##' @aliases plot.data -##' @title Add data to plot +##' @title Add data to plot ##' @param trait.data Data to be plotted ##' @param base.plot a ggplot object (grob), ##' created if none provided ##' @param ymax maximum height of y -##' -##' +##' +##' ##' @return updated plot object ##' @author David LeBauer ##' @export @@ -196,11 +204,10 @@ iqr <- function(x) { ##' @examples ##' \dontrun{plot_data(data.frame(Y = c(1, 2), se = c(1,2)), base.plot = NULL, ymax = 10)} plot_data <- function(trait.data, base.plot = NULL, ymax) { - if (is.null(base.plot)) { base.plot <- ggplot2::ggplot() } - + n.pts <- nrow(trait.data) if (n.pts == 1) { ymax <- ymax / 16 @@ -210,25 +217,30 @@ plot_data <- function(trait.data, base.plot = NULL, ymax) { ymax <- ymax / 4 } y.pts <- seq(0, ymax, length.out = 1 + n.pts)[-1] - + if (!"ghs" %in% names(trait.data)) { trait.data$ghs <- 1 } - - plot.data <- data.frame(x = trait.data$Y, - y = y.pts, - se = trait.data$se, - control = !trait.data$trt == 1 & trait.data$ghs == 1) - new.plot <- base.plot + + + plot.data <- data.frame( + x = trait.data$Y, + y = y.pts, + se = trait.data$se, + control = !trait.data$trt == 1 & trait.data$ghs == 1 + ) + new.plot <- base.plot + ggplot2::geom_point( data = plot.data, - ggplot2::aes(x = .data$x, y = .data$y, color = .data$control)) + + ggplot2::aes(x = .data$x, y = .data$y, color = .data$control) + ) + ggplot2::geom_segment( data = plot.data, ggplot2::aes( x = .data$x - .data$se, y = .data$y, xend = .data$x + .data$se, yend = .data$y, - color = .data$control)) + + color = .data$control + ) + ) + ggplot2::scale_color_manual(values = c("black", "grey")) + ggplot2::theme(legend.position = "none") return(new.plot) @@ -251,7 +263,7 @@ plot_data <- function(trait.data, base.plot = NULL, ymax) { ##' @param colour what colo(u)r should the border be ##' @param size relative line thickness ##' @param linetype "solid", "dashed", etc. -##' +##' ##' @return adds borders to ggplot as a side effect ##' @author Rudolf Cardinal ##' @author [ggplot2 google group](https://groups.google.com/forum/?fromgroups#!topic/ggplot2/-ZjRE2OL8lE) @@ -263,7 +275,7 @@ plot_data <- function(trait.data, base.plot = NULL, ymax) { ##' ggplot(data=df, aes(x=x, y=y)) + geom_point() + theme_bw() + ##' opts(panel.border = theme_border(c('b','l')) ) ##' } -theme_border <- function(type = c("left", "right", "bottom", "top", "none"), +theme_border <- function(type = c("left", "right", "bottom", "top", "none"), colour = "black", size = 1, linetype = 1) { type <- match.arg(type, several.ok = TRUE) structure(function(x = 0, y = 0, width = 1, height = 1, ...) 
{ @@ -294,11 +306,15 @@ theme_border <- function(type = c("left", "right", "bottom", "top", "none"), ylist <- append(ylist, c(y, y + height)) idlist <- append(idlist, c(4, 4)) } - grid::polylineGrob(x = xlist, y = ylist, - id = idlist, ..., - default.units = "npc", - gp = grid::gpar(lwd = size, - col = colour, - lty = linetype), ) + grid::polylineGrob( + x = xlist, y = ylist, + id = idlist, ..., + default.units = "npc", + gp = grid::gpar( + lwd = size, + col = colour, + lty = linetype + ), + ) }, class = "theme", type = "box", call = match.call()) } # theme_border diff --git a/base/visualization/R/visually.weighted.watercolor.plots.R b/base/visualization/R/visually.weighted.watercolor.plots.R index a494c0bb5ae..49ef94ee212 100644 --- a/base/visualization/R/visually.weighted.watercolor.plots.R +++ b/base/visualization/R/visually.weighted.watercolor.plots.R @@ -1,19 +1,19 @@ ## Copyright 2012 Felix Schönbrodt ## All rights reserved. -## +## ## FreeBSD License -## +## ## Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are ## met: -## +## ## 1. Redistributions of source code must retain the above copyright ## notice, this list of conditions and the following disclaimer. -## +## ## 2. Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. -## +## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER `AS IS'' AND ANY ## EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR @@ -25,7 +25,7 @@ ## LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ## NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -## +## ## The views and conclusions contained in the software and documentation ## are those of the authors and should not be interpreted as representing ## official policies, either expressed or implied, of the copyright @@ -36,11 +36,11 @@ ## 0.1.1: changed license to FreeBSD; re-established compability to ggplot2 (new version 0.9.2) ##' Visually weighted regression / Watercolor plots -##' +##' ##' Idea: Solomon Hsiang, with additional ideas from many blog commenters ##' Details: \url{http://www.nicebread.de/visually-weighted-regression-in-r-a-la-solomon-hsiang/} ##' \url{http://www.nicebread.de/visually-weighted-watercolor-plots-new-variants-please-vote/} -##' +##' ##' @title PEcAn worldmap ##' ##' @param formula variables to plot. See examples @@ -65,14 +65,14 @@ ##' @param quantize either 'continuous', or 'SD'. In the latter case, we get three color regions for 1, 2, and 3 SD (an idea of John Mashey) ##' @param add if add == FALSE, a new ggplot is returned. If add == TRUE, only the elements are returned, which can be added to an existing ggplot (with the '+' operator) ##' @param ... 
further parameters passed to the fitting function, in the case of loess, for example, 'span = .9', or 'family = 'symmetric'' -##' +##' ##' @return NULL plot as side effect ##' @author Felix Schönbrodt ##' @export ##' @examples ##' # build a demo data set ##' set.seed(1) -##' x <- rnorm(200, 0.8, 1.2) +##' x <- rnorm(200, 0.8, 1.2) ##' e <- rnorm(200, 0, 3)*(abs(x)^1.5 + .5) + rnorm(200, 0, 4) ##' e2 <- rnorm(200, 0, 5)*(abs(x)^1.5 + .8) + rnorm(200, 0, 5) ##' y <- 8*x - x^3 + e @@ -82,29 +82,29 @@ ##' p2 <- vwReg(y2~x, df, add=TRUE, spag=TRUE, shade=FALSE, spag.color='red', shape=3) ##' p3 <- p1 + p2 ##' p3 -##' +##' ##' y <- x + x^2 + runif(200, 0, 0.4) ##' vwReg(y ~ x, df, method=lm) ##' vwReg(y ~ x + I(x^2), df, method=lm) -vwReg <- function(formula, data, title = "", B = 1000, shade = TRUE, shade.alpha = 0.1, +vwReg <- function(formula, data, title = "", B = 1000, shade = TRUE, shade.alpha = 0.1, spag = FALSE, spag.color = "darkblue", mweight = TRUE, show.lm = FALSE, - show.median = TRUE, median.col = "white", shape = 21, show.CI = FALSE, + show.median = TRUE, median.col = "white", shape = 21, show.CI = FALSE, method = stats::loess, bw = FALSE, slices = 200, palette = grDevices::colorRampPalette(c("#FFEDA0", "#DD0000"), bias = 2)(20), ylim = NULL, quantize = "continuous", add = FALSE, ...) { IV <- all.vars(formula)[2] DV <- all.vars(formula)[1] data <- stats::na.omit(data[order(data[, IV]), c(IV, DV)]) - + if (bw) { palette <- grDevices::colorRampPalette(c("#EEEEEE", "#999999", "#333333"), bias = 2)(20) } - + print("Computing boostrapped smoothers ...") newx <- data.frame(seq(min(data[, IV]), max(data[, IV]), length = slices)) colnames(newx) <- IV l0.boot <- matrix(NA, nrow = nrow(newx), ncol = B) - + l0 <- method(formula, data) for (i in seq_len(B)) { data2 <- data[sample(nrow(data), replace = TRUE), ] @@ -116,45 +116,51 @@ vwReg <- function(formula, data, title = "", B = 1000, shade = TRUE, shade.alpha control = stats::loess.control( surface = "i", statistics = "a", - trace.hat = "a"), - ...) + trace.hat = "a" + ), + ... + ) } else { m1 <- method(formula, data2, ...) 
} l0.boot[, i] <- stats::predict(m1, newdata = newx) } - + # compute median and CI limits of bootstrap CI.boot <- plyr::adply(l0.boot, 1, function(x) { stats::quantile( x, - prob = c(0.025, 0.5, 0.975, - stats::pnorm(c(-3, -2, -1, 0, 1, 2, 3))), - na.rm = TRUE) + prob = c( + 0.025, 0.5, 0.975, + stats::pnorm(c(-3, -2, -1, 0, 1, 2, 3)) + ), + na.rm = TRUE + ) })[, -1] colnames(CI.boot)[1:10] <- c("LL", "M", "UL", paste0("SD", 1:7)) CI.boot$x <- newx[, 1] CI.boot$width <- CI.boot$UL - CI.boot$LL - + # scale the CI width to the range 0 to 1 and flip it (bigger numbers = narrower CI) CI.boot$w2 <- (CI.boot$width - min(CI.boot$width)) CI.boot$w3 <- 1 - (CI.boot$w2 / max(CI.boot$w2)) - + # convert bootstrapped spaghettis to long format b2 <- reshape2::melt(l0.boot) b2$x <- newx[, 1] colnames(b2) <- c("index", "B", "value", "x") - + # Construct ggplot All plot elements are constructed as a list, so they can be added to # an existing ggplot - + # if add == FALSE: provide the basic ggplot object - p0 <- ggplot2::ggplot(data, ggplot2::aes_string(x = IV, y = DV)) + ggplot2::theme_bw() - + p0 <- ggplot2::ggplot(data, ggplot2::aes_string(x = IV, y = DV)) + + ggplot2::theme_bw() + # initialize elements with NULL (if they are defined, they are overwritten with something # meaningful) gg.tiles <- gg.poly <- gg.spag <- gg.median <- gg.CI1 <- gg.CI2 <- gg.lm <- gg.points <- gg.title <- NULL - + if (shade) { quantize <- match.arg(quantize, c("continuous", "SD")) if (quantize == "continuous") { @@ -166,35 +172,38 @@ vwReg <- function(formula, data, title = "", B = 1000, shade = TRUE, shade.alpha max_value <- max(max(l0.boot, na.rm = TRUE), max(data[, DV], na.rm = TRUE)) ylim <- c(min_value, max_value) } - + # vertical cross-sectional density estimate d2 <- plyr::ddply(b2[, c("x", "value")], "x", function(df) { res <- data.frame(stats::density(df$value, - na.rm = TRUE, - n = slices, - from = ylim[1], - to = ylim[2])[c("x", "y")]) + na.rm = TRUE, + n = slices, + from = ylim[1], + to = ylim[2] + )[c("x", "y")]) # res <- data.frame(stats::density(df$value, na.rm=TRUE, n=slices)[c('x', 'y')]) colnames(res) <- c("y", "dens") return(res) }, .progress = "text") - + maxdens <- max(d2$dens) mindens <- min(d2$dens) d2$dens.scaled <- (d2$dens - mindens) / maxdens - + ## Tile approach d2$alpha.factor <- d2$dens.scaled^shade.alpha gg.tiles <- list( ggplot2::geom_tile( data = d2, - ggplot2::aes(x = .data$x, y = .data$y, fill = .data$dens.scaled, alpha = .data$alpha.factor)), + ggplot2::aes(x = .data$x, y = .data$y, fill = .data$dens.scaled, alpha = .data$alpha.factor) + ), ggplot2::scale_fill_gradientn("dens.scaled", colours = palette), - ggplot2::scale_alpha_continuous(range = c(0.001, 1))) + ggplot2::scale_alpha_continuous(range = c(0.001, 1)) + ) } if (quantize == "SD") { ## Polygon approach - + SDs <- reshape2::melt(CI.boot[, c("x", paste0("SD", 1:7))], id.vars = "x") count <- 0 d3 <- data.frame() @@ -208,72 +217,79 @@ vwReg <- function(formula, data, title = "", B = 1000, shade = TRUE, shade.alpha count <- count + 1 d3 <- rbind(d3, seg) } - + gg.poly <- list( ggplot2::geom_polygon(data = d3, ggplot2::aes(x = .data$x, y = .data$value, color = NULL, fill = col, group = .data$group)), - ggplot2::scale_fill_gradientn("dens.scaled", colours = palette, values = seq(-1, 3, 1))) + ggplot2::scale_fill_gradientn("dens.scaled", colours = palette, values = seq(-1, 3, 1)) + ) } } - + print("Build ggplot figure ...") utils::flush.console() - + if (spag) { gg.spag <- ggplot2::geom_path( data = b2, ggplot2::aes(x = .data$x, y = 
.data$value, group = B), size = 0.7, alpha = 10 / B, - color = spag.color) + color = spag.color + ) } - + if (show.median) { if (mweight) { gg.median <- ggplot2::geom_path( data = CI.boot, - ggplot2::aes(x = .data$x, y = .data$M, alpha = .data$w3 ^ 3), + ggplot2::aes(x = .data$x, y = .data$M, alpha = .data$w3^3), size = 0.6, linejoin = "mitre", - color = median.col) + color = median.col + ) } else { gg.median <- ggplot2::geom_path( data = CI.boot, ggplot2::aes(x = .data$x, y = .data$M), size = 0.6, linejoin = "mitre", - color = median.col) + color = median.col + ) } } - + # Confidence limits if (show.CI) { gg.CI1 <- ggplot2::geom_path(data = CI.boot, ggplot2::aes(x = .data$x, y = .data$UL), size = 1, color = "red") gg.CI2 <- ggplot2::geom_path(data = CI.boot, ggplot2::aes(x = .data$x, y = .data$LL), size = 1, color = "red") } - + # plain linear regression line if (show.lm) { gg.lm <- ggplot2::geom_smooth(method = "lm", color = "darkgreen", se = FALSE) } - + gg.points <- ggplot2::geom_point( data = data, ggplot2::aes_string(x = IV, y = DV), size = 1, shape = shape, fill = "white", - color = "black") - + color = "black" + ) + if (title != "") { gg.title <- ggplot2::theme(title = title) } - - gg.elements <- list(gg.tiles, gg.poly, gg.spag, gg.median, gg.CI1, gg.CI2, - gg.lm, gg.points, gg.title, ggplot2::theme(legend.position = "none")) - + + gg.elements <- list( + gg.tiles, gg.poly, gg.spag, gg.median, gg.CI1, gg.CI2, + gg.lm, gg.points, gg.title, ggplot2::theme(legend.position = "none") + ) + if (!add) { return(p0 + gg.elements) } else { return(gg.elements) } -} # vwReg +} # vwReg diff --git a/base/visualization/tests/testthat/test.add_icon.R b/base/visualization/tests/testthat/test.add_icon.R index d487b87714e..f7ca3633222 100644 --- a/base/visualization/tests/testthat/test.add_icon.R +++ b/base/visualization/tests/testthat/test.add_icon.R @@ -4,4 +4,4 @@ test_that("`add_icon()` able to create the correct output file", { # check if file exists expect_true(file.exists("Rplots.pdf")) }) -}) \ No newline at end of file +}) diff --git a/base/visualization/tests/testthat/test.plot_data.R b/base/visualization/tests/testthat/test.plot_data.R index 25264be0474..2a88cd7f0ae 100644 --- a/base/visualization/tests/testthat/test.plot_data.R +++ b/base/visualization/tests/testthat/test.plot_data.R @@ -1,7 +1,7 @@ test_that("`plot_data()` able to create a new plot for data passed to it", { withr::with_dir(tempdir(), { - res <- plot_data(data.frame(Y = c(1, 2), se = c(1,2), trt = c(1, 2)), base.plot = NULL, ymax = 10) + res <- plot_data(data.frame(Y = c(1, 2), se = c(1, 2), trt = c(1, 2)), base.plot = NULL, ymax = 10) print(res) expect_true(file.exists(paste0(getwd(), "/Rplots.pdf"))) }) -}) \ No newline at end of file +}) diff --git a/base/visualization/tests/testthat/test.plot_netcdf.R b/base/visualization/tests/testthat/test.plot_netcdf.R index c0e32252adc..3e203ef5d04 100644 --- a/base/visualization/tests/testthat/test.plot_netcdf.R +++ b/base/visualization/tests/testthat/test.plot_netcdf.R @@ -1,7 +1,7 @@ test_that("`data.fetch()` able to return aggregated data with the correct label", { nc <- ncdf4::nc_open("./data/urbana_subdaily_test.nc") on.exit(ncdf4::nc_close(nc)) - + res <- data.fetch("time", nc, mean) expect_equal(attr(res, "lbl"), "days since 1700-01-01T00:00:00Z") @@ -12,10 +12,10 @@ test_that("`data.fetch()` able to return aggregated data with the correct label" test_that("`plot_netcdf()` able to correctly plot the netcdf file data and create the plot file at the desired location", { nc <- 
ncdf4::nc_open("./data/urbana_subdaily_test.nc") withr::with_dir(tempdir(), { - mockery::stub(plot_netcdf, 'ncdf4::nc_open', nc) + mockery::stub(plot_netcdf, "ncdf4::nc_open", nc) res <- plot_netcdf("./data/urbana_subdaily_test.nc", "time", "air_temperature", year = 2010) # check if file exists expect_true(file.exists("Rplots.pdf")) }) -}) \ No newline at end of file +}) diff --git a/base/visualization/tests/testthat/test.viz.R b/base/visualization/tests/testthat/test.viz.R index d205843a58e..11ce94176d2 100644 --- a/base/visualization/tests/testthat/test.viz.R +++ b/base/visualization/tests/testthat/test.viz.R @@ -1,3 +1 @@ - context("Testing Visualization") - diff --git a/base/workflow/R/create_execute_test_xml.R b/base/workflow/R/create_execute_test_xml.R index aacb05d8b08..9317ac9f208 100644 --- a/base/workflow/R/create_execute_test_xml.R +++ b/base/workflow/R/create_execute_test_xml.R @@ -46,15 +46,14 @@ create_execute_test_xml <- function(model_id, db_bety_hostname = NULL, db_bety_port = NULL, db_bety_driver = "Postgres") { - php_file <- file.path(pecan_path, "web", "config.php") config.list <- PEcAn.utils::read_web_config(php_file) if (is.null(db_bety_username)) db_bety_username <- config.list$db_bety_username if (is.null(db_bety_password)) db_bety_password <- config.list$db_bety_password if (is.null(db_bety_hostname)) db_bety_hostname <- config.list$db_bety_hostname if (is.null(db_bety_port)) db_bety_port <- config.list$db_bety_port - - #opening a connection to bety + + # opening a connection to bety con <- PEcAn.DB::db.open(list( user = db_bety_username, password = db_bety_password, @@ -65,17 +64,19 @@ create_execute_test_xml <- function(model_id, on.exit(PEcAn.DB::db.close(con), add = TRUE) settings <- list( - info = list(notes = "Test_Run", - userid = user_id, - username = "None", - dates = Sys.Date()) + info = list( + notes = "Test_Run", + userid = user_id, + username = "None", + dates = Sys.Date() + ) ) - #Outdir + # Outdir model.new <- dplyr::tbl(con, "models") %>% dplyr::filter(.data$id == !!model_id) %>% dplyr::collect() - + outdir_pre <- paste( model.new[["model_name"]], format(as.Date(start_date), "%Y-%m"), @@ -90,75 +91,84 @@ create_execute_test_xml <- function(model_id, outdir <- normalizePath(outdir) settings$outdir <- outdir - #Database BETY + # Database BETY settings$database <- list( - bety = list(user = db_bety_username, - password = db_bety_password, - host = db_bety_hostname, - dbname = "bety", - driver = db_bety_driver, - write = FALSE), + bety = list( + user = db_bety_username, + password = db_bety_password, + host = db_bety_hostname, + dbname = "bety", + driver = db_bety_driver, + write = FALSE + ), dbfiles = dbfiles_folder ) - #PFT - if (is.null(pft)){ + # PFT + if (is.null(pft)) { # Select the first PFT in the model list. pft <- dplyr::tbl(con, "pfts") %>% dplyr::filter(.data$modeltype_id == !!model.new$modeltype_id) %>% dplyr::collect() - + pft <- pft$name[[1]] - message("PFT is `NULL`. Defaulting to the following PFT: ", - pft) + message( + "PFT is `NULL`. 
Defaulting to the following PFT: ", + pft + ) } ## Putting multiple PFTs separated by semicolon settings$pfts <- strsplit(pft, ";")[[1]] %>% - purrr::map( ~ list(name = .x, - constants = list(num = 1) - ) - ) %>% - stats::setNames(rep("pft", length(.data))) + purrr::map(~ list( + name = .x, + constants = list(num = 1) + )) %>% + stats::setNames(rep("pft", length(.data))) - #Meta Analysis + # Meta Analysis settings$meta.analysis <- list(iter = 3000, random.effects = FALSE) - #Ensemble + # Ensemble settings$ensemble <- list( size = ensemble_size, variable = sensitivity_variable, - samplingspace = list(met = list(method = "sampling"), - parameters = list(method = "uniform")) + samplingspace = list( + met = list(method = "sampling"), + parameters = list(method = "uniform") + ) ) - #Sensitivity + # Sensitivity if (sensitivity) { settings$sensitivity.analysis <- list( quantiles = list(sigma1 = -2, sigma2 = -1, sigma3 = 1, sigma4 = 2) ) } - #Model + # Model settings$model$id <- model.new[["id"]] - #Workflow + # Workflow settings$workflow$id - settings$workflow$id <- paste0("Test_run_","_",model.new$model_name) + settings$workflow$id <- paste0("Test_run_", "_", model.new$model_name) settings$run <- list( site = list(id = site_id, met.start = start_date, met.end = end_date), - inputs = list(met = list(source = met, output = model.new[["model_name"]], - username = "pecan")), + inputs = list(met = list( + source = met, output = model.new[["model_name"]], + username = "pecan" + )), start.date = start_date, end.date = end_date ) settings$host$name <- "localhost" - + # Add model specific options - settings<-model_specific_tags(settings, model.new) - #create file and Run + settings <- model_specific_tags(settings, model.new) + # create file and Run XML::saveXML(PEcAn.settings::listToXml(settings, "pecan"), - file = file.path(outdir, "pecan.xml")) + file = file.path(outdir, "pecan.xml") + ) file.copy(file.path(pecan_path, "web", "workflow.R"), outdir) cwd <- getwd() setwd(outdir) @@ -180,14 +190,14 @@ create_execute_test_xml <- function(model_id, #' @return updated settings list #' @export #' -model_specific_tags <- function(settings, model.info){ - - #some extra settings for LPJ-GUESS - if(model.info$model_name=="LPJ-GUESS"){ - settings$run$inputs <- c(settings$run$inputs , - list(soil=list(id=1000000903)) - ) +model_specific_tags <- function(settings, model.info) { + # some extra settings for LPJ-GUESS + if (model.info$model_name == "LPJ-GUESS") { + settings$run$inputs <- c( + settings$run$inputs, + list(soil = list(id = 1000000903)) + ) } - + return(settings) } diff --git a/base/workflow/R/do_conversions.R b/base/workflow/R/do_conversions.R index 50fd71e812e..9354954afd0 100644 --- a/base/workflow/R/do_conversions.R +++ b/base/workflow/R/do_conversions.R @@ -12,61 +12,61 @@ do_conversions <- function(settings, overwrite.met = FALSE, overwrite.fia = FALS if (PEcAn.settings::is.MultiSettings(settings)) { return(PEcAn.settings::papply(settings, do_conversions)) } - + needsave <- FALSE if (is.character(settings$run$inputs)) { - settings$run$inputs <- NULL ## check for empty set + settings$run$inputs <- NULL ## check for empty set } - + dbfiles.local <- settings$database$dbfiles dbfiles <- ifelse(!PEcAn.remote::is.localhost(settings$host) & !is.null(settings$host$folder), settings$host$folder, dbfiles.local) - PEcAn.logger::logger.debug("do.conversion outdir",dbfiles) + PEcAn.logger::logger.debug("do.conversion outdir", dbfiles) # For each input for (i in seq_along(settings$run$inputs)) { input <- 
settings$run$inputs[[i]] if (is.null(input)) { next } - + input.tag <- names(settings$run$input)[i] - PEcAn.logger::logger.info("PROCESSING: ",input.tag) - - + PEcAn.logger::logger.info("PROCESSING: ", input.tag) + + ic.flag <- fia.flag <- FALSE - - if ((input.tag %in% c("css", "pss", "site")) && - is.null(input$path) && !is.null(input$source)) { - if(!is.null(input$useic)){ # set TRUE if IC Workflow, leave empty if not - ic.flag <- TRUE - }else if(input$source == "FIA"){ + + if ((input.tag %in% c("css", "pss", "site")) && + is.null(input$path) && !is.null(input$source)) { + if (!is.null(input$useic)) { # set TRUE if IC Workflow, leave empty if not + ic.flag <- TRUE + } else if (input$source == "FIA") { fia.flag <- TRUE # possibly a warning for deprecation in the future } } - + # BADM IC - if(input.tag == "poolinitcond" && is.null(input$path)){ - ic.flag <- TRUE + if (input.tag == "poolinitcond" && is.null(input$path)) { + ic.flag <- TRUE } - + # IC conversion : for now for ED only, hence the css/pss/site check # TRUE if (ic.flag) { - settings <- PEcAn.data.land::ic_process(settings, input, dir = dbfiles, overwrite = overwrite.ic) + settings <- PEcAn.data.land::ic_process(settings, input, dir = dbfiles, overwrite = overwrite.ic) needsave <- TRUE } - + # keep fia.to.psscss if (fia.flag) { settings <- PEcAn.data.land::fia.to.psscss(settings, overwrite = overwrite.fia) needsave <- TRUE } - - + + # soil extraction - if(input.tag == "soil" && is.null(input$path)){ - settings$run$inputs[[i]]$path <- PEcAn.data.land::soil_process(settings, input, dbfiles.local, overwrite=FALSE) + if (input.tag == "soil" && is.null(input$path)) { + settings$run$inputs[[i]]$path <- PEcAn.data.land::soil_process(settings, input, dbfiles.local, overwrite = FALSE) needsave <- TRUE ## NOTES: at the moment only processing soil locally. 
Need to think about how to generalize this ## because many models will read PEcAn standard in write.configs and write out into settings @@ -75,30 +75,31 @@ do_conversions <- function(settings, overwrite.met = FALSE, overwrite.fia = FALS } # Phenology data extraction - if(input.tag == "leaf_phenology" && is.null(input$path)){ - #settings$run$inputs[[i]]$path <- PEcAn.data.remote::extract_phenology_MODIS(site_info,start_date,end_date,outdir,run_parallel = TRUE,ncores = NULL) + if (input.tag == "leaf_phenology" && is.null(input$path)) { + # settings$run$inputs[[i]]$path <- PEcAn.data.remote::extract_phenology_MODIS(site_info,start_date,end_date,outdir,run_parallel = TRUE,ncores = NULL) needsave <- TRUE } # met conversion - + if (input.tag == "met") { name <- "MET Process" - if ( (PEcAn.utils::status.check(name) == 0)) { ## previously is.null(input$path) && - PEcAn.logger::logger.info("calling met.process: ",settings$run$inputs[[i]][['path']]) - settings$run$inputs[[i]] <- + if ((PEcAn.utils::status.check(name) == 0)) { ## previously is.null(input$path) && + PEcAn.logger::logger.info("calling met.process: ", settings$run$inputs[[i]][["path"]]) + settings$run$inputs[[i]] <- PEcAn.data.atmosphere::met.process( - site = settings$run$site, + site = settings$run$site, input_met = settings$run$inputs$met, start_date = settings$run$start.date, end_date = settings$run$end.date, model = settings$model$type, host = settings$host, - dbparms = settings$database$bety, + dbparms = settings$database$bety, dir = dbfiles, spin = settings$spin, - overwrite = overwrite.met) - PEcAn.logger::logger.debug("updated met path: ",settings$run$inputs[[i]][['path']]) + overwrite = overwrite.met + ) + PEcAn.logger::logger.debug("updated met path: ", settings$run$inputs[[i]][["path"]]) needsave <- TRUE } } diff --git a/base/workflow/R/run.write.configs.R b/base/workflow/R/run.write.configs.R index 22ed6f3b729..f8036400989 100644 --- a/base/workflow/R/run.write.configs.R +++ b/base/workflow/R/run.write.configs.R @@ -22,51 +22,58 @@ #' @export #' #' @author David LeBauer, Shawn Serbin, Ryan Kelly, Mike Dietze -run.write.configs <- function(settings, write = TRUE, ens.sample.method = "uniform", - posterior.files = rep(NA, length(settings$pfts)), +run.write.configs <- function(settings, write = TRUE, ens.sample.method = "uniform", + posterior.files = rep(NA, length(settings$pfts)), overwrite = TRUE) { - tryCatch({ - con <- PEcAn.DB::db.open(settings$database$bety) - on.exit(PEcAn.DB::db.close(con), add = TRUE) - }, error = function(e) { - PEcAn.logger::logger.severe( - "Connection requested, but failed to open with the following error: ", - conditionMessage(e)) - }) - + tryCatch( + { + con <- PEcAn.DB::db.open(settings$database$bety) + on.exit(PEcAn.DB::db.close(con), add = TRUE) + }, + error = function(e) { + PEcAn.logger::logger.severe( + "Connection requested, but failed to open with the following error: ", + conditionMessage(e) + ) + } + ) + ## Which posterior to use? for (i in seq_along(settings$pfts)) { ## if posterior.files is specified us that if (is.na(posterior.files[i])) { ## otherwise, check to see if posteriorid exists if (!is.null(settings$pfts[[i]]$posteriorid)) { - #TODO: sometimes `files` is a 0x0 tibble and other operations with it fail. + # TODO: sometimes `files` is a 0x0 tibble and other operations with it fail. 
files <- PEcAn.DB::dbfile.check("Posterior", - settings$pfts[[i]]$posteriorid, - con, settings$host$name, return.all = TRUE) - pid <- grep("post.distns.*Rdata", files$file_name) ## is there a posterior file? + settings$pfts[[i]]$posteriorid, + con, settings$host$name, + return.all = TRUE + ) + pid <- grep("post.distns.*Rdata", files$file_name) ## is there a posterior file? if (length(pid) == 0) { - pid <- grep("prior.distns.Rdata", files$file_name) ## is there a prior file? + pid <- grep("prior.distns.Rdata", files$file_name) ## is there a prior file? } if (length(pid) > 0) { posterior.files[i] <- file.path(files$file_path[pid], files$file_name[pid]) - } ## otherwise leave posteriors as NA + } ## otherwise leave posteriors as NA } ## otherwise leave NA and get.parameter.samples will look for local } else { ## does posterior.files point to a directory instead of a file? - if(utils::file_test("-d",posterior.files[i])){ - pfiles = dir(posterior.files[i],pattern = "post.distns.*Rdata",full.names = TRUE) - if(length(pfiles)>1){ - pid = grep("post.distns.Rdata",pfiles) - if(length(pid > 0)){ - pfiles = pfiles[grep("post.distns.Rdata",pfiles)] + if (utils::file_test("-d", posterior.files[i])) { + pfiles <- dir(posterior.files[i], pattern = "post.distns.*Rdata", full.names = TRUE) + if (length(pfiles) > 1) { + pid <- grep("post.distns.Rdata", pfiles) + if (length(pid > 0)) { + pfiles <- pfiles[grep("post.distns.Rdata", pfiles)] } else { PEcAn.logger::logger.error( "run.write.configs: could uniquely identify posterior files within", - posterior.files[i]) + posterior.files[i] + ) } - posterior.files[i] = pfiles + posterior.files[i] <- pfiles } } ## also, double check PFT outdir exists @@ -74,9 +81,9 @@ run.write.configs <- function(settings, write = TRUE, ens.sample.method = "unifo ## no outdir settings$pfts[[i]]$outdir <- file.path(settings$outdir, "pfts", settings$pfts[[i]]$name) } - } ## end else + } ## end else } ## end for loop over pfts - + ## Sample parameters model <- settings$model$type scipen <- getOption("scipen") @@ -95,90 +102,96 @@ run.write.configs <- function(settings, write = TRUE, ens.sample.method = "unifo } else { PEcAn.logger::logger.error(samples.file, "not found, this file is required by the run.write.configs function") } - + ## remove previous runs.txt if (overwrite && file.exists(file.path(settings$rundir, "runs.txt"))) { PEcAn.logger::logger.warn("Existing runs.txt file will be removed.") unlink(file.path(settings$rundir, "runs.txt")) } - + PEcAn.utils::load.modelpkg(model) - + ## Check for model-specific write configs - - my.write.config <- paste0("write.config.",model) + + my.write.config <- paste0("write.config.", model) if (!exists(my.write.config)) { - PEcAn.logger::logger.error(my.write.config, - "does not exist, please make sure that the model package contains a function called", - my.write.config) + PEcAn.logger::logger.error( + my.write.config, + "does not exist, please make sure that the model package contains a function called", + my.write.config + ) } - + ## Prepare for model output. Clean up any old config files (if exists) - #TODO: shouldn't this check if the files exist before removing them? + # TODO: shouldn't this check if the files exist before removing them? 
my.remove.config <- paste0("remove.config.", model) if (exists(my.remove.config)) { do.call(my.remove.config, args = list(settings$rundir, settings)) } - + # TODO RK : need to write to runs_inputs table - + # Save names pft.names <- names(trait.samples) trait.names <- lapply(trait.samples, names) - + ### NEED TO IMPLEMENT: Load Environmental Priors and Posteriors - + ### Sensitivity Analysis if ("sensitivity.analysis" %in% names(settings)) { - ### Write out SA config files PEcAn.logger::logger.info("\n ----- Writing model run config files ----") - sa.runs <- PEcAn.uncertainty::write.sa.configs(defaults = settings$pfts, - quantile.samples = sa.samples, - settings = settings, - model = model, - write.to.db = write) - + sa.runs <- PEcAn.uncertainty::write.sa.configs( + defaults = settings$pfts, + quantile.samples = sa.samples, + settings = settings, + model = model, + write.to.db = write + ) + # Store output in settings and output variables runs.samples$sa <- sa.run.ids <- sa.runs$runs settings$sensitivity.analysis$ensemble.id <- sa.ensemble.id <- sa.runs$ensemble.id - + # Save sensitivity analysis info fname <- PEcAn.uncertainty::sensitivity.filename(settings, "sensitivity.samples", "Rdata", - all.var.yr = TRUE, pft = NULL) + all.var.yr = TRUE, pft = NULL + ) save(sa.run.ids, sa.ensemble.id, sa.samples, pft.names, trait.names, file = fname) - - } ### End of SA - + } ### End of SA + ### Write ENSEMBLE if ("ensemble" %in% names(settings)) { - ens.runs <- PEcAn.uncertainty::write.ensemble.configs(defaults = settings$pfts, - ensemble.samples = ensemble.samples, - settings = settings, - model = model, - write.to.db = write) - + ens.runs <- PEcAn.uncertainty::write.ensemble.configs( + defaults = settings$pfts, + ensemble.samples = ensemble.samples, + settings = settings, + model = model, + write.to.db = write + ) + # Store output in settings and output variables runs.samples$ensemble <- ens.run.ids <- ens.runs$runs settings$ensemble$ensemble.id <- ens.ensemble.id <- ens.runs$ensemble.id - ens.samples <- ensemble.samples # rename just for consistency - + ens.samples <- ensemble.samples # rename just for consistency + # Save ensemble analysis info fname <- PEcAn.uncertainty::ensemble.filename(settings, "ensemble.samples", "Rdata", all.var.yr = TRUE) save(ens.run.ids, ens.ensemble.id, ens.samples, pft.names, trait.names, file = fname) } else { PEcAn.logger::logger.info("not writing config files for ensemble, settings are NULL") - } ### End of Ensemble - + } ### End of Ensemble + PEcAn.logger::logger.info("###### Finished writing model run config files #####") PEcAn.logger::logger.info("config files samples in ", file.path(settings$outdir, "run")) - + ### Save output from SA/Ensemble runs - # A lot of this is duplicate with the ensemble/sa specific output above, but kept for backwards compatibility. - save(ensemble.samples, trait.samples, sa.samples, runs.samples, pft.names, trait.names, - file = file.path(settings$outdir, "samples.Rdata")) + # A lot of this is duplicate with the ensemble/sa specific output above, but kept for backwards compatibility. 
+ save(ensemble.samples, trait.samples, sa.samples, runs.samples, pft.names, trait.names, + file = file.path(settings$outdir, "samples.Rdata") + ) PEcAn.logger::logger.info("parameter values for runs in ", file.path(settings$outdir, "samples.RData")) options(scipen = scipen) - + return(invisible(settings)) } diff --git a/base/workflow/R/runModule.get.trait.data.R b/base/workflow/R/runModule.get.trait.data.R index 3b312dcf330..f126ac29cdf 100644 --- a/base/workflow/R/runModule.get.trait.data.R +++ b/base/workflow/R/runModule.get.trait.data.R @@ -4,7 +4,9 @@ ##' `MultiSettings` ##' @export runModule.get.trait.data <- function(settings) { - if (is.null(settings$meta.analysis)) return(settings) ## if there's no MA, there's no need for traits + if (is.null(settings$meta.analysis)) { + return(settings) + } ## if there's no MA, there's no need for traits if (PEcAn.settings::is.MultiSettings(settings)) { pfts <- list() pft.names <- character(0) @@ -29,8 +31,9 @@ runModule.get.trait.data <- function(settings) { database <- settings$database$bety forceupdate <- ifelse(is.null(settings$meta.analysis$update), - FALSE, - settings$meta.analysis$update) + FALSE, + settings$meta.analysis$update + ) write <- settings$database$bety$write settings$pfts <- PEcAn.DB::get.trait.data( @@ -52,8 +55,9 @@ runModule.get.trait.data <- function(settings) { database <- settings$database$bety forceupdate <- ifelse(is.null(settings$meta.analysis$update), - FALSE, - settings$meta.analysis$update) + FALSE, + settings$meta.analysis$update + ) write <- settings$database$bety$write settings$pfts <- PEcAn.DB::get.trait.data( diff --git a/base/workflow/R/runModule.run.write.configs.R b/base/workflow/R/runModule.run.write.configs.R index 265da4f6c1c..95b904e04ad 100644 --- a/base/workflow/R/runModule.run.write.configs.R +++ b/base/workflow/R/runModule.run.write.configs.R @@ -5,7 +5,6 @@ #' @return A modified settings object, invisibly #' @export runModule.run.write.configs <- function(settings, overwrite = TRUE) { - if (PEcAn.settings::is.MultiSettings(settings)) { if (overwrite && file.exists(file.path(settings$rundir, "runs.txt"))) { PEcAn.logger::logger.warn("Existing runs.txt file will be removed.") @@ -16,17 +15,17 @@ runModule.run.write.configs <- function(settings, overwrite = TRUE) { write <- settings$database$bety$write # double check making sure we have method for parameter sampling if (is.null(settings$ensemble$samplingspace$parameters$method)) settings$ensemble$samplingspace$parameters$method <- "uniform" - ens.sample.method <- settings$ensemble$samplingspace$parameters$method - - - #check to see if there are posterior.files tags under pft - posterior.files <-settings$pfts %>% - purrr::map(purrr::possibly('posterior.files', NA_character_)) %>% + ens.sample.method <- settings$ensemble$samplingspace$parameters$method + + + # check to see if there are posterior.files tags under pft + posterior.files <- settings$pfts %>% + purrr::map(purrr::possibly("posterior.files", NA_character_)) %>% purrr::modify_depth(1, function(x) { ifelse(is.null(x), NA_character_, x) }) %>% unlist() - + return(PEcAn.workflow::run.write.configs(settings, write, ens.sample.method, posterior.files = posterior.files, overwrite = overwrite)) } else { stop("runModule.run.write.configs only works with Settings or MultiSettings") diff --git a/base/workflow/R/start_model_runs.R b/base/workflow/R/start_model_runs.R index 12ef228704e..e4ec46fbbbc 100644 --- a/base/workflow/R/start_model_runs.R +++ b/base/workflow/R/start_model_runs.R @@ -6,42 +6,45 @@ #' 
@export #' @examples #' \dontrun{ -#' start_model_runs(settings) +#' start_model_runs(settings) #' } #' @author Shawn Serbin, Rob Kooper, David LeBauer, Alexey Shiklomanov #' start_model_runs <- function(settings, write = TRUE, stop.on.error = TRUE) { - run_file <- file.path(settings$rundir, "runs.txt") # check if runs need to be done if (!file.exists(run_file)) { PEcAn.logger::logger.warn( - "runs.txt not found, assuming no runs need to be done") + "runs.txt not found, assuming no runs need to be done" + ) return() } run_list <- readLines(con = run_file) nruns <- length(run_list) if (nruns == 0) { PEcAn.logger::logger.warn( - "runs.txt found, but is empty. Assuming no runs need to be done") + "runs.txt found, but is empty. Assuming no runs need to be done" + ) return() } - + model <- settings$model$type PEcAn.logger::logger.info( - "-------------------------------------------------------------------") + "-------------------------------------------------------------------" + ) PEcAn.logger::logger.info(paste(" Starting model runs", model)) PEcAn.logger::logger.info( - "-------------------------------------------------------------------") - + "-------------------------------------------------------------------" + ) + is_local <- PEcAn.remote::is.localhost(settings$host) is_qsub <- !is.null(settings$host$qsub) is_rabbitmq <- !is.null(settings$host$rabbitmq) is_modellauncher <- !is.null(settings$host$modellauncher) - + # Check if Njobmax tag exists in seetings - if (is_modellauncher){ - if (!is.null(settings$host$modellauncher$Njobmax)){ + if (is_modellauncher) { + if (!is.null(settings$host$modellauncher$Njobmax)) { Njobmax <- settings$host$modellauncher$Njobmax } else { Njobmax <- nruns @@ -50,14 +53,14 @@ start_model_runs <- function(settings, write = TRUE, stop.on.error = TRUE) { compt_run_modellauncher <- 1 job_modellauncher <- list() } - + # loop through runs and either call start run, or launch job on remote machine jobids <- list() - + ## setup progressbar pb <- utils::txtProgressBar(min = 0, max = nruns, style = 3) pbi <- 0 - + # create database connection if (write) { dbcon <- PEcAn.DB::db.open(settings$database$bety) @@ -65,53 +68,53 @@ start_model_runs <- function(settings, write = TRUE, stop.on.error = TRUE) { } else { dbcon <- NULL } - + # launcher folder jobfile <- NULL firstrun <- NULL - - #Copy all run directories over if not local + + # Copy all run directories over if not local if (!is_local) { # copy over run directories PEcAn.utils::retry.func( PEcAn.remote::remote.copy.to( host = settings$host, - src = settings$rundir, - dst = dirname(settings$host$rundir), + src = settings$rundir, + dst = dirname(settings$host$rundir), delete = TRUE - ), + ), sleep = 2 ) - + # copy over out directories PEcAn.utils::retry.func( PEcAn.remote::remote.copy.to( host = settings$host, src = settings$modeloutdir, dst = dirname(settings$host$outdir), - #include all directories, exclude all files + # include all directories, exclude all files options = c("--include=*/", "--exclude=*"), delete = TRUE ), sleep = 2 ) } - + # launch each of the jobs for (run in run_list) { run_id_string <- format(run, scientific = FALSE) # write start time to database PEcAn.DB::stamp_started(con = dbcon, run = run) - + # check to see if we use the model launcher if (is_rabbitmq) { run_id_string <- format(run, scientific = FALSE) folder <- file.path(settings$rundir, run_id_string) out <- PEcAn.remote::start_rabbitmq( - folder, settings$host$rabbitmq$uri, settings$host$rabbitmq$queue) + folder, 
settings$host$rabbitmq$uri, settings$host$rabbitmq$queue + ) PEcAn.logger::logger.debug("JOB.SH submit status:", out) jobids[run] <- folder - } else if (is_modellauncher) { # set up launcher script if we use modellauncher if (is.null(firstrun)) { @@ -121,15 +124,16 @@ start_model_runs <- function(settings, write = TRUE, stop.on.error = TRUE) { rundir = settings$rundir, host_rundir = settings$host$rundir, mpirun = settings$host$modellauncher$mpirun, - binary = settings$host$modellauncher$binary) + binary = settings$host$modellauncher$binary + ) job_modellauncher[compt_run_modellauncher] <- run - compt_run_modellauncher <- compt_run_modellauncher+1 + compt_run_modellauncher <- compt_run_modellauncher + 1 } writeLines( c(file.path(settings$host$rundir, run_id_string)), - con = jobfile) + con = jobfile + ) pbi <- pbi + 1 - } else if (is_qsub) { out <- PEcAn.remote::start_qsub( run = run, @@ -140,13 +144,14 @@ start_model_runs <- function(settings, write = TRUE, stop.on.error = TRUE) { host_outdir = settings$host$outdir, stdout_log = "stdout.log", stderr_log = "stderr.log", - job_script = "job.sh") + job_script = "job.sh" + ) PEcAn.logger::logger.debug("JOB.SH submit status:", out) jobids[run] <- PEcAn.remote::qsub_get_jobid( out = out[length(out)], qsub.jobid = settings$host$qsub.jobid, - stop.on.error = stop.on.error) - + stop.on.error = stop.on.error + ) } else { # if qsub option is not invoked. just start model runs in serial. out <- PEcAn.remote::start_serial( @@ -154,53 +159,53 @@ start_model_runs <- function(settings, write = TRUE, stop.on.error = TRUE) { host = settings$host, rundir = settings$rundir, host_rundir = settings$host$rundir, - job_script = "job.sh") - + job_script = "job.sh" + ) + # check output to see if an error occurred during the model run PEcAn.remote::check_model_run(out = out, stop.on.error = stop.on.error) - + if (!is_local) { # copy data back to local PEcAn.utils::retry.func( PEcAn.remote::remote.copy.from( host = settings$host, src = file.path(settings$host$outdir, run_id_string), - dst = settings$modeloutdir), + dst = settings$modeloutdir + ), sleep = 2 ) } - + # write finished time to database PEcAn.DB::stamp_finished(con = dbcon, run = run) - + pbi <- pbi + 1 utils::setTxtProgressBar(pb, pbi) } - + # Check if compt_run has reached Njobmax - if (is_modellauncher){ + if (is_modellauncher) { compt_run <- compt_run + 1 - if (compt_run == Njobmax){ + if (compt_run == Njobmax) { close(jobfile) firstrun <- NULL compt_run <- 0 jobfile <- NULL - } + } } - } # end loop over runs close(pb) - + # need to actually launch the model launcher if (is_modellauncher) { - # Only close if not already closed - if (compt_run != 0){ + if (compt_run != 0) { close(jobfile) } - + if (!is_local) { - for (run in run_list){ #only re-copy run dirs that have launcher and job list + for (run in run_list) { # only re-copy run dirs that have launcher and job list if (run %in% job_modellauncher) { # copy launcher and joblist PEcAn.utils::retry.func( @@ -208,15 +213,15 @@ start_model_runs <- function(settings, write = TRUE, stop.on.error = TRUE) { host = settings$host, src = file.path(settings$rundir, format(run, scientific = FALSE)), dst = settings$host$rundir, - delete = TRUE), + delete = TRUE + ), sleep = 2 ) - } } } if (is_qsub) { - for (run in run_list){ + for (run in run_list) { if (run %in% job_modellauncher) { out <- PEcAn.remote::start_qsub( run = run, @@ -228,71 +233,73 @@ start_model_runs <- function(settings, write = TRUE, stop.on.error = TRUE) { stdout_log = "launcher.out.log", 
stderr_log = "launcher.err.log", job_script = "launcher.sh", - qsub_extra = settings$host$modellauncher$qsub) + qsub_extra = settings$host$modellauncher$qsub + ) } # HACK: Code below gets 'run' from names(jobids) so need an entry for # each run. But when using modellauncher all runs have the same jobid jobids[run] <- sub(settings$host$qsub.jobid, "\\1", out[length(out)]) } - } else { out <- PEcAn.remote::start_serial( run = run, host = settings$host, rundir = settings$rundir, host_rundir = settings$host$rundir, - job_script = "launcher.sh") - + job_script = "launcher.sh" + ) + # check output to see if an error occurred during the model run PEcAn.remote::check_model_run(out = out, stop.on.error = TRUE) - + # write finished time to database for (run in run_list) { PEcAn.DB::stamp_finished(con = dbcon, run = run) } - + utils::setTxtProgressBar(pb, pbi) } } - + # wait for all jobs to finish if (length(jobids) > 0) { PEcAn.logger::logger.debug( "Waiting for the following jobs:", - unlist(unique(jobids))) + unlist(unique(jobids)) + ) } - - #TODO figure out a way to do this while for unique(jobids) instead of jobids + + # TODO figure out a way to do this while for unique(jobids) instead of jobids while (length(jobids) > 0) { Sys.sleep(10) - + if (!is_local) { - #Copy over log files to check progress + # Copy over log files to check progress try(PEcAn.remote::remote.copy.from( host = settings$host, src = settings$host$outdir, dst = dirname(settings$modeloutdir), - options = c('--exclude=*.h5') + options = c("--exclude=*.h5") )) } - + for (run in names(jobids)) { run_id_string <- format(run, scientific = FALSE) - + # check to see if job is done job_finished <- FALSE if (is_rabbitmq) { - job_finished <- + job_finished <- file.exists(file.path(jobids[run], "rabbitmq.out")) } else if (is_qsub) { job_finished <- PEcAn.remote::qsub_run_finished( run = jobids[run], host = settings$host, - qstat = settings$host$qstat) + qstat = settings$host$qstat + ) } - + if (job_finished) { - # TODO check output log if (is_rabbitmq) { data <- readLines(file.path(jobids[run], "rabbitmq.out")) @@ -305,9 +312,9 @@ start_model_runs <- function(settings, write = TRUE, stop.on.error = TRUE) { } } } - + # Write finish time to database - #TODO this repeats for every run in `jobids` writing every run's time stamp every time. This actually takes quite a long time with a lot of ensembles and should either 1) not be a for loop (no `for(x in run_list)`) or 2) if `is_modellauncher`, be done outside of the jobids for loop after all jobs are finished. + # TODO this repeats for every run in `jobids` writing every run's time stamp every time. This actually takes quite a long time with a lot of ensembles and should either 1) not be a for loop (no `for(x in run_list)`) or 2) if `is_modellauncher`, be done outside of the jobids for loop after all jobs are finished. 
if (is_modellauncher) { for (x in run_list) { PEcAn.DB::stamp_finished(con = dbcon, run = x) @@ -315,25 +322,25 @@ start_model_runs <- function(settings, write = TRUE, stop.on.error = TRUE) { } else { PEcAn.DB::stamp_finished(con = dbcon, run = run) } - + # move progress bar if (!is_modellauncher) { pbi <- pbi + 1 } utils::setTxtProgressBar(pb, pbi) - + # remove job if (is_modellauncher) { for (x in run_list) { jobids[x] <- NULL - } + } } else { jobids[run] <- NULL } } # End job finished - } # end loop over runs - } # end while loop checking runs - + } # end loop over runs + } # end while loop checking runs + # Copy data back to local if (!is_local) { PEcAn.utils::retry.func( @@ -354,13 +361,14 @@ start_model_runs <- function(settings, write = TRUE, stop.on.error = TRUE) { #' `settings` instead of as a separate argument. #' #' @export -runModule_start_model_runs <- function(settings, stop.on.error=TRUE) { +runModule_start_model_runs <- function(settings, stop.on.error = TRUE) { if (PEcAn.settings::is.MultiSettings(settings) || - PEcAn.settings::is.Settings(settings)) { + PEcAn.settings::is.Settings(settings)) { write <- settings$database$bety$write return(start_model_runs(settings, write, stop.on.error)) } else { PEcAn.logger::logger.severe( - "runModule_start_model_runs only works with Settings or MultiSettings") + "runModule_start_model_runs only works with Settings or MultiSettings" + ) } -} # runModule_start_model_runs \ No newline at end of file +} # runModule_start_model_runs diff --git a/base/workflow/inst/permutation_tests.R b/base/workflow/inst/permutation_tests.R index b9187399d34..9cd29786386 100755 --- a/base/workflow/inst/permutation_tests.R +++ b/base/workflow/inst/permutation_tests.R @@ -55,8 +55,10 @@ model_df <- tbl(bety, "dbfiles") %>% dplyr::filter(machine_id == !!mach_id) %>% dplyr::filter(container_type == "Model") %>% dplyr::left_join(tbl(bety, "models"), c("container_id" = "id")) %>% - dplyr::select(model_id = container_id, model_name, revision, - file_name, file_path, dbfile_id = id, ) %>% + dplyr::select( + model_id = container_id, model_name, revision, + file_name, file_path, dbfile_id = id, + ) %>% dplyr::collect() %>% dplyr::mutate(exists = file.exists(file.path(file_path, file_name))) @@ -64,8 +66,10 @@ message("Found the following models on the machine:") print(model_df) if (!all(model_df$exists)) { - message("WARNING: The following models are registered on the machine ", - "but their files do not exist:") + message( + "WARNING: The following models are registered on the machine ", + "but their files do not exist:" + ) model_df %>% dplyr::filter(!exists) %>% print() @@ -78,23 +82,25 @@ if (!all(model_df$exists)) { ## Site with no inputs from any machines that is part of Ameriflux site group and Fluxnet Site group site_id_noinput <- tbl(bety, "sites") %>% dplyr::anti_join(tbl(bety, "inputs")) %>% - dplyr::inner_join(tbl(bety, "sitegroups_sites") %>% - filter(sitegroup_id == 1), - by = c("id" = "site_id")) %>% + dplyr::inner_join( + tbl(bety, "sitegroups_sites") %>% + filter(sitegroup_id == 1), + by = c("id" = "site_id") + ) %>% dplyr::select("id.x", "notes", "sitename") %>% dplyr::filter(grepl("TOWER_BEGAN", notes)) %>% dplyr::collect() %>% dplyr::mutate( # Grab years from string within the notes - start_year = substring(stringr::str_extract(notes,pattern = ("(?<=TOWER_BEGAN = ).*(?= TOWER_END)")),1,4), - #Empty tower end in the notes means that it goes until present day so if empty enter curent year. 
+ start_year = substring(stringr::str_extract(notes, pattern = ("(?<=TOWER_BEGAN = ).*(?= TOWER_END)")), 1, 4), + # Empty tower end in the notes means that it goes until present day so if empty enter curent year. end_year = dplyr::if_else( - substring(stringr::str_extract(notes,pattern = ("(?<=TOWER_END = ).*(?=)")),1,4) == "", + substring(stringr::str_extract(notes, pattern = ("(?<=TOWER_END = ).*(?=)")), 1, 4) == "", as.character(lubridate::year(Sys.Date())), - substring(stringr::str_extract(notes,pattern = ("(?<=TOWER_END = ).*(?=)")),1,4) + substring(stringr::str_extract(notes, pattern = ("(?<=TOWER_END = ).*(?=)")), 1, 4) ), - #Check if startdate year is within the inerval of that is given - in_date = data.table::between(as.numeric(lubridate::year(startdate)),as.numeric(start_year),as.numeric(end_year)) + # Check if startdate year is within the inerval of that is given + in_date = data.table::between(as.numeric(lubridate::year(startdate)), as.numeric(start_year), as.numeric(end_year)) ) %>% dplyr::filter( in_date, diff --git a/base/workflow/tests/testthat/test-runModule.get.trait.data.R b/base/workflow/tests/testthat/test-runModule.get.trait.data.R index e1ba65a12a1..7293b64b035 100644 --- a/base/workflow/tests/testthat/test-runModule.get.trait.data.R +++ b/base/workflow/tests/testthat/test-runModule.get.trait.data.R @@ -1,11 +1,11 @@ - context("testing workflow functions") settings <- PEcAn.settings::Settings(list( outdir = "/dev/null", database = list(bety = list(), dbfiles = "fake_path"), pfts = list(pft = list(name = "fake", outdir = "fake_pft_path")), - meta.analysis = list(threshold = 1, iter = 1))) + meta.analysis = list(threshold = 1, iter = 1) +)) test_that("settings not changed if no MA", { settings_noMA <- settings @@ -14,11 +14,12 @@ test_that("settings not changed if no MA", { }) test_that("get.trait.data is called exactly once", { - mock_vals <- mockery::mock(settings$pfts, cycle=TRUE) + mock_vals <- mockery::mock(settings$pfts, cycle = TRUE) mockery::stub( where = runModule.get.trait.data, what = "PEcAn.DB::get.trait.data", - how = mock_vals) + how = mock_vals + ) res <- runModule.get.trait.data(settings) expect_equal(res, settings) @@ -30,4 +31,4 @@ test_that("get.trait.data is called exactly once", { expect_equal(length(res_multi), 3) expect_identical(res_multi[[1]], settings) mockery::expect_called(mock_vals, 2) -}) \ No newline at end of file +}) diff --git a/base/workflow/tests/testthat/test.do_conversions.R b/base/workflow/tests/testthat/test.do_conversions.R index a28a09667ff..8ba03f66ad1 100644 --- a/base/workflow/tests/testthat/test.do_conversions.R +++ b/base/workflow/tests/testthat/test.do_conversions.R @@ -1,6 +1,6 @@ test_that("`do_conversions` able to return settings from pecan.METProcess.xml if it already exists", { withr::with_tempdir({ - settings <- list(host = list(name = 'test', folder = 'test'), outdir = getwd()) + settings <- list(host = list(name = "test", folder = "test"), outdir = getwd()) file_path <- file.path(getwd(), "pecan.METProcess.xml") file.create(file_path) writeLines( @@ -16,16 +16,16 @@ test_that("`do_conversions` able to return settings from pecan.METProcess.xml if test_that("`do_conversions` able to call met.process if the input tag has met, update the met path and save settings to pecan.METProcess.xml", { withr::with_tempdir({ - mocked_res <- mockery::mock(list(path = 'test')) - mockery::stub(do_conversions, 'PEcAn.data.atmosphere::met.process', mocked_res) + mocked_res <- mockery::mock(list(path = "test")) + 
mockery::stub(do_conversions, "PEcAn.data.atmosphere::met.process", mocked_res) settings <- list( - host = list(name = 'test', folder = 'test'), + host = list(name = "test", folder = "test"), outdir = getwd(), run = list(site = list(id = 0), inputs = list(met = list(id = 1))) ) res <- do_conversions(settings) mockery::expect_called(mocked_res, 1) - expect_equal(res$run$inputs$met$path, 'test') + expect_equal(res$run$inputs$met$path, "test") expect_true(file.exists(file.path(getwd(), "pecan.METProcess.xml"))) }) }) diff --git a/base/workflow/tests/testthat/test.start_model_runs.R b/base/workflow/tests/testthat/test.start_model_runs.R index 052950e2c93..7f876a0431c 100644 --- a/base/workflow/tests/testthat/test.start_model_runs.R +++ b/base/workflow/tests/testthat/test.start_model_runs.R @@ -17,4 +17,3 @@ test_that("`start_model_runs` throws a warning if runs.txt is empty", { expect_output(start_model_runs(settings), "runs.txt found, but is empty") }) }) - diff --git a/contrib/browndog/PEcAn#DALEC_convert.R b/contrib/browndog/PEcAn#DALEC_convert.R index bfbd05fd9d7..aab166ed973 100644 --- a/contrib/browndog/PEcAn#DALEC_convert.R +++ b/contrib/browndog/PEcAn#DALEC_convert.R @@ -43,42 +43,42 @@ dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) # unzip and parse filenames if (grepl("pecan.zip$", args[1])) { - system2("/usr/bin/unzip", c("-o", "-d", cffolder, inputFile)) - site <- NA - startYear <- NA - endYear <- NA - for(file in list.files(path=cffolder, pattern="*.nc")) { - pieces <- strsplit(file, ".", fixed=TRUE)[[1]] - if (length(pieces) != 3) { - usage(paste0("invalid file ", file, " should be ..nc")) - } - if (is.na(site)) { - site <- pieces[1] - } else if (site != pieces[1]) { - usage(paste0("incosistent sites ", file, " should be ", site, "..nc")) - } - if (is.na(startYear) || pieces[2] < startYear) { - startYear <- pieces[2] - } - if (is.na(endYear) || pieces[2] > endYear) { - endYear <- pieces[2] - } - startDate <- as.POSIXlt(paste0(startYear,"-01-01 00:00:00"), tz = "UTC") - endDate <- as.POSIXlt(paste0(endYear,"-12-31 23:59:59"), tz = "UTC") + system2("/usr/bin/unzip", c("-o", "-d", cffolder, inputFile)) + site <- NA + startYear <- NA + endYear <- NA + for (file in list.files(path = cffolder, pattern = "*.nc")) { + pieces <- strsplit(file, ".", fixed = TRUE)[[1]] + if (length(pieces) != 3) { + usage(paste0("invalid file ", file, " should be ..nc")) } if (is.na(site)) { site <- pieces[1] } else if (site != pieces[1]) { - usage(paste0("inconsistent sites ", file, " should be ", site, "..nc")) + usage(paste0("incosistent sites ", file, " should be ", site, "..nc")) } if (is.na(startYear) || pieces[2] < startYear) { startYear <- pieces[2] } + if (is.na(endYear) || pieces[2] > endYear) { + endYear <- pieces[2] + } + startDate <- as.POSIXlt(paste0(startYear, "-01-01 00:00:00"), tz = "UTC") + endDate <- as.POSIXlt(paste0(endYear, "-12-31 23:59:59"), tz = "UTC") + } + if (is.na(site)) { site <- pieces[1] - year <- pieces[2] - file.copy(inputFile, file.path(cffolder, paste(site, year, "nc", sep="."))) - startDate <- as.POSIXlt(paste0(year,"-01-01 00:00:00"), tz = "UTC") - endDate <- as.POSIXlt(paste0(year,"-12-31 23:59:59"), tz = "UTC") + } else if (site != pieces[1]) { + usage(paste0("inconsistent sites ", file, " should be ", site, "..nc")) + } + if (is.na(startYear) || pieces[2] < startYear) { + startYear <- pieces[2] + } + site <- pieces[1] + year <- pieces[2] + file.copy(inputFile, file.path(cffolder, paste(site, year, "nc", sep = "."))) + startDate <- 
as.POSIXlt(paste0(year, "-01-01 00:00:00"), tz = "UTC") + endDate <- as.POSIXlt(paste0(year, "-12-31 23:59:59"), tz = "UTC") } else { usage("Did not recognize type of file") } diff --git a/contrib/browndog/PEcAn#LINKAGES_convert.R b/contrib/browndog/PEcAn#LINKAGES_convert.R index bd1c8357277..54365819041 100644 --- a/contrib/browndog/PEcAn#LINKAGES_convert.R +++ b/contrib/browndog/PEcAn#LINKAGES_convert.R @@ -82,8 +82,10 @@ if (grepl("pecan.zip$", args[1])) { # convert CF to output, in this case ed.zip library(PEcAn.LINKAGES) -result <- met2model.LINKAGES(cffolder, site, outfolder, start_date = startDate, end_date = endDate, - overwrite = overwrite) +result <- met2model.LINKAGES(cffolder, site, outfolder, + start_date = startDate, end_date = endDate, + overwrite = overwrite +) # next rename outfile to output file file.rename(result$file, outputFile) diff --git a/contrib/browndog/PEcAn#Sipnet_convert.R b/contrib/browndog/PEcAn#Sipnet_convert.R index 72a5ce133d1..f58b653c9bd 100755 --- a/contrib/browndog/PEcAn#Sipnet_convert.R +++ b/contrib/browndog/PEcAn#Sipnet_convert.R @@ -82,8 +82,10 @@ if (grepl("pecan.zip$", args[1])) { # convert CF to output, in this case ed.zip library(PEcAn.SIPNET) -result <- met2model.SIPNET(cffolder, site, outfolder, start_date = startDate, end_date = endDate, - overwrite = overwrite) +result <- met2model.SIPNET(cffolder, site, outfolder, + start_date = startDate, end_date = endDate, + overwrite = overwrite +) # next rename outfile to output file file.rename(result$file, outputFile) diff --git a/contrib/browndog/PEcAn#Site_convert.R b/contrib/browndog/PEcAn#Site_convert.R index b2b30593fd1..b3ef2ff754a 100755 --- a/contrib/browndog/PEcAn#Site_convert.R +++ b/contrib/browndog/PEcAn#Site_convert.R @@ -6,9 +6,9 @@ # Ameriflux # US-Dk3 # 35.9782 -# -79.0942 -# 2001-01-01 00:00:00 -# 2001-12-31 23:59:59 +# -79.0942 +# 2001-01-01 00:00:00 +# 2001-12-31 23:59:59 # # send all output to stdout (incl stderr) @@ -57,8 +57,10 @@ verbose <- FALSE rawfolder <- file.path(cacheDir, mettype, "raw") dir.create(rawfolder, showWarnings = FALSE, recursive = TRUE) if (mettype == "Ameriflux") { - do.call(paste0("download.", mettype), list(sitename, rawfolder, start_date, end_date, overwrite, - verbose)) + do.call(paste0("download.", mettype), list( + sitename, rawfolder, start_date, end_date, overwrite, + verbose + )) } else if (mettype == "NARR") { do.call(paste0("download.", mettype), list(rawfolder, start_date, end_date, overwrite)) } else { @@ -68,27 +70,31 @@ if (mettype == "Ameriflux") { # convert to CF cffolder <- file.path(cacheDir, mettype, "cf") dir.create(cffolder, showWarnings = FALSE, recursive = TRUE) -do.call(paste0("met2CF.", mettype), list(rawfolder, sitename, cffolder, start_date, end_date, overwrite, - verbose)) +do.call(paste0("met2CF.", mettype), list( + rawfolder, sitename, cffolder, start_date, end_date, overwrite, + verbose +)) if (mettype == "Ameriflux") { # gapfill gapfolder <- file.path(cacheDir, mettype, "gap") dir.create(gapfolder, showWarnings = FALSE, recursive = TRUE) metgapfill(cffolder, sitename, gapfolder, start_date, end_date, 0, overwrite, verbose) - + folder <- gapfolder outname <- sitename } else if (mettype == "NARR") { permutefolder <- file.path(cacheDir, mettype, "permute") dir.create(permutefolder, showWarnings = FALSE, recursive = TRUE) permute.nc(cffolder, mettype, permutefolder, start_date, end_date, overwrite, verbose) - + sitefolder <- file.path(cacheDir, mettype, "site", sitename) dir.create(sitefolder, showWarnings = FALSE, recursive = 
TRUE) - extract.nc(permutefolder, mettype, sitefolder, start_date, end_date, site_lat, site_lon, overwrite, - verbose) - + extract.nc( + permutefolder, mettype, sitefolder, start_date, end_date, site_lat, site_lon, overwrite, + verbose + ) + folder <- sitefolder outname <- mettype } @@ -104,7 +110,7 @@ if (grepl("\\.zip$", outputfile) || (end_year - start_year > 1)) { for (year in start_year:end_year) { files <- c(files, file.path(folder, paste(outname, year, "nc", sep = "."))) } - + # use intermediate file so it does not get marked as done until really done dir.create(tempDir, showWarnings = FALSE, recursive = TRUE) zipfile <- file.path(tempDir, "temp.zip") diff --git a/contrib/browndog/PEcAn.R b/contrib/browndog/PEcAn.R index fe9092152a1..e92acb15d42 100755 --- a/contrib/browndog/PEcAn.R +++ b/contrib/browndog/PEcAn.R @@ -55,12 +55,16 @@ site_lon <- ifelse(is.null(input$lon), NA, input$lon) # connect to DB and get site name con <- db.open(dbparams) # query site based on location -site <- db.query(paste0("SELECT id, sitename AS name FROM sites ORDER BY st_distance(geometry, ST_GeogFromText('POINT(", - site_lon, " ", site_lat, ")')) limit 1"), con) +site <- db.query(paste0( + "SELECT id, sitename AS name FROM sites ORDER BY st_distance(geometry, ST_GeogFromText('POINT(", + site_lon, " ", site_lat, ")')) limit 1" +), con) if (length(site) < 0) { # query site based on name - site <- db.query(paste0("SELECT id, sitename AS name FROM sites WHERE sitename LIKE '%", input$site, - "%'"), con) + site <- db.query(paste0( + "SELECT id, sitename AS name FROM sites WHERE sitename LIKE '%", input$site, + "%'" + ), con) } if (length(site) < 0) { # insert site info @@ -104,10 +108,12 @@ if (grepl("\\.zip$", outputfile) || (end_year - start_year > 1) && grepl("\\.pec # get list of files we need to zip files <- c() for (year in start_year:end_year) { - files <- c(files, files <- file.path(folder, list.files(folder, pattern = paste0("*", year, - "*")))) + files <- c(files, files <- file.path(folder, list.files(folder, pattern = paste0( + "*", year, + "*" + )))) } - + # use intermediate file so it does not get marked as done until really done dir.create(tempDir, showWarnings = FALSE, recursive = TRUE) zipfile <- file.path(tempDir, "temp.zip") diff --git a/docker/depends/pecan.depends.R b/docker/depends/pecan.depends.R index 0f3057d896d..a2f79925c02 100644 --- a/docker/depends/pecan.depends.R +++ b/docker/depends/pecan.depends.R @@ -2,20 +2,21 @@ # Don't use X11 for rgl Sys.setenv(RGL_USE_NULL = TRUE) -rlib <- Sys.getenv('R_LIBS_USER', '/usr/local/lib/R/site-library') +rlib <- Sys.getenv("R_LIBS_USER", "/usr/local/lib/R/site-library") Sys.setenv(RLIB = rlib) # Find the latest of several possible minimum package versions condense_version_requirements <- function(specs) { - if (all(specs == "*")) { + if (all(specs == "*")) { # any version is acceptable return("*") } specs <- unique(specs[specs != "*"]) versions <- package_version( - gsub("[^[:digit:].-]+", "", specs)) - + gsub("[^[:digit:].-]+", "", specs) + ) + if ((length(unique(versions)) > 1) && any(!grepl(">", specs))) { # Can't assume the latest version works for all, so give up. # We *could* write more to handle this case if needed, but it seems very rare: @@ -31,7 +32,8 @@ condense_version_requirements <- function(specs) { "if all PEcAn packages declare the same version. ", "Sorry, this function doesn't know which dependency caused this. 
", "To find it, search for these version strings in ", - "'pecan_package_dependencies.csv'.") + "'pecan_package_dependencies.csv'." + ) } specs[versions == max(versions)] } @@ -39,8 +41,8 @@ condense_version_requirements <- function(specs) { # Install or newer, # upgrading dependencies only if needed to satisfy stated version requirements ensure_version <- function(pkg, version) { - vers <- gsub('[^[:digit:].-]+', '', version) - cmp <- get(gsub('[^<>=]+', '', version)) + vers <- gsub("[^[:digit:].-]+", "", version) + cmp <- get(gsub("[^<>=]+", "", version)) ok <- requireNamespace(pkg, quietly = TRUE) && cmp(packageVersion(pkg), vers) if (!ok) { @@ -50,12 +52,12 @@ ensure_version <- function(pkg, version) { # (install_version doesn't resolve these when upgrade=FALSE) dep <- desc::desc_get_deps(system.file("DESCRIPTION", package = pkg)) dep <- dep[ - dep$type %in% c("Depends", "Imports", "LinkingTo") - & dep$version != "*" - & dep$package != "R",] + dep$type %in% c("Depends", "Imports", "LinkingTo") & + dep$version != "*" & + dep$package != "R", + ] invisible(Map(ensure_version, dep$package, dep$version)) } - } # Read list of dependencies. @@ -76,13 +78,14 @@ remotes::install_github(gh_repos, lib = rlib) uniq_deps <- tapply( all_deps$version, INDEX = all_deps$package, - FUN = condense_version_requirements) + FUN = condense_version_requirements +) # Install deps that declare no version restriction. # We'll install these with one plain old `install.packages()` call. unversioned <- names(uniq_deps[uniq_deps == "*"]) -missing <- unversioned[!(unversioned %in% installed.packages()[,'Package'])] +missing <- unversioned[!(unversioned %in% installed.packages()[, "Package"])] install.packages(missing, lib = rlib) @@ -92,8 +95,8 @@ install.packages(missing, lib = rlib) # it can't fill the version req from snapshot versions. 
# (Assumes our CRAN uses the same URL scheme as Posit package manager) options(repos = c( - getOption('repos'), - sub(r'(\d{4}-\d{2}-\d{2})', 'latest', getOption('repos')) + getOption("repos"), + sub(r'(\d{4}-\d{2}-\d{2})', "latest", getOption("repos")) )) versioned <- uniq_deps[uniq_deps != "*"] -invisible(Map(ensure_version, names(versioned), versioned)) \ No newline at end of file +invisible(Map(ensure_version, names(versioned), versioned)) diff --git a/models/basgra/R/read_restart.BASGRA.R b/models/basgra/R/read_restart.BASGRA.R index 4020b57bde1..7ba5ff1a92e 100644 --- a/models/basgra/R/read_restart.BASGRA.R +++ b/models/basgra/R/read_restart.BASGRA.R @@ -1,156 +1,156 @@ ##' @title Read restart function for SDA with BASGRA -##' +##' ##' @author Istem Fer -##' +##' ##' @inheritParams PEcAn.ModelName::read_restart.ModelName -##' +##' ##' @description Read Restart for BASGRA -##' +##' ##' @return X.vec vector of forecasts ##' @export -##' +##' read_restart.BASGRA <- function(outdir, runid, stop.time, settings, var.names, params) { - forecast <- list() # maybe have some checks here to make sure the first run is actually ran for the period you requested - + # Read ensemble output - ens <- PEcAn.utils::read.output(runid = runid, - outdir = file.path(outdir, runid), - start.year = lubridate::year(stop.time), - end.year = lubridate::year(stop.time), - variables = var.names) - + ens <- PEcAn.utils::read.output( + runid = runid, + outdir = file.path(outdir, runid), + start.year = lubridate::year(stop.time), + end.year = lubridate::year(stop.time), + variables = var.names + ) + last <- length(ens[[1]]) - + params$restart <- c() - + if ("LAI" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$LAI[last] ## m2 m-2 + forecast[[length(forecast) + 1]] <- ens$LAI[last] ## m2 m-2 names(forecast[[length(forecast)]]) <- c("LAI") } - + if ("fast_soil_pool_carbon_content" %in% var.names) { forecast[[length(forecast) + 1]] <- ens$fast_soil_pool_carbon_content[last] # kg C m-2 names(forecast[[length(forecast)]]) <- c("fast_soil_pool_carbon_content") } - + if ("slow_soil_pool_carbon_content" %in% var.names) { forecast[[length(forecast) + 1]] <- ens$slow_soil_pool_carbon_content[last] # kg C m-2 names(forecast[[length(forecast)]]) <- c("slow_soil_pool_carbon_content") } - + if ("soil_organic_nitrogen_content" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$soil_nitrogen_content[last] # kg N m-2 + forecast[[length(forecast) + 1]] <- ens$soil_nitrogen_content[last] # kg N m-2 names(forecast[[length(forecast)]]) <- c("soil_nitrogen_content") } - + if ("TotSoilCarb" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$TotSoilCarb[last] # kg C m-2 + forecast[[length(forecast) + 1]] <- ens$TotSoilCarb[last] # kg C m-2 names(forecast[[length(forecast)]]) <- c("TotSoilCarb") } - + if ("NEE" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$NEE[last] # kg C m-2 s-1 + forecast[[length(forecast) + 1]] <- ens$NEE[last] # kg C m-2 s-1 names(forecast[[length(forecast)]]) <- c("NEE") } - + if ("NPP" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$NPP[last] # kg C m-2 s-1 + forecast[[length(forecast) + 1]] <- ens$NPP[last] # kg C m-2 s-1 names(forecast[[length(forecast)]]) <- c("NPP") } - + if ("Qle" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$Qle[last] # W m-2 + forecast[[length(forecast) + 1]] <- ens$Qle[last] # W m-2 names(forecast[[length(forecast)]]) <- c("Qle") } - + if ("SoilResp" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$SoilResp[last] 
# kg C m-2 s-1 + forecast[[length(forecast) + 1]] <- ens$SoilResp[last] # kg C m-2 s-1 names(forecast[[length(forecast)]]) <- c("SoilResp") } - + if ("CropYield" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$CropYield[last] # kg DM m-2 TODO: check PEcAn standard units if DM is OK - names(forecast[[length(forecast)]]) <- c("CropYield") + forecast[[length(forecast) + 1]] <- ens$CropYield[last] # kg DM m-2 TODO: check PEcAn standard units if DM is OK + names(forecast[[length(forecast)]]) <- c("CropYield") } - + if ("litter_carbon_content" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$litter_carbon_content[last] # kg C m-2 + forecast[[length(forecast) + 1]] <- ens$litter_carbon_content[last] # kg C m-2 names(forecast[[length(forecast)]]) <- c("litter_carbon_content") } - + if ("stubble_carbon_content" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$stubble_carbon_content[last] # kg C m-2 + forecast[[length(forecast) + 1]] <- ens$stubble_carbon_content[last] # kg C m-2 names(forecast[[length(forecast)]]) <- c("stubble_carbon_content") } - + if ("stem_carbon_content" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$stem_carbon_content[last] # kg C m-2 + forecast[[length(forecast) + 1]] <- ens$stem_carbon_content[last] # kg C m-2 names(forecast[[length(forecast)]]) <- c("stem_carbon_content") } - + if ("root_carbon_content" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$root_carbon_content[last] # kg C m-2 + forecast[[length(forecast) + 1]] <- ens$root_carbon_content[last] # kg C m-2 names(forecast[[length(forecast)]]) <- c("root_carbon_content") } - + if ("reserve_carbon_content" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$reserve_carbon_content[last] # kg C m-2 + forecast[[length(forecast) + 1]] <- ens$reserve_carbon_content[last] # kg C m-2 names(forecast[[length(forecast)]]) <- c("reserve_carbon_content") } - + if ("leaf_carbon_content" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$leaf_carbon_content[last] # kg C m-2 + forecast[[length(forecast) + 1]] <- ens$leaf_carbon_content[last] # kg C m-2 names(forecast[[length(forecast)]]) <- c("leaf_carbon_content") } - + if ("dead_leaf_carbon_content" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$dead_leaf_carbon_content[last] # kg C m-2 + forecast[[length(forecast) + 1]] <- ens$dead_leaf_carbon_content[last] # kg C m-2 names(forecast[[length(forecast)]]) <- c("dead_leaf_carbon_content") } - + # I'm not deleting the following but updating the overall tiller_density in SDA worked better, so use it instead in the SDA.xml if ("nonelongating_generative_tiller" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$nonelongating_generative_tiller[last] # m-2 + forecast[[length(forecast) + 1]] <- ens$nonelongating_generative_tiller[last] # m-2 names(forecast[[length(forecast)]]) <- c("nonelongating_generative_tiller") } - + if ("elongating_generative_tiller" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$elongating_generative_tiller[last] # m-2 + forecast[[length(forecast) + 1]] <- ens$elongating_generative_tiller[last] # m-2 names(forecast[[length(forecast)]]) <- c("elongating_generative_tiller") } - + if ("nonelongating_vegetative_tiller" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$nonelongating_vegetative_tiller[last] # m-2 + forecast[[length(forecast) + 1]] <- ens$nonelongating_vegetative_tiller[last] # m-2 names(forecast[[length(forecast)]]) <- c("nonelongating_vegetative_tiller") - } - + } + if 
("tiller_density" %in% var.names) { forecast[[length(forecast) + 1]] <- ens$tiller_density[last] names(forecast[[length(forecast)]]) <- c("tiller_density") } - + if ("phenological_stage" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$phenological_stage[last] + forecast[[length(forecast) + 1]] <- ens$phenological_stage[last] names(forecast[[length(forecast)]]) <- c("phenological_stage") - } - + } + if ("SoilMoistFrac" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$SoilMoistFrac[last] + forecast[[length(forecast) + 1]] <- ens$SoilMoistFrac[last] names(forecast[[length(forecast)]]) <- c("SoilMoistFrac") - } + } if ("harvest_carbon_flux" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$harvest_carbon_flux[last] # kg C m-2 s-1 + forecast[[length(forecast) + 1]] <- ens$harvest_carbon_flux[last] # kg C m-2 s-1 names(forecast[[length(forecast)]]) <- c("harvest_carbon_flux") } - + PEcAn.logger::logger.info(runid) - + X_tmp <- list(X = unlist(forecast), params = params) - + return(X_tmp) - } # read_restart.BASGRA diff --git a/models/basgra/R/run_BASGRA.R b/models/basgra/R/run_BASGRA.R index 00d178e2c99..5098b0ae0fc 100644 --- a/models/basgra/R/run_BASGRA.R +++ b/models/basgra/R/run_BASGRA.R @@ -1,10 +1,9 @@ - -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' BASGRA wrapper function. Runs and writes model outputs in PEcAn standard. ##' ##' BASGRA is written in fortran is run through R by wrapper functions written by Marcel Van Oijen. ##' This function makes use of those wrappers but gives control of datastream in and out of the model to PEcAn. -##' With this function we skip model2netcdf, we can also skip met2model but keeping it for now. +##' With this function we skip model2netcdf, we can also skip met2model but keeping it for now. 
##' write.config.BASGRA modifies args of this function through template.job ##' then job.sh runs calls this function to run the model ##' @@ -21,575 +20,629 @@ ##' @param sitelon longitude of the site ##' @param co2_file path to daily atmospheric CO2 concentration file, optional, defaults to 350 ppm when missing ##' @param write_raw_output write raw output in csv or not -##' +##' ##' @export ##' @useDynLib PEcAn.BASGRA, .registration = TRUE ##' @author Istem Fer, Julius Vira -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# -run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_date, end_date, outdir, - sitelat, sitelon, co2_file = NULL, write_raw_output = FALSE){ - - start_date <- as.POSIXlt(start_date, tz = "UTC") - if(lubridate::hour(start_date) == 23){ +run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_date, end_date, outdir, + sitelat, sitelon, co2_file = NULL, write_raw_output = FALSE) { + start_date <- as.POSIXlt(start_date, tz = "UTC") + if (lubridate::hour(start_date) == 23) { # could be made more sophisticated but if it is specified to the hour this is probably coming from SDA start_date <- lubridate::ceiling_date(start_date, "day") } - end_date <- as.POSIXlt(end_date, tz = "UTC") - start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - - if(length(co2_file) > 0 && co2_file == "NULL") co2_file <- NULL + end_date <- as.POSIXlt(end_date, tz = "UTC") + start_year <- lubridate::year(start_date) + end_year <- lubridate::year(end_date) + + if (length(co2_file) > 0 && co2_file == "NULL") co2_file <- NULL ################################################################################ ### FUNCTIONS FOR READING WEATHER DATA mini_met2model_BASGRA <- function(file_path, start_date, start_year, end_date, end_year) { - # TODO: read partial years - + out.list <- list() - + ctr <- 1 - for(year in seq(start_year, end_year)) { - - if(year == start_year & year != end_year){ + for (year in seq(start_year, end_year)) { + if (year == start_year & year != end_year) { simdays <- seq(lubridate::yday(start_date), PEcAn.utils::days_in_year(year)) - }else if(year != start_year & year == end_year){ + } else if (year != start_year & year == end_year) { simdays <- seq(1, lubridate::yday(end_date)) - }else{ - if(year == start_year & year == end_year){ + } else { + if (year == start_year & year == end_year) { simdays <- seq(lubridate::yday(start_date), lubridate::yday(end_date)) - }else{ + } else { simdays <- seq_len(PEcAn.utils::days_in_year(year)) } - } - - NDAYS <- length(simdays) - NWEATHER <- as.integer(9) - matrix_weather <- matrix( 0., nrow = NDAYS, ncol = NWEATHER ) - - + + NDAYS <- length(simdays) + NWEATHER <- as.integer(9) + matrix_weather <- matrix(0., nrow = NDAYS, ncol = NWEATHER) + + # prepare data frame for BASGRA format, daily inputs, but doesn't have to be full year - - - matrix_weather[ ,1] <- rep(year, NDAYS) # year - matrix_weather[ ,2] <- simdays - if(endsWith(file_path, '.nc')){ + + matrix_weather[, 1] <- rep(year, NDAYS) # year + matrix_weather[, 2] <- simdays + + if (endsWith(file_path, ".nc")) { # we probably have a (near-term) forecast met old.file <- file_path - }else{ + } else { old.file <- file.path(dirname(file_path), paste(basename(file_path), year, "nc", sep = ".")) } - + if (file.exists(old.file)) { - ## open netcdf - nc <- 
ncdf4::nc_open(old.file) + nc <- ncdf4::nc_open(old.file) on.exit(ncdf4::nc_close(nc), add = TRUE) - + ## convert time to seconds sec <- nc$dim$time$vals sec <- PEcAn.utils::ud_convert(sec, unlist(strsplit(nc$dim$time$units, " "))[1], "seconds") - + dt <- diff(sec)[1] tstep <- round(86400 / dt) dt <- 86400 / tstep - + ind <- rep(simdays, each = tstep) - - if(unlist(strsplit(nc$dim$time$units, " "))[1] %in% c("days", "day")){ - #this should always be the case, butorigin just in case - origin_dt <- (as.POSIXct(unlist(strsplit(nc$dim$time$units, " "))[3], "%Y-%m-%d", tz="UTC") + 60*60*24) - dt + + if (unlist(strsplit(nc$dim$time$units, " "))[1] %in% c("days", "day")) { + # this should always be the case, butorigin just in case + origin_dt <- (as.POSIXct(unlist(strsplit(nc$dim$time$units, " "))[3], "%Y-%m-%d", tz = "UTC") + 60 * 60 * 24) - dt # below -dt means that midnights belong to the day that ends. This is consistent # with data files which are exclusive of the 1 Jan midnight + dt till 1 Jan next year. # ydays <- lubridate::yday(origin_dt + sec - dt) - ydays <- lubridate::yday(origin_dt + sec) + ydays <- lubridate::yday(origin_dt + sec) all_days <- origin_dt + sec } else { PEcAn.logger::logger.error("Check units of time in the weather data.") } rad <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") - gr <- rad * 0.0864 # W m-2 to MJ m-2 d-1 + gr <- rad * 0.0864 # W m-2 to MJ m-2 d-1 # temporary hack, not sure if it will generalize with other data products # function might need a splitting arg - gr <- gr[(ydays %in% simdays) & (lubridate::year(all_days) == year)] + gr <- gr[(ydays %in% simdays) & (lubridate::year(all_days) == year)] if (length(ind) > length(gr)) { - PEcAn.logger::logger.severe('The input does not cover the requested simulation period') + PEcAn.logger::logger.severe("The input does not cover the requested simulation period") } - matrix_weather[ ,3] <- round(tapply(gr, ind, mean, na.rm = TRUE), digits = 2) # irradiation (MJ m-2 d-1) - - Tair <- ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin - Tair <- Tair[(ydays %in% simdays) & (lubridate::year(all_days) == year)] + matrix_weather[, 3] <- round(tapply(gr, ind, mean, na.rm = TRUE), digits = 2) # irradiation (MJ m-2 d-1) + + Tair <- ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin + Tair <- Tair[(ydays %in% simdays) & (lubridate::year(all_days) == year)] Tair_C <- PEcAn.utils::ud_convert(Tair, "K", "degC") - - #in BASGRA tmin and tmax is only used to calculate the average daily temperature, see environment.f90 - t_dmean <- round(tapply(Tair_C, ind, mean, na.rm = TRUE), digits = 2) # maybe round these numbers + + # in BASGRA tmin and tmax is only used to calculate the average daily temperature, see environment.f90 + t_dmean <- round(tapply(Tair_C, ind, mean, na.rm = TRUE), digits = 2) # maybe round these numbers t_dmin <- round(tapply(Tair_C, ind, min, na.rm = TRUE), digits = 2) t_dmax <- round(tapply(Tair_C, ind, max, na.rm = TRUE), digits = 2) - matrix_weather[ ,4] <- t_dmin # mean temperature (degrees Celsius) - matrix_weather[ ,5] <- t_dmax # that's what they had in read_weather_Bioforsk - - RH <- ncdf4::ncvar_get(nc, "relative_humidity") # % + matrix_weather[, 4] <- t_dmin # mean temperature (degrees Celsius) + matrix_weather[, 5] <- t_dmax # that's what they had in read_weather_Bioforsk + + RH <- ncdf4::ncvar_get(nc, "relative_humidity") # % RH <- RH[(ydays %in% simdays) & (lubridate::year(all_days) == year)] - RH <- round(tapply(RH, ind, mean, na.rm = TRUE), digits = 2) - + RH <- 
round(tapply(RH, ind, mean, na.rm = TRUE), digits = 2) + # This is vapor pressure according to BASGRA.f90#L86 and environment.f90#L49 - matrix_weather[ ,6] <- round(exp(17.27*t_dmean/(t_dmean+239)) * 0.6108 * RH / 100, digits = 2) - + matrix_weather[, 6] <- round(exp(17.27 * t_dmean / (t_dmean + 239)) * 0.6108 * RH / 100, digits = 2) + # TODO: check these - Rain <- ncdf4::ncvar_get(nc, "precipitation_flux") # kg m-2 s-1 - Rain <- Rain[(ydays %in% simdays) & (lubridate::year(all_days) == year)] - raini <- tapply(Rain*86400, ind, mean, na.rm = TRUE) - matrix_weather[ ,7] <- round(raini, digits = 2) # precipitation (mm d-1) - + Rain <- ncdf4::ncvar_get(nc, "precipitation_flux") # kg m-2 s-1 + Rain <- Rain[(ydays %in% simdays) & (lubridate::year(all_days) == year)] + raini <- tapply(Rain * 86400, ind, mean, na.rm = TRUE) + matrix_weather[, 7] <- round(raini, digits = 2) # precipitation (mm d-1) + U <- try(ncdf4::ncvar_get(nc, "eastward_wind")) V <- try(ncdf4::ncvar_get(nc, "northward_wind")) - if(is.numeric(U) & is.numeric(V)){ - U <- U[(ydays %in% simdays) & (lubridate::year(all_days) == year)] - V <- V[(ydays %in% simdays) & (lubridate::year(all_days) == year)] - ws <- sqrt(U ^ 2 + V ^ 2) - }else{ + if (is.numeric(U) & is.numeric(V)) { + U <- U[(ydays %in% simdays) & (lubridate::year(all_days) == year)] + V <- V[(ydays %in% simdays) & (lubridate::year(all_days) == year)] + ws <- sqrt(U^2 + V^2) + } else { ws <- try(ncdf4::ncvar_get(nc, "wind_speed")) ws <- ws[(ydays %in% simdays) & (lubridate::year(all_days) == year)] if (is.numeric(ws)) { PEcAn.logger::logger.info("eastward_wind and northward_wind absent; using wind_speed") - }else{ + } else { PEcAn.logger::logger.severe("No variable found to calculate wind_speed") } } - - matrix_weather[ ,8] <- round(tapply(ws, ind, mean, na.rm = TRUE), digits = 2) # mean wind speed (m s-1) - + + matrix_weather[, 8] <- round(tapply(ws, ind, mean, na.rm = TRUE), digits = 2) # mean wind speed (m s-1) + # CO2 co2 <- try(ncdf4::ncvar_get(nc, "mole_fraction_of_carbon_dioxide_in_air")) - if(is.numeric(co2)){ + if (is.numeric(co2)) { co2 <- co2[(ydays %in% simdays) & (lubridate::year(all_days) == year)] / 1e-06 # ppm - co2 <- round(tapply(co2, ind, mean, na.rm = TRUE), digits = 2) - }else{ + co2 <- round(tapply(co2, ind, mean, na.rm = TRUE), digits = 2) + } else { co2 <- NA } - + # This is new BASGRA code that can be passed CO2 cals - matrix_weather[ ,9] <- co2 - + matrix_weather[, 9] <- co2 + ncdf4::nc_close(nc) } else { PEcAn.logger::logger.info("File for year", year, old.file, "not found. Skipping to next year") next } - + out.list[[ctr]] <- matrix_weather ctr <- ctr + 1 } # end for-loop around years - + matrix_weather <- do.call("rbind", out.list) - - #BASGRA wants the matrix_weather to be of 10000 x 8 matrix + + # BASGRA wants the matrix_weather to be of 10000 x 8 matrix NMAXDAYS <- as.integer(365000) - nmw <- nrow(matrix_weather) - if(nmw > NMAXDAYS){ + nmw <- nrow(matrix_weather) + if (nmw > NMAXDAYS) { matrix_weather <- matrix_weather[seq_len(NMAXDAYS), ] - PEcAn.logger::logger.info("BASGRA currently runs only", NMAXDAYS, - "simulation days. Limiting the run to the first ", NMAXDAYS, "days of the requested period.") - }else{ + PEcAn.logger::logger.info( + "BASGRA currently runs only", NMAXDAYS, + "simulation days. Limiting the run to the first ", NMAXDAYS, "days of the requested period." 
+ ) + } else { # append zeros at the end - matrix_weather <- rbind(matrix_weather, matrix( 0., nrow = (NMAXDAYS - nmw), ncol = 9 )) + matrix_weather <- rbind(matrix_weather, matrix(0., nrow = (NMAXDAYS - nmw), ncol = 9)) } - + return(matrix_weather) } - - - + + + ################################################################################ ### OUTPUT VARIABLES (from BASGRA scripts) outputNames <- c( - "Time" , "year" , "doy" , "DAVTMP" , "CLV" , "CLVD" , - "YIELD" , "CRES" , "CRT" , "CST" , "CSTUB" , "DRYSTOR" , - "Fdepth" , "LAI" , "LT50" , "O2" , "PHEN" , "ROOTD" , - "Sdepth" , "TANAER" , "TILG" , "TILV" , "WAL" , "WAPL" , - "WAPS" , "WAS" , "WETSTOR" , "DM" , "RES" , "PHENCR" , - "NELLVG" , "NELLVM" , "SLA" , "TILTOT" , "FRTILG" , "TILG1" , - "TILG2" , "RDRT" , "VERN" , - "CLITT" , "CSOMF", "CSOMS" , "NLITT" , "NSOMF", - "NSOMS" , "NMIN" , "PHOT" , "RplantAer" ,"Rsoil" , "NemissionN2O", - "NemissionNO", "Nfert", "Ndep" , "RWA" , - "NSH" , "GNSH" , "DNSH" , "HARVNSH" , "NCSH" , - "NCGSH" , "NCDSH", "NCHARVSH", - "fNgrowth","RGRTV","FSPOT","RESNOR","TV2TIL","NSHNOR","KNMAX","KN", # 63:70 - "DMLV" , "DMST" , "NSH_DMSH" , # 71:73 - "Nfert_TOT" , "YIELD_POT" , "DM_MAX" , # 74:76 - "F_PROTEIN" , "F_ASH" , # 77:78 - "F_WALL_DM" , "F_WALL_DMSH" , "F_WALL_LV" , "F_WALL_ST", # 79:82 - "F_DIGEST_DM", "F_DIGEST_DMSH" , # 83:84 - "F_DIGEST_LV", "F_DIGEST_ST" , "F_DIGEST_WALL", # 85:87 - "RDRS" , "Precipitation" , "Nleaching" , "NSHmob", # 88:91 - "NSHmobsoil" , "Nfixation" , "Nupt" , "Nmineralisation", # 92:95 - "NSOURCE" , "NSINK" , # 96:97 - "NRT" , "NCRT" , # 98:99 - "rNLITT" , "rNSOMF" , # 100:101 - "DAYL" , "EVAP" , "TRAN" , "FLITTC_LEAF", # 102:105 - "FLITTC_ROOT", "NEE" , "FHARVC" , "FRUNOFFC", # 106:109 - "CSOM_A" , "CSOM_W" , "CSOM_E" , "CSOM_N", # 110:113 - "CSOM_H" , "NSOM" , "TEMPR30" , "PRECIP30", # 114:117 - "FSOILAMDC" # 118 + "Time", "year", "doy", "DAVTMP", "CLV", "CLVD", + "YIELD", "CRES", "CRT", "CST", "CSTUB", "DRYSTOR", + "Fdepth", "LAI", "LT50", "O2", "PHEN", "ROOTD", + "Sdepth", "TANAER", "TILG", "TILV", "WAL", "WAPL", + "WAPS", "WAS", "WETSTOR", "DM", "RES", "PHENCR", + "NELLVG", "NELLVM", "SLA", "TILTOT", "FRTILG", "TILG1", + "TILG2", "RDRT", "VERN", + "CLITT", "CSOMF", "CSOMS", "NLITT", "NSOMF", + "NSOMS", "NMIN", "PHOT", "RplantAer", "Rsoil", "NemissionN2O", + "NemissionNO", "Nfert", "Ndep", "RWA", + "NSH", "GNSH", "DNSH", "HARVNSH", "NCSH", + "NCGSH", "NCDSH", "NCHARVSH", + "fNgrowth", "RGRTV", "FSPOT", "RESNOR", "TV2TIL", "NSHNOR", "KNMAX", "KN", # 63:70 + "DMLV", "DMST", "NSH_DMSH", # 71:73 + "Nfert_TOT", "YIELD_POT", "DM_MAX", # 74:76 + "F_PROTEIN", "F_ASH", # 77:78 + "F_WALL_DM", "F_WALL_DMSH", "F_WALL_LV", "F_WALL_ST", # 79:82 + "F_DIGEST_DM", "F_DIGEST_DMSH", # 83:84 + "F_DIGEST_LV", "F_DIGEST_ST", "F_DIGEST_WALL", # 85:87 + "RDRS", "Precipitation", "Nleaching", "NSHmob", # 88:91 + "NSHmobsoil", "Nfixation", "Nupt", "Nmineralisation", # 92:95 + "NSOURCE", "NSINK", # 96:97 + "NRT", "NCRT", # 98:99 + "rNLITT", "rNSOMF", # 100:101 + "DAYL", "EVAP", "TRAN", "FLITTC_LEAF", # 102:105 + "FLITTC_ROOT", "NEE", "FHARVC", "FRUNOFFC", # 106:109 + "CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N", # 110:113 + "CSOM_H", "NSOM", "TEMPR30", "PRECIP30", # 114:117 + "FSOILAMDC" # 118 ) - + outputUnits <- c( - "(y)" , "(y)" , "(d)" , "(degC)" , "(g C m-2)", "(g C m-2)", # 1: 6 - "(g DM m-2)", "(g C m-2)", "(g C m-2)", "(g C m-2)" , "(g C m-2)", "(mm)" , # 7:12 - "(m)" , "(m2 m-2)" , "(degC)" , "(mol m-2)" , "(-)" , "(m)" , # 13:18 - "(m)" , "(d)" , "(m-2)" , "(m-2)" , "(mm)" , "(mm)" , # 
19:24 - "(mm)" , "(mm)" , "(mm)" , "(g DM m-2)", "(g g-1)" , "(m d-1)" , # 25:30 - "(tiller-1)", "(d-1)" , "(m2 g-1)" , "(m-2)" , "(-)" , "(-)" , # 31:36 - "(-)" , "(d-1)" , "(-)" , # 37:39 - "(g C m-2)" , "(g C m-2)" , "(g C m-2)" , "(g N m-2)" , "(g N m-2)", # 40:44 - "(g N m-2)" , "(g N m-2)" , "(g C m-2 d-1)", "(g N m-2 d-1)", # 45:48 - "(g C m-2 d-1)", "(g C m-2 d-1)", # 49:50 - "(g N m-2 d-1)", "(g N m-2 d-1)", "(g N m-2 d-1)", "(-)" , # 51:54 - "(g N m-2)" , "(g N m-2 d-1)", "(g N m-2 d-1)", "(g N m-2 d-1)", "(-)" , # 55:59 - "(-)" , "(-)" , "(-)" , # 60:62 - "(-)", "(d-1)", "(-)", "(-)", "(d-1)", "(-)", "(m2 m-2)", "(m2 m-2)", # 63:70 - "(g DM m-2)" , "(g DM m-2)" , "(g N g-1 DM)" , # 71:73 - "(g N m-2)" , "(g DM m-2)" , "(g DM m-2)" , # 74:76 - "(g g-1 DM)" , "(g g-1 DM)" , # 77:78 - "(g g-1 DM)" , "(g g-1 DM)" , "(g g-1 DM)" , "(g g-1 DM)" , # 79:82 - "(-)" , "(-)" , # 83:84 - "(-)" , "(-)" , "(-)" , # 85:87 - "(d-1)" , "(mm d-1)" , "(g N m-2 d-1)" , "(g N m-2 d-1)", # 88:91 - "(g N m-2 d-1)", "(g N m-2 d-1)", "(g N m-2 d-1)" , "(g N m-2 d-1)", # 92:95 - "(g N m-2 d-1)", "(g N m-2 d-1)", # 96:97 - "(g N m-2)" , "(g N g-1 C)" , # 98:99 - "(g N m-2)" , "(g N g-1 C)" , # 100:101 - "(d d-1)" , "(mm d-1)" , "(mm d-1)" , "(g C m-2 d-1)", # 102:105 - "(g C m-2 d-1)", "(g C m-2 d-1)", "(g C m-2 d-1)" , "(g C m-2 d-1)", # 106:109 - "(g C m-2)" , "(g C m-2)" , "(g C m-2)" , "(g C m-2)", # 110:113 - "(g C m-2)" , "(g N m-2)" , "(degC)" , "(mm)", # 114:117 - "(g C m-2 d-1)" # 118 + "(y)", "(y)", "(d)", "(degC)", "(g C m-2)", "(g C m-2)", # 1: 6 + "(g DM m-2)", "(g C m-2)", "(g C m-2)", "(g C m-2)", "(g C m-2)", "(mm)", # 7:12 + "(m)", "(m2 m-2)", "(degC)", "(mol m-2)", "(-)", "(m)", # 13:18 + "(m)", "(d)", "(m-2)", "(m-2)", "(mm)", "(mm)", # 19:24 + "(mm)", "(mm)", "(mm)", "(g DM m-2)", "(g g-1)", "(m d-1)", # 25:30 + "(tiller-1)", "(d-1)", "(m2 g-1)", "(m-2)", "(-)", "(-)", # 31:36 + "(-)", "(d-1)", "(-)", # 37:39 + "(g C m-2)", "(g C m-2)", "(g C m-2)", "(g N m-2)", "(g N m-2)", # 40:44 + "(g N m-2)", "(g N m-2)", "(g C m-2 d-1)", "(g N m-2 d-1)", # 45:48 + "(g C m-2 d-1)", "(g C m-2 d-1)", # 49:50 + "(g N m-2 d-1)", "(g N m-2 d-1)", "(g N m-2 d-1)", "(-)", # 51:54 + "(g N m-2)", "(g N m-2 d-1)", "(g N m-2 d-1)", "(g N m-2 d-1)", "(-)", # 55:59 + "(-)", "(-)", "(-)", # 60:62 + "(-)", "(d-1)", "(-)", "(-)", "(d-1)", "(-)", "(m2 m-2)", "(m2 m-2)", # 63:70 + "(g DM m-2)", "(g DM m-2)", "(g N g-1 DM)", # 71:73 + "(g N m-2)", "(g DM m-2)", "(g DM m-2)", # 74:76 + "(g g-1 DM)", "(g g-1 DM)", # 77:78 + "(g g-1 DM)", "(g g-1 DM)", "(g g-1 DM)", "(g g-1 DM)", # 79:82 + "(-)", "(-)", # 83:84 + "(-)", "(-)", "(-)", # 85:87 + "(d-1)", "(mm d-1)", "(g N m-2 d-1)", "(g N m-2 d-1)", # 88:91 + "(g N m-2 d-1)", "(g N m-2 d-1)", "(g N m-2 d-1)", "(g N m-2 d-1)", # 92:95 + "(g N m-2 d-1)", "(g N m-2 d-1)", # 96:97 + "(g N m-2)", "(g N g-1 C)", # 98:99 + "(g N m-2)", "(g N g-1 C)", # 100:101 + "(d d-1)", "(mm d-1)", "(mm d-1)", "(g C m-2 d-1)", # 102:105 + "(g C m-2 d-1)", "(g C m-2 d-1)", "(g C m-2 d-1)", "(g C m-2 d-1)", # 106:109 + "(g C m-2)", "(g C m-2)", "(g C m-2)", "(g C m-2)", # 110:113 + "(g C m-2)", "(g N m-2)", "(degC)", "(mm)", # 114:117 + "(g C m-2 d-1)" # 118 ) - - NOUT <- as.integer( length(outputNames) ) - - if (length(outputUnits) != NOUT) { PEcAn.logger::logger.severe('#outputNames != #outputUnits') } - + + NOUT <- as.integer(length(outputNames)) + + if (length(outputUnits) != NOUT) { + PEcAn.logger::logger.severe("#outputNames != #outputUnits") + } + ############################# SITE 
CONDITIONS ######################## - # this part corresponds to initialise_BASGRA_***.R functions - - year_start <- as.integer(start_year) - doy_start <- as.integer(lubridate::yday(start_date)) - + # this part corresponds to initialise_BASGRA_***.R functions + + year_start <- as.integer(start_year) + doy_start <- as.integer(lubridate::yday(start_date)) + matrix_weather <- mini_met2model_BASGRA(run_met, start_date, start_year, end_date, end_year) - - NDAYS <- as.integer(sum(matrix_weather[,1] != 0)) - - if(!is.null(co2_file)){ # if a separate co2 file was passed use that - co2val <- utils::read.table(co2_file, header=TRUE, sep = ",") - - weird_line <- which(!paste0(matrix_weather[1:NDAYS,1], matrix_weather[1:NDAYS,2]) %in% paste0(co2val[,1], co2val[,2])) - if(length(weird_line)!=0){ - matrix_weather <- matrix_weather[-weird_line,] - NDAYS <- NDAYS-length(weird_line) + + NDAYS <- as.integer(sum(matrix_weather[, 1] != 0)) + + if (!is.null(co2_file)) { # if a separate co2 file was passed use that + co2val <- utils::read.table(co2_file, header = TRUE, sep = ",") + + weird_line <- which(!paste0(matrix_weather[1:NDAYS, 1], matrix_weather[1:NDAYS, 2]) %in% paste0(co2val[, 1], co2val[, 2])) + if (length(weird_line) != 0) { + matrix_weather <- matrix_weather[-weird_line, ] + NDAYS <- NDAYS - length(weird_line) } - matrix_weather[1:NDAYS,9] <- co2val[paste0(co2val[,1], co2val[,2]) %in% paste0(matrix_weather[1:NDAYS,1], matrix_weather[1:NDAYS,2]),3] - }else if(all(is.na(matrix_weather[1:NDAYS,9]))){ # this means there were no CO2 in the netcdf as well + matrix_weather[1:NDAYS, 9] <- co2val[paste0(co2val[, 1], co2val[, 2]) %in% paste0(matrix_weather[1:NDAYS, 1], matrix_weather[1:NDAYS, 2]), 3] + } else if (all(is.na(matrix_weather[1:NDAYS, 9]))) { # this means there were no CO2 in the netcdf as well PEcAn.logger::logger.info("No atmospheric CO2 concentration was provided. 
Using default 420 ppm.") - matrix_weather[1:NDAYS,9] <- 420 + matrix_weather[1:NDAYS, 9] <- 420 } # checking/debugging met - # write.table(matrix_weather[1:NDAYS,], file=paste0(outdir,"/clim",start_date,".",substr(end_date, 1,10),".csv"), + # write.table(matrix_weather[1:NDAYS,], file=paste0(outdir,"/clim",start_date,".",substr(end_date, 1,10),".csv"), # sep=",", row.names = FALSE, col.names=FALSE) - - calendar_fert <- matrix( 0, nrow=300, ncol=6) - + + calendar_fert <- matrix(0, nrow = 300, ncol = 6) + # read in fertilization f_days <- as.matrix(utils::read.table(site_fertilize, header = TRUE, sep = ",")) if (ncol(f_days) == 3) { # old-style fertilization file - calendar_fert[1:nrow(f_days),1:3] <- f_days + calendar_fert[1:nrow(f_days), 1:3] <- f_days } else { if (ncol(f_days) != 6) { - PEcAn.logger::logger.severe(sprintf('Wrong number of columns (%i) in fertilization file', ncol(f_days))) + PEcAn.logger::logger.severe(sprintf("Wrong number of columns (%i) in fertilization file", ncol(f_days))) } - columns <- c('year', 'doy', 'Nmin', 'Norg', 'C_soluble', 'C_compost') - calendar_fert[1:nrow(f_days),] <- f_days[,columns] + columns <- c("year", "doy", "Nmin", "Norg", "C_soluble", "C_compost") + calendar_fert[1:nrow(f_days), ] <- f_days[, columns] } - - calendar_Ndep <- matrix( 0, nrow=300, ncol=3 ) - #calendar_Ndep[1,] <- c(1900, 1,0) - #calendar_Ndep[2,] <- c(2100, 366, 0) - + + calendar_Ndep <- matrix(0, nrow = 300, ncol = 3) + # calendar_Ndep[1,] <- c(1900, 1,0) + # calendar_Ndep[2,] <- c(2100, 366, 0) + # hardcoding these for now to be 0, should be able to modify later on # calendar_fert[3,] <- c( 2001, 123, 0*1000/ 10000 ) # 0 kg N ha-1 applied on day 123 - calendar_Ndep[1,] <- c( 1900, 1, 0*1000/(10000*365) ) # 0 kg N ha-1 y-1 N-deposition in 1900 - calendar_Ndep[2,] <- c( 1980, 366, 0*1000/(10000*365) ) # 0 kg N ha-1 y-1 N-deposition in 1980 - calendar_Ndep[3,] <- c( 2100, 366, 0*1000/(10000*365) ) # 0 kg N ha-1 y-1 N-deposition in 2100 - - harvest_params <- matrix(0.0, nrow=300, ncol=2) + calendar_Ndep[1, ] <- c(1900, 1, 0 * 1000 / (10000 * 365)) # 0 kg N ha-1 y-1 N-deposition in 1900 + calendar_Ndep[2, ] <- c(1980, 366, 0 * 1000 / (10000 * 365)) # 0 kg N ha-1 y-1 N-deposition in 1980 + calendar_Ndep[3, ] <- c(2100, 366, 0 * 1000 / (10000 * 365)) # 0 kg N ha-1 y-1 N-deposition in 2100 + + harvest_params <- matrix(0.0, nrow = 300, ncol = 2) df_harvest <- utils::read.csv(site_harvest) n_events <- nrow(df_harvest) - allowed_harv_colnames <- c('year', 'doy', 'CLAIV', 'cut_only') + allowed_harv_colnames <- c("year", "doy", "CLAIV", "cut_only") if (!all(colnames(df_harvest) %in% allowed_harv_colnames)) { - PEcAn.logger::logger.severe(c('Bad column names in harvest file: ', colnames(df_harvest))) + PEcAn.logger::logger.severe(c("Bad column names in harvest file: ", colnames(df_harvest))) } - days_harvest <- matrix(as.integer(-1), nrow= 300, ncol = 2) + days_harvest <- matrix(as.integer(-1), nrow = 300, ncol = 2) if (n_events > 0) { - days_harvest[1:n_events,1:2] <- as.matrix(df_harvest[,c('year', 'doy')]) + days_harvest[1:n_events, 1:2] <- as.matrix(df_harvest[, c("year", "doy")]) } - if ('CLAIV' %in% colnames(df_harvest)) { - harvest_params[1:n_events,1] <- df_harvest$CLAIV + if ("CLAIV" %in% colnames(df_harvest)) { + harvest_params[1:n_events, 1] <- df_harvest$CLAIV } else { # default - harvest_params[1:n_events,1] <- run_params[names(run_params) == "CLAIV"] + harvest_params[1:n_events, 1] <- run_params[names(run_params) == "CLAIV"] } - if ('cut_only' %in% colnames(df_harvest)) { - 
harvest_params[1:n_events,2] <- df_harvest$cut_only + if ("cut_only" %in% colnames(df_harvest)) { + harvest_params[1:n_events, 2] <- df_harvest$cut_only } else { - harvest_params[1:n_events,2] <- 0.0 + harvest_params[1:n_events, 2] <- 0.0 } # read in harvest days - #h_days <- as.matrix(utils::read.table(site_harvest, header = TRUE, sep = ",")) - #days_harvest[1:nrow(h_days),1:2] <- h_days[,1:2] - + # h_days <- as.matrix(utils::read.table(site_harvest, header = TRUE, sep = ",")) + # days_harvest[1:nrow(h_days),1:2] <- h_days[,1:2] + # This is a management specific parameter # CLAIV is used to determine LAI remaining after harvest # I modified BASGRA code to use different values for different harvests # I'll pass it via harvest file as the 3rd column # but just in case users forgot to add the third column to the harvest file: - #if(ncol(h_days) == 3){ + # if(ncol(h_days) == 3){ # days_harvest[1:nrow(h_days),3] <- h_days[,3]*10 # as.integer - #}else{ + # }else{ # PEcAn.logger::logger.info("CLAIV not provided via harvest file. Using defaults.") - # days_harvest[1:nrow(h_days),3] <- run_params[names(run_params) == "CLAIV"] - #} - #days_harvest <- as.integer(days_harvest) - + # days_harvest[1:nrow(h_days),3] <- run_params[names(run_params) == "CLAIV"] + # } + # days_harvest <- as.integer(days_harvest) + # run model - NPARAMS = as.integer(160) # from set_params.f90 + NPARAMS <- as.integer(160) # from set_params.f90 if (length(run_params) != NPARAMS) { - PEcAn.logger::logger.severe(sprintf('%i parameters required, %i given', NPARAMS, length(run_params))) + PEcAn.logger::logger.severe(sprintf("%i parameters required, %i given", NPARAMS, length(run_params))) } if (NOUT < 118) { # from BASGRA.f90 PEcAn.logger::logger.severe("at least 118 parameters required,", NOUT, "given") } - output <- .Fortran('BASGRA', - run_params, - matrix_weather, - calendar_fert, - calendar_Ndep, - as.integer(days_harvest), - harvest_params, - NPARAMS, - NDAYS, - NOUT, - matrix(0, NDAYS, NOUT))[[10]] + output <- .Fortran( + "BASGRA", + run_params, + matrix_weather, + calendar_fert, + calendar_Ndep, + as.integer(days_harvest), + harvest_params, + NPARAMS, + NDAYS, + NOUT, + matrix(0, NDAYS, NOUT) + )[[10]] # for now a hack to write other states out # save(output, file = file.path(outdir, "output_basgra.Rdata")) if (write_raw_output) { utils::write.csv(stats::setNames(as.data.frame(output), outputNames), file.path(outdir, "output_basgra.csv")) } - last_vals <- output[nrow(output),] + last_vals <- output[nrow(output), ] names(last_vals) <- outputNames save(last_vals, file = file.path(outdir, "last_vals_basgra.Rdata")) - + ############################# WRITE OUTPUTS ########################### # writing model outputs already in standard format - + # only LAI and CropYield for now sec_in_day <- 86400 - + years <- seq(start_year, end_year) # Having the Yasso soil affects how some C pools are aggregated have_yasso <- run_params[137] > 0 - + for (y in years) { - - thisyear <- output[ , outputNames == "year"] == y - + thisyear <- output[, outputNames == "year"] == y + outlist <- list() - outlist[[length(outlist)+1]] <- output[thisyear, which(outputNames == "LAI")] # LAI in (m2 m-2) - - CropYield <- output[thisyear, which(outputNames == "YIELD_POT")] # (g DM m-2) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(CropYield, "g m-2", "kg m-2") - - clitt <- output[thisyear, which(outputNames == "CLITT")] # (g C m-2) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(clitt, "g m-2", "kg m-2") - - cstub <- output[thisyear, 
which(outputNames == "CSTUB")] # (g C m-2) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(cstub, "g m-2", "kg m-2") - - cst <- output[thisyear, which(outputNames == "CST")] # (g C m-2) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(cst, "g m-2", "kg m-2") - - crt <- output[thisyear, which(outputNames == "CRT")] # (g C m-2) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(crt, "g m-2", "kg m-2") - - cres <- output[thisyear, which(outputNames == "CRES")] # (g C m-2) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(cres, "g m-2", "kg m-2") - - clv <- output[thisyear, which(outputNames == "CLV")] # (g C m-2) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(clv, "g m-2", "kg m-2") - - clvd <- output[thisyear, which(outputNames == "CLVD")] # (g C m-2) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(clvd, "g m-2", "kg m-2") + outlist[[length(outlist) + 1]] <- output[thisyear, which(outputNames == "LAI")] # LAI in (m2 m-2) + + CropYield <- output[thisyear, which(outputNames == "YIELD_POT")] # (g DM m-2) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(CropYield, "g m-2", "kg m-2") + + clitt <- output[thisyear, which(outputNames == "CLITT")] # (g C m-2) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(clitt, "g m-2", "kg m-2") + + cstub <- output[thisyear, which(outputNames == "CSTUB")] # (g C m-2) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(cstub, "g m-2", "kg m-2") + + cst <- output[thisyear, which(outputNames == "CST")] # (g C m-2) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(cst, "g m-2", "kg m-2") + + crt <- output[thisyear, which(outputNames == "CRT")] # (g C m-2) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(crt, "g m-2", "kg m-2") + + cres <- output[thisyear, which(outputNames == "CRES")] # (g C m-2) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(cres, "g m-2", "kg m-2") + + clv <- output[thisyear, which(outputNames == "CLV")] # (g C m-2) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(clv, "g m-2", "kg m-2") + + clvd <- output[thisyear, which(outputNames == "CLVD")] # (g C m-2) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(clvd, "g m-2", "kg m-2") if (have_yasso) { - csomf <- rowSums(output[thisyear, outputNames %in% c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N'), drop=FALSE]) # (g C m-2) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(csomf, "g m-2", "kg m-2") - csoms <- output[thisyear, outputNames == "CSOM_H"] # (g C m-2) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(csoms, "g m-2", "kg m-2") - nsom <- output[thisyear, outputNames == "NSOM"] # (g N m-2) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(nsom, "g m-2", "kg m-2") + csomf <- rowSums(output[thisyear, outputNames %in% c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N"), drop = FALSE]) # (g C m-2) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(csomf, "g m-2", "kg m-2") + csoms <- output[thisyear, outputNames == "CSOM_H"] # (g C m-2) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(csoms, "g m-2", "kg m-2") + nsom <- output[thisyear, outputNames == "NSOM"] # (g N m-2) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(nsom, "g m-2", "kg m-2") } else { - csomf <- output[thisyear, which(outputNames == "CSOMF")] # (g C m-2) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(csomf, "g m-2", "kg m-2") - csoms <- output[thisyear, which(outputNames == "CSOMS")] # (g C m-2) - outlist[[length(outlist)+1]] <- 
PEcAn.utils::ud_convert(csoms, "g m-2", "kg m-2") - nsomf <- output[thisyear, outputNames == "NSOMF"] # (g N m-2) - nsoms <- output[thisyear, outputNames == "NSOMS"] # (g N m-2) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(nsomf+nsoms, "g m-2", "kg m-2") + csomf <- output[thisyear, which(outputNames == "CSOMF")] # (g C m-2) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(csomf, "g m-2", "kg m-2") + csoms <- output[thisyear, which(outputNames == "CSOMS")] # (g C m-2) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(csoms, "g m-2", "kg m-2") + nsomf <- output[thisyear, outputNames == "NSOMF"] # (g N m-2) + nsoms <- output[thisyear, outputNames == "NSOMS"] # (g N m-2) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(nsomf + nsoms, "g m-2", "kg m-2") } - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(csomf + csoms, "g m-2", "kg m-2") - outlist[[length(outlist)+1]] <- output[thisyear, which(outputNames == "TILG1")] - outlist[[length(outlist)+1]] <- output[thisyear, which(outputNames == "TILG2")] - outlist[[length(outlist)+1]] <- output[thisyear, which(outputNames == "TILV")] - outlist[[length(outlist)+1]] <- output[thisyear, which(outputNames == "PHEN")] - - outlist[[length(outlist) + 1]] <- output[thisyear, which(outputNames == "TILG1")] + + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(csomf + csoms, "g m-2", "kg m-2") + outlist[[length(outlist) + 1]] <- output[thisyear, which(outputNames == "TILG1")] + outlist[[length(outlist) + 1]] <- output[thisyear, which(outputNames == "TILG2")] + outlist[[length(outlist) + 1]] <- output[thisyear, which(outputNames == "TILV")] + outlist[[length(outlist) + 1]] <- output[thisyear, which(outputNames == "PHEN")] + + outlist[[length(outlist) + 1]] <- output[thisyear, which(outputNames == "TILG1")] + output[thisyear, which(outputNames == "TILG2")] + output[thisyear, which(outputNames == "TILV")] - + # Soil Respiration in kgC/m2/s - rsoil <- output[thisyear, which(outputNames == "Rsoil")] # (g C m-2 d-1) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(rsoil, "g m-2", "kg m-2") / sec_in_day - + rsoil <- output[thisyear, which(outputNames == "Rsoil")] # (g C m-2 d-1) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(rsoil, "g m-2", "kg m-2") / sec_in_day + # Autotrophic Respiration in kgC/m2/s - rplantaer <- output[thisyear, which(outputNames == "RplantAer")] # (g C m-2 d-1) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(rplantaer, "g m-2", "kg m-2") / sec_in_day - + rplantaer <- output[thisyear, which(outputNames == "RplantAer")] # (g C m-2 d-1) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(rplantaer, "g m-2", "kg m-2") / sec_in_day + # NEE in kgC/m2/s - # NOTE: According to BASGRA_N documentation: LUEMXQ (used in PHOT calculation) accounts for carbon lost to maintenance respiration, + # NOTE: According to BASGRA_N documentation: LUEMXQ (used in PHOT calculation) accounts for carbon lost to maintenance respiration, # but not growth respiration. 
So, photosynthesis rate is gross photosynthesis minus maintenance respiration # So this is not really GPP, but it wasn't obvious to add what to get GPP, but I just want NEE for now, so it's OK - phot <- output[thisyear, which(outputNames == "PHOT")] # (g C m-2 d-1) - nee <- -1.0 * (phot - (rsoil + rplantaer)) - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(nee, "g m-2", "kg m-2") / sec_in_day - + phot <- output[thisyear, which(outputNames == "PHOT")] # (g C m-2 d-1) + nee <- -1.0 * (phot - (rsoil + rplantaer)) + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(nee, "g m-2", "kg m-2") / sec_in_day + # again this is not technically GPP - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(phot, "g m-2", "kg m-2") / sec_in_day - - # NPP - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(nee - rsoil, "g m-2", "kg m-2") / sec_in_day - + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(phot, "g m-2", "kg m-2") / sec_in_day + + # NPP + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(nee - rsoil, "g m-2", "kg m-2") / sec_in_day + # Qle W/m2 - outlist[[length(outlist)+1]] <- ( output[thisyear, which(outputNames == "EVAP")] + output[thisyear, which(outputNames == "TRAN")] * - PEcAn.data.atmosphere::get.lv()) / sec_in_day - + outlist[[length(outlist) + 1]] <- (output[thisyear, which(outputNames == "EVAP")] + output[thisyear, which(outputNames == "TRAN")] * + PEcAn.data.atmosphere::get.lv()) / sec_in_day + # SoilMoist (!!! only liquid water !!!) kg m-2 # during the groowing season its depth will mainly be equal to the rooting depth, but during winter its depth will be ROOTD-Fdepth soilm <- output[thisyear, which(outputNames == "WAL")] # mm - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(soilm, "mm", "m") * 1000 # (kg m-3) density of water in soil + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert(soilm, "mm", "m") * 1000 # (kg m-3) density of water in soil # WCL = WAL*0.001 / (ROOTD-Fdepth) Water concentration in non-frozen soil # need to think about ice! 
but the sensors maybe don't measure that - ROOTD <- output[thisyear, which(outputNames == "ROOTD")] - Fdepth <- output[thisyear, which(outputNames == "Fdepth")] - outlist[[length(outlist)+1]] <- soilm * 0.001 / (ROOTD - Fdepth) - + ROOTD <- output[thisyear, which(outputNames == "ROOTD")] + Fdepth <- output[thisyear, which(outputNames == "Fdepth")] + outlist[[length(outlist) + 1]] <- soilm * 0.001 / (ROOTD - Fdepth) + # Additional C fluxes - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(output[thisyear, outputNames == "FLITTC_LEAF"], - "g m-2", "kg m-2") / sec_in_day - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(output[thisyear, outputNames == "FLITTC_ROOT"], - "g m-2", "kg m-2") / sec_in_day - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(output[thisyear, outputNames == "FHARVC"], - "g m-2", "kg m-2") / sec_in_day - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(output[thisyear, outputNames == "NEE"], - "g m-2", "kg m-2") / sec_in_day - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(output[thisyear, outputNames == "FRUNOFFC"], - "g m-2", "kg m-2") / sec_in_day - outlist[[length(outlist)+1]] <- PEcAn.utils::ud_convert(output[thisyear, outputNames == "FSOILAMDC"], - "g m-2", "kg m-2") / sec_in_day - outlist[[length(outlist)+1]] <- output[thisyear, outputNames == "TEMPR30"] - outlist[[length(outlist)+1]] <- output[thisyear, outputNames == "PRECIP30"] - - + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert( + output[thisyear, outputNames == "FLITTC_LEAF"], + "g m-2", "kg m-2" + ) / sec_in_day + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert( + output[thisyear, outputNames == "FLITTC_ROOT"], + "g m-2", "kg m-2" + ) / sec_in_day + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert( + output[thisyear, outputNames == "FHARVC"], + "g m-2", "kg m-2" + ) / sec_in_day + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert( + output[thisyear, outputNames == "NEE"], + "g m-2", "kg m-2" + ) / sec_in_day + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert( + output[thisyear, outputNames == "FRUNOFFC"], + "g m-2", "kg m-2" + ) / sec_in_day + outlist[[length(outlist) + 1]] <- PEcAn.utils::ud_convert( + output[thisyear, outputNames == "FSOILAMDC"], + "g m-2", "kg m-2" + ) / sec_in_day + outlist[[length(outlist) + 1]] <- output[thisyear, outputNames == "TEMPR30"] + outlist[[length(outlist) + 1]] <- output[thisyear, outputNames == "PRECIP30"] + + # ******************** Declare netCDF dimensions and variables ********************# - t <- ncdf4::ncdim_def(name = "time", - units = paste0("days since ", y, "-01-01 00:00:00"), - matrix_weather[matrix_weather[,1] == y, 2], # allow partial years, this info is already in matrix_weather - calendar = "standard", - unlim = TRUE) - - + t <- ncdf4::ncdim_def( + name = "time", + units = paste0("days since ", y, "-01-01 00:00:00"), + matrix_weather[matrix_weather[, 1] == y, 2], # allow partial years, this info is already in matrix_weather + calendar = "standard", + unlim = TRUE + ) + + lat <- ncdf4::ncdim_def("lat", "degrees_north", vals = as.numeric(sitelat), longname = "station_latitude") lon <- ncdf4::ncdim_def("lon", "degrees_east", vals = as.numeric(sitelon), longname = "station_longitude") - + dims <- list(lon = lon, lat = lat, time = t) - + nc_var <- list() - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("LAI", dims) - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("CropYield", dims) - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("litter_carbon_content", dims) - 
nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("stubble_carbon_content", units = "kg C m-2", dim = dims, missval = -999, - longname = "Stubble Carbon Content") - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("stem_carbon_content", units = "kg C m-2", dim = dims, missval = -999, - longname = "Stem Carbon Content") - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("root_carbon_content", dims) - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("reserve_carbon_content", units = "kg C m-2", dim = dims, missval = -999, - longname = "Reserve Carbon Content") - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("leaf_carbon_content", dims) - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("dead_leaf_carbon_content", units = "kg C m-2", dim = dims, missval = -999, - longname = "Dead Leaf Carbon Content") - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("fast_soil_pool_carbon_content", dims) - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("slow_soil_pool_carbon_content", dims) - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("soil_organic_nitrogen_content", units = "kg N m-2", dim = dims, missval = -999, - longname = "Soil Organic Nitrogen Content by Layer ") - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("TotSoilCarb", dims) - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("nonelongating_generative_tiller", units = "m-2", dim = dims, missval = -999, - longname = "Non-elongating generative tiller density") - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("elongating_generative_tiller", units = "m-2", dim = dims, missval = -999, - longname = "Elongating generative tiller density") - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("nonelongating_vegetative_tiller", units = "m-2", dim = dims, missval = -999, - longname = "Non-elongating vegetative tiller density") - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("phenological_stage", units = "-", dim = dims, missval = -999, - longname = "Phenological stage") - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("tiller_density", units = "m-2", dim = dims, missval = -999, - longname = "Tiller density") - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("SoilResp", dims) - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("AutoResp", dims) - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("NEE", dims) - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("GPP", dims) - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("NPP", dims) - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("Qle", dims) - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("SoilMoist", units = "kg m-2", dim = dims, missval = -999, - longname = "Average Layer Soil Moisture") - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("SoilMoistFrac", units = "m3 m-3", dim = dims, missval = -999, - longname = "Average Layer Fraction of Saturation") - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("leaf_litter_carbon_flux", units = "kg C m-2 s-1", dim = dims, - missval = -999, longname='Flux of carbon from leaf litter to soil pools') - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("fine_root_litter_carbon_flux", units = "kg C m-2 s-1", dim = dims, - missval = -999, longname='Flux of carbon from fine root litter to soil pools') - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("harvest_carbon_flux", units = "kg C m-2 s-1", dim = dims, - missval = -999, longname='Flux of carbon removed by harvest') - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("NEE_alt", units = "kg C m-2 s-1", dim = dims, - missval = -999, longname='Alternative NEE') - nc_var[[length(nc_var)+1]] <- 
ncdf4::ncvar_def("FRUNOFFC", units = "kg C m-2 s-1", dim = dims, - missval = -999, longname='C in runoff') - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("FSOILAMDC", units = "kg C m-2 s-1", dim = dims, - missval = -999, longname='Flux of carbon input in soil amendments') - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("TEMPR30", units = "degC", dim = dims, - missval = -999, longname='Smoothed air temperature') - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("PRECIP30", units = "mm/day", dim = dims, - missval = -999, longname='Smoothed daily precipitation') - - + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("LAI", dims) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("CropYield", dims) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("litter_carbon_content", dims) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("stubble_carbon_content", + units = "kg C m-2", dim = dims, missval = -999, + longname = "Stubble Carbon Content" + ) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("stem_carbon_content", + units = "kg C m-2", dim = dims, missval = -999, + longname = "Stem Carbon Content" + ) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("root_carbon_content", dims) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("reserve_carbon_content", + units = "kg C m-2", dim = dims, missval = -999, + longname = "Reserve Carbon Content" + ) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("leaf_carbon_content", dims) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("dead_leaf_carbon_content", + units = "kg C m-2", dim = dims, missval = -999, + longname = "Dead Leaf Carbon Content" + ) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("fast_soil_pool_carbon_content", dims) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("slow_soil_pool_carbon_content", dims) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("soil_organic_nitrogen_content", + units = "kg N m-2", dim = dims, missval = -999, + longname = "Soil Organic Nitrogen Content by Layer " + ) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("TotSoilCarb", dims) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("nonelongating_generative_tiller", + units = "m-2", dim = dims, missval = -999, + longname = "Non-elongating generative tiller density" + ) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("elongating_generative_tiller", + units = "m-2", dim = dims, missval = -999, + longname = "Elongating generative tiller density" + ) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("nonelongating_vegetative_tiller", + units = "m-2", dim = dims, missval = -999, + longname = "Non-elongating vegetative tiller density" + ) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("phenological_stage", + units = "-", dim = dims, missval = -999, + longname = "Phenological stage" + ) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("tiller_density", + units = "m-2", dim = dims, missval = -999, + longname = "Tiller density" + ) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("SoilResp", dims) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("AutoResp", dims) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("NEE", dims) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("GPP", dims) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("NPP", dims) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("Qle", dims) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("SoilMoist", + units = "kg m-2", dim = dims, missval = -999, + longname = "Average Layer Soil Moisture" 
+ ) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("SoilMoistFrac", + units = "m3 m-3", dim = dims, missval = -999, + longname = "Average Layer Fraction of Saturation" + ) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("leaf_litter_carbon_flux", + units = "kg C m-2 s-1", dim = dims, + missval = -999, longname = "Flux of carbon from leaf litter to soil pools" + ) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("fine_root_litter_carbon_flux", + units = "kg C m-2 s-1", dim = dims, + missval = -999, longname = "Flux of carbon from fine root litter to soil pools" + ) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("harvest_carbon_flux", + units = "kg C m-2 s-1", dim = dims, + missval = -999, longname = "Flux of carbon removed by harvest" + ) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("NEE_alt", + units = "kg C m-2 s-1", dim = dims, + missval = -999, longname = "Alternative NEE" + ) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("FRUNOFFC", + units = "kg C m-2 s-1", dim = dims, + missval = -999, longname = "C in runoff" + ) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("FSOILAMDC", + units = "kg C m-2 s-1", dim = dims, + missval = -999, longname = "Flux of carbon input in soil amendments" + ) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("TEMPR30", + units = "degC", dim = dims, + missval = -999, longname = "Smoothed air temperature" + ) + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("PRECIP30", + units = "mm/day", dim = dims, + missval = -999, longname = "Smoothed daily precipitation" + ) + + # ******************** Declare netCDF variables ********************# - + ### Output netCDF data nc <- ncdf4::nc_create(file.path(outdir, paste(y, "nc", sep = ".")), nc_var) varfile <- file(file.path(outdir, paste(y, "nc", "var", sep = ".")), "w") @@ -601,6 +654,4 @@ run_BASGRA <- function(run_met, run_params, site_harvest, site_fertilize, start_ close(varfile) ncdf4::nc_close(nc) } # end year-loop over outputs - } # run_BASGRA - diff --git a/models/basgra/R/write.config.BASGRA.R b/models/basgra/R/write.config.BASGRA.R index da3d55474ac..faee07bfa2f 100644 --- a/models/basgra/R/write.config.BASGRA.R +++ b/models/basgra/R/write.config.BASGRA.R @@ -1,4 +1,4 @@ -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' Writes a BASGRA config file. ##' ##' Requires a pft xml object, a list of trait values for a single model run, @@ -14,254 +14,252 @@ ##' @return configuration file for BASGRA for given run ##' @export ##' @author Istem Fer -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# write.config.BASGRA <- function(defaults, trait.values, settings, run.id, IC = NULL) { - # find out where to write run/ouput rundir <- file.path(settings$host$rundir, run.id) outdir <- file.path(settings$host$outdir, run.id) - + # load default(!) 
BASGRA params if (!is.null(settings$run$inputs$defaults$path)) { df_run_params <- utils::read.csv(settings$run$inputs$defaults$path) } else { df_run_params <- utils::read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) } - run_params <- stats::setNames(df_run_params[,2], df_run_params[,1]) + run_params <- stats::setNames(df_run_params[, 2], df_run_params[, 1]) run_params[which(names(run_params) == "LAT")] <- as.numeric(settings$run$site$lat) #### write run-specific PFT parameters here #### Get parameters being handled by PEcAn for (pft in seq_along(trait.values)) { - pft.traits <- unlist(trait.values[[pft]]) pft.names <- names(pft.traits) - + # Replace matches for (mi in seq_along(pft.traits)) { ind <- which(names(run_params) == pft.names[mi]) run_params[ind] <- pft.traits[mi] } - + # Maximum SLA of new leaves if ("SLAMAX" %in% pft.names) { - run_params[which(names(run_params) == "SLAMAX")] <- PEcAn.utils::ud_convert(pft.traits[which(pft.names == "SLAMAX")], "kg-1","g-1") + run_params[which(names(run_params) == "SLAMAX")] <- PEcAn.utils::ud_convert(pft.traits[which(pft.names == "SLAMAX")], "kg-1", "g-1") } - + # Number of elongating leaves per non-elongating tiller if ("n_el2nel" %in% pft.names) { run_params[which(names(run_params) == "NELLVM")] <- pft.traits[which(pft.names == "n_el2nel")] } - + # N-C ratio of roots (g N g-1 C) if ("c2n_fineroot" %in% pft.names) { - run_params[which(names(run_params) == "NCR")] <- 1/pft.traits[which(pft.names == "c2n_fineroot")] + run_params[which(names(run_params) == "NCR")] <- 1 / pft.traits[which(pft.names == "c2n_fineroot")] } - + # Phenological stage above which elongation and appearance of leaves on elongating tillers decreases if ("phen_etil_decrease" %in% pft.names) { run_params[which(names(run_params) == "PHENCR")] <- pft.traits[which(pft.names == "phen_etil_decrease")] } - + # Relative death rate of leaves and non-elongating tillers due to shading when LAI is twice the threshold (LAICR) if ("relative_shading_death" %in% pft.names) { run_params[which(names(run_params) == "RDRSCO")] <- pft.traits[which(pft.names == "relative_shading_death")] } - + # PAR extinction coefficient (m2 m-2) if ("extinction_coefficient" %in% pft.names) { run_params[which(names(run_params) == "K")] <- pft.traits[which(pft.names == "extinction_coefficient")] } - + # Transpiration coefficient (mm d-1) if ("transpiration_coefficient" %in% pft.names) { run_params[which(names(run_params) == "TRANCO")] <- pft.traits[which(pft.names == "transpiration_coefficient")] } - + if ("phyllochron" %in% pft.names) { run_params[which(names(run_params) == "PHY")] <- pft.traits[which(pft.names == "phyllochron")] } - + if ("leaf_width" %in% pft.names) { # Leaf width on non-elongating tillers (m) run_params[which(names(run_params) == "LFWIDV")] <- PEcAn.utils::ud_convert(pft.traits[which(pft.names == "leaf_width")], "mm", "m") } - + if ("generative_leaf_width" %in% pft.names) { # Leaf width on elongating tillers (m) run_params[which(names(run_params) == "LFWIDG")] <- PEcAn.utils::ud_convert(pft.traits[which(pft.names == "generative_leaf_width")], "mm", "m") } - + # Maximum root depth growth rate (m day-1) if ("root_growth_rate" %in% pft.names) { run_params[which(names(run_params) == "RRDMAX")] <- pft.traits[which(pft.names == "root_growth_rate")] } - + # Rubisco content of upper leaves (g m-2 leaf) if ("rubisco_content" %in% pft.names) { run_params[which(names(run_params) == "RUBISC")] <- pft.traits[which(pft.names == "rubisco_content")] } - + # Area of a leaf relative to a 
rectangle of same length and width (-) if ("shape" %in% pft.names) { run_params[which(names(run_params) == "SHAPE")] <- pft.traits[which(pft.names == "shape")] } - + # Sink strength of small elongating tillers (g C tiller-1 d-1) if ("elongating_tiller_sink_strength" %in% pft.names) { run_params[which(names(run_params) == "SIMAX1T")] <- pft.traits[which(pft.names == "elongating_tiller_sink_strength")] } - + # Minimum value of effective temperature for leaf elongation (deg C) if ("gdd_tbase" %in% pft.names) { run_params[which(names(run_params) == "TBASE")] <- pft.traits[which(pft.names == "gdd_tbase")] } - + # Time constant of mobilisation of reserves (day) if ("tc_res" %in% pft.names) { run_params[which(names(run_params) == "TCRES")] <- pft.traits[which(pft.names == "tc_res")] } - + # Optimum temperature for vegetative tillers becoming generative (deg C) if ("TOpt_ge" %in% pft.names) { run_params[which(names(run_params) == "TOPTGE")] <- pft.traits[which(pft.names == "TOpt_ge")] } - + # Growth yield per unit expended carbohydrate (g C g-1 C) if ("growthYield" %in% pft.names) { run_params[which(names(run_params) == "YG")] <- pft.traits[which(pft.names == "growthYield")] } - + # Maximum surface temperature at which hardening is possible (deg C) if ("THard_max" %in% pft.names) { run_params[which(names(run_params) == "THARDMX")] <- pft.traits[which(pft.names == "THard_max")] } - + # Minimum relative death rate of foliage (day-1) if ("min_foliage_mort_rate" %in% pft.names) { run_params[which(names(run_params) == "RDRTMIN")] <- pft.traits[which(pft.names == "min_foliage_mort_rate")] } - + # Maximum N-C ratio of shoot (g N g-1 C) if ("n2c_shoot_max" %in% pft.names) { run_params[which(names(run_params) == "NCSHMAX")] <- pft.traits[which(pft.names == "n2c_shoot_max")] } - + # Maximum refreezing rate per degree below temperature which snow melts if ("max_refreezing_rate" %in% pft.names) { run_params[which(names(run_params) == "SWrf")] <- pft.traits[which(pft.names == "max_refreezing_rate")] } - + # Vernalisation threshold (deg C) if ("vernalization_threshold" %in% pft.names) { run_params[which(names(run_params) == "TVERN")] <- pft.traits[which(pft.names == "vernalization_threshold")] } - + if ("hardening_parameter" %in% pft.names) { run_params[which(names(run_params) == "Hparam")] <- pft.traits[which(pft.names == "hardening_parameter")] } - + if ("max_res_abg" %in% pft.names) { run_params[which(names(run_params) == "COCRESMX")] <- pft.traits[which(pft.names == "max_res_abg")] } - + if ("max_size_etil" %in% pft.names) { run_params[which(names(run_params) == "CSTAVM")] <- pft.traits[which(pft.names == "max_size_etil")] } - + if ("maxSLAmin" %in% pft.names) { run_params[which(names(run_params) == "FSLAMIN")] <- pft.traits[which(pft.names == "maxSLAmin")] } - + if ("rehardening_disappears" %in% pft.names) { run_params[which(names(run_params) == "reHardRedDay")] <- pft.traits[which(pft.names == "rehardening_disappears")] } - + if ("LUE_increase" %in% pft.names) { run_params[which(names(run_params) == "KLUETILG")] <- pft.traits[which(pft.names == "LUE_increase")] } - - + + ##### Soil parameters - + # Fraction of decomposed litter becoming fast SOM if ("f_litter_SOM_fast" %in% pft.names) { run_params[which(names(run_params) == "FLITTSOMF")] <- pft.traits[which(pft.names == "f_litter_SOM_fast")] } - + # Fraction of decomposed fast SOM if ("fastOM2slowOM" %in% pft.names) { run_params[which(names(run_params) == "FSOMFSOMS")] <- pft.traits[which(pft.names == "fastOM2slowOM")] } - + # Residence time of slowly 
decomposing OM if ("sOM_residence_time" %in% pft.names) { run_params[which(names(run_params) == "TCSOMS")] <- pft.traits[which(pft.names == "sOM_residence_time")] } - + # Residence time of fast decomposing OM if ("fOM_residence_time" %in% pft.names) { run_params[which(names(run_params) == "TCSOMF")] <- round(pft.traits[which(pft.names == "fOM_residence_time")]) } - + # Residence time of litter if ("litter_residence_time" %in% pft.names) { run_params[which(names(run_params) == "TCLITT")] <- pft.traits[which(pft.names == "litter_residence_time")] } - + # Temperature at which decomposition is maximal (deg C) if ("Tdecomp_max" %in% pft.names) { run_params[which(names(run_params) == "TMAXF")] <- pft.traits[which(pft.names == "Tdecomp_max")] } - + # Resilience of decomposition to temperature change (deg C) if ("decomp_res2Tdelta" %in% pft.names) { run_params[which(names(run_params) == "TSIGMAF")] <- pft.traits[which(pft.names == "decomp_res2Tdelta")] } - + # Ratio of total to aerobic respiration if ("total2RA" %in% pft.names) { run_params[which(names(run_params) == "KRTOTAER")] <- pft.traits[which(pft.names == "total2RA")] } - + # Day length below which phenological stage is reset to zero if ("min_daylength_reset" %in% pft.names) { run_params[which(names(run_params) == "DAYLB")] <- pft.traits[which(pft.names == "min_daylength_reset")] } - + # Day length below which phenological development slows down if ("min_daylength_slow" %in% pft.names) { run_params[which(names(run_params) == "DAYLP")] <- pft.traits[which(pft.names == "min_daylength_slow")] } - + # Day length below which DAYLGE becomes less than 1 if ("daylength_effect" %in% pft.names) { run_params[which(names(run_params) == "DLMXGE")] <- pft.traits[which(pft.names == "daylength_effect")] } - + # LAI above which shading induces leaf senescence if ("lai_senescence" %in% pft.names) { run_params[which(names(run_params) == "LAICR")] <- pft.traits[which(pft.names == "lai_senescence")] } - + # Decrease in tillering with leaf area index if ("lai_til_decrease" %in% pft.names) { run_params[which(names(run_params) == "LAIEFT")] <- pft.traits[which(pft.names == "lai_til_decrease")] } - + # Maximum ratio of tiller and leaf apearance at low leaf area index if ("lai_til2leaf_max" %in% pft.names) { run_params[which(names(run_params) == "LAITIL")] <- pft.traits[which(pft.names == "lai_til2leaf_max")] } - + # Proportionality of leaf senescence with temperature if ("leaf_senescence_temp_rate" %in% pft.names) { run_params[which(names(run_params) == "RDRTEM")] <- pft.traits[which(pft.names == "leaf_senescence_temp_rate")] } - + # Maximum relative rate of tillers becoming elongating tillers if ("til2etil_max_rate" %in% pft.names) { run_params[which(names(run_params) == "RGENMX")] <- pft.traits[which(pft.names == "til2etil_max_rate")] } - + # Fraction of reserves in elongating tillers that is harvested if ("etil_resv_harv" %in% pft.names) { run_params[which(names(run_params) == "HAGERE")] <- pft.traits[which(pft.names == "etil_resv_harv")] @@ -269,9 +267,9 @@ write.config.BASGRA <- function(defaults, trait.values, settings, run.id, IC = N # Yasso decomposition parameters param_pairs <- list( - c('som_a_decomp_rate', 'yasso_alpha_a'), c('som_w_decomp_rate', 'yasso_alpha_w'), - c('som_e_decomp_rate', 'yasso_alpha_e'), c('som_n_decomp_rate', 'yasso_alpha_n'), - c('yasso_rate_pc', 'yasso_rate_pc'), c('yasso_tresp_pc', 'yasso_tres_pc') + c("som_a_decomp_rate", "yasso_alpha_a"), c("som_w_decomp_rate", "yasso_alpha_w"), + c("som_e_decomp_rate", "yasso_alpha_e"), 
c("som_n_decomp_rate", "yasso_alpha_n"), + c("yasso_rate_pc", "yasso_rate_pc"), c("yasso_tresp_pc", "yasso_tres_pc") ) for (param_pair in param_pairs) { # Yasso-specific params @@ -279,418 +277,407 @@ write.config.BASGRA <- function(defaults, trait.values, settings, run.id, IC = N run_params[which(names(run_params) == param_pair[2])] <- pft.traits[which(pft.names == param_pair[1])] } } - } #### End parameter update #### Update initial conditions if (!is.null(IC)) { - ic.names <- names(IC) - + # let's combine these here last_vals <- c() last_states_file <- file.path(outdir, "last_vals_basgra.Rdata") - - if(!file.exists(last_states_file) & is.null(IC$test_vals)){ + + if (!file.exists(last_states_file) & is.null(IC$test_vals)) { PEcAn.logger::logger.severe("Last step output values are missing for restart.") - }else if(!is.null(IC$test_vals)){ + } else if (!is.null(IC$test_vals)) { # for package testing last_vals <- IC$test_vals - }else{ + } else { load(last_states_file) } - - if ("LAI" %in% ic.names) { + + if ("LAI" %in% ic.names) { run_params[names(run_params) == "LOG10LAII"] <- IC$LAI - }else{ + } else { run_params[names(run_params) == "LOG10LAII"] <- last_vals[names(last_vals) == "LAI"] } - + # For Yasso restart - if(run_params[names(run_params) == "use_yasso"]){ - - last_somf <- sum(last_vals[names(last_vals) == "CSOM_A"], - last_vals[names(last_vals) == "CSOM_W"], - last_vals[names(last_vals) == "CSOM_E"], - last_vals[names(last_vals) == "CSOM_N"]) - + if (run_params[names(run_params) == "use_yasso"]) { + last_somf <- sum( + last_vals[names(last_vals) == "CSOM_A"], + last_vals[names(last_vals) == "CSOM_W"], + last_vals[names(last_vals) == "CSOM_E"], + last_vals[names(last_vals) == "CSOM_N"] + ) + last_soms <- last_vals[names(last_vals) == "CSOM_H"] - - if ("fast_soil_pool_carbon_content" %in% ic.names & "slow_soil_pool_carbon_content" %in% ic.names) { - - new_somf <- PEcAn.utils::ud_convert(IC$fast_soil_pool_carbon_content, "kg", "g") - new_soms <- PEcAn.utils::ud_convert(IC$slow_soil_pool_carbon_content, "kg", "g") - + + if ("fast_soil_pool_carbon_content" %in% ic.names & "slow_soil_pool_carbon_content" %in% ic.names) { + new_somf <- PEcAn.utils::ud_convert(IC$fast_soil_pool_carbon_content, "kg", "g") + new_soms <- PEcAn.utils::ud_convert(IC$slow_soil_pool_carbon_content, "kg", "g") + ratio_somf <- new_somf / last_somf - + # update via ratio run_params[names(run_params) == "CSOM_A"] <- ratio_somf * last_vals[names(last_vals) == "CSOM_A"] run_params[names(run_params) == "CSOM_W"] <- ratio_somf * last_vals[names(last_vals) == "CSOM_W"] run_params[names(run_params) == "CSOM_E"] <- ratio_somf * last_vals[names(last_vals) == "CSOM_E"] run_params[names(run_params) == "CSOM_N"] <- ratio_somf * last_vals[names(last_vals) == "CSOM_N"] run_params[names(run_params) == "CSOM_H"] <- new_soms - - run_params[names(run_params) == "NSOM"] <- ((new_somf+new_soms)/(last_somf+last_soms)) * last_vals[names(last_vals) == "NSOM"] - - }else{ - + + run_params[names(run_params) == "NSOM"] <- ((new_somf + new_soms) / (last_somf + last_soms)) * last_vals[names(last_vals) == "NSOM"] + } else { run_params[names(run_params) == "CSOM_A"] <- last_vals[names(last_vals) == "CSOM_A"] run_params[names(run_params) == "CSOM_W"] <- last_vals[names(last_vals) == "CSOM_W"] run_params[names(run_params) == "CSOM_E"] <- last_vals[names(last_vals) == "CSOM_E"] run_params[names(run_params) == "CSOM_N"] <- last_vals[names(last_vals) == "CSOM_N"] run_params[names(run_params) == "CSOM_H"] <- last_soms - + run_params[names(run_params) 
== "NSOM"] <- last_vals[names(last_vals) == "NSOM"] - } - + # #else-if ("TotSoilCarb" %in% ic.names)? - # new_totc <- PEcAn.utils::ud_convert(IC$TotSoilCarb, "kg", "g") - # + # new_totc <- PEcAn.utils::ud_convert(IC$TotSoilCarb, "kg", "g") + # # ratio_soc <- new_totc / (last_somf + last_soms) - # + # # # update via ratio # run_params[names(run_params) == "CSOM_A"] <- ratio_soc * last_vals[names(last_vals) == "CSOM_A"] # run_params[names(run_params) == "CSOM_W"] <- ratio_soc * last_vals[names(last_vals) == "CSOM_W"] # run_params[names(run_params) == "CSOM_E"] <- ratio_soc * last_vals[names(last_vals) == "CSOM_E"] # run_params[names(run_params) == "CSOM_N"] <- ratio_soc * last_vals[names(last_vals) == "CSOM_N"] # run_params[names(run_params) == "CSOM_H"] <- ratio_soc * last_vals[names(last_vals) == "CSOM_H"] - - - }else{ # no Yasso - if ("fast_soil_pool_carbon_content" %in% ic.names) { + } else { # no Yasso + if ("fast_soil_pool_carbon_content" %in% ic.names) { run_params[names(run_params) == "CSOMF0"] <- PEcAn.utils::ud_convert(IC$fast_soil_pool_carbon_content, "kg", "g") - }else{ + } else { run_params[names(run_params) == "CSOMF0"] <- last_vals[names(last_vals) == "CSOMF"] } - run_params[names(run_params) == "NSOMF0"] <- run_params[names(run_params) == "CSOMF0"] / run_params[names(run_params) == "CNSOMF0"] - - if ("slow_soil_pool_carbon_content" %in% ic.names) { + run_params[names(run_params) == "NSOMF0"] <- run_params[names(run_params) == "CSOMF0"] / run_params[names(run_params) == "CNSOMF0"] + + if ("slow_soil_pool_carbon_content" %in% ic.names) { run_params[names(run_params) == "CSOMS0"] <- PEcAn.utils::ud_convert(IC$slow_soil_pool_carbon_content, "kg", "g") - }else{ + } else { run_params[names(run_params) == "CSOMS0"] <- last_vals[names(last_vals) == "CSOMS"] } - run_params[names(run_params) == "NSOMS0"] <- run_params[names(run_params) == "CSOMS0"] / run_params[names(run_params) == "CNSOMS0"] - + run_params[names(run_params) == "NSOMS0"] <- run_params[names(run_params) == "CSOMS0"] / run_params[names(run_params) == "CNSOMS0"] } - - if ("CropYield" %in% ic.names) { + + if ("CropYield" %in% ic.names) { run_params[names(run_params) == "YIELDI"] <- PEcAn.utils::ud_convert(IC$CropYield, "kg", "g") - }else{ - run_params[names(run_params) == "YIELDI"] <- last_vals[names(last_vals) == "YIELD_POT"] + } else { + run_params[names(run_params) == "YIELDI"] <- last_vals[names(last_vals) == "YIELD_POT"] } - - if ("litter_carbon_content" %in% ic.names) { + + if ("litter_carbon_content" %in% ic.names) { run_params[names(run_params) == "CLITT0"] <- PEcAn.utils::ud_convert(IC$litter_carbon_content, "kg", "g") - }else{ + } else { run_params[names(run_params) == "CLITT0"] <- last_vals[names(last_vals) == "CLITT"] } - #run_params[names(run_params) == "NLITT0"] <- run_params[names(run_params) == "CLITT0"] / run_params[names(run_params) == "CNLITT0"] + # run_params[names(run_params) == "NLITT0"] <- run_params[names(run_params) == "CLITT0"] / run_params[names(run_params) == "CNLITT0"] run_params[which(names(run_params) == "NLITT0")] <- last_vals[names(last_vals) == "NLITT"] - - if ("stubble_carbon_content" %in% ic.names) { + + if ("stubble_carbon_content" %in% ic.names) { run_params[names(run_params) == "CSTUBI"] <- PEcAn.utils::ud_convert(IC$stubble_carbon_content, "kg", "g") - }else{ + } else { run_params[names(run_params) == "CSTUBI"] <- last_vals[names(last_vals) == "CSTUB"] } - - if ("stem_carbon_content" %in% ic.names) { + + if ("stem_carbon_content" %in% ic.names) { run_params[names(run_params) == 
"CSTI"] <- PEcAn.utils::ud_convert(IC$stem_carbon_content, "kg", "g") - }else{ + } else { run_params[names(run_params) == "CSTI"] <- last_vals[names(last_vals) == "CST"] } - + # NRT = NCR * CRTI - #run_params[names(run_params) == "NCR"] <- last_vals[names(last_vals) == "NRT"] / last_vals[names(last_vals) == "CRT"] - if ("root_carbon_content" %in% ic.names) { - run_params[names(run_params) == "LOG10CRTI"] <- PEcAn.utils::ud_convert(IC$root_carbon_content, "kg", "g") - }else{ + # run_params[names(run_params) == "NCR"] <- last_vals[names(last_vals) == "NRT"] / last_vals[names(last_vals) == "CRT"] + if ("root_carbon_content" %in% ic.names) { + run_params[names(run_params) == "LOG10CRTI"] <- PEcAn.utils::ud_convert(IC$root_carbon_content, "kg", "g") + } else { run_params[names(run_params) == "LOG10CRTI"] <- last_vals[names(last_vals) == "CRT"] } - run_params[which(names(run_params) == "NRTI")] <- run_params[names(run_params) == "LOG10CRTI"]*run_params[names(run_params) == "NCR"] - # if(run_params[which(names(run_params) == "NRTI")] <= 0) run_params[which(names(run_params) == "NRTI")] <- 0.0001 + run_params[which(names(run_params) == "NRTI")] <- run_params[names(run_params) == "LOG10CRTI"] * run_params[names(run_params) == "NCR"] + # if(run_params[which(names(run_params) == "NRTI")] <= 0) run_params[which(names(run_params) == "NRTI")] <- 0.0001 # # NCSHI = NCSHMAX * (1-EXP(-K*LAII)) / (K*LAII) # # NSH = NCSHI * (CLVI+CSTI) lai_tmp <- run_params[names(run_params) == "LOG10LAII"] ncshi <- run_params[names(run_params) == "NCSHMAX"] * - (1-exp(-run_params[names(run_params) == "K"]*lai_tmp)) / (run_params[names(run_params) == "K"]*lai_tmp) + (1 - exp(-run_params[names(run_params) == "K"] * lai_tmp)) / (run_params[names(run_params) == "K"] * lai_tmp) run_params[which(names(run_params) == "NSHI")] <- ncshi * ((run_params[names(run_params) == "LOG10CLVI"]) + run_params[names(run_params) == "CSTI"]) - - if ("reserve_carbon_content" %in% ic.names) { + + if ("reserve_carbon_content" %in% ic.names) { run_params[names(run_params) == "LOG10CRESI"] <- PEcAn.utils::ud_convert(IC$reserve_carbon_content, "kg", "g") - }else{ + } else { run_params[names(run_params) == "LOG10CRESI"] <- last_vals[names(last_vals) == "CRES"] } - - if ("leaf_carbon_content" %in% ic.names) { + + if ("leaf_carbon_content" %in% ic.names) { run_params[names(run_params) == "LOG10CLVI"] <- PEcAn.utils::ud_convert(IC$leaf_carbon_content, "kg", "g") - }else{ + } else { run_params[names(run_params) == "LOG10CLVI"] <- last_vals[names(last_vals) == "CLV"] } - - if ("dead_leaf_carbon_content" %in% ic.names) { + + if ("dead_leaf_carbon_content" %in% ic.names) { run_params[names(run_params) == "CLVDI"] <- PEcAn.utils::ud_convert(IC$dead_leaf_carbon_content, "kg", "g") - }else{ + } else { run_params[names(run_params) == "CLVDI"] <- last_vals[names(last_vals) == "CLVD"] } - - if ("tiller_density" %in% ic.names) { + + if ("tiller_density" %in% ic.names) { run_params[names(run_params) == "TILTOTI"] <- IC$tiller_density # all the tillers are updated from this with respect to model preserved ratios - }else{ + } else { run_params[names(run_params) == "TILTOTI"] <- last_vals[names(last_vals) == "TILTOT"] } - run_params[names(run_params) == "FRTILGI"] <- last_vals[names(last_vals) == "FRTILG"] - - if(run_params[names(run_params) == "FRTILGI"] == 0) run_params[names(run_params) == "FRTILGI"] <- 0.01 - - #TILV = TILTOTI * (1. 
- FRTILGI) - if ("nonelongating_vegetative_tiller" %in% ic.names) { - run_params[names(run_params) == "TILVI"] <- IC$nonelongating_vegetative_tiller + run_params[names(run_params) == "FRTILGI"] <- last_vals[names(last_vals) == "FRTILG"] + + if (run_params[names(run_params) == "FRTILGI"] == 0) run_params[names(run_params) == "FRTILGI"] <- 0.01 + + # TILV = TILTOTI * (1. - FRTILGI) + if ("nonelongating_vegetative_tiller" %in% ic.names) { + run_params[names(run_params) == "TILVI"] <- IC$nonelongating_vegetative_tiller # preserve ratio - #run_params[names(run_params) == "FRTILGI"] <- 1 - (run_params[names(run_params) == "TILVI"]/run_params[names(run_params) == "TILTOTI"]) - }else{ - run_params[names(run_params) == "TILVI"] <- run_params[names(run_params) == "TILTOTI"] * (1-run_params[names(run_params) == "FRTILGI"]) + # run_params[names(run_params) == "FRTILGI"] <- 1 - (run_params[names(run_params) == "TILVI"]/run_params[names(run_params) == "TILTOTI"]) + } else { + run_params[names(run_params) == "TILVI"] <- run_params[names(run_params) == "TILTOTI"] * (1 - run_params[names(run_params) == "FRTILGI"]) } - + gtil <- run_params[names(run_params) == "TILTOTI"] - run_params[names(run_params) == "TILVI"] - - #TILG1 = TILTOTI * FRTILGI * FRTILGG1I - if ("nonelongating_generative_tiller" %in% ic.names) { - run_params[names(run_params) == "TILG1I"] <- IC$nonelongating_generative_tiller + + # TILG1 = TILTOTI * FRTILGI * FRTILGG1I + if ("nonelongating_generative_tiller" %in% ic.names) { + run_params[names(run_params) == "TILG1I"] <- IC$nonelongating_generative_tiller # can also update FRTILGG1I but I don't throw these into the state matrix anymore and TILG1I is initialized from its own variable, not derived from fractions - }else{ - run_params[names(run_params) == "TILG1I"] <- gtil*(last_vals[names(last_vals) == "TILG1"] / - (last_vals[names(last_vals) == "TILG1"]+last_vals[names(last_vals) == "TILG2"])) - if(is.nan(run_params[names(run_params) == "TILG1I"])) run_params[names(run_params) == "TILG1I"] <- 1 - #if(is.infinite(run_params[names(run_params) == "TILG1I"])) run_params[names(run_params) == "TILG1I"] <- 1 - } - - #TILG2 = TILTOTI * FRTILGI * (1-FRTILGG1I) - if ("elongating_generative_tiller" %in% ic.names) { - run_params[names(run_params) == "TILG2I"] <- IC$elongating_generative_tiller - }else{ - run_params[names(run_params) == "TILG2I"] <- gtil*(last_vals[names(last_vals) == "TILG2"] / - (last_vals[names(last_vals) == "TILG1"]+last_vals[names(last_vals) == "TILG2"])) - if(is.nan(run_params[names(run_params) == "TILG2I"])) run_params[names(run_params) == "TILG2I"] <- 1 + } else { + run_params[names(run_params) == "TILG1I"] <- gtil * (last_vals[names(last_vals) == "TILG1"] / + (last_vals[names(last_vals) == "TILG1"] + last_vals[names(last_vals) == "TILG2"])) + if (is.nan(run_params[names(run_params) == "TILG1I"])) run_params[names(run_params) == "TILG1I"] <- 1 + # if(is.infinite(run_params[names(run_params) == "TILG1I"])) run_params[names(run_params) == "TILG1I"] <- 1 + } + + # TILG2 = TILTOTI * FRTILGI * (1-FRTILGG1I) + if ("elongating_generative_tiller" %in% ic.names) { + run_params[names(run_params) == "TILG2I"] <- IC$elongating_generative_tiller + } else { + run_params[names(run_params) == "TILG2I"] <- gtil * (last_vals[names(last_vals) == "TILG2"] / + (last_vals[names(last_vals) == "TILG1"] + last_vals[names(last_vals) == "TILG2"])) + if (is.nan(run_params[names(run_params) == "TILG2I"])) run_params[names(run_params) == "TILG2I"] <- 1 # if(is.infinite(run_params[names(run_params) == 
"TILG2I"])) run_params[names(run_params) == "TILG2I"] <- 1 } - - if ("phenological_stage" %in% ic.names) { + + if ("phenological_stage" %in% ic.names) { run_params[names(run_params) == "PHENI"] <- IC$phenological_stage - }else{ + } else { run_params[names(run_params) == "PHENI"] <- last_vals[names(last_vals) == "PHEN"] } - if ("lethal_temperature50" %in% ic.names) { + if ("lethal_temperature50" %in% ic.names) { run_params[names(run_params) == "LT50I"] <- IC$lethal_temperature50 - }else{ + } else { run_params[names(run_params) == "LT50I"] <- last_vals[names(last_vals) == "LT50"] } - - - if ("rooting_depth" %in% ic.names) { + + + if ("rooting_depth" %in% ic.names) { run_params[names(run_params) == "ROOTDM"] <- IC$rooting_depth - }else{ + } else { run_params[names(run_params) == "ROOTDM"] <- last_vals[names(last_vals) == "ROOTD"] # this doesn't change } - + # these change too - run_params[names(run_params) == "TEMPR30"] <- last_vals[names(last_vals) == "TEMPR30"] - run_params[names(run_params) == "PRECIP30"] <- last_vals[names(last_vals) == "PRECIP30"] - - run_params[names(run_params) == "DAYLI"] <- last_vals[names(last_vals) == "DAYL"] - - run_params[names(run_params) == "NMIN0"] <- last_vals[names(last_vals) == "NMIN"] - - run_params[names(run_params) == "O2I"] <- last_vals[names(last_vals) == "O2"] - - - # water stuff, to be in SDA - + run_params[names(run_params) == "TEMPR30"] <- last_vals[names(last_vals) == "TEMPR30"] + run_params[names(run_params) == "PRECIP30"] <- last_vals[names(last_vals) == "PRECIP30"] + + run_params[names(run_params) == "DAYLI"] <- last_vals[names(last_vals) == "DAYL"] + + run_params[names(run_params) == "NMIN0"] <- last_vals[names(last_vals) == "NMIN"] + + run_params[names(run_params) == "O2I"] <- last_vals[names(last_vals) == "O2"] + + + # water stuff, to be in SDA + run_params[names(run_params) == "DRYSTORI"] <- last_vals[names(last_vals) == "DRYSTOR"] - run_params[names(run_params) == "FdepthI"] <- last_vals[names(last_vals) == "Fdepth"] - run_params[names(run_params) == "SDEPTHI"] <- last_vals[names(last_vals) == "Sdepth"] - run_params[names(run_params) == "TANAERI"] <- last_vals[names(last_vals) == "TANAER"] - run_params[names(run_params) == "WAPLI"] <- last_vals[names(last_vals) == "WAPL"] - run_params[names(run_params) == "WAPSI"] <- last_vals[names(last_vals) == "WAPS"] - run_params[names(run_params) == "WASI"] <- last_vals[names(last_vals) == "WAS"] + run_params[names(run_params) == "FdepthI"] <- last_vals[names(last_vals) == "Fdepth"] + run_params[names(run_params) == "SDEPTHI"] <- last_vals[names(last_vals) == "Sdepth"] + run_params[names(run_params) == "TANAERI"] <- last_vals[names(last_vals) == "TANAER"] + run_params[names(run_params) == "WAPLI"] <- last_vals[names(last_vals) == "WAPL"] + run_params[names(run_params) == "WAPSI"] <- last_vals[names(last_vals) == "WAPS"] + run_params[names(run_params) == "WASI"] <- last_vals[names(last_vals) == "WAS"] run_params[names(run_params) == "WETSTORI"] <- last_vals[names(last_vals) == "WETSTOR"] - + # WAL = 1000. * ROOTDM * WCI - if ("SoilMoistFrac" %in% ic.names) { - run_params[names(run_params) == "WCI"] <- IC$SoilMoistFrac - run_params[names(run_params) == "WALI"] <- 1000. 
* (run_params[names(run_params) == "ROOTDM"] - run_params[names(run_params) == "FdepthI"]) * run_params[names(run_params) == "WCI"] - }else{ - run_params[names(run_params) == "WALI"] <- last_vals[names(last_vals) == "WAL"] - run_params[names(run_params) == "WCI"] <- run_params[names(run_params) == "WALI"] / (1000 * (run_params[names(run_params) == "ROOTDM"]- run_params[names(run_params) == "FdepthI"])) - } - - yasso_pools <- c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N', 'CSOM_H', 'NSOM', 'TEMPR30', 'PRECIP30') + if ("SoilMoistFrac" %in% ic.names) { + run_params[names(run_params) == "WCI"] <- IC$SoilMoistFrac + run_params[names(run_params) == "WALI"] <- 1000. * (run_params[names(run_params) == "ROOTDM"] - run_params[names(run_params) == "FdepthI"]) * run_params[names(run_params) == "WCI"] + } else { + run_params[names(run_params) == "WALI"] <- last_vals[names(last_vals) == "WAL"] + run_params[names(run_params) == "WCI"] <- run_params[names(run_params) == "WALI"] / (1000 * (run_params[names(run_params) == "ROOTDM"] - run_params[names(run_params) == "FdepthI"])) + } + + yasso_pools <- c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N", "CSOM_H", "NSOM", "TEMPR30", "PRECIP30") for (p in yasso_pools) { if (p %in% ic.names) { run_params[names(run_params) == p] <- IC[[p]] } } - - - }else if(!is.null(settings$run$inputs$poolinitcond$path)){ - + } else if (!is.null(settings$run$inputs$poolinitcond$path)) { IC.path <- settings$run$inputs$poolinitcond$path - #IC.pools <- PEcAn.data.land::prepare_pools(IC.path, constants = list(sla = SLA)) - - #if(!is.null(IC.pools)){ + # IC.pools <- PEcAn.data.land::prepare_pools(IC.path, constants = list(sla = SLA)) + + # if(!is.null(IC.pools)){ IC.nc <- ncdf4::nc_open(IC.path) - + ## laiInit m2/m2 lai <- try(ncdf4::ncvar_get(IC.nc, "LAI"), silent = TRUE) if (!is.na(lai) && is.numeric(lai)) { run_params[which(names(run_params) == "LOG10LAII")] <- lai } - + # Initial value of litter C (g C m-2) clitt0 <- try(ncdf4::ncvar_get(IC.nc, "litter_carbon_content"), silent = TRUE) if (!is.na(clitt0) && is.numeric(clitt0)) { run_params[which(names(run_params) == "CLITT0")] <- PEcAn.utils::ud_convert(clitt0, "kg", "g") } - - + + # Initial value of slow SOM (g C m-2) csoms0 <- try(ncdf4::ncvar_get(IC.nc, "slow_soil_pool_carbon_content"), silent = TRUE) if (!is.na(csoms0) && is.numeric(csoms0)) { run_params[which(names(run_params) == "CSOMS0")] <- PEcAn.utils::ud_convert(csoms0, "kg", "g") } - + # Initial value of fast SOM (g C m-2) csomf0 <- try(ncdf4::ncvar_get(IC.nc, "fast_soil_pool_carbon_content"), silent = TRUE) if (!is.na(csomf0) && is.numeric(csomf0)) { run_params[which(names(run_params) == "CSOMF0")] <- PEcAn.utils::ud_convert(csomf0, "kg", "g") } - + # Initial value of root C (g C m-2) crti <- try(ncdf4::ncvar_get(IC.nc, "root_carbon_content"), silent = TRUE) if (!is.na(crti) && is.numeric(crti)) { # not log10 anymore, don't mind the name run_params[which(names(run_params) == "LOG10CRTI")] <- PEcAn.utils::ud_convert(crti, "kg", "g") } - + # Initial value of leaf C (g C m-2) clvi <- try(ncdf4::ncvar_get(IC.nc, "leaf_carbon_content"), silent = TRUE) if (!is.na(clvi) && is.numeric(clvi)) { # not log10 anymore, don't mind the name run_params[which(names(run_params) == "LOG10CLVI")] <- PEcAn.utils::ud_convert(clvi, "kg", "g") } - + # Initial mineral N nmin0 <- try(ncdf4::ncvar_get(IC.nc, "soil_inorganic_nitrogen_content"), silent = TRUE) if (!is.na(nmin0) && is.numeric(nmin0)) { run_params[which(names(run_params) == "NMIN0")] <- PEcAn.utils::ud_convert(nmin0, "kg", "g") } - - # Initial 
organic N + + # Initial organic N nsom0 <- try(ncdf4::ncvar_get(IC.nc, "soil_organic_nitrogen_content"), silent = TRUE) if (!is.na(nsom0) && is.numeric(nsom0)) { run_params[which(names(run_params) == "NSOM")] <- PEcAn.utils::ud_convert(nsom0, "kg", "g") } - + # Rooting depth (m) rootd <- try(ncdf4::ncvar_get(IC.nc, "rooting_depth"), silent = TRUE) if (!is.na(rootd) && is.numeric(rootd)) { run_params[which(names(run_params) == "ROOTDM")] <- rootd } - + # WCI wci <- try(ncdf4::ncvar_get(IC.nc, "SoilMoistFrac"), silent = TRUE) if (!is.na(wci) && is.numeric(wci)) { run_params[which(names(run_params) == "WCI")] <- wci } - + # Tiller density (m-2) tiltoti <- try(ncdf4::ncvar_get(IC.nc, "tiller_density"), silent = TRUE) if (!is.na(tiltoti) && is.numeric(tiltoti)) { run_params[which(names(run_params) == "TILTOTI")] <- tiltoti } - + tilg1 <- try(ncdf4::ncvar_get(IC.nc, "nonelongating_generative_tiller"), silent = TRUE) if (!is.na(tilg1) && is.numeric(tilg1)) { - run_params[names(run_params) == "TILG1I"] <- tilg1 + run_params[names(run_params) == "TILG1I"] <- tilg1 } - + tilg2 <- try(ncdf4::ncvar_get(IC.nc, "elongating_generative_tiller"), silent = TRUE) if (!is.na(tilg2) && is.numeric(tilg2)) { - run_params[names(run_params) == "TILG2I"] <- tilg2 + run_params[names(run_params) == "TILG2I"] <- tilg2 } - + tilv <- try(ncdf4::ncvar_get(IC.nc, "nonelongating_vegetative_tiller"), silent = TRUE) if (!is.na(tilv) && is.numeric(tilv)) { - run_params[names(run_params) == "TILVI"] <- tilv + run_params[names(run_params) == "TILVI"] <- tilv } - + # Phenological stage pheni <- try(ncdf4::ncvar_get(IC.nc, "phenological_stage"), silent = TRUE) if (!is.na(pheni) && is.numeric(pheni)) { run_params[which(names(run_params) == "PHENI")] <- pheni } - + # Initial C in reserves (g C m-2) cresi <- try(ncdf4::ncvar_get(IC.nc, "reserve_carbon_content"), silent = TRUE) if (!is.na(cresi) && is.numeric(cresi)) { # not log10 anymore, don't mind the name run_params[which(names(run_params) == "LOG10CRESI")] <- PEcAn.utils::ud_convert(cresi, "kg", "g") } - + # N-C ratio of roots n2c <- try(ncdf4::ncvar_get(IC.nc, "n2c_roots"), silent = TRUE) if (!is.na(n2c) && is.numeric(n2c)) { run_params[which(names(run_params) == "NCR")] <- n2c } - + # Initial C-N ratio of fast SOM c2n <- try(ncdf4::ncvar_get(IC.nc, "c2n_fast_pool"), silent = TRUE) if (!is.na(c2n) && is.numeric(c2n)) { run_params[which(names(run_params) == "CNSOMF0")] <- c2n } - + # Water concentration at saturation (m3 m-3) wcst <- try(ncdf4::ncvar_get(IC.nc, "water_concentration_at_saturation"), silent = TRUE) if (!is.na(wcst) && is.numeric(wcst)) { run_params[which(names(run_params) == "WCST")] <- wcst } - + # Water concentration at field capacity (m3 m-3) wcfc <- try(ncdf4::ncvar_get(IC.nc, "water_concentration_at_field_capacity"), silent = TRUE) if (!is.na(wcfc) && is.numeric(wcfc)) { # WCFC = FWCFC * WCST - run_params[which(names(run_params) == "FWCFC")] <- wcfc / wcst + run_params[which(names(run_params) == "FWCFC")] <- wcfc / wcst } - + # Water concentration at wilting point (m3 m-3) wcwp <- try(ncdf4::ncvar_get(IC.nc, "water_concentration_at_wilting_point"), silent = TRUE) if (!is.na(wcwp) && is.numeric(wcwp)) { # WCWP = FWCWP * WCST - run_params[which(names(run_params) == "FWCWP")] <- wcwp / wcst + run_params[which(names(run_params) == "FWCWP")] <- wcwp / wcst } - yasso_pools <- c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N', 'CSOM_H', 'NSOM', 'TEMPR30', 'PRECIP30') + yasso_pools <- c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N", "CSOM_H", "NSOM", "TEMPR30", "PRECIP30") for (p in 
yasso_pools) { - value <- try(ncdf4::ncvar_get(IC.nc, p), silent=TRUE) + value <- try(ncdf4::ncvar_get(IC.nc, p), silent = TRUE) if (!is.na(value) && is.numeric(value)) { run_params[names(run_params) == p] <- value } } } - + # if the default parameter file is set to force some parameter values, override the trait.values here: - if ('force' %in% colnames(df_run_params)) { + if ("force" %in% colnames(df_run_params)) { mask <- as.logical(df_run_params$force) run_params[mask] <- df_run_params$value[mask] } - - + + #----------------------------------------------------------------------- # write job.sh # create launch script (which will create symlink) @@ -699,7 +686,7 @@ write.config.BASGRA <- function(defaults, trait.values, settings, run.id, IC = N } else { jobsh <- readLines(con = system.file("template.job", package = "PEcAn.BASGRA"), n = -1) } - + # create host specific setttings hostsetup <- "" if (!is.null(settings$model$prerun)) { @@ -708,7 +695,7 @@ write.config.BASGRA <- function(defaults, trait.values, settings, run.id, IC = N if (!is.null(settings$host$prerun)) { hostsetup <- paste(hostsetup, sep = "\n", paste(settings$host$prerun, collapse = "\n")) } - + hostteardown <- "" if (!is.null(settings$model$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$model$postrun, collapse = "\n")) @@ -716,27 +703,27 @@ write.config.BASGRA <- function(defaults, trait.values, settings, run.id, IC = N if (!is.null(settings$host$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$host$postrun, collapse = "\n")) } - - + + # create job.sh jobsh <- gsub("@HOST_SETUP@", hostsetup, jobsh) jobsh <- gsub("@HOST_TEARDOWN@", hostteardown, jobsh) - + jobsh <- gsub("@SITE_LAT@", settings$run$site$lat, jobsh) jobsh <- gsub("@SITE_LON@", settings$run$site$lon, jobsh) - - jobsh <- gsub("@SITE_MET@", settings$run$inputs$met$path, jobsh) + + jobsh <- gsub("@SITE_MET@", settings$run$inputs$met$path, jobsh) jobsh <- gsub("@SITE_HARVEST@", settings$run$inputs$harvest$path, jobsh) jobsh <- gsub("@SITE_FERTILIZE@", settings$run$inputs$fertilize$path, jobsh) - if(!is.null(settings$run$inputs$co2_file$path)){ + if (!is.null(settings$run$inputs$co2_file$path)) { jobsh <- gsub("@SITE_CO2FILE@", settings$run$inputs$co2_file$path, jobsh) - }else{ - jobsh <- gsub("@SITE_CO2FILE@", 'NULL', jobsh) + } else { + jobsh <- gsub("@SITE_CO2FILE@", "NULL", jobsh) } - + jobsh <- gsub("@START_DATE@", settings$run$start.date, jobsh) jobsh <- gsub("@END_DATE@", settings$run$end.date, jobsh) - + jobsh <- gsub("@OUTDIR@", outdir, jobsh) jobsh <- gsub("@RUNDIR@", rundir, jobsh) if (!is.null(settings$run$write.raw.output)) { @@ -744,13 +731,12 @@ write.config.BASGRA <- function(defaults, trait.values, settings, run.id, IC = N } else { jobsh <- gsub("@WRITE_RAW_OUTPUT@", FALSE, jobsh) } - + jobsh <- gsub( "@RUN_PARAMS@", paste0("c(", PEcAn.utils::listToArgString(run_params), ")"), - jobsh) + jobsh + ) writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) - - } # write.config.BASGRA diff --git a/models/basgra/R/write_restart.BASGRA.R b/models/basgra/R/write_restart.BASGRA.R index 6b4c8062a7c..0d19941d61f 100644 --- a/models/basgra/R/write_restart.BASGRA.R +++ b/models/basgra/R/write_restart.BASGRA.R @@ -1,91 +1,89 @@ ##' @title write_restart.SIPNET -##' +##' ##' @author Istem Fer ##' ##' @inheritParams PEcAn.ModelName::write_restart.ModelName ##' -##' @description Write restart files for BASGRA -##' +##' @description Write restart 
files for BASGRA +##' ##' @return TRUE if successful ##' @export write_restart.BASGRA <- function(outdir, runid, start.time, stop.time, settings, new.state, RENAME = TRUE, new.params = FALSE, inputs) { - - - rundir <- settings$host$rundir + rundir <- settings$host$rundir variables <- colnames(new.state) - + settings$run$start.date <- start.time - settings$run$end.date <- stop.time - + settings$run$end.date <- stop.time + analysis.save <- list() - + if ("LAI" %in% variables) { - analysis.save[[length(analysis.save) + 1]] <- new.state$LAI + analysis.save[[length(analysis.save) + 1]] <- new.state$LAI if (new.state$LAI < 0) analysis.save[[length(analysis.save)]] <- 0.0001 names(analysis.save[[length(analysis.save)]]) <- c("LAI") } - + if ("fast_soil_pool_carbon_content" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$fast_soil_pool_carbon_content if (new.state$fast_soil_pool_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 0 names(analysis.save[[length(analysis.save)]]) <- c("fast_soil_pool_carbon_content") } - + if ("slow_soil_pool_carbon_content" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$slow_soil_pool_carbon_content if (new.state$slow_soil_pool_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 0 names(analysis.save[[length(analysis.save)]]) <- c("slow_soil_pool_carbon_content") } - + if ("soil_organic_nitrogen_content" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$soil_nitrogen_content if (new.state$soil_nitrogen_content < 0) analysis.save[[length(analysis.save)]] <- 0 names(analysis.save[[length(analysis.save)]]) <- c("soil_nitrogen_content") } - + if ("TotSoilCarb" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$TotSoilCarb if (new.state$TotSoilCarb < 0) analysis.save[[length(analysis.save)]] <- 0 names(analysis.save[[length(analysis.save)]]) <- c("TotSoilCarb") } - + if ("CropYield" %in% variables) { - analysis.save[[length(analysis.save) + 1]] <- new.state$CropYield - if (new.state$CropYield < 0) analysis.save[[length(analysis.save)]] <- 0 - names(analysis.save[[length(analysis.save)]]) <- c("CropYield") + analysis.save[[length(analysis.save) + 1]] <- new.state$CropYield + if (new.state$CropYield < 0) analysis.save[[length(analysis.save)]] <- 0 + names(analysis.save[[length(analysis.save)]]) <- c("CropYield") } - + if ("litter_carbon_content" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$litter_carbon_content if (new.state$litter_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 0 names(analysis.save[[length(analysis.save)]]) <- c("litter_carbon_content") } - + if ("stubble_carbon_content" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$stubble_carbon_content if (new.state$stubble_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 0 names(analysis.save[[length(analysis.save)]]) <- c("stubble_carbon_content") } - + if ("stem_carbon_content" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$stem_carbon_content if (new.state$stem_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 0 names(analysis.save[[length(analysis.save)]]) <- c("stem_carbon_content") } - + if ("root_carbon_content" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$root_carbon_content if (new.state$root_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 0.0001 names(analysis.save[[length(analysis.save)]]) <- c("root_carbon_content") } - + if 
("reserve_carbon_content" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$reserve_carbon_content if (new.state$reserve_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 1e-05 names(analysis.save[[length(analysis.save)]]) <- c("reserve_carbon_content") } - + if ("leaf_carbon_content" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$leaf_carbon_content if (new.state$leaf_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 0.001 @@ -97,62 +95,64 @@ write_restart.BASGRA <- function(outdir, runid, start.time, stop.time, settings, if (new.state$dead_leaf_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 0 names(analysis.save[[length(analysis.save)]]) <- c("dead_leaf_carbon_content") } - + if ("nonelongating_generative_tiller" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$nonelongating_generative_tiller if (new.state$nonelongating_generative_tiller < 0) analysis.save[[length(analysis.save)]] <- 10 names(analysis.save[[length(analysis.save)]]) <- c("nonelongating_generative_tiller") } - + if ("elongating_generative_tiller" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$elongating_generative_tiller if (new.state$elongating_generative_tiller < 0) analysis.save[[length(analysis.save)]] <- 0 names(analysis.save[[length(analysis.save)]]) <- c("elongating_generative_tiller") } - + if ("nonelongating_vegetative_tiller" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$nonelongating_vegetative_tiller if (new.state$nonelongating_vegetative_tiller < 0) analysis.save[[length(analysis.save)]] <- 100 names(analysis.save[[length(analysis.save)]]) <- c("nonelongating_vegetative_tiller") } - + if ("tiller_density" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$tiller_density if (new.state$tiller_density < 0) analysis.save[[length(analysis.save)]] <- 100 names(analysis.save[[length(analysis.save)]]) <- c("tiller_density") } - + if ("phenological_stage" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$phenological_stage if (new.state$phenological_stage < 0) analysis.save[[length(analysis.save)]] <- 0 if (new.state$phenological_stage > 1) analysis.save[[length(analysis.save)]] <- 1 names(analysis.save[[length(analysis.save)]]) <- c("phenological_stage") } - + if ("SoilMoistFrac" %in% variables) { analysis.save[[length(analysis.save) + 1]] <- new.state$SoilMoistFrac if (new.state$SoilMoistFrac < 0) analysis.save[[length(analysis.save)]] <- 0.001 if (new.state$SoilMoistFrac > 1) analysis.save[[length(analysis.save)]] <- 1 names(analysis.save[[length(analysis.save)]]) <- c("SoilMoistFrac") } - - if (!is.null(analysis.save) && length(analysis.save) > 0){ + + if (!is.null(analysis.save) && length(analysis.save) > 0) { analysis.save.mat <- data.frame(matrix(unlist(analysis.save, use.names = TRUE), nrow = 1)) colnames(analysis.save.mat) <- names(unlist(analysis.save)) - }else{ + } else { analysis.save.mat <- NULL } - + PEcAn.logger::logger.info(runid) PEcAn.logger::logger.info(analysis.save.mat) - + settings$run$inputs$met <- inputs$met - do.call(write.config.BASGRA, args = list(defaults = NULL, - trait.values = new.params, - settings = settings, - run.id = runid, - IC = analysis.save.mat)) - - + do.call(write.config.BASGRA, args = list( + defaults = NULL, + trait.values = new.params, + settings = settings, + run.id = runid, + IC = analysis.save.mat + )) + + return(TRUE) -} # write_restart.BASGRA \ No newline 
at end of file +} # write_restart.BASGRA diff --git a/models/basgra/tests/testthat/test.run_BASGRA.R b/models/basgra/tests/testthat/test.run_BASGRA.R index 64b0e43c035..5d49eccd3de 100644 --- a/models/basgra/tests/testthat/test.run_BASGRA.R +++ b/models/basgra/tests/testthat/test.run_BASGRA.R @@ -15,19 +15,19 @@ write_harv_fert <- function(path_harv, path_fert) { doy = 163, CLAIV = 0.7 ) - harvest_file <- path_harv # - write.csv(df_harvest, harvest_file, row.names=FALSE) + harvest_file <- path_harv # + write.csv(df_harvest, harvest_file, row.names = FALSE) df_fertilize <- data.frame( year = 2019, doy = 128, amount = 10.0 ) fertilize_file <- path_fert # - write.csv(df_fertilize, fertilize_file, row.names=FALSE) + write.csv(df_fertilize, fertilize_file, row.names = FALSE) } write_new_fert <- function(path_fert, which_type) { - if (which_type == 'mineral') { + if (which_type == "mineral") { df_fertilize <- data.frame( year = 2019, doy = 128, @@ -36,7 +36,7 @@ write_new_fert <- function(path_fert, which_type) { C_soluble = 0.0, C_compost = 0.0 ) - } else if (which_type == 'soluble') { + } else if (which_type == "soluble") { df_fertilize <- data.frame( year = 2019, doy = 128, @@ -45,7 +45,7 @@ write_new_fert <- function(path_fert, which_type) { C_soluble = 200.0, C_compost = 0.0 ) - } else if (which_type == 'compost') { + } else if (which_type == "compost") { df_fertilize <- data.frame( year = 2019, doy = 128, @@ -54,7 +54,7 @@ write_new_fert <- function(path_fert, which_type) { C_soluble = 0.0, C_compost = 200.0 ) - } else if (which_type == 'invalid') { + } else if (which_type == "invalid") { df_fertilize <- data.frame( year = 2019, doy = 128, @@ -63,86 +63,89 @@ write_new_fert <- function(path_fert, which_type) { ) } fertilize_file <- path_fert # - write.csv(df_fertilize, fertilize_file, row.names=FALSE) + write.csv(df_fertilize, fertilize_file, row.names = FALSE) } -test_that('two harvests yield more than one', { - met_path <- 'test.met' - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - run_params <- setNames(df_params[,2], df_params[,1]) +test_that("two harvests yield more than one", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + run_params <- setNames(df_params[, 2], df_params[, 1]) outfolder <- mktmpdir() - fert_file <- file.path(outfolder, 'fertilize.csv') - write_new_fert(fert_file, 'mineral') + fert_file <- file.path(outfolder, "fertilize.csv") + write_new_fert(fert_file, "mineral") - harv_file <- file.path(outfolder, 'harvest.csv') + harv_file <- file.path(outfolder, "harvest.csv") write.csv( data.frame( year = 2019, doy = 125 ), - harv_file, row.names=FALSE + harv_file, + row.names = FALSE ) run_BASGRA( met_path, run_params, harv_file, fert_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output_one_harv <- read.csv(file.path(outfolder, 'output_basgra.csv')) + output_one_harv <- read.csv(file.path(outfolder, "output_basgra.csv")) - harv_file <- file.path(outfolder, 'harvest.csv') + harv_file <- file.path(outfolder, "harvest.csv") write.csv( data.frame( year = c(2019, 2019), doy = c(125, 165) ), - harv_file, row.names=FALSE + harv_file, + row.names = FALSE ) run_BASGRA( met_path, run_params, harv_file, fert_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = 
"2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output_two_harv <- read.csv(file.path(outfolder, 'output_basgra.csv')) + output_two_harv <- read.csv(file.path(outfolder, "output_basgra.csv")) expect_gt(sum(output_two_harv$YIELD), sum(output_one_harv$YIELD)) }) -test_that('harvest followed by cut yields same as only harvest but different mean LAI', { - met_path <- 'test.met' - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - run_params <- setNames(df_params[,2], df_params[,1]) +test_that("harvest followed by cut yields same as only harvest but different mean LAI", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + run_params <- setNames(df_params[, 2], df_params[, 1]) outfolder <- mktmpdir() - fert_file <- file.path(outfolder, 'fertilize.csv') - write_new_fert(fert_file, 'mineral') + fert_file <- file.path(outfolder, "fertilize.csv") + write_new_fert(fert_file, "mineral") - harv_file <- file.path(outfolder, 'harvest.csv') + harv_file <- file.path(outfolder, "harvest.csv") write.csv( data.frame( year = 2019, doy = 125, CLAIV = 0.7 ), - harv_file, row.names=FALSE + harv_file, + row.names = FALSE ) run_BASGRA( met_path, run_params, harv_file, fert_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output_only_harv <- read.csv(file.path(outfolder, 'output_basgra.csv')) + output_only_harv <- read.csv(file.path(outfolder, "output_basgra.csv")) - harv_file <- file.path(outfolder, 'harvest.csv') + harv_file <- file.path(outfolder, "harvest.csv") write.csv( data.frame( year = c(2019, 2019), @@ -150,50 +153,52 @@ test_that('harvest followed by cut yields same as only harvest but different mea CLAIV = c(0.7, 0.7), cut_only = c(0, 1) ), - harv_file, row.names=FALSE + harv_file, + row.names = FALSE ) run_BASGRA( met_path, run_params, harv_file, fert_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file - write_raw_output = TRUE + write_raw_output = TRUE ) - output_harv_cut <- read.csv(file.path(outfolder, 'output_basgra.csv')) + output_harv_cut <- read.csv(file.path(outfolder, "output_basgra.csv")) expect_equal(sum(output_only_harv$YIELD), sum(output_harv_cut$YIELD)) expect_equal(sum(output_only_harv$FHARVC), sum(output_harv_cut$FHARVC)) expect_false(mean(output_only_harv$LAI) == mean(output_harv_cut$LAI)) }) -test_that('changing CLAIV changes LAI and yield', { - met_path <- 'test.met' - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - run_params <- setNames(df_params[,2], df_params[,1]) +test_that("changing CLAIV changes LAI and yield", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + run_params <- setNames(df_params[, 2], df_params[, 1]) outfolder <- mktmpdir() - fert_file <- file.path(outfolder, 'fertilize.csv') - write_new_fert(fert_file, 'mineral') - harv_file <- file.path(outfolder, 'harvest.csv') + fert_file <- file.path(outfolder, "fertilize.csv") + write_new_fert(fert_file, "mineral") + harv_file <- file.path(outfolder, "harvest.csv") 
write.csv( data.frame( year = 2019, doy = 125, CLAIV = 1.0 ), - harv_file, row.names=FALSE + harv_file, + row.names = FALSE ) run_BASGRA( met_path, run_params, harv_file, fert_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output_high <- read.csv(file.path(outfolder, 'output_basgra.csv')) + output_high <- read.csv(file.path(outfolder, "output_basgra.csv")) write.csv( data.frame( @@ -201,45 +206,47 @@ test_that('changing CLAIV changes LAI and yield', { doy = 125, CLAIV = 0.001 ), - harv_file, row.names=FALSE + harv_file, + row.names = FALSE ) run_BASGRA( met_path, run_params, harv_file, fert_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output_low <- read.csv(file.path(outfolder, 'output_basgra.csv')) - + output_low <- read.csv(file.path(outfolder, "output_basgra.csv")) + expect_true(any(output_high$LAI > output_low$LAI)) # for a single harvest, we expect a higher yield with lower CLAIV expect_gt(sum(output_low$FHARVC), sum(output_high$FHARVC)) }) -test_that('invalid harvest file raises an error', { - met_path <- 'test.met' - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - run_params <- setNames(df_params[,2], df_params[,1]) +test_that("invalid harvest file raises an error", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + run_params <- setNames(df_params[, 2], df_params[, 1]) outfolder <- mktmpdir() - fert_file <- file.path(outfolder, 'fertilize.csv') - write_new_fert(fert_file, 'mineral') - harv_file <- file.path(outfolder, 'harvest.csv') + fert_file <- file.path(outfolder, "fertilize.csv") + write_new_fert(fert_file, "mineral") + harv_file <- file.path(outfolder, "harvest.csv") write.csv( data.frame( year = 2019, doy = 125, garbage = 1.0 ), - harv_file, row.names=FALSE + harv_file, + row.names = FALSE ) expect_error( run_BASGRA( met_path, run_params, harv_file, fert_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39 # match the test meteo data file @@ -247,107 +254,107 @@ test_that('invalid harvest file raises an error', { ) }) -test_that('model produces some output', { - met_path <- 'test.met' - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - run_params <- setNames(df_params[,2], df_params[,1]) +test_that("model produces some output", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + run_params <- setNames(df_params[, 2], df_params[, 1]) outfolder <- mktmpdir() - harvest_file <- file.path(outfolder, 'harvest.csv') - fertilize_file <- file.path(outfolder, 'fertilize.csv') + harvest_file <- file.path(outfolder, "harvest.csv") + fertilize_file <- file.path(outfolder, "fertilize.csv") write_harv_fert(harvest_file, fertilize_file) run_BASGRA( met_path, run_params, harvest_file, fertilize_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the 
test meteo data file write_raw_output = TRUE ) - - output <- read.csv(file.path(outfolder, 'output_basgra.csv')) + + output <- read.csv(file.path(outfolder, "output_basgra.csv")) expect_true(any(output$LAI > 0)) expect_true(all(!is.na(output))) }) -test_that('Fertilizer C inputs are zeroed without Yasso', { - met_path <- 'test.met' - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - run_params <- setNames(df_params[,2], df_params[,1]) - +test_that("Fertilizer C inputs are zeroed without Yasso", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + run_params <- setNames(df_params[, 2], df_params[, 1]) + outfolder <- mktmpdir() - harvest_file <- file.path(outfolder, 'harvest.csv') - fert_file_mineral <- file.path(outfolder, 'fert.mineral.csv') + harvest_file <- file.path(outfolder, "harvest.csv") + fert_file_mineral <- file.path(outfolder, "fert.mineral.csv") write_harv_fert(harvest_file, fert_file_mineral) - fert_file_soluble <- file.path(outfolder, 'fert.soluble.csv') - write_new_fert(fert_file_soluble, 'soluble') + fert_file_soluble <- file.path(outfolder, "fert.soluble.csv") + write_new_fert(fert_file_soluble, "soluble") run_BASGRA( met_path, run_params, harvest_file, fert_file_soluble, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output <- read.csv(file.path(outfolder, 'output_basgra.csv')) + output <- read.csv(file.path(outfolder, "output_basgra.csv")) expect_true(all(output$FSOILAMDC == 0.0)) }) -test_that('Fertilizer C inputs work consistently with Yasso', { - met_path <- 'test.met' - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - df_params[df_params[,1] == 'use_yasso', 2] <- 1 - df_params[df_params[,1] == 'use_nitrogen', 2] <- 0 - - run_params <- setNames(df_params[,2], df_params[,1]) - +test_that("Fertilizer C inputs work consistently with Yasso", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + df_params[df_params[, 1] == "use_yasso", 2] <- 1 + df_params[df_params[, 1] == "use_nitrogen", 2] <- 0 + + run_params <- setNames(df_params[, 2], df_params[, 1]) + outfolder <- mktmpdir() - harvest_file <- file.path(outfolder, 'harvest.csv') - fert_file_mineral <- file.path(outfolder, 'fert.mineral.csv') + harvest_file <- file.path(outfolder, "harvest.csv") + fert_file_mineral <- file.path(outfolder, "fert.mineral.csv") write_harv_fert(harvest_file, fert_file_mineral) - + run_BASGRA( met_path, run_params, harvest_file, fert_file_mineral, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file - write_raw_output = TRUE + write_raw_output = TRUE ) - output_mineral <- read.csv(file.path(outfolder, 'output_basgra.csv')) + output_mineral <- read.csv(file.path(outfolder, "output_basgra.csv")) - fert_file_soluble <- file.path(outfolder, 'fert.soluble.csv') - write_new_fert(fert_file_soluble, 'soluble') + fert_file_soluble <- file.path(outfolder, "fert.soluble.csv") + write_new_fert(fert_file_soluble, "soluble") run_BASGRA( met_path, run_params, harvest_file, fert_file_soluble, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + 
end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output_soluble <- read.csv(file.path(outfolder, 'output_basgra.csv')) - + output_soluble <- read.csv(file.path(outfolder, "output_basgra.csv")) + expect_true(all(output_soluble$CSOM_W >= output_mineral$CSOM_W)) expect_true(any(output_soluble$CSOM_W > output_mineral$CSOM_W)) expect_equal(sum(output_soluble$FSOILAMDC), 200.0) - - fert_file_compost <- file.path(outfolder, 'fert.compost.csv') - write_new_fert(fert_file_compost, 'compost') + + fert_file_compost <- file.path(outfolder, "fert.compost.csv") + write_new_fert(fert_file_compost, "compost") run_BASGRA( met_path, run_params, harvest_file, fert_file_compost, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output_compost <- read.csv(file.path(outfolder, 'output_basgra.csv')) + output_compost <- read.csv(file.path(outfolder, "output_basgra.csv")) expect_true(all(output_compost$CSOM_A >= output_mineral$CSOM_A)) expect_true(any(output_compost$CSOM_A > output_mineral$CSOM_A)) expect_true(all(output_compost$CSOM_N >= output_mineral$CSOM_N)) @@ -357,14 +364,14 @@ test_that('Fertilizer C inputs work consistently with Yasso', { expect_true(any(output_compost$CSOM_A > output_soluble$CSOM_A)) expect_equal(sum(output_compost$FSOILAMDC), 200.0) - - fert_file_bad <- file.path(outfolder, 'fert.bad.csv') - write_new_fert(fert_file_bad, 'invalid') + + fert_file_bad <- file.path(outfolder, "fert.bad.csv") + write_new_fert(fert_file_bad, "invalid") expect_error( run_BASGRA( met_path, run_params, harvest_file, fert_file_bad, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39 # match the test meteo data file @@ -373,68 +380,68 @@ test_that('Fertilizer C inputs work consistently with Yasso', { }) -test_that('new fertilization file format matches the old', { - met_path <- 'test.met' - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - run_params_basic <- setNames(df_params[,2], df_params[,1]) +test_that("new fertilization file format matches the old", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + run_params_basic <- setNames(df_params[, 2], df_params[, 1]) + + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + df_params[df_params[, 1] == "use_yasso", 2] <- 1 + df_params[df_params[, 1] == "use_nitrogen", 2] <- 0 + run_params_yasso <- setNames(df_params[, 2], df_params[, 1]) - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - df_params[df_params[,1] == 'use_yasso', 2] <- 1 - df_params[df_params[,1] == 'use_nitrogen', 2] <- 0 - run_params_yasso <- setNames(df_params[,2], df_params[,1]) - outfolder <- mktmpdir() - harvest_file <- file.path(outfolder, 'harvest.csv') - fert_file_old <- file.path(outfolder, 'fert.old.csv') + harvest_file <- file.path(outfolder, "harvest.csv") + fert_file_old <- file.path(outfolder, "fert.old.csv") write_harv_fert(harvest_file, fert_file_old) - fert_file_mineral <- file.path(outfolder, 'fert.mineral.csv') - write_new_fert(fert_file_mineral, 'mineral') + fert_file_mineral <- file.path(outfolder, "fert.mineral.csv") + 
write_new_fert(fert_file_mineral, "mineral") run_BASGRA( met_path, run_params_basic, harvest_file, fert_file_old, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output_old_fert <- read.csv(file.path(outfolder, 'output_basgra.csv')) + output_old_fert <- read.csv(file.path(outfolder, "output_basgra.csv")) run_BASGRA( met_path, run_params_basic, harvest_file, fert_file_mineral, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output_mineral <- read.csv(file.path(outfolder, 'output_basgra.csv')) - expect_equal(output_old_fert, output_mineral) + output_mineral <- read.csv(file.path(outfolder, "output_basgra.csv")) + expect_equal(output_old_fert, output_mineral) }) -test_that('model shows no nitrogen limitation when run with use_nitrogen = 0', { - met_path <- 'test.met' - #df_params <- read.csv('BASGRA_params_no_nitrogen.csv') - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - df_params[df_params[,1] == 'use_nitrogen', 2] <- 0 - run_params <- setNames(df_params[,2], df_params[,1]) +test_that("model shows no nitrogen limitation when run with use_nitrogen = 0", { + met_path <- "test.met" + # df_params <- read.csv('BASGRA_params_no_nitrogen.csv') + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + df_params[df_params[, 1] == "use_nitrogen", 2] <- 0 + run_params <- setNames(df_params[, 2], df_params[, 1]) outfolder <- mktmpdir() - harvest_file <- file.path(outfolder, 'harvest.csv') - fertilize_file <- file.path(outfolder, 'fertilize.csv') + harvest_file <- file.path(outfolder, "harvest.csv") + fertilize_file <- file.path(outfolder, "fertilize.csv") write_harv_fert(harvest_file, fertilize_file) run_BASGRA( met_path, run_params, harvest_file, fertilize_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - - output <- read.csv(file.path(outfolder, 'output_basgra.csv')) + + output <- read.csv(file.path(outfolder, "output_basgra.csv")) expect_equal(output$fNgrowth, rep(1.0, 365)) # if fNgrowth == 1 growth is not N-limited }) @@ -468,68 +475,68 @@ test_that('model shows no nitrogen limitation when run with use_nitrogen = 0', { ## # trying to capture the output, bit of a kludge but maybe works on linux and mac ## suppressWarnings(messages <- system(sprintf('echo "load(\\"%s\\"); run_wrapper()"|R --vanilla 2>&1', wrap_file), intern=TRUE)) ## expect_true(length(grep('ERROR STOP', messages)) > 0) -## expect_false(file.exists(file.path(outfolder, 'output_basgra.csv'))) +## expect_false(file.exists(file.path(outfolder, 'output_basgra.csv'))) ## }) -test_that('model produces reasonable yasso-specific output when use_yasso = 1 and use_nitrogen = 0', { - met_path <- 'test.met' - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - df_params[df_params[,1] == 'use_yasso', 2] <- 1 - df_params[df_params[,1] == 'use_nitrogen', 2] <- 0 - run_params <- setNames(df_params[,2], df_params[,1]) +test_that("model produces reasonable yasso-specific output when use_yasso 
= 1 and use_nitrogen = 0", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + df_params[df_params[, 1] == "use_yasso", 2] <- 1 + df_params[df_params[, 1] == "use_nitrogen", 2] <- 0 + run_params <- setNames(df_params[, 2], df_params[, 1]) outfolder <- mktmpdir() - harvest_file <- file.path(outfolder, 'harvest.csv') - fertilize_file <- file.path(outfolder, 'fertilize.csv') + harvest_file <- file.path(outfolder, "harvest.csv") + fertilize_file <- file.path(outfolder, "fertilize.csv") write_harv_fert(harvest_file, fertilize_file) run_BASGRA( met_path, run_params, harvest_file, fertilize_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output <- read.csv(file.path(outfolder, 'output_basgra.csv')) - expect_true(all(output[,c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')] > 0)) - expect_true(all(output[,c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')] < 1e6)) + output <- read.csv(file.path(outfolder, "output_basgra.csv")) + expect_true(all(output[, c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N", "CSOM_H", "NSOM")] > 0)) + expect_true(all(output[, c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N", "CSOM_H", "NSOM")] < 1e6)) expect_true(all(!is.na(output))) }) -test_that('model produces reasonable yasso-specific output when use_yasso = 1 and use_nitrogen = 1', { - met_path <- 'test.met' - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - df_params[df_params[,1] == 'use_yasso', 2] <- 1 - df_params[df_params[,1] == 'use_nitrogen', 2] <- 1 - run_params <- setNames(df_params[,2], df_params[,1]) +test_that("model produces reasonable yasso-specific output when use_yasso = 1 and use_nitrogen = 1", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + df_params[df_params[, 1] == "use_yasso", 2] <- 1 + df_params[df_params[, 1] == "use_nitrogen", 2] <- 1 + run_params <- setNames(df_params[, 2], df_params[, 1]) outfolder <- mktmpdir() - harvest_file <- file.path(outfolder, 'harvest.csv') - fertilize_file <- file.path(outfolder, 'fertilize.csv') + harvest_file <- file.path(outfolder, "harvest.csv") + fertilize_file <- file.path(outfolder, "fertilize.csv") write_harv_fert(harvest_file, fertilize_file) run_BASGRA( met_path, run_params, harvest_file, fertilize_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output <- read.csv(file.path(outfolder, 'output_basgra.csv')) - expect_true(all(output[,c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')] > 0)) - expect_true(all(output[,c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')] < 1e6)) - expect_true(all(output[,'Nmineralisation'] > 0)) - expect_true(all(output[,'NMIN'] >= 0)) + output <- read.csv(file.path(outfolder, "output_basgra.csv")) + expect_true(all(output[, c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N", "CSOM_H", "NSOM")] > 0)) + expect_true(all(output[, c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N", "CSOM_H", "NSOM")] < 1e6)) + expect_true(all(output[, "Nmineralisation"] > 0)) + expect_true(all(output[, "NMIN"] >= 0)) expect_true(all(!is.na(output))) }) -test_that('NSH is positive', { - met_path <- 'test.met' - 
df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - df_params[df_params[,1] == 'use_yasso', 2] <- 1 - df_params[df_params[,1] == 'use_nitrogen', 2] <- 1 - df_params[df_params[,1] == 'NMIN0', 2] <- 0.0 - run_params <- setNames(df_params[,2], df_params[,1]) +test_that("NSH is positive", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + df_params[df_params[, 1] == "use_yasso", 2] <- 1 + df_params[df_params[, 1] == "use_nitrogen", 2] <- 1 + df_params[df_params[, 1] == "NMIN0", 2] <- 0.0 + run_params <- setNames(df_params[, 2], df_params[, 1]) outfolder <- mktmpdir() df_fertilize <- data.frame( @@ -537,191 +544,191 @@ test_that('NSH is positive', { doy = 128, amount = 1e-6 ) - no_fertilize_file <- file.path(outfolder, 'no-fertilize.csv') - write.csv(df_fertilize, no_fertilize_file, row.names=FALSE) + no_fertilize_file <- file.path(outfolder, "no-fertilize.csv") + write.csv(df_fertilize, no_fertilize_file, row.names = FALSE) - harvest_file <- file.path(outfolder, 'harvest.csv') - fertilize_file <- file.path(outfolder, 'fertilize.csv') + harvest_file <- file.path(outfolder, "harvest.csv") + fertilize_file <- file.path(outfolder, "fertilize.csv") write_harv_fert(harvest_file, fertilize_file) run_BASGRA( met_path, run_params, harvest_file, no_fertilize_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output <- read.csv(file.path(outfolder, 'output_basgra.csv')) - expect_true(all(output[, 'NSH'] > 0)) + output <- read.csv(file.path(outfolder, "output_basgra.csv")) + expect_true(all(output[, "NSH"] > 0)) }) -test_that('Netcdf output is consistent with the raw output for certain variables', { - met_path <- 'test.met' - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - df_params[df_params[,1] == 'use_yasso', 2] <- 1 - df_params[df_params[,1] == 'use_nitrogen', 2] <- 0 - run_params <- setNames(df_params[,2], df_params[,1]) +test_that("Netcdf output is consistent with the raw output for certain variables", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + df_params[df_params[, 1] == "use_yasso", 2] <- 1 + df_params[df_params[, 1] == "use_nitrogen", 2] <- 0 + run_params <- setNames(df_params[, 2], df_params[, 1]) outfolder <- mktmpdir() - harvest_file <- file.path(outfolder, 'harvest.csv') - fertilize_file <- file.path(outfolder, 'fertilize.csv') + harvest_file <- file.path(outfolder, "harvest.csv") + fertilize_file <- file.path(outfolder, "fertilize.csv") write_harv_fert(harvest_file, fertilize_file) run_BASGRA( met_path, run_params, harvest_file, fertilize_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output.raw <- read.csv(file.path(outfolder, 'output_basgra.csv')) - nc <- ncdf4::nc_open(file.path(outfolder, '2019.nc')) + output.raw <- read.csv(file.path(outfolder, "output_basgra.csv")) + nc <- ncdf4::nc_open(file.path(outfolder, "2019.nc")) - fastc_nc <- ncdf4::ncvar_get(nc, 'fast_soil_pool_carbon_content') - fastc_raw <- rowSums(output.raw[,c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N')]) - expect_equal(as.vector(fastc_nc), 
as.vector(fastc_raw*1e-3)) # g vs kg + fastc_nc <- ncdf4::ncvar_get(nc, "fast_soil_pool_carbon_content") + fastc_raw <- rowSums(output.raw[, c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N")]) + expect_equal(as.vector(fastc_nc), as.vector(fastc_raw * 1e-3)) # g vs kg - slowc_nc <- ncdf4::ncvar_get(nc, 'slow_soil_pool_carbon_content') - slowc_raw <- output.raw[,'CSOM_H'] - expect_equal(as.vector(slowc_nc), as.vector(slowc_raw*1e-3)) + slowc_nc <- ncdf4::ncvar_get(nc, "slow_soil_pool_carbon_content") + slowc_raw <- output.raw[, "CSOM_H"] + expect_equal(as.vector(slowc_nc), as.vector(slowc_raw * 1e-3)) - totc_nc <- ncdf4::ncvar_get(nc, 'TotSoilCarb') - totc_raw <- rowSums(output.raw[, c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N', 'CSOM_H')]) - expect_equal(as.vector(totc_nc), as.vector(totc_raw*1e-3)) + totc_nc <- ncdf4::ncvar_get(nc, "TotSoilCarb") + totc_raw <- rowSums(output.raw[, c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N", "CSOM_H")]) + expect_equal(as.vector(totc_nc), as.vector(totc_raw * 1e-3)) }) -test_that('The yasso_rate_pc parameter has a reasonable effect', { - met_path <- 'test.met' - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - df_params[df_params[,1] == 'use_yasso', 2] <- 1 - df_params[df_params[,1] == 'use_nitrogen', 2] <- 0 - run_params <- setNames(df_params[,2], df_params[,1]) +test_that("The yasso_rate_pc parameter has a reasonable effect", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + df_params[df_params[, 1] == "use_yasso", 2] <- 1 + df_params[df_params[, 1] == "use_nitrogen", 2] <- 0 + run_params <- setNames(df_params[, 2], df_params[, 1]) outfolder <- mktmpdir() - harvest_file <- file.path(outfolder, 'harvest.csv') - fertilize_file <- file.path(outfolder, 'fertilize.csv') + harvest_file <- file.path(outfolder, "harvest.csv") + fertilize_file <- file.path(outfolder, "fertilize.csv") write_harv_fert(harvest_file, fertilize_file) run_BASGRA( met_path, run_params, harvest_file, fertilize_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output <- read.csv(file.path(outfolder, 'output_basgra.csv')) + output <- read.csv(file.path(outfolder, "output_basgra.csv")) run_params_mod <- run_params - run_params_mod['yasso_rate_pc'] <- -1.0 # the component has negative values so this speeds up the decomp + run_params_mod["yasso_rate_pc"] <- -1.0 # the component has negative values so this speeds up the decomp run_BASGRA( met_path, run_params_mod, harvest_file, fertilize_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output_mod <- read.csv(file.path(outfolder, 'output_basgra.csv')) + output_mod <- read.csv(file.path(outfolder, "output_basgra.csv")) # One could have thought that output_mod should have a higher Rsoil. But it doesn't # always, because also the initial state changes. Now we'll just test the # initialization. 
- expect_gt(output_mod[1, 'CSOM_H'], output[1, 'CSOM_H']) - awen = c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N') + expect_gt(output_mod[1, "CSOM_H"], output[1, "CSOM_H"]) + awen <- c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N") expect_lt(sum(output_mod[1, awen]), sum(output[1, awen])) }) -test_that('The yasso ICs are handled consistently', { - met_path <- 'test.met' - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - df_params[df_params[,1] == 'use_yasso', 2] <- 1 - df_params[df_params[,1] == 'use_nitrogen', 2] <- 1 - - run_params <- setNames(df_params[,2], df_params[,1]) +test_that("The yasso ICs are handled consistently", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + df_params[df_params[, 1] == "use_yasso", 2] <- 1 + df_params[df_params[, 1] == "use_nitrogen", 2] <- 1 + + run_params <- setNames(df_params[, 2], df_params[, 1]) yasso_state <- c( 849, 95, 51, 1092, 14298, 1536 ) - run_params[c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')] <- yasso_state + run_params[c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N", "CSOM_H", "NSOM")] <- yasso_state outfolder <- mktmpdir() - harvest_file <- file.path(outfolder, 'harvest.csv') - fertilize_file <- file.path(outfolder, 'fertilize.csv') + harvest_file <- file.path(outfolder, "harvest.csv") + fertilize_file <- file.path(outfolder, "fertilize.csv") write_harv_fert(harvest_file, fertilize_file) run_BASGRA( met_path, run_params, harvest_file, fertilize_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output <- read.csv(file.path(outfolder, 'output_basgra.csv')) - output_state = as.numeric(output[1,c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')]) + output <- read.csv(file.path(outfolder, "output_basgra.csv")) + output_state <- as.numeric(output[1, c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N", "CSOM_H", "NSOM")]) expect_equal( output_state, yasso_state, - tolerance=2 # needed because the end of time step state is in output + tolerance = 2 # needed because the end of time step state is in output ) }) -test_that('The yasso ICs are ignored if negative', { - met_path <- 'test.met' - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - df_params[df_params[,1] == 'use_yasso', 2] <- 1 - df_params[df_params[,1] == 'use_nitrogen', 2] <- 1 - - run_params <- setNames(df_params[,2], df_params[,1]) +test_that("The yasso ICs are ignored if negative", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + df_params[df_params[, 1] == "use_yasso", 2] <- 1 + df_params[df_params[, 1] == "use_nitrogen", 2] <- 1 + + run_params <- setNames(df_params[, 2], df_params[, 1]) yasso_state <- rep(-1, 6) - run_params[c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')] <- yasso_state + run_params[c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N", "CSOM_H", "NSOM")] <- yasso_state outfolder <- mktmpdir() - harvest_file <- file.path(outfolder, 'harvest.csv') - fertilize_file <- file.path(outfolder, 'fertilize.csv') + harvest_file <- file.path(outfolder, "harvest.csv") + fertilize_file <- file.path(outfolder, "fertilize.csv") write_harv_fert(harvest_file, fertilize_file) run_BASGRA( met_path, run_params, harvest_file, fertilize_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = 
"2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output <- read.csv(file.path(outfolder, 'output_basgra.csv')) - output_state = as.numeric(output[1,c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N','CSOM_H','NSOM')]) + output <- read.csv(file.path(outfolder, "output_basgra.csv")) + output_state <- as.numeric(output[1, c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N", "CSOM_H", "NSOM")]) expect_gt(sum(output_state), 1000) # check that met values are reasonable from the defaults - output_met <- as.numeric(output[1,c('TEMPR30', 'PRECIP30')]) - expect_equal(output_met[1], 10, tolerance=20) - expect_equal(output_met[2], 1, tolerance=5) + output_met <- as.numeric(output[1, c("TEMPR30", "PRECIP30")]) + expect_equal(output_met[1], 10, tolerance = 20) + expect_equal(output_met[2], 1, tolerance = 5) }) -test_that('The smoothed tempr and precip are read from params', { - met_path <- 'test.met' - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA')) - df_params[df_params[,1] == 'use_yasso', 2] <- 1 - df_params[df_params[,1] == 'use_nitrogen', 2] <- 0 - - run_params <- setNames(df_params[,2], df_params[,1]) - met_values = c(-1e5, 1e5) - run_params[c('TEMPR30', 'PRECIP30')] <- met_values +test_that("The smoothed tempr and precip are read from params", { + met_path <- "test.met" + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA")) + df_params[df_params[, 1] == "use_yasso", 2] <- 1 + df_params[df_params[, 1] == "use_nitrogen", 2] <- 0 + + run_params <- setNames(df_params[, 2], df_params[, 1]) + met_values <- c(-1e5, 1e5) + run_params[c("TEMPR30", "PRECIP30")] <- met_values outfolder <- mktmpdir() - harvest_file <- file.path(outfolder, 'harvest.csv') - fertilize_file <- file.path(outfolder, 'fertilize.csv') + harvest_file <- file.path(outfolder, "harvest.csv") + fertilize_file <- file.path(outfolder, "fertilize.csv") write_harv_fert(harvest_file, fertilize_file) run_BASGRA( met_path, run_params, harvest_file, fertilize_file, - start_date = '2019-01-01', - end_date = '2019-12-31 23:00', + start_date = "2019-01-01", + end_date = "2019-12-31 23:00", outdir = outfolder, sitelat = 60.29, sitelon = 22.39, # match the test meteo data file write_raw_output = TRUE ) - output <- read.csv(file.path(outfolder, 'output_basgra.csv')) - output_values <- as.numeric(output[1,c('TEMPR30', 'PRECIP30')]) + output <- read.csv(file.path(outfolder, "output_basgra.csv")) + output_values <- as.numeric(output[1, c("TEMPR30", "PRECIP30")]) expect_lt(output_values[1], -1e2) expect_gt(output_values[2], 1e2) }) diff --git a/models/basgra/tests/testthat/test.write.config.R b/models/basgra/tests/testthat/test.write.config.R index e467afbcf2b..a3c066d3568 100644 --- a/models/basgra/tests/testthat/test.write.config.R +++ b/models/basgra/tests/testthat/test.write.config.R @@ -7,8 +7,8 @@ teardown(unlink(outfolder, recursive = TRUE)) basesettings <- list( rundir = outfolder, host = list( - rundir=outfolder, - outdir=outfolder + rundir = outfolder, + outdir = outfolder ), run = list( site = list( @@ -17,74 +17,73 @@ basesettings <- list( ), inputs = list( met = list( - path = 'dummy' + path = "dummy" ), harvest = list( - path = 'dummy' + path = "dummy" ), fertilize = list( - path = 'dummy' + path = "dummy" ) ), - start.date = 'start_date', - end.date = 'end_date' + start.date = "start_date", + end.date = "end_date" ), model = list() - ) create_job_template <- function(content) { # 
write.config has a handy feature to override the default job.template. # We'll use this for testing individual items in it. - filename <- file.path(outfolder, 'job.template') + filename <- file.path(outfolder, "job.template") write(content, filename) filename } -test_that('write.config retrieves default parameters from the file', { - jobtemplate <- create_job_template('@RUN_PARAMS@') +test_that("write.config retrieves default parameters from the file", { + jobtemplate <- create_job_template("@RUN_PARAMS@") settings <- basesettings settings$model$jobtemplate <- jobtemplate - trait.values = list(list()) # no traits given - params.from.file <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA'), col.names=c('name', 'value')) + trait.values <- list(list()) # no traits given + params.from.file <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA"), col.names = c("name", "value")) default <- NULL run.id <- 9999 dir.create(file.path(outfolder, run.id)) write.config.BASGRA(defaults, trait.values, settings, run.id) - job.file <- file.path(outfolder, run.id, 'job.sh') - content <- paste(readLines(job.file), collapse='\n') - param.vector <- eval(parse(text=content)) + job.file <- file.path(outfolder, run.id, "job.sh") + content <- paste(readLines(job.file), collapse = "\n") + param.vector <- eval(parse(text = content)) expect_equal(params.from.file$name, names(param.vector)) # a few of the parameters are redundant and get reset byt write.config based on the other parameters. # these parameters need to be set consistently in the default parameter file or otherwise this test fails. - expect_equal(params.from.file$value, setNames(param.vector, NULL), tolerance=1e-4) + expect_equal(params.from.file$value, setNames(param.vector, NULL), tolerance = 1e-4) }) -test_that('default param path from settings overrides the global default', { - jobtemplate <- create_job_template('@RUN_PARAMS@') +test_that("default param path from settings overrides the global default", { + jobtemplate <- create_job_template("@RUN_PARAMS@") settings <- basesettings settings$model$jobtemplate <- jobtemplate - trait.values = list(list()) # no traits given - param_path <- file.path(outfolder, 'modified.defaults.csv') - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA'), col.names=c('name', 'value')) - df_params[df_params$name == 'NMIN0', 'value'] <- -9991 - write.csv(df_params, param_path, row.names=FALSE) + trait.values <- list(list()) # no traits given + param_path <- file.path(outfolder, "modified.defaults.csv") + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA"), col.names = c("name", "value")) + df_params[df_params$name == "NMIN0", "value"] <- -9991 + write.csv(df_params, param_path, row.names = FALSE) settings$run$inputs$defaults$path <- param_path - run.id = 9998 + run.id <- 9998 dir.create(file.path(outfolder, run.id)) write.config.BASGRA(defaults, trait.values, settings, run.id) - job.file <- file.path(outfolder, run.id, 'job.sh') - content <- paste(readLines(job.file), collapse='\n') - param.vector <- eval(parse(text=content)) - expect_equal(setNames(param.vector['NMIN0'], NULL), -9991) + job.file <- file.path(outfolder, run.id, "job.sh") + content <- paste(readLines(job.file), collapse = "\n") + param.vector <- eval(parse(text = content)) + expect_equal(setNames(param.vector["NMIN0"], NULL), -9991) }) -test_that('write.config modifies some trait values', { - jobtemplate <- create_job_template('@RUN_PARAMS@') +test_that("write.config 
modifies some trait values", { + jobtemplate <- create_job_template("@RUN_PARAMS@") settings <- basesettings settings$model$jobtemplate <- jobtemplate - trait.values = list( + trait.values <- list( list( c2n_fineroot = 50.0, leaf_width = 6.1 @@ -94,86 +93,86 @@ test_that('write.config modifies some trait values', { run.id <- 9999 dir.create(file.path(outfolder, run.id), showWarnings = FALSE) write.config.BASGRA(defaults, trait.values, settings, run.id) - job.file <- file.path(outfolder, run.id, 'job.sh') - content <- paste(readLines(job.file), collapse='\n') - param.vector <- eval(parse(text=content)) - expect_equal(param.vector['NCR'], c(NCR = 0.02)) - expect_equal(param.vector['LFWIDV'], c(LFWIDV = 6.1e-3)) # in meters + job.file <- file.path(outfolder, run.id, "job.sh") + content <- paste(readLines(job.file), collapse = "\n") + param.vector <- eval(parse(text = content)) + expect_equal(param.vector["NCR"], c(NCR = 0.02)) + expect_equal(param.vector["LFWIDV"], c(LFWIDV = 6.1e-3)) # in meters }) -test_that('the force column in defaulfs.csv keep the default parameters even if pecan provides trait values', { - jobtemplate <- create_job_template('@RUN_PARAMS@') +test_that("the force column in defaulfs.csv keep the default parameters even if pecan provides trait values", { + jobtemplate <- create_job_template("@RUN_PARAMS@") settings <- basesettings settings$model$jobtemplate <- jobtemplate - trait.values = list( + trait.values <- list( list( c2n_fineroot = 50.0, leaf_width = 6.1 ) ) - param_path <- file.path(outfolder, 'modified.defaults.csv') - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA'), col.names=c('name', 'value')) - df_params$force = rep(FALSE, nrow(df_params)) - df_params[df_params$name == 'LFWIDV', 'force'] <- TRUE - leaf_width_value <- df_params[df_params$name=='LFWIDV', 'value'] - write.csv(df_params, param_path, row.names=FALSE) + param_path <- file.path(outfolder, "modified.defaults.csv") + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA"), col.names = c("name", "value")) + df_params$force <- rep(FALSE, nrow(df_params)) + df_params[df_params$name == "LFWIDV", "force"] <- TRUE + leaf_width_value <- df_params[df_params$name == "LFWIDV", "value"] + write.csv(df_params, param_path, row.names = FALSE) settings$run$inputs$defaults$path <- param_path - run.id = 9998 + run.id <- 9998 write.config.BASGRA(defaults, trait.values, settings, run.id) - job.file <- file.path(outfolder, run.id, 'job.sh') - content <- paste(readLines(job.file), collapse='\n') - param.vector <- eval(parse(text=content)) - expect_equal(param.vector['LFWIDV'], c(LFWIDV=leaf_width_value)) # deafult value - expect_equal(param.vector['NCR'], c(NCR=0.02)) # trait value + job.file <- file.path(outfolder, run.id, "job.sh") + content <- paste(readLines(job.file), collapse = "\n") + param.vector <- eval(parse(text = content)) + expect_equal(param.vector["LFWIDV"], c(LFWIDV = leaf_width_value)) # deafult value + expect_equal(param.vector["NCR"], c(NCR = 0.02)) # trait value }) -test_that('the force column values are interpreted flexibly', { - jobtemplate <- create_job_template('@RUN_PARAMS@') +test_that("the force column values are interpreted flexibly", { + jobtemplate <- create_job_template("@RUN_PARAMS@") settings <- basesettings settings$model$jobtemplate <- jobtemplate - param_path <- file.path(outfolder, 'modified.defaults.csv') - df_params <- read.csv(system.file('BASGRA_params.csv', package='PEcAn.BASGRA'), col.names=c('name', 'value')) + param_path <- 
file.path(outfolder, "modified.defaults.csv") + df_params <- read.csv(system.file("BASGRA_params.csv", package = "PEcAn.BASGRA"), col.names = c("name", "value")) settings$run$inputs$defaults$path <- param_path - run.id = 9998 - flagvalue <- -999999 + run.id <- 9998 + flagvalue <- -999999 trait.values <- list(list(leaf_width = 6.1)) - job.file <- file.path(outfolder, run.id, 'job.sh') - df_params[,2] <-flagvalue - - df_params$force = rep('True', nrow(df_params)) - write.csv(df_params, param_path, row.names=FALSE) + job.file <- file.path(outfolder, run.id, "job.sh") + df_params[, 2] <- flagvalue + + df_params$force <- rep("True", nrow(df_params)) + write.csv(df_params, param_path, row.names = FALSE) write.config.BASGRA(defaults, trait.values, settings, run.id) - content <- paste(readLines(job.file), collapse='\n') - param.vector <- setNames(eval(parse(text=content)), NULL) + content <- paste(readLines(job.file), collapse = "\n") + param.vector <- setNames(eval(parse(text = content)), NULL) expect_equal(length(param.vector), nrow(df_params)) expect_equal(param.vector, rep(flagvalue, length(param.vector))) - df_params$force = rep(1, nrow(df_params)) - write.csv(df_params, param_path, row.names=FALSE) + df_params$force <- rep(1, nrow(df_params)) + write.csv(df_params, param_path, row.names = FALSE) write.config.BASGRA(defaults, trait.values, settings, run.id) - content <- paste(readLines(job.file), collapse='\n') - param.vector <- setNames(eval(parse(text=content)), NULL) + content <- paste(readLines(job.file), collapse = "\n") + param.vector <- setNames(eval(parse(text = content)), NULL) expect_equal(length(param.vector), nrow(df_params)) expect_true(all(param.vector == flagvalue)) - df_params$force = rep(0, nrow(df_params)) - write.csv(df_params, param_path, row.names=FALSE) + df_params$force <- rep(0, nrow(df_params)) + write.csv(df_params, param_path, row.names = FALSE) write.config.BASGRA(defaults, trait.values, settings, run.id) - content <- paste(readLines(job.file), collapse='\n') - param.vector <- eval(parse(text=content)) + content <- paste(readLines(job.file), collapse = "\n") + param.vector <- eval(parse(text = content)) expect_equal(length(param.vector), nrow(df_params)) - expect_equal(param.vector['LFWIDV'], c(LFWIDV=6.1*1e-3)) # in mm + expect_equal(param.vector["LFWIDV"], c(LFWIDV = 6.1 * 1e-3)) # in mm }) -test_that('YASSO pool ICs pass thru (list)', { - jobtemplate <- create_job_template('@RUN_PARAMS@') +test_that("YASSO pool ICs pass thru (list)", { + jobtemplate <- create_job_template("@RUN_PARAMS@") settings <- basesettings settings$model$jobtemplate <- jobtemplate default <- NULL run.id <- 9999 dir.create(file.path(outfolder, run.id), showWarnings = FALSE) - load(system.file('last_vals_basgra.Rdata', package='PEcAn.BASGRA')) - ic_list <- list( + load(system.file("last_vals_basgra.Rdata", package = "PEcAn.BASGRA")) + ic_list <- list( CSOM_A = 1, CSOM_W = 2, CSOM_E = 3, @@ -184,35 +183,32 @@ test_that('YASSO pool ICs pass thru (list)', { PRECIP30 = 8, test_vals = last_vals ) - write.config.BASGRA(defaults, trait.values=list(), settings=settings, run.id=run.id, IC=ic_list) - job.file <- file.path(outfolder, run.id, 'job.sh') - content <- paste(readLines(job.file), collapse='\n') - param.vector <- eval(parse(text=content)) - state <- param.vector[c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N', 'CSOM_H', 'NSOM', 'TEMPR30', 'PRECIP30')] + write.config.BASGRA(defaults, trait.values = list(), settings = settings, run.id = run.id, IC = ic_list) + job.file <- file.path(outfolder, run.id, 
"job.sh") + content <- paste(readLines(job.file), collapse = "\n") + param.vector <- eval(parse(text = content)) + state <- param.vector[c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N", "CSOM_H", "NSOM", "TEMPR30", "PRECIP30")] expect_equal(setNames(state, NULL), seq(8)) }) -test_that('YASSO pool ICs pass thru (file)', { - jobtemplate <- create_job_template('@RUN_PARAMS@') +test_that("YASSO pool ICs pass thru (file)", { + jobtemplate <- create_job_template("@RUN_PARAMS@") settings <- basesettings settings$model$jobtemplate <- jobtemplate - settings$run$inputs$poolinitcond = list( - path='ic_with_yasso_pools_and_met.nc' + settings$run$inputs$poolinitcond <- list( + path = "ic_with_yasso_pools_and_met.nc" ) default <- NULL run.id <- 9999 dir.create(file.path(outfolder, run.id), showWarnings = FALSE) - write.config.BASGRA(defaults, trait.values=list(), settings=settings, run.id=run.id) - job.file <- file.path(outfolder, run.id, 'job.sh') - content <- paste(readLines(job.file), collapse='\n') - param.vector <- eval(parse(text=content)) - state <- param.vector[c('CSOM_A', 'CSOM_W', 'CSOM_E', 'CSOM_N', 'CSOM_H', 'NSOM', 'TEMPR30', 'PRECIP30')] + write.config.BASGRA(defaults, trait.values = list(), settings = settings, run.id = run.id) + job.file <- file.path(outfolder, run.id, "job.sh") + content <- paste(readLines(job.file), collapse = "\n") + param.vector <- eval(parse(text = content)) + state <- param.vector[c("CSOM_A", "CSOM_W", "CSOM_E", "CSOM_N", "CSOM_H", "NSOM", "TEMPR30", "PRECIP30")] correct_state <- c( 1011.55245115532, 118.194058863007, 62.5131705827862, 1153.2435021838, 14274.4980088834, 1549.22075041662, 12.0709309808298, 1.28496155077734 ) expect_equal(setNames(state, NULL), correct_state) }) - - - diff --git a/models/biocro/R/call_biocro.R b/models/biocro/R/call_biocro.R index 226bcbca1f8..5db0d760ceb 100644 --- a/models/biocro/R/call_biocro.R +++ b/models/biocro/R/call_biocro.R @@ -1,4 +1,3 @@ - l2n <- function(x) lapply(x, as.numeric) # wrapper to encapsulate version-specific logic for BioCro 0.9x @@ -6,7 +5,6 @@ l2n <- function(x) lapply(x, as.numeric) call_biocro_0.9 <- function(WetDat, genus, year_in_run, config, lat, lon, tmp.result, HarvestedYield) { - # Check that all variables are present in the expected order -- # BioGro < 1.0 accesses weather vars by position and DOES NOT check headers. expected_cols <- c("year", "doy", "hour", "[Ss]olar", "Temp", "RH", "WS|windspeed", "precip") diff --git a/models/biocro/R/close_nc_if_open.R b/models/biocro/R/close_nc_if_open.R index 95505a3b6d3..c8e3ae0e05e 100644 --- a/models/biocro/R/close_nc_if_open.R +++ b/models/biocro/R/close_nc_if_open.R @@ -2,11 +2,11 @@ # Used by met2model.BIOCRO to allow closing files cleanly on exit # without keeping them open longer than needed while looping over other years close_nc_if_open <- function(ncfile) { - if (!inherits(ncfile, "ncdf4")) { PEcAn.logger::logger.error( substitute(ncfile), - " is not an NCDF file object. Don't know how to close it.") + " is not an NCDF file object. Don't know how to close it." 
+ ) return(invisible()) } @@ -21,6 +21,7 @@ close_nc_if_open <- function(ncfile) { return(invisible()) } else { PEcAn.logger::logger.error( - "Closing NCDF file", ncfile$filename, "failed with message", res) + "Closing NCDF file", ncfile$filename, "failed with message", res + ) } } diff --git a/models/biocro/R/get_biocro_defaults.R b/models/biocro/R/get_biocro_defaults.R index ac729d90511..c00327a704c 100644 --- a/models/biocro/R/get_biocro_defaults.R +++ b/models/biocro/R/get_biocro_defaults.R @@ -1,9 +1,8 @@ - # Retrieve Biocro:: # # Needs to be a function because `::`(pkg, name) treats its arguments as # literals, so need to fully substitute before calling -from_bc <- function(dfname){ +from_bc <- function(dfname) { do.call(`::`, list("BioCro", dfname)) } @@ -18,17 +17,18 @@ from_bc <- function(dfname){ #' or NULL if genus not found #' @export #' -get_biocro_defaults <- function(genus){ - +get_biocro_defaults <- function(genus) { default_names <- grep( pattern = genus, - x = utils::data(package = "BioCro")$results[,"Item"], + x = utils::data(package = "BioCro")$results[, "Item"], ignore.case = TRUE, - value = TRUE) + value = TRUE + ) if (length(default_names) < 3) { PEcAn.logger::logger.error( - "No default parameter sets for", genus, "found in BioCro") + "No default parameter sets for", genus, "found in BioCro" + ) return(NULL) } @@ -38,23 +38,26 @@ get_biocro_defaults <- function(genus){ # Report the name BioCro uses # We're matching on prefixes only, so this may not be the same as `genus` - biocro_genus_name <- gsub("_modules$", "", genus_module_name) + biocro_genus_name <- gsub("_modules$", "", genus_module_name) if (length(default_names) > 3) { PEcAn.logger::logger.error( "Multiple possible default parameter sets for", genus, "found in BioCro.", "Using '", biocro_genus_name, "', the first one found.", - "If this is wrong, specify a parameter file in settings$pft$constants$file") + "If this is wrong, specify a parameter file in settings$pft$constants$file" + ) } genus_photosynth <- sub( pattern = "^c([34]).*", replacement = "C\\1", - x = from_bc(genus_module_name)$canopy_module_name) + x = from_bc(genus_module_name)$canopy_module_name + ) list( type = list(photosynthesis = genus_photosynth, genus = biocro_genus_name), initial_values = from_bc(genus_init_name), parameters = from_bc(genus_param_name), - modules = from_bc(genus_module_name)) + modules = from_bc(genus_module_name) + ) } diff --git a/models/biocro/R/met2model.BIOCRO.R b/models/biocro/R/met2model.BIOCRO.R index 9e993e59a6b..0bdb49f5b39 100644 --- a/models/biocro/R/met2model.BIOCRO.R +++ b/models/biocro/R/met2model.BIOCRO.R @@ -1,8 +1,7 @@ - .datatable.aware <- TRUE -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# #' Write BioCro met files #' #' Converts a met CF file to a model specific met file. The input @@ -18,52 +17,60 @@ #' @return a dataframe of information about the written file #' @export #' @author Rob Kooper, David LeBauer -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# met2model.BIOCRO <- function(in.path, in.prefix, outfolder, overwrite = FALSE, lat, lon, start_date, end_date, ...) 
{ - start_date <- lubridate::parse_date_time(start_date, tz = "UTC", - orders = c("ymdHMSz", "ymdHMS", "ymdH", "ymd")) - end_date <- lubridate::parse_date_time(end_date, tz = "UTC", - orders = c("ymdHMSz", "ymdHMS", "ymdH", "ymd")) + start_date <- lubridate::parse_date_time(start_date, + tz = "UTC", + orders = c("ymdHMSz", "ymdHMS", "ymdH", "ymd") + ) + end_date <- lubridate::parse_date_time(end_date, + tz = "UTC", + orders = c("ymdHMSz", "ymdHMS", "ymdH", "ymd") + ) dir.create(file.path(outfolder), recursive = TRUE, showWarnings = FALSE) years_wanted <- lubridate::year(start_date):lubridate::year(end_date) res <- list() for (year in years_wanted) { - yrstart = max(lubridate::date(start_date), lubridate::ymd(paste0(year, "-01-01"))) - yrend = min(lubridate::date(end_date), lubridate::ymd(paste0(year, "-12-31"))) + yrstart <- max(lubridate::date(start_date), lubridate::ymd(paste0(year, "-01-01"))) + yrend <- min(lubridate::date(end_date), lubridate::ymd(paste0(year, "-12-31"))) ncfile <- file.path(in.path, paste(in.prefix, year, "nc", sep = ".")) csvfile <- file.path(outfolder, paste(in.prefix, year, "csv", sep = ".")) - if (file.exists(csvfile) && as.logical(overwrite) != TRUE){ + if (file.exists(csvfile) && as.logical(overwrite) != TRUE) { PEcAn.logger::logger.warn(paste("Output file", csvfile, "already exists! Moving to next year.")) next } met.nc <- ncdf4::nc_open(ncfile) on.exit(close_nc_if_open(met.nc), add = FALSE) - # add = FALSE because any previous file was closed at end of prev. loop + # add = FALSE because any previous file was closed at end of prev. loop dt <- mean(diff(PEcAn.utils::ud_convert( met.nc$dim$time$vals, met.nc$dim$time$units, - "hours since 1700-01-01 00:00:00"))) + "hours since 1700-01-01 00:00:00" + ))) if (dt < 1) { # More than one obs/hour. Write upscaled hourly file and reload. 
ncdf4::nc_close(met.nc) upscale_result <- PEcAn.data.atmosphere::upscale_met( outfolder = outfolder, input_met = ncfile, - site.id = in.prefix, resolution = 1/24, - overwrite = overwrite) + site.id = in.prefix, resolution = 1 / 24, + overwrite = overwrite + ) met.nc <- ncdf4::nc_open(upscale_result$file) } tmp.met <- PEcAn.data.atmosphere::load.cfmet( - met.nc, lat = lat, lon = lon, - start.date = yrstart, end.date = yrend) + met.nc, + lat = lat, lon = lon, + start.date = yrstart, end.date = yrend + ) # NB we need this nc_close even though on.exit is set: # close here to avoid leaking filehandle at end of loop iteration, @@ -87,15 +94,16 @@ met2model.BIOCRO <- function(in.path, in.prefix, outfolder, overwrite = FALSE, startdate = yrstart, enddate = yrend, dbfile.name = in.prefix, - stringsAsFactors = FALSE) + stringsAsFactors = FALSE + ) } result <- do.call("rbind", res) return(result) -} # met2model.BIOCRO +} # met2model.BIOCRO -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# #' Converts a CF data frame into a BioCro met input #' #' @param met data.table object with met for a single site; output from \code{\link{load.cfmet}} @@ -133,14 +141,13 @@ met2model.BIOCRO <- function(in.path, in.prefix, outfolder, overwrite = FALSE, #' @importFrom data.table := #' @author David LeBauer cf2biocro <- function(met, longitude = NULL, zulu2solarnoon = FALSE) { - if (!data.table::is.data.table(met)) { met <- data.table::copy(met) data.table::setDT(met) } if ((!is.null(longitude)) & zulu2solarnoon) { - solarnoon_offset <- PEcAn.utils::ud_convert(longitude/360, "day", "minute") + solarnoon_offset <- PEcAn.utils::ud_convert(longitude / 360, "day", "minute") met[, `:=`(solardate = met$date + lubridate::minutes(solarnoon_offset))] } if (!"relative_humidity" %in% colnames(met)) { @@ -148,11 +155,14 @@ cf2biocro <- function(met, longitude = NULL, zulu2solarnoon = FALSE) { rh <- PEcAn.data.atmosphere::qair2rh( qair = met$specific_humidity, temp = PEcAn.utils::ud_convert(met$air_temperature, "Kelvin", "Celsius"), - press = PEcAn.utils::ud_convert(met$air_pressure, "Pa", "hPa")) + press = PEcAn.utils::ud_convert(met$air_pressure, "Pa", "hPa") + ) met[, `:=`(relative_humidity = rh)] } else { - PEcAn.logger::logger.error("neither relative_humidity nor [air_temperature, air_pressure, and specific_humidity]", - "are in met data") + PEcAn.logger::logger.error( + "neither relative_humidity nor [air_temperature, air_pressure, and specific_humidity]", + "are in met data" + ) } } if (!"ppfd" %in% colnames(met)) { @@ -172,19 +182,21 @@ cf2biocro <- function(met, longitude = NULL, zulu2solarnoon = FALSE) { PEcAn.logger::logger.error("neither wind_speed nor both eastward_wind and northward_wind are present in met data") } } - + ## Convert RH from percent to fraction BioCro functions just to confirm if (max(met$relative_humidity) > 1) { - met[, `:=`(relative_humidity = met$relative_humidity/100)] + met[, `:=`(relative_humidity = met$relative_humidity / 100)] } - newmet <- met[, list(year = lubridate::year(met$date), - doy = lubridate::yday(met$date), - hour = round(lubridate::hour(met$date) + lubridate::minute(met$date) / 60, 0), - solar = ppfd, - Temp = PEcAn.utils::ud_convert(met$air_temperature, "Kelvin", "Celsius"), - RH = met$relative_humidity, - windspeed = wind_speed, - precip = PEcAn.utils::ud_convert(met$precipitation_flux, "s-1", "h-1"))] - newmet <- 
newmet[newmet$hour <= 23,] + newmet <- met[, list( + year = lubridate::year(met$date), + doy = lubridate::yday(met$date), + hour = round(lubridate::hour(met$date) + lubridate::minute(met$date) / 60, 0), + solar = ppfd, + Temp = PEcAn.utils::ud_convert(met$air_temperature, "Kelvin", "Celsius"), + RH = met$relative_humidity, + windspeed = wind_speed, + precip = PEcAn.utils::ud_convert(met$precipitation_flux, "s-1", "h-1") + )] + newmet <- newmet[newmet$hour <= 23, ] return(as.data.frame(newmet)) -} # cf2biocro +} # cf2biocro diff --git a/models/biocro/R/model2netcdf.BIOCRO.R b/models/biocro/R/model2netcdf.BIOCRO.R index d6fb4dbbff7..3fbe480d677 100644 --- a/models/biocro/R/model2netcdf.BIOCRO.R +++ b/models/biocro/R/model2netcdf.BIOCRO.R @@ -12,65 +12,71 @@ #' @export #' @author David LeBauer, Deepak Jaiswal, Rob Kooper model2netcdf.BIOCRO <- function(result, genus = NULL, outdir, lat = -9999, lon = -9999) { - if (!("hour" %in% colnames(result))) { result$hour <- 0 } if (all(c("year", "hour", "doy") %in% colnames(result))) { data.table::setnames(result, c("year", "hour", "doy"), c("Year", "Hour", "DayofYear")) } - + ## longname prefix station_* used for a point ## http://cf-pcmdi.llnl.gov/documents/cf-conventions/1.4/cf-conventions.html#scalar-coordinate-variables - x <- ncdf4::ncdim_def("latitude", "degrees_north", - vals = as.numeric(lat), - longname = "station_latitude", - unlim = TRUE) + x <- ncdf4::ncdim_def("latitude", "degrees_north", + vals = as.numeric(lat), + longname = "station_latitude", + unlim = TRUE + ) y <- ncdf4::ncdim_def("longitude", "degrees_east", - vals = as.numeric(lon), - longname = "station_longitude", - unlim = TRUE) - + vals = as.numeric(lon), + longname = "station_longitude", + unlim = TRUE + ) + for (yeari in unique(result$Year)) { result_yeari <- result[result$Year == yeari] - dates <- lubridate::ymd(paste0(result_yeari$Year, "-01-01")) + lubridate::days(as.numeric(result_yeari$DayofYear - 1)) + + dates <- lubridate::ymd(paste0(result_yeari$Year, "-01-01")) + lubridate::days(as.numeric(result_yeari$DayofYear - 1)) + lubridate::hours(result_yeari$Hour) days_since_origin <- dates - lubridate::ymd_hms("1700-01-01 00:00:00") if (!units(days_since_origin) == "days") { stop("check time units") } - t <- ncdf4::ncdim_def("time", "days since 1700-01-01", as.numeric(days_since_origin)) # define netCDF dimensions for variables + t <- ncdf4::ncdim_def("time", "days since 1700-01-01", as.numeric(days_since_origin)) # define netCDF dimensions for variables if (exists("genus") & (genus == "Saccharum")) { for (variable in c("Leaf", "Root", "Stem", "LAI", "DayofYear")) { v <- result_yeari[[variable]] result_yeari[[variable]] <- c(v[1], rep(v[-1], 24, each = TRUE)) } } - + dims <- list(lat = x, lon = y, time = t) - vars <- list(NPP = PEcAn.utils::to_ncvar("NPP", dims), - TotLivBiom = PEcAn.utils::to_ncvar("TotLivBiom", dims), - root_carbon_content = PEcAn.utils::to_ncvar("root_carbon_content", dims), - AbvGrndWood = PEcAn.utils::to_ncvar("AbvGrndWood", dims), - AGB = PEcAn.utils::to_ncvar("AGB", dims), - Evap = PEcAn.utils::to_ncvar("Evap", dims), - TVeg = PEcAn.utils::to_ncvar("TVeg", dims), - LAI = PEcAn.utils::to_ncvar("LAI", dims)) - + vars <- list( + NPP = PEcAn.utils::to_ncvar("NPP", dims), + TotLivBiom = PEcAn.utils::to_ncvar("TotLivBiom", dims), + root_carbon_content = PEcAn.utils::to_ncvar("root_carbon_content", dims), + AbvGrndWood = PEcAn.utils::to_ncvar("AbvGrndWood", dims), + AGB = PEcAn.utils::to_ncvar("AGB", dims), + Evap = PEcAn.utils::to_ncvar("Evap", dims), + 
TVeg = PEcAn.utils::to_ncvar("TVeg", dims), + LAI = PEcAn.utils::to_ncvar("LAI", dims) + ) + biomass2c <- 0.4 k <- PEcAn.utils::ud_convert(1, "Mg/ha", "kg/m2") * biomass2c - + result_yeari_std <- with(result_yeari, list( - TotLivBiom = k * (Leaf + Root + Stem + Rhizome + Grain), + TotLivBiom = k * (Leaf + Root + Stem + Rhizome + Grain), root_carbon_content = k * Root, AbvGrndWood = k * Stem, - AGB = k * (Leaf + Stem + Grain), - Evap = PEcAn.utils::ud_convert(SoilEvaporation + CanopyTrans, "Mg/ha/h", "kg/m2/s"), - TVeg = PEcAn.utils::ud_convert(CanopyTrans, "Mg/ha/h", "kg/m2/s"), - LAI = LAI)) - - total_biomass <- with(result_yeari, # this is for calculating NPP and includes litter - k * (Leaf + Root + Stem + Rhizome + Grain + AboveLitter + BelowLitter)) + AGB = k * (Leaf + Stem + Grain), + Evap = PEcAn.utils::ud_convert(SoilEvaporation + CanopyTrans, "Mg/ha/h", "kg/m2/s"), + TVeg = PEcAn.utils::ud_convert(CanopyTrans, "Mg/ha/h", "kg/m2/s"), + LAI = LAI + )) + + total_biomass <- with( + result_yeari, # this is for calculating NPP and includes litter + k * (Leaf + Root + Stem + Rhizome + Grain + AboveLitter + BelowLitter) + ) delta_biomass <- PEcAn.utils::ud_convert(c(0, diff(total_biomass)), "kg/m2/h", "kg/m2/s") delta_biomass[delta_biomass < 0] <- 0 result_yeari_std$NPP <- delta_biomass @@ -81,17 +87,19 @@ model2netcdf.BIOCRO <- function(result, genus = NULL, outdir, lat = -9999, lon = nc <- ncdf4::nc_create(filename = file.path(outdir, paste0(yeari, ".nc")), vars = vars) ncdf4::ncatt_put(nc, 0, "description", "This is an output from the BioCro Crop model generated by the model2netcdf.BIOCRO.R function in the PEcAn.BIOCRO package; see https://pecanproject.github.io/pecan-documentation/latest/ for more information") } - + varfile <- file(file.path(outdir, paste(yeari, "nc", "var", sep = ".")), "w") - + ## Output netCDF data for (.vname in names(vars)) { ncdf4::ncvar_put(nc, varid = vars[[.vname]], vals = result_yeari_std[[.vname]]) - cat(paste(vars[[.vname]]$name, vars[[.vname]]$longname), file = varfile, - sep = "\n") + cat(paste(vars[[.vname]]$name, vars[[.vname]]$longname), + file = varfile, + sep = "\n" + ) } - + close(varfile) ncdf4::nc_close(nc) } -} # model2netcdf.BIOCRO +} # model2netcdf.BIOCRO diff --git a/models/biocro/R/read.biocro.config.R b/models/biocro/R/read.biocro.config.R index e1a58b6b19f..fca63bbbbd9 100644 --- a/models/biocro/R/read.biocro.config.R +++ b/models/biocro/R/read.biocro.config.R @@ -5,17 +5,21 @@ #' @export #' @author David LeBauer read.biocro.config <- function(config.file = "config.xml") { - config <- XML::xmlToList(XML::xmlTreeParse(file = config.file, - handlers = list(comment = function(x) { NULL }), - asTree = TRUE)) - if(utils::packageVersion('BioCro') < 1.0){ + config <- XML::xmlToList(XML::xmlTreeParse( + file = config.file, + handlers = list(comment = function(x) { + NULL + }), + asTree = TRUE + )) + if (utils::packageVersion("BioCro") < 1.0) { config$pft$canopyControl$mResp <- unlist(strsplit(config$pft$canopyControl$mResp, split = ",")) } - if(!is.null(config$pft$initial_values)){ + if (!is.null(config$pft$initial_values)) { config$pft$initial_values <- lapply(config$pft$initial_values, as.numeric) } - if(!is.null(config$pft$parameters)){ + if (!is.null(config$pft$parameters)) { config$pft$parameters <- lapply(config$pft$parameters, as.numeric) } return(config) -} # read.biocro.config +} # read.biocro.config diff --git a/models/biocro/R/run.biocro.R b/models/biocro/R/run.biocro.R index 9a0b50205a6..a1260816ffc 100644 --- 
a/models/biocro/R/run.biocro.R +++ b/models/biocro/R/run.biocro.R @@ -12,25 +12,25 @@ #' @importFrom rlang .data #' @author David LeBauer run.biocro <- function(lat, lon, metpath, soil.nc = NULL, config = config, coppice.interval = 1) { - start.date <- lubridate::date(config$run$start.date) - end.date <- lubridate::date(config$run$end.date) + end.date <- lubridate::date(config$run$end.date) genus <- config$pft$type$genus years <- lubridate::year(start.date):lubridate::year(end.date) if (!is.null(soil.nc)) { soil <- PEcAn.data.land::get.soil(lat = lat, lon = lon, soil.nc = soil.nc) - config$pft$soilControl$soilType <- ifelse(soil$usda_class %in% 1:10, - soil$usda_class, - 10) + config$pft$soilControl$soilType <- ifelse(soil$usda_class %in% 1:10, + soil$usda_class, + 10 + ) config$pft$soilControl$soilDepth <- soil$ref_depth } if (coppice.interval > 1) { - config$pft$coppice.interval = coppice.interval + config$pft$coppice.interval <- coppice.interval } - if (utils::packageVersion('BioCro') >= 1.0) { + if (utils::packageVersion("BioCro") >= 1.0) { caller_fn <- call_biocro_1 } else { caller_fn <- call_biocro_0.9 @@ -41,7 +41,7 @@ run.biocro <- function(lat, lon, metpath, soil.nc = NULL, config = config, coppi yeari <- years[i] metfile <- paste(metpath, yeari, "csv", sep = ".") WetDat <- data.table::fread(metfile) - if(!all(sapply(WetDat, is.numeric))){ + if (!all(sapply(WetDat, is.numeric))) { PEcAn.logger::logger.severe("Format error in weather file: All columns must be numeric, but got (", sapply(WetDat, class), ")") } @@ -53,16 +53,17 @@ run.biocro <- function(lat, lon, metpath, soil.nc = NULL, config = config, coppi day1 <- lubridate::yday(config$simulationPeriod$dateofplanting) dayn <- lubridate::yday(config$simulationPeriod$dateofharvest) } else if (lat > 0) { - day1 <- max(WetDat[ (WetDat[,"doy"] < 180 & WetDat[,"Temp"] < -2), "doy"]) - dayn <- min(WetDat[ (WetDat[,"doy"] > 180 & WetDat[,"Temp"] < -2), "doy"]) + day1 <- max(WetDat[(WetDat[, "doy"] < 180 & WetDat[, "Temp"] < -2), "doy"]) + dayn <- min(WetDat[(WetDat[, "doy"] > 180 & WetDat[, "Temp"] < -2), "doy"]) ## day1 = last spring frost dayn = first fall frost from Miguez et al 2009 } else { day1 <- NULL dayn <- NULL } WetDat <- WetDat[ - WetDat$doy >= max(day1, lubridate::yday(starti)) - & WetDat$doy <= min(dayn, lubridate::yday(endi)), ] + WetDat$doy >= max(day1, lubridate::yday(starti)) & + WetDat$doy <= min(dayn, lubridate::yday(endi)), + ] HarvestedYield <- 0 @@ -73,12 +74,14 @@ run.biocro <- function(lat, lon, metpath, soil.nc = NULL, config = config, coppi config = config, lat = lat, lon = lon, tmp.result = tmp.result, - HarvestedYield = HarvestedYield) + HarvestedYield = HarvestedYield + ) tmp.result <- call_result$tmp.result HarvestedYield <- call_result$HarvestedYield - result.yeari.hourly <- with(tmp.result, + result.yeari.hourly <- with( + tmp.result, data.table::data.table( year = yeari, doy, hour, ThermalT, @@ -86,15 +89,19 @@ run.biocro <- function(lat, lon, metpath, soil.nc = NULL, config = config, coppi AboveLitter, BelowLitter, Rhizome, Grain, LAI, SoilEvaporation, CanopyTrans, - key = c("year", "doy", "hour"))) - result.yeari.withmet <- merge(x = result.yeari.hourly, - y = WetDat, by = c("year", "doy", "hour")) + key = c("year", "doy", "hour") + ) + ) + result.yeari.withmet <- merge( + x = result.yeari.hourly, + y = WetDat, by = c("year", "doy", "hour") + ) hourly.results[[i]] <- result.yeari.withmet } - + hourly.results <- do.call("rbind", hourly.results) - hourly.results <- 
hourly.results[order(hourly.results$year, hourly.results$doy, hourly.results$hour),] + hourly.results <- hourly.results[order(hourly.results$year, hourly.results$doy, hourly.results$hour), ] # Compute daily and yearly results by taking max or sum as appropriate. # This notation could be more compact if we used nonstandard evaluation @@ -105,64 +112,88 @@ run.biocro <- function(lat, lon, metpath, soil.nc = NULL, config = config, coppi dplyr::summarize_at( .tbl = hourly_grp, .vars = c("Stem", "Leaf", "Root", "AboveLitter", "BelowLitter", - "Rhizome", "Grain", "LAI", tmax = "Temp"), - .fun = max), + "Rhizome", "Grain", "LAI", + tmax = "Temp" + ), + .fun = max + ), dplyr::summarize_at( .tbl = hourly_grp, .vars = c("SoilEvaporation", "CanopyTrans", "precip"), - .fun = sum), + .fun = sum + ), dplyr::summarize_at( .tbl = hourly_grp, .vars = c(tmin = "Temp"), - .fun = min), + .fun = min + ), dplyr::summarize_at( .tbl = hourly_grp, .vars = c(tavg = "Temp"), - .fun = mean)) - daily.results.inter <- dplyr::select(daily.results.initial, "year...1", - "doy...2", "Stem", "Leaf", - "Root", "AboveLitter", "BelowLitter", - "Rhizome", "Grain", "LAI", - "tmax", "SoilEvaporation", - "CanopyTrans", "precip", - "tmin", "tavg") - daily.results <- dplyr::rename(daily.results.inter, - year = "year...1", doy = "doy...2") + .fun = mean + ) + ) + daily.results.inter <- dplyr::select( + daily.results.initial, "year...1", + "doy...2", "Stem", "Leaf", + "Root", "AboveLitter", "BelowLitter", + "Rhizome", "Grain", "LAI", + "tmax", "SoilEvaporation", + "CanopyTrans", "precip", + "tmin", "tavg" + ) + daily.results <- dplyr::rename(daily.results.inter, + year = "year...1", doy = "doy...2" + ) # bind_cols on 4 tables leaves 3 sets of duplicate year and day columns. # Let's drop these. 
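Note on the pattern above (a sketch, not part of the patch): the "year...1"/"doy...2" names selected and renamed here are produced by dplyr::bind_cols(), which de-duplicates the grouping columns that every dplyr::summarize_at() call repeats. A minimal illustration with a hypothetical toy table — `hourly` and its columns are invented for the sketch, and only dplyr is assumed to be installed:

  hourly <- data.frame(
    year = 2004, doy = rep(1:2, each = 2),
    Temp = c(1, 3, 2, 4), precip = c(0, 1, 0, 2)
  )
  grp <- dplyr::group_by_at(hourly, c("year", "doy"))
  daily <- dplyr::bind_cols(
    dplyr::summarize_at(grp, "Temp", max),   # returns year, doy, Temp
    dplyr::summarize_at(grp, "precip", sum)  # returns year, doy, precip
  )
  # bind_cols() repairs the clashing names to year...1, doy...2, year...4, doy...5,
  # which is why the code above selects "year...1"/"doy...2" and renames them back.
  daily <- dplyr::rename(
    dplyr::select(daily, "year...1", "doy...2", "Temp", "precip"),
    year = "year...1", doy = "doy...2"
  )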
- col_order <- c("year", "doy", "Stem", "Leaf", "Root", - "AboveLitter", "BelowLitter", "Rhizome", - "SoilEvaporation", "CanopyTrans", "Grain", "LAI", - "tmax", "tmin", "tavg", "precip") + col_order <- c( + "year", "doy", "Stem", "Leaf", "Root", + "AboveLitter", "BelowLitter", "Rhizome", + "SoilEvaporation", "CanopyTrans", "Grain", "LAI", + "tmax", "tmin", "tavg", "precip" + ) daily.results <- daily.results[, col_order] - + daily_grp <- dplyr::group_by_at(.tbl = hourly.results, .vars = "year") annual.results.initial <- dplyr::bind_cols( dplyr::summarize_at( .tbl = daily_grp, - .vars = c("Stem", "Leaf", "Root", "AboveLitter", "BelowLitter", - "Rhizome", "Grain"), - .fun = max), + .vars = c( + "Stem", "Leaf", "Root", "AboveLitter", "BelowLitter", + "Rhizome", "Grain" + ), + .fun = max + ), dplyr::summarize_at( .tbl = daily_grp, .vars = c("SoilEvaporation", "CanopyTrans", map = "precip"), - .fun = sum), + .fun = sum + ), dplyr::summarize_at( .tbl = daily_grp, .vars = c(mat = "Temp"), - .fun = mean)) - annual.results.inter <- dplyr::select(annual.results.initial, .data$year...1, - .data$Stem, .data$Leaf, .data$Root, - .data$AboveLitter, .data$BelowLitter, - .data$Rhizome, .data$Grain, .data$SoilEvaporation, - .data$CanopyTrans, .data$map, .data$mat) + .fun = mean + ) + ) + annual.results.inter <- dplyr::select( + annual.results.initial, .data$year...1, + .data$Stem, .data$Leaf, .data$Root, + .data$AboveLitter, .data$BelowLitter, + .data$Rhizome, .data$Grain, .data$SoilEvaporation, + .data$CanopyTrans, .data$map, .data$mat + ) annual.results <- dplyr::rename(annual.results.inter, year = "year...1") - col_order <- c("year", "Stem", "Leaf", "Root", "AboveLitter", "BelowLitter", - "Rhizome", "Grain", "SoilEvaporation", "CanopyTrans", - "map", "mat") + col_order <- c( + "year", "Stem", "Leaf", "Root", "AboveLitter", "BelowLitter", + "Rhizome", "Grain", "SoilEvaporation", "CanopyTrans", + "map", "mat" + ) annual.results <- annual.results[, col_order] - return(list(hourly = hourly.results, - daily = daily.results, - annually = data.table::data.table(lat = lat, lon = lon, annual.results))) + return(list( + hourly = hourly.results, + daily = daily.results, + annually = data.table::data.table(lat = lat, lon = lon, annual.results) + )) } # run.biocro diff --git a/models/biocro/R/write.configs.BIOCRO.R b/models/biocro/R/write.configs.BIOCRO.R index b7b869376a6..b80acb74190 100644 --- a/models/biocro/R/write.configs.BIOCRO.R +++ b/models/biocro/R/write.configs.BIOCRO.R @@ -1,4 +1,3 @@ - PREFIX_XML <- "\n\n" #' convert parameters from PEcAn database default units to biocro defaults @@ -11,22 +10,21 @@ PREFIX_XML <- "\n\ #' @return dataframe with values transformed #' @export #' @author David LeBauer -convert.samples.BIOCRO <- function(trait.samples, biocro_version=1.0) { - +convert.samples.BIOCRO <- function(trait.samples, biocro_version = 1.0) { trait.samples <- as.data.frame(trait.samples) trait.names <- colnames(trait.samples) ## transform values with different units cuticular conductance - BETY default is ## umol; BioCro uses mol if ("cuticular_cond" %in% trait.names) { - trait.samples$cuticular_cond = PEcAn.utils::ud_convert(trait.samples$cuticular_cond, "umol", "mol") + trait.samples$cuticular_cond <- PEcAn.utils::ud_convert(trait.samples$cuticular_cond, "umol", "mol") } if ("SLA" %in% trait.names) { - trait.samples$SLA = PEcAn.utils::ud_convert(trait.samples$SLA, "kg/m2", "g/cm2") + trait.samples$SLA <- PEcAn.utils::ud_convert(trait.samples$SLA, "kg/m2", "g/cm2") } ## rename bety variables to match 
active version of biocro - names(trait.names) <- trait.names #looks weird, but naming this vector simplifies indexing below + names(trait.names) <- trait.names # looks weird, but naming this vector simplifies indexing below name_lookup <- dplyr::tribble( ~bety_name, ~biocro_0.9_name, ~biocro_1.0_name, @@ -41,7 +39,7 @@ convert.samples.BIOCRO <- function(trait.samples, biocro_version=1.0) { "chi_leaf", "chi.l", "chil", "quantum_efficiency", "alpha", "alpha" ) - name_lookup <- name_lookup[name_lookup$bety_name %in% trait.names,] + name_lookup <- name_lookup[name_lookup$bety_name %in% trait.names, ] if (biocro_version >= 1.0) { biocro_name <- "biocro_1.0_name" @@ -52,7 +50,7 @@ convert.samples.BIOCRO <- function(trait.samples, biocro_version=1.0) { colnames(trait.samples) <- trait.names return(trait.samples) -} # convert.samples.BIOCRO +} # convert.samples.BIOCRO #' Writes a configuration files for the biocro model @@ -67,7 +65,6 @@ convert.samples.BIOCRO <- function(trait.samples, biocro_version=1.0) { #' @return nothing, writes configuration file as side effect #' @author David LeBauer write.config.BIOCRO <- function(defaults = NULL, trait.values, settings, run.id) { - ## find out where to write run/ouput rundir <- file.path(settings$host$rundir, as.character(run.id)) outdir <- file.path(settings$host$outdir, as.character(run.id)) @@ -77,31 +74,38 @@ write.config.BIOCRO <- function(defaults = NULL, trait.values, settings, run.id) } ## create launch script (which will create symlink) - writeLines(c("#!/bin/bash", - paste(settings$model$job.sh), - paste("mkdir -p", outdir), - paste("cd", rundir), - paste(settings$model$binary, normalizePath(rundir, mustWork = FALSE), normalizePath(outdir, mustWork = FALSE)), - "if [ $? -ne 0 ]; then", - " echo ERROR IN MODEL RUN >&2", - " exit 1", - "fi", - paste("cp ", file.path(rundir, "README.txt"), file.path(outdir, "README.txt"))), - con = file.path(settings$rundir, run.id, "job.sh")) + writeLines( + c( + "#!/bin/bash", + paste(settings$model$job.sh), + paste("mkdir -p", outdir), + paste("cd", rundir), + paste(settings$model$binary, normalizePath(rundir, mustWork = FALSE), normalizePath(outdir, mustWork = FALSE)), + "if [ $? 
-ne 0 ]; then", + " echo ERROR IN MODEL RUN >&2", + " exit 1", + "fi", + paste("cp ", file.path(rundir, "README.txt"), file.path(outdir, "README.txt")) + ), + con = file.path(settings$rundir, run.id, "job.sh") + ) Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) ## write configuration file traits <- convert.samples.BIOCRO( trait.samples = trait.values[[settings$pfts$pft$name]], - biocro_version = settings$model$revision) - + biocro_version = settings$model$revision + ) + pft_member_file <- file.path(settings$pfts$pft$outdir, "species.csv") - if(!file.exists(pft_member_file)){ + if (!file.exists(pft_member_file)) { pft_member_file <- file.path(settings$pfts$pft$outdir, "cultivars.csv") } - if(!file.exists(pft_member_file)){ - PEcAn.logger::logger.severe("Can't find PFT info: No species.csv nor cultivars.csv in", - settings$pfts$pft$outdir) + if (!file.exists(pft_member_file)) { + PEcAn.logger::logger.severe( + "Can't find PFT info: No species.csv nor cultivars.csv in", + settings$pfts$pft$outdir + ) } pft_members <- utils::read.csv(pft_member_file) @@ -127,15 +131,16 @@ write.config.BIOCRO <- function(defaults = NULL, trait.values, settings, run.id) if (is.null(defaults.file) && settings$model$revision >= 1.0 && utils::packageVersion("BioCro") >= 1.0) { # Look for defaults provided as datasets in the BioCro model package - defaults = get_biocro_defaults(genus) + defaults <- get_biocro_defaults(genus) } - if (is.null(defaults) && is.null(defaults.file)) { + if (is.null(defaults) && is.null(defaults.file)) { defaults.file <- system.file(file.path("extdata/defaults", paste0(tolower(genus), ".xml")), - package = "PEcAn.BIOCRO") - if(defaults.file != ""){ + package = "PEcAn.BIOCRO" + ) + if (defaults.file != "") { defaults <- XML::xmlToList(XML::xmlParse(defaults.file)) - } else{ + } else { PEcAn.logger::logger.severe("no defaults file given and ", genus, "not supported in BioCro") } } @@ -159,13 +164,17 @@ write.config.BIOCRO <- function(defaults = NULL, trait.values, settings, run.id) ### Replace Defaults with meta-analysis results unused.traits <- !traits.used ## a clunky way to only use logger for MEDIAN rather than all runs - if (any(grepl("MEDIAN", scan(file = file.path(settings$rundir, run.id, "README.txt"), - character(0), - sep = ":", - strip.white = TRUE)))) { + if (any(grepl("MEDIAN", scan( + file = file.path(settings$rundir, run.id, "README.txt"), + character(0), + sep = ":", + strip.white = TRUE + )))) { if (sum(unused.traits) > 0) { - PEcAn.logger::logger.warn("the following traits parameters are not added to config file:", - PEcAn.utils::vecpaste(names(unused.traits)[unused.traits == TRUE])) + PEcAn.logger::logger.warn( + "the following traits parameters are not added to config file:", + PEcAn.utils::vecpaste(names(unused.traits)[unused.traits == TRUE]) + ) } } @@ -173,19 +182,31 @@ write.config.BIOCRO <- function(defaults = NULL, trait.values, settings, run.id) ## Put defaults and other parts of config file together parms.xml <- PEcAn.settings::listToXml(defaults, "pft") - location.xml <- PEcAn.settings::listToXml(list(latitude = settings$run$site$lat, - longitude = settings$run$site$lon), - "location") - run.xml <- PEcAn.settings::listToXml(list(start.date = settings$run$start.date, - end.date = settings$run$end.date, - met.path = settings$run$inputs$met$path, - soil.file = settings$run$inputs$soil$path), - "run") + location.xml <- PEcAn.settings::listToXml( + list( + latitude = settings$run$site$lat, + longitude = settings$run$site$lon + ), + "location" + ) + run.xml <- 
PEcAn.settings::listToXml( + list( + start.date = settings$run$start.date, + end.date = settings$run$end.date, + met.path = settings$run$inputs$met$path, + soil.file = settings$run$inputs$soil$path + ), + "run" + ) slashdate <- function(x) substr(gsub("-", "/", x), 1, 10) - simulationPeriod.xml <- PEcAn.settings::listToXml(list(dateofplanting = slashdate(settings$run$start.date), - dateofharvest = slashdate(settings$run$end.date)), - "simulationPeriod") + simulationPeriod.xml <- PEcAn.settings::listToXml( + list( + dateofplanting = slashdate(settings$run$start.date), + dateofharvest = slashdate(settings$run$end.date) + ), + "simulationPeriod" + ) config.xml <- XML::xmlNode("config") config.xml <- XML::append.xmlNode(config.xml, run.xml) @@ -193,9 +214,11 @@ write.config.BIOCRO <- function(defaults = NULL, trait.values, settings, run.id) config.xml <- XML::append.xmlNode(config.xml, simulationPeriod.xml) config.xml <- XML::append.xmlNode(config.xml, parms.xml) - XML::saveXML(config.xml, file = file.path(settings$rundir, run.id, "config.xml"), - indent = TRUE) -} # write.config.BIOCRO + XML::saveXML(config.xml, + file = file.path(settings$rundir, run.id, "config.xml"), + indent = TRUE + ) +} # write.config.BIOCRO #' Clear out previous config and parameter files. @@ -206,20 +229,19 @@ write.config.BIOCRO <- function(defaults = NULL, trait.values, settings, run.id) #' @export #' @author Shawn Serbin, David LeBauer remove.config.BIOCRO <- function(main.outdir, settings) { - ## Remove files on localhost if (settings$host$name == "localhost") { - files <- paste0(settings$outdir, list.files(path = settings$outdir, recursive = FALSE)) # Need to change this to the run folder when implemented - files <- files[-grep("*.xml", files)] # Keep pecan.xml file + files <- paste0(settings$outdir, list.files(path = settings$outdir, recursive = FALSE)) # Need to change this to the run folder when implemented + files <- files[-grep("*.xml", files)] # Keep pecan.xml file pft.dir <- strsplit(settings$pfts$pft$outdir, "/")[[1]] - ln <- length(pft.dir) + ln <- length(pft.dir) pft.dir <- pft.dir[ln] - files <- files[-grep(pft.dir, files)] # Keep pft folder + files <- files[-grep(pft.dir, files)] # Keep pft folder # file.remove(files,recursive=TRUE) - system(paste("rm -r ", files, sep = "", collapse = " "), ignore.stderr = TRUE) # remove files/dirs + system(paste("rm -r ", files, sep = "", collapse = " "), ignore.stderr = TRUE) # remove files/dirs ## On remote host } else { print("*** WARNING: Removal of files on remote host not yet implemented ***") } -} # remove.config.BIOCRO +} # remove.config.BIOCRO diff --git a/models/biocro/inst/workflow.R b/models/biocro/inst/workflow.R index 915f04b78ea..3e5de177b7e 100644 --- a/models/biocro/inst/workflow.R +++ b/models/biocro/inst/workflow.R @@ -19,18 +19,22 @@ settings$meta.analysis$update <- TRUE #---------------- Run PEcAn workflow. 
-------------------------------------------------------------# # Query the trait database for data and priors -settings$pfts <- get.trait.data(settings$pfts, settings$model$type, settings$database$dbfiles, - settings$database$bety, settings$meta.analysis$update) +settings$pfts <- get.trait.data( + settings$pfts, settings$model$type, settings$database$dbfiles, + settings$database$bety, settings$meta.analysis$update +) # Run the PEcAn meta.analysis -run.meta.analysis(settings$pfts, settings$meta.analysis$iter, settings$meta.analysis$random.effects, - settings$meta.analysis$threshold, settings$database$dbfiles, settings$database$bety) +run.meta.analysis( + settings$pfts, settings$meta.analysis$iter, settings$meta.analysis$random.effects, + settings$meta.analysis$threshold, settings$database$dbfiles, settings$database$bety +) -run.write.configs(model) # Calls model specific write.configs e.g. write.config.ed.R +run.write.configs(model) # Calls model specific write.configs e.g. write.config.ed.R ## load met data -PEcAn.workflow::start_model_runs(model) # Start ecosystem model runs +PEcAn.workflow::start_model_runs(model) # Start ecosystem model runs -get.results(settings) # Get results of model runs +get.results(settings) # Get results of model runs # run.sensitivity.analysis() # Run sensitivity analysis and variance # decomposition on model output diff --git a/models/biocro/tests/testthat.R b/models/biocro/tests/testthat.R index da34f056ebe..0458ad0afc0 100644 --- a/models/biocro/tests/testthat.R +++ b/models/biocro/tests/testthat.R @@ -3,4 +3,4 @@ library(PEcAn.settings) library(testthat) PEcAn.logger::logger.setQuitOnSevere(FALSE) -test_check('PEcAn.BIOCRO') +test_check("PEcAn.BIOCRO") diff --git a/models/biocro/tests/testthat/helper.R b/models/biocro/tests/testthat/helper.R index 425196182be..2e14ee925fb 100644 --- a/models/biocro/tests/testthat/helper.R +++ b/models/biocro/tests/testthat/helper.R @@ -1,18 +1,18 @@ - # Return precalculated BioCro 0.9 results from specified days in 2004 # Accepts same arguments as BioCro::BioGro, ignores all but day1 and dayn -mock_run <- function(WetDat = NULL, day1 = 1, dayn = 7, ...){ - load("data/result.RData", envir = environment()) - resultDT[resultDT$Year == 2004 & resultDT$DayofYear %in% day1:dayn,] +mock_run <- function(WetDat = NULL, day1 = 1, dayn = 7, ...) 
{ + load("data/result.RData", envir = environment()) + resultDT[resultDT$Year == 2004 & resultDT$DayofYear %in% day1:dayn, ] } # Report BioCro version as 0.95, even if not installed -mock_version <- function(pkg, lib.loc = NULL){ - if (pkg == "BioCro"){ - return(structure( - list(c(0L, 95L)), - class = c("package_version", "numeric_version"))) - } else { - packageVersion(pkg, lib.loc) - } +mock_version <- function(pkg, lib.loc = NULL) { + if (pkg == "BioCro") { + return(structure( + list(c(0L, 95L)), + class = c("package_version", "numeric_version") + )) + } else { + packageVersion(pkg, lib.loc) + } } diff --git a/models/biocro/tests/testthat/test-call_biocro.R b/models/biocro/tests/testthat/test-call_biocro.R index 5f77027ca79..6374320e500 100644 --- a/models/biocro/tests/testthat/test-call_biocro.R +++ b/models/biocro/tests/testthat/test-call_biocro.R @@ -1,40 +1,43 @@ - context("checking call_biocro wrappers") loglevel <- PEcAn.logger::logger.getLevel() PEcAn.logger::logger.setLevel("OFF") teardown(PEcAn.logger::logger.setLevel(loglevel)) -WetDat <- read.csv("data/US-Bo1.2004.csv", nrows=7*24) +WetDat <- read.csv("data/US-Bo1.2004.csv", nrows = 7 * 24) config <- list(pft = list( - name="fake_pft", - phenoParms=list("3", "10"), - canopyControl=list(a=1, b=2, c=3), - parameters=list(aa=1, bb=2, cc=3), - initial_values=list(Root=10, Leaf=3, Stem=20))) -fake_b0.9_result = structure( + name = "fake_pft", + phenoParms = list("3", "10"), + canopyControl = list(a = 1, b = 2, c = 3), + parameters = list(aa = 1, bb = 2, cc = 3), + initial_values = list(Root = 10, Leaf = 3, Stem = 20) +)) +fake_b0.9_result <- structure( list( DayofYear = c(1, 1, 1), Hour = c(1, 2, 3), Leaf = c(10, 11, 12), Root = c(20, 30, 40), LAI = c(2.0, 2.01, 2.03), - rdMat = matrix(c(0,0,0))), - class = "BioGro") + rdMat = matrix(c(0, 0, 0)) + ), + class = "BioGro" +) fake_b1_result <- data.frame( - DOY = c(1, 1, 1), - Hour = c(1, 2, 3), - Leaf = c(10, 11, 12), - Root = c(20, 30, 40), - Stem = c(200, 300, 400), - lai = c(2.0, 2.01, 2.03), - TTc = c(1, 1.5, 1.8), - soil_evaporation = c(0, 0, 0), - canopy_transpiration = c(0, 0, 0), - LeafLitter = c(1, 1.01, 1.02), - StemLitter = c(10, 10.01, 10.02), - RootLitter = c(15, 15.01, 15.02), - RhizomeLitter = c(20, 20.01, 20.02)) + DOY = c(1, 1, 1), + Hour = c(1, 2, 3), + Leaf = c(10, 11, 12), + Root = c(20, 30, 40), + Stem = c(200, 300, 400), + lai = c(2.0, 2.01, 2.03), + TTc = c(1, 1.5, 1.8), + soil_evaporation = c(0, 0, 0), + canopy_transpiration = c(0, 0, 0), + LeafLitter = c(1, 1.01, 1.02), + StemLitter = c(10, 10.01, 10.02), + RootLitter = c(15, 15.01, 15.02), + RhizomeLitter = c(20, 20.01, 20.02) +) test_that("call_biocro_0.9 passes expected arguments to every supported genus", { @@ -51,7 +54,8 @@ test_that("call_biocro_0.9 passes expected arguments to every supported genus", res <- call_biocro_0.9( WetDat = WetDat, genus = i, year_in_run = 1, config = config, lat = 40, lon = -88, tmp.result = list(), - HarvestedYield = 0) + HarvestedYield = 0 + ) expect_length(res, 2) expect_equal(names(res), c("tmp.result", "HarvestedYield")) expect_type(res$tmp.result, "list") @@ -65,42 +69,51 @@ test_that("call_biocro_0.9 passes expected arguments to every supported genus", mockery::expect_called(willowmock, 1) expect_equal( mockery::mock_args(willowmock)[[1]]$canopyControl, - config$pft$canopyControl) + config$pft$canopyControl + ) # character param lists are converted to numeric expect_type( mockery::mock_args(willowmock)[[1]]$willowphenoControl[[1]], - "double") + "double" + ) 
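The assertions in this test lean on the mockery package: mockery::mock() builds a fake function that records every call it receives, and mockery::stub() swaps it in for a function called inside the function under test (here BioCro::BioGro and the genus-specific *Gro functions). A minimal, self-contained sketch of that pattern — run_model and its return value are hypothetical stand-ins, not PEcAn or BioCro code, and only mockery/testthat are assumed to be installed:

  run_model <- function(dat) BioCro::BioGro(WetDat = dat, day1 = 1, dayn = 7)
  fake_biogro <- mockery::mock(data.frame(Leaf = 1, Root = 2), cycle = TRUE)
  mockery::stub(run_model, "BioCro::BioGro", fake_biogro)  # BioCro itself is never called
  run_model(data.frame(doy = 1))              # dispatches to the recording stub instead
  mockery::expect_called(fake_biogro, 1)      # how many times the stub ran
  mockery::mock_args(fake_biogro)[[1]]$dayn   # 7: every argument of each call is recorded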
expect_equal( mockery::mock_args(willowmock)[[1]]$willowphenoControl, - lapply(config$pft$phenoParms, as.numeric)) -# genus-specific params not passed to other genera + lapply(config$pft$phenoParms, as.numeric) + ) + # genus-specific params not passed to other genera mockery::expect_called(canemock, 1) expect_null( - mockery::mock_args(canemock)[[1]]$willowphenoControl) + mockery::mock_args(canemock)[[1]]$willowphenoControl + ) # BioGro is called once every invocation to test if BioCro checks input DOY, # plus again for output in Miscanthus and Sorghum, equals six calls in total mockery::expect_called(biomock, 6) - for (j in c(1,2,3,5)) { + for (j in c(1, 2, 3, 5)) { mockery::expect_call( biomock, j, - BioCro::BioGro(WetDat = matrix(c(0,10,0,0,0,0,0,0), nrow = 1), - day1 = 10, dayn = 10, timestep = 24)) + BioCro::BioGro( + WetDat = matrix(c(0, 10, 0, 0, 0, 0, 0, 0), nrow = 1), + day1 = 10, dayn = 10, timestep = 24 + ) + ) } expect_equal(mockery::mock_args(biomock)[[4]]$day1, min(WetDat$doy)) expect_equal(mockery::mock_args(biomock)[[4]]$dayn, max(WetDat$doy)) expect_error( - call_biocro_0.9(WetDat = WetDat, genus = "not_a_genus", year_in_run = 1, - config = config, lat = 40, lon = -88, - tmp.result = list(), HarvestedYield = 0), - "not supported by PEcAn.BIOCRO when using BioCro 0.9x") + call_biocro_0.9( + WetDat = WetDat, genus = "not_a_genus", year_in_run = 1, + config = config, lat = 40, lon = -88, + tmp.result = list(), HarvestedYield = 0 + ), + "not supported by PEcAn.BIOCRO when using BioCro 0.9x" + ) }) -test_that("call_biocro_0.9 adjusts day1 and dayn when weather is not a whole year",{ - +test_that("call_biocro_0.9 adjusts day1 and dayn when weather is not a whole year", { # call_biocro_0.9 tests whether to adjust days by passing a one-line file # with DOY > 1 and adjusting iff BioCro::BioGro throws an error. # ==> To test, our BioGro stub needs to provide the error too. @@ -108,35 +121,39 @@ test_that("call_biocro_0.9 adjusts day1 and dayn when weather is not a whole yea mockery::stub( call_biocro_0.9, "BioCro::BioGro", - function(WetDat, ...){ - if(nrow(WetDat)==1 && WetDat$doy > 1){ + function(WetDat, ...) { + if (nrow(WetDat) == 1 && WetDat$doy > 1) { stop("This error should be caught silently") - }else{ + } else { biomock(WetDat, ...) 
} - }) + } + ) # whole file: day numbers unchanged res_whole <- call_biocro_0.9( WetDat = WetDat, genus = "Miscanthus", year_in_run = 1, config = config, - lat = 40, lon = -88, tmp.result = list(), HarvestedYield = 0) + lat = 40, lon = -88, tmp.result = list(), HarvestedYield = 0 + ) expect_equal(mockery::mock_args(biomock)[[1]]$day1, min(WetDat$doy)) expect_equal(mockery::mock_args(biomock)[[1]]$dayn, max(WetDat$doy)) # subset starting DOY 1: day numbers unchanged res_start <- call_biocro_0.9( - WetDat = WetDat[WetDat$doy <= 3,], genus = "Miscanthus", year_in_run = 1, + WetDat = WetDat[WetDat$doy <= 3, ], genus = "Miscanthus", year_in_run = 1, config = config, lat = 40, lon = -88, - tmp.result = list(), HarvestedYield = 0) + tmp.result = list(), HarvestedYield = 0 + ) expect_equal(mockery::mock_args(biomock)[[2]]$day1, 1) expect_equal(mockery::mock_args(biomock)[[2]]$dayn, 3) # subset starting DOY > 1: day numbers adjusted -- # BioCro secretly wants day of *file*, not day of year res_jan <- call_biocro_0.9( - WetDat = WetDat[WetDat$doy >= 3 & WetDat$doy <= 6,], - genus = "Miscanthus", year_in_run = 1, config = config, lat = 40, - lon = -88, tmp.result = list(), HarvestedYield = 0) + WetDat = WetDat[WetDat$doy >= 3 & WetDat$doy <= 6, ], + genus = "Miscanthus", year_in_run = 1, config = config, lat = 40, + lon = -88, tmp.result = list(), HarvestedYield = 0 + ) expect_equal(round(mockery::mock_args(biomock)[[3]]$day1), 1) expect_equal(round(mockery::mock_args(biomock)[[3]]$dayn), 4) }) @@ -151,7 +168,8 @@ test_that("call_biocro_1 passes expected arguments", { res <- call_biocro_1( WetDat = WetDat, genus = i, year_in_run = 1, config = config, lat = 40, lon = -88, tmp.result = list(), - HarvestedYield = 0) + HarvestedYield = 0 + ) expect_length(res, 2) expect_equal(names(res), c("tmp.result", "HarvestedYield")) expect_s3_class(res$tmp.result, "data.frame") @@ -159,16 +177,19 @@ test_that("call_biocro_1 passes expected arguments", { expect_equal(res$tmp.result$ThermalT, fake_b1_result$TTc) expect_equal( res$tmp.result$BelowLitter, - fake_b1_result$RootLitter + fake_b1_result$RhizomeLitter) + fake_b1_result$RootLitter + fake_b1_result$RhizomeLitter + ) expect_type(res$HarvestedYield, "double") # BioCro 1 param lists passed unchanged expect_equal( mockery::mock_args(b1mock)[[length(b1mock)]]$parameters, - config$pft$parameters) + config$pft$parameters + ) expect_equal( mockery::mock_args(b1mock)[[length(b1mock)]]$varying_parameters, - WetDat) + WetDat + ) # BioCro 0.9 param lists, day1, dayn not used expect_null(mockery::mock_args(b1mock)[[length(b1mock)]]$phenoControl) expect_null(mockery::mock_args(b1mock)[[length(b1mock)]]$day1) @@ -176,28 +197,31 @@ test_that("call_biocro_1 passes expected arguments", { } mockery::expect_called(b1mock, 3) - }) test_that("call_biocro_1 updates initial values after year 1", { b1mock <- mockery::mock(fake_b1_result, cycle = TRUE) mockery::stub(call_biocro_1, "BioCro::Gro", b1mock) - res1 <- call_biocro_1( - WetDat = WetDat, genus = "Salix", year_in_run = 1, config = config, - lat = 40, lon = -88, tmp.result = list(), - HarvestedYield = 0) + res1 <- call_biocro_1( + WetDat = WetDat, genus = "Salix", year_in_run = 1, config = config, + lat = 40, lon = -88, tmp.result = list(), + HarvestedYield = 0 + ) + expect_equal( + mockery::mock_args(b1mock)[[1]]$initial_values, + config$pft$initial_values + ) + + res2 <- call_biocro_1( + WetDat = WetDat, genus = "Salix", year_in_run = 2, config = config, + lat = 40, lon = -88, tmp.result = res1$tmp.result, + HarvestedYield = 
res1$HarvestedYield + ) + for (var in names(config$pft$initial_values)) { expect_equal( - mockery::mock_args(b1mock)[[1]]$initial_values, - config$pft$initial_values) - - res2 <- call_biocro_1( - WetDat = WetDat, genus = "Salix", year_in_run = 2, config = config, - lat = 40, lon = -88, tmp.result = res1$tmp.result, - HarvestedYield = res1$HarvestedYield) - for (var in names(config$pft$initial_values)) { - expect_equal( - mockery::mock_args(b1mock)[[2]]$initial_values[[!!var]], - res1$tmp.result[[!!var]][nrow(res1$tmp.result)]) - } + mockery::mock_args(b1mock)[[2]]$initial_values[[!!var]], + res1$tmp.result[[!!var]][nrow(res1$tmp.result)] + ) + } }) diff --git a/models/biocro/tests/testthat/test-run.biocro.R b/models/biocro/tests/testthat/test-run.biocro.R index 2bcd18beeb0..5b4a990868a 100644 --- a/models/biocro/tests/testthat/test-run.biocro.R +++ b/models/biocro/tests/testthat/test-run.biocro.R @@ -2,7 +2,7 @@ context("check that BioCro output is summarized correctly") # Hand-calculate reference values ref_output <- mock_run() -ref_met <- read.csv("data/US-Bo1.2004.csv", nrows=7*24) +ref_met <- read.csv("data/US-Bo1.2004.csv", nrows = 7 * 24) ref_leaf1 <- max(ref_output$Leaf[ref_output$DayofYear == 1]) ref_soil5 <- sum(ref_output$SoilEvaporation[ref_output$DayofYear == 5]) ref_mat <- mean(ref_met$Temp) @@ -12,7 +12,8 @@ metpath <- "data/US-Bo1" settings <- PEcAn.settings::read.settings("data/pecan.biocro.xml") settings$database$bety <- do.call( PEcAn.DB::get_postgres_envvars, - settings$database$bety) + settings$database$bety +) config <- PEcAn.settings::prepare.settings(settings) config$pft$type$genus <- "Salix" config$run$start.date <- as.POSIXct("2004-01-01") diff --git a/models/biocro/tests/testthat/test.cf2biocro.R b/models/biocro/tests/testthat/test.cf2biocro.R index 0c39eaf23f0..cc4d45a4dcf 100644 --- a/models/biocro/tests/testthat/test.cf2biocro.R +++ b/models/biocro/tests/testthat/test.cf2biocro.R @@ -1,24 +1,23 @@ context("check output from cf2biocro") test.nc <- ncdf4::nc_open("data/urbana_subdaily_test.nc") -cfmet <- PEcAn.data.atmosphere::load.cfmet(test.nc, lat = 40.25, lon = -88.125, - start.date = "1979-05-05", end.date = "1979-07-01") +cfmet <- PEcAn.data.atmosphere::load.cfmet(test.nc, + lat = 40.25, lon = -88.125, + start.date = "1979-05-05", end.date = "1979-07-01" +) # ncdf4::nc_close(test.nc) cfmet.hourly <- PEcAn.data.atmosphere::cfmet.downscale.time(cfmet) biocro.met <- cf2biocro(cfmet.hourly) test_that("cf2biocro creates BioCro compatible met from CF compliant file", { - - expect_true(all(c("year", "doy", "hour", "solar", "Temp", "RH", "windspeed", "precip") %in% colnames(biocro.met))) - }) test_that("cf2biocro provides hours in 0:23 range", { tmp <- cfmet.hourly tmp$hour <- tmp$hour + 1 - + expect_equal(range(tmp$hour), c(1, 24)) biocro.met <- cf2biocro(tmp) expect_equal(range(biocro.met$hour), c(0, 23)) diff --git a/models/biocro/tests/testthat/test.met2model.R b/models/biocro/tests/testthat/test.met2model.R index 7e87a7a8832..dd4391169f9 100644 --- a/models/biocro/tests/testthat/test.met2model.R +++ b/models/biocro/tests/testthat/test.met2model.R @@ -10,16 +10,18 @@ test_that("Met conversion runs without error", { "See issue #2274 (https://github.com/PecanProject/pecan/issues/2274)." 
)) nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" end_date <- "2000-12-31" result <- met2model.BIOCRO(in.path, in.prefix, outfolder, - lat = 45.25, - lon = -84.75, - start_date = start_date, - end_date = end_date) + lat = 45.25, + lon = -84.75, + start_date = start_date, + end_date = end_date + ) expect_s3_class(result, "data.frame") expect_true(file.exists(result[["file"]][[1]])) }) diff --git a/models/biocro/tests/testthat/test.model2netcdf.BIOCRO.R b/models/biocro/tests/testthat/test.model2netcdf.BIOCRO.R index e036ae92cbe..78e5827e586 100644 --- a/models/biocro/tests/testthat/test.model2netcdf.BIOCRO.R +++ b/models/biocro/tests/testthat/test.model2netcdf.BIOCRO.R @@ -8,22 +8,24 @@ file.copy(from = "data/result.RData", to = outdir) settings <- PEcAn.settings::read.settings("data/pecan.biocro.xml") settings$database$bety <- do.call( PEcAn.DB::get_postgres_envvars, - settings$database$bety) + settings$database$bety +) start_date <- settings$run$start.date load("data/result.RData") biocro.ncfile <- file.path(outdir, paste0(resultDT[, min(Year)], ".nc")) -if (file.exists(biocro.ncfile)) { file.remove(biocro.ncfile) } +if (file.exists(biocro.ncfile)) { + file.remove(biocro.ncfile) +} model2netcdf.BIOCRO(resultDT, genus = "foo", outdir = outdir, lat = 44.5, lon = -88) -test_that("model2netcdf.BIOCRO reads a .csv and writes a netcdf file for each year", - { - for (year in resultDT[, unique(Year)]) { - expect_true(file.exists(file.path(outdir, paste0(year, ".nc")))) - } - }) +test_that("model2netcdf.BIOCRO reads a .csv and writes a netcdf file for each year", { + for (year in resultDT[, unique(Year)]) { + expect_true(file.exists(file.path(outdir, paste0(year, ".nc")))) + } +}) biocro.nc <- ncdf4::nc_open(biocro.ncfile) vars <- biocro.nc$var @@ -34,15 +36,14 @@ test_that("model2netcdf.BIOCRO wrote netCDF with correct variables", { expect_true(all(c("TotLivBiom", "root_carbon_content", "AbvGrndWood", "Evap", "TVeg", "LAI") %in% names(vars))) expect_true(all(c("latitude", "longitude", "time") %in% names(dims))) - + expect_true(all(sapply(vars, function(x) x$ndims) == 3)) - - + + units <- sapply(vars, function(x) x$units) }) test_that("dimensions have MsTMIP standard units", { - expect_equal(dims$lat$units, "degrees_north") expect_equal(dims$lon$units, "degrees_east") expect_true(grepl("days since", dims$time$units)) @@ -52,17 +53,17 @@ test_that("variables have MsTMIP standard units", { standard_vars <- PEcAn.utils::standard_vars for (var in vars) { if (var$name %in% standard_vars$Variable.Name) { - expect_true(var$units == standard_vars[standard_vars$Variable.Name == var$name, - "Units"]) + expect_true(var$units == standard_vars[ + standard_vars$Variable.Name == var$name, + "Units" + ]) } } - + null <- sapply(dims, function(x) expect_is(x$vals, "array")) - }) test_that("model2netcdf.BIOCRO will add a second site to an existing file", { - ncdf4::nc_close(biocro.nc) file.remove(biocro.ncfile) model2netcdf.BIOCRO(resultDT, genus = "foo", outdir = outdir, lat = 44.6, lon = -88.1) @@ -70,6 +71,6 @@ test_that("model2netcdf.BIOCRO will add a second site to an existing file", { biocro.nc <- ncdf4::nc_open(biocro.ncfile) vars <- biocro.nc$var dims <- biocro.nc$dim - + ncdf4::ncvar_get(biocro.nc, "latitude") }) diff --git a/models/biocro/tests/testthat/test.write.configs.BIOCRO.R b/models/biocro/tests/testthat/test.write.configs.BIOCRO.R index 
983cec596bc..a62702db923 100644 --- a/models/biocro/tests/testthat/test.write.configs.BIOCRO.R +++ b/models/biocro/tests/testthat/test.write.configs.BIOCRO.R @@ -4,7 +4,8 @@ settings.xml <- file.path("data", "pecan.biocro.xml") settings <- PEcAn.settings::read.settings(settings.xml) settings$database$bety <- do.call( PEcAn.DB::get_postgres_envvars, - settings$database$bety) + settings$database$bety +) testthat::skip_if_not(PEcAn.DB::db.exists(settings[[c("database", "bety")]])) @@ -15,14 +16,15 @@ samples <- list(biocro.saof = (data.frame( cuticular_cond = c(1800, 4380, 10700), leaf_respiration_rate_m2 = c(1, 1.9, 3.6), stomatal_slope.BB = c(2.7, 3.3, 3.9), - row.names = c("15.866", "50", "84.134")))) + row.names = c("15.866", "50", "84.134") +))) test_that("convert.samples.BIOCRO works for BioCro 0.9", { biocro.parms <- convert.samples.BIOCRO(samples$biocro.saof, 0.9) expect_equal(dim(biocro.parms), dim(samples$biocro.saof)) expect_null(biocro.parms[["vmax1"]]) expect_equal(biocro.parms$vmax, samples$biocro.saof$Vcmax) - expect_equal(biocro.parms$b0, samples$biocro.saof$cuticular_cond/1e+06) + expect_equal(biocro.parms$b0, samples$biocro.saof$cuticular_cond / 1e+06) expect_equal(biocro.parms$SLA, samples$biocro.saof$SLA) expect_equal(biocro.parms$Rd, samples$biocro.saof$leaf_respiration_rate_m2) expect_equal(biocro.parms$b1, samples$biocro.saof$stomatal_slope.BB) @@ -33,22 +35,22 @@ test_that("convert.samples.BIOCRO works for BioCro 1.0", { expect_equal(dim(biocro.parms), dim(samples$biocro.saof)) expect_null(biocro.parms[["vmax"]]) # [[ instead of $ to avoid partial matching to "vmax1" expect_equal(biocro.parms$vmax1, samples$biocro.saof$Vcmax) - expect_equal(biocro.parms$b0, samples$biocro.saof$cuticular_cond/1e+06) + expect_equal(biocro.parms$b0, samples$biocro.saof$cuticular_cond / 1e+06) expect_equal(biocro.parms$iSp, samples$biocro.saof$SLA) expect_equal(biocro.parms$Rd, samples$biocro.saof$leaf_respiration_rate_m2) expect_equal(biocro.parms$b1, samples$biocro.saof$stomatal_slope.BB) }) test_that("convert.samples.BIOCRO accepts list, matrix, data frame", { - in_df <- data.frame(Vcmax = 1, b0 = 2, SLA=3) + in_df <- data.frame(Vcmax = 1, b0 = 2, SLA = 3) in_lst <- list(Vcmax = 1, b0 = 2, SLA = 3) in_mtrx <- matrix(1:3, ncol = 3, dimnames = list(NULL, c("Vcmax", "b0", "SLA"))) - + out <- in_df - out$SLA <- out$SLA/10 # bety sends kg/m2, biocro takes g/cm2 + out$SLA <- out$SLA / 10 # bety sends kg/m2, biocro takes g/cm2 colnames(out) <- c("vmax1", "b0", "iSp") - - expect_equal(convert.samples.BIOCRO(in_df, 1.0), out) # Redmine #1491 + + expect_equal(convert.samples.BIOCRO(in_df, 1.0), out) # Redmine #1491 expect_equal(convert.samples.BIOCRO(in_lst, 1.0), out) expect_equal(convert.samples.BIOCRO(in_mtrx, 1.0), out) }) @@ -59,28 +61,31 @@ test_that("write.config.BIOCRO produces expected output", { rundir <- file.path(settings$rundir, q) dir.create(outdir, showWarnings = FALSE, recursive = TRUE) dir.create(rundir, showWarnings = FALSE, recursive = TRUE) - + trait.values <- list() trait.values[[settings$pfts$pft$name]] <- samples$biocro.saof[q, ] - + readme <- file.path("data", "README.txt") species <- file.path("data", "species.csv") - + expect_true(file.exists(readme)) expect_true(file.exists(species)) - + expect_true(file.copy(readme, file.path(rundir, "README.txt"), overwrite = TRUE)) - expect_true(file.copy(species, file.path(settings$pfts$pft$outdir, "species.csv"), - overwrite = TRUE)) - + expect_true(file.copy(species, file.path(settings$pfts$pft$outdir, "species.csv"), + overwrite 
= TRUE + )) + # mock_version stub always reports BioCro version as 0.95 mockery::stub(write.config.BIOCRO, "utils::packageVersion", mock_version) - write.config.BIOCRO(defaults = settings$pfts, trait.values = trait.values, - settings = settings, run.id = q) - + write.config.BIOCRO( + defaults = settings$pfts, trait.values = trait.values, + settings = settings, run.id = q + ) + config <- file.path(rundir, "config.xml") expect_true(file.exists(config)) - + config.xml <- XML::xmlParse(config) config.list <- XML::xmlToList(config.xml) biocro.trait.values <- convert.samples.BIOCRO(trait.values[[settings$pfts$pft$name]], 0.9) @@ -89,12 +94,10 @@ test_that("write.config.BIOCRO produces expected output", { expect_equal(biocro.trait.values[["b1"]], as.numeric(config.list$pft$photoParms[["b1"]])) expect_equal(biocro.trait.values[["Rd"]], as.numeric(config.list$pft$photoParms[["Rd"]])) } - }) test_that("get_biocro_defaults returns a list and warns if no match", { - # Mock up the relevant bits of a result from data("packagename"), # containing all combinations of genus and param list: # "sorghum_initial_state", "sorghum_parameters", ..., "Zea_mays_modules" @@ -103,22 +106,28 @@ test_that("get_biocro_defaults returns a list and warns if no match", { data = as.vector(sapply( c("sorghum", "Zea_diploperennis", "Zea_mays"), paste0, - c("_initial_state", "_parameters", "_modules"))), + c("_initial_state", "_parameters", "_modules") + )), ncol = 1, - dimnames = list(NULL, "Item"))) - mockery::stub(get_biocro_defaults, "utils::data", function(...)data_result) + dimnames = list(NULL, "Item") + ) + ) + mockery::stub(get_biocro_defaults, "utils::data", function(...) data_result) # Mock up results from from_bc, which always returns a list but the contents # depend which dataset was requested. # It's called four times per invocation of get_biocro_results, so we provide # four appropriate mock responses and cycle over them mock_default_list <- mockery::mock( - list(canopy_module_name="c4_canopy"), # _modules - list(Stem=1, Leaf=0.02), # _initial_state - list(Rd=1.1, jmax=180), #_parameters - list(canopy_module_name="c4_canopy", # _modules again - soil_module_name="one_layer_soil_profile"), - cycle=TRUE) + list(canopy_module_name = "c4_canopy"), # _modules + list(Stem = 1, Leaf = 0.02), # _initial_state + list(Rd = 1.1, jmax = 180), #_parameters + list( + canopy_module_name = "c4_canopy", # _modules again + soil_module_name = "one_layer_soil_profile" + ), + cycle = TRUE + ) mockery::stub(get_biocro_defaults, "from_bc", mock_default_list) sorg <- get_biocro_defaults("Sorghum") @@ -134,27 +143,35 @@ test_that("get_biocro_defaults returns a list and warns if no match", { expect_equal(sorg$type$genus, "sorghum") zea_msg <- capture.output( - {zea_res <- get_biocro_defaults("Zea")}, - type = "message") + { + zea_res <- get_biocro_defaults("Zea") + }, + type = "message" + ) expect_match( zea_msg, "Multiple possible default parameter sets for Zea", - all = FALSE) + all = FALSE + ) mockery::expect_called(mock_default_list, 8) mockery::expect_args(mock_default_list, 7, "Zea_diploperennis_parameters") expect_is(zea_res, "list") expect_length(zea_res, 4) expect_equal(zea_res$initial_values$Stem, 1) expect_equal(zea_res$type$genus, "Zea_diploperennis") - # ^ Correct behavior, but maybe not what our hypothetical user wanted! + # ^ Correct behavior, but maybe not what our hypothetical user wanted! 
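One detail worth noting from the mock above (sketch only, made-up values, only mockery assumed): giving mockery::mock() several return values plus cycle = TRUE makes the stubbed from_bc() return them in order and wrap around, which is how a single mock can serve the repeated dataset lookups described in the comment above ("called four times per invocation").

  seq_mock <- mockery::mock("first", "second", "third", cycle = TRUE)
  c(seq_mock(), seq_mock(), seq_mock(), seq_mock())  # "first" "second" "third" "first"
  mockery::expect_called(seq_mock, 4)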
null_msg <- capture.output( - {null_res <- get_biocro_defaults("Not_a_genus")}, - type = "message") + { + null_res <- get_biocro_defaults("Not_a_genus") + }, + type = "message" + ) expect_match( null_msg, "No default parameter sets for Not_a_genus found in BioCro", - all=FALSE) + all = FALSE + ) expect_null(null_res) # expect get_biocro_defaults to exit w/o calling from_bc => # calls unchanged mockery::expect_called(mock_default_list, 8) diff --git a/models/cable/R/met2model.CABLE.R b/models/cable/R/met2model.CABLE.R index 0433d0247ce..f2786148499 100644 --- a/models/cable/R/met2model.CABLE.R +++ b/models/cable/R/met2model.CABLE.R @@ -1,13 +1,13 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' Converts a met CF file to a model specific met file. The input ##' files are calld /.YYYY.cf ##' @@ -19,18 +19,17 @@ ##' @return OK if everything was succesful. ##' @export ##' @author Rob Kooper -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# met2model.CABLE <- function(in.path, in.prefix, outfolder, overwrite = FALSE) { PEcAn.logger::logger.severe("NOT IMPLEMENTED") # Please follow the PEcAn style guide: # https://pecan.gitbooks.io/pecan-documentation/content/developers_guide/Coding_style.html - + # Note that `library()` calls should _never_ appear here; instead, put # packages dependencies in the DESCRIPTION file, under "Imports:". # Calls to dependent packages should use a double colon, e.g. # `packageName::functionName()`. # Also, `require()` should be used only when a package dependency is truly - # optional. In this case, put the package name under "Suggests:" in DESCRIPTION. - + # optional. In this case, put the package name under "Suggests:" in DESCRIPTION. } # met2model.CABLE diff --git a/models/cable/R/model2netcdf.CABLE.R b/models/cable/R/model2netcdf.CABLE.R index 3bf4b333ff0..03bf93b2585 100644 --- a/models/cable/R/model2netcdf.CABLE.R +++ b/models/cable/R/model2netcdf.CABLE.R @@ -1,15 +1,15 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' Convert CABLE output into the NACP Intercomparison format (ALMA using netCDF) -##' +##' ##' @name model2netcdf.CABLE ##' @title Code to convert CABLE's output into netCDF format ##' @@ -26,12 +26,11 @@ model2netcdf.CABLE <- function(outdir, sitelat, sitelon, start_date, end_date) { # Please follow the PEcAn style guide: # https://pecan.gitbooks.io/pecan-documentation/content/developers_guide/Coding_style.html - + # Note that `library()` calls should _never_ appear here; instead, put # packages dependencies in the DESCRIPTION file, under "Imports:". # Calls to dependent packages should use a double colon, e.g. # `packageName::functionName()`. # Also, `require()` should be used only when a package dependency is truly - # optional. In this case, put the package name under "Suggests:" in DESCRIPTION. - + # optional. In this case, put the package name under "Suggests:" in DESCRIPTION. } # model2netcdf.CABLE diff --git a/models/cable/R/read_restart.CABLE.R b/models/cable/R/read_restart.CABLE.R index dce1359d73d..15be7620229 100644 --- a/models/cable/R/read_restart.CABLE.R +++ b/models/cable/R/read_restart.CABLE.R @@ -1,24 +1,23 @@ #' @title Read restart template for SDA -#' +#' #' @author Alexey Shiklomanov -#' +#' #' @param outdir Output directory #' @param runid Run ID #' @param stop.time Year that is being read #' @param settings PEcAn settings object #' @param var.names Variable names to be extracted #' @param params Any parameters required for state calculations -#' +#' #' @description Read restart files from model. -#' +#' #' @return Forecast numeric matrix #' @export read_restart.CABLE <- function(outdir, - runid, - stop.time, - settings, - var.names, - params) { -PEcAn.logger::logger.severe("NOT IMPLEMENTED") + runid, + stop.time, + settings, + var.names, + params) { + PEcAn.logger::logger.severe("NOT IMPLEMENTED") } - diff --git a/models/cable/R/write.config.CABLE.R b/models/cable/R/write.config.CABLE.R index 8ddb6123201..2a4bb979b06 100644 --- a/models/cable/R/write.config.CABLE.R +++ b/models/cable/R/write.config.CABLE.R @@ -1,13 +1,13 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' Writes a CABLE config file. 
##' ##' Requires a pft xml object, a list of trait values for a single model run, @@ -22,23 +22,22 @@ ##' @return configuration file for CABLE for given run ##' @export ##' @author Rob Kooper, Kaitlin Ragosta -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# write.config.CABLE <- function(defaults, trait.values, settings, run.id) { - # Please follow the PEcAn style guide: # https://pecan.gitbooks.io/pecan-documentation/content/developers_guide/Coding_style.html - + # Note that `library()` calls should _never_ appear here; instead, put # packages dependencies in the DESCRIPTION file, under "Imports:". # Calls to dependent packages should use a double colon, e.g. # `packageName::functionName()`. # Also, `require()` should be used only when a package dependency is truly - # optional. In this case, put the package name under "Suggests:" in DESCRIPTION. - + # optional. In this case, put the package name under "Suggests:" in DESCRIPTION. + # find out where to write run/ouput rundir <- file.path(settings$host$rundir, run.id) outdir <- file.path(settings$host$outdir, run.id) - + #----------------------------------------------------------------------- # create launch script (which will create symlink) if (!is.null(settings$model$jobtemplate) && file.exists(settings$model$jobtemplate)) { @@ -46,7 +45,7 @@ write.config.CABLE <- function(defaults, trait.values, settings, run.id) { } else { jobsh <- readLines(con = system.file("template.job", package = "PEcAn.CABLE"), n = -1) } - + # create host specific setttings hostsetup <- "" if (!is.null(settings$model$prerun)) { @@ -55,7 +54,7 @@ write.config.CABLE <- function(defaults, trait.values, settings, run.id) { if (!is.null(settings$host$prerun)) { hostsetup <- paste(hostsetup, sep = "\n", paste(settings$host$prerun, collapse = "\n")) } - + hostteardown <- "" if (!is.null(settings$model$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$model$postrun, collapse = "\n")) @@ -63,26 +62,26 @@ write.config.CABLE <- function(defaults, trait.values, settings, run.id) { if (!is.null(settings$host$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$host$postrun, collapse = "\n")) } - + # create job.sh jobsh <- gsub("@HOST_SETUP@", hostsetup, jobsh) jobsh <- gsub("@HOST_TEARDOWN@", hostteardown, jobsh) - + jobsh <- gsub("@SITE_LAT@", settings$run$site$lat, jobsh) jobsh <- gsub("@SITE_LON@", settings$run$site$lon, jobsh) jobsh <- gsub("@SITE_MET@", settings$run$site$met, jobsh) - + jobsh <- gsub("@START_DATE@", settings$run$start.date, jobsh) jobsh <- gsub("@END_DATE@", settings$run$end.date, jobsh) - + jobsh <- gsub("@OUTDIR@", outdir, jobsh) jobsh <- gsub("@RUNDIR@", rundir, jobsh) - + jobsh <- gsub("@BINARY@", settings$model$binary, jobsh) - + writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) - + #----------------------------------------------------------------------- ### Edit a templated config file for runs if (!is.null(settings$model$config) && file.exists(settings$model$config)) { @@ -103,7 +102,7 @@ write.config.CABLE <- function(defaults, trait.values, settings, run.id) { PEcAn.logger::logger.info("Using", filename, "as template") config.text <- readLines(con = filename, n = -1) } - + config.text <- gsub("@SITE_LAT@", settings$run$site$lat, config.text) config.text <- 
gsub("@SITE_LON@", settings$run$site$lon, config.text) config.text <- gsub("@SITE_MET@", settings$run$inputs$met$path, config.text) @@ -118,7 +117,7 @@ write.config.CABLE <- function(defaults, trait.values, settings, run.id) { config.text <- gsub("@OUTDIR@", settings$host$outdir, config.text) config.text <- gsub("@ENSNAME@", run.id, config.text) config.text <- gsub("@OUTFILE@", paste0("out", run.id), config.text) - + #----------------------------------------------------------------------- config.file.name <- "cable.nml" writeLines(config.text, con = paste(outdir, config.file.name, sep = "")) diff --git a/models/cable/R/write_restart.CABLE.R b/models/cable/R/write_restart.CABLE.R index 61eda3defe8..9bbbdfb4256 100644 --- a/models/cable/R/write_restart.CABLE.R +++ b/models/cable/R/write_restart.CABLE.R @@ -1,20 +1,20 @@ #' @title Write restart template for SDA -#' +#' #' @author Alexey Shiklomanov -#' -#' @param start.time Time of current assimilation step +#' +#' @param start.time Time of current assimilation step #' @param stop.time Time of next assimilation step #' @param new.state Analysis state matrix returned by \code{sda.enkf} #' @inheritParams read.restart.CABLE -#' +#' #' @description Write restart files for model -#' +#' #' @export write_restart.CABLE <- function(outdir, - runid, - start.time, - stop.time, - settings, - new.state) { -PEcAn.logger::logger.severe("NOT IMPLEMENTED") + runid, + start.time, + stop.time, + settings, + new.state) { + PEcAn.logger::logger.severe("NOT IMPLEMENTED") } diff --git a/models/cable/tests/testthat.R b/models/cable/tests/testthat.R index d93798b4ffe..15de25bdc30 100644 --- a/models/cable/tests/testthat.R +++ b/models/cable/tests/testthat.R @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html @@ -10,4 +10,4 @@ library(testthat) library(PEcAn.utils) PEcAn.logger::logger.setQuitOnSevere(FALSE) -#test_check("PEcAn.ModelName") +# test_check("PEcAn.ModelName") diff --git a/models/cable/tests/testthat/test.met2model.R b/models/cable/tests/testthat/test.met2model.R index dec8c21a3dd..1c246ca75be 100644 --- a/models/cable/tests/testthat/test.met2model.R +++ b/models/cable/tests/testthat/test.met2model.R @@ -6,14 +6,18 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("Met conversion runs without error", { nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" end_date <- "2000-12-31" - testthat::expect_error({ - result <- met2model.CABLE(in.path, in.prefix, outfolder) - }, "NOT IMPLEMENTED") + testthat::expect_error( + { + result <- met2model.CABLE(in.path, in.prefix, outfolder) + }, + "NOT IMPLEMENTED" + ) skip("met2model.CABLE not implemented") expect_s3_class(result, "data.frame") expect_true(file.exists(result[["file"]][[1]])) diff --git a/models/clm45/R/met2model.CLM45.R b/models/clm45/R/met2model.CLM45.R index 26aa11935a0..7cce4a0745c 100644 --- a/models/clm45/R/met2model.CLM45.R +++ b/models/clm45/R/met2model.CLM45.R @@ -14,117 +14,114 @@ ##' @param ... 
other arguments, currently ignored ##' @param overwrite should existing files be overwritten ##' @param verbose should the function be very verbosefor(year in start_year:end_year) -met2model.CLM45 <- function(in.path,in.prefix,outfolder,start_date, end_date, lst=0,lat,lon,..., overwrite=FALSE,verbose=FALSE){ - +met2model.CLM45 <- function(in.path, in.prefix, outfolder, start_date, end_date, lst = 0, lat, lon, ..., overwrite = FALSE, verbose = FALSE) { PEcAn.logger::logger.severe("NOT IMPLEMENTED") - #General Structure- CLM Uses Netcdf so for now just need to rename vars.(Many not is CF standard. Need to Check that out) - #Get Met file from inpath. - #Loop over years (Open nc.file,rename vars,change dimensions as needed,close/save .nc file) - #close - #defining temporal dimension needs to be figured out. If we configure clm to use same tstep then we may not need to change dimensions - -# -# #Process start and end dates -# start_date<-as.POSIXlt(start.date,tz="UTC") -# end_date<-as.POSIXlt(end.date,tz="UTC") -# -# start_year <- year(start_date) -# end_year <- year(end_date) -# -# timestep.s<-86400 #Number of seconds in a day -# -# ## Build met -# met <- NULL -# for(year in start_year:end_year){ -# -# met.file.y = paste(met.file,year,"nc",sep=".") -# -# if(file.exists(met.file.y)){ -# -# ## Open netcdf file -# nc=ncdf4::nc_open(met.file.y) -# -# -# ## convert time to seconds -# sec <- nc$dim$time$vals -# sec = PEcAn.utils::ud_convert(sec,unlist(strsplit(nc$dim$time$units," "))[1],"seconds") -# -# -# -# ##build day and year -# -# dt <- PEcAn.utils::seconds_in_year(year) / length(sec) -# tstep = round(timestep.s/dt) #time steps per day -# -# diy <- PEcAn.utils::days_in_year(year) -# doy <- rep(seq_len(diy), each=tstep)[1:length(sec)] -# + # General Structure- CLM Uses Netcdf so for now just need to rename vars.(Many not is CF standard. Need to Check that out) + # Get Met file from inpath. + # Loop over years (Open nc.file,rename vars,change dimensions as needed,close/save .nc file) + # close + # defining temporal dimension needs to be figured out. If we configure clm to use same tstep then we may not need to change dimensions - ## extract variables. 
These need to be read in and converted to CLM standards + # + # #Process start and end dates + # start_date<-as.POSIXlt(start.date,tz="UTC") + # end_date<-as.POSIXlt(end.date,tz="UTC") + # + # start_year <- year(start_date) + # end_year <- year(end_date) + # + # timestep.s<-86400 #Number of seconds in a day + # + # ## Build met + # met <- NULL + # for(year in start_year:end_year){ + # + # met.file.y = paste(met.file,year,"nc",sep=".") + # + # if(file.exists(met.file.y)){ + # + # ## Open netcdf file + # nc=ncdf4::nc_open(met.file.y) + # + # + # ## convert time to seconds + # sec <- nc$dim$time$vals + # sec = PEcAn.utils::ud_convert(sec,unlist(strsplit(nc$dim$time$units," "))[1],"seconds") + # + # + # + # ##build day and year + # + # dt <- PEcAn.utils::seconds_in_year(year) / length(sec) + # tstep = round(timestep.s/dt) #time steps per day + # + # diy <- PEcAn.utils::days_in_year(year) + # doy <- rep(seq_len(diy), each=tstep)[1:length(sec)] + # -# ncdf4::ncvar_rename(ncfile,varid="LONGXY") -# ncdf4::ncvar_rename(ncfile,varid="LATIXY") -# # double ZBOT(time, lat, lon) ; -# # ZBOT:long_name = "observational height" ; -# # ZBOT:units = "m" ; -# ZBOT = ncvar_rename(ncfile,"ZBOT","ZBOT") -# # -# # double EDGEW(scalar) ; -# # EDGEW:long_name = "western edge in atmospheric data" ; -# # EDGEW:units = "degrees E" ; -# EDGEW = ncvar_rename(ncfile,"EDGEW","EDGEW") -# -# # double EDGEE(scalar) ; -# # EDGEE:long_name = "eastern edge in atmospheric data" ; -# # EDGEE:units = "degrees E" ; -# EDGEE = ncvar_rename(ncfile,"EDGEE","EDGEE") -# -# # double EDGES(scalar) ; -# # EDGES:long_name = "southern edge in atmospheric data" ; -# # EDGES:units = "degrees N" ; -# EDGES = ncvar_rename(ncfile,"EDGES","EDGES") -# # -# # double EDGEN(scalar) ; -# # EDGEN:long_name = "northern edge in atmospheric data" ; -# # EDGEN:units = "degrees N" ; -# EDGEN = ncvar_rename(ncfile,"EDGEN","air_temperature") -# # double TBOT(time, lat, lon) ; -# # TBOT:long_name = "temperature at the lowest atm level (TBOT)" ; -# # TBOT:units = "K" ; -# TBOT = ncvar_rename(ncfile,"TBOT","specific_humidity") -# # double RH(time, lat, lon) ; -# # RH:long_name = "relative humidity at the lowest atm level (RH)" ; -# # relative_humidity -# # RH:units = "%" ; -# RH = ncvar_rename(ncfile,"RH","relative_humidity") -# # double WIND(time, lat, lon) ; -# # WIND:long_name = "wind at the lowest atm level (WIND)" ; -# # wind_speed -# # WIND:units = "m/s" ; -# WIND = ncvar_rename(ncfile,"WIND","wind_speed") -# # double FSDS(time, lat, lon) ; -# # FSDS:long_name = "incident solar (FSDS)" ; -# # FSDS:units = "W/m2" ; -# FSDS = ncvar_rename(ncfile,"FSDS","FSDS") -# # double FLDS(time, lat, lon) ; -# # FLDS:long_name = "incident longwave (FLDS)" ; -# # FLDS:units = "W/m2" ; -# FLDS = ncvar_rename(ncfile,"FLDS","") -# # double PSRF(time, lat, lon) ; -# # PSRF:long_name = "pressure at the lowest atm level (PSRF)" ; -# # PSRF:units = "Pa" ; -# PSRF = ncvar_rename(ncfile,"PSRF","air_pressure") -# # double PRECTmms(time, lat, lon) ; -# # PRECTmms:long_name = "precipitation (PRECTmms)" ; -# # PRECTmms:units = "mm/s" ; -# PRECTmms =ncvar_rename(ncfile,"PRECTmmc","precipitation_flux") + ## extract variables. 
These need to be read in and converted to CLM standards - #nc_close(ncfiles) + # ncdf4::ncvar_rename(ncfile,varid="LONGXY") + # ncdf4::ncvar_rename(ncfile,varid="LATIXY") + # # double ZBOT(time, lat, lon) ; + # # ZBOT:long_name = "observational height" ; + # # ZBOT:units = "m" ; + # ZBOT = ncvar_rename(ncfile,"ZBOT","ZBOT") + # # + # # double EDGEW(scalar) ; + # # EDGEW:long_name = "western edge in atmospheric data" ; + # # EDGEW:units = "degrees E" ; + # EDGEW = ncvar_rename(ncfile,"EDGEW","EDGEW") + # + # # double EDGEE(scalar) ; + # # EDGEE:long_name = "eastern edge in atmospheric data" ; + # # EDGEE:units = "degrees E" ; + # EDGEE = ncvar_rename(ncfile,"EDGEE","EDGEE") + # + # # double EDGES(scalar) ; + # # EDGES:long_name = "southern edge in atmospheric data" ; + # # EDGES:units = "degrees N" ; + # EDGES = ncvar_rename(ncfile,"EDGES","EDGES") + # # + # # double EDGEN(scalar) ; + # # EDGEN:long_name = "northern edge in atmospheric data" ; + # # EDGEN:units = "degrees N" ; + # EDGEN = ncvar_rename(ncfile,"EDGEN","air_temperature") + # # double TBOT(time, lat, lon) ; + # # TBOT:long_name = "temperature at the lowest atm level (TBOT)" ; + # # TBOT:units = "K" ; + # TBOT = ncvar_rename(ncfile,"TBOT","specific_humidity") + # # double RH(time, lat, lon) ; + # # RH:long_name = "relative humidity at the lowest atm level (RH)" ; + # # relative_humidity + # # RH:units = "%" ; + # RH = ncvar_rename(ncfile,"RH","relative_humidity") + # # double WIND(time, lat, lon) ; + # # WIND:long_name = "wind at the lowest atm level (WIND)" ; + # # wind_speed + # # WIND:units = "m/s" ; + # WIND = ncvar_rename(ncfile,"WIND","wind_speed") + # # double FSDS(time, lat, lon) ; + # # FSDS:long_name = "incident solar (FSDS)" ; + # # FSDS:units = "W/m2" ; + # FSDS = ncvar_rename(ncfile,"FSDS","FSDS") + # # double FLDS(time, lat, lon) ; + # # FLDS:long_name = "incident longwave (FLDS)" ; + # # FLDS:units = "W/m2" ; + # FLDS = ncvar_rename(ncfile,"FLDS","") + # # double PSRF(time, lat, lon) ; + # # PSRF:long_name = "pressure at the lowest atm level (PSRF)" ; + # # PSRF:units = "Pa" ; + # PSRF = ncvar_rename(ncfile,"PSRF","air_pressure") + # # double PRECTmms(time, lat, lon) ; + # # PRECTmms:long_name = "precipitation (PRECTmms)" ; + # # PRECTmms:units = "mm/s" ; + # PRECTmms =ncvar_rename(ncfile,"PRECTmmc","precipitation_flux") -#} ### end loop over met files + # nc_close(ncfiles) -#print("Done with met2model.CLM4") + # } ### end loop over met files + # print("Done with met2model.CLM4") } ### end met2model.CLM4 - diff --git a/models/clm45/R/model2netcdf.CLM45.R b/models/clm45/R/model2netcdf.CLM45.R index ec98d75e13a..3aec114e1b3 100644 --- a/models/clm45/R/model2netcdf.CLM45.R +++ b/models/clm45/R/model2netcdf.CLM45.R @@ -1,4 +1,4 @@ -##' +##' ##' @name model2netcdf.CLM45 ##' @title Code to convert CLM45 netcdf output into into CF standard ##' @@ -10,425 +10,424 @@ ##' @export ##' ##' @author Michael Dietze - model2netcdf.CLM45 <- function(outdir, sitelat, sitelon, start_date, end_date) { -# -# for (year in start_year:end_year){ -# +model2netcdf.CLM45 <- function(outdir, sitelat, sitelon, start_date, end_date) { + # + # for (year in start_year:end_year){ + # ### extract variables. 
These need to be read in and converted to CLM standards - -# levgrnd:long_name = "coordinate soil levels" ; -# levlak:long_name = "coordinate lake levels" ; -# levdcmp:long_name = "coordinate soil levels" ; -# mcdate:long_name = "current date (YYYYMMDD)" ; -# mcsec:long_name = "current seconds of current date" ; -# mdcur:long_name = "current day (from base day)" ; -# mscur:long_name = "current seconds of current day" ; -# nstep:long_name = "time step" ; -# lon:long_name = "coordinate longitude" ; -# lat:long_name = "coordinate latitude" ; -# area:long_name = "grid cell areas" ; -# topo:long_name = "grid cell topography" ; -# landfrac:long_name = "land fraction" ; -# landmask:long_name = "land/ocean mask (0.=ocean and 1.=land)" ; -# pftmask:long_name = "pft real/fake mask (0.=fake and 1.=real)" ; -# ZSOI:long_name = "soil depth" ; -# DZSOI:long_name = "soil thickness" ; -# WATSAT:long_name = "saturated soil water content (porosity)" ; -# SUCSAT:long_name = "saturated soil matric potential" ; -# BSW:long_name = "slope of soil water retention curve" ; -# HKSAT:long_name = "saturated hydraulic conductivity" ; -# ZLAKE:long_name = "lake layer node depth" ; -# DZLAKE:long_name = "lake layer thickness" ; -# ACTUAL_IMMOB:long_name = "actual N immobilization" ; -# AGNPP:long_name = "aboveground NPP" ; -# ALT:long_name = "current active layer thickness" ; -# ALTMAX:long_name = "maximum annual active layer thickness" ; -# ALTMAX_LASTYEAR:long_name = "maximum prior year active layer thickness" ; -# AR:long_name = "autotrophic respiration (MR + GR)" ; -# BAF_CROP:long_name = "fractional area burned for crop" ; -# BAF_PEATF:long_name = "fractional area burned in peatland" ; -# BCDEP:long_name = "total BC deposition (dry+wet) from atmosphere" ; -# BGNPP:long_name = "belowground NPP" ; -# BUILDHEAT:long_name = "heat flux from urban building interior to walls and roof" ; -# COL_CTRUNC:long_name = "column-level sink for C truncation" ; -# COL_FIRE_CLOSS:long_name = "total column-level fire C loss for non-peat fires outside land-type converted region" ; -# COL_FIRE_NLOSS:long_name = "total column-level fire N loss" ; -# COL_NTRUNC:long_name = "column-level sink for N truncation" ; -# CPOOL:long_name = "temporary photosynthate C pool" ; -# CWDC:long_name = "CWD C" ; -# CWDC_HR:long_name = "coarse woody debris C heterotrophic respiration" ; -# CWDC_LOSS:long_name = "coarse woody debris C loss" ; -# CWDC_TO_LITR2C:long_name = "decomp. of coarse woody debris C to litter 2 C" ; -# CWDC_TO_LITR3C:long_name = "decomp. of coarse woody debris C to litter 3 C" ; -# CWDN:long_name = "CWD N" ; -# CWDN_TO_LITR2N:long_name = "decomp. of coarse woody debris N to litter 2 N" ; -# CWDN_TO_LITR3N:long_name = "decomp. 
of coarse woody debris N to litter 3 N" ; -# DEADCROOTC:long_name = "dead coarse root C" ; -# DEADCROOTN:long_name = "dead coarse root N" ; -# DEADSTEMC:long_name = "dead stem C" ; -# DEADSTEMN:long_name = "dead stem N" ; -# DENIT:long_name = "total rate of denitrification" ; -# DISPVEGC:long_name = "displayed veg carbon, excluding storage and cpool" -# DISPVEGN:long_name = "displayed vegetation nitrogen" ; -# DSTDEP:long_name = "total dust deposition (dry+wet) from atmosphere" ; -# DSTFLXT:long_name = "total surface dust emission" ; -# DWT_CLOSS:long_name = "total carbon loss from land cover conversion" ; -# DWT_CONV_CFLUX:long_name = "conversion C flux (immediate loss to atm)" ; -# DWT_CONV_NFLUX:long_name = "conversion N flux (immediate loss to atm)" ; -# DWT_NLOSS:long_name = "total nitrogen loss from landcover conversion" ; -# DWT_PROD100C_GAIN:long_name = "landcover change-driven addition to 100-yr wood product pool" ; -# DWT_PROD100N_GAIN:long_name = "addition to 100-yr wood product pool" ; -# DWT_PROD10C_GAIN:long_name = "landcover change-driven addition to 10-yr wood product pool" ; -# DWT_PROD10N_GAIN:long_name = "addition to 10-yr wood product pool" ; -# DWT_SEEDC_TO_DEADSTEM:long_name = "seed source to patch-level deadstem" ; -# DWT_SEEDC_TO_LEAF:long_name = "seed source to patch-level leaf" ; -# DWT_SEEDN_TO_DEADSTEM:long_name = "seed source to PFT-level deadstem" ; -# DWT_SEEDN_TO_LEAF:long_name = "seed source to PFT-level leaf" ; -# EFLX_DYNBAL:long_name = "dynamic land cover change conversion energy flux" ; -# EFLX_GRND_LAKE:long_name = "net heat flux into lake/snow surface, excluding light transmission" ; -# EFLX_LH_TOT:long_name = "total latent heat flux [+ to atm]" ; -# EFLX_LH_TOT_R:long_name = "Rural total evaporation" ; -# EFLX_LH_TOT_U:long_name = "Urban total evaporation" ; -# ELAI:long_name = "exposed one-sided leaf area index" ; -# ER:long_name = "total ecosystem respiration, autotrophic + heterotrophic" ; -# ERRH2O:long_name = "total water conservation error" ; -# ERRH2OSNO:long_name = "imbalance in snow depth (liquid water)" ; -# ERRSEB:long_name = "surface energy conservation error" ; -# ERRSOI:long_name = "soil/lake energy conservation error" ; -# ERRSOL:long_name = "solar radiation conservation error" ; -# ESAI:long_name = "exposed one-sided stem area index" ; -# FAREA_BURNED:long_name = "timestep fractional area burned" ; -# FCEV:long_name = "canopy evaporation" ; -# FCOV:long_name = "fractional impermeable area" ; -# FCTR:long_name = "canopy transpiration" ; -# FGEV:long_name = "ground evaporation" ; -# FGR:long_name = "heat flux into soil/snow including snow melt and lake / snow light transmission" ; -# FGR12:long_name = "heat flux between soil layers 1 and 2" ; -# FGR_R:long_name = "Rural heat flux into soil/snow including snow melt and snow light transmission" ; -# FGR_U:long_name = "Urban heat flux into soil/snow including snow melt" ; -# FH2OSFC:long_name = "fraction of ground covered by surface water" ; -# FIRA:long_name = "net infrared (longwave) radiation" ; -# FIRA_R:long_name = "Rural net infrared (longwave) radiation" ; -# FIRA_U:long_name = "Urban net infrared (longwave) radiation" ; -# FIRE:long_name = "emitted infrared (longwave) radiation" ; -# FIRE_R:long_name = "Rural emitted infrared (longwave) radiation" ; -# FIRE_U:long_name = "Urban emitted infrared (longwave) radiation" ; -# FLDS:long_name = "atmospheric longwave radiation" ; -# FPG:long_name = "fraction of potential gpp" ; -# FPI:long_name = "fraction of potential immobilization" ; -# 
FPSN:long_name = "photosynthesis" ; -# FPSN_WC:long_name = "Rubisco-limited photosynthesis" ; -# FPSN_WJ:long_name = "RuBP-limited photosynthesis" ; -# FPSN_WP:long_name = "Product-limited photosynthesis" ; -# FROOTC:long_name = "fine root C" ; -# FROOTC_ALLOC:long_name = "fine root C allocation" ; -# FROOTC_LOSS:long_name = "fine root C loss" ; -# FROOTN:long_name = "fine root N" ; -# FSA:long_name = "absorbed solar radiation" ; -# FSAT:long_name = "fractional area with water table at surface" ; -# FSA_R:long_name = "Rural absorbed solar radiation" ; -# FSA_U:long_name = "Urban absorbed solar radiation" ; -# FSDS:long_name = "atmospheric incident solar radiation" ; -# FSDSND:long_name = "direct nir incident solar radiation" ; -# FSDSNDLN:long_name = "direct nir incident solar radiation at local noon" ; -# FSDSNI:long_name = "diffuse nir incident solar radiation" ; -# FSDSVD:long_name = "direct vis incident solar radiation" ; -# FSDSVDLN:long_name = "direct vis incident solar radiation at local noon" ; -# FSDSVI:long_name = "diffuse vis incident solar radiation" ; -# FSDSVILN:long_name = "diffuse vis incident solar radiation at local noon" ; -# FSH:long_name = "sensible heat" ; -# FSH_G:long_name = "sensible heat from ground" ; -# FSH_NODYNLNDUSE:long_name = "sensible heat not including correction for land use change" ; -# FSH_R:long_name = "Rural sensible heat" ; -# FSH_U:long_name = "Urban sensible heat" ; -# FSH_V:long_name = "sensible heat from veg" ; -# FSM:long_name = "snow melt heat flux" ; -# FSM_R:long_name = "Rural snow melt heat flux" ; -# FSM_U:long_name = "Urban snow melt heat flux" ; -# FSNO:long_name = "fraction of ground covered by snow" ; -# FSNO_EFF:long_name = "effective fraction of ground covered by snow" ; -# FSR:long_name = "reflected solar radiation" ; -# FSRND:long_name = "direct nir reflected solar radiation" ; -# FSRNDLN:long_name = "direct nir reflected solar radiation at local noon" ; -# FSRNI:long_name = "diffuse nir reflected solar radiation" ; -# FSRVD:long_name = "direct vis reflected solar radiation" ; -# FSRVDLN:long_name = "direct vis reflected solar radiation at local noon" ; -# FSRVI:long_name = "diffuse vis reflected solar radiation" ; -# FUELC:long_name = "fuel load" ; -# GC_HEAT1:long_name = "initial gridcell total heat content" ; -# GC_ICE1:long_name = "initial gridcell total ice content" ; -# GC_LIQ1:long_name = "initial gridcell total liq content" ; -# GPP:long_name = "gross primary production" ; -# GR:long_name = "total growth respiration" ; -# GROSS_NMIN:long_name = "gross rate of N mineralization" ; -# H2OCAN:long_name = "intercepted water" ; -# H2OSFC:long_name = "surface water depth" ; -# H2OSNO:long_name = "snow depth (liquid water)" ; -# H2OSNO_TOP:long_name = "mass of snow in top snow layer" ; -# HC:long_name = "heat content of soil/snow/lake" ; -# HCSOI:long_name = "soil heat content" ; -# HEAT_FROM_AC:long_name = "sensible heat flux put into canyon due to heat removed from air conditioning" ; -# HR:long_name = "total heterotrophic respiration" ; -# HTOP:long_name = "canopy top" ; -# LAISHA:long_name = "shaded projected leaf area index" ; -# LAISUN:long_name = "sunlit projected leaf area index" ; -# LAKEICEFRAC:long_name = "lake layer ice mass fraction" ; -# LAKEICETHICK:long_name = "thickness of lake ice (including physical expansion on freezing)" ; -# LAND_UPTAKE:long_name = "NEE minus LAND_USE_FLUX, negative for update" ; -# LAND_USE_FLUX:long_name = "total C emitted from land cover conversion and wood product pools" ; -# 
LEAFC:long_name = "leaf C" ; -# LEAFC_ALLOC:long_name = "leaf C allocation" ; -# LEAFC_LOSS:long_name = "leaf C loss" ; -# LEAFN:long_name = "leaf N" ; -# LEAF_MR:long_name = "leaf maintenance respiration" ; -# LFC2:long_name = "conversion area fraction of BET and BDT that burned" ; -# LF_CONV_CFLUX:long_name = "conversion carbon due to BET and BDT area decreasing" ; -# LITFALL:long_name = "litterfall (leaves and fine roots)" ; -# LITHR:long_name = "litter heterotrophic respiration" ; -# LITR1C:long_name = "LITR1 C" ; -# LITR1C_TO_SOIL1C:long_name = "decomp. of litter 1 C to soil 1 C" ; -# LITR1N:long_name = "LITR1 N" ; -# LITR1N_TNDNCY_VERT_TRANS:long_name = "litter 1 N tendency due to vertical transport" ; -# LITR1N_TO_SOIL1N:long_name = "decomp. of litter 1 N to soil 1 N" ; -# LITR1_HR:long_name = "Het. Resp. from litter 1" ; -# LITR2C:long_name = "LITR2 C" ; -# LITR2C_TO_SOIL2C:long_name = "decomp. of litter 2 C to soil 2 C" ; -# LITR2N:long_name = "LITR2 N" ; -# LITR2N_TNDNCY_VERT_TRANS:long_name = "litter 2 N tendency due to vertical transport" ; -# LITR2N_TO_SOIL2N:long_name = "decomp. of litter 2 N to soil 2 N" ; -# LITR2_HR:long_name = "Het. Resp. from litter 2" ; -# LITR3C:long_name = "LITR3 C" ; -# LITR3C_TO_SOIL3C:long_name = "decomp. of litter 3 C to soil 3 C" ; -# LITR3N:long_name = "LITR3 N" ; -# LITR3N_TNDNCY_VERT_TRANS:long_name = "litter 3 N tendency due to vertical transport" ; -# LITR3N_TO_SOIL3N:long_name = "decomp. of litter 3 N to soil 3 N" ; -# LITR3_HR:long_name = "Het. Resp. from litter 3" ; -# LITTERC:long_name = "litter C" ; -# LITTERC_HR:long_name = "litter C heterotrophic respiration" ; -# LITTERC_LOSS:long_name = "litter C loss" ; -# LIVECROOTC:long_name = "live coarse root C" ; -# LIVECROOTN:long_name = "live coarse root N" ; -# LIVESTEMC:long_name = "live stem C" ; -# LIVESTEMN:long_name = "live stem N" ; -# MEG_acetaldehyde:long_name = "MEGAN flux" ; -# MEG_acetic_acid:long_name = "MEGAN flux" ; -# MEG_acetone:long_name = "MEGAN flux" ; -# MEG_carene_3:long_name = "MEGAN flux" ; -# MEG_ethanol:long_name = "MEGAN flux" ; -# MEG_formaldehyde:long_name = "MEGAN flux" ; -# MEG_isoprene:long_name = "MEGAN flux" ; -# MEG_methanol:long_name = "MEGAN flux" ; -# MEG_pinene_a:long_name = "MEGAN flux" ; -# MEG_thujene_a:long_name = "MEGAN flux" ; -# MR:long_name = "maintenance respiration" ; -# M_LITR1C_TO_LEACHING:long_name = "litter 1 C leaching loss" ; -# M_LITR2C_TO_LEACHING:long_name = "litter 2 C leaching loss" ; -# M_LITR3C_TO_LEACHING:long_name = "litter 3 C leaching loss" ; -# M_SOIL1C_TO_LEACHING:long_name = "soil 1 C leaching loss" ; -# M_SOIL2C_TO_LEACHING:long_name = "soil 2 C leaching loss" ; -# M_SOIL3C_TO_LEACHING:long_name = "soil 3 C leaching loss" ; -# M_SOIL4C_TO_LEACHING:long_name = "soil 4 C leaching loss" ; -# NBP:long_name = "net biome production, includes fire, landuse, and harvest flux, positive for sink" ; -# NDEPLOY:long_name = "total N deployed in new growth" ; -# NDEP_TO_SMINN:long_name = "atmospheric N deposition to soil mineral N" ; -# NEE:long_name = "net ecosystem exchange of carbon, includes fire, landuse, harvest, and hrv_xsmrpool flux, positive for source" ; -# NEP:long_name = "net ecosystem production, excludes fire, landuse, and harvest flux, positive for sink" ; -# NET_NMIN:long_name = "net rate of N mineralization" ; -# NFIRE:long_name = "fire counts valid only in Reg.C" ; -# NFIX_TO_SMINN:long_name = "symbiotic/asymbiotic N fixation to soil mineral N" ; -# NPP:long_name = "net primary production" ; -# OCDEP:long_name = "total 
OC deposition (dry+wet) from atmosphere" ; -# O_SCALAR:long_name = "fraction by which decomposition is reduced due to anoxia" ; -# PARVEGLN:long_name = "absorbed par by vegetation at local noon" ; -# PBOT:long_name = "atmospheric pressure" ; -# PCO2:long_name = "atmospheric partial pressure of CO2" ; -# PCT_LANDUNIT:long_name = "% of each landunit on grid cell" ; -# PCT_NAT_PFT:long_name = "% of each PFT on the natural vegetation (i.e., soil) landunit" ; -# PFT_CTRUNC:long_name = "patch-level sink for C truncation" ; -# PFT_FIRE_CLOSS:long_name = "total patch-level fire C loss for non-peat fires outside land-type converted region" ; -# PFT_FIRE_NLOSS:long_name = "total pft-level fire N loss" ; -# PFT_NTRUNC:long_name = "pft-level sink for N truncation" ; -# PLANT_NDEMAND:long_name = "N flux required to support initial GPP" ; -# POTENTIAL_IMMOB:long_name = "potential N immobilization" ; -# PROD100C:long_name = "100-yr wood product C" ; -# PROD100C_LOSS:long_name = "loss from 100-yr wood product pool" ; -# PROD100N:long_name = "100-yr wood product N" ; -# PROD100N_LOSS:long_name = "loss from 100-yr wood product pool" ; -# PROD10C:long_name = "10-yr wood product C" ; -# PROD10C_LOSS:long_name = "loss from 10-yr wood product pool" ; -# PROD10N:long_name = "10-yr wood product N" ; -# PROD10N_LOSS:long_name = "loss from 10-yr wood product pool" ; -# PRODUCT_CLOSS:long_name = "total carbon loss from wood product pools" ; -# PRODUCT_NLOSS:long_name = "total N loss from wood product pools" ; -# PSNSHA:long_name = "shaded leaf photosynthesis" ; -# PSNSHADE_TO_CPOOL:long_name = "C fixation from shaded canopy" ; -# PSNSUN:long_name = "sunlit leaf photosynthesis" ; -# PSNSUN_TO_CPOOL:long_name = "C fixation from sunlit canopy" ; -# Q2M:long_name = "2m specific humidity" ; -# QBOT:long_name = "atmospheric specific humidity" ; -# QDRAI:long_name = "sub-surface drainage" ; -# QDRAI_PERCH:long_name = "perched wt drainage" ; -# QDRAI_XS:long_name = "saturation excess drainage" ; -# QDRIP:long_name = "throughfall" ; -# QFLOOD:long_name = "runoff from river flooding" ; -# QFLX_ICE_DYNBAL:long_name = "ice dynamic land cover change conversion runoff flux" ; -# QFLX_LIQ_DYNBAL:long_name = "liq dynamic land cover change conversion runoff flux" ; -# QH2OSFC:long_name = "surface water runoff" ; -# QINFL:long_name = "infiltration" ; -# QINTR:long_name = "interception" ; -# QIRRIG:long_name = "water added through irrigation" ; -# QOVER:long_name = "surface runoff" ; -# QRGWL:long_name = "surface runoff at glaciers (liquid only), wetlands, lakes" ; -# QRUNOFF:long_name = "total liquid runoff (does not include QSNWCPICE)" ; -# QRUNOFF_NODYNLNDUSE:long_name = "total liquid runoff (does not include QSNWCPICE) not including correction for land use change" ; -# QRUNOFF_R:long_name = "Rural total runoff" ; -# QRUNOFF_U:long_name = "Urban total runoff" ; -# QSNOMELT:long_name = "snow melt" ; -# QSNWCPICE:long_name = "excess snowfall due to snow capping" ; -# QSNWCPICE_NODYNLNDUSE:long_name = "excess snowfall due to snow capping not including correction for land use change" ; -# QSOIL:long_name = "Ground evaporation (soil/snow evaporation + soil/snow sublimation - dew)" ; -# QVEGE:long_name = "canopy evaporation" ; -# QVEGT:long_name = "canopy transpiration" ; -# RAIN:long_name = "atmospheric rain" ; -# RETRANSN:long_name = "plant pool of retranslocated N" ; -# RETRANSN_TO_NPOOL:long_name = "deployment of retranslocated N" ; -# RH2M:long_name = "2m relative humidity" ; -# RH2M_R:long_name = "Rural 2m specific humidity" ; -# 
RH2M_U:long_name = "Urban 2m relative humidity" ; -# RR:long_name = "root respiration (fine root MR + total root GR)" ; -# RSCANOPY:long_name = "canopy resistance" ; -# SABG:long_name = "solar rad absorbed by ground" ; -# SABG_PEN:long_name = "Rural solar rad penetrating top soil or snow layer" ; -# SABV:long_name = "solar rad absorbed by veg" ; -# SEEDC:long_name = "pool for seeding new Patches" ; -# SEEDN:long_name = "pool for seeding new PFTs" ; -# SMINN:long_name = "soil mineral N" ; -# SMINN_LEACHED:long_name = "soil mineral N pool loss to leaching" ; -# SMINN_TO_DENIT_L1S1:long_name = "denitrification for decomp. of litter 1to SOIL1" ; -# SMINN_TO_DENIT_L2S2:long_name = "denitrification for decomp. of litter 2to SOIL2" ; -# SMINN_TO_DENIT_L3S3:long_name = "denitrification for decomp. of litter 3to SOIL3" ; -# SMINN_TO_DENIT_S1S2:long_name = "denitrification for decomp. of soil 1to SOIL2" ; -# SMINN_TO_DENIT_S2S3:long_name = "denitrification for decomp. of soil 2to SOIL3" ; -# SMINN_TO_DENIT_S3S4:long_name = "denitrification for decomp. of soil 3to SOIL4" ; -# SMINN_TO_DENIT_S4:long_name = "denitrification for decomp. of soil 4to atmosphe" ; -# SMINN_TO_NPOOL:long_name = "deployment of soil mineral N uptake" ; -# SMINN_TO_PLANT:long_name = "plant uptake of soil mineral N" ; -# SMINN_TO_SOIL1N_L1:long_name = "mineral N flux for decomp. of LITR1to SOIL1" ; -# SMINN_TO_SOIL2N_L2:long_name = "mineral N flux for decomp. of LITR2to SOIL2" ; -# SMINN_TO_SOIL2N_S1:long_name = "mineral N flux for decomp. of SOIL1to SOIL2" ; -# SMINN_TO_SOIL3N_L3:long_name = "mineral N flux for decomp. of LITR3to SOIL3" ; -# SMINN_TO_SOIL3N_S2:long_name = "mineral N flux for decomp. of SOIL2to SOIL3" ; -# SMINN_TO_SOIL4N_S3:long_name = "mineral N flux for decomp. of SOIL3to SOIL4" ; -# SNOBCMCL:long_name = "mass of BC in snow column" ; -# SNOBCMSL:long_name = "mass of BC in top snow layer" ; -# SNODSTMCL:long_name = "mass of dust in snow column" ; -# SNODSTMSL:long_name = "mass of dust in top snow layer" ; -# SNOINTABS:long_name = "Percent of incoming solar absorbed by lower snow layers" ; -# SNOOCMCL:long_name = "mass of OC in snow column" ; -# SNOOCMSL:long_name = "mass of OC in top snow layer" ; -# SNOW:long_name = "atmospheric snow" ; -# SNOWDP:long_name = "gridcell mean snow height" ; -# SNOWICE:long_name = "snow ice" ; -# SNOWLIQ:long_name = "snow liquid water" ; -# SNOW_DEPTH:long_name = "snow height of snow covered area" ; -# SNOW_SINKS:long_name = "snow sinks (liquid water)" ; -# SNOW_SOURCES:long_name = "snow sources (liquid water)" ; -# SOIL1C:long_name = "SOIL1 C" ; -# SOIL1C_TO_SOIL2C:long_name = "decomp. of soil 1 C to soil 2 C" ; -# SOIL1N:long_name = "SOIL1 N" ; -# SOIL1N_TNDNCY_VERT_TRANS:long_name = "soil 1 N tendency due to vertical transport" ; -# SOIL1N_TO_SOIL2N:long_name = "decomp. of soil 1 N to soil 2 N" ; -# SOIL1_HR:long_name = "Het. Resp. from soil 1" ; -# SOIL2C:long_name = "SOIL2 C" ; -# SOIL2C_TO_SOIL3C:long_name = "decomp. of soil 2 C to soil 3 C" ; -# SOIL2N:long_name = "SOIL2 N" ; -# SOIL2N_TNDNCY_VERT_TRANS:long_name = "soil 2 N tendency due to vertical transport" ; -# SOIL2N_TO_SOIL3N:long_name = "decomp. of soil 2 N to soil 3 N" ; -# SOIL2_HR:long_name = "Het. Resp. from soil 2" ; -# SOIL3C:long_name = "SOIL3 C" ; -# SOIL3C_TO_SOIL4C:long_name = "decomp. of soil 3 C to soil 4 C" ; -# SOIL3N:long_name = "SOIL3 N" ; -# SOIL3N_TNDNCY_VERT_TRANS:long_name = "soil 3 N tendency due to vertical transport" ; -# SOIL3N_TO_SOIL4N:long_name = "decomp. 
of soil 3 N to soil 4 N" ; -# SOIL3_HR:long_name = "Het. Resp. from soil 3" ; -# SOIL4C:long_name = "SOIL4 C" ; -# SOIL4N:long_name = "SOIL4 N" ; -# SOIL4N_TNDNCY_VERT_TRANS:long_name = "soil 4 N tendency due to vertical transport" ; -# SOIL4N_TO_SMINN:long_name = "mineral N flux for decomp. of SOIL4" ; -# SOIL4_HR:long_name = "Het. Resp. from soil 4" ; -# SOILC:long_name = "soil C" ; -# SOILC_HR:long_name = "soil C heterotrophic respiration" ; -# SOILC_LOSS:long_name = "soil C loss" ; -# SOILPSI:long_name = "soil water potential in each soil layer" ; -# SOMC_FIRE:long_name = "C loss due to peat burning" ; -# SOMHR:long_name = "soil organic matter heterotrophic respiration" ; -# SOM_C_LEACHED:long_name = "total flux of C from SOM pools due to leaching" ; -# SR:long_name = "total soil respiration (HR + root resp)" ; -# STORVEGC:long_name = "stored vegetation carbon, excluding cpool" ; -# STORVEGN:long_name = "stored vegetation nitrogen" ; -# SUPPLEMENT_TO_SMINN:long_name = "supplemental N supply" ; -# SoilAlpha:long_name = "factor limiting ground evap" ; -# SoilAlpha_U:long_name = "urban factor limiting ground evap" ; -# TAUX:long_name = "zonal surface stress" ; -# TAUY:long_name = "meridional surface stress" ; -# TBOT:long_name = "atmospheric air temperature" ; -# TBUILD:long_name = "internal urban building temperature" ; -# TG:long_name = "ground temperature" ; -# TG_R:long_name = "Rural ground temperature" ; -# TG_U:long_name = "Urban ground temperature" ; -# TH2OSFC:long_name = "surface water temperature" ; -# THBOT:long_name = "atmospheric air potential temperature" ; -# TKE1:long_name = "top lake level eddy thermal conductivity" ; -# TLAI:long_name = "total projected leaf area index" ; -# TLAKE:long_name = "lake temperature" ; -# TOTCOLC:long_name = "total column carbon, incl veg and cpool" ; -# TOTCOLN:long_name = "total column-level N" ; -# TOTECOSYSC:long_name = "total ecosystem carbon, incl veg but excl cpool" ; -# TOTECOSYSN:long_name = "total ecosystem N" ; -# TOTLITC:long_name = "total litter carbon" ; -# TOTLITN:long_name = "total litter N" ; -# TOTPFTC:long_name = "total patch-level carbon, including cpool" ; -# TOTPFTN:long_name = "total PFT-level nitrogen" ; -# TOTPRODC:long_name = "total wood product C" ; -# TOTPRODN:long_name = "total wood product N" ; -# TOTSOMC:long_name = "total soil organic matter carbon" ; -# TOTSOMN:long_name = "total soil organic matter N" ; -# TOTVEGC:long_name = "total vegetation carbon, excluding cpool" ; -# TOTVEGN:long_name = "total vegetation nitrogen" ; -# TREFMNAV:long_name = "daily minimum of average 2-m temperature" ; -# TREFMNAV_R:long_name = "Rural daily minimum of average 2-m temperature" ; -# TREFMNAV_U:long_name = "Urban daily minimum of average 2-m temperature" ; -# TREFMXAV:long_name = "daily maximum of average 2-m temperature" ; -# TREFMXAV_R:long_name = "Rural daily maximum of average 2-m temperature" ; -# TREFMXAV_U:long_name = "Urban daily maximum of average 2-m temperature" ; -# TSA:long_name = "2m air temperature" ; -# TSAI:long_name = "total projected stem area index" ; -# TSA_R:long_name = "Rural 2m air temperature" ; -# TSA_U:long_name = "Urban 2m air temperature" ; -# TSOI_10CM:long_name = "soil temperature in top 10cm of soil" ; -# TV:long_name = "vegetation temperature" ; -# TWS:long_name = "total water storage" ; -# T_SCALAR:long_name = "temperature inhibition of decomposition" ; -# U10:long_name = "10-m wind" ; -# URBAN_AC:long_name = "urban air conditioning flux" ; -# URBAN_HEAT:long_name = "urban heating flux" ; -# 
VOCFLXT:long_name = "total VOC flux into atmosphere" ; -# VOLR:long_name = "river channel water storage" ; -# WASTEHEAT:long_name = "sensible heat flux from heating/cooling sources of urban waste heat" ; -# WF:long_name = "soil water as frac. of whc for top 0.05 m" ; -# WIND:long_name = "atmospheric wind velocity magnitude" ; -# WOODC:long_name = "wood C" ; -# WOODC_ALLOC:long_name = "wood C eallocation" ; -# WOODC_LOSS:long_name = "wood C loss" ; -# WOOD_HARVESTC:long_name = "wood harvest carbon (to product pools)" ; -# WOOD_HARVESTN:long_name = "wood harvest N (to product pools)" ; -# W_SCALAR:long_name = "Moisture (dryness) inhibition of decomposition" -#} - -} ## end model2netcdf.CLM45 -##==================================================================================================# + # levgrnd:long_name = "coordinate soil levels" ; + # levlak:long_name = "coordinate lake levels" ; + # levdcmp:long_name = "coordinate soil levels" ; + # mcdate:long_name = "current date (YYYYMMDD)" ; + # mcsec:long_name = "current seconds of current date" ; + # mdcur:long_name = "current day (from base day)" ; + # mscur:long_name = "current seconds of current day" ; + # nstep:long_name = "time step" ; + # lon:long_name = "coordinate longitude" ; + # lat:long_name = "coordinate latitude" ; + # area:long_name = "grid cell areas" ; + # topo:long_name = "grid cell topography" ; + # landfrac:long_name = "land fraction" ; + # landmask:long_name = "land/ocean mask (0.=ocean and 1.=land)" ; + # pftmask:long_name = "pft real/fake mask (0.=fake and 1.=real)" ; + # ZSOI:long_name = "soil depth" ; + # DZSOI:long_name = "soil thickness" ; + # WATSAT:long_name = "saturated soil water content (porosity)" ; + # SUCSAT:long_name = "saturated soil matric potential" ; + # BSW:long_name = "slope of soil water retention curve" ; + # HKSAT:long_name = "saturated hydraulic conductivity" ; + # ZLAKE:long_name = "lake layer node depth" ; + # DZLAKE:long_name = "lake layer thickness" ; + # ACTUAL_IMMOB:long_name = "actual N immobilization" ; + # AGNPP:long_name = "aboveground NPP" ; + # ALT:long_name = "current active layer thickness" ; + # ALTMAX:long_name = "maximum annual active layer thickness" ; + # ALTMAX_LASTYEAR:long_name = "maximum prior year active layer thickness" ; + # AR:long_name = "autotrophic respiration (MR + GR)" ; + # BAF_CROP:long_name = "fractional area burned for crop" ; + # BAF_PEATF:long_name = "fractional area burned in peatland" ; + # BCDEP:long_name = "total BC deposition (dry+wet) from atmosphere" ; + # BGNPP:long_name = "belowground NPP" ; + # BUILDHEAT:long_name = "heat flux from urban building interior to walls and roof" ; + # COL_CTRUNC:long_name = "column-level sink for C truncation" ; + # COL_FIRE_CLOSS:long_name = "total column-level fire C loss for non-peat fires outside land-type converted region" ; + # COL_FIRE_NLOSS:long_name = "total column-level fire N loss" ; + # COL_NTRUNC:long_name = "column-level sink for N truncation" ; + # CPOOL:long_name = "temporary photosynthate C pool" ; + # CWDC:long_name = "CWD C" ; + # CWDC_HR:long_name = "coarse woody debris C heterotrophic respiration" ; + # CWDC_LOSS:long_name = "coarse woody debris C loss" ; + # CWDC_TO_LITR2C:long_name = "decomp. of coarse woody debris C to litter 2 C" ; + # CWDC_TO_LITR3C:long_name = "decomp. of coarse woody debris C to litter 3 C" ; + # CWDN:long_name = "CWD N" ; + # CWDN_TO_LITR2N:long_name = "decomp. of coarse woody debris N to litter 2 N" ; + # CWDN_TO_LITR3N:long_name = "decomp. 
of coarse woody debris N to litter 3 N" ; + # DEADCROOTC:long_name = "dead coarse root C" ; + # DEADCROOTN:long_name = "dead coarse root N" ; + # DEADSTEMC:long_name = "dead stem C" ; + # DEADSTEMN:long_name = "dead stem N" ; + # DENIT:long_name = "total rate of denitrification" ; + # DISPVEGC:long_name = "displayed veg carbon, excluding storage and cpool" + # DISPVEGN:long_name = "displayed vegetation nitrogen" ; + # DSTDEP:long_name = "total dust deposition (dry+wet) from atmosphere" ; + # DSTFLXT:long_name = "total surface dust emission" ; + # DWT_CLOSS:long_name = "total carbon loss from land cover conversion" ; + # DWT_CONV_CFLUX:long_name = "conversion C flux (immediate loss to atm)" ; + # DWT_CONV_NFLUX:long_name = "conversion N flux (immediate loss to atm)" ; + # DWT_NLOSS:long_name = "total nitrogen loss from landcover conversion" ; + # DWT_PROD100C_GAIN:long_name = "landcover change-driven addition to 100-yr wood product pool" ; + # DWT_PROD100N_GAIN:long_name = "addition to 100-yr wood product pool" ; + # DWT_PROD10C_GAIN:long_name = "landcover change-driven addition to 10-yr wood product pool" ; + # DWT_PROD10N_GAIN:long_name = "addition to 10-yr wood product pool" ; + # DWT_SEEDC_TO_DEADSTEM:long_name = "seed source to patch-level deadstem" ; + # DWT_SEEDC_TO_LEAF:long_name = "seed source to patch-level leaf" ; + # DWT_SEEDN_TO_DEADSTEM:long_name = "seed source to PFT-level deadstem" ; + # DWT_SEEDN_TO_LEAF:long_name = "seed source to PFT-level leaf" ; + # EFLX_DYNBAL:long_name = "dynamic land cover change conversion energy flux" ; + # EFLX_GRND_LAKE:long_name = "net heat flux into lake/snow surface, excluding light transmission" ; + # EFLX_LH_TOT:long_name = "total latent heat flux [+ to atm]" ; + # EFLX_LH_TOT_R:long_name = "Rural total evaporation" ; + # EFLX_LH_TOT_U:long_name = "Urban total evaporation" ; + # ELAI:long_name = "exposed one-sided leaf area index" ; + # ER:long_name = "total ecosystem respiration, autotrophic + heterotrophic" ; + # ERRH2O:long_name = "total water conservation error" ; + # ERRH2OSNO:long_name = "imbalance in snow depth (liquid water)" ; + # ERRSEB:long_name = "surface energy conservation error" ; + # ERRSOI:long_name = "soil/lake energy conservation error" ; + # ERRSOL:long_name = "solar radiation conservation error" ; + # ESAI:long_name = "exposed one-sided stem area index" ; + # FAREA_BURNED:long_name = "timestep fractional area burned" ; + # FCEV:long_name = "canopy evaporation" ; + # FCOV:long_name = "fractional impermeable area" ; + # FCTR:long_name = "canopy transpiration" ; + # FGEV:long_name = "ground evaporation" ; + # FGR:long_name = "heat flux into soil/snow including snow melt and lake / snow light transmission" ; + # FGR12:long_name = "heat flux between soil layers 1 and 2" ; + # FGR_R:long_name = "Rural heat flux into soil/snow including snow melt and snow light transmission" ; + # FGR_U:long_name = "Urban heat flux into soil/snow including snow melt" ; + # FH2OSFC:long_name = "fraction of ground covered by surface water" ; + # FIRA:long_name = "net infrared (longwave) radiation" ; + # FIRA_R:long_name = "Rural net infrared (longwave) radiation" ; + # FIRA_U:long_name = "Urban net infrared (longwave) radiation" ; + # FIRE:long_name = "emitted infrared (longwave) radiation" ; + # FIRE_R:long_name = "Rural emitted infrared (longwave) radiation" ; + # FIRE_U:long_name = "Urban emitted infrared (longwave) radiation" ; + # FLDS:long_name = "atmospheric longwave radiation" ; + # FPG:long_name = "fraction of potential gpp" ; + # 
FPI:long_name = "fraction of potential immobilization" ; + # FPSN:long_name = "photosynthesis" ; + # FPSN_WC:long_name = "Rubisco-limited photosynthesis" ; + # FPSN_WJ:long_name = "RuBP-limited photosynthesis" ; + # FPSN_WP:long_name = "Product-limited photosynthesis" ; + # FROOTC:long_name = "fine root C" ; + # FROOTC_ALLOC:long_name = "fine root C allocation" ; + # FROOTC_LOSS:long_name = "fine root C loss" ; + # FROOTN:long_name = "fine root N" ; + # FSA:long_name = "absorbed solar radiation" ; + # FSAT:long_name = "fractional area with water table at surface" ; + # FSA_R:long_name = "Rural absorbed solar radiation" ; + # FSA_U:long_name = "Urban absorbed solar radiation" ; + # FSDS:long_name = "atmospheric incident solar radiation" ; + # FSDSND:long_name = "direct nir incident solar radiation" ; + # FSDSNDLN:long_name = "direct nir incident solar radiation at local noon" ; + # FSDSNI:long_name = "diffuse nir incident solar radiation" ; + # FSDSVD:long_name = "direct vis incident solar radiation" ; + # FSDSVDLN:long_name = "direct vis incident solar radiation at local noon" ; + # FSDSVI:long_name = "diffuse vis incident solar radiation" ; + # FSDSVILN:long_name = "diffuse vis incident solar radiation at local noon" ; + # FSH:long_name = "sensible heat" ; + # FSH_G:long_name = "sensible heat from ground" ; + # FSH_NODYNLNDUSE:long_name = "sensible heat not including correction for land use change" ; + # FSH_R:long_name = "Rural sensible heat" ; + # FSH_U:long_name = "Urban sensible heat" ; + # FSH_V:long_name = "sensible heat from veg" ; + # FSM:long_name = "snow melt heat flux" ; + # FSM_R:long_name = "Rural snow melt heat flux" ; + # FSM_U:long_name = "Urban snow melt heat flux" ; + # FSNO:long_name = "fraction of ground covered by snow" ; + # FSNO_EFF:long_name = "effective fraction of ground covered by snow" ; + # FSR:long_name = "reflected solar radiation" ; + # FSRND:long_name = "direct nir reflected solar radiation" ; + # FSRNDLN:long_name = "direct nir reflected solar radiation at local noon" ; + # FSRNI:long_name = "diffuse nir reflected solar radiation" ; + # FSRVD:long_name = "direct vis reflected solar radiation" ; + # FSRVDLN:long_name = "direct vis reflected solar radiation at local noon" ; + # FSRVI:long_name = "diffuse vis reflected solar radiation" ; + # FUELC:long_name = "fuel load" ; + # GC_HEAT1:long_name = "initial gridcell total heat content" ; + # GC_ICE1:long_name = "initial gridcell total ice content" ; + # GC_LIQ1:long_name = "initial gridcell total liq content" ; + # GPP:long_name = "gross primary production" ; + # GR:long_name = "total growth respiration" ; + # GROSS_NMIN:long_name = "gross rate of N mineralization" ; + # H2OCAN:long_name = "intercepted water" ; + # H2OSFC:long_name = "surface water depth" ; + # H2OSNO:long_name = "snow depth (liquid water)" ; + # H2OSNO_TOP:long_name = "mass of snow in top snow layer" ; + # HC:long_name = "heat content of soil/snow/lake" ; + # HCSOI:long_name = "soil heat content" ; + # HEAT_FROM_AC:long_name = "sensible heat flux put into canyon due to heat removed from air conditioning" ; + # HR:long_name = "total heterotrophic respiration" ; + # HTOP:long_name = "canopy top" ; + # LAISHA:long_name = "shaded projected leaf area index" ; + # LAISUN:long_name = "sunlit projected leaf area index" ; + # LAKEICEFRAC:long_name = "lake layer ice mass fraction" ; + # LAKEICETHICK:long_name = "thickness of lake ice (including physical expansion on freezing)" ; + # LAND_UPTAKE:long_name = "NEE minus LAND_USE_FLUX, negative for 
update" ; + # LAND_USE_FLUX:long_name = "total C emitted from land cover conversion and wood product pools" ; + # LEAFC:long_name = "leaf C" ; + # LEAFC_ALLOC:long_name = "leaf C allocation" ; + # LEAFC_LOSS:long_name = "leaf C loss" ; + # LEAFN:long_name = "leaf N" ; + # LEAF_MR:long_name = "leaf maintenance respiration" ; + # LFC2:long_name = "conversion area fraction of BET and BDT that burned" ; + # LF_CONV_CFLUX:long_name = "conversion carbon due to BET and BDT area decreasing" ; + # LITFALL:long_name = "litterfall (leaves and fine roots)" ; + # LITHR:long_name = "litter heterotrophic respiration" ; + # LITR1C:long_name = "LITR1 C" ; + # LITR1C_TO_SOIL1C:long_name = "decomp. of litter 1 C to soil 1 C" ; + # LITR1N:long_name = "LITR1 N" ; + # LITR1N_TNDNCY_VERT_TRANS:long_name = "litter 1 N tendency due to vertical transport" ; + # LITR1N_TO_SOIL1N:long_name = "decomp. of litter 1 N to soil 1 N" ; + # LITR1_HR:long_name = "Het. Resp. from litter 1" ; + # LITR2C:long_name = "LITR2 C" ; + # LITR2C_TO_SOIL2C:long_name = "decomp. of litter 2 C to soil 2 C" ; + # LITR2N:long_name = "LITR2 N" ; + # LITR2N_TNDNCY_VERT_TRANS:long_name = "litter 2 N tendency due to vertical transport" ; + # LITR2N_TO_SOIL2N:long_name = "decomp. of litter 2 N to soil 2 N" ; + # LITR2_HR:long_name = "Het. Resp. from litter 2" ; + # LITR3C:long_name = "LITR3 C" ; + # LITR3C_TO_SOIL3C:long_name = "decomp. of litter 3 C to soil 3 C" ; + # LITR3N:long_name = "LITR3 N" ; + # LITR3N_TNDNCY_VERT_TRANS:long_name = "litter 3 N tendency due to vertical transport" ; + # LITR3N_TO_SOIL3N:long_name = "decomp. of litter 3 N to soil 3 N" ; + # LITR3_HR:long_name = "Het. Resp. from litter 3" ; + # LITTERC:long_name = "litter C" ; + # LITTERC_HR:long_name = "litter C heterotrophic respiration" ; + # LITTERC_LOSS:long_name = "litter C loss" ; + # LIVECROOTC:long_name = "live coarse root C" ; + # LIVECROOTN:long_name = "live coarse root N" ; + # LIVESTEMC:long_name = "live stem C" ; + # LIVESTEMN:long_name = "live stem N" ; + # MEG_acetaldehyde:long_name = "MEGAN flux" ; + # MEG_acetic_acid:long_name = "MEGAN flux" ; + # MEG_acetone:long_name = "MEGAN flux" ; + # MEG_carene_3:long_name = "MEGAN flux" ; + # MEG_ethanol:long_name = "MEGAN flux" ; + # MEG_formaldehyde:long_name = "MEGAN flux" ; + # MEG_isoprene:long_name = "MEGAN flux" ; + # MEG_methanol:long_name = "MEGAN flux" ; + # MEG_pinene_a:long_name = "MEGAN flux" ; + # MEG_thujene_a:long_name = "MEGAN flux" ; + # MR:long_name = "maintenance respiration" ; + # M_LITR1C_TO_LEACHING:long_name = "litter 1 C leaching loss" ; + # M_LITR2C_TO_LEACHING:long_name = "litter 2 C leaching loss" ; + # M_LITR3C_TO_LEACHING:long_name = "litter 3 C leaching loss" ; + # M_SOIL1C_TO_LEACHING:long_name = "soil 1 C leaching loss" ; + # M_SOIL2C_TO_LEACHING:long_name = "soil 2 C leaching loss" ; + # M_SOIL3C_TO_LEACHING:long_name = "soil 3 C leaching loss" ; + # M_SOIL4C_TO_LEACHING:long_name = "soil 4 C leaching loss" ; + # NBP:long_name = "net biome production, includes fire, landuse, and harvest flux, positive for sink" ; + # NDEPLOY:long_name = "total N deployed in new growth" ; + # NDEP_TO_SMINN:long_name = "atmospheric N deposition to soil mineral N" ; + # NEE:long_name = "net ecosystem exchange of carbon, includes fire, landuse, harvest, and hrv_xsmrpool flux, positive for source" ; + # NEP:long_name = "net ecosystem production, excludes fire, landuse, and harvest flux, positive for sink" ; + # NET_NMIN:long_name = "net rate of N mineralization" ; + # NFIRE:long_name = "fire counts valid 
only in Reg.C" ; + # NFIX_TO_SMINN:long_name = "symbiotic/asymbiotic N fixation to soil mineral N" ; + # NPP:long_name = "net primary production" ; + # OCDEP:long_name = "total OC deposition (dry+wet) from atmosphere" ; + # O_SCALAR:long_name = "fraction by which decomposition is reduced due to anoxia" ; + # PARVEGLN:long_name = "absorbed par by vegetation at local noon" ; + # PBOT:long_name = "atmospheric pressure" ; + # PCO2:long_name = "atmospheric partial pressure of CO2" ; + # PCT_LANDUNIT:long_name = "% of each landunit on grid cell" ; + # PCT_NAT_PFT:long_name = "% of each PFT on the natural vegetation (i.e., soil) landunit" ; + # PFT_CTRUNC:long_name = "patch-level sink for C truncation" ; + # PFT_FIRE_CLOSS:long_name = "total patch-level fire C loss for non-peat fires outside land-type converted region" ; + # PFT_FIRE_NLOSS:long_name = "total pft-level fire N loss" ; + # PFT_NTRUNC:long_name = "pft-level sink for N truncation" ; + # PLANT_NDEMAND:long_name = "N flux required to support initial GPP" ; + # POTENTIAL_IMMOB:long_name = "potential N immobilization" ; + # PROD100C:long_name = "100-yr wood product C" ; + # PROD100C_LOSS:long_name = "loss from 100-yr wood product pool" ; + # PROD100N:long_name = "100-yr wood product N" ; + # PROD100N_LOSS:long_name = "loss from 100-yr wood product pool" ; + # PROD10C:long_name = "10-yr wood product C" ; + # PROD10C_LOSS:long_name = "loss from 10-yr wood product pool" ; + # PROD10N:long_name = "10-yr wood product N" ; + # PROD10N_LOSS:long_name = "loss from 10-yr wood product pool" ; + # PRODUCT_CLOSS:long_name = "total carbon loss from wood product pools" ; + # PRODUCT_NLOSS:long_name = "total N loss from wood product pools" ; + # PSNSHA:long_name = "shaded leaf photosynthesis" ; + # PSNSHADE_TO_CPOOL:long_name = "C fixation from shaded canopy" ; + # PSNSUN:long_name = "sunlit leaf photosynthesis" ; + # PSNSUN_TO_CPOOL:long_name = "C fixation from sunlit canopy" ; + # Q2M:long_name = "2m specific humidity" ; + # QBOT:long_name = "atmospheric specific humidity" ; + # QDRAI:long_name = "sub-surface drainage" ; + # QDRAI_PERCH:long_name = "perched wt drainage" ; + # QDRAI_XS:long_name = "saturation excess drainage" ; + # QDRIP:long_name = "throughfall" ; + # QFLOOD:long_name = "runoff from river flooding" ; + # QFLX_ICE_DYNBAL:long_name = "ice dynamic land cover change conversion runoff flux" ; + # QFLX_LIQ_DYNBAL:long_name = "liq dynamic land cover change conversion runoff flux" ; + # QH2OSFC:long_name = "surface water runoff" ; + # QINFL:long_name = "infiltration" ; + # QINTR:long_name = "interception" ; + # QIRRIG:long_name = "water added through irrigation" ; + # QOVER:long_name = "surface runoff" ; + # QRGWL:long_name = "surface runoff at glaciers (liquid only), wetlands, lakes" ; + # QRUNOFF:long_name = "total liquid runoff (does not include QSNWCPICE)" ; + # QRUNOFF_NODYNLNDUSE:long_name = "total liquid runoff (does not include QSNWCPICE) not including correction for land use change" ; + # QRUNOFF_R:long_name = "Rural total runoff" ; + # QRUNOFF_U:long_name = "Urban total runoff" ; + # QSNOMELT:long_name = "snow melt" ; + # QSNWCPICE:long_name = "excess snowfall due to snow capping" ; + # QSNWCPICE_NODYNLNDUSE:long_name = "excess snowfall due to snow capping not including correction for land use change" ; + # QSOIL:long_name = "Ground evaporation (soil/snow evaporation + soil/snow sublimation - dew)" ; + # QVEGE:long_name = "canopy evaporation" ; + # QVEGT:long_name = "canopy transpiration" ; + # RAIN:long_name = "atmospheric rain" ; 
+ # RETRANSN:long_name = "plant pool of retranslocated N" ; + # RETRANSN_TO_NPOOL:long_name = "deployment of retranslocated N" ; + # RH2M:long_name = "2m relative humidity" ; + # RH2M_R:long_name = "Rural 2m specific humidity" ; + # RH2M_U:long_name = "Urban 2m relative humidity" ; + # RR:long_name = "root respiration (fine root MR + total root GR)" ; + # RSCANOPY:long_name = "canopy resistance" ; + # SABG:long_name = "solar rad absorbed by ground" ; + # SABG_PEN:long_name = "Rural solar rad penetrating top soil or snow layer" ; + # SABV:long_name = "solar rad absorbed by veg" ; + # SEEDC:long_name = "pool for seeding new Patches" ; + # SEEDN:long_name = "pool for seeding new PFTs" ; + # SMINN:long_name = "soil mineral N" ; + # SMINN_LEACHED:long_name = "soil mineral N pool loss to leaching" ; + # SMINN_TO_DENIT_L1S1:long_name = "denitrification for decomp. of litter 1to SOIL1" ; + # SMINN_TO_DENIT_L2S2:long_name = "denitrification for decomp. of litter 2to SOIL2" ; + # SMINN_TO_DENIT_L3S3:long_name = "denitrification for decomp. of litter 3to SOIL3" ; + # SMINN_TO_DENIT_S1S2:long_name = "denitrification for decomp. of soil 1to SOIL2" ; + # SMINN_TO_DENIT_S2S3:long_name = "denitrification for decomp. of soil 2to SOIL3" ; + # SMINN_TO_DENIT_S3S4:long_name = "denitrification for decomp. of soil 3to SOIL4" ; + # SMINN_TO_DENIT_S4:long_name = "denitrification for decomp. of soil 4to atmosphe" ; + # SMINN_TO_NPOOL:long_name = "deployment of soil mineral N uptake" ; + # SMINN_TO_PLANT:long_name = "plant uptake of soil mineral N" ; + # SMINN_TO_SOIL1N_L1:long_name = "mineral N flux for decomp. of LITR1to SOIL1" ; + # SMINN_TO_SOIL2N_L2:long_name = "mineral N flux for decomp. of LITR2to SOIL2" ; + # SMINN_TO_SOIL2N_S1:long_name = "mineral N flux for decomp. of SOIL1to SOIL2" ; + # SMINN_TO_SOIL3N_L3:long_name = "mineral N flux for decomp. of LITR3to SOIL3" ; + # SMINN_TO_SOIL3N_S2:long_name = "mineral N flux for decomp. of SOIL2to SOIL3" ; + # SMINN_TO_SOIL4N_S3:long_name = "mineral N flux for decomp. of SOIL3to SOIL4" ; + # SNOBCMCL:long_name = "mass of BC in snow column" ; + # SNOBCMSL:long_name = "mass of BC in top snow layer" ; + # SNODSTMCL:long_name = "mass of dust in snow column" ; + # SNODSTMSL:long_name = "mass of dust in top snow layer" ; + # SNOINTABS:long_name = "Percent of incoming solar absorbed by lower snow layers" ; + # SNOOCMCL:long_name = "mass of OC in snow column" ; + # SNOOCMSL:long_name = "mass of OC in top snow layer" ; + # SNOW:long_name = "atmospheric snow" ; + # SNOWDP:long_name = "gridcell mean snow height" ; + # SNOWICE:long_name = "snow ice" ; + # SNOWLIQ:long_name = "snow liquid water" ; + # SNOW_DEPTH:long_name = "snow height of snow covered area" ; + # SNOW_SINKS:long_name = "snow sinks (liquid water)" ; + # SNOW_SOURCES:long_name = "snow sources (liquid water)" ; + # SOIL1C:long_name = "SOIL1 C" ; + # SOIL1C_TO_SOIL2C:long_name = "decomp. of soil 1 C to soil 2 C" ; + # SOIL1N:long_name = "SOIL1 N" ; + # SOIL1N_TNDNCY_VERT_TRANS:long_name = "soil 1 N tendency due to vertical transport" ; + # SOIL1N_TO_SOIL2N:long_name = "decomp. of soil 1 N to soil 2 N" ; + # SOIL1_HR:long_name = "Het. Resp. from soil 1" ; + # SOIL2C:long_name = "SOIL2 C" ; + # SOIL2C_TO_SOIL3C:long_name = "decomp. of soil 2 C to soil 3 C" ; + # SOIL2N:long_name = "SOIL2 N" ; + # SOIL2N_TNDNCY_VERT_TRANS:long_name = "soil 2 N tendency due to vertical transport" ; + # SOIL2N_TO_SOIL3N:long_name = "decomp. of soil 2 N to soil 3 N" ; + # SOIL2_HR:long_name = "Het. Resp. 
from soil 2" ; + # SOIL3C:long_name = "SOIL3 C" ; + # SOIL3C_TO_SOIL4C:long_name = "decomp. of soil 3 C to soil 4 C" ; + # SOIL3N:long_name = "SOIL3 N" ; + # SOIL3N_TNDNCY_VERT_TRANS:long_name = "soil 3 N tendency due to vertical transport" ; + # SOIL3N_TO_SOIL4N:long_name = "decomp. of soil 3 N to soil 4 N" ; + # SOIL3_HR:long_name = "Het. Resp. from soil 3" ; + # SOIL4C:long_name = "SOIL4 C" ; + # SOIL4N:long_name = "SOIL4 N" ; + # SOIL4N_TNDNCY_VERT_TRANS:long_name = "soil 4 N tendency due to vertical transport" ; + # SOIL4N_TO_SMINN:long_name = "mineral N flux for decomp. of SOIL4" ; + # SOIL4_HR:long_name = "Het. Resp. from soil 4" ; + # SOILC:long_name = "soil C" ; + # SOILC_HR:long_name = "soil C heterotrophic respiration" ; + # SOILC_LOSS:long_name = "soil C loss" ; + # SOILPSI:long_name = "soil water potential in each soil layer" ; + # SOMC_FIRE:long_name = "C loss due to peat burning" ; + # SOMHR:long_name = "soil organic matter heterotrophic respiration" ; + # SOM_C_LEACHED:long_name = "total flux of C from SOM pools due to leaching" ; + # SR:long_name = "total soil respiration (HR + root resp)" ; + # STORVEGC:long_name = "stored vegetation carbon, excluding cpool" ; + # STORVEGN:long_name = "stored vegetation nitrogen" ; + # SUPPLEMENT_TO_SMINN:long_name = "supplemental N supply" ; + # SoilAlpha:long_name = "factor limiting ground evap" ; + # SoilAlpha_U:long_name = "urban factor limiting ground evap" ; + # TAUX:long_name = "zonal surface stress" ; + # TAUY:long_name = "meridional surface stress" ; + # TBOT:long_name = "atmospheric air temperature" ; + # TBUILD:long_name = "internal urban building temperature" ; + # TG:long_name = "ground temperature" ; + # TG_R:long_name = "Rural ground temperature" ; + # TG_U:long_name = "Urban ground temperature" ; + # TH2OSFC:long_name = "surface water temperature" ; + # THBOT:long_name = "atmospheric air potential temperature" ; + # TKE1:long_name = "top lake level eddy thermal conductivity" ; + # TLAI:long_name = "total projected leaf area index" ; + # TLAKE:long_name = "lake temperature" ; + # TOTCOLC:long_name = "total column carbon, incl veg and cpool" ; + # TOTCOLN:long_name = "total column-level N" ; + # TOTECOSYSC:long_name = "total ecosystem carbon, incl veg but excl cpool" ; + # TOTECOSYSN:long_name = "total ecosystem N" ; + # TOTLITC:long_name = "total litter carbon" ; + # TOTLITN:long_name = "total litter N" ; + # TOTPFTC:long_name = "total patch-level carbon, including cpool" ; + # TOTPFTN:long_name = "total PFT-level nitrogen" ; + # TOTPRODC:long_name = "total wood product C" ; + # TOTPRODN:long_name = "total wood product N" ; + # TOTSOMC:long_name = "total soil organic matter carbon" ; + # TOTSOMN:long_name = "total soil organic matter N" ; + # TOTVEGC:long_name = "total vegetation carbon, excluding cpool" ; + # TOTVEGN:long_name = "total vegetation nitrogen" ; + # TREFMNAV:long_name = "daily minimum of average 2-m temperature" ; + # TREFMNAV_R:long_name = "Rural daily minimum of average 2-m temperature" ; + # TREFMNAV_U:long_name = "Urban daily minimum of average 2-m temperature" ; + # TREFMXAV:long_name = "daily maximum of average 2-m temperature" ; + # TREFMXAV_R:long_name = "Rural daily maximum of average 2-m temperature" ; + # TREFMXAV_U:long_name = "Urban daily maximum of average 2-m temperature" ; + # TSA:long_name = "2m air temperature" ; + # TSAI:long_name = "total projected stem area index" ; + # TSA_R:long_name = "Rural 2m air temperature" ; + # TSA_U:long_name = "Urban 2m air temperature" ; + # TSOI_10CM:long_name 
= "soil temperature in top 10cm of soil" ; + # TV:long_name = "vegetation temperature" ; + # TWS:long_name = "total water storage" ; + # T_SCALAR:long_name = "temperature inhibition of decomposition" ; + # U10:long_name = "10-m wind" ; + # URBAN_AC:long_name = "urban air conditioning flux" ; + # URBAN_HEAT:long_name = "urban heating flux" ; + # VOCFLXT:long_name = "total VOC flux into atmosphere" ; + # VOLR:long_name = "river channel water storage" ; + # WASTEHEAT:long_name = "sensible heat flux from heating/cooling sources of urban waste heat" ; + # WF:long_name = "soil water as frac. of whc for top 0.05 m" ; + # WIND:long_name = "atmospheric wind velocity magnitude" ; + # WOODC:long_name = "wood C" ; + # WOODC_ALLOC:long_name = "wood C eallocation" ; + # WOODC_LOSS:long_name = "wood C loss" ; + # WOOD_HARVESTC:long_name = "wood harvest carbon (to product pools)" ; + # WOOD_HARVESTN:long_name = "wood harvest N (to product pools)" ; + # W_SCALAR:long_name = "Moisture (dryness) inhibition of decomposition" + + # } +} ## end model2netcdf.CLM45 +## ==================================================================================================# #################################################################################################### -### EOF. End of R script file. +### EOF. End of R script file. #################################################################################################### diff --git a/models/clm45/R/write.configs.CLM45.R b/models/clm45/R/write.configs.CLM45.R index b2ed9ea6ed6..2f5294d0bd0 100644 --- a/models/clm45/R/write.configs.CLM45.R +++ b/models/clm45/R/write.configs.CLM45.R @@ -9,28 +9,28 @@ ##' @return none ##' @export ##' @author Mike Dietze -##-------------------------------------------------------------------------------------------------# - write.config.CLM45 <- function(defaults, trait.values, settings, run.id){ -# -# #OUTLINE OF MODULES -# # Copy Case and Build -# # -symbolic link to refernce case that is already completed -# # Edit user_nl_* files to add site info -# # make Jobs.sh -case_submit -# # call met2model and add to namelists -# # -# -# # find out where to write run/ouput -# rundir <- file.path(settings$host$rundir, run.id) -# outdir <- file.path(settings$host$outdir, run.id) -# -# # create launch script (which will create symlink) -# if (!is.null(settings$model$jobtemplate) && file.exists(settings$model$jobtemplate)) { -# jobsh <- readLines(con=settings$model$jobtemplate, n=-1) -# } else { -# jobsh <- readLines(con=system.file("template.job", package = "PEcAn.CLM45"), n=-1) -# } -# +## -------------------------------------------------------------------------------------------------# +write.config.CLM45 <- function(defaults, trait.values, settings, run.id) { + # + # #OUTLINE OF MODULES + # # Copy Case and Build + # # -symbolic link to refernce case that is already completed + # # Edit user_nl_* files to add site info + # # make Jobs.sh -case_submit + # # call met2model and add to namelists + # # + # + # # find out where to write run/ouput + # rundir <- file.path(settings$host$rundir, run.id) + # outdir <- file.path(settings$host$outdir, run.id) + # + # # create launch script (which will create symlink) + # if (!is.null(settings$model$jobtemplate) && file.exists(settings$model$jobtemplate)) { + # jobsh <- readLines(con=settings$model$jobtemplate, n=-1) + # } else { + # jobsh <- readLines(con=system.file("template.job", package = "PEcAn.CLM45"), n=-1) + # } + # # # create host specific setttings # hostsetup <- "" # if 
(!is.null(settings$model$prerun)) { @@ -52,24 +52,24 @@ # jobsh <- gsub('@HOST_SETUP@', hostsetup, jobsh) # jobsh <- gsub('@HOST_TEARDOWN@', hostteardown, jobsh) -# -# jobsh <- gsub('@SITE_LAT@', settings$run$site$lat, jobsh) -# jobsh <- gsub('@SITE_LON@', settings$run$site$lon, jobsh) -# jobsh <- gsub('@SITE_MET@', settings$run$inputs$met$path, jobsh) -# -# jobsh <- gsub('@START_DATE@', settings$run$start.date, jobsh) -# jobsh <- gsub('@END_DATE@', settings$run$end.date, jobsh) -# -# jobsh <- gsub('@OUTDIR@', outdir, jobsh) -# jobsh <- gsub('@RUNDIR@', rundir, jobsh) -# -# jobsh <- gsub('@BINARY@', settings$model$binary, jobsh) -# -# writeLines(jobsh, con=file.path(settings$rundir, run.id, "job.sh")) -# Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) -# -# ## Write PARAMETER file -# -# ## Write SETTINGS file -# - } + # + # jobsh <- gsub('@SITE_LAT@', settings$run$site$lat, jobsh) + # jobsh <- gsub('@SITE_LON@', settings$run$site$lon, jobsh) + # jobsh <- gsub('@SITE_MET@', settings$run$inputs$met$path, jobsh) + # + # jobsh <- gsub('@START_DATE@', settings$run$start.date, jobsh) + # jobsh <- gsub('@END_DATE@', settings$run$end.date, jobsh) + # + # jobsh <- gsub('@OUTDIR@', outdir, jobsh) + # jobsh <- gsub('@RUNDIR@', rundir, jobsh) + # + # jobsh <- gsub('@BINARY@', settings$model$binary, jobsh) + # + # writeLines(jobsh, con=file.path(settings$rundir, run.id, "job.sh")) + # Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) + # + # ## Write PARAMETER file + # + # ## Write SETTINGS file + # +} diff --git a/models/clm45/tests/testthat.R b/models/clm45/tests/testthat.R index bd9de84301b..10dc3185281 100644 --- a/models/clm45/tests/testthat.R +++ b/models/clm45/tests/testthat.R @@ -2,4 +2,4 @@ library(testthat) library(PEcAn.utils) PEcAn.logger::logger.setQuitOnSevere(FALSE) -#test_check("PEcAn.CLM45") +# test_check("PEcAn.CLM45") diff --git a/models/clm45/tests/testthat/test.met2model.R b/models/clm45/tests/testthat/test.met2model.R index d71c05336e1..593ff977780 100644 --- a/models/clm45/tests/testthat/test.met2model.R +++ b/models/clm45/tests/testthat/test.met2model.R @@ -6,14 +6,18 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("Met conversion runs without error", { nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" end_date <- "2000-12-31" - expect_error({ - result <- met2model.CLM45(in.path, in.prefix, outfolder, start_date, end_date) - }, "NOT IMPLEMENTED") + expect_error( + { + result <- met2model.CLM45(in.path, in.prefix, outfolder, start_date, end_date) + }, + "NOT IMPLEMENTED" + ) skip("met2model.CLM45 is not implemented") expect_s3_class(result, "data.frame") expect_true(file.exists(result[["file"]][[1]])) diff --git a/models/dalec/R/met2model.DALEC.R b/models/dalec/R/met2model.DALEC.R index 0ce459df567..58d0beeea1a 100644 --- a/models/dalec/R/met2model.DALEC.R +++ b/models/dalec/R/met2model.DALEC.R @@ -19,8 +19,7 @@ ##' `PEcAn.data.atmosphere::spin.met()` ##' @param ... additional arguments, currently ignored met2model.DALEC <- function(in.path, in.prefix, outfolder, start_date, end_date, - overwrite = FALSE, verbose = FALSE, spin_nyear=NULL,spin_nsample=NULL,spin_resample=NULL, ...) { - + overwrite = FALSE, verbose = FALSE, spin_nyear = NULL, spin_nsample = NULL, spin_resample = NULL, ...) 
{ ## DALEC 1 driver format (.csv): Runday, Min temp (°C), Max temp (°C), Radiation (MJ d-1), ## Atmospheric CO2 (μmol mol-1), Day of year @@ -34,35 +33,41 @@ met2model.DALEC <- function(in.path, in.prefix, outfolder, start_date, end_date, start_date <- as.POSIXlt(start_date, tz = "UTC") start_date_string <- as.character(strptime(start_date, "%Y-%m-%d")) end_date <- as.POSIXlt(end_date, tz = "UTC") - if(nchar(in.prefix)>0 & substr(in.prefix,nchar(in.prefix),nchar(in.prefix)) != ".") in.prefix = paste0(in.prefix,".") + if (nchar(in.prefix) > 0 & substr(in.prefix, nchar(in.prefix), nchar(in.prefix)) != ".") in.prefix <- paste0(in.prefix, ".") - if(!is.null(spin_nyear)){ + if (!is.null(spin_nyear)) { ## if spinning up, extend processed met by resampling or cycling met PEcAn.logger::logger.info("Adding Spin-up met for DALEC") spin_nyear <- as.numeric(spin_nyear) spin_nsample <- as.numeric(spin_nsample) spin_resample <- as.logical(spin_resample) - start_date <- PEcAn.data.atmosphere::spin.met(in.path,in.prefix,start_date,end_date, - spin_nyear,spin_nsample,spin_resample) -# start_date <- as.POSIXlt(strftime(start_date, "%Y-%m-%d"), tz = "UTC") -# start_date <- strptime(paste0(start_year,"-01-01"),"%Y-%m-%d", tz = "UTC") - start_date_string <- paste0(lubridate::year(start_date),"-01-01") ## strptime can't parse negative years - PEcAn.logger::logger.info("New Start Date",start_date_string) + start_date <- PEcAn.data.atmosphere::spin.met( + in.path, in.prefix, start_date, end_date, + spin_nyear, spin_nsample, spin_resample + ) + # start_date <- as.POSIXlt(strftime(start_date, "%Y-%m-%d"), tz = "UTC") + # start_date <- strptime(paste0(start_year,"-01-01"),"%Y-%m-%d", tz = "UTC") + start_date_string <- paste0(lubridate::year(start_date), "-01-01") ## strptime can't parse negative years + PEcAn.logger::logger.info("New Start Date", start_date_string) } - out.file <- paste0(in.prefix, start_date_string,".", - strptime(end_date, "%Y-%m-%d"), - ".dat") + out.file <- paste0( + in.prefix, start_date_string, ".", + strptime(end_date, "%Y-%m-%d"), + ".dat" + ) out.file.full <- file.path(outfolder, out.file) - results <- data.frame(file = c(out.file.full), - host = c(PEcAn.remote::fqdn()), - mimetype = c("text/plain"), - formatname = c("DALEC meteorology"), - startdate = c(start_date), - enddate = c(end_date), - dbfile.name = out.file, - stringsAsFactors = FALSE) + results <- data.frame( + file = c(out.file.full), + host = c(PEcAn.remote::fqdn()), + mimetype = c("text/plain"), + formatname = c("DALEC meteorology"), + startdate = c(start_date), + enddate = c(end_date), + dbfile.name = out.file, + stringsAsFactors = FALSE + ) print("internal results") print(results) @@ -70,7 +75,7 @@ met2model.DALEC <- function(in.path, in.prefix, outfolder, start_date, end_date, PEcAn.logger::logger.debug("File '", out.file.full, "' already exists, skipping to next file.") return(invisible(results)) } - + ## check to see if the outfolder is defined, if not create directory for output if (!file.exists(outfolder)) { dir.create(outfolder) @@ -91,29 +96,30 @@ met2model.DALEC <- function(in.path, in.prefix, outfolder, start_date, end_date, LeafWaterPot <- -0.8 old.file <- file.path(in.path, paste(in.prefix, year, ".nc", sep = "")) - if(!file.exists(old.file)) PEcAn.logger::logger.error("file not found",old.file) + if (!file.exists(old.file)) PEcAn.logger::logger.error("file not found", old.file) ## open netcdf nc <- ncdf4::nc_open(old.file) ## convert time to seconds sec <- nc$dim$time$vals sec <- PEcAn.utils::ud_convert(sec, 
unlist(strsplit(nc$dim$time$units, " "))[1], "seconds") - timestep.s <- 86400 # seconds in a day + timestep.s <- 86400 # seconds in a day dt <- PEcAn.utils::seconds_in_year(year) / length(sec) tstep <- round(timestep.s / dt) - dt <- timestep.s / tstep #dt is now an integer + dt <- timestep.s / tstep # dt is now an integer ## extract variables - lat <- ncdf4::ncvar_get(nc, "latitude") - lon <- ncdf4::ncvar_get(nc, "longitude") - Tair <- ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin - SW <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") ## in W/m2 - CO2 <- try(ncdf4::ncvar_get(nc, "mole_fraction_of_carbon_dioxide_in_air")) + lat <- ncdf4::ncvar_get(nc, "latitude") + lon <- ncdf4::ncvar_get(nc, "longitude") + Tair <- ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin + SW <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") ## in W/m2 + CO2 <- try(ncdf4::ncvar_get(nc, "mole_fraction_of_carbon_dioxide_in_air")) ncdf4::nc_close(nc) useCO2 <- is.numeric(CO2) - if (useCO2) - CO2 <- CO2 * 1e+06 ## convert from mole fraction (kg/kg) to ppm + if (useCO2) { + CO2 <- CO2 * 1e+06 + } ## convert from mole fraction (kg/kg) to ppm ## is CO2 present? if (!is.numeric(CO2)) { @@ -126,13 +132,17 @@ met2model.DALEC <- function(in.path, in.prefix, outfolder, start_date, end_date, leafN <- rep(leafN, length(Tair)) } if (length(HydResist) == 1) { - PEcAn.logger::logger.warn("total plant-soil hydraulic resistance (MPa.m2.s/mmol-1) not specified, setting to default: ", - HydResist) + PEcAn.logger::logger.warn( + "total plant-soil hydraulic resistance (MPa.m2.s/mmol-1) not specified, setting to default: ", + HydResist + ) HydResist <- rep(HydResist, length(Tair)) } if (length(LeafWaterPot) == 1) { - PEcAn.logger::logger.warn("maximum soil-leaf water potential difference (MPa) not specified, setting to default: ", - LeafWaterPot) + PEcAn.logger::logger.warn( + "maximum soil-leaf water potential difference (MPa) not specified, setting to default: ", + LeafWaterPot + ) LeafWaterPot <- rep(LeafWaterPot, length(Tair)) } @@ -141,15 +151,15 @@ met2model.DALEC <- function(in.path, in.prefix, outfolder, start_date, end_date, doy <- rep(seq_len(diy), each = timestep.s / dt)[seq_along(sec)] ## Aggregate variables up to daily - Tmean <- PEcAn.utils::ud_convert(tapply(Tair, doy, mean, na.rm = TRUE), "Kelvin", "Celsius") - Tmin <- PEcAn.utils::ud_convert(tapply(Tair, doy, min, na.rm = TRUE), "Kelvin", "Celsius") - Tmax <- PEcAn.utils::ud_convert(tapply(Tair, doy, max, na.rm = TRUE), "Kelvin", "Celsius") - Rin <- tapply(SW, doy, sum) * dt * 1e-06 # J/m2/s * s * MJ/J + Tmean <- PEcAn.utils::ud_convert(tapply(Tair, doy, mean, na.rm = TRUE), "Kelvin", "Celsius") + Tmin <- PEcAn.utils::ud_convert(tapply(Tair, doy, min, na.rm = TRUE), "Kelvin", "Celsius") + Tmax <- PEcAn.utils::ud_convert(tapply(Tair, doy, max, na.rm = TRUE), "Kelvin", "Celsius") + Rin <- tapply(SW, doy, sum) * dt * 1e-06 # J/m2/s * s * MJ/J LeafWaterPot <- tapply(LeafWaterPot, doy, mean) - CO2 <- tapply(CO2, doy, mean) - HydResist <- tapply(HydResist, doy, mean) - leafN <- tapply(leafN, doy, mean) - doy <- tapply(doy, doy, mean) + CO2 <- tapply(CO2, doy, mean) + HydResist <- tapply(HydResist, doy, mean) + leafN <- tapply(leafN, doy, mean) + doy <- tapply(doy, doy, mean) ## The nine columns of driving data are: day of year; mean air temperature (deg C); max daily ## temperature (deg C); min daily temperature (deg C); incident radiation (MJ/m2/day); maximum @@ -160,31 +170,30 @@ met2model.DALEC <- function(in.path, in.prefix, 
outfolder, start_date, end_date, ## build data matrix tmp <- cbind(doy, Tmean, Tmax, Tmin, Rin, LeafWaterPot, CO2, HydResist, leafN) - ##filter out days not included in start or end date - if(year == start_year){ - start.row <- length(as.Date(paste0(start_year, "-01-01")):as.Date(start_date)) #extra days length includes the start date - if (start.row > 1){ + ## filter out days not included in start or end date + if (year == start_year) { + start.row <- length(as.Date(paste0(start_year, "-01-01")):as.Date(start_date)) # extra days length includes the start date + if (start.row > 1) { PEcAn.logger::logger.info("Subsetting DALEC met to match start date ", as.Date(start_date)) print(start.row) print(nrow(tmp)) - tmp <- tmp[start.row:nrow(tmp),] + tmp <- tmp[start.row:nrow(tmp), ] } } - if (year == end_year){ - if(year == start_year){ + if (year == end_year) { + if (year == start_year) { end.row <- length(as.Date(start_date):as.Date(end_date)) - if (end.row < nrow(tmp)){ + if (end.row < nrow(tmp)) { PEcAn.logger::logger.info("Subsetting DALEC met to match end date") - tmp <- tmp[1:end.row,] + tmp <- tmp[1:end.row, ] } - } else{ + } else { end.row <- length(as.Date(paste0(end_year, "-01-01")):as.Date(end_date)) - if (end.row < nrow(tmp)){ + if (end.row < nrow(tmp)) { PEcAn.logger::logger.info("Subsetting DALEC met to match end date") - tmp <- tmp[1:end.row,] + tmp <- tmp[1:end.row, ] } } - } if (is.null(out)) { @@ -192,9 +201,8 @@ met2model.DALEC <- function(in.path, in.prefix, outfolder, start_date, end_date, } else { out <- rbind(out, tmp) } - } ## end loop over years - utils::write.table(out, out.file.full, quote = FALSE, sep = " ", row.names = FALSE, col.names = FALSE) + } ## end loop over years + utils::write.table(out, out.file.full, quote = FALSE, sep = " ", row.names = FALSE, col.names = FALSE) return(invisible(results)) - } # met2model.DALEC diff --git a/models/dalec/R/model2netcdf.DALEC.R b/models/dalec/R/model2netcdf.DALEC.R index 18be5fcb01f..d22fee78e46 100644 --- a/models/dalec/R/model2netcdf.DALEC.R +++ b/models/dalec/R/model2netcdf.DALEC.R @@ -12,137 +12,151 @@ ##' @author Shawn Serbin, Michael Dietze model2netcdf.DALEC <- function(outdir, sitelat, sitelon, start_date, end_date) { runid <- basename(outdir) - DALEC.configs <- utils::read.table(file.path(gsub(pattern = "/out/", - replacement = "/run/", x = outdir), - paste0("CONFIG.", runid)), - stringsAsFactors = FALSE) - + DALEC.configs <- utils::read.table( + file.path( + gsub( + pattern = "/out/", + replacement = "/run/", x = outdir + ), + paste0("CONFIG.", runid) + ), + stringsAsFactors = FALSE + ) + ### Read in model output in DALEC format - DALEC.output <- utils::read.table(file.path(outdir, "out.txt"), - header = FALSE, sep = "") + DALEC.output <- utils::read.table(file.path(outdir, "out.txt"), + header = FALSE, sep = "" + ) DALEC.output.dims <- dim(DALEC.output) - + ### Determine number of years and output timestep - days <- as.Date(start_date):as.Date(end_date) - year <- strftime(as.Date(days, origin = "1970-01-01"), "%Y") - num.years <- length(unique(year)) - years <- unique(year) + days <- as.Date(start_date):as.Date(end_date) + year <- strftime(as.Date(days, origin = "1970-01-01"), "%Y") + num.years <- length(unique(year)) + years <- unique(year) timestep.s <- 86400 - + ### Loop over years in DALEC output to create separate netCDF outputs for (y in years) { if (file.exists(file.path(outdir, paste(y, "nc", sep = ".")))) { next } - print(paste("---- Processing year: ", y)) #turn on for debugging - + print(paste("---- 
Processing year: ", y)) # turn on for debugging + ## Subset data for processing sub.DALEC.output <- subset(DALEC.output, year == y) sub.DALEC.output.dims <- dim(sub.DALEC.output) # ******************** Declare netCDF variables ********************# start.day <- 1 - if (y == lubridate::year(start_date)){ - start.day <- length(as.Date(paste0(y, "-01-01")):as.Date(start_date)) - } - tvals <- (start.day:sub.DALEC.output.dims[1])-1 - bounds <- array(data=NA, dim=c(length(tvals),2)) - bounds[,1] <- tvals - bounds[,2] <- bounds[,1]+1 - t <- ncdf4::ncdim_def(name = "time", units = paste0("days since ", y, "-01-01 00:00:00"), - vals = tvals, calendar = "standard", unlim = TRUE) + if (y == lubridate::year(start_date)) { + start.day <- length(as.Date(paste0(y, "-01-01")):as.Date(start_date)) + } + tvals <- (start.day:sub.DALEC.output.dims[1]) - 1 + bounds <- array(data = NA, dim = c(length(tvals), 2)) + bounds[, 1] <- tvals + bounds[, 2] <- bounds[, 1] + 1 + t <- ncdf4::ncdim_def( + name = "time", units = paste0("days since ", y, "-01-01 00:00:00"), + vals = tvals, calendar = "standard", unlim = TRUE + ) ## ***** Need to dynamically update the UTC offset here ***** - + lat <- ncdf4::ncdim_def("lat", "degrees_north", vals = as.numeric(sitelat), longname = "station_latitude") lon <- ncdf4::ncdim_def("lon", "degrees_east", vals = as.numeric(sitelon), longname = "station_longitude") dims <- list(lon = lon, lat = lat, time = t) - time_interval <- ncdf4::ncdim_def(name = "hist_interval", - longname="history time interval endpoint dimensions", - vals = 1:2, units="") - + time_interval <- ncdf4::ncdim_def( + name = "hist_interval", + longname = "history time interval endpoint dimensions", + vals = 1:2, units = "" + ) + ## Output names # ra (autotrophic respiration, gC/m2/day); # af (flux of carbon entering foliage, gC/m2/day); # aw (flux of carbon entering woody material, gC/m2/day); # ar (flux of carbon entering roots, gC/m2/day); # lf (flux of carbon leaving foliage as litter, gC/m2/day); - # lw (flux of carbon leaving woody material as debris, gC/m2/day); + # lw (flux of carbon leaving woody material as debris, gC/m2/day); # lr (flux of carbon leaving roots as debris, gC/m2/day); # cf (foliar biomass, gC/m2); # cw (woody biomass, gC/m2); # cr (root biomass, gC/m2); # rh1 (heterotrophic flux from litter, gC/m2/day); - # rh2 (heterotrophic flux from soil and woody debris, gC/m2/day); + # rh2 (heterotrophic flux from soil and woody debris, gC/m2/day); # d (decompostion flux from litter to soil pool, gC/m2/day); # cl (litter biomass, gC/m2); # cs (soil organic matter, gC/m2); # gpp (gross primary productivity, gC/m2/day); # nep (net ecosystem productivity, gC/m2/day); - + # names(sub.DALEC.output) <- c("ra", "af", "aw", "ar", "lf", "lw", "lr", "cf", "cw", "cr", "rh1", "rh2", "d", "cl", "cs", "gpp", "nep") - + ## Setup outputs for netCDF file in appropriate units output <- list() ## Fluxes - output[[1]] <- (sub.DALEC.output[, 1] * 0.001)/timestep.s # Autotrophic Respiration in kgC/m2/s - output[[2]] <- (sub.DALEC.output[, 21] + sub.DALEC.output[, 23]) * 0.001 / timestep.s # Heterotrophic Resp kgC/m2/s - output[[3]] <- (sub.DALEC.output[, 31] * 0.001)/timestep.s # GPP in kgC/m2/s - output[[4]] <- (sub.DALEC.output[, 33] * 0.001)/timestep.s # NEE in kgC/m2/s - output[[5]] <- (sub.DALEC.output[, 3] + sub.DALEC.output[, 5] + sub.DALEC.output[, 7]) * 0.001/timestep.s # NPP kgC/m2/s - output[[6]] <- (sub.DALEC.output[, 9] * 0.001) / timestep.s # Leaf Litter Flux, kgC/m2/s - output[[7]] <- (sub.DALEC.output[, 11] * 
0.001) / timestep.s # Woody Litter Flux, kgC/m2/s - output[[8]] <- (sub.DALEC.output[, 13] * 0.001) / timestep.s # Root Litter Flux, kgC/m2/s - + output[[1]] <- (sub.DALEC.output[, 1] * 0.001) / timestep.s # Autotrophic Respiration in kgC/m2/s + output[[2]] <- (sub.DALEC.output[, 21] + sub.DALEC.output[, 23]) * 0.001 / timestep.s # Heterotrophic Resp kgC/m2/s + output[[3]] <- (sub.DALEC.output[, 31] * 0.001) / timestep.s # GPP in kgC/m2/s + output[[4]] <- (sub.DALEC.output[, 33] * 0.001) / timestep.s # NEE in kgC/m2/s + output[[5]] <- (sub.DALEC.output[, 3] + sub.DALEC.output[, 5] + sub.DALEC.output[, 7]) * 0.001 / timestep.s # NPP kgC/m2/s + output[[6]] <- (sub.DALEC.output[, 9] * 0.001) / timestep.s # Leaf Litter Flux, kgC/m2/s + output[[7]] <- (sub.DALEC.output[, 11] * 0.001) / timestep.s # Woody Litter Flux, kgC/m2/s + output[[8]] <- (sub.DALEC.output[, 13] * 0.001) / timestep.s # Root Litter Flux, kgC/m2/s + ## Pools - output[[9]] <- (sub.DALEC.output[, 15] * 0.001) # Leaf Carbon, kgC/m2 - output[[10]] <- (sub.DALEC.output[, 17] * 0.001) # Wood Carbon, kgC/m2 - output[[11]] <- (sub.DALEC.output[, 19] * 0.001) # Root Carbon, kgC/m2 - output[[12]] <- (sub.DALEC.output[, 27] * 0.001) # Litter Carbon, kgC/m2 - output[[13]] <- (sub.DALEC.output[, 29] * 0.001) # Soil Carbon, kgC/m2 - + output[[9]] <- (sub.DALEC.output[, 15] * 0.001) # Leaf Carbon, kgC/m2 + output[[10]] <- (sub.DALEC.output[, 17] * 0.001) # Wood Carbon, kgC/m2 + output[[11]] <- (sub.DALEC.output[, 19] * 0.001) # Root Carbon, kgC/m2 + output[[12]] <- (sub.DALEC.output[, 27] * 0.001) # Litter Carbon, kgC/m2 + output[[13]] <- (sub.DALEC.output[, 29] * 0.001) # Soil Carbon, kgC/m2 + ## standard composites - output[[14]] <- output[[1]] + output[[2]] # Total Respiration - output[[15]] <- output[[9]] + output[[10]] + output[[11]] ## TotLivBiom - output[[16]] <- output[[12]] + output[[13]] ## TotSoilCarb - output[[17]] <- sub.DALEC.output[, 15] * DALEC.configs[grep("SLA", DALEC.configs) + 1][[1]] - + output[[14]] <- output[[1]] + output[[2]] # Total Respiration + output[[15]] <- output[[9]] + output[[10]] + output[[11]] ## TotLivBiom + output[[16]] <- output[[12]] + output[[13]] ## TotSoilCarb + output[[17]] <- sub.DALEC.output[, 15] * DALEC.configs[grep("SLA", DALEC.configs) + 1][[1]] + ## time_bounds - output[[18]] <- c(rbind(bounds[,1], bounds[,2])) - + output[[18]] <- c(rbind(bounds[, 1], bounds[, 2])) + ## missing value handling for (i in seq_along(output)) { - if (length(output[[i]]) == 0) + if (length(output[[i]]) == 0) { output[[i]] <- rep(-999, length(t$vals)) + } } - + ## setup nc file # ******************** Declar netCDF variables ********************# nc_var <- list() - nc_var[[1]] <- PEcAn.utils::to_ncvar("AutoResp", dims) - nc_var[[2]] <- PEcAn.utils::to_ncvar("HeteroResp", dims) - nc_var[[3]] <- PEcAn.utils::to_ncvar("GPP", dims) - nc_var[[4]] <- PEcAn.utils::to_ncvar("NEE", dims) - nc_var[[5]] <- PEcAn.utils::to_ncvar("NPP", dims) - nc_var[[6]] <- PEcAn.utils::to_ncvar("leaf_litter_carbon_flux", dims) #was LeafLitter - nc_var[[7]] <- PEcAn.utils::to_ncvar("WoodyLitter", dims) #need to resolve standard woody litter flux - nc_var[[8]] <- PEcAn.utils::to_ncvar("subsurface_litter_carbon_flux", dims) #was RootLitter - nc_var[[9]] <- PEcAn.utils::to_ncvar("leaf_carbon_content", dims) #was LeafBiomass - nc_var[[10]] <- PEcAn.utils::to_ncvar("wood_carbon_content", dims) #was WoodBiomass - nc_var[[11]] <- PEcAn.utils::to_ncvar("root_carbon_content", dims) #was RootBiomass - nc_var[[12]] <- 
PEcAn.utils::to_ncvar("litter_carbon_content", dims) #was LitterBiomass - nc_var[[13]] <- PEcAn.utils::to_ncvar("soil_carbon_content", dims) #was SoilC; SOM pool technically includes woody debris (can't be represented by our standard) - + nc_var[[1]] <- PEcAn.utils::to_ncvar("AutoResp", dims) + nc_var[[2]] <- PEcAn.utils::to_ncvar("HeteroResp", dims) + nc_var[[3]] <- PEcAn.utils::to_ncvar("GPP", dims) + nc_var[[4]] <- PEcAn.utils::to_ncvar("NEE", dims) + nc_var[[5]] <- PEcAn.utils::to_ncvar("NPP", dims) + nc_var[[6]] <- PEcAn.utils::to_ncvar("leaf_litter_carbon_flux", dims) # was LeafLitter + nc_var[[7]] <- PEcAn.utils::to_ncvar("WoodyLitter", dims) # need to resolve standard woody litter flux + nc_var[[8]] <- PEcAn.utils::to_ncvar("subsurface_litter_carbon_flux", dims) # was RootLitter + nc_var[[9]] <- PEcAn.utils::to_ncvar("leaf_carbon_content", dims) # was LeafBiomass + nc_var[[10]] <- PEcAn.utils::to_ncvar("wood_carbon_content", dims) # was WoodBiomass + nc_var[[11]] <- PEcAn.utils::to_ncvar("root_carbon_content", dims) # was RootBiomass + nc_var[[12]] <- PEcAn.utils::to_ncvar("litter_carbon_content", dims) # was LitterBiomass + nc_var[[13]] <- PEcAn.utils::to_ncvar("soil_carbon_content", dims) # was SoilC; SOM pool technically includes woody debris (can't be represented by our standard) + nc_var[[14]] <- PEcAn.utils::to_ncvar("TotalResp", dims) nc_var[[15]] <- PEcAn.utils::to_ncvar("TotLivBiom", dims) nc_var[[16]] <- PEcAn.utils::to_ncvar("TotSoilCarb", dims) nc_var[[17]] <- PEcAn.utils::to_ncvar("LAI", dims) - nc_var[[18]] <- ncdf4::ncvar_def(name="time_bounds", units='', - longname = "history time interval endpoints", dim=list(time_interval,time = t), - prec = "double") - + nc_var[[18]] <- ncdf4::ncvar_def( + name = "time_bounds", units = "", + longname = "history time interval endpoints", dim = list(time_interval, time = t), + prec = "double" + ) + ### Output netCDF data nc <- ncdf4::nc_create(file.path(outdir, paste(y, "nc", sep = ".")), nc_var) - ncdf4::ncatt_put(nc, "time", "bounds", "time_bounds", prec=NA) + ncdf4::ncatt_put(nc, "time", "bounds", "time_bounds", prec = NA) varfile <- file(file.path(outdir, paste(y, "nc", "var", sep = ".")), "w") for (i in seq_along(nc_var)) { ncdf4::ncvar_put(nc, nc_var[[i]], output[[i]]) @@ -150,9 +164,7 @@ model2netcdf.DALEC <- function(outdir, sitelat, sitelon, start_date, end_date) { } close(varfile) ncdf4::nc_close(nc) - - } ### End of year loop - + } ### End of year loop } # model2netcdf.DALEC # ==================================================================================================# ## EOF diff --git a/models/dalec/R/write.configs.dalec.R b/models/dalec/R/write.configs.dalec.R index 64859b1c1fe..557e74f5664 100644 --- a/models/dalec/R/write.configs.dalec.R +++ b/models/dalec/R/write.configs.dalec.R @@ -4,12 +4,11 @@ PREFIX_XML <- "\n" convert.samples.DALEC <- function(trait.samples) { - DEFAULT.LEAF.C <- 0.48 ## convert SLA from PEcAn m2 / kg leaf to m2 / g C if ("SLA" %in% names(trait.samples)) { - trait.samples[["SLA"]] <- trait.samples[["SLA"]]/DEFAULT.LEAF.C/1000 + trait.samples[["SLA"]] <- trait.samples[["SLA"]] / DEFAULT.LEAF.C / 1000 } # t1 rate variable controlling decomposition from litter to soil organinc matter [day-1, ref T @@ -35,19 +34,19 @@ convert.samples.DALEC <- function(trait.samples) { # t5 proportion of foliage becoming litter every time step if ("leaf_turnover_rate" %in% names(trait.samples)) { - trait.samples[["leaf_turnover_rate"]] <- trait.samples[["leaf_turnover_rate"]]/365 + 
trait.samples[["leaf_turnover_rate"]] <- trait.samples[["leaf_turnover_rate"]] / 365 names(trait.samples)[which(names(trait.samples) == "leaf_turnover_rate")] <- "t5" } # t6 proportion of woody material becoming woody debris every time step if ("wood_turnover_rate" %in% names(trait.samples)) { - trait.samples[["wood_turnover_rate"]] <- trait.samples[["wood_turnover_rate"]]/365 + trait.samples[["wood_turnover_rate"]] <- trait.samples[["wood_turnover_rate"]] / 365 names(trait.samples)[which(names(trait.samples) == "wood_turnover_rate")] <- "t6" } # t7 proportion of fine roots becoming soil/woody debris every time step if ("root_turnover_rate" %in% names(trait.samples)) { - trait.samples[["root_turnover_rate"]] <- trait.samples[["root_turnover_rate"]]/365 + trait.samples[["root_turnover_rate"]] <- trait.samples[["root_turnover_rate"]] / 365 names(trait.samples)[which(names(trait.samples) == "root_turnover_rate")] <- "t7" } @@ -80,14 +79,11 @@ convert.samples.DALEC <- function(trait.samples) { ##' @return configuration files ##' @export write.config.DALEC write.config.DALEC <- function(defaults, trait.values, settings, run.id) { - ### CONVERT PARAMETERS cmdFlags <- "" for (group in names(trait.values)) { if (group == "env") { - ## set defaults from config.header - } else { if (!is.null(trait.values[[group]])) { params <- convert.samples.DALEC(trait.values[[group]]) @@ -102,61 +98,60 @@ write.config.DALEC <- function(defaults, trait.values, settings, run.id) { ### INITIAL CONDITIONS IC.params <- list() - if(!is.null(settings$run$inputs$poolinitcond$path)) { + if (!is.null(settings$run$inputs$poolinitcond$path)) { IC.path <- settings$run$inputs$poolinitcond$path - - #grab SLA from parameters and convert to PECAN standard + + # grab SLA from parameters and convert to PECAN standard sla <- NULL - if("SLA" %in% names(params)){ - sla <- PEcAn.utils::ud_convert(params[1,"SLA"], 'm2 g-1', 'm2 kg-1') #convert SLA to m2/kgC from m2/gC (revert convert.samples conversion to dalec default; need standard for prepare.pools) - } else{ + if ("SLA" %in% names(params)) { + sla <- PEcAn.utils::ud_convert(params[1, "SLA"], "m2 g-1", "m2 kg-1") # convert SLA to m2/kgC from m2/gC (revert convert.samples conversion to dalec default; need standard for prepare.pools) + } else { default.param <- utils::read.table(system.file("default_param.dalec", package = "PEcAn.DALEC"), header = TRUE) - sla <- PEcAn.utils::ud_convert(default.param[which(default.param$cmdFlag == "SLA"),"val"], 'm2 g-1', 'm2 kg-1') #convert SLA to m2/kgC from m2/gC (dalec default) + sla <- PEcAn.utils::ud_convert(default.param[which(default.param$cmdFlag == "SLA"), "val"], "m2 g-1", "m2 kg-1") # convert SLA to m2/kgC from m2/gC (dalec default) } IC.pools <- PEcAn.data.land::prepare_pools(IC.path, constants = list(sla = sla)) - if(!is.null(IC.pools)){ - ###Write initial conditions from netcdf (Note: wherever valid input isn't available, DALEC default remains) + if (!is.null(IC.pools)) { + ### Write initial conditions from netcdf (Note: wherever valid input isn't available, DALEC default remains) # cf0 initial canopy foliar carbon (g/m2) if ("leaf" %in% names(IC.pools)) { - IC.params[["cf0"]] <- PEcAn.utils::ud_convert(IC.pools$leaf, 'kg m-2', 'g m-2') #from PEcAn standard kg C m-2 + IC.params[["cf0"]] <- PEcAn.utils::ud_convert(IC.pools$leaf, "kg m-2", "g m-2") # from PEcAn standard kg C m-2 } # cw0 initial pool of woody carbon (g/m2) if ("wood" %in% names(IC.pools)) { - IC.params[["cw0"]] <- PEcAn.utils::ud_convert(IC.pools$wood, 'kg m-2', 'g m-2') 
#from PEcAn standard kg C m-2 + IC.params[["cw0"]] <- PEcAn.utils::ud_convert(IC.pools$wood, "kg m-2", "g m-2") # from PEcAn standard kg C m-2 } # cr0 initial pool of fine root carbon (g/m2) if ("fine.roots" %in% names(IC.pools)) { - IC.params[["cr0"]] <- PEcAn.utils::ud_convert(IC.pools$fine.roots, 'kg m-2', 'g m-2') #from PEcAn standard kg C m-2 + IC.params[["cr0"]] <- PEcAn.utils::ud_convert(IC.pools$fine.roots, "kg m-2", "g m-2") # from PEcAn standard kg C m-2 } - ###non-living variables + ### non-living variables # cl0 initial pool of litter carbon (g/m2) if ("litter" %in% names(IC.pools)) { - IC.params[["cl0"]] <- PEcAn.utils::ud_convert(IC.pools$litter, 'kg m-2', 'g m-2') #from PEcAn standard kg C m-2 + IC.params[["cl0"]] <- PEcAn.utils::ud_convert(IC.pools$litter, "kg m-2", "g m-2") # from PEcAn standard kg C m-2 } # cs0 initial pool of soil organic matter and woody debris carbon (g/m2) - if("soil" %in% names(IC.pools)){ - if("wood.debris" %in% names(IC.pools)){ - IC.params[["cs0"]] <- PEcAn.utils::ud_convert(IC.pools$soil + sum(IC.pools$wood.debris), 'kg m-2', 'g m-2') #from PEcAn standard kg C m-2 + if ("soil" %in% names(IC.pools)) { + if ("wood.debris" %in% names(IC.pools)) { + IC.params[["cs0"]] <- PEcAn.utils::ud_convert(IC.pools$soil + sum(IC.pools$wood.debris), "kg m-2", "g m-2") # from PEcAn standard kg C m-2 } else { - IC.params[["cs0"]] <- PEcAn.utils::ud_convert(IC.pools$soil, 'kg m-2', 'g m-2') #from PEcAn standard kg C m-2 + IC.params[["cs0"]] <- PEcAn.utils::ud_convert(IC.pools$soil, "kg m-2", "g m-2") # from PEcAn standard kg C m-2 PEcAn.logger::logger.warn("write.configs.DALEC IC: Loading soil carbon pool without woody debris.") } } - ###Write to command line file + ### Write to command line file for (i in seq_along(IC.params)) { cmdFlags <- paste0(cmdFlags, " -", names(IC.params)[i], " ", IC.params[[i]]) } - PEcAn.logger::logger.info(paste("All command flags:",cmdFlags)) - - } else{ + PEcAn.logger::logger.info(paste("All command flags:", cmdFlags)) + } else { PEcAn.logger::logger.error("Bad initial conditions filepath; kept defaults") } } @@ -175,20 +170,22 @@ write.config.DALEC <- function(defaults, trait.values, settings, run.id) { writeLines(cmdFlags, con = file.path(rundir, config.file.name)) ### WRITE JOB.SH - jobsh <- paste0("#!/bin/bash\n", - settings$model$binary, - " $(cat ", rundir, "/", config.file.name, - ") < ", as.character(settings$run$inputs$met$path), " > ", - outdir, "/out.txt\n", - # 'echo ".libPaths(',"'~/R/library');", - "echo \"", - " library(PEcAn.DALEC); model2netcdf.DALEC(", "'", - outdir, "',", - settings$run$site$lat, ",", - settings$run$site$lon, ", '", - settings$run$start.date, "', '", - settings$run$end.date, "') ", - "\" | R --vanilla") + jobsh <- paste0( + "#!/bin/bash\n", + settings$model$binary, + " $(cat ", rundir, "/", config.file.name, + ") < ", as.character(settings$run$inputs$met$path), " > ", + outdir, "/out.txt\n", + # 'echo ".libPaths(',"'~/R/library');", + "echo \"", + " library(PEcAn.DALEC); model2netcdf.DALEC(", "'", + outdir, "',", + settings$run$site$lat, ",", + settings$run$site$lon, ", '", + settings$run$start.date, "', '", + settings$run$end.date, "') ", + "\" | R --vanilla" + ) writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) diff --git a/models/dalec/inst/DALEC_priors.R b/models/dalec/inst/DALEC_priors.R index 2ca9ba3e73c..0c14e5b9a0f 100644 --- a/models/dalec/inst/DALEC_priors.R +++ b/models/dalec/inst/DALEC_priors.R @@ -1,112 +1,122 @@ 
## DALEC PRIORS # t2 - autotrophic_respiration_fraction -median = 0.4733 -lo = 0.2 -hi = 0.7 +median <- 0.4733 +lo <- 0.2 +hi <- 0.7 -beta_fit <- function(alpha,median,lo,hi){ - beta = (alpha-1/3)/median +2/3 -alpha - err = abs(lo - qbeta(0.025,alpha,beta))+abs(hi-qbeta(0.975,alpha,beta)) +beta_fit <- function(alpha, median, lo, hi) { + beta <- (alpha - 1 / 3) / median + 2 / 3 - alpha + err <- abs(lo - qbeta(0.025, alpha, beta)) + abs(hi - qbeta(0.975, alpha, beta)) return(err) } -beta_fit(1,median,lo,hi) ## test default -#optimize(beta_fit,interval = c(0.1,1000),median=median,lo=lo,hi=hi) -fit = optim(par = 1,beta_fit,median=median,lo=lo,hi=hi) -alpha = fit$par -beta = (alpha-1/3)/median +2/3 -alpha -my.dbeta = function(x){dbeta(x,alpha,beta)} -curve(my.dbeta,from=0.01,to=.99) +beta_fit(1, median, lo, hi) ## test default +# optimize(beta_fit,interval = c(0.1,1000),median=median,lo=lo,hi=hi) +fit <- optim(par = 1, beta_fit, median = median, lo = lo, hi = hi) +alpha <- fit$par +beta <- (alpha - 1 / 3) / median + 2 / 3 - alpha +my.dbeta <- function(x) { + dbeta(x, alpha, beta) +} +curve(my.dbeta, from = 0.01, to = .99) # t3 - proportion of NPP allocated to foliage -median = 0.3150 -lo = 0.01 -hi = 0.5 -beta_fit(1,median,lo,hi) ## test default -fit = optim(par = 1,beta_fit,median=median,lo=lo,hi=hi) -alpha = fit$par -beta = (alpha-1/3)/median +2/3 -alpha -curve(my.dbeta,from=0.01,to=.99) +median <- 0.3150 +lo <- 0.01 +hi <- 0.5 +beta_fit(1, median, lo, hi) ## test default +fit <- optim(par = 1, beta_fit, median = median, lo = lo, hi = hi) +alpha <- fit$par +beta <- (alpha - 1 / 3) / median + 2 / 3 - alpha +curve(my.dbeta, from = 0.01, to = .99) -#t4 proportion of NPP allocated to roots -median = 0.4344 -lo = 0.01 -hi = 0.5 -hi = 0.6 ## relaxed Mat's default because the posterior was narrow -beta_fit(1,median,lo,hi) ## test default -fit = optim(par = 1,beta_fit,median=median,lo=lo,hi=hi) -alpha = fit$par -beta = (alpha-1/3)/median +2/3 -alpha -curve(my.dbeta,from=0.01,to=.99) +# t4 proportion of NPP allocated to roots +median <- 0.4344 +lo <- 0.01 +hi <- 0.5 +hi <- 0.6 ## relaxed Mat's default because the posterior was narrow +beta_fit(1, median, lo, hi) ## test default +fit <- optim(par = 1, beta_fit, median = median, lo = lo, hi = hi) +alpha <- fit$par +beta <- (alpha - 1 / 3) / median + 2 / 3 - alpha +curve(my.dbeta, from = 0.01, to = .99) -#t6 proportion of woody material becoming woody debris every time step -median = 2.06e-6 * 365 -lo = 2e-6 *365 -hi = 0.02 * 365 -gamma_fit <- function(theta,median,lo,hi){ - shape = theta[1] - rate = theta[2] +# t6 proportion of woody material becoming woody debris every time step +median <- 2.06e-6 * 365 +lo <- 2e-6 * 365 +hi <- 0.02 * 365 +gamma_fit <- function(theta, median, lo, hi) { + shape <- theta[1] + rate <- theta[2] # no closed form median, so including into error, weighting median higher (10x) - err = abs(lo - qgamma(0.025,shape,rate))+abs(hi-qgamma(0.975,shape,rate)) + - 10*abs(median-qgamma(0.5,shape,rate)) + err <- abs(lo - qgamma(0.025, shape, rate)) + abs(hi - qgamma(0.975, shape, rate)) + + 10 * abs(median - qgamma(0.5, shape, rate)) return(err) } -gamma_fit(c(1,1),median,lo,hi) ## test default -fit = optim(par = c(1,1),gamma_fit,median=median,lo=lo,hi=hi) -shape = fit$par[1] -rate = fit$par[2] -my.dgamma = function(x){dgamma(x,shape,rate)} -curve(my.dgamma,from=0.001,to=.02) -abline(v=median) -qgamma(0.5,shape,rate) +gamma_fit(c(1, 1), median, lo, hi) ## test default +fit <- optim(par = c(1, 1), gamma_fit, median = median, lo = lo, hi = 
hi) +shape <- fit$par[1] +rate <- fit$par[2] +my.dgamma <- function(x) { + dgamma(x, shape, rate) +} +curve(my.dgamma, from = 0.001, to = .02) +abline(v = median) +qgamma(0.5, shape, rate) # t1 - rate variable controling decomposition from litter to soil organinc matter [day-1, ref T 10C] -median = 4.41e-6 -lo = 1e-6 -hi = 0.01 -gamma_fit(c(1,1),median,lo,hi) ## test default -fit = optim(par = c(1,1),gamma_fit,median=median,lo=lo,hi=hi) -shape = fit$par[1] -rate = fit$par[2] -my.dgamma = function(x){dgamma(x,shape,rate)} -curve(my.dgamma,from=0.001,to=.02) -abline(v=median) -qgamma(0.5,shape,rate) +median <- 4.41e-6 +lo <- 1e-6 +hi <- 0.01 +gamma_fit(c(1, 1), median, lo, hi) ## test default +fit <- optim(par = c(1, 1), gamma_fit, median = median, lo = lo, hi = hi) +shape <- fit$par[1] +rate <- fit$par[2] +my.dgamma <- function(x) { + dgamma(x, shape, rate) +} +curve(my.dgamma, from = 0.001, to = .02) +abline(v = median) +qgamma(0.5, shape, rate) ## GAMMA GAVE A BAD FIT SO SWITCHING TO EXPONENTIAL -rate = log(2)/median -curve(dexp(x,rate),lo,lo*100) -abline(v=median) -qexp(0.5,rate) -qexp(0.025,rate) -qexp(0.975,rate) +rate <- log(2) / median +curve(dexp(x, rate), lo, lo * 100) +abline(v = median) +qexp(0.5, rate) +qexp(0.025, rate) +qexp(0.975, rate) # t8 - rate variable controlling respiration from litter [day-1, ref T 10C] -median = 2.28e-2 -lo = 5e-5 -hi = 0.5 -gamma_fit(c(1,1),median,lo,hi) ## test default -fit = optim(par = c(1,1),gamma_fit,median=median,lo=lo,hi=hi) -shape = fit$par[1] -rate = fit$par[2] -my.dgamma = function(x){dgamma(x,shape,rate)} -curve(my.dgamma,from=0.001,to=.02) -abline(v=median) -qgamma(c(0.025,0.5,0.975),shape,rate) +median <- 2.28e-2 +lo <- 5e-5 +hi <- 0.5 +gamma_fit(c(1, 1), median, lo, hi) ## test default +fit <- optim(par = c(1, 1), gamma_fit, median = median, lo = lo, hi = hi) +shape <- fit$par[1] +rate <- fit$par[2] +my.dgamma <- function(x) { + dgamma(x, shape, rate) +} +curve(my.dgamma, from = 0.001, to = .02) +abline(v = median) +qgamma(c(0.025, 0.5, 0.975), shape, rate) # t9 - rate variable controlling respiration from soil organic matter and woody debris [day-1, ref T 10C] -median = 2.65e-6 -lo = 1e-6 -hi = 0.5 -gamma_fit(c(1,1),median,lo,hi) ## test default -fit = optim(par = c(1,1),gamma_fit,median=median,lo=lo,hi=hi) -shape = fit$par[1] -rate = fit$par[2] -my.dgamma = function(x){dgamma(x,shape,rate)} -curve(my.dgamma,from=0.001,to=.02) -abline(v=median) -qgamma(c(0.025,0.5,0.975),shape,rate) +median <- 2.65e-6 +lo <- 1e-6 +hi <- 0.5 +gamma_fit(c(1, 1), median, lo, hi) ## test default +fit <- optim(par = c(1, 1), gamma_fit, median = median, lo = lo, hi = hi) +shape <- fit$par[1] +rate <- fit$par[2] +my.dgamma <- function(x) { + dgamma(x, shape, rate) +} +curve(my.dgamma, from = 0.001, to = .02) +abline(v = median) +qgamma(c(0.025, 0.5, 0.975), shape, rate) ## Gamma gave a poor fit to the median, trying exponential -rate = log(2)/median -curve(dexp(x,rate),lo,lo*100) -abline(v=median) -qexp(c(0.025,0.5,0.975),rate) +rate <- log(2) / median +curve(dexp(x, rate), lo, lo * 100) +abline(v = median) +qexp(c(0.025, 0.5, 0.975), rate) diff --git a/models/dalec/tests/testthat/test.met2model.R b/models/dalec/tests/testthat/test.met2model.R index 4e77f1fcfd2..203469e9d78 100644 --- a/models/dalec/tests/testthat/test.met2model.R +++ b/models/dalec/tests/testthat/test.met2model.R @@ -6,7 +6,8 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("DALEC met conversion runs without error", { nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - 
package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" diff --git a/models/dvmdostem/R/globals.R b/models/dvmdostem/R/globals.R index 9edd78ac0f1..f56fe702db5 100644 --- a/models/dvmdostem/R/globals.R +++ b/models/dvmdostem/R/globals.R @@ -1 +1 @@ -utils::globalVariables(c('pecan_outvars', 'rs_outspec_path','req_v_list')) +utils::globalVariables(c("pecan_outvars", "rs_outspec_path", "req_v_list")) diff --git a/models/dvmdostem/R/model2netcdf.dvmdostem.R b/models/dvmdostem/R/model2netcdf.dvmdostem.R index 2bb0517e69f..1d4b8fbe398 100644 --- a/models/dvmdostem/R/model2netcdf.dvmdostem.R +++ b/models/dvmdostem/R/model2netcdf.dvmdostem.R @@ -1,6 +1,6 @@ library(lubridate) -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' @title Write data into PEcAn shaped output file. ##' @param y_starts a list of years, i.e.: 1901, 1902, 1903, etc. ##' @param outdir a path to the location where were we will look for dvmdostem outputs and write PEcAn outputs. @@ -12,13 +12,11 @@ library(lubridate) ##' @author Tobey Carman ##' write.data2pecan.file <- function(y_starts, outdir, pecan_requested_vars, monthly_dvmdostem_outputs, yearly_dvmdostem_outputs, px_Y, px_X) { - # Looping over the dvmdostem ouputs and writing data into the # the respective yearly PEcAn output files. for (i in seq_along(1:length(y_starts))) { ncout <- ncdf4::nc_open(file.path(outdir, paste0(lubridate::year(y_starts[i]), ".nc")), write = TRUE) for (j in pecan_requested_vars) { - # Look up the depends_on in the reverse map for (k in vmap_reverse[[j]][["depends_on"]]) { # See that dvmdostem output files are available for each depends_on... @@ -30,11 +28,10 @@ write.data2pecan.file <- function(y_starts, outdir, pecan_requested_vars, monthl newVector <- vector(mode = "numeric") for (k in unlist(strsplit(vmap_reverse[[j]][["depends_on"]], ","))) { - # Determine if dvmdostem file is monthly or yearly - if (TRUE %in% sapply(monthly_dvmdostem_outputs, function(x) grepl(paste0("^",k,"_"), x))) { + if (TRUE %in% sapply(monthly_dvmdostem_outputs, function(x) grepl(paste0("^", k, "_"), x))) { ncin_tr_y <- ncdf4::nc_open(file.path(outdir, paste0(k, "_monthly_tr.nc"))) - } else if (TRUE %in% sapply(yearly_dvmdostem_outputs, function(x) grepl(paste0("^",k,"_"), x))) { + } else if (TRUE %in% sapply(yearly_dvmdostem_outputs, function(x) grepl(paste0("^", k, "_"), x))) { ncin_tr_y <- ncdf4::nc_open(file.path(outdir, paste0(k, "_yearly_tr.nc"))) } else { PEcAn.logger::logger.error(paste0("ERROR!: ", k, " is not a monthly or yearly variable!")) @@ -58,65 +55,64 @@ write.data2pecan.file <- function(y_starts, outdir, pecan_requested_vars, monthl # Coerce the data into the right shape (y, x, time). # With a single pixel run, the Y and X dimensions are lost when - # reading from the file with ncdf4::ncvar_get, and the subsequent + # reading from the file with ncdf4::ncvar_get, and the subsequent # ncdf4::ncvar_put call fails. 
So here we make sure that the # vardata_new data is a 3D structure: dim_lengths <- sapply(ncin_tr_y$var[[1]]$dim, function(x) x$len) vardata_new <- array(vardata_new, dim = dim_lengths) dim.order <- sapply(ncin_tr_y$var[[k]]$dim, function(x) x$name) - starts <-c(y = px_Y, x = px_X, time = 1) + starts <- c(y = px_Y, x = px_X, time = 1) # What if variable as output from dvmdostem is by pft, or layer or pft and # compartment (pftpart)? # # Guessing/assuming that this is a "calibration run" and that the - # dvmdostem calibraiton variables were enabled, which includes netCDF - # output by PFT and soil layer. These have to be sumarized in order to be - # included in the pecan output.") + # dvmdostem calibraiton variables were enabled, which includes netCDF + # output by PFT and soil layer. These have to be sumarized in order to be + # included in the pecan output.") # # Due to the way R handles NetCDF files, it appears that the dimensions of - # vardata_new are (X, Y, PFT, TIME), even though in the NetCDf file, the + # vardata_new are (X, Y, PFT, TIME), even though in the NetCDf file, the # dimensions are explicitly set as y, x, pft, time as reccomended by the - # CF standards. In this case we want to sum over pft, which is the 3rd - # dimension in vardata_new. Note, to further confuse things, the - # ncdf4.helpers::nc.get.dim.names() function returns the following: + # CF standards. In this case we want to sum over pft, which is the 3rd + # dimension in vardata_new. Note, to further confuse things, the + # ncdf4.helpers::nc.get.dim.names() function returns the following: # # Browse[4]> nc.get.dim.names(ncin_tr_y) - # [1] "time" "y" "x" "pft" + # [1] "time" "y" "x" "pft" # # But some testing in an interactive R session seems to indicate that # the following apply function sums over PFTs as we want, and we end up # with vardata_new being an array with the dimensions X, Y, time if (length(dim(vardata_new)) == 5) { PEcAn.logger::logger.debug("") - vardata_new <- apply(vardata_new, c(1,2,5), function(x) sum(x)) - dim.order <- dim.order[!dim.order %in% c('pft', 'pftpart')] + vardata_new <- apply(vardata_new, c(1, 2, 5), function(x) sum(x)) + dim.order <- dim.order[!dim.order %in% c("pft", "pftpart")] } - if (length(dim(vardata_new)) == 4){ + if (length(dim(vardata_new)) == 4) { PEcAn.logger::logger.debug("") - vardata_new <- apply(vardata_new, c(1,2,4), function(x) sum(x)) - dim.order <- dim.order[!dim.order %in% c('pft', 'layer')] + vardata_new <- apply(vardata_new, c(1, 2, 4), function(x) sum(x)) + dim.order <- dim.order[!dim.order %in% c("pft", "layer")] } - #if ('pft' %in% nc.get.dim.names(ncin_tr_y)) {} - #if ('layers' %in% nc.get.dim.names(ncin_tr_y)) {} - #if ('pft' %in% nc.get.dim.names(ncin_tr_y)) + # if ('pft' %in% nc.get.dim.names(ncin_tr_y)) {} + # if ('layers' %in% nc.get.dim.names(ncin_tr_y)) {} + # if ('pft' %in% nc.get.dim.names(ncin_tr_y)) - if (TRUE %in% sapply(monthly_dvmdostem_outputs, function(x) grepl(paste0("^",k,"_"), x))) { + if (TRUE %in% sapply(monthly_dvmdostem_outputs, function(x) grepl(paste0("^", k, "_"), x))) { # The current variable (j) is a monthly output - counts <- c(y=1, x=1, time=12) - startidx <- ((i-1)*12)+1 - endidx <- i*12 - newVector <- cbind(newVector, vardata_new[px_X, px_Y,startidx:endidx]) - } else if (TRUE %in% sapply(yearly_dvmdostem_outputs, function(x) grepl(paste0("^",k,"_"), x))) { + counts <- c(y = 1, x = 1, time = 12) + startidx <- ((i - 1) * 12) + 1 + endidx <- i * 12 + newVector <- cbind(newVector, vardata_new[px_X, px_Y, startidx:endidx]) + } 
else if (TRUE %in% sapply(yearly_dvmdostem_outputs, function(x) grepl(paste0("^", k, "_"), x))) { # The current variable (k) is a yearly output - counts <- c(y=1, x=1, time=1) + counts <- c(y = 1, x = 1, time = 1) newVector <- cbind(newVector, vardata_new[px_X, px_Y, i]) } else { PEcAn.logger::logger.error(paste0("ERROR!: ", k, " is not a monthly or yearly variable!")) stop() } - } # Maybe we will support more operations in the future besides sum... @@ -129,42 +125,45 @@ write.data2pecan.file <- function(y_starts, outdir, pecan_requested_vars, monthl } } -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' @name model2netcdf.dvmdostem ##' @title Code to convert dvmdostem netcdf output into into CF standard -##' +##' ##' @param outdir Location of dvmdostem model output ##' @param runstart ?? ##' @param runend ?? ##' @param pecan_requested_vars a space separated string with names of the PEcAn variables to output. -##' @examples +##' @examples ##' \dontrun{ ##' # example code here? ##' } -##' +##' ##' @export ##' ##' @author Tobey Carman, Shawn Serbin ##' model2netcdf.dvmdostem <- function(outdir, runstart, runend, pecan_requested_vars) { - PEcAn.logger::logger.info(paste0("Run start: ", runstart, " Run end: ", runend)) PEcAn.logger::logger.info(paste0("Processing dvmdostem outputs in: ", outdir)) PEcAn.logger::logger.info(paste0("Building the following PEcAn variables: ", pecan_requested_vars)) # Split apart the string of pecan vars passed into the function pecan_requested_vars <- unlist(lapply(unlist(strsplit(pecan_requested_vars, ",")), trimws)) - pecan_requested_vars <- unlist(lapply(pecan_requested_vars, function(x){x[!x==""]})) + pecan_requested_vars <- unlist(lapply(pecan_requested_vars, function(x) { + x[!x == ""] + })) # Look up the required dvmdostem variables. dvmdostem_outputs <- "" for (pov in pecan_requested_vars) { dvmdostem_outputs <- trimws(paste(dvmdostem_outputs, vmap_reverse[[pov]][["depends_on"]], sep = ",")) } - dvmdostem_outputs <- unlist(lapply(unlist(strsplit(trimws(dvmdostem_outputs), ",")), function(x){x[!x== ""]})) + dvmdostem_outputs <- unlist(lapply(unlist(strsplit(trimws(dvmdostem_outputs), ",")), function(x) { + x[!x == ""] + })) # First things first, we need to check the run_status.nc file and make sure # that the a) only one pixel ran, and b) the success code is > 0 - nc_runstatus <- ncdf4::nc_open(file.path(outdir, "run_status.nc"), write=FALSE) + nc_runstatus <- ncdf4::nc_open(file.path(outdir, "run_status.nc"), write = FALSE) if (length(nc_runstatus$var) != 1) { PEcAn.logger::logger.error(c("INVALID run_status.nc file! Expecting 1 variable, found: ", length(nc_runstatus$var))) stop() @@ -172,12 +171,12 @@ model2netcdf.dvmdostem <- function(outdir, runstart, runend, pecan_requested_var run_status <- ncdf4::ncvar_get(nc_runstatus, nc_runstatus$var$run_status) - # Cooerce the array into the right shape. the ncvar_get function does the + # Cooerce the array into the right shape. the ncvar_get function does the # right thing if we are reading run_mask with more than one pixel. But if # there is only one pixel, then a 1D list is returned, which causes problems # later in the function. So here we force the array into a 2D shape. 
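# --- Illustrative aside, not part of the patch; the file and variable names
# below are assumptions, not taken from dvmdostem itself. ---
# ncdf4::ncvar_get() drops length-1 dimensions by default, so for a
# single-pixel run a (y = 1, x = 1) variable comes back as a bare vector,
# which can break later indexing and ncdf4::ncvar_put() calls. Re-applying
# the dimension lengths recorded in the file restores the expected shape:
nc    <- ncdf4::nc_open("run_status.nc")                        # assumed single-pixel file
vals  <- ncdf4::ncvar_get(nc, "run_status")                     # 1-D for a 1x1 grid
dlens <- sapply(nc$var[["run_status"]]$dim, function(d) d$len)  # e.g. c(1, 1)
vals  <- array(vals, dim = dlens)                               # force back to 2-D
ncdf4::nc_close(nc)
# Earlier in this file the same trick is paired with apply() over the retained
# margins to collapse per-PFT output down to a (y, x, time) array.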
- dim.lengths = sapply(nc_runstatus$var[[1]]$dim, function(x) x$len) - run_status <- array(run_status, dim=dim.lengths) + dim.lengths <- sapply(nc_runstatus$var[[1]]$dim, function(x) x$len) + run_status <- array(run_status, dim = dim.lengths) ncdf4::nc_close(nc_runstatus) good_px <- which(run_status > 0) @@ -186,8 +185,8 @@ model2netcdf.dvmdostem <- function(outdir, runstart, runend, pecan_requested_var PEcAn.logger::logger.error("Not sure what to do, so quitting.") stop() # Not sure we even need to check bad_px or skipped_px? - #skipped_px <- which(run_status == 0) - #bad_px <- which(run_status < 0) + # skipped_px <- which(run_status == 0) + # bad_px <- which(run_status < 0) } # A less aggressive check here might be to see if enough of the transient @@ -239,13 +238,13 @@ model2netcdf.dvmdostem <- function(outdir, runstart, runend, pecan_requested_var # Look at the first dvmdostem output, see if it is was provided by dvmdostem # as monthly or yearly, and adjust accordingly. - # NOTE: Assumes that all dvmdostem output files are at the same + # NOTE: Assumes that all dvmdostem output files are at the same # time resolution! - if(TRUE %in% sapply(monthly_dvmdostem_outputs, function(x) grepl(paste0("^",dvmdostem_outputs[1],"_"), x))) { + if (TRUE %in% sapply(monthly_dvmdostem_outputs, function(x) grepl(paste0("^", dvmdostem_outputs[1], "_"), x))) { trfile <- file.path(outdir, paste0(dvmdostem_outputs[1], "_monthly_tr.nc")) scfile <- file.path(outdir, paste0(dvmdostem_outputs[1], "_monthly_sc.nc")) timedivisor <- 12 - } else if (TRUE %in% sapply(yearly_dvmdostem_outputs, function(x) grepl(paste0("^",dvmdostem_outputs[1],"_"), x))) { + } else if (TRUE %in% sapply(yearly_dvmdostem_outputs, function(x) grepl(paste0("^", dvmdostem_outputs[1], "_"), x))) { trfile <- file.path(outdir, paste0(dvmdostem_outputs[1], "_yearly_tr.nc")) scfile <- file.path(outdir, paste0(dvmdostem_outputs[1], "_yearly_sc.nc")) timedivisor <- 1 @@ -257,43 +256,48 @@ model2netcdf.dvmdostem <- function(outdir, runstart, runend, pecan_requested_var PEcAn.logger::logger.info(paste0("Opening dvmdostem raw output file for variable (transient): ", dvmdostem_outputs[1])) ncin_y_tr <- ncdf4::nc_open(trfile) y_tr_time_start <- ncin_y_tr$dim$time$units - y_tr_time_start <- as.numeric( sub("\\D*(\\d+).*", "\\1", y_tr_time_start) ) - y_tr_time_end <- y_tr_time_start + (ncin_y_tr$dim$time$len/timedivisor) - 1 + y_tr_time_start <- as.numeric(sub("\\D*(\\d+).*", "\\1", y_tr_time_start)) + y_tr_time_end <- y_tr_time_start + (ncin_y_tr$dim$time$len / timedivisor) - 1 y_tr_starts <- paste0(seq(y_tr_time_start, y_tr_time_end, 1), "-01-01 00:00:00") PEcAn.logger::logger.info(paste0("Opening dvmdostem raw output file for variable (scenario): ", dvmdostem_outputs[1])) ncin_y_sc <- ncdf4::nc_open(scfile) y_sc_time_start <- ncin_y_sc$dim$time$units - y_sc_time_start <- as.numeric( sub("\\D*(\\d+).*", "\\1", y_sc_time_start) ) - y_sc_time_end <- y_sc_time_start + (ncin_y_sc$dim$time$len/timedivisor) - 1 + y_sc_time_start <- as.numeric(sub("\\D*(\\d+).*", "\\1", y_sc_time_start)) + y_sc_time_end <- y_sc_time_start + (ncin_y_sc$dim$time$len / timedivisor) - 1 y_sc_starts <- paste0(seq(y_sc_time_start, y_sc_time_end, 1), "-01-01 00:00:00") # Check that transient and sceario runs were contiguous... if ((lubridate::year(y_tr_starts[length(y_tr_starts)]) + 1) != lubridate::year(y_sc_starts[1])) { PEcAn.logger::logger.error("WARNING! 
There is a gap between your transient and scenario datasets!!") - PEcAn.logger::logger.error(paste0("End of transient:", - lubridate::year(y_tr_starts[length(y_tr_starts)]), - " Begining of scenario: ", - lubridate::year(y_sc_starts[1]))) + PEcAn.logger::logger.error(paste0( + "End of transient:", + lubridate::year(y_tr_starts[length(y_tr_starts)]), + " Begining of scenario: ", + lubridate::year(y_sc_starts[1]) + )) } PEcAn.logger::logger.info("Creating one netcdf file for each output year...") all_yrs <- c(y_tr_starts, y_sc_starts) for (i in seq_along(1:length(all_yrs))) { - PEcAn.logger::logger.info("Creating dimensions (and coordinate variables) for new PEcAn style files...") # The way R netcdf works is that you pass a vals argument when creating dimensions # and it creates the coordinate variables for you. - lond <- ncdf4::ncdim_def(name='lon', - units="degrees_east", - vals=c(1), # <=== read from dvmdostem file! see dvmdostem issue #342 - longname="coordinate_longitude") - - latd <- ncdf4::ncdim_def(name='lat', - units="degrees_north", - vals=c(1), # <=== read from dvmdostem file! see dvmdostem issue #342 - longname="coordinate_latitude") + lond <- ncdf4::ncdim_def( + name = "lon", + units = "degrees_east", + vals = c(1), # <=== read from dvmdostem file! see dvmdostem issue #342 + longname = "coordinate_longitude" + ) + + latd <- ncdf4::ncdim_def( + name = "lat", + units = "degrees_north", + vals = c(1), # <=== read from dvmdostem file! see dvmdostem issue #342 + longname = "coordinate_latitude" + ) if (length(monthly_dvmdostem_outputs) > 0) { # last day of each month @@ -301,37 +305,45 @@ model2netcdf.dvmdostem <- function(outdir, runstart, runend, pecan_requested_var } else { timed_vals <- c(0) } - timed <- ncdf4::ncdim_def(name='time', - units=paste0("days since ", all_yrs[i]), - vals=timed_vals, - unlim=TRUE, - longname="time", - calendar='365_day') + timed <- ncdf4::ncdim_def( + name = "time", + units = paste0("days since ", all_yrs[i]), + vals = timed_vals, + unlim = TRUE, + longname = "time", + calendar = "365_day" + ) - out_nc_dims <- list(lon=lond, lat=latd, time=timed) # dimension order: X, Y, time + out_nc_dims <- list(lon = lond, lat = latd, time = timed) # dimension order: X, Y, time PEcAn.logger::logger.info("Creating variables for new PEcAn style files...") newvars <- c() # Not very efficient, would be better to pre-allocate space j <- 0 - for (name in pecan_requested_vars){ + for (name in pecan_requested_vars) { j <- j + 1 print(paste0("Creating variable named: ", name)) - ncvar <- ncdf4::ncvar_def(name = name, - units = vmap_reverse[[name]][["newunits"]], - longname = vmap_reverse[[name]][["longname"]], - dim = out_nc_dims, -999, prec = "double") + ncvar <- ncdf4::ncvar_def( + name = name, + units = vmap_reverse[[name]][["newunits"]], + longname = vmap_reverse[[name]][["longname"]], + dim = out_nc_dims, -999, prec = "double" + ) newvars[[j]] <- ncvar } ncout <- ncdf4::nc_create(file.path(outdir, paste0(as.character(lubridate::year(all_yrs[i])), ".nc")), newvars) # extract variable and long names to VAR file for PEcAn visibility # THIS NEEDS TO BE KEPT AND USED FOR PROPER PLOTTING - write.table(sapply(ncout$var, function(x) { x$longname }), - file = file.path(outdir,paste0(as.character(lubridate::year(all_yrs[i])), ".nc.var")), - col.names = FALSE, - row.names = TRUE, - quote = FALSE) + write.table( + sapply(ncout$var, function(x) { + x$longname + }), + file = file.path(outdir, paste0(as.character(lubridate::year(all_yrs[i])), ".nc.var")), + col.names = FALSE, + 
row.names = TRUE, + quote = FALSE + ) ncdf4::nc_close(ncout) } @@ -341,7 +353,6 @@ model2netcdf.dvmdostem <- function(outdir, runstart, runend, pecan_requested_var # Write the scenario data to pecan files. write.data2pecan.file(y_sc_starts, outdir, pecan_requested_vars, monthly_dvmdostem_outputs, yearly_dvmdostem_outputs, px_Y, px_X) - } # end of function -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ## EOF diff --git a/models/dvmdostem/R/pecan2dvmdostem_variable_mapping.R b/models/dvmdostem/R/pecan2dvmdostem_variable_mapping.R index 688cbdcf9f2..15fcf960ba5 100644 --- a/models/dvmdostem/R/pecan2dvmdostem_variable_mapping.R +++ b/models/dvmdostem/R/pecan2dvmdostem_variable_mapping.R @@ -1,29 +1,29 @@ ##' Build a mapping from dvmdostem names to PEcAn names, units, etc. -##' The temunits should be (are) looked up from the dvmdostem output +##' The temunits should be (are) looked up from the dvmdostem output ##' file's units attributes. -##' +##' ##' This data structure allows us to keep track of PEcAn output variables ##' that might depend on more than one DVMDOSTEM files. ##' @export vmap_reverse <- list( - "GPP" = c(depends_on="GPP", longname="Gross Primary Productivity", newunits="kg m-2 s-1"), - "NPP" = c(depends_on="NPP", longname="Net Primary Productivity", newunits="kg m-2 s-1"), - "HeteroResp" = c(depends_on="RH", longname="Heterotrophic Respiration", newunits="kg m-2 s-1"), - "AutoResp" = c(depends_on="RM,RG", longname="Autotrophic Respiration", newunits="kg m-2 s-1"), - "SoilOrgC" = c(depends_on="SHLWC,SOMA,SOMCR,SOMPR,SOMRAWC", longname="Soil Organic Carbon", newunits="kg m-2"), - "LAI" = c(depends_on="LAI", longname="Leaf Area Index", newunits="m2/m2"), - "VegC" = c(depends_on="VEGC", longname="Vegetation Carbon", newunits="kg m-2"), - "DeepC" = c(depends_on="DEEPC", longname="Deep (amporphous) soil C", newunits="kg m-2"), - "AvailN" = c(depends_on="AVLN", longname="Available Nitrogen", newunits="kg m-2"), - "NetNMin" = c(depends_on="NETNMIN", longname="Net N Mineralization", newunits="kg m-2 s-1"), - "NImmob" = c(depends_on="NIMMOB", longname="N Immobilization", newunits="kg m-2 s-1"), - "NInput" = c(depends_on="NINPUT", longname="N Inputs to soil", newunits="kg m-2 s-1"), - "NLost" = c(depends_on="NLOST", longname="N Lost from soil", newunits="kg m-2 s-1"), - "NUptakeIn" = c(depends_on="INNUPTAKE", longname="N Uptake ignoring N limitation", newunits="kg m-2 s-1"), - "NUptakeSt" = c(depends_on="NUPTAKEST", longname="N Uptake Structural", newunits="kg m-2 s-1"), - "NUptakeLab" = c(depends_on="NUPTAKELAB", longname="N Uptake Labile", newunits="kg m-2 s-1"), - "OrgN" = c(depends_on="ORGN", longname="Total Soil Organic N", newunits="kg m-2"), - "ShlwC" = c(depends_on="SHLWC", longname="Shallow (fibrous) soil C", newunits="kg m-2"), - "VegN" = c(depends_on="VEGN", longname="Vegetation N", newunits="kg m-2") - #"" = c(depends_on="", longname="", newunits="") -) \ No newline at end of file + "GPP" = c(depends_on = "GPP", longname = "Gross Primary Productivity", newunits = "kg m-2 s-1"), + "NPP" = c(depends_on = "NPP", longname = "Net Primary Productivity", newunits = "kg m-2 s-1"), + "HeteroResp" = c(depends_on = "RH", longname = "Heterotrophic Respiration", newunits = "kg m-2 s-1"), + "AutoResp" = c(depends_on = "RM,RG", longname = "Autotrophic Respiration", newunits = "kg m-2 s-1"), + "SoilOrgC" = c(depends_on = 
"SHLWC,SOMA,SOMCR,SOMPR,SOMRAWC", longname = "Soil Organic Carbon", newunits = "kg m-2"), + "LAI" = c(depends_on = "LAI", longname = "Leaf Area Index", newunits = "m2/m2"), + "VegC" = c(depends_on = "VEGC", longname = "Vegetation Carbon", newunits = "kg m-2"), + "DeepC" = c(depends_on = "DEEPC", longname = "Deep (amporphous) soil C", newunits = "kg m-2"), + "AvailN" = c(depends_on = "AVLN", longname = "Available Nitrogen", newunits = "kg m-2"), + "NetNMin" = c(depends_on = "NETNMIN", longname = "Net N Mineralization", newunits = "kg m-2 s-1"), + "NImmob" = c(depends_on = "NIMMOB", longname = "N Immobilization", newunits = "kg m-2 s-1"), + "NInput" = c(depends_on = "NINPUT", longname = "N Inputs to soil", newunits = "kg m-2 s-1"), + "NLost" = c(depends_on = "NLOST", longname = "N Lost from soil", newunits = "kg m-2 s-1"), + "NUptakeIn" = c(depends_on = "INNUPTAKE", longname = "N Uptake ignoring N limitation", newunits = "kg m-2 s-1"), + "NUptakeSt" = c(depends_on = "NUPTAKEST", longname = "N Uptake Structural", newunits = "kg m-2 s-1"), + "NUptakeLab" = c(depends_on = "NUPTAKELAB", longname = "N Uptake Labile", newunits = "kg m-2 s-1"), + "OrgN" = c(depends_on = "ORGN", longname = "Total Soil Organic N", newunits = "kg m-2"), + "ShlwC" = c(depends_on = "SHLWC", longname = "Shallow (fibrous) soil C", newunits = "kg m-2"), + "VegN" = c(depends_on = "VEGN", longname = "Vegetation N", newunits = "kg m-2") + # "" = c(depends_on="", longname="", newunits="") +) diff --git a/models/dvmdostem/R/write.config.dvmdostem.R b/models/dvmdostem/R/write.config.dvmdostem.R index 9e0298b2040..403a66f8f02 100644 --- a/models/dvmdostem/R/write.config.dvmdostem.R +++ b/models/dvmdostem/R/write.config.dvmdostem.R @@ -1,4 +1,3 @@ - ##' Setup the output variables that dvmdostem will generate and PEcAn will analyze. ##' This function handles the interplay between output variables and output spec file. ##' There are custom tags in the section of the pecan xml file for dvmdostem @@ -9,7 +8,7 @@ ##' supplied settings for output spec path and variable list and returns the path ##' to the final run specific output spec file and the list of variables to ##' process. The run specific output spec file is copied into place and adjusted -##' using the dvmdostem script for working with output spec files. +##' using the dvmdostem script for working with output spec files. ##' ##' @name setup.outputs.dvmdostem ##' @title Setup outputs to be generated by dvmdostem and analyzed by PEcAn. @@ -25,12 +24,11 @@ ##' @author Tobey Carman ##' setup.outputs.dvmdostem <- function(dvmdostem_calibration, - pecan_requested_outputs, - dvmdostem_output_spec, + pecan_requested_outputs, + dvmdostem_output_spec, run_directory, run_id, appbinary_path) { - is.not.null <- function(x) !is.null(x) # helper for readability # 0) neither path or variables specified @@ -60,106 +58,115 @@ setup.outputs.dvmdostem <- function(dvmdostem_calibration, pecan_outvars <- pecan_requested_outputs outspec_path <- dvmdostem_output_spec } - - # Calibraton run? - if (grepl(tolower(dvmdostem_calibration), 'yes', fixed = TRUE )) { - PEcAn.logger::logger.warn("Calibration run requested! Ignoring requested ", - "output variables and using pre-set dvmdostem ", - "calibration outputs list") + + # Calibraton run? + if (grepl(tolower(dvmdostem_calibration), "yes", fixed = TRUE)) { + PEcAn.logger::logger.warn( + "Calibration run requested! 
Ignoring requested ", + "output variables and using pre-set dvmdostem ", + "calibration outputs list" + ) # Copy the base file to a run-specific output spec file - if (! file.exists(file.path(run_directory, "config")) ) { + if (!file.exists(file.path(run_directory, "config"))) { dir.create(file.path(run_directory, "config"), recursive = TRUE) } rs_outspec_path <- file.path(run_directory, "config/", basename(outspec_path)) file.copy(outspec_path, rs_outspec_path) - + # Empty the run specific output spec file - system2(file.path(appbinary_path, "scripts/outspec_utils.py"), - args=c("--empty", rs_outspec_path)) - + system2(file.path(appbinary_path, "scripts/outspec_utils.py"), + args = c("--empty", rs_outspec_path) + ) + # Turn on the dvmdostem calibration outputs system2(file.path(appbinary_path, "scripts/outspec_utils.py"), - args=c(rs_outspec_path, "--enable-cal-vars")) + args = c(rs_outspec_path, "--enable-cal-vars") + ) # Now enable anything in pecan_outvars that is not already enabled. requested_vars <- requested_vars_string2list(pecan_outvars, outspec_path = rs_outspec_path) - + # Figure out which variables are already 'ON' in order to support the calibration - # run. These will be at yearly resolution. We don't want to modify the output spec + # run. These will be at yearly resolution. We don't want to modify the output spec # setting for any of the calibration variables, not to mention the redundant work # (extra system call) to turn the variable on again. - a <- system2(file.path(appbinary_path, "scripts/outspec_utils.py"), - args=c(rs_outspec_path, "-s", "--csv"), stdout=TRUE) + a <- system2(file.path(appbinary_path, "scripts/outspec_utils.py"), + args = c(rs_outspec_path, "-s", "--csv"), stdout = TRUE + ) con <- textConnection(a) already_on <- utils::read.csv(con, header = TRUE) for (j in req_v_list) { if (j %in% already_on$Name) { - PEcAn.logger::logger.info(paste0("Passing on ",j,"; it is already enabled..." )) + PEcAn.logger::logger.info(paste0("Passing on ", j, "; it is already enabled...")) } else { system2(file.path(appbinary_path, "scripts/outspec_utils.py"), - args=c(rs_outspec_path, "--on", j, "y", "m")) + args = c(rs_outspec_path, "--on", j, "y", "m") + ) } } - ret <- system2(file.path(appbinary_path, "scripts/outspec_utils.py"), - args=c(rs_outspec_path, "--summary"), - stdout=TRUE, stderr=TRUE) + ret <- system2(file.path(appbinary_path, "scripts/outspec_utils.py"), + args = c(rs_outspec_path, "--summary"), + stdout = TRUE, stderr = TRUE + ) req_v_str <- paste0(sapply(strsplit(ret, "\\s+"), function(a) a[2])[-1], collapse = " ") - - - # done with calibration setup - } else { + + + # done with calibration setup + } else { # not a calibration run # Verify that the base output_spec file exists. - if (! file.exists(outspec_path) ) { + if (!file.exists(outspec_path)) { PEcAn.logger::logger.error("ERROR! The specified output spec file does not exist on this system!") PEcAn.logger::logger.error(c("Cannot find file: ", outspec_path)) stop() } - + # Check that at least one variable is enabled. - if( length(unlist((strsplit(pecan_outvars, ",")))) < 1 ){ + if (length(unlist((strsplit(pecan_outvars, ",")))) < 1) { PEcAn.logger::logger.error("ERROR! No output variables enabled!") PEcAn.logger::logger.error("Try adding the tag to your pecan.xml file!") stop() } - + req_var_list <- requested_vars_string2list() - + # Copy the base file to a run-specific output spec file - if (! 
file.exists(file.path(run_directory, "config")) ) { + if (!file.exists(file.path(run_directory, "config"))) { dir.create(file.path(run_directory, "config"), recursive = TRUE) } rs_outspec_path <- file.path(run_directory, "config/", basename(outspec_path)) file.copy(outspec_path, rs_outspec_path) - - # A more sophisticated test will verify that all the variables + + # A more sophisticated test will verify that all the variables # are valid at the correct dimensions(month and year??) - + # Empty the run specific output spec file - system2(file.path(appbinary_path, "scripts/outspec_utils.py"), - args=c("--empty", rs_outspec_path)) - + system2(file.path(appbinary_path, "scripts/outspec_utils.py"), + args = c("--empty", rs_outspec_path) + ) + # Fill the run specific output spec file according to list for (j in req_v_list) { system2(file.path(appbinary_path, "scripts/outspec_utils.py"), - args=c(rs_outspec_path, "--on", j, "y", "m")) + args = c(rs_outspec_path, "--on", j, "y", "m") + ) } } # Done with non-calibration run setup # Print summary for debugging - #system2(file.path(appbinary_path, "scripts/outspec_utils.py"), + # system2(file.path(appbinary_path, "scripts/outspec_utils.py"), # args=c("-s", rs_outspec_path)) - + return(c(rs_outspec_path, req_v_str)) } -##------------------------------------------------------------------------------------------------# -##' Look up the "depends_on" in the output variable mapping, -##' accumulate a list of dvmdostem variables to turn on to support +## ------------------------------------------------------------------------------------------------# +##' Look up the "depends_on" in the output variable mapping, +##' accumulate a list of dvmdostem variables to turn on to support ##' the requested variables in the pecan.xml tag ##' ##' @name requested_vars_string2list @@ -172,18 +179,20 @@ setup.outputs.dvmdostem <- function(dvmdostem_calibration, requested_vars_string2list <- function(req_v_str, outspec_path) { req_v_str <- "" for (pov in unlist(lapply(unlist(strsplit(pecan_outvars, ",")), trimws))) { - #print(paste("HERE>>>", vmap_reverse[[pov]][["depends_on"]])) + # print(paste("HERE>>>", vmap_reverse[[pov]][["depends_on"]])) req_v_str <- trimws(paste(req_v_str, vmap_reverse[[pov]][["depends_on"]], sep = ",")) } - # # Ugly, but basically jsut takes care of stripping out empty strings and + # # Ugly, but basically jsut takes care of stripping out empty strings and # making sure the final result is a 1D list, not nested. req_v_str <- trimws(req_v_str) - req_v_list <- unlist(lapply(unlist(strsplit(req_v_str, ",")), function(x){x[!x== ""]})) + req_v_list <- unlist(lapply(unlist(strsplit(req_v_str, ",")), function(x) { + x[!x == ""] + })) # Check that all variables specified in list exist in the base output spec file. a <- utils::read.csv(rs_outspec_path) for (j in req_v_list) { - if (! j %in% a[["Name"]]) { + if (!j %in% a[["Name"]]) { PEcAn.logger::logger.error(paste0("ERROR! Can't find variable: '", j, "' in the output spec file: ", rs_outspec_path)) stop() } @@ -211,38 +220,37 @@ requested_vars_string2list <- function(req_v_str, outspec_path) { #' @author Shawn Serbin, Tobey Carman #' convert.samples.dvmdostem <- function(trait_values) { - - if("SLA" %in% names(trait_values)) { + if ("SLA" %in% names(trait_values)) { # Convert from m2 / kg to m2 / g trait_values[["SLA"]] <- trait_values[["SLA"]] / 1000.0 } - if("cuticular_cond" %in% names(trait_values)) { + if ("cuticular_cond" %in% names(trait_values)) { # Convert from umol H2O m-2 s-1 to ??? 
# Original values in dvmdostem param files not making sense, no good # comments as to units. This conversion seems to make the values match # what we expect from the other data in the PEcAn/bety database. trait_values[["cuticular_cond"]] <- trait_values[["cuticular_cond"]] / 10^9 } - - if("vpd_open" %in% names(trait_values)) { + + if ("vpd_open" %in% names(trait_values)) { # Convert from kPa to Pa - trait_values[["vpd_open"]] <- PEcAn.utils::ud_convert(trait_values[["vpd_open"]],"kPa","Pa") + trait_values[["vpd_open"]] <- PEcAn.utils::ud_convert(trait_values[["vpd_open"]], "kPa", "Pa") } - - if("vpd_close" %in% names(trait_values)) { + + if ("vpd_close" %in% names(trait_values)) { # Convert from kPa to Pa - trait_values[["vpd_close"]] <- PEcAn.utils::ud_convert(trait_values[["vpd_close"]],"kPa","Pa") + trait_values[["vpd_close"]] <- PEcAn.utils::ud_convert(trait_values[["vpd_close"]], "kPa", "Pa") } - + # Return the modifed version - return (trait_values) + return(trait_values) } -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' Adjust the runmask for dvmdostem. This is necessary if you are ##' using a mutisite dvmdostem dataset (more than one grid cell/pixel) -##' and you are not forcing the community (cmt or vegetation) type. In +##' and you are not forcing the community (cmt or vegetation) type. In ##' other words you are using a vegetation map to determine the pixel's cmt ##' type. In this case you must make sure that for the site and PFTs ##' you have selected, the underlying veg map classifies the site as the @@ -257,54 +265,55 @@ convert.samples.dvmdostem <- function(trait_values) { ##' @return NULL ##' @export ##' @author Tobey Carman -##' +##' adjust.runmask.dvmdostem <- function(siteDataPath, rundir, pixel_X, pixel_Y) { - # Copy the run-mask from the input data directory to the run directory system2(paste0("cp"), - wait=TRUE, - args=(c("-r", - file.path(siteDataPath, 'run-mask.nc'), - file.path(rundir, 'run-mask.nc')))) - + wait = TRUE, + args = (c( + "-r", + file.path(siteDataPath, "run-mask.nc"), + file.path(rundir, "run-mask.nc") + )) + ) + # # Turn off all pixels except the 0,0 pixel in the mask # Can't seem to use this as python-netcdf4 is not available. WTF. # system2(paste0(file.path(appbinary_path, "scripts/runmask-util.py")), # wait=TRUE, # args=c("--reset", "--yx", pixel_Y, pixel_X, file.path(rundir, 'run-mask.nc'))) - + ## !!WARNING!! See note here: ## https://github.com/cran/ncdf4/blob/master/R/ncdf4.R ## Permalink: https://github.com/cran/ncdf4/blob/6eea28ce4e457054ff8d4cb90c58dce4ec765fd7/R/ncdf4.R#L1 ## ## Basically: - ## 1. R starts counting at 1, and netCDF counting - ## starts at 0. - ## 2. R array subscripts go in Fortran order (XYZT), - ## while netCDF subscripts go in C order (TZYX). + ## 1. R starts counting at 1, and netCDF counting + ## starts at 0. + ## 2. R array subscripts go in Fortran order (XYZT), + ## while netCDF subscripts go in C order (TZYX). ## 3. R does not have a 64bit integer datatype (which is the datatype ## of the run mask netCDF file). 
So we get a warning about casting ## the data from the R integer value to the netCDF 64bit integer - ncMaskFile <- ncdf4::nc_open(file.path(rundir, 'run-mask.nc'), write = TRUE) + ncMaskFile <- ncdf4::nc_open(file.path(rundir, "run-mask.nc"), write = TRUE) new_data <- matrix(0, ncMaskFile$dim$X$len, ncMaskFile$dim$Y$len) new_data[[strtoi(pixel_X), strtoi(pixel_Y)]] <- 1 - ncdf4::ncvar_put(ncMaskFile, ncMaskFile$var$run, new_data, verbose=TRUE) + ncdf4::ncvar_put(ncMaskFile, ncMaskFile$var$run, new_data, verbose = TRUE) ncdf4::nc_close(ncMaskFile) - - PEcAn.logger::logger.info(paste0("Set run mask pixel (y,x)=("),pixel_Y,",",pixel_X,")" ) - + + PEcAn.logger::logger.info(paste0("Set run mask pixel (y,x)=("), pixel_Y, ",", pixel_X, ")") } -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' Make sure that selected run mask pixel, veg map pixel value and CMT type are all copasetic. The -##' function calls stop() if there is anything inconsistent, for example more tha one pixel is -##' enabled in the run mask, or the enabled pixel's vegetation type does not match the +##' function calls stop() if there is anything inconsistent, for example more tha one pixel is +##' enabled in the run mask, or the enabled pixel's vegetation type does not match the ##' vegetation/community type of the chosen PFTs. -##' +##' ##' @name enforce.runmask.cmt.vegmap.harmony ##' @param siteDataPath is the path to the folder where we expect to find the dvmdostem input data files. -##' @param rundir is the path to the local running directory where customized files (config, parameters, +##' @param rundir is the path to the local running directory where customized files (config, parameters, ##' runmask etc) are copied to. ##' @param cmtnum is the community type (vegetation type) that should be used for the run. Based on the ##' chosen PFT, and required to look up the correct parameters in the parameter files. 
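The run-mask indexing caveat noted above is easy to get wrong, so here is a minimal, self-contained sketch (separate from the patch; the file name and 10x10 grid are hypothetical) of writing and then verifying a single-pixel mask with ncdf4, assuming the mask variable is named "run" as in the code above.

mask_file <- file.path(tempdir(), "run-mask-demo.nc")

# Dimensions are defined X-first, matching the matrix orientation used above:
# in R the first index is X and the second is Y, both counted from 1, while
# netCDF tools address the file C-order (Y, X) and count from 0.
xd <- ncdf4::ncdim_def(name = "X", units = "", vals = seq_len(10))
yd <- ncdf4::ncdim_def(name = "Y", units = "", vals = seq_len(10))
run_var <- ncdf4::ncvar_def(name = "run", units = "", dim = list(xd, yd), missval = -1, prec = "integer")

nc <- ncdf4::nc_create(mask_file, run_var)
mask <- matrix(0L, nrow = 10, ncol = 10) # [X, Y]
mask[3, 7] <- 1L # R (x = 3, y = 7) corresponds to netCDF indices (X = 2, Y = 6)
ncdf4::ncvar_put(nc, run_var, mask)
ncdf4::nc_close(nc)

# Read the mask back and confirm exactly one pixel is enabled -- the invariant
# that enforce.runmask.cmt.vegmap.harmony() is meant to check before a run.
nc <- ncdf4::nc_open(mask_file)
m <- ncdf4::ncvar_get(nc, "run")
ncdf4::nc_close(nc)
stopifnot(sum(m != 0) == 1)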
@@ -312,35 +321,33 @@ adjust.runmask.dvmdostem <- function(siteDataPath, rundir, pixel_X, pixel_Y) { ##' @export ##' @author Tobey Carman ##' -enforce.runmask.cmt.vegmap.harmony <- function(siteDataPath, rundir, cmtnum){ - +enforce.runmask.cmt.vegmap.harmony <- function(siteDataPath, rundir, cmtnum) { # Open the runmask and see which pixel is enabled - ncRunMaskFile <- ncdf4::nc_open(file.path(rundir, "run-mask.nc"), write=FALSE) + ncRunMaskFile <- ncdf4::nc_open(file.path(rundir, "run-mask.nc"), write = FALSE) run_mask <- ncdf4::ncvar_get(ncRunMaskFile, ncRunMaskFile$var$run) - enabled_px <- which(run_mask!=0, arr.ind=TRUE) + enabled_px <- which(run_mask != 0, arr.ind = TRUE) print("========================================================================") print(c("length(enabled_px):", length(enabled_px), " enabled_px:", unlist(enabled_px))) print("========================================================================") if (length(enabled_px != 2)) { PEcAn.logger::logger.error("THERE MUST BE A SINGLE PIXEL ENABLED IN THE RUN MASK FILE!") - PEcAn.logger::logger.error(c("Instead found ", length(enabled_px), " pixels in file: ", file.path(rundir, "run-mask.nc") ) ) + PEcAn.logger::logger.error(c("Instead found ", length(enabled_px), " pixels in file: ", file.path(rundir, "run-mask.nc"))) stop() } - + # Open the input veg file, check that the pixel that is enabled in the # run mask is the right veg type to match the cmt/pft that is selected # for the run. - ncVegCMTFile <- ncdf4::nc_open(file.path(siteDataPath, "vegetation.nc"), write=FALSE) + ncVegCMTFile <- ncdf4::nc_open(file.path(siteDataPath, "vegetation.nc"), write = FALSE) veg_class <- ncdf4::ncvar_get(ncVegCMTFile, ncVegCMTFile$var$veg_class) - if (cmtnum != veg_class[[enabled_px[1],enabled_px[2]]]) { + if (cmtnum != veg_class[[enabled_px[1], enabled_px[2]]]) { PEcAn.logger::logger.error("INCORRECT PIXEL!! THIS RUN WILL NOT WORK!") PEcAn.logger::logger.error("STOPPING NOW TO PREVENT FUTURE HEARTACHE!") stop() } - } -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' Writes a dvmdostem PEcAn config file. ##' ##' Requires a pft xml object, a list of trait values for a single model run, @@ -359,67 +366,72 @@ enforce.runmask.cmt.vegmap.harmony <- function(siteDataPath, rundir, cmtnum){ ##' @importFrom rjson fromJSON toJSON ##' write.config.dvmdostem <- function(defaults = NULL, trait.values, settings, run.id) { - # MAKE SURE TO USE PYTHON 3 FOR dvmdostem v0.5.0 AND UP!! # Also on the ubuntu VM, there is a symlink from ~/bin/python to /usr/bin/python3 # Same sym link setup on modex. Sys.setenv(PATH = paste(c("/home/carya/bin", Sys.getenv("PATH")), collapse = .Platform$path.sep)) Sys.setenv(PATH = paste(c("/home/tcarman/bin", Sys.getenv("PATH")), collapse = .Platform$path.sep)) - #PEcAn.logger::logger.info(system2("python", args="-V")) - + # PEcAn.logger::logger.info(system2("python", args="-V")) + ## site information ## (Currently unused) site <- settings$run$site site.id <- as.numeric(site$id) - + # Setup some local variables for this function for easily referencing # common locations for input, output, and the application binary. 
- local_rundir <- file.path(settings$rundir, run.id) # on local machine for staging - rundir <- file.path(settings$host$rundir, run.id) # on remote machine for execution + local_rundir <- file.path(settings$rundir, run.id) # on local machine for staging + rundir <- file.path(settings$host$rundir, run.id) # on remote machine for execution outdir <- file.path(settings$host$outdir, run.id) appbinary <- settings$model$binary - appbinary_path <- dirname(appbinary) # path of dvmdostem binary file - + appbinary_path <- dirname(appbinary) # path of dvmdostem binary file + # On the VM, these seem to be the same. PEcAn.logger::logger.info(paste0("local_rundir: ", local_rundir)) PEcAn.logger::logger.info(paste0("rundir: ", rundir)) - + # Copy the base set of dvmdostem parameters and configurations into the # run directory. Some of the values in these files will be overwritten in # subsequent steps, but copying everything up makes sure that all the # necessary files exist for a dvmdostem run - the config files and the # parameter files in this case. - if (dir.exists(file.path(rundir, 'config'))) { - unlink(file.path(rundir, 'config'), recursive=TRUE) + if (dir.exists(file.path(rundir, "config"))) { + unlink(file.path(rundir, "config"), recursive = TRUE) } system2(paste0("cp"), - wait=TRUE, - args=(c("-r", - file.path(appbinary_path, 'config'), - file.path(rundir, 'config')))) # this seems like a problem with below since we copy this first - # and below ask if it exists and if so don't copy the template version - - if (dir.exists(file.path(rundir, 'parameters'))) { - unlink(file.path(rundir, 'parameters'), recursive=TRUE) + wait = TRUE, + args = (c( + "-r", + file.path(appbinary_path, "config"), + file.path(rundir, "config") + )) + ) # this seems like a problem with below since we copy this first + # and below ask if it exists and if so don't copy the template version + + if (dir.exists(file.path(rundir, "parameters"))) { + unlink(file.path(rundir, "parameters"), recursive = TRUE) } system2(paste0("cp"), - wait=TRUE, - args=(c("-r", - file.path(appbinary_path, 'parameters'), - file.path(rundir, 'parameters')))) - - + wait = TRUE, + args = (c( + "-r", + file.path(appbinary_path, "parameters"), + file.path(rundir, "parameters") + )) + ) + + # Pull out the community name/number for use below in extracting # the correct block of data from the dvmdostem parameter files. # The settings$pfts$pft$name variable will be something like this: "CMT04-Salix" - cmtname <- unlist(strsplit(settings$pfts$pft$name, "-", fixed=TRUE))[1] + cmtname <- unlist(strsplit(settings$pfts$pft$name, "-", fixed = TRUE))[1] cmtnum <- as.numeric(unlist(strsplit(cmtname, "CMT"))[2]) # PEcAn.logger::logger.info(paste("cmtname: ", cmtname, " cmtnum: ", cmtnum)) - + # Check that all selected PFTs (from pecan.xml) have the same CMT number! for (pft in settings$pfts) { cur_pftname <- pft$name - cur_cmtname <- unlist(strsplit(cur_pftname, "-", fixed=TRUE))[1] + cur_cmtname <- unlist(strsplit(cur_pftname, "-", fixed = TRUE))[1] cur_cmtnum <- as.numeric(unlist(strsplit(cur_cmtname, "CMT"))[2]) if (cur_cmtname == cmtname) { # pass, evertthing ok @@ -429,10 +441,10 @@ write.config.dvmdostem <- function(defaults = NULL, trait.values, settings, run. stop() } } - + # (1) # Read in a parameter data block from dvmdostem - + # Now we have to read the appropriate values out of the trait_df # and get those values written into the parameter file(s) that dvmdostem will # need when running. 
Because the dvmdostem parameters have a sort of @@ -443,51 +455,55 @@ write.config.dvmdostem <- function(defaults = NULL, trait.values, settings, run. # - Read dvmdostem parameter file into json object, load into memory # - Update the in-memory json object # - Write the json object back out to a new dvmdostem parameter file - + # Next, use a helper script distributed with dvmdostem to read the dvmdostem # parameter data into memory as a json object, using a temporaroy json file # to hold a representation of each dvmdostem parameter file. - dimveg_params <- paste(appbinary_path, "parameters", 'cmt_dimvegetation.txt', sep="/") - envcanopy_params <- paste(appbinary_path, "parameters", 'cmt_envcanopy.txt', sep="/") - bgcveg_params <- paste(appbinary_path, "parameters", 'cmt_bgcvegetation.txt', sep="/") - calparbgc_params <- paste(appbinary_path, "parameters", "cmt_calparbgc.txt", sep="/") - + dimveg_params <- paste(appbinary_path, "parameters", "cmt_dimvegetation.txt", sep = "/") + envcanopy_params <- paste(appbinary_path, "parameters", "cmt_envcanopy.txt", sep = "/") + bgcveg_params <- paste(appbinary_path, "parameters", "cmt_bgcvegetation.txt", sep = "/") + calparbgc_params <- paste(appbinary_path, "parameters", "cmt_calparbgc.txt", sep = "/") + # Call the helper script and write out the data to a temporary file # This gets just the block we are interested in (based on community type) # create rundir temp directory - if (! file.exists(file.path(local_rundir, "tmp"))) { - dir.create(file.path(local_rundir, "tmp"), recursive=TRUE) + if (!file.exists(file.path(local_rundir, "tmp"))) { + dir.create(file.path(local_rundir, "tmp"), recursive = TRUE) } - dimveg_jsonfile <- file.path(local_rundir, "tmp",'dvmdostem-dimveg.json') + dimveg_jsonfile <- file.path(local_rundir, "tmp", "dvmdostem-dimveg.json") PEcAn.logger::logger.info(paste0("dimveg_jsonfile: ", dimveg_jsonfile)) - system2(paste0(appbinary_path,"/scripts/param_util.py"), - args=(c("--dump-block-to-json", dimveg_params, cmtnum)), - stdout=dimveg_jsonfile, wait=TRUE) - - envcanopy_jsonfile <- file.path(local_rundir, "tmp",'dvmdostem-envcanopy.json') + system2(paste0(appbinary_path, "/scripts/param_util.py"), + args = (c("--dump-block-to-json", dimveg_params, cmtnum)), + stdout = dimveg_jsonfile, wait = TRUE + ) + + envcanopy_jsonfile <- file.path(local_rundir, "tmp", "dvmdostem-envcanopy.json") PEcAn.logger::logger.info(paste0("envcanopy_jsonfile: ", envcanopy_jsonfile)) - system2(paste0(appbinary_path,"/scripts/param_util.py"), - args=(c("--dump-block-to-json", envcanopy_params, cmtnum)), - stdout=envcanopy_jsonfile, wait=TRUE) - - bgcveg_jsonfile <- file.path(local_rundir, "tmp",'dvmdostem-bgcveg.json') + system2(paste0(appbinary_path, "/scripts/param_util.py"), + args = (c("--dump-block-to-json", envcanopy_params, cmtnum)), + stdout = envcanopy_jsonfile, wait = TRUE + ) + + bgcveg_jsonfile <- file.path(local_rundir, "tmp", "dvmdostem-bgcveg.json") PEcAn.logger::logger.info(paste0("bgcveg_jsonfile: ", bgcveg_jsonfile)) - system2(paste0(appbinary_path,"/scripts/param_util.py"), - args=(c("--dump-block-to-json", bgcveg_params, cmtnum)), - stdout=bgcveg_jsonfile, wait=TRUE) - - calparbgc_jsonfile <- file.path(local_rundir, "tmp",'dvmdostem-calparbgc.json') + system2(paste0(appbinary_path, "/scripts/param_util.py"), + args = (c("--dump-block-to-json", bgcveg_params, cmtnum)), + stdout = bgcveg_jsonfile, wait = TRUE + ) + + calparbgc_jsonfile <- file.path(local_rundir, "tmp", "dvmdostem-calparbgc.json") 
PEcAn.logger::logger.info(paste0("calparbgc_jsonfile: ", calparbgc_jsonfile)) - system2(paste0(appbinary_path,"/scripts/param_util.py"), - args=(c("--dump-block-to-json", calparbgc_params, cmtnum)), - stdout=calparbgc_jsonfile, wait=TRUE) - + system2(paste0(appbinary_path, "/scripts/param_util.py"), + args = (c("--dump-block-to-json", calparbgc_params, cmtnum)), + stdout = calparbgc_jsonfile, wait = TRUE + ) + # Read the json file into memory - dimveg_jsondata <- fromJSON(paste(readLines(dimveg_jsonfile), collapse="")) - envcanopy_jsondata <- fromJSON(paste(readLines(envcanopy_jsonfile), collapse="")) - bgcveg_jsondata <- fromJSON(paste(readLines(bgcveg_jsonfile), collapse="")) - calparbgc_jsondata <- fromJSON(paste(readLines(calparbgc_jsonfile), collapse="")) - + dimveg_jsondata <- fromJSON(paste(readLines(dimveg_jsonfile), collapse = "")) + envcanopy_jsondata <- fromJSON(paste(readLines(envcanopy_jsonfile), collapse = "")) + bgcveg_jsondata <- fromJSON(paste(readLines(bgcveg_jsonfile), collapse = "")) + calparbgc_jsondata <- fromJSON(paste(readLines(calparbgc_jsonfile), collapse = "")) + # (2) # Overwrite parameter values with (ma-posterior) trait data from pecan PEcAn.logger::logger.info(paste0("CMT Name: ", cmtname)) @@ -507,9 +523,9 @@ write.config.dvmdostem <- function(defaults = NULL, trait.values, settings, run. # care of. So result will be something like this: # SW_albedo gcmax cuticular_cond SLA # 1.0 3.4 2.5 11.0 - + traits <- convert.samples.dvmdostem(trait.values[[singlepft$name]]) - + for (curr_trait in names(traits)) { for (jd in list(bgcveg_jsondata, envcanopy_jsondata, dimveg_jsondata)) { for (i in names(jd)) { @@ -527,88 +543,124 @@ write.config.dvmdostem <- function(defaults = NULL, trait.values, settings, run. # the variant. pft_common_name <- unlist(strsplit(pft_common_name, ".", fixed = TRUE))[1] - #PEcAn.logger::logger.info(paste0("PFT Name: ",cmtname)) # too verbose + # PEcAn.logger::logger.info(paste0("PFT Name: ",cmtname)) # too verbose if (identical(jd[[i]]$name, pft_common_name)) { PEcAn.logger::logger.info("Somewhere to stop...") - if (curr_trait == "cfall_leaf") { calparbgc_jsondata[[i]]$`cfall(0)` = traits[[curr_trait]] } - if (curr_trait == "cfall_stem") { calparbgc_jsondata[[i]]$`cfall(1)` = traits[[curr_trait]] } - if (curr_trait == "cfall_root") { calparbgc_jsondata[[i]]$`cfall(2)` = traits[[curr_trait]] } - if (curr_trait == "nfall_leaf") { calparbgc_jsondata[[i]]$`nfall(0)` = traits[[curr_trait]] } - if (curr_trait == "nfall_stem") { calparbgc_jsondata[[i]]$`nfall(1)` = traits[[curr_trait]] } - if (curr_trait == "nfall_root") { calparbgc_jsondata[[i]]$`nfall(2)` = traits[[curr_trait]] } - if (curr_trait == "krb_leaf") { calparbgc_jsondata[[i]]$`krb(0)` = traits[[curr_trait]] } - if (curr_trait == "krb_stem") { calparbgc_jsondata[[i]]$`krb(1)` = traits[[curr_trait]] } - if (curr_trait == "krb_root") { calparbgc_jsondata[[i]]$`krb(2)` = traits[[curr_trait]] } - if (curr_trait == "kra") { calparbgc_jsondata[[i]]$kra = traits[[curr_trait]] } - if (curr_trait == "frg") { calparbgc_jsondata[[i]]$frg = traits[[curr_trait]] } - if (curr_trait == "nmax") { calparbgc_jsondata[[i]]$nmax = traits[[curr_trait]] } - if (curr_trait == "cmax") { calparbgc_jsondata[[i]]$cmax = traits[[curr_trait]] } - if (curr_trait == "micbnup") { calparbgc_jsondata[[i]]$micbnup = traits[[curr_trait]] } - if (curr_trait == "kdcrawc") { calparbgc_jsondata[[i]]$kdcrawc = traits[[curr_trait]] } - if (curr_trait == "kdcsoma") { calparbgc_jsondata[[i]]$kdcsoma = traits[[curr_trait]] } - if 
(curr_trait == "kdcsompr") { calparbgc_jsondata[[i]]$kdcsompr = traits[[curr_trait]] } - if (curr_trait == "kdcsomcr") { calparbgc_jsondata[[i]]$kdcsomcr = traits[[curr_trait]] } + if (curr_trait == "cfall_leaf") { + calparbgc_jsondata[[i]]$`cfall(0)` <- traits[[curr_trait]] + } + if (curr_trait == "cfall_stem") { + calparbgc_jsondata[[i]]$`cfall(1)` <- traits[[curr_trait]] + } + if (curr_trait == "cfall_root") { + calparbgc_jsondata[[i]]$`cfall(2)` <- traits[[curr_trait]] + } + if (curr_trait == "nfall_leaf") { + calparbgc_jsondata[[i]]$`nfall(0)` <- traits[[curr_trait]] + } + if (curr_trait == "nfall_stem") { + calparbgc_jsondata[[i]]$`nfall(1)` <- traits[[curr_trait]] + } + if (curr_trait == "nfall_root") { + calparbgc_jsondata[[i]]$`nfall(2)` <- traits[[curr_trait]] + } + if (curr_trait == "krb_leaf") { + calparbgc_jsondata[[i]]$`krb(0)` <- traits[[curr_trait]] + } + if (curr_trait == "krb_stem") { + calparbgc_jsondata[[i]]$`krb(1)` <- traits[[curr_trait]] + } + if (curr_trait == "krb_root") { + calparbgc_jsondata[[i]]$`krb(2)` <- traits[[curr_trait]] + } + if (curr_trait == "kra") { + calparbgc_jsondata[[i]]$kra <- traits[[curr_trait]] + } + if (curr_trait == "frg") { + calparbgc_jsondata[[i]]$frg <- traits[[curr_trait]] + } + if (curr_trait == "nmax") { + calparbgc_jsondata[[i]]$nmax <- traits[[curr_trait]] + } + if (curr_trait == "cmax") { + calparbgc_jsondata[[i]]$cmax <- traits[[curr_trait]] + } + if (curr_trait == "micbnup") { + calparbgc_jsondata[[i]]$micbnup <- traits[[curr_trait]] + } + if (curr_trait == "kdcrawc") { + calparbgc_jsondata[[i]]$kdcrawc <- traits[[curr_trait]] + } + if (curr_trait == "kdcsoma") { + calparbgc_jsondata[[i]]$kdcsoma <- traits[[curr_trait]] + } + if (curr_trait == "kdcsompr") { + calparbgc_jsondata[[i]]$kdcsompr <- traits[[curr_trait]] + } + if (curr_trait == "kdcsomcr") { + calparbgc_jsondata[[i]]$kdcsomcr <- traits[[curr_trait]] + } if (curr_trait == "SLA") { - dimveg_jsondata[[i]]$sla = traits[[curr_trait]] + dimveg_jsondata[[i]]$sla <- traits[[curr_trait]] } if (curr_trait == "frprod_perc_10") { - dimveg_jsondata[[i]]$`frprod[0]` = traits[[curr_trait]] + dimveg_jsondata[[i]]$`frprod[0]` <- traits[[curr_trait]] } if (curr_trait == "frprod_perc_20") { - dimveg_jsondata[[i]]$`frprod[1]` = traits[[curr_trait]] + dimveg_jsondata[[i]]$`frprod[1]` <- traits[[curr_trait]] } if (curr_trait == "frprod_perc_30") { - dimveg_jsondata[[i]]$`frprod[2]` = traits[[curr_trait]] + dimveg_jsondata[[i]]$`frprod[2]` <- traits[[curr_trait]] } if (curr_trait == "frprod_perc_40") { - dimveg_jsondata[[i]]$`frprod[3]` = traits[[curr_trait]] + dimveg_jsondata[[i]]$`frprod[3]` <- traits[[curr_trait]] } if (curr_trait == "frprod_perc_50") { - dimveg_jsondata[[i]]$`frprod[4]` = traits[[curr_trait]] + dimveg_jsondata[[i]]$`frprod[4]` <- traits[[curr_trait]] } if (curr_trait == "klai") { - dimveg_jsondata[[i]]$klai = traits[[curr_trait]] + dimveg_jsondata[[i]]$klai <- traits[[curr_trait]] } if (curr_trait == "ilai") { - dimveg_jsondata[[i]]$initial_lai = traits[[curr_trait]] + dimveg_jsondata[[i]]$initial_lai <- traits[[curr_trait]] } if (curr_trait == "extinction_coefficient_diffuse") { - envcanopy_jsondata[[i]]$er = traits[[curr_trait]] + envcanopy_jsondata[[i]]$er <- traits[[curr_trait]] } if (curr_trait == "SW_albedo") { - envcanopy_jsondata[[i]]$albvisnir = traits[[curr_trait]] + envcanopy_jsondata[[i]]$albvisnir <- traits[[curr_trait]] } if (curr_trait == "cuticular_cond") { - envcanopy_jsondata[[i]]$gl_c = traits[[curr_trait]] + envcanopy_jsondata[[i]]$gl_c <- 
traits[[curr_trait]] } if (curr_trait == "gcmax") { - envcanopy_jsondata[[i]]$glmax = traits[[curr_trait]] + envcanopy_jsondata[[i]]$glmax <- traits[[curr_trait]] } if (curr_trait == "ppfd50") { - envcanopy_jsondata[[i]]$ppfd50 = traits[[curr_trait]] + envcanopy_jsondata[[i]]$ppfd50 <- traits[[curr_trait]] } if (curr_trait == "vpd_open") { - envcanopy_jsondata[[i]]$vpd_open = traits[[curr_trait]] + envcanopy_jsondata[[i]]$vpd_open <- traits[[curr_trait]] } if (curr_trait == "vpd_close") { - envcanopy_jsondata[[i]]$vpd_close = traits[[curr_trait]] + envcanopy_jsondata[[i]]$vpd_close <- traits[[curr_trait]] } if (curr_trait == "pstemp_min") { - bgcveg_jsondata[[i]]$tmin = traits[[curr_trait]] + bgcveg_jsondata[[i]]$tmin <- traits[[curr_trait]] } if (curr_trait == "pstemp_low") { - bgcveg_jsondata[[i]]$toptmin = traits[[curr_trait]] + bgcveg_jsondata[[i]]$toptmin <- traits[[curr_trait]] } if (curr_trait == "pstemp_high") { - bgcveg_jsondata[[i]]$toptmax = traits[[curr_trait]] + bgcveg_jsondata[[i]]$toptmax <- traits[[curr_trait]] } if (curr_trait == "pstemp_max") { - bgcveg_jsondata[[i]]$tmax = traits[[curr_trait]] + bgcveg_jsondata[[i]]$tmax <- traits[[curr_trait]] } if (curr_trait == "labncon") { - bgcveg_jsondata[[i]]$labncon = traits[[curr_trait]] + bgcveg_jsondata[[i]]$labncon <- traits[[curr_trait]] } } } @@ -616,57 +668,61 @@ write.config.dvmdostem <- function(defaults = NULL, trait.values, settings, run. } # end loop over different json structures } # end loop over traits } # end loop over pfts - + # Write it back out to disk (overwriting ok??) dimveg_exportJson <- toJSON(dimveg_jsondata) - write(dimveg_exportJson, file.path(local_rundir, "tmp","dimveg_newfile.json")) - + write(dimveg_exportJson, file.path(local_rundir, "tmp", "dimveg_newfile.json")) + envcanopy_exportJson <- toJSON(envcanopy_jsondata) - write(envcanopy_exportJson, file.path(local_rundir, "tmp","envcanopy_newfile.json")) - + write(envcanopy_exportJson, file.path(local_rundir, "tmp", "envcanopy_newfile.json")) + bgcveg_exportJson <- toJSON(bgcveg_jsondata) - write(bgcveg_exportJson, file.path(local_rundir, "tmp","bgcveg_newfile.json")) - + write(bgcveg_exportJson, file.path(local_rundir, "tmp", "bgcveg_newfile.json")) + calparbgc_exportJson <- toJSON(calparbgc_jsondata) write(calparbgc_exportJson, file.path(local_rundir, "tmp", "calparbgc_newfile.json")) # (3) # Format a new dvmdostem parameter file using the new json file as a source. - + if (dir.exists(file.path(rundir, "parameters/"))) { # pass } else { print("No parameter/ directory in run directory! 
Need to create...") - dir.create(file.path(rundir,"parameters" )) + dir.create(file.path(rundir, "parameters")) } - - ref_file <- paste0(file.path(appbinary_path, "parameters/"), 'cmt_dimvegetation.txt') + + ref_file <- paste0(file.path(appbinary_path, "parameters/"), "cmt_dimvegetation.txt") new_param_file <- paste0(file.path(local_rundir, "parameters/"), "cmt_dimvegetation.txt") - system2(paste0(appbinary_path,"/scripts/param_util.py"), - args=(c("--fmt-block-from-json", file.path(local_rundir, "tmp","dimveg_newfile.json"), ref_file)), - stdout=new_param_file, wait=TRUE) - - ref_file <- paste0(file.path(appbinary_path, "parameters/"), 'cmt_envcanopy.txt') + system2(paste0(appbinary_path, "/scripts/param_util.py"), + args = (c("--fmt-block-from-json", file.path(local_rundir, "tmp", "dimveg_newfile.json"), ref_file)), + stdout = new_param_file, wait = TRUE + ) + + ref_file <- paste0(file.path(appbinary_path, "parameters/"), "cmt_envcanopy.txt") new_param_file <- paste0(file.path(local_rundir, "parameters/"), "cmt_envcanopy.txt") - system2(paste0(appbinary_path,"/scripts/param_util.py"), - args=(c("--fmt-block-from-json", file.path(local_rundir, "tmp","envcanopy_newfile.json"), ref_file)), - stdout=new_param_file, wait=TRUE) - - ref_file <- paste0(file.path(appbinary_path, "parameters/"), 'cmt_bgcvegetation.txt') + system2(paste0(appbinary_path, "/scripts/param_util.py"), + args = (c("--fmt-block-from-json", file.path(local_rundir, "tmp", "envcanopy_newfile.json"), ref_file)), + stdout = new_param_file, wait = TRUE + ) + + ref_file <- paste0(file.path(appbinary_path, "parameters/"), "cmt_bgcvegetation.txt") new_param_file <- paste0(file.path(local_rundir, "parameters/"), "cmt_bgcvegetation.txt") - system2(paste0(appbinary_path,"/scripts/param_util.py"), - args=(c("--fmt-block-from-json", file.path(local_rundir, "tmp","bgcveg_newfile.json"), ref_file)), - stdout=new_param_file, wait=TRUE) + system2(paste0(appbinary_path, "/scripts/param_util.py"), + args = (c("--fmt-block-from-json", file.path(local_rundir, "tmp", "bgcveg_newfile.json"), ref_file)), + stdout = new_param_file, wait = TRUE + ) - ref_file <- paste0(file.path(appbinary_path, "parameters/"), 'cmt_calparbgc.txt') + ref_file <- paste0(file.path(appbinary_path, "parameters/"), "cmt_calparbgc.txt") new_param_file <- paste0(file.path(local_rundir, "parameters/"), "cmt_calparbgc.txt") - system2(paste0(appbinary_path,"/scripts/param_util.py"), - args=(c("--fmt-block-from-json", file.path(local_rundir, "tmp","calparbgc_newfile.json"), ref_file)), - stdout=new_param_file, wait=TRUE) - + system2(paste0(appbinary_path, "/scripts/param_util.py"), + args = (c("--fmt-block-from-json", file.path(local_rundir, "tmp", "calparbgc_newfile.json"), ref_file)), + stdout = new_param_file, wait = TRUE + ) + ## Cleanup rundir temp directory - comment out for debugging - unlink(file.path(local_rundir, "tmp"), recursive = TRUE, force = FALSE) # comment out for debugging - + unlink(file.path(local_rundir, "tmp"), recursive = TRUE, force = FALSE) # comment out for debugging + # TODO: # [x] finish with parameter update process # [x] dynamically copy parameters to right place @@ -675,22 +731,22 @@ write.config.dvmdostem <- function(defaults = NULL, trait.values, settings, run. # - figure out how to handle the met. # -> step one is symlink from raw data locations (Model install folder) # into pecan run folder, maybe do this within job.sh? 
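# Illustrative sketch (not part of the generated run configuration) of the
# dump -> edit -> re-format round trip used above. The JSON literal below is
# hand-written stand-in data; in the real flow the JSON comes from
# scripts/param_util.py --dump-block-to-json, and the edited file would be
# handed back to param_util.py --fmt-block-from-json.
demo_block <- rjson::fromJSON('{"pft0": {"name": "Salix", "sla": 9.0}}')
demo_block[["pft0"]]$sla <- 11.0 # overwrite with a meta-analysis trait sample
write(rjson::toJSON(demo_block), file.path(tempdir(), "dimveg_demo.json"))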
- - # Here we set up two things: + + # Here we set up two things: # 1) the paths to the met drivers (temperature, precip, etc) - # 2) paths to the other input data files that dvmdostem requires (soil maps, + # 2) paths to the other input data files that dvmdostem requires (soil maps, # vegetation maps, topography etc). - # This will allow us to source the meteorology data from PEcAn (or BetyDB) and + # This will allow us to source the meteorology data from PEcAn (or BetyDB) and # collect the other inputs from a different location. - + # Met info met_driver_dir <- dirname(settings$run$inputs$met$path) # Not sure what happens here if the site is selected from the # map and instead of having tags, the xml file has # tags? - + # Pick up the site and pixel settings from the xml file if they exist - if (is.null(settings$model$dvmdostem_site)){ + if (is.null(settings$model$dvmdostem_site)) { # Client did not setup a dvmdostem specific site tags in the # xml file. Assume that all the site data has the same path # as the met data @@ -701,45 +757,45 @@ write.config.dvmdostem <- function(defaults = NULL, trait.values, settings, run. } PEcAn.logger::logger.info(paste0("Using siteDataPath: ", siteDataPath)) PEcAn.logger::logger.info(paste0("Using met_driver_path: ", met_driver_dir)) - + # Check the size of the input dataset(s) # 1) met data set and other site data are the same size/shape (what if the # met comes from PEcAn/Bety and is single site and the other inputs come # from a single or multi-pixel dvmdostem dataset???) - # 2) if the incoming datasets are NOT single pixel, then + # 2) if the incoming datasets are NOT single pixel, then # we need to adjust the run-mask and choose the correct pixel # 3) if the incoming datasets ARE single pixel, then no runmask # adjustment, but warn user if they have set dvmdostem_pixel_x # and dvmdostem_pixel_y tags in the xml file - - if (is.null(settings$model$dvmdostem_pixel_y)){ + + if (is.null(settings$model$dvmdostem_pixel_y)) { pixel_Y <- 1 } else { pixel_Y <- settings$model$dvmdostem_pixel_y } - if (is.null(settings$model$dvmdostem_pixel_x)){ + if (is.null(settings$model$dvmdostem_pixel_x)) { pixel_X <- 1 } else { pixel_X <- settings$model$dvmdostem_pixel_x } - + # First, turn on a specific pixel in the run mask. # In the case of a single pixel dataset, this will ensure that the # pixel is set to run. adjust.runmask.dvmdostem(siteDataPath, rundir, pixel_X, pixel_Y) - + # If the user has not explicity said to force the CMT type in # their xml settings, then we have to look at the run mask, figure out # which pixel is enabled, and then check the corresponding pixel in the # veg map to make sure that cmt number matches the cmt number for the # chosen PFTs. - if ((toupper(settings$model$dvmdostem_forcecmtnum) == 'YES') || (toupper(settings$model$dvmdostem_forcecmtnum) == "Y")) { + if ((toupper(settings$model$dvmdostem_forcecmtnum) == "YES") || (toupper(settings$model$dvmdostem_forcecmtnum) == "Y")) { # Nothing to do } else { print("Enforcing harmony between the runmask, vegmap, and required CMT type.") enforce.runmask.cmt.vegmap.harmony(siteDataPath, rundir, cmtnum) } - + # if (!is.null(settings$model$dvmdostem_calibration)){ # if (grepl(settings$model$dvmdostem_calibration, "yes", ignore.case = TRUE)) { # # Ignore the following: @@ -751,143 +807,143 @@ write.config.dvmdostem <- function(defaults = NULL, trait.values, settings, run. 
# } # rs_outspec_path <- file.path(rundir, "config/", basename(outspec_path)) # file.copy(outspec_path, rs_outspec_path) - # - # + # + # # } else { - # + # # } - - # # A more sophisticated test will verify that all the variables + + # # A more sophisticated test will verify that all the variables # # are valid at the correct dimensions(month and year??) - # + # # # Empty the run specific output spec file - # system2(file.path(appbinary_path, "scripts/outspec_utils.py"), + # system2(file.path(appbinary_path, "scripts/outspec_utils.py"), # args=c("--empty", rs_outspec_path)) - # + # # # Fill the run specific output spec file according to list # for (j in req_v_list) { # system2(file.path(appbinary_path, "scripts/outspec_utils.py"), # args=c(rs_outspec_path, "--on", j, "y", "m")) # } - # + # # # Print summary for debugging - # #system2(file.path(appbinary_path, "scripts/outspec_utils.py"), + # #system2(file.path(appbinary_path, "scripts/outspec_utils.py"), # # args=c("-s", rs_outspec_path)) - # + # # return(c(rs_outspec_path, req_v_str)) - + # setup the variables to output based on tags in xml file. v <- setup.outputs.dvmdostem( settings$model$dvmdostem_calibration, - settings$model$dvmdostem_pecan_outputs, + settings$model$dvmdostem_pecan_outputs, settings$model$dvmdostem_output_spec, rundir, run.id, appbinary_path ) rs_outspec_path <- v[1] PEcAn.logger::logger.info(paste0("Will be generating the following dvmdostem output variables: ", v[2])) - + ## Update dvm-dos-tem config.js file - + # Get a copy of the config file written into the run directory with the # appropriate template parameters substituted. if (!is.null(settings$model$configtemplate) && file.exists(settings$model$configtemplate)) { - config_template <- readLines(con=settings$model$config_template, n=-1) + config_template <- readLines(con = settings$model$config_template, n = -1) } else { - config_template <- readLines(con=system.file("config.js.template", package = "PEcAn.dvmdostem"), n=-1) + config_template <- readLines(con = system.file("config.js.template", package = "PEcAn.dvmdostem"), n = -1) } - + config_template <- gsub("@MET_DRIVER_DIR@", met_driver_dir, config_template) - #config_template <- gsub("@INPUT_DATA_DIR@", file.path(dirname(appbinary), siteDataPath), config_template) + # config_template <- gsub("@INPUT_DATA_DIR@", file.path(dirname(appbinary), siteDataPath), config_template) config_template <- gsub("@INPUT_DATA_DIR@", siteDataPath, config_template) config_template <- gsub("@MODEL_OUTPUT_DIR@", outdir, config_template) config_template <- gsub("@CUSTOM_RUN_MASK@", file.path(rundir), config_template) config_template <- gsub("@CUSTOM_OUTSPEC@", file.path("config/", basename(rs_outspec_path)), config_template) config_template <- gsub("@DYNAMIC_MODELED_LAI@", settings$model$dvmdostem_dynamic_modeled_lai, config_template) - if (grepl(tolower(settings$model$dvmdostem_calibration), "yes", fixed=TRUE)) { - config_template <- gsub("@EQ_NC_OUTPUT@", 1, config_template) # WARNING, output volume can be prohibitive! + if (grepl(tolower(settings$model$dvmdostem_calibration), "yes", fixed = TRUE)) { + config_template <- gsub("@EQ_NC_OUTPUT@", 1, config_template) # WARNING, output volume can be prohibitive! config_template <- gsub("@NC_OUTPUT_LAST_N_EQ@", settings$model$dvmdostem_nc_output_last_n_eq, config_template) } else { config_template <- gsub("@EQ_NC_OUTPUT@", 0, config_template) config_template <- gsub("@NC_OUTPUT_LAST_N_EQ@", -1, config_template) } - if (! 
file.exists(file.path(settings$rundir, run.id,"config")) ) { - dir.create(file.path(settings$rundir, run.id,"config"),recursive = TRUE) + if (!file.exists(file.path(settings$rundir, run.id, "config"))) { + dir.create(file.path(settings$rundir, run.id, "config"), recursive = TRUE) } - - writeLines(config_template, con=file.path(settings$rundir, run.id,"config/config.js")) - + + writeLines(config_template, con = file.path(settings$rundir, run.id, "config/config.js")) + ### create launch script (which will create symlink) - needs to be created if (!is.null(settings$model$jobtemplate) && file.exists(settings$model$jobtemplate)) { - jobsh <- readLines(con=settings$model$jobtemplate, n=-1) + jobsh <- readLines(con = settings$model$jobtemplate, n = -1) } else { - jobsh <- readLines(con=system.file("job.sh.template", package = "PEcAn.dvmdostem"), n=-1) + jobsh <- readLines(con = system.file("job.sh.template", package = "PEcAn.dvmdostem"), n = -1) } - + ### create host specific setttings - stubbed for now, nothing to do yet, ends up as empty ### string that is put into the job.sh file hostsetup <- "" if (!is.null(settings$model$prerun)) { - hostsetup <- paste(hostsetup, sep="\n", paste(settings$model$prerun, collapse="\n")) + hostsetup <- paste(hostsetup, sep = "\n", paste(settings$model$prerun, collapse = "\n")) } if (!is.null(settings$host$prerun)) { - hostsetup <- paste(hostsetup, sep="\n", paste(settings$host$prerun, collapse="\n")) + hostsetup <- paste(hostsetup, sep = "\n", paste(settings$host$prerun, collapse = "\n")) } - + hostteardown <- "" if (!is.null(settings$model$postrun)) { - hostteardown <- paste(hostteardown, sep="\n", paste(settings$model$postrun, collapse="\n")) + hostteardown <- paste(hostteardown, sep = "\n", paste(settings$model$postrun, collapse = "\n")) } if (!is.null(settings$host$postrun)) { - hostteardown <- paste(hostteardown, sep="\n", paste(settings$host$postrun, collapse="\n")) + hostteardown <- paste(hostteardown, sep = "\n", paste(settings$host$postrun, collapse = "\n")) } - + jobsh <- gsub("@HOST_SETUP@", hostsetup, jobsh) jobsh <- gsub("@HOST_TEARDOWN@", hostteardown, jobsh) - + jobsh <- gsub("@RUNDIR@", rundir, jobsh) jobsh <- gsub("@OUTDIR@", outdir, jobsh) jobsh <- gsub("@BINARY@", appbinary, jobsh) - + ## model specific options from the pecan.xml file # setup defaults if missing - may not want to do this long term - if (is.null(settings$model$dvmdostem_prerun)){ + if (is.null(settings$model$dvmdostem_prerun)) { jobsh <- gsub("@PRERUN@", 100, jobsh) } else { jobsh <- gsub("@PRERUN@", settings$model$dvmdostem_prerun, jobsh) } - - if (is.null(settings$model$dvmdostem_equil)){ + + if (is.null(settings$model$dvmdostem_equil)) { jobsh <- gsub("@EQUILIBRIUM@", 1000, jobsh) } else { jobsh <- gsub("@EQUILIBRIUM@", settings$model$dvmdostem_equil, jobsh) } - - if (is.null(settings$model$dvmdostem_spinup)){ + + if (is.null(settings$model$dvmdostem_spinup)) { jobsh <- gsub("@SPINUP@", 450, jobsh) } else { jobsh <- gsub("@SPINUP@", settings$model$dvmdostem_spinup, jobsh) } - - if (is.null(settings$model$dvmdostem_transient)){ - jobsh <- gsub("@TRANSIENT@", 109, jobsh) # what if this isn't the case? Do we want to hard code backup or just end in error? - } else { # could just invoke a stop() here for these if missing and provide an error message + + if (is.null(settings$model$dvmdostem_transient)) { + jobsh <- gsub("@TRANSIENT@", 109, jobsh) # what if this isn't the case? Do we want to hard code backup or just end in error? 
+ } else { # could just invoke a stop() here for these if missing and provide an error message jobsh <- gsub("@TRANSIENT@", settings$model$dvmdostem_transient, jobsh) } - - if (is.null(settings$model$dvmdostem_scenerio)){ + + if (is.null(settings$model$dvmdostem_scenerio)) { jobsh <- gsub("@SCENERIO@", 91, jobsh) } else { jobsh <- gsub("@SCENERIO@", settings$model$dvmdostem_scenerio, jobsh) } - - if (is.null(settings$model$dvmdostem_loglevel)){ + + if (is.null(settings$model$dvmdostem_loglevel)) { jobsh <- gsub("@LOGLEVEL@", "err", jobsh) } else { jobsh <- gsub("@LOGLEVEL@", settings$model$dvmdostem_loglevel, jobsh) } - - if (is.null(settings$model$dvmdostem_forcecmtnum)){ + + if (is.null(settings$model$dvmdostem_forcecmtnum)) { PEcAn.logger::logger.info("Using vegetation.nc input file to determine community type of pixel...") PEcAn.logger::logger.warn("The CMT type of your selected PFT must match the CMT type in the input veg file for the selected pixel!") jobsh <- gsub("@FORCE_CMTNUM@", "", jobsh) @@ -895,9 +951,9 @@ write.config.dvmdostem <- function(defaults = NULL, trait.values, settings, run. PEcAn.logger::logger.info("FORCING cmt type to match selected PFT. IGNORING vegetation.nc map!") jobsh <- gsub("@FORCE_CMTNUM@", paste0("--force-cmt ", cmtnum), jobsh) } - + jobsh <- gsub("@PECANREQVARS@", settings$model$dvmdostem_pecan_outputs, jobsh) - + # Really no idea what the defaults should be for these if the user # does not specify them in the pecan.xml file... if (is.null(settings$run$start.date)) { @@ -905,16 +961,15 @@ write.config.dvmdostem <- function(defaults = NULL, trait.values, settings, run. } else { jobsh <- gsub("@RUNSTART@", settings$run$start.date, jobsh) } - + if (is.null(settings$run$end.date)) { jobsh <- gsub("@RUNEND@", "", jobsh) } else { jobsh <- gsub("@RUNEND@", settings$run$end.date, jobsh) } - - writeLines(jobsh, con=file.path(settings$rundir, run.id,"job.sh")) - Sys.chmod(file.path(settings$rundir, run.id,"job.sh")) - + + writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) + Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) } # end of function #------------------------------------------------------------------------------------------------# ### EOF diff --git a/models/dvmdostem/tests/testthat/test-model2netcdf.dvmdostem.R b/models/dvmdostem/tests/testthat/test-model2netcdf.dvmdostem.R index b2b9eca1503..f04b2a3a344 100644 --- a/models/dvmdostem/tests/testthat/test-model2netcdf.dvmdostem.R +++ b/models/dvmdostem/tests/testthat/test-model2netcdf.dvmdostem.R @@ -1,8 +1,7 @@ - context("test model2netcdf.dvmdostem") test_that("PLACEHOLDER", { - # Package checking fails if there are no tests at all, - # so this is an empty placeholder to let checks finish. - # Please replace it with real tests. -}) \ No newline at end of file + # Package checking fails if there are no tests at all, + # so this is an empty placeholder to let checks finish. + # Please replace it with real tests. 
+}) diff --git a/models/dvmdostem/tests/testthat/test.met2model.R b/models/dvmdostem/tests/testthat/test.met2model.R index 3fdbc89eefe..a56f13f7eaf 100644 --- a/models/dvmdostem/tests/testthat/test.met2model.R +++ b/models/dvmdostem/tests/testthat/test.met2model.R @@ -7,7 +7,8 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("Met conversion runs without error", { skip("Not implemented") nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" diff --git a/models/ed/R/SAS.ED2.R b/models/ed/R/SAS.ED2.R index 679e2710481..2164506082d 100644 --- a/models/ed/R/SAS.ED2.R +++ b/models/ed/R/SAS.ED2.R @@ -30,36 +30,35 @@ ##' @param slxclay Soil percent clay; used to calculate expected fire return interval ##' @export ##' -SAS.ED2.param.Args <- function(decomp_scheme=2, +SAS.ED2.param.Args <- function(decomp_scheme = 2, kh_active_depth = -0.20, - decay_rate_fsc=11, - decay_rate_stsc=4.5, - decay_rate_ssc=0.2, - Lc=0.049787, - c2n_slow=10.0, - c2n_structural=150.0, - r_stsc=0.3, # Constants from ED - rh_decay_low=0.24, - rh_decay_high=0.60, - rh_low_temp=18.0+273.15, - rh_high_temp=45.0+273.15, - rh_decay_dry=12.0, - rh_decay_wet=36.0, - rh_dry_smoist=0.48, - rh_wet_smoist=0.98, - resp_opt_water=0.8938, - resp_water_below_opt=5.0786, - resp_water_above_opt=4.5139, - resp_temperature_increase=0.0757, - rh_lloyd_1=308.56, - rh_lloyd_2=1/56.02, - rh_lloyd_3=227.15, - yrs.met=30, - sm_fire=0, - fire_intensity=0, - slxsand=0.33, - slxclay=0.33) { - + decay_rate_fsc = 11, + decay_rate_stsc = 4.5, + decay_rate_ssc = 0.2, + Lc = 0.049787, + c2n_slow = 10.0, + c2n_structural = 150.0, + r_stsc = 0.3, # Constants from ED + rh_decay_low = 0.24, + rh_decay_high = 0.60, + rh_low_temp = 18.0 + 273.15, + rh_high_temp = 45.0 + 273.15, + rh_decay_dry = 12.0, + rh_decay_wet = 36.0, + rh_dry_smoist = 0.48, + rh_wet_smoist = 0.98, + resp_opt_water = 0.8938, + resp_water_below_opt = 5.0786, + resp_water_above_opt = 4.5139, + resp_temperature_increase = 0.0757, + rh_lloyd_1 = 308.56, + rh_lloyd_2 = 1 / 56.02, + rh_lloyd_3 = 227.15, + yrs.met = 30, + sm_fire = 0, + fire_intensity = 0, + slxsand = 0.33, + slxclay = 0.33) { return( data.frame( decomp_scheme = decomp_scheme, @@ -72,7 +71,7 @@ SAS.ED2.param.Args <- function(decomp_scheme=2, c2n_structural = c2n_structural, r_stsc = r_stsc, # Constants from ED rh_decay_low = rh_decay_low, - rh_decay_high = rh_decay_high, + rh_decay_high = rh_decay_high, rh_low_temp = rh_low_temp, rh_high_temp = rh_high_temp, rh_decay_dry = rh_decay_dry, @@ -86,57 +85,55 @@ SAS.ED2.param.Args <- function(decomp_scheme=2, rh_lloyd_1 = rh_lloyd_1, rh_lloyd_2 = rh_lloyd_2, rh_lloyd_3 = rh_lloyd_3, - yrs.met = yrs.met, + yrs.met = yrs.met, sm_fire = sm_fire, fire_intensity = fire_intensity, slxsand = slxsand, slxclay = slxclay ) ) - } -#Soil Moisture at saturation (should replace with soil functions in data.lab) -calc.slmsts <- function(slxsand, slxclay){ +# Soil Moisture at saturation (should replace with soil functions in data.lab) +calc.slmsts <- function(slxsand, slxclay) { # Soil moisture at saturation [ m^3/m^3 ] - (50.5 - 14.2*slxsand - 3.7*slxclay) / 100. + (50.5 - 14.2 * slxsand - 3.7 * slxclay) / 100. } -calc.slpots <- function(slxsand, slxclay){ +calc.slpots <- function(slxsand, slxclay) { # Soil moisture potential at saturation [ m ] - -1. * (10.^(2.17 - 0.63*slxclay - 1.58*slxsand)) * 0.01 - + -1. 
* (10.^(2.17 - 0.63 * slxclay - 1.58 * slxsand)) * 0.01 } -calc.slbs <- function(slxsand, slxclay){ +calc.slbs <- function(slxsand, slxclay) { # B exponent (unitless) - 3.10 + 15.7*slxclay - 0.3*slxsand + 3.10 + 15.7 * slxclay - 0.3 * slxsand } -calc.soilcp <- function(slmsts, slpots, slbs){ +calc.soilcp <- function(slmsts, slpots, slbs) { # Dry soil capacity (at -3.1MPa) [ m^3/m^3 ]. # soil(nslcon)%soilcp = soil(nslcon)%slmsts & # * ( soil(nslcon)%slpots / (soilcp_MPa * wdns / grav)) & # ** (1. / soil(nslcon)%slbs) - soilcp_MPa = -3.1 - wdns = 1.000e3 - grav=9.80665 - - slmsts * (slpots / (soilcp_MPa * wdns / grav)) ^ (1./slbs) + soilcp_MPa <- -3.1 + wdns <- 1.000e3 + grav <- 9.80665 + + slmsts * (slpots / (soilcp_MPa * wdns / grav))^(1. / slbs) } # How Ed calculates the fire threshold if sm_fire < 0 -smfire.neg <- function(slmsts, slpots, smfire, slbs){ - grav=9.80665 +smfire.neg <- function(slmsts, slpots, smfire, slbs) { + grav <- 9.80665 # soil(nsoil)%soilfr = soil(nsoil)%slmsts * Â ( soil(nsoil)%slpots / (sm_fire * 1000. / grav)) ** ( 1. / soil(nsoil)%slbs) # soilfr = slmsts * ( slpots / (sm_fire * 1000. / grav)) ** ( 1. / slbs) - soilfr <- slmsts*((slpots/(smfire * 1000/9.80665))^(1/slbs)) - + soilfr <- slmsts * ((slpots / (smfire * 1000 / 9.80665))^(1 / slbs)) + return(soilfr) } # How Ed calculates the fire threshold if sm_fire > 0 -smfire.pos <- function(slmsts, soilcp, smfire){ +smfire.pos <- function(slmsts, soilcp, smfire) { soilfr <- soilcp + smfire * (slmsts - soilcp) return(soilfr) } @@ -149,44 +146,43 @@ smfire.pos <- function(slmsts, soilcp, smfire){ ##' 2015 Aug: Modifications for greater site flexibility & updated ED ##' 2016 Jan: Adaptation for regional-scale runs (single-cells run independently, but executed in batches) ##' 2018 Jul: Conversion to function, Christine Rollinson July 2018 -##'@description This functions approximates landscape equilibrium steady state for vegetation and +##' @description This functions approximates landscape equilibrium steady state for vegetation and ##' soil pools using the successional trajectory of a single patch modeled with disturbance -##' off and the prescribed disturbance rates for runs (Xia et al. 2012 GMD 5:1259-1271). +##' off and the prescribed disturbance rates for runs (Xia et al. 2012 GMD 5:1259-1271). 
##' @param dir.analy Location of ED2 analysis files; expects monthly and yearly output ##' @param dir.histo Location of ED2 history files (for vars not in analy); expects monthly ##' @param outdir Location to write SAS .css & .pss files ##' @param lat site latitude; used for file naming ##' @param lon site longitude; used for file naming ##' @param blckyr Number of years between patch ages (aka blocks) -##' @param prefix ED2 -E- output file prefix +##' @param prefix ED2 -E- output file prefix ##' @param treefall Value to be used for TREEFALL_DISTURBANCE_RATE in ED2IN for full runs (disturbance on) ##' @param param.args ED2 parameter arguments (mostly soil biogeochem) -##' @param sufx ED2 out file suffix; used in constructing file names(default "g01.h5) +##' @param sufx ED2 out file suffix; used in constructing file names(default "g01.h5) ##' @export ##' SAS.ED2 <- function(dir.analy, dir.histo, outdir, lat, lon, blckyr, prefix, treefall, param.args = SAS.ED2.param.Args(), sufx = "g01.h5") { - - if(!param.args$decomp_scheme %in% 0:4) stop("Invalid decomp_scheme") + if (!param.args$decomp_scheme %in% 0:4) stop("Invalid decomp_scheme") # create a directory for the initialization files - dir.create(outdir, recursive=T, showWarnings=F) - + dir.create(outdir, recursive = T, showWarnings = F) + #--------------------------------------- # Setting up some specifics that vary by site (like soil depth) #--------------------------------------- - #Set directories + # Set directories # dat.dir <- dir.analy - ann.files <- dir(dir.analy, "-Y-") #yearly files only - - #Get time window + ann.files <- dir(dir.analy, "-Y-") # yearly files only + + # Get time window # Note: Need to make this more flexible to get the thing after "Y" - yrind <- which(strsplit(ann.files,"-")[[1]] == "Y") - yeara <- as.numeric(strsplit(ann.files,"-")[[1]][yrind+1]) #first year - yearz <- as.numeric(strsplit(ann.files,"-")[[length(ann.files)]][yrind+1]) #last full year - yrs <- seq(yeara+1, yearz, by=blckyr) # The years we're going to use as time steps for the demography + yrind <- which(strsplit(ann.files, "-")[[1]] == "Y") + yeara <- as.numeric(strsplit(ann.files, "-")[[1]][yrind + 1]) # first year + yearz <- as.numeric(strsplit(ann.files, "-")[[length(ann.files)]][yrind + 1]) # last full year + yrs <- seq(yeara + 1, yearz, by = blckyr) # The years we're going to use as time steps for the demography nsteps <- length(yrs) # The number of blocks = the number steps we'll have - + # Need to get the layers being used for calculating temp & moist # Note: In ED there's a pain in the butt way of doing this with the energy, but we're going to approximate # slz <- c(-5.50, -4.50, -2.17, -1.50, -1.10, -0.80, -0.60, -0.45, -0.30, -0.20, -0.12, -0.06) @@ -194,26 +190,28 @@ SAS.ED2 <- function(dir.analy, dir.histo, outdir, lat, lon, blckyr, nc.temp <- ncdf4::nc_open(file.path(dir.analy, ann.files[1])) slz <- ncdf4::ncvar_get(nc.temp, "SLZ") ncdf4::nc_close(nc.temp) - - dslz <- vector(length=length(slz)) - dslz[length(dslz)] <- 0-slz[length(dslz)] - - for(i in 1:(length(dslz)-1)){ - dslz[i] <- slz[i+1] - slz[i] + + dslz <- vector(length = length(slz)) + dslz[length(dslz)] <- 0 - slz[length(dslz)] + + for (i in 1:(length(dslz) - 1)) { + dslz[i] <- slz[i + 1] - slz[i] } - - nsoil=which(slz >= param.args$kh_active_depth-1e-3) # Maximum depth for avg. temperature and moisture; adding a fudge factor bc it's being weird + + nsoil <- which(slz >= param.args$kh_active_depth - 1e-3) # Maximum depth for avg. 
temperature and moisture; adding a fudge factor bc it's being weird # nsoil=length(slz) #--------------------------------------- - + #--------------------------------------- - # First loop over analy files (faster than histo) to aggregate initial + # First loop over analy files (faster than histo) to aggregate initial # .css and .pss files for each site #--------------------------------------- - #create an emtpy storage for the patch info - pss.big <- matrix(nrow=length(yrs),ncol=13) # save every X yrs according to chunks specified above - colnames(pss.big) <- c("time","patch","trk","age","area","water","fsc","stsc","stsl", - "ssc","psc","msn","fsn") + # create an emtpy storage for the patch info + pss.big <- matrix(nrow = length(yrs), ncol = 13) # save every X yrs according to chunks specified above + colnames(pss.big) <- c( + "time", "patch", "trk", "age", "area", "water", "fsc", "stsc", "stsl", + "ssc", "psc", "msn", "fsn" + ) #--------------------------------------- # Finding the mean soil temp & moisture @@ -222,263 +220,267 @@ SAS.ED2 <- function(dir.analy, dir.histo, outdir, lat, lon, blckyr, # This will also be necessary for helping update disturbance parameter #--------------------------------------- soil.obj <- PEcAn.data.land::soil_params(sand = param.args$slxsand, clay = param.args$slxclay) - + slmsts <- calc.slmsts(param.args$slxsand, param.args$slxclay) # Soil Moisture at saturation - slpots <- calc.slpots(param.args$slxsand, param.args$slxclay) # Soil moisture potential at saturation [ m ] - slbs <- calc.slbs(param.args$slxsand, param.args$slxclay) # B exponent (unitless) + slpots <- calc.slpots(param.args$slxsand, param.args$slxclay) # Soil moisture potential at saturation [ m ] + slbs <- calc.slbs(param.args$slxsand, param.args$slxclay) # B exponent (unitless) soilcp <- calc.soilcp(slmsts, slpots, slbs) # Dry soil capacity (at -3.1MPa) [ m^3/m^3 ] - + # Calculating Soil fire characteristics - soilfr=0 - if(abs(param.args$sm_fire)>0){ - if(param.args$sm_fire>0){ - soilfr <- smfire.pos(slmsts, soilcp, smfire=param.args$sm_fire) + soilfr <- 0 + if (abs(param.args$sm_fire) > 0) { + if (param.args$sm_fire > 0) { + soilfr <- smfire.pos(slmsts, soilcp, smfire = param.args$sm_fire) } else { - soilfr <- smfire.neg(slmsts, slpots, smfire=param.args$sm_fire, slbs) + soilfr <- smfire.neg(slmsts, slpots, smfire = param.args$sm_fire, slbs) } } - - month.begin = 1 - month.end = 12 - + + month.begin <- 1 + month.end <- 12 + tempk.air <- tempk.soil <- moist.soil <- moist.soil.mx <- moist.soil.mn <- nfire <- vector() - for(y in yrs){ + for (y in yrs) { air.temp.tmp <- soil.temp.tmp <- soil.moist.tmp <- soil.mmax.tmp <- soil.mmin.tmp <- vector() ind <- which(yrs == y) - for(m in month.begin:month.end){ - #Make the file name. - year.now <-sprintf("%4.4i",y) - month.now <- sprintf("%2.2i",m) - day.now <- sprintf("%2.2i",0) - hour.now <- sprintf("%6.6i",0) - - file.now <- paste(prefix,"-E-",year.now,"-",month.now,"-",day.now,"-" - ,hour.now,"-",sufx,sep="") - + for (m in month.begin:month.end) { + # Make the file name. 
+ year.now <- sprintf("%4.4i", y) + month.now <- sprintf("%2.2i", m) + day.now <- sprintf("%2.2i", 0) + hour.now <- sprintf("%6.6i", 0) + + file.now <- paste(prefix, "-E-", year.now, "-", month.now, "-", day.now, "-", + hour.now, "-", sufx, + sep = "" + ) + # cat(" - Reading file :",file.now,"...","\n") - now <- ncdf4::nc_open(file.path(dir.analy,file.now)) - - air.temp.tmp [m] <- ncdf4::ncvar_get(now, "MMEAN_ATM_TEMP_PY") - soil.temp.tmp [m] <- sum(ncdf4::ncvar_get(now, "MMEAN_SOIL_TEMP_PY")[nsoil]*dslz[nsoil]/sum(dslz[nsoil])) - soil.moist.tmp[m] <- sum(ncdf4::ncvar_get(now, "MMEAN_SOIL_WATER_PY")[nsoil]*dslz[nsoil]/sum(dslz[nsoil])) - soil.mmax.tmp [m] <- max(ncdf4::ncvar_get(now, "MMEAN_SOIL_WATER_PY")) - soil.mmin.tmp [m] <- min(ncdf4::ncvar_get(now, "MMEAN_SOIL_WATER_PY")) - + now <- ncdf4::nc_open(file.path(dir.analy, file.now)) + + air.temp.tmp[m] <- ncdf4::ncvar_get(now, "MMEAN_ATM_TEMP_PY") + soil.temp.tmp[m] <- sum(ncdf4::ncvar_get(now, "MMEAN_SOIL_TEMP_PY")[nsoil] * dslz[nsoil] / sum(dslz[nsoil])) + soil.moist.tmp[m] <- sum(ncdf4::ncvar_get(now, "MMEAN_SOIL_WATER_PY")[nsoil] * dslz[nsoil] / sum(dslz[nsoil])) + soil.mmax.tmp[m] <- max(ncdf4::ncvar_get(now, "MMEAN_SOIL_WATER_PY")) + soil.mmin.tmp[m] <- min(ncdf4::ncvar_get(now, "MMEAN_SOIL_WATER_PY")) + ncdf4::nc_close(now) } # End month loop # Finding yearly means - tempk.air [ind] <- mean(air.temp.tmp) - tempk.soil [ind] <- mean(soil.temp.tmp) - moist.soil [ind] <- mean(soil.moist.tmp) + tempk.air[ind] <- mean(air.temp.tmp) + tempk.soil[ind] <- mean(soil.temp.tmp) + moist.soil[ind] <- mean(soil.moist.tmp) moist.soil.mx[ind] <- max(soil.mmax.tmp) moist.soil.mn[ind] <- min(soil.mmin.tmp) - nfire [ind] <- length(which(soil.moist.tmp<soilfr)) + nfire[ind] <- length(which(soil.moist.tmp < soilfr)) } # End year loop - soil_tempk <- mean(tempk.soil) - rel_soil_moist <- mean(moist.soil/slmsts) # Relativizing by max moisture capacity - pfire <- sum(nfire)/(length(nfire)*12) - fire_return <- ifelse(max(nfire)>0, length(nfire)/length(which(nfire>0)), 0) - + soil_tempk <- mean(tempk.soil) + rel_soil_moist <- mean(moist.soil / slmsts) # Relativizing by max moisture capacity + pfire <- sum(nfire) / (length(nfire) * 12) + fire_return <- ifelse(max(nfire) > 0, length(nfire) / length(which(nfire > 0)), 0) + cat(paste0("mean soil temp : ", round(soil_tempk, 2), "\n")) cat(paste0("mean soil moist : ", round(rel_soil_moist, 3), "\n")) cat(paste0("fire return interval (yrs) : ", fire_return), "\n") #--------------------------------------- - - #--------------------------------------- + + #--------------------------------------- # Calculate area distribution based on geometric decay based loosely on your disturbance rates # Note: This one varies from Jackie's original in that it lets your oldest, undisturbed bin # start a bit larger (everything leftover) to let it get cycled in naturally #--------------------------------------- # ------ # Calculate the Rate of fire & total disturbance # ------ fire_rate <- pfire * param.args$fire_intensity - + # Total disturbance rate = treefall + fire # -- treefall = % area/yr - disturb <- treefall + fire_rate + disturb <- treefall + fire_rate # ------ - - stand.age <- seq(yrs[1]-yeara,nrow(pss.big)*blckyr,by=blckyr) - area.dist <- vector(length=nrow(pss.big)) - area.dist[1] <- sum(stats::dgeom(0:(stand.age[2]-1), disturb)) - for(i in 2:(length(area.dist)-1)){ - area.dist[i] <- sum(stats::dgeom((stand.age[i]):(stand.age[i+1]-1),disturb)) + + stand.age <- seq(yrs[1] - yeara, nrow(pss.big) * blckyr, by = blckyr) + area.dist <- vector(length = nrow(pss.big)) + area.dist[1] <- sum(stats::dgeom(0:(stand.age[2] - 1), disturb)) + for (i in 2:(length(area.dist) - 1)) { + area.dist[i] <- sum(stats::dgeom((stand.age[i]):(stand.age[i + 
1] - 1), disturb)) } - area.dist[length(area.dist)] <- 1 - sum(area.dist[1:(length(area.dist)-1)]) - pss.big[,"area"] <- area.dist - #--------------------------------------- - - - #--------------------------------------- + area.dist[length(area.dist)] <- 1 - sum(area.dist[1:(length(area.dist) - 1)]) + pss.big[, "area"] <- area.dist + #--------------------------------------- + + + #--------------------------------------- # Extraction Loop Part 1: Cohorts!! # This loop does the following: # -- Extract cohort info from each age slice from *annual* *analy* files (these are annual means) # -- Write cohort info to the .css file as a new patch for each age slice # -- Dummy extractions of patch-level variables; all of the important variables here are place holders - #--------------------------------------- - cat(" - Reading analy files ...","\n") - for (y in yrs){ - now <- ncdf4::nc_open(file.path(dir.analy,ann.files[y-yeara+1])) + #--------------------------------------- + cat(" - Reading analy files ...", "\n") + for (y in yrs) { + now <- ncdf4::nc_open(file.path(dir.analy, ann.files[y - yeara + 1])) ind <- which(yrs == y) - - #Grab variable to see how many cohorts there are - ipft <- ncdf4::ncvar_get(now,'PFT') - + + # Grab variable to see how many cohorts there are + ipft <- ncdf4::ncvar_get(now, "PFT") + #--------------------------------------- # organize into .css variables (Cohorts) # Note: all cohorts from a time slice are assigned to a single patch representing a stand of age X #--------------------------------------- - css.tmp <- matrix(nrow=length(ipft),ncol=10) + css.tmp <- matrix(nrow = length(ipft), ncol = 10) colnames(css.tmp) <- c("time", "patch", "cohort", "dbh", "hite", "pft", "n", "bdead", "balive", "Avgrg") - - css.tmp[,"time" ] <- rep(yeara,length(ipft)) - css.tmp[,"patch" ] <- rep(floor((y-yeara)/blckyr)+1,length(ipft)) - css.tmp[,"cohort"] <- 1:length(ipft) - css.tmp[,"dbh" ] <- ncdf4::ncvar_get(now,'DBH') - css.tmp[,"hite" ] <- ncdf4::ncvar_get(now,'HITE') - css.tmp[,"pft" ] <- ipft - css.tmp[,"n" ] <- ncdf4::ncvar_get(now,'NPLANT') - css.tmp[,"bdead" ] <- ncdf4::ncvar_get(now,'BDEAD') - css.tmp[,"balive"] <- ncdf4::ncvar_get(now,'BALIVE') - css.tmp[,"Avgrg" ] <- rep(0,length(ipft)) - - #save big .css matrix - if(y==yrs[1]){ + + css.tmp[, "time"] <- rep(yeara, length(ipft)) + css.tmp[, "patch"] <- rep(floor((y - yeara) / blckyr) + 1, length(ipft)) + css.tmp[, "cohort"] <- 1:length(ipft) + css.tmp[, "dbh"] <- ncdf4::ncvar_get(now, "DBH") + css.tmp[, "hite"] <- ncdf4::ncvar_get(now, "HITE") + css.tmp[, "pft"] <- ipft + css.tmp[, "n"] <- ncdf4::ncvar_get(now, "NPLANT") + css.tmp[, "bdead"] <- ncdf4::ncvar_get(now, "BDEAD") + css.tmp[, "balive"] <- ncdf4::ncvar_get(now, "BALIVE") + css.tmp[, "Avgrg"] <- rep(0, length(ipft)) + + # save big .css matrix + if (y == yrs[1]) { css.big <- css.tmp - } else{ - css.big <- rbind(css.big,css.tmp) + } else { + css.big <- rbind(css.big, css.tmp) } #--------------------------------------- - - + + #--------------------------------------- # save .pss variables (Patches) # NOTE: patch AREA needs to be adjusted to be equal to the probability of a stand of age x on the landscape #--------------------------------------- - pss.big[ind,"time"] <- 1800 - pss.big[ind,"patch"] <- floor((y-yeara)/blckyr)+1 - pss.big[ind,"trk"] <- 1 - pss.big[ind,"age"] <- y-yeara + pss.big[ind, "time"] <- 1800 + pss.big[ind, "patch"] <- floor((y - yeara) / blckyr) + 1 + pss.big[ind, "trk"] <- 1 + pss.big[ind, "age"] <- y - yeara # Note: the following are just place 
holders that will be overwritten post-SAS # pss.big[ind,6] <- ncdf4::ncvar_get(now,"AREA") - pss.big[ind,"water"] <- 0.5 - pss.big[ind,"fsc"] <- ncdf4::ncvar_get(now,"FAST_SOIL_C") - pss.big[ind,"stsc"] <- ncdf4::ncvar_get(now,"STRUCTURAL_SOIL_C") - pss.big[ind,"stsl"] <- ncdf4::ncvar_get(now,"STRUCTURAL_SOIL_L") - pss.big[ind,"ssc"] <- ncdf4::ncvar_get(now,"SLOW_SOIL_C") - pss.big[ind,"psc"] <- 0 - pss.big[ind,"msn"] <- ncdf4::ncvar_get(now,"MINERALIZED_SOIL_N") - pss.big[ind,"fsn"] <- ncdf4::ncvar_get(now,"FAST_SOIL_N") - + pss.big[ind, "water"] <- 0.5 + pss.big[ind, "fsc"] <- ncdf4::ncvar_get(now, "FAST_SOIL_C") + pss.big[ind, "stsc"] <- ncdf4::ncvar_get(now, "STRUCTURAL_SOIL_C") + pss.big[ind, "stsl"] <- ncdf4::ncvar_get(now, "STRUCTURAL_SOIL_L") + pss.big[ind, "ssc"] <- ncdf4::ncvar_get(now, "SLOW_SOIL_C") + pss.big[ind, "psc"] <- 0 + pss.big[ind, "msn"] <- ncdf4::ncvar_get(now, "MINERALIZED_SOIL_N") + pss.big[ind, "fsn"] <- ncdf4::ncvar_get(now, "FAST_SOIL_N") + ncdf4::nc_close(now) } - #--------------------------------------- - - #--------------------------------------- + #--------------------------------------- + + #--------------------------------------- # Extraction Loop Part 2: Patches! # This loop does the following: # -- Extract age slice (new patch) soil carbon conditions from *monthly* *histo* files - # -- Note: this is done because most of the necessary inputs for SAS are instantaneous values that - # are not currently tracked in analy files, let alone annual analy files; this could + # -- Note: this is done because most of the necessary inputs for SAS are instantaneous values that + # are not currently tracked in analy files, let alone annual analy files; this could # theoretically change in the future - # -- Monthly data is then aggregated to a yearly value: sum for carbon inputs; mean for temp/moist + # -- Monthly data is then aggregated to a yearly value: sum for carbon inputs; mean for temp/moist # (if not calculated above) #--------------------------------------- - pss.big <- pss.big[stats::complete.cases(pss.big),] - + pss.big <- pss.big[stats::complete.cases(pss.big), ] + # some empty vectors for storage etc fsc_in_y <- ssc_in_y <- ssl_in_y <- fsn_in_y <- pln_up_y <- vector() - fsc_in_m <- ssc_in_m <- ssl_in_m <- fsn_in_m <- pln_up_m <- vector() + fsc_in_m <- ssc_in_m <- ssl_in_m <- fsn_in_m <- pln_up_m <- vector() # # NOTE: The following line should get removed if we roll with 20-year mean temp & moist # soil_tempk_y <- soil_tempk_m <- swc_max_m <- swc_max_y <- swc_m <- swc_y <- vector() - + # switch to the histo directory # dat.dir <- file.path(in.base,sites[s],"/analy/") - mon.files <- dir(dir.histo, "-S-") # monthly files only - - #Get time window - yeara <- as.numeric(strsplit(mon.files,"-")[[1]][yrind+1]) #first year - yearz <- as.numeric(strsplit(mon.files,"-")[[length(mon.files)-1]][yrind+1]) #last year - - montha <- as.numeric(strsplit(mon.files,"-")[[1]][yrind+2]) #first month - monthz <- as.numeric(strsplit(mon.files,"-")[[length(mon.files)-1]][yrind+2]) #last month - + mon.files <- dir(dir.histo, "-S-") # monthly files only + + # Get time window + yeara <- as.numeric(strsplit(mon.files, "-")[[1]][yrind + 1]) # first year + yearz <- as.numeric(strsplit(mon.files, "-")[[length(mon.files) - 1]][yrind + 1]) # last year + + montha <- as.numeric(strsplit(mon.files, "-")[[1]][yrind + 2]) # first month + monthz <- as.numeric(strsplit(mon.files, "-")[[length(mon.files) - 1]][yrind + 2]) # last month + cat(" - Processing History Files \n") - for (y in yrs){ + for 
(y in yrs) { dpm <- lubridate::days_in_month(1:12) - if(lubridate::leap_year(y)) dpm[2] <- dpm[2]+1 - #calculate month start/end based on year - if (y == yrs[1]){ - month.begin = montha - }else{ - month.begin = 1 + if (lubridate::leap_year(y)) dpm[2] <- dpm[2] + 1 + # calculate month start/end based on year + if (y == yrs[1]) { + month.begin <- montha + } else { + month.begin <- 1 } - if (y == yrs[length(yrs)]){ - month.end = monthz - }else{ - month.end = 12 + if (y == yrs[length(yrs)]) { + month.end <- monthz + } else { + month.end <- 12 } - - for(m in month.begin:month.end){ - #Make the file name. - year.now <-sprintf("%4.4i",y) - month.now <- sprintf("%2.2i",m) - day.now <- sprintf("%2.2i",1) - hour.now <- sprintf("%6.6i",0) - + + for (m in month.begin:month.end) { + # Make the file name. + year.now <- sprintf("%4.4i", y) + month.now <- sprintf("%2.2i", m) + day.now <- sprintf("%2.2i", 1) + hour.now <- sprintf("%6.6i", 0) + # dat.dir <- paste(in.base,sites[s],"/histo/",sep="") - file.now <- paste(prefix,"-S-",year.now,"-",month.now,"-",day.now,"-" - ,hour.now,"-",sufx,sep="") - + file.now <- paste(prefix, "-S-", year.now, "-", month.now, "-", day.now, "-", + hour.now, "-", sufx, + sep = "" + ) + # cat(" - Reading file :",file.now,"...","\n") - now <- ncdf4::nc_open(file.path(dir.histo,file.now)) - + now <- ncdf4::nc_open(file.path(dir.histo, file.now)) + # Note: we have to convert the daily value for 1 month by days per month to get a monthly estimate - fsc_in_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"FSC_IN")*dpm[m] #kg/(m2*day) --> kg/(m2*month) - ssc_in_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"SSC_IN")*dpm[m] - ssl_in_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"SSL_IN")*dpm[m] - fsn_in_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"FSN_IN")*dpm[m] - pln_up_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"TOTAL_PLANT_NITROGEN_UPTAKE")*dpm[m] + fsc_in_m[m - month.begin + 1] <- ncdf4::ncvar_get(now, "FSC_IN") * dpm[m] # kg/(m2*day) --> kg/(m2*month) + ssc_in_m[m - month.begin + 1] <- ncdf4::ncvar_get(now, "SSC_IN") * dpm[m] + ssl_in_m[m - month.begin + 1] <- ncdf4::ncvar_get(now, "SSL_IN") * dpm[m] + fsn_in_m[m - month.begin + 1] <- ncdf4::ncvar_get(now, "FSN_IN") * dpm[m] + pln_up_m[m - month.begin + 1] <- ncdf4::ncvar_get(now, "TOTAL_PLANT_NITROGEN_UPTAKE") * dpm[m] # ssc_in_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"SSC_IN")*dpm[m] - + # # NOTE: the following lines shoudl get removed if using 20-year means # soil_tempk_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"SOIL_TEMPK_PA")[nsoil] # Surface soil temp # swc_max_m[m-month.begin+1] <- max(ncdf4::ncvar_get(now,"SOIL_WATER_PA")) # max soil moist to avoid digging through water capacity stuff # swc_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"SOIL_WATER_PA")[nsoil] #Surface soil moist - + ncdf4::nc_close(now) } # Find which patch we're working in - ind <- (y-yeara)/blckyr + 1 - + ind <- (y - yeara) / blckyr + 1 + # Sum monthly values to get a total estimated carbon input - fsc_in_y[ind] <- sum(fsc_in_m,na.rm=TRUE) - ssc_in_y[ind] <- sum(ssc_in_m,na.rm=TRUE) - ssl_in_y[ind] <- sum(ssl_in_m,na.rm=TRUE) - fsn_in_y[ind] <- sum(fsn_in_m,na.rm=TRUE) - pln_up_y[ind] <- sum(pln_up_m,na.rm=TRUE) - + fsc_in_y[ind] <- sum(fsc_in_m, na.rm = TRUE) + ssc_in_y[ind] <- sum(ssc_in_m, na.rm = TRUE) + ssl_in_y[ind] <- sum(ssl_in_m, na.rm = TRUE) + fsn_in_y[ind] <- sum(fsn_in_m, na.rm = TRUE) + pln_up_y[ind] <- sum(pln_up_m, na.rm = TRUE) + # # Soil temp & moisture here should get deleted if using the 20-year means - # soil_tempk_y[ind] <- 
mean(soil_tempk_m,na.rm=TRUE) - # swc_y[ind] <- mean(swc_m,na.rm=TRUE)/max(swc_max_m,na.rm=TRUE) + # soil_tempk_y[ind] <- mean(soil_tempk_m,na.rm=TRUE) + # swc_y[ind] <- mean(swc_m,na.rm=TRUE)/max(swc_max_m,na.rm=TRUE) } #--------------------------------------- - - #--------------------------------------- + + #--------------------------------------- # Calculate steady-state soil pools! # - # These are the equations from soil_respiration.f90 -- if this module has changed, these need - # Note: We ignore the unit conversions here because we're now we're working with the yearly + # These are the equations from soil_respiration.f90 -- if this module has changed, these need + # Note: We ignore the unit conversions here because we're now we're working with the yearly # sum so that we end up with straight kgC/m2 # fast_C_loss <- kgCday_2_umols * A_decomp * decay_rate_fsc * fast_soil_C # struc_C_loss <- kgCday_2_umols * A_decomp * Lc * decay_rate_stsc * struct_soil_C * f_decomp # slow_C_loss <- kcCday_2_umols * A_decomp * decay_rate_ssc * slow_soil_C #--------------------------------------- - + # ----------------------- # Calculate the annual carbon loss if things are stable # ----------- @@ -486,128 +488,130 @@ SAS.ED2 <- function(dir.analy, dir.histo, outdir, lat, lon, blckyr, ssc_loss <- param.args$decay_rate_ssc ssl_loss <- param.args$decay_rate_stsc # ----------- - - + + # ************************************* # Calculate A_decomp according to your DECOMP_SCPEME # A_decomp <- temperature_limitation * water_limitation # aka het_resp_weight # ************************************* # ======================== - # Temperature Limitation + # Temperature Limitation # ======================== # soil_tempk <- sum(soil_tempo_y*area.dist) - if(param.args$decomp_scheme %in% c(0, 3)){ - temperature_limitation = min(1,exp(param.args$resp_temperature_increase * (soil_tempk-318.15))) - } else if(param.args$decomp_scheme %in% c(1,4)){ - lnexplloyd = param.args$rh_lloyd_1 * ( param.args$rh_lloyd_2 - 1. / (soil_tempk - param.args$rh_lloyd_3)) - lnexplloyd = max(-38.,min(38,lnexplloyd)) - temperature_limitation = min( 1.0, param.args$resp_temperature_increase * exp(lnexplloyd) ) - } else if(param.args$decomp_scheme==2) { + if (param.args$decomp_scheme %in% c(0, 3)) { + temperature_limitation <- min(1, exp(param.args$resp_temperature_increase * (soil_tempk - 318.15))) + } else if (param.args$decomp_scheme %in% c(1, 4)) { + lnexplloyd <- param.args$rh_lloyd_1 * (param.args$rh_lloyd_2 - 1. 
/ (soil_tempk - param.args$rh_lloyd_3)) + lnexplloyd <- max(-38., min(38, lnexplloyd)) + temperature_limitation <- min(1.0, param.args$resp_temperature_increase * exp(lnexplloyd)) + } else if (param.args$decomp_scheme == 2) { # Low Temp Limitation lnexplow <- param.args$rh_decay_low * (param.args$rh_low_temp - soil_tempk) lnexplow <- max(-38, min(38, lnexplow)) tlow_fun <- 1 + exp(lnexplow) - + # High Temp Limitation - lnexphigh <- param.args$rh_decay_high*(soil_tempk - param.args$rh_high_temp) + lnexphigh <- param.args$rh_decay_high * (soil_tempk - param.args$rh_high_temp) lnexphigh <- max(-38, min(38, lnexphigh)) thigh_fun <- 1 + exp(lnexphigh) - - temperature_limitation <- 1/(tlow_fun*thigh_fun) - } + + temperature_limitation <- 1 / (tlow_fun * thigh_fun) + } # ======================== - + # ======================== - # Moisture Limitation + # Moisture Limitation # ======================== # rel_soil_moist <- sum(swc_y*area.dist) - if(param.args$decomp_scheme %in% c(0,1)){ - if (rel_soil_moist <= param.args$resp_opt_water){ - water_limitation = exp( (rel_soil_moist - param.args$resp_opt_water) * param.args$resp_water_below_opt) + if (param.args$decomp_scheme %in% c(0, 1)) { + if (rel_soil_moist <= param.args$resp_opt_water) { + water_limitation <- exp((rel_soil_moist - param.args$resp_opt_water) * param.args$resp_water_below_opt) } else { - water_limitation = exp( (param.args$resp_opt_water - rel_soil_moist) * param.args$resp_water_above_opt) + water_limitation <- exp((param.args$resp_opt_water - rel_soil_moist) * param.args$resp_water_above_opt) } - } else if(param.args$decomp_scheme==2){ + } else if (param.args$decomp_scheme == 2) { # Dry soil Limitation lnexpdry <- param.args$rh_decay_dry * (param.args$rh_dry_smoist - rel_soil_moist) lnexpdry <- max(-38, min(38, lnexpdry)) - smdry_fun <- 1+exp(lnexpdry) - + smdry_fun <- 1 + exp(lnexpdry) + # Wet Soil limitation lnexpwet <- param.args$rh_decay_wet * (rel_soil_moist - param.args$rh_wet_smoist) lnexpwet <- max(-38, min(38, lnexpwet)) - smwet_fun <- 1+exp(lnexpwet) - - water_limitation <- 1/(smdry_fun * smwet_fun) + smwet_fun <- 1 + exp(lnexpwet) + + water_limitation <- 1 / (smdry_fun * smwet_fun) } else { - water_limitation = rel_soil_moist * 4.0893 - rel_soil_moist**2 * 3.1681 - 0.3195897 + water_limitation <- rel_soil_moist * 4.0893 - rel_soil_moist**2 * 3.1681 - 0.3195897 } # ======================== - + A_decomp <- temperature_limitation * water_limitation # aka het_resp_weight # ************************************* - + # ************************************* # Calculate the steady-state pools - # NOTE: Current implementation weights carbon input by patch size rather than using the + # NOTE: Current implementation weights carbon input by patch size rather than using the # carbon balance from the oldest state (as was the first implementation) # ************************************* # ------------------- # Do the carbon and fast nitrogen pools # ------------------- - fsc_ss <- fsc_in_y[length(fsc_in_y)]/(fsc_loss * A_decomp) - ssl_ss <- ssl_in_y[length(ssl_in_y)]/(ssl_loss * A_decomp * param.args$Lc) # Structural soil C - ssc_ss <- ((ssl_loss * A_decomp * param.args$Lc * ssl_ss)*(1 - param.args$r_stsc))/(ssc_loss * A_decomp ) - fsn_ss <- fsn_in_y[length(fsn_in_y)]/(fsc_loss * A_decomp) + fsc_ss <- fsc_in_y[length(fsc_in_y)] / (fsc_loss * A_decomp) + ssl_ss <- ssl_in_y[length(ssl_in_y)] / (ssl_loss * A_decomp * param.args$Lc) # Structural soil C + ssc_ss <- ((ssl_loss * A_decomp * param.args$Lc * ssl_ss) * (1 - param.args$r_stsc)) / 
(ssc_loss * A_decomp) + fsn_ss <- fsn_in_y[length(fsn_in_y)] / (fsc_loss * A_decomp) # ------------------- - + # ------------------- # Do the mineralized nitrogen calculation # ------------------- - #ED2: csite%mineralized_N_loss = csite%total_plant_nitrogen_uptake(ipa) - # + csite%today_Af_decomp(ipa) * Lc * K1 * csite%structural_soil_C(ipa) + # ED2: csite%mineralized_N_loss = csite%total_plant_nitrogen_uptake(ipa) + # + csite%today_Af_decomp(ipa) * Lc * K1 * csite%structural_soil_C(ipa) # * ( (1.0 - r_stsc) / c2n_slow - 1.0 / c2n_structural) - msn_loss <- pln_up_y[length(pln_up_y)] + - A_decomp*param.args$Lc*ssl_loss*ssl_in_y[length(ssl_in_y)]* - ((1.0-param.args$r_stsc)/param.args$c2n_slow - 1.0/param.args$c2n_structural) - - #fast_N_loss + slow_C_loss/c2n_slow - msn_med <- fsc_loss*A_decomp*fsn_in_y[length(fsn_in_y)]+ (ssc_loss * A_decomp)/param.args$c2n_slow - - msn_ss <- msn_med/msn_loss + msn_loss <- pln_up_y[length(pln_up_y)] + + A_decomp * param.args$Lc * ssl_loss * ssl_in_y[length(ssl_in_y)] * + ((1.0 - param.args$r_stsc) / param.args$c2n_slow - 1.0 / param.args$c2n_structural) + + # fast_N_loss + slow_C_loss/c2n_slow + msn_med <- fsc_loss * A_decomp * fsn_in_y[length(fsn_in_y)] + (ssc_loss * A_decomp) / param.args$c2n_slow + + msn_ss <- msn_med / msn_loss # ------------------- # ************************************* - + # ************************************* # Replace dummy values in patch matrix with the steady state calculations # ************************************* - # Figure out what steady state value index we shoudl use - # Note: In the current implementaiton this should be 1 because we did the weighted averaging up front, + # Figure out what steady state value index we shoudl use + # Note: In the current implementaiton this should be 1 because we did the weighted averaging up front, # but if something went wrong and dimensions are off, use this to pick the last (etc) p.use <- 1 - + # write the values to file - pss.big[,"patch"] <- 1:nrow(pss.big) - pss.big[,"area"] <- area.dist - pss.big[,"fsc"] <- rep(fsc_ss[p.use],nrow(pss.big)) # fsc - pss.big[,"stsc"] <- rep(ssl_ss[p.use],nrow(pss.big)) # stsc - pss.big[,"stsl"] <- rep(ssl_ss[p.use],nrow(pss.big)) # stsl (not used) - pss.big[,"ssc"] <- rep(ssc_ss[p.use],nrow(pss.big)) # ssc - pss.big[,"msn"] <- rep(msn_ss[p.use],nrow(pss.big)) # msn - pss.big[,"fsn"] <- rep(fsn_ss[p.use],nrow(pss.big)) # fsn + pss.big[, "patch"] <- 1:nrow(pss.big) + pss.big[, "area"] <- area.dist + pss.big[, "fsc"] <- rep(fsc_ss[p.use], nrow(pss.big)) # fsc + pss.big[, "stsc"] <- rep(ssl_ss[p.use], nrow(pss.big)) # stsc + pss.big[, "stsl"] <- rep(ssl_ss[p.use], nrow(pss.big)) # stsl (not used) + pss.big[, "ssc"] <- rep(ssc_ss[p.use], nrow(pss.big)) # ssc + pss.big[, "msn"] <- rep(msn_ss[p.use], nrow(pss.big)) # msn + pss.big[, "fsn"] <- rep(fsn_ss[p.use], nrow(pss.big)) # fsn # ************************************* #--------------------------------------- - + #--------------------------------------- # Write everything to file!! 
#--------------------------------------- - file.prefix=paste0(prefix, "-lat", lat, "lon", lon) - utils::write.table(css.big,file=file.path(outdir,paste0(file.prefix,".css")),row.names=FALSE,append=FALSE, - col.names=TRUE,quote=FALSE) - - utils::write.table(pss.big,file=file.path(outdir,paste0(file.prefix,".pss")),row.names=FALSE,append=FALSE, - col.names=TRUE,quote=FALSE) + file.prefix <- paste0(prefix, "-lat", lat, "lon", lon) + utils::write.table(css.big, + file = file.path(outdir, paste0(file.prefix, ".css")), row.names = FALSE, append = FALSE, + col.names = TRUE, quote = FALSE + ) + + utils::write.table(pss.big, + file = file.path(outdir, paste0(file.prefix, ".pss")), row.names = FALSE, append = FALSE, + col.names = TRUE, quote = FALSE + ) #--------------------------------------- - } - \ No newline at end of file diff --git a/models/ed/R/SDA.helpers.ED2.R b/models/ed/R/SDA.helpers.ED2.R index 1a054f76ee5..dc34fa60542 100644 --- a/models/ed/R/SDA.helpers.ED2.R +++ b/models/ed/R/SDA.helpers.ED2.R @@ -1,74 +1,81 @@ #' @title Get ED history restart file path #' #' @author Alexey Shiklomanov -#' @param mod_outdir Directory where PEcAn stores ensemble outputs. Usually +#' @param mod_outdir Directory where PEcAn stores ensemble outputs. Usually #' \code{/out} #' @param runid PEcAn run ID #' @param file.time Start or end time from SDA analysis #' @export get_restartfile.ED2 <- function(mod_outdir, runid, file.time) { - runid <- as.character(runid) - histfile_path <- file.path(mod_outdir, runid) - - # the frequency of history files might change depending on assimilation timestep - # we can identify this from the timestamps - history_files <- dir(histfile_path, "-S-") - # extract time stamps info from the file names - htimestamps <- sapply(seq_along(history_files), function(f){ - index <- gregexpr("-S-", history_files[1])[[1]] - tstamp <- substr(history_files[f], index[1] + 3, index[1] + 12) - }) - - # NOTE: ED2 writes annual history files on the same month e.g. in ed_model.F90: - # - # history_time = new_month .and. isoutput /= 0 .and. & - # current_time%month == imontha .and. & - # mod(real(current_time%year-iyeara),frqstate) == 0. 
- # - # So the annual history file after starting a run at 1961/01/01 will be "history-S-1962-01-01-000000-g01.h5" - # this version of code checks for annual -S- files, but putting these flags here to remind that there can be monthly or daily history files - annual_check <- monthly_check <- daily_check <- FALSE # this will result in reading the first file as YYYY-01-01 regardless of assimilation time step - - # if(length(htimestamps) > 1){ - # diff_check <- difftime(htimestamps[2], htimestamps[1], units = c("hours")) - # monthly_check <- ifelse(diff_check > 744, FALSE, TRUE) - # daily_check <- ifelse(diff_check > 24, FALSE, TRUE) - # # if you want to extend this to checks for sub-daily assimilations, also modify timestamp extraction above - # } - - # file.time comes from upstream in the format of "yyyy-12-31 23:59:59 UTC" - # to match ED2 naming for annual history files, round to next day "YYYY-01-01 UTC" - ed.hist.annual <- lubridate::ceiling_date(file.time, "1 day") - - file_year <- ifelse(annual_check, lubridate::year(file.time), lubridate::year(ed.hist.annual)) - file_month <- ifelse(monthly_check, lubridate::month(file.time), lubridate::month(ed.hist.annual)) - file_day <- ifelse(daily_check, lubridate::day(file.time), lubridate::day(ed.hist.annual)) - - # check how ED2 writes other -S- files - + runid <- as.character(runid) + histfile_path <- file.path(mod_outdir, runid) - datetime_string <- sprintf("%04d-%02d-%02d-000000", - file_year, - file_month, - file_day) - histfile_string <- paste0("history-S-", - datetime_string, - ".*\\.h5$") + # the frequency of history files might change depending on assimilation timestep + # we can identify this from the timestamps + history_files <- dir(histfile_path, "-S-") + # extract time stamps info from the file names + htimestamps <- sapply(seq_along(history_files), function(f) { + index <- gregexpr("-S-", history_files[1])[[1]] + tstamp <- substr(history_files[f], index[1] + 3, index[1] + 12) + }) - histfile <- list.files(histfile_path, - histfile_string, - full.names = TRUE) - if (length(histfile) > 1) { - PEcAn.logger::logger.error("Multiple history files found.") - return(NULL) - } else if (length(histfile) < 1) { - PEcAn.logger::logger.error("No history files found.") - return(NULL) - } else { - PEcAn.logger::logger.info("Using history file: ", - histfile) - return(histfile) - } + # NOTE: ED2 writes annual history files on the same month e.g. in ed_model.F90: + # + # history_time = new_month .and. isoutput /= 0 .and. & + # current_time%month == imontha .and. & + # mod(real(current_time%year-iyeara),frqstate) == 0. 
+ # + # So the annual history file after starting a run at 1961/01/01 will be "history-S-1962-01-01-000000-g01.h5" + # this version of code checks for annual -S- files, but putting these flags here to remind that there can be monthly or daily history files + annual_check <- monthly_check <- daily_check <- FALSE # this will result in reading the first file as YYYY-01-01 regardless of assimilation time step + + # if(length(htimestamps) > 1){ + # diff_check <- difftime(htimestamps[2], htimestamps[1], units = c("hours")) + # monthly_check <- ifelse(diff_check > 744, FALSE, TRUE) + # daily_check <- ifelse(diff_check > 24, FALSE, TRUE) + # # if you want to extend this to checks for sub-daily assimilations, also modify timestamp extraction above + # } + + # file.time comes from upstream in the format of "yyyy-12-31 23:59:59 UTC" + # to match ED2 naming for annual history files, round to next day "YYYY-01-01 UTC" + ed.hist.annual <- lubridate::ceiling_date(file.time, "1 day") + + file_year <- ifelse(annual_check, lubridate::year(file.time), lubridate::year(ed.hist.annual)) + file_month <- ifelse(monthly_check, lubridate::month(file.time), lubridate::month(ed.hist.annual)) + file_day <- ifelse(daily_check, lubridate::day(file.time), lubridate::day(ed.hist.annual)) + + # check how ED2 writes other -S- files + + + datetime_string <- sprintf( + "%04d-%02d-%02d-000000", + file_year, + file_month, + file_day + ) + histfile_string <- paste0( + "history-S-", + datetime_string, + ".*\\.h5$" + ) + + histfile <- list.files(histfile_path, + histfile_string, + full.names = TRUE + ) + if (length(histfile) > 1) { + PEcAn.logger::logger.error("Multiple history files found.") + return(NULL) + } else if (length(histfile) < 1) { + PEcAn.logger::logger.error("No history files found.") + return(NULL) + } else { + PEcAn.logger::logger.info( + "Using history file: ", + histfile + ) + return(histfile) + } } #' @title Get ED2 config.xml file @@ -77,22 +84,21 @@ get_restartfile.ED2 <- function(mod_outdir, runid, file.time) { #' @param rundir Model run directory. Usually \code{/run} #' @inheritParams get_restartfile.ED2 get_configxml.ED2 <- function(rundir, runid) { - runid <- as.character(runid) - confxml_path <- file.path(rundir, runid, "config.xml") - confxml <- XML::xmlToList(XML::xmlParse(confxml_path)) - return(confxml) + runid <- as.character(runid) + confxml_path <- file.path(rundir, runid, "config.xml") + confxml <- XML::xmlToList(XML::xmlParse(confxml_path)) + return(confxml) } #' @title Generate ED2 cohort to patch mapping vector -#' +#' #' @author Alexey Shiklomanov -#' @description Generate a vector of integer indices for mapping ED state +#' @description Generate a vector of integer indices for mapping ED state #' cohort vectors onto patches, for instance for use with \code{tapply}. #' @param nc \code{ncdf4} object for ED history restart file. patch_cohort_index <- function(nc) { - # Patch length - paco_N <- ncdf4::ncvar_get(nc, "PACO_N") - patch_index <- do.call(c, mapply(rep, seq_along(paco_N), paco_N)) - return(patch_index) + # Patch length + paco_N <- ncdf4::ncvar_get(nc, "PACO_N") + patch_index <- do.call(c, mapply(rep, seq_along(paco_N), paco_N)) + return(patch_index) } - diff --git a/models/ed/R/check_ed2in.R b/models/ed/R/check_ed2in.R index 338bb2faf51..bdfb9bb0020 100644 --- a/models/ed/R/check_ed2in.R +++ b/models/ed/R/check_ed2in.R @@ -1,7 +1,7 @@ #' Check ED2IN #' -#' Check the basic structure of `ed2in` object, as well as consistency among -#' arguments (e.g. 
run dates and coordinates are within the range of vegetation +#' Check the basic structure of `ed2in` object, as well as consistency among +#' arguments (e.g. run dates and coordinates are within the range of vegetation #' and meteorology data). #' #' @inheritParams write_ed2in @@ -17,7 +17,7 @@ check_ed2in <- function(ed2in) { "IPHENYFF" ) unset <- !names(ed2in) %in% can_be_unset & - (purrr::map_lgl(ed2in, ~all(is.na(.))) | grepl("@.*?@", ed2in)) + (purrr::map_lgl(ed2in, ~ all(is.na(.))) | grepl("@.*?@", ed2in)) if (sum(unset) > 0) { PEcAn.logger::logger.severe( "The following required ED2IN tags are unset: ", diff --git a/models/ed/R/check_ed_metheader.R b/models/ed/R/check_ed_metheader.R index 7ef01f4872b..04d666d8567 100644 --- a/models/ed/R/check_ed_metheader.R +++ b/models/ed/R/check_ed_metheader.R @@ -1,11 +1,11 @@ #' Check ED met header object #' -#' Check that the object has all components, and throw an error if anything is -#' wrong. Optionally, do some basic checks of actualy meteorology files as +#' Check that the object has all components, and throw an error if anything is +#' wrong. Optionally, do some basic checks of actualy meteorology files as #' well. #' -#' `check_ed_metheader_format` checks an individual format (one item in the -#' `ed_metheader` list). `check_ed_metheader` applies these checks to each item +#' `check_ed_metheader_format` checks an individual format (one item in the +#' `ed_metheader` list). `check_ed_metheader` applies these checks to each item #' in the format list. #' #' @param ed_metheader ED meteorology header object (see [read_ed_metheader]) @@ -13,7 +13,7 @@ #' @inheritParams read_ed_metheader #' @export check_ed_metheader <- function(ed_metheader, check_files = TRUE) { - if(is.null(names(ed_metheader[[1]]))) { + if (is.null(names(ed_metheader[[1]]))) { stop("ED met header object is not a nested list") } .z <- lapply(ed_metheader, check_ed_metheader_format, check_files = check_files) @@ -24,10 +24,10 @@ check_ed_metheader <- function(ed_metheader, check_files = TRUE) { #' @export check_ed_metheader_format <- function(ed_metheader_format, check_files = TRUE) { correct_names <- c("path_prefix", "nlon", "nlat", "dx", "dy", "xmin", "ymin", "variables") - if(!all(names(ed_metheader_format) %in% correct_names)) { + if (!all(names(ed_metheader_format) %in% correct_names)) { stop("Format does not have the correct names") } - + met_files <- PEcAn.utils::match_file(ed_metheader_format$path_prefix) stopifnot(length(met_files) >= 1) stopifnot(all(file.exists(met_files))) @@ -58,7 +58,7 @@ check_ed_metfile <- function(metfile, variables) { hfile <- hdf5r::H5File$new(metfile, mode = "r") # Remove variables that are not constants variables <- variables[variables$flag != 4, ] - if(!all(variables$variable %in% hfile$ls()$name)) { + if (!all(variables$variable %in% hfile$ls()$name)) { stop("All variables not present in metfile") } } diff --git a/models/ed/R/check_veg.R b/models/ed/R/check_veg.R index b69c1be2cbb..7a64a282511 100644 --- a/models/ed/R/check_veg.R +++ b/models/ed/R/check_veg.R @@ -1,6 +1,6 @@ #' Check individual ED input files #' -#' Check internal file formatting, and optionally check for compatibility +#' Check internal file formatting, and optionally check for compatibility #' against related files. 
#' #' @param css css data object (see [read_css]) @@ -9,22 +9,23 @@ #' @return `NULL` (invisibly) #' @export check_css <- function(css, pss = NULL) { - if(!inherits(css, "data.frame") || nrow(css) == 0) { + if (!inherits(css, "data.frame") || nrow(css) == 0) { stop("css file should be a data frame") } - - expected_colnames <- c("time", "patch", "cohort", "dbh", "hite", "pft", - "n", "bdead", "balive", "lai") + + expected_colnames <- c( + "time", "patch", "cohort", "dbh", "hite", "pft", + "n", "bdead", "balive", "lai" + ) if (!identical(colnames(css), expected_colnames)) { stop("css file is formatted incorrectly") } - + if (!is.null(pss)) { - if(!all(unique(css$patch) %in% unique(pss$patch))) { + if (!all(unique(css$patch) %in% unique(pss$patch))) { stop("css file and pss file are not compatible") } } - } #' @rdname check_css @@ -34,8 +35,7 @@ check_pss <- function(pss, site = NULL) { stop("css file should be a data frame") } if (!is.null(site)) { - - if(!all(unique(pss$site) %in% unique(site$sitenum))) { + if (!all(unique(pss$site) %in% unique(site$sitenum))) { stop("pss and site files are not compatible") } } @@ -44,9 +44,10 @@ check_pss <- function(pss, site = NULL) { #' @rdname check_css #' @export check_site <- function(site) { - stopifnot( - nrow(site) >= 1, - !is.null(attributes(site)), - is.numeric(attr(site, "nsite")), - attr(site, "file_format") %in% c(1, 2, 3)) + stopifnot( + nrow(site) >= 1, + !is.null(attributes(site)), + is.numeric(attr(site, "nsite")), + attr(site, "file_format") %in% c(1, 2, 3) + ) } diff --git a/models/ed/R/create_veg.R b/models/ed/R/create_veg.R index ea8d434484f..1ec5f73083b 100644 --- a/models/ed/R/create_veg.R +++ b/models/ed/R/create_veg.R @@ -61,8 +61,8 @@ create_site <- function(input, check = TRUE) { } #' Modify a reference `data.frame` -#' -#' Wrapper around `modifyList` to allow expanding a `data.frame` by modifying +#' +#' Wrapper around `modifyList` to allow expanding a `data.frame` by modifying #' only a single column. #' #' @param input Named `list` or `data.frame` containing columns to replace in `base` diff --git a/models/ed/R/data.R b/models/ed/R/data.R index 12b1704ac5d..14b535d15b2 100644 --- a/models/ed/R/data.R +++ b/models/ed/R/data.R @@ -9,4 +9,4 @@ #' ... #' } #' @source \url{https://github.com/EDmodel/ED2/wiki/Plant-functional-types} -"pftmapping" \ No newline at end of file +"pftmapping" diff --git a/models/ed/R/download_edi.R b/models/ed/R/download_edi.R index 8559a25541f..7943d0895e0 100644 --- a/models/ed/R/download_edi.R +++ b/models/ed/R/download_edi.R @@ -1,12 +1,12 @@ #' Download ED inputs #' -#' Download and unzip common ED inputs from a public Open Science Framework -#' (OSF) repository (https://osf.io/b6umf). Inputs include the Olson Global +#' Download and unzip common ED inputs from a public Open Science Framework +#' (OSF) repository (https://osf.io/b6umf). Inputs include the Olson Global #' Ecosystems (OGE) database (`oge2OLD`) and the `chd` and `dgd` databases. #' #' The total download size around 28 MB. #' -#' @param directory Target directory for unzipping files. Will be created if it +#' @param directory Target directory for unzipping files. Will be created if it #' doesn't exist. 
#' @return `TRUE`, invisibly #' @export diff --git a/models/ed/R/ed_varlookup.R b/models/ed/R/ed_varlookup.R index 7ac2c596d95..7b00ba2042b 100644 --- a/models/ed/R/ed_varlookup.R +++ b/models/ed/R/ed_varlookup.R @@ -3,74 +3,100 @@ #' @param varname character; variable name to read from file #' @export ed.var <- function(varname) { - if(varname == "AGB") { - out = list(readvar = "AGB_CO", - type = 'co', units = "kgC/plant", - drelated = NULL, # other deterministically related vars? - expr = "AGB_CO") - } else if(varname == "TotLivBiom") { - out = list(readvar = c("BALIVE"), - type = 'co', units = "kgC/plant", - drelated = NULL, - expr = "BALIVE") - } else if(varname == "BA") { - out = list(readvar = "BA_CO", - type = 'co', units = "cm2/plant", - drelated = NULL, - expr = "BA_CO") - } else if(varname == "DBH") { - out = list(readvar = "DBH", - type = 'co', units = "cm/plant", - drelated = NULL, - expr = "DBH") - } else if(varname == "AbvGrndWood") { - out = list(readvar = c("AGB_CO"), #until I change BLEAF keeper to be annual work with total AGB - type = 'co', units = "kgC/plant", - drelated = NULL, - expr = "AGB_CO") - } else if(varname == "AGB.pft") { - out = list(readvar = c("AGB_CO"), #until I change BLEAF keeper to be annual work with total AGB - type = 'co', units = "kgC/plant", - drelated = NULL, - expr = "AGB_CO") - } else if(varname == "leaf_carbon_content") { - out = list(readvar = "BLEAF", - type = 'co', units = "kgC/plant", - drelated = NULL, - expr = "BLEAF") - } else if(varname == "root_carbon_content") { - out = list(readvar = "BROOT", - type = 'co', units = "kgC/plant", - drelated = NULL, - expr = "BROOT") - } else if(varname == "reproductive_litter_carbon_content") { - out = list(readvar = "BSEEDS_CO", - type = 'co', units = "kgC/plant", - drelated = NULL, - expr = "BSEEDS_CO") - } else if(varname == "storage_carbon_content") { - out = list(readvar = "BSTORAGE", - type = 'co', units = "kgC/plant", - drelated = NULL, - expr = "BSTORAGE") - } else if(varname == "GWBI") { - out = list(readvar = "DDBH_DT", # this is actually rate of change in DBH, we'll calculate GWBI from it - type = 'co', units = "cm/yr", - drelated = NULL, - expr = "DDBH_DT") - } else if(varname == "fast_soil_pool_carbon_content") { - out = list(readvar = "FAST_SOIL_C", - type = 'pa', units = "kg/m2", - drelated = NULL, - expr = "FAST_SOIL_C") - } else if(varname == "structural_soil_pool_carbon_content") { - out = list(readvar = "STRUCTURAL_SOIL_C", - type = 'pa', units = "kg/m2", - drelated = NULL, - expr = "STRUCTURAL_SOIL_C") + if (varname == "AGB") { + out <- list( + readvar = "AGB_CO", + type = "co", units = "kgC/plant", + drelated = NULL, # other deterministically related vars? 
+ expr = "AGB_CO" + ) + } else if (varname == "TotLivBiom") { + out <- list( + readvar = c("BALIVE"), + type = "co", units = "kgC/plant", + drelated = NULL, + expr = "BALIVE" + ) + } else if (varname == "BA") { + out <- list( + readvar = "BA_CO", + type = "co", units = "cm2/plant", + drelated = NULL, + expr = "BA_CO" + ) + } else if (varname == "DBH") { + out <- list( + readvar = "DBH", + type = "co", units = "cm/plant", + drelated = NULL, + expr = "DBH" + ) + } else if (varname == "AbvGrndWood") { + out <- list( + readvar = c("AGB_CO"), # until I change BLEAF keeper to be annual work with total AGB + type = "co", units = "kgC/plant", + drelated = NULL, + expr = "AGB_CO" + ) + } else if (varname == "AGB.pft") { + out <- list( + readvar = c("AGB_CO"), # until I change BLEAF keeper to be annual work with total AGB + type = "co", units = "kgC/plant", + drelated = NULL, + expr = "AGB_CO" + ) + } else if (varname == "leaf_carbon_content") { + out <- list( + readvar = "BLEAF", + type = "co", units = "kgC/plant", + drelated = NULL, + expr = "BLEAF" + ) + } else if (varname == "root_carbon_content") { + out <- list( + readvar = "BROOT", + type = "co", units = "kgC/plant", + drelated = NULL, + expr = "BROOT" + ) + } else if (varname == "reproductive_litter_carbon_content") { + out <- list( + readvar = "BSEEDS_CO", + type = "co", units = "kgC/plant", + drelated = NULL, + expr = "BSEEDS_CO" + ) + } else if (varname == "storage_carbon_content") { + out <- list( + readvar = "BSTORAGE", + type = "co", units = "kgC/plant", + drelated = NULL, + expr = "BSTORAGE" + ) + } else if (varname == "GWBI") { + out <- list( + readvar = "DDBH_DT", # this is actually rate of change in DBH, we'll calculate GWBI from it + type = "co", units = "cm/yr", + drelated = NULL, + expr = "DDBH_DT" + ) + } else if (varname == "fast_soil_pool_carbon_content") { + out <- list( + readvar = "FAST_SOIL_C", + type = "pa", units = "kg/m2", + drelated = NULL, + expr = "FAST_SOIL_C" + ) + } else if (varname == "structural_soil_pool_carbon_content") { + out <- list( + readvar = "STRUCTURAL_SOIL_C", + type = "pa", units = "kg/m2", + drelated = NULL, + expr = "STRUCTURAL_SOIL_C" + ) } else { # No Match! 
warning(paste0("Couldn't find varname ", varname, "!")) - out = NULL + out <- NULL } return(out) } diff --git a/models/ed/R/example_veg.R b/models/ed/R/example_veg.R index 79614e9e473..b828910b177 100644 --- a/models/ed/R/example_veg.R +++ b/models/ed/R/example_veg.R @@ -3,21 +3,21 @@ #' @export example_css <- tibble::tribble( ~time, ~patch, ~cohort, ~dbh, ~hite, ~pft, ~n, ~bdead, ~balive, ~lai, - 2008, 1, 1, 12.50, 0, 9, 0.001, 0, 0, 0 + 2008, 1, 1, 12.50, 0, 9, 0.001, 0, 0, 0 ) #' @rdname example_css #' @export example_pss <- tibble::tribble( ~site, ~time, ~patch, ~trk, ~age, ~area, ~water, ~fsc, ~stsc, ~stsl, ~ssc, ~psc, ~msn, ~fsn, - 1, 2008, 1, 1, 70, 1, 0, 1, 5, 5, 0.01, 0, 1, 1 + 1, 2008, 1, 1, 70, 1, 0, 1, 5, 5, 0.01, 0, 1, 1 ) #' @rdname example_css #' @export example_site <- tibble::tribble( ~sitenum, ~area, ~TCI, ~elev, ~slope, ~aspect, ~soil, - 1, 1, -7, 100, 0, 0, 3 + 1, 1, -7, 100, 0, 0, 3 ) attr(example_site, "nsite") <- 1 attr(example_site, "file_format") <- 1 diff --git a/models/ed/R/get_met_dates.R b/models/ed/R/get_met_dates.R index 197aeb4b447..372c29064bf 100644 --- a/models/ed/R/get_met_dates.R +++ b/models/ed/R/get_met_dates.R @@ -1,6 +1,6 @@ #' Get meteorology dates #' -#' Figure out the dates for which a given meteorology is available by parsing +#' Figure out the dates for which a given meteorology is available by parsing #' the matching file names. #' @inheritParams write_ed_metheader #' @return Vector of dates for a run @@ -11,7 +11,7 @@ get_met_dates <- function(ed_metheader) { month_list <- purrr::map2( met_paths, met_file_list, - ~gsub(normalizePath(.x, mustWork = FALSE), "", normalizePath(.y, mustWork = FALSE)) + ~ gsub(normalizePath(.x, mustWork = FALSE), "", normalizePath(.y, mustWork = FALSE)) ) month_vec_raw <- tolower(gsub(".h5", "", Reduce(c, month_list))) month_vec <- lubridate::parse_date_time(month_vec_raw, "ym") @@ -21,7 +21,7 @@ get_met_dates <- function(ed_metheader) { #' Get all the dates in a month #' -#' For a given date, figure out its month and return all of the dates for that +#' For a given date, figure out its month and return all of the dates for that #' month. #' @param date Date as string or date object #' @return Sequence of dates from the first to the last day of the month. diff --git a/models/ed/R/met2model.ED2.R b/models/ed/R/met2model.ED2.R index 70dfa5cd623..84fae79c363 100644 --- a/models/ed/R/met2model.ED2.R +++ b/models/ed/R/met2model.ED2.R @@ -9,9 +9,9 @@ #' met2model wrapper for ED2 #' -#' If files already exist in 'Outfolder', the default function is NOT to -#' overwrite them and only gives user the notice that file already exists. If -#' user wants to overwrite the existing files, just change overwrite statement +#' If files already exist in 'Outfolder', the default function is NOT to +#' overwrite them and only gives user the notice that file already exists. If +#' user wants to overwrite the existing files, just change overwrite statement #' below to TRUE. #' #' @export @@ -29,12 +29,11 @@ #' @param ... currently unused met2model.ED2 <- function(in.path, in.prefix, outfolder, start_date, end_date, lst = 0, lat = NA, lon = NA, overwrite = FALSE, verbose = FALSE, leap_year = TRUE, ...) 
{ - overwrite <- as.logical(overwrite) # results are stored in folder prefix.start.end start_date <- as.POSIXlt(start_date, tz = "UTC") - end_date <- as.POSIXlt(end_date, tz = "UTC") + end_date <- as.POSIXlt(end_date, tz = "UTC") met_folder <- outfolder met_header_file <- file.path(met_folder, "ED_MET_DRIVER_HEADER") @@ -64,8 +63,8 @@ met2model.ED2 <- function(in.path, in.prefix, outfolder, start_date, end_date, l mo <- findInterval(day, dm) return(mo) } else { - leap <- lubridate::leap_year(year) - mo[leap] <- findInterval(day[leap], dl) + leap <- lubridate::leap_year(year) + mo[leap] <- findInterval(day[leap], dl) mo[!leap] <- findInterval(day[!leap], dm) return(mo) } @@ -123,7 +122,6 @@ met2model.ED2 <- function(in.path, in.prefix, outfolder, start_date, end_date, l ## loop over files for (year in year_seq) { - ## open netcdf ncfile <- file.path(in.path, paste(in.prefix, year, "nc", sep = ".")) nc <- ncdf4::nc_open(ncfile) @@ -157,25 +155,25 @@ met2model.ED2 <- function(in.path, in.prefix, outfolder, start_date, end_date, l ## determine GMT adjustment lst <- site$LST_shift[which(site$acro == froot)] ## extract variables - tdays <- nc$dim$time$vals + tdays <- nc$dim$time$vals Tair <- ncdf4::ncvar_get(nc, "air_temperature") - Qair <- ncdf4::ncvar_get(nc, "specific_humidity") #humidity (kg/kg) - U <- try(ncdf4::ncvar_get(nc, "eastward_wind"), silent = TRUE) - V <- try(ncdf4::ncvar_get(nc, "northward_wind"), silent = TRUE) + Qair <- ncdf4::ncvar_get(nc, "specific_humidity") # humidity (kg/kg) + U <- try(ncdf4::ncvar_get(nc, "eastward_wind"), silent = TRUE) + V <- try(ncdf4::ncvar_get(nc, "northward_wind"), silent = TRUE) Rain <- ncdf4::ncvar_get(nc, "precipitation_flux") pres <- ncdf4::ncvar_get(nc, "air_pressure") - SW <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") - LW <- ncdf4::ncvar_get(nc, "surface_downwelling_longwave_flux_in_air") - CO2 <- try(ncdf4::ncvar_get(nc, "mole_fraction_of_carbon_dioxide_in_air"), silent = TRUE) + SW <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") + LW <- ncdf4::ncvar_get(nc, "surface_downwelling_longwave_flux_in_air") + CO2 <- try(ncdf4::ncvar_get(nc, "mole_fraction_of_carbon_dioxide_in_air"), silent = TRUE) use_UV <- is.numeric(U) & is.numeric(V) - if(!use_UV){ + if (!use_UV) { U <- try(ncdf4::ncvar_get(nc, "wind_speed"), silent = TRUE) - if(is.numeric(U)){ + if (is.numeric(U)) { PEcAn.logger::logger.info("eastward_wind and northward_wind are absent, using wind_speed to approximate eastward_wind") V <- rep(0, length(U)) - }else{ + } else { PEcAn.logger::logger.severe("No eastward_wind and northward_wind or wind_speed in the met data") } } @@ -214,12 +212,12 @@ met2model.ED2 <- function(in.path, in.prefix, outfolder, start_date, end_date, l slen <- seq_along(sec) Tair <- c(rep(Tair[1], toff), Tair)[slen] Qair <- c(rep(Qair[1], toff), Qair)[slen] - U <- c(rep(U[1], toff), U)[slen] - V <- c(rep(V[1], toff), V)[slen] + U <- c(rep(U[1], toff), U)[slen] + V <- c(rep(V[1], toff), V)[slen] Rain <- c(rep(Rain[1], toff), Rain)[slen] pres <- c(rep(pres[1], toff), pres)[slen] - SW <- c(rep(SW[1], toff), SW)[slen] - LW <- c(rep(LW[1], toff), LW)[slen] + SW <- c(rep(SW[1], toff), SW)[slen] + LW <- c(rep(LW[1], toff), LW)[slen] if (useCO2) { CO2 <- c(rep(CO2[1], toff), CO2)[slen] } @@ -233,7 +231,7 @@ met2model.ED2 <- function(in.path, in.prefix, outfolder, start_date, end_date, l # not 0. Similarly, 6pm on December 31 is "364.75 days since YYYY-01-01", # but this is DOY 365, not 364. 
doy <- floor(tdays) + 1 - + invalid_doy <- doy < 1 | doy > PEcAn.utils::days_in_year(year, leap_year) if (any(invalid_doy)) { PEcAn.logger::logger.severe(paste0( @@ -249,38 +247,38 @@ met2model.ED2 <- function(in.path, in.prefix, outfolder, start_date, end_date, l # YYYY-01-01" value x 24. So we calculate it here using mod division. # (e.g., 12.5 days %% 1 = 0.5 day; 0.5 day x 24 = 12 hours) hr <- (tdays %% 1) * 24 - + ## calculate potential radiation in order to estimate diffuse/direct cosz <- PEcAn.data.atmosphere::cos_solar_zenith_angle(doy, lat, lon, dt, hr) rpot <- 1366 * cosz rpot <- rpot[seq_along(tdays)] - SW[rpot < SW] <- rpot[rpot < SW] ## ensure radiation < max + SW[rpot < SW] <- rpot[rpot < SW] ## ensure radiation < max ### this causes trouble at twilight bc of missmatch btw bin avergage and bin midpoint - frac <- SW/rpot - frac[frac > 0.9] <- 0.9 ## ensure some diffuse + frac <- SW / rpot + frac[frac > 0.9] <- 0.9 ## ensure some diffuse frac[frac < 0] <- 0 frac[is.na(frac)] <- 0 frac[is.nan(frac)] <- 0 - SWd <- SW * (1 - frac) ## Diffuse portion of total short wave rad + SWd <- SW * (1 - frac) ## Diffuse portion of total short wave rad ### convert to ED2.1 hdf met variables - n <- length(Tair) - nbdsfA <- (SW - SWd) * 0.57 # near IR beam downward solar radiation [W/m2] - nddsfA <- SWd * 0.48 # near IR diffuse downward solar radiation [W/m2] - vbdsfA <- (SW - SWd) * 0.43 # visible beam downward solar radiation [W/m2] - vddsfA <- SWd * 0.52 # visible diffuse downward solar radiation [W/m2] - prateA <- Rain # precipitation rate [kg_H2O/m2/s] - dlwrfA <- LW # downward long wave radiation [W/m2] - presA <- pres # pressure [Pa] - hgtA <- rep(50, n) # geopotential height [m] - ugrdA <- U # zonal wind [m/s] - vgrdA <- V # meridional wind [m/s] - shA <- Qair # specific humidity [kg_H2O/kg_air] - tmpA <- Tair # temperature [K] + n <- length(Tair) + nbdsfA <- (SW - SWd) * 0.57 # near IR beam downward solar radiation [W/m2] + nddsfA <- SWd * 0.48 # near IR diffuse downward solar radiation [W/m2] + vbdsfA <- (SW - SWd) * 0.43 # visible beam downward solar radiation [W/m2] + vddsfA <- SWd * 0.52 # visible diffuse downward solar radiation [W/m2] + prateA <- Rain # precipitation rate [kg_H2O/m2/s] + dlwrfA <- LW # downward long wave radiation [W/m2] + presA <- pres # pressure [Pa] + hgtA <- rep(50, n) # geopotential height [m] + ugrdA <- U # zonal wind [m/s] + vgrdA <- V # meridional wind [m/s] + shA <- Qair # specific humidity [kg_H2O/kg_air] + tmpA <- Tair # temperature [K] if (useCO2) { - co2A <- CO2 * 1e+06 # surface co2 concentration [ppm] converted from mole fraction [kg/kg] + co2A <- CO2 * 1e+06 # surface co2 concentration [ppm] converted from mole fraction [kg/kg] } # Next, because ED2 stores values in monthly HDF5 files, we need to @@ -302,19 +300,19 @@ met2model.ED2 <- function(in.path, in.prefix, outfolder, start_date, end_date, l } else { ed_met_h5 <- hdf5r::H5File$new(mout) } - dims <- c(length(selm), 1, 1) + dims <- c(length(selm), 1, 1) nbdsf <- array(nbdsfA[selm], dim = dims) nddsf <- array(nddsfA[selm], dim = dims) vbdsf <- array(vbdsfA[selm], dim = dims) vddsf <- array(vddsfA[selm], dim = dims) prate <- array(prateA[selm], dim = dims) dlwrf <- array(dlwrfA[selm], dim = dims) - pres <- array(presA[selm], dim = dims) - hgt <- array(hgtA[selm], dim = dims) - ugrd <- array(ugrdA[selm], dim = dims) - vgrd <- array(vgrdA[selm], dim = dims) - sh <- array(shA[selm], dim = dims) - tmp <- array(tmpA[selm], dim = dims) + pres <- array(presA[selm], dim = dims) + hgt <- array(hgtA[selm], 
dim = dims) + ugrd <- array(ugrdA[selm], dim = dims) + vgrd <- array(vgrdA[selm], dim = dims) + sh <- array(shA[selm], dim = dims) + tmp <- array(tmpA[selm], dim = dims) if (useCO2) { co2 <- array(co2A[selm], dim = dims) } @@ -337,8 +335,10 @@ met2model.ED2 <- function(in.path, in.prefix, outfolder, start_date, end_date, l } ## write DRIVER file - metvar <- c("nbdsf", "nddsf", "vbdsf", "vddsf", "prate", "dlwrf", - "pres", "hgt", "ugrd", "vgrd", "sh", "tmp", "co2") + metvar <- c( + "nbdsf", "nddsf", "vbdsf", "vddsf", "prate", "dlwrf", + "pres", "hgt", "ugrd", "vgrd", "sh", "tmp", "co2" + ) metvar_table <- data.frame( variable = metvar, update_frequency = dt, @@ -346,11 +346,11 @@ met2model.ED2 <- function(in.path, in.prefix, outfolder, start_date, end_date, l ) if (!useCO2) { - metvar_table_vars <- metvar_table[metvar_table$variable != "co2",] ## CO2 optional in ED2 - }else{ + metvar_table_vars <- metvar_table[metvar_table$variable != "co2", ] ## CO2 optional in ED2 + } else { metvar_table_vars <- metvar_table } - + ed_metheader <- list(list( path_prefix = met_folder, nlon = 1, @@ -361,11 +361,12 @@ met2model.ED2 <- function(in.path, in.prefix, outfolder, start_date, end_date, l ymin = lat, variables = metvar_table_vars )) - + check_ed_metheader(ed_metheader) write_ed_metheader(ed_metheader, met_header_file, - header_line = shQuote("Made_by_PEcAn_met2model.ED2")) - } ### end loop over met files + header_line = shQuote("Made_by_PEcAn_met2model.ED2") + ) + } ### end loop over met files PEcAn.logger::logger.info("Done with met2model.ED2") return(invisible(results)) diff --git a/models/ed/R/model2netcdf.ED2.R b/models/ed/R/model2netcdf.ED2.R index 6bcf9150e16..94224d975c8 100644 --- a/models/ed/R/model2netcdf.ED2.R +++ b/models/ed/R/model2netcdf.ED2.R @@ -9,7 +9,7 @@ ##' Code to convert ED2's -T- HDF5 output into netCDF format -##' +##' ##' Modified from code to convert ED2's HDF5 output into the NACP ##' Intercomparison format (ALMA using netCDF) ##' @@ -42,35 +42,32 @@ model2netcdf.ED2 <- function(outdir, pfts, settings = NULL, process_partial = FALSE) { - if(!is.null(settings)) { - if(!inherits(settings, "Settings")) { + if (!is.null(settings)) { + if (!inherits(settings, "Settings")) { PEcAn.logger::logger.error("`settings` should be a PEcAn 'Settings' object") } - if(missing(sitelat)) sitelat <- settings$run$site$lat - if(missing(sitelon)) sitelon <- settings$run$site$lon - if(missing(start_date)) start_date <- settings$run$start.date - if(missing(end_date)) end_date <- settings$run$end.date - if(missing(pfts)) pfts <- extract_pfts(settings$pfts) + if (missing(sitelat)) sitelat <- settings$run$site$lat + if (missing(sitelon)) sitelon <- settings$run$site$lon + if (missing(start_date)) start_date <- settings$run$start.date + if (missing(end_date)) end_date <- settings$run$end.date + if (missing(pfts)) pfts <- extract_pfts(settings$pfts) } - + start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - + end_year <- lubridate::year(end_date) + flist <- list() flist[["-T-"]] <- dir(outdir, "-T-") # tower files flist[["-E-"]] <- dir(outdir, "-E-") # monthly files # check if there are files - file.check <- sapply(flist, function (f) length(f) != 0) + file.check <- sapply(flist, function(f) length(f) != 0) if (!any(file.check)) { - # no output files PEcAn.logger::logger.warn("WARNING: No output files found for :", outdir) return(NULL) - } else { - # which output files are there ed_res_flag <- names(flist)[file.check] @@ -96,85 +93,91 @@ model2netcdf.ED2 <- 
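# For illustration only: the ED2 met driver header assembled above is a
# one-row-per-variable table, with the co2 row dropped when no CO2 series is
# written. A standalone sketch; the 900 s update frequency is hypothetical.
metvar <- c(
  "nbdsf", "nddsf", "vbdsf", "vddsf", "prate", "dlwrf",
  "pres", "hgt", "ugrd", "vgrd", "sh", "tmp", "co2"
)
metvar_table <- data.frame(variable = metvar, update_frequency = 900)
useCO2 <- FALSE
metvar_table_vars <-
  if (!useCO2) metvar_table[metvar_table$variable != "co2", ] else metvar_table
nrow(metvar_table_vars) # 12 rows without co2, 13 with it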
function(outdir, # (ii) check whether this is an ensemble run, then return null, otherwise # process whatever there is # for now I'm going with this, do failed runs also provide information - # on parameters? + # on parameters? year_check <- unique(unlist(ylist)) - if (max(year_check) < end_year) { #if run failed early + if (max(year_check) < end_year) { # if run failed early PEcAn.logger::logger.warn("Run ended earlier than expected. Check logfile.txt") - - #figure out if this is an ensemble + + # figure out if this is an ensemble run_id <- basename(outdir) workflow_dir <- dirname(dirname(outdir)) - rundir <- file.path(workflow_dir, "run", run_id) + rundir <- file.path(workflow_dir, "run", run_id) readme <- file.path(rundir, "README.txt") runtype <- readLines(readme, n = 1) is_ensemble <- grepl("ensemble", runtype) if (is_ensemble & !process_partial) { - PEcAn.logger::logger.info("This is an ensemble run. ", - "Not processing anything.") + PEcAn.logger::logger.info( + "This is an ensemble run. ", + "Not processing anything." + ) return(NULL) } else { PEcAn.logger::logger.info("Processing existing outputs.") end_year <- max(year_check) } } - + # ----- start loop over years for (y in start_year:end_year) { - PEcAn.logger::logger.info(paste0("----- Processing year: ", y)) # ----- read values from ED output files for (i in seq_along(out_list)) { rflag <- ed_res_flag[i] # fcnx is either read_T_files() or read_E_files() - fcnx <- paste0("read_", gsub("-", "", rflag), "_files") - out_list[[rflag]] <- do.call(fcnx, list(yr = y, ylist[[rflag]], flist[[rflag]], - outdir, start_date, end_date, - pfts, settings)) + fcnx <- paste0("read_", gsub("-", "", rflag), "_files") + out_list[[rflag]] <- do.call(fcnx, list( + yr = y, ylist[[rflag]], flist[[rflag]], + outdir, start_date, end_date, + pfts, settings + )) } # generate start/end dates for processing - + # read_E_files already takes care of adjusting start and end dates to match # ED2 output. These adjustments dont' make sense for E files, so skip them. - if(rflag == "-E-"){ + if (rflag == "-E-") { start_date_real <- start_date end_date_real <- end_date } else { - if (y == start_year) { start_date_real <- lubridate::ymd(start_date) } else { - #When would this be run? + # When would this be run? start_date_real <- lubridate::make_date(y, 1, 1) } - + if (y == end_year) { end_date_real <- lubridate::ymd(end_date) } else { - #When would this be run? + # When would this be run? 
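# For illustration only: a standalone sketch of the name-based dispatch used
# above, where the reader function is chosen by pasting the output flag into
# a function name and invoking it with do.call(). The *_demo readers below are
# hypothetical stand-ins, not the package's read_T_files()/read_E_files().
read_T_demo <- function(...) "tower (-T-) reader called"
read_E_demo <- function(...) "monthly (-E-) reader called"
for (rflag in c("-T-", "-E-")) {
  fcnx <- paste0("read_", gsub("-", "", rflag), "_demo") # "read_T_demo" or "read_E_demo"
  print(do.call(fcnx, list()))
}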
end_date_real <- lubridate::make_date(y, 12, 31) } } - + # create lat/long nc variables lat <- ncdf4::ncdim_def("lat", "degrees_north", - vals = as.numeric(sitelat), - longname = "station_latitude") + vals = as.numeric(sitelat), + longname = "station_latitude" + ) lon <- ncdf4::ncdim_def("lon", "degrees_east", - vals = as.numeric(sitelon), - longname = "station_longitude") - + vals = as.numeric(sitelon), + longname = "station_longitude" + ) + # ----- put values to nc_var list nc_var <- list() for (i in seq_along(out_list)) { - rflag <- ed_res_flag[i] - #fcnx is either put_T_values() or put_E_values() - fcnx <- paste0("put_", gsub("-", "", rflag), "_values") - put_out <- do.call(fcnx, list(yr = y, nc_var = nc_var, var_list = out_list[[rflag]], - lat = lat, lon = lon, start_date = start_date_real, - end_date = end_date_real)) - nc_var <- put_out$nc_var + rflag <- ed_res_flag[i] + # fcnx is either put_T_values() or put_E_values() + fcnx <- paste0("put_", gsub("-", "", rflag), "_values") + put_out <- do.call(fcnx, list( + yr = y, nc_var = nc_var, var_list = out_list[[rflag]], + lat = lat, lon = lon, start_date = start_date_real, + end_date = end_date_real + )) + nc_var <- put_out$nc_var out_list[[rflag]] <- put_out$out } @@ -182,34 +185,37 @@ model2netcdf.ED2 <- function(outdir, PEcAn.logger::logger.info("*** Writing netCDF file ***") out <- unlist(out_list, recursive = FALSE) - #create nc file with slots for all variables - nc <- ncdf4::nc_create(file.path(outdir, paste(y, "nc", sep = ".")), - nc_var) + # create nc file with slots for all variables + nc <- ncdf4::nc_create( + file.path(outdir, paste(y, "nc", sep = ".")), + nc_var + ) # define time_bounds for -T- outputs, if exists - if (file.check[["-T-"]]==TRUE) { + if (file.check[["-T-"]] == TRUE) { ncdf4::ncatt_put(nc, "time", "bounds", "time_bounds", prec = NA) } # define time_bounds for -E- outputs, if exists - if (file.check[["-E-"]]==TRUE) { + if (file.check[["-E-"]] == TRUE) { ncdf4::ncatt_put(nc, "dtime", "bounds", "dtime_bounds", prec = NA) } varfile <- file(file.path(outdir, paste(y, "nc", "var", sep = ".")), "w") # fill nc file with data for (i in seq_along(nc_var)) { var_put(nc, varid = nc_var[[i]], vals = out[[i]]) - cat(paste(nc_var[[i]]$name, nc_var[[i]]$longname), file = varfile, - sep = "\n") + cat(paste(nc_var[[i]]$name, nc_var[[i]]$longname), + file = varfile, + sep = "\n" + ) } ncdf4::nc_close(nc) close(varfile) } # end year-loop - } # model2netcdf.ED2 -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' Function for reading -T- files ##' @@ -227,7 +233,7 @@ model2netcdf.ED2 <- function(outdir, ##' @param pfts for consistency with [read_E_files()]---unused ##' @param settings A PEcAn settings object. Values for `start_date` and ##' `end_date` will be taken from `settings` if it is supplied. 
-##' +##' ##' @export read_T_files <- function(yr, @@ -238,479 +244,497 @@ read_T_files <- end_date, pfts = NULL, settings = NULL) { - - - PEcAn.logger::logger.info(paste0("*** Reading -T- file ***")) - if (!is.null(settings)) { - if(!inherits(settings, "Settings")) { - PEcAn.logger::logger.error("`settings` should be a PEcAn 'Settings' object") - } - if(missing(start_date)) start_date <- settings$run$start.date - if(missing(end_date)) end_date <- settings$run$end.date - } - #TODO: rename add() to something more descriptive - # add - add <- function(dat, col, row, year) { - ## data is always given for whole year, except it will start always at 0 - ## the left over data is filled with 0's - if (year == strftime(start_date, "%Y")) { - start <- (as.numeric(strftime(start_date, "%j")) - 1) * block - } else { - start <- 0 - } - if (year == strftime(end_date, "%Y")) { - end <- as.numeric(strftime(end_date, "%j")) * block - } else { - end <- as.numeric(strftime(paste0(year, "-12-31"), "%j")) * block + PEcAn.logger::logger.info(paste0("*** Reading -T- file ***")) + if (!is.null(settings)) { + if (!inherits(settings, "Settings")) { + PEcAn.logger::logger.error("`settings` should be a PEcAn 'Settings' object") + } + if (missing(start_date)) start_date <- settings$run$start.date + if (missing(end_date)) end_date <- settings$run$end.date } + # TODO: rename add() to something more descriptive + # add + add <- function(dat, col, row, year) { + ## data is always given for whole year, except it will start always at 0 + ## the left over data is filled with 0's + if (year == strftime(start_date, "%Y")) { + start <- (as.numeric(strftime(start_date, "%j")) - 1) * block + } else { + start <- 0 + } + if (year == strftime(end_date, "%Y")) { + end <- as.numeric(strftime(end_date, "%j")) * block + } else { + end <- as.numeric(strftime(paste0(year, "-12-31"), "%j")) * block + } - dims <- dim(dat) - if (is.null(dims)) { - if (length(dat) == 1) { + dims <- dim(dat) + if (is.null(dims)) { + if (length(dat) == 1) { + if (length(out) < col) { + out[[col]] <- array(dat, dim = (end - start)) + } else { + if (start != 0) { + PEcAn.logger::logger.warn( + "start date is not 0 this year, + but data already exists in this col", + col, "how is this possible?" + ) + } + out[[col]] <- abind::abind(out[[col]], + array(dat, dim = (end - start)), + along = 1 + ) + } + } else { + PEcAn.logger::logger.warn("expected a single value") + } + } else if (length(dims) == 1) { + dat <- dat[1:(end - start)] + if (length(out) < col) { + out[[col]] <- dat + } else { + if (start != 0) { + PEcAn.logger::logger.warn( + "start date is not 0 this year, + but data already exists in this col", + col, "how is this possible?" 
+ ) + } + out[[col]] <- abind::abind(out[[col]], dat, along = 1) + } + } else if (length(dims) == 2) { + dat <- t(dat) + dims <- dim(dat) + dat <- dat[1:(end - start), ] if (length(out) < col) { - out[[col]] <- array(dat, dim = (end - start)) + out[[col]] <- dat } else { if (start != 0) { - PEcAn.logger::logger.warn("start date is not 0 this year, - but data already exists in this col", - col, "how is this possible?") + PEcAn.logger::logger.warn("start date is not 0 this year, + but data already exists in this + col", col, "how is this possible?") } - out[[col]] <- abind::abind(out[[col]], - array(dat, dim = (end - start)), - along = 1) + out[[col]] <- abind::abind(out[[col]], dat, along = 1) } } else { - PEcAn.logger::logger.warn("expected a single value") + PEcAn.logger::logger.debug("-------------------------------------------------------------") + PEcAn.logger::logger.debug("col=", col) + PEcAn.logger::logger.debug("length=", length(dat)) + PEcAn.logger::logger.debug("start=", start) + PEcAn.logger::logger.debug("end=", end) + PEcAn.logger::logger.debug("dims=", dims) + PEcAn.logger::logger.warn("Don't know how to handle larger arrays yet.") } - } else if (length(dims) == 1) { - dat <- dat[1:(end - start)] - if (length(out) < col) { - out[[col]] <- dat + + ## finally make sure we use -999 for invalid values + out[[col]][is.null(out[[col]])] <- -999 + out[[col]][is.na(out[[col]])] <- -999 + + return(out) + } # end add-function + + + getHdf5Data <- function(nc, var) { + if (var %in% names(nc$var)) { + return(ncdf4::ncvar_get(nc, var)) } else { - if (start != 0) { - PEcAn.logger::logger.warn("start date is not 0 this year, - but data already exists in this col", - col, "how is this possible?") - } - out[[col]] <- abind::abind(out[[col]], dat, along = 1) + PEcAn.logger::logger.warn("Could not find", var, "in ed hdf5 output.") + return(-999) } - } else if (length(dims) == 2) { - dat <- t(dat) - dims <- dim(dat) - dat <- dat[1:(end - start), ] - if (length(out) < col) { - out[[col]] <- dat - } else { - if (start != 0) { - PEcAn.logger::logger.warn("start date is not 0 this year, - but data already exists in this - col", col, "how is this possible?") - } - out[[col]] <- abind::abind(out[[col]], dat, along = 1) - } - } else { - PEcAn.logger::logger.debug("-------------------------------------------------------------") - PEcAn.logger::logger.debug("col=", col) - PEcAn.logger::logger.debug("length=", length(dat)) - PEcAn.logger::logger.debug("start=", start) - PEcAn.logger::logger.debug("end=", end) - PEcAn.logger::logger.debug("dims=", dims) - PEcAn.logger::logger.warn("Don't know how to handle larger arrays yet.") - } - - ## finally make sure we use -999 for invalid values - out[[col]][is.null(out[[col]])] <- -999 - out[[col]][is.na(out[[col]])] <- -999 - - return(out) - } # end add-function - - - getHdf5Data <- function(nc, var) { - if (var %in% names(nc$var)) { - return(ncdf4::ncvar_get(nc, var)) - } else { - PEcAn.logger::logger.warn("Could not find", var, "in ed hdf5 output.") - return(-999) } - } - - CheckED2Variables <- function(nc) { - vars_detected <- NULL - name_convention <- NULL - - if ("FMEAN_BDEAD_PY" %in% names(nc$var)) { - vars_detected <- c(vars_detected,"FMEAN_BDEAD_PY") - name_convention <- "Contains_FMEAN" + + CheckED2Variables <- function(nc) { + vars_detected <- NULL + name_convention <- NULL + + if ("FMEAN_BDEAD_PY" %in% names(nc$var)) { + vars_detected <- c(vars_detected, "FMEAN_BDEAD_PY") + name_convention <- "Contains_FMEAN" + } + if ("FMEAN_SOIL_TEMP_PY" %in% 
names(nc$var)) { + vars_detected <- c(vars_detected, "FMEAN_SOIL_TEMP_PY") + name_convention <- "Contains_FMEAN" + } + if (!is.null(vars_detected)) { + PEcAn.logger::logger.warn(paste("Found variable(s): ", paste(vars_detected, collapse = " "), ", now processing FMEAN* named variables. Note that varible naming conventions may change with ED2 version.")) + } + return(name_convention) } - if ("FMEAN_SOIL_TEMP_PY" %in% names(nc$var)) { - vars_detected <- c(vars_detected, "FMEAN_SOIL_TEMP_PY") - name_convention <- "Contains_FMEAN" + + # note that there is always one Tower file per year + ysel <- which(yr == yfiles) + + if (yr < strftime(start_date, "%Y")) { + PEcAn.logger::logger.info(yr, "<", strftime(start_date, "%Y")) } - if(!is.null(vars_detected)){ - PEcAn.logger::logger.warn(paste("Found variable(s): ", paste(vars_detected, collapse = " "), ", now processing FMEAN* named variables. Note that varible naming conventions may change with ED2 version.")) + + if (yr > strftime(end_date, "%Y")) { + PEcAn.logger::logger.info(yr, ">", strftime(end_date, "%Y")) } - return(name_convention) - } - # note that there is always one Tower file per year - ysel <- which(yr == yfiles) - - if (yr < strftime(start_date, "%Y")) { - PEcAn.logger::logger.info(yr, "<", strftime(start_date, "%Y")) - } - - if (yr > strftime(end_date, "%Y")) { - PEcAn.logger::logger.info(yr, ">", strftime(end_date, "%Y")) - } - - n <- length(ysel) - out <- list() - row <- 1 - - # note that there is always one Tower file per year - ncT <- ncdf4::nc_open(file.path(outdir, h5_files[ysel])) - - ## determine timestep from HDF5 file - block <- ifelse(lubridate::leap_year(yr) == TRUE, - ncT$dim$phony_dim_0$len / 366, # a leaper - ncT$dim$phony_dim_0$len / 365) # non leap - - PEcAn.logger::logger.info(paste0("Output interval: ", 86400 / block, " sec")) - - - if (file.exists(file.path(outdir, sub("-T-", "-Y-", h5_files[ysel])))) { - ncY <- ncdf4::nc_open(file.path(outdir, sub("-T-", "-Y-", h5_files[ysel]))) - slzdata <- getHdf5Data(ncY, "SLZ") - ncdf4::nc_close(ncY) - } else { - PEcAn.logger::logger.warn("Could not find SLZ in Y file, + n <- length(ysel) + out <- list() + row <- 1 + + # note that there is always one Tower file per year + ncT <- ncdf4::nc_open(file.path(outdir, h5_files[ysel])) + + ## determine timestep from HDF5 file + block <- ifelse(lubridate::leap_year(yr) == TRUE, + ncT$dim$phony_dim_0$len / 366, # a leaper + ncT$dim$phony_dim_0$len / 365 + ) # non leap + + PEcAn.logger::logger.info(paste0("Output interval: ", 86400 / block, " sec")) + + + if (file.exists(file.path(outdir, sub("-T-", "-Y-", h5_files[ysel])))) { + ncY <- ncdf4::nc_open(file.path(outdir, sub("-T-", "-Y-", h5_files[ysel]))) + slzdata <- getHdf5Data(ncY, "SLZ") + ncdf4::nc_close(ncY) + } else { + PEcAn.logger::logger.warn("Could not find SLZ in Y file, making a crude assumpution.") - slzdata <- array(c(-2, -1.5, -1, -0.8, -0.6, -0.4, -0.2, -0.1, -0.05)) - } - - ## Check for what naming convention of ED2 vars we are using. May change with ED2 version. - ED2vc <- CheckED2Variables(ncT) - - ## store for later use, will only use last data - dz <- diff(slzdata) - dz <- dz[dz != 0] - - if (!is.null(ED2vc)) { - #NOTE: Take great care editing this. The order of values in `out` must - #match the order of nc_vars in put_T_values()! 
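# For illustration only: getHdf5Data() above guards every read so that a
# missing HDF5 field yields a -999 fill instead of an error. A minimal sketch
# of the same guard against a plain named list (the list and its names are
# hypothetical stand-ins for an open ncdf4 handle):
get_or_fill <- function(vars, name, fill = -999) {
  if (name %in% names(vars)) vars[[name]] else fill
}
fake_vars <- list(FMEAN_GPP_PY = c(1.2, 1.5), FMEAN_RH_PY = c(0.4, 0.5))
get_or_fill(fake_vars, "FMEAN_GPP_PY") # present: returns the stored vector
get_or_fill(fake_vars, "FMEAN_PLRESP_PY") # absent: returns the -999 fill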
- ## out <- add(getHdf5Data(ncT, 'TOTAL_AGB,1,row, yr) ## AbvGrndWood - out <- add(getHdf5Data(ncT, "FMEAN_BDEAD_PY"), 1, row, yr) ## AbvGrndWood - out <- add(getHdf5Data(ncT, "FMEAN_PLRESP_PY"), 2, row, yr) ## AutoResp - out <- add(-999, 3, row, yr) ## CarbPools - out <- add(getHdf5Data(ncT, "FMEAN_CAN_CO2_PY"), 4, row, yr) ## CO2CAS - out <- add(-999, 5, row, yr) ## CropYield - out <- add(getHdf5Data(ncT, "FMEAN_GPP_PY"), 6, row, yr) ## GPP - out <- add(getHdf5Data(ncT, "FMEAN_RH_PY"), 7, row, yr) ## HeteroResp - out <- add(-getHdf5Data(ncT, "FMEAN_GPP_PY") + getHdf5Data(ncT, "FMEAN_PLRESP_PY") + - getHdf5Data(ncT, "FMEAN_RH_PY"), 8, row, yr) ## NEE - out <- add(getHdf5Data(ncT, "FMEAN_GPP_PY") - getHdf5Data(ncT, "FMEAN_PLRESP_PY"), - 9, row, yr) ## NPP - out <- add(getHdf5Data(ncT, "FMEAN_RH_PY") + getHdf5Data(ncT, "FMEAN_PLRESP_PY"), - 10, row, yr) ## TotalResp - ## out <- add(getHdf5Data(ncT, 'BDEAD + getHdf5Data(ncT, 'BALIVE,11,row, yr) ## TotLivBiom - out <- add(-999, 11, row, yr) ## TotLivBiom - out <- add(getHdf5Data(ncT, "FAST_SOIL_C_PY") + getHdf5Data(ncT, "STRUCT_SOIL_C_PY") + - getHdf5Data(ncT, "SLOW_SOIL_C_PY"), 12, row, yr) ## TotSoilCarb - - ## depth from surface to frozen layer - tdepth <- 0 - fdepth <- 0 - soiltemp <- getHdf5Data(ncT, "FMEAN_SOIL_TEMP_PY") - if (length(dim(soiltemp)) == 3) { - fdepth <- array(0, dim = dim(soiltemp)[1:2]) - tdepth <- array(0, dim = dim(soiltemp)[1:2]) - for (t in 1:dim(soiltemp)[1]) { # time - for (p in 1:dim(soiltemp)[2]) { # polygon - for (i in dim(soiltemp)[3]:2) { # depth - if (fdepth[t, p] == 0 & soiltemp[t, p, i] < 273.15 & + slzdata <- array(c(-2, -1.5, -1, -0.8, -0.6, -0.4, -0.2, -0.1, -0.05)) + } + + ## Check for what naming convention of ED2 vars we are using. May change with ED2 version. + ED2vc <- CheckED2Variables(ncT) + + ## store for later use, will only use last data + dz <- diff(slzdata) + dz <- dz[dz != 0] + + if (!is.null(ED2vc)) { + # NOTE: Take great care editing this. The order of values in `out` must + # match the order of nc_vars in put_T_values()! 
+ ## out <- add(getHdf5Data(ncT, 'TOTAL_AGB,1,row, yr) ## AbvGrndWood + out <- add(getHdf5Data(ncT, "FMEAN_BDEAD_PY"), 1, row, yr) ## AbvGrndWood + out <- add(getHdf5Data(ncT, "FMEAN_PLRESP_PY"), 2, row, yr) ## AutoResp + out <- add(-999, 3, row, yr) ## CarbPools + out <- add(getHdf5Data(ncT, "FMEAN_CAN_CO2_PY"), 4, row, yr) ## CO2CAS + out <- add(-999, 5, row, yr) ## CropYield + out <- add(getHdf5Data(ncT, "FMEAN_GPP_PY"), 6, row, yr) ## GPP + out <- add(getHdf5Data(ncT, "FMEAN_RH_PY"), 7, row, yr) ## HeteroResp + out <- add(-getHdf5Data(ncT, "FMEAN_GPP_PY") + getHdf5Data(ncT, "FMEAN_PLRESP_PY") + + getHdf5Data(ncT, "FMEAN_RH_PY"), 8, row, yr) ## NEE + out <- add( + getHdf5Data(ncT, "FMEAN_GPP_PY") - getHdf5Data(ncT, "FMEAN_PLRESP_PY"), + 9, row, yr + ) ## NPP + out <- add( + getHdf5Data(ncT, "FMEAN_RH_PY") + getHdf5Data(ncT, "FMEAN_PLRESP_PY"), + 10, row, yr + ) ## TotalResp + ## out <- add(getHdf5Data(ncT, 'BDEAD + getHdf5Data(ncT, 'BALIVE,11,row, yr) ## TotLivBiom + out <- add(-999, 11, row, yr) ## TotLivBiom + out <- add(getHdf5Data(ncT, "FAST_SOIL_C_PY") + getHdf5Data(ncT, "STRUCT_SOIL_C_PY") + + getHdf5Data(ncT, "SLOW_SOIL_C_PY"), 12, row, yr) ## TotSoilCarb + + ## depth from surface to frozen layer + tdepth <- 0 + fdepth <- 0 + soiltemp <- getHdf5Data(ncT, "FMEAN_SOIL_TEMP_PY") + if (length(dim(soiltemp)) == 3) { + fdepth <- array(0, dim = dim(soiltemp)[1:2]) + tdepth <- array(0, dim = dim(soiltemp)[1:2]) + for (t in 1:dim(soiltemp)[1]) { # time + for (p in 1:dim(soiltemp)[2]) { # polygon + for (i in dim(soiltemp)[3]:2) { # depth + if (fdepth[t, p] == 0 & soiltemp[t, p, i] < 273.15 & soiltemp[t, p, i - 1] > 273.13) { - fdepth[t, p] <- i - } - if (tdepth[t, p] == 0 & soiltemp[t, p, i] > 273.15 & + fdepth[t, p] <- i + } + if (tdepth[t, p] == 0 & soiltemp[t, p, i] > 273.15 & soiltemp[t, p, i - 1] < 273.13) { - tdepth[t, p] <- i + tdepth[t, p] <- i + } } - } - SLZ <- c(slzdata[t, ], 0) - z1 <- (SLZ[fdepth[t, p] + 1] + SLZ[fdepth[t, p]]) / 2 - z2 <- (SLZ[fdepth[t, p]] + SLZ[fdepth[t, p] - 1]) / 2 - if (fdepth[t, p] > 0) { - fdepth[t, p] <- z1 + (z2 - z1) * (273.15 - soiltemp[t, p, fdepth[t, p]]) / - (soiltemp[t, p, fdepth[t, p] - 1] - soiltemp[t, p, fdepth[t, p]]) - } - if (tdepth[t, p] > 0) { SLZ <- c(slzdata[t, ], 0) - z1 <- (SLZ[tdepth[t, p] + 1] + SLZ[tdepth[t, p]]) / 2 - z2 <- (SLZ[tdepth[t, p]] + SLZ[tdepth[t, p] - 1]) / 2 - tdepth[t, p] <- z1 + (z2 - z1) * (273.15 - soiltemp[t, p, tdepth[t, p]]) / - (soiltemp[t, p, tdepth[t, p] - 1] - soiltemp[t, p, tdepth[t, p]]) + z1 <- (SLZ[fdepth[t, p] + 1] + SLZ[fdepth[t, p]]) / 2 + z2 <- (SLZ[fdepth[t, p]] + SLZ[fdepth[t, p] - 1]) / 2 + if (fdepth[t, p] > 0) { + fdepth[t, p] <- z1 + (z2 - z1) * (273.15 - soiltemp[t, p, fdepth[t, p]]) / + (soiltemp[t, p, fdepth[t, p] - 1] - soiltemp[t, p, fdepth[t, p]]) + } + if (tdepth[t, p] > 0) { + SLZ <- c(slzdata[t, ], 0) + z1 <- (SLZ[tdepth[t, p] + 1] + SLZ[tdepth[t, p]]) / 2 + z2 <- (SLZ[tdepth[t, p]] + SLZ[tdepth[t, p] - 1]) / 2 + tdepth[t, p] <- z1 + (z2 - z1) * (273.15 - soiltemp[t, p, tdepth[t, p]]) / + (soiltemp[t, p, tdepth[t, p] - 1] - soiltemp[t, p, tdepth[t, p]]) + } } } - } - } else { - # no polygons, just time vs depth? - fdepth <- array(0, ncol(soiltemp)) - tdepth <- array(0, ncol(soiltemp)) - for (t in 1:ncol(soiltemp)) { # time - for (d in 2:nrow(soiltemp)) { # depth - if (fdepth[t] == 0 & soiltemp[d, t] < 273.15 & soiltemp[d - 1, t] > 273.13) { - fdepth[t] <- d + } else { + # no polygons, just time vs depth? 
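# For illustration only: once the loop above locates the soil layer where
# temperature crosses 273.15 K, the frozen (or thaw) depth is interpolated
# linearly between the midpoints of that layer and its neighbour. A
# single-column sketch with hypothetical layer midpoints and temperatures:
z_mid <- c(-1.5, -0.75, -0.35, -0.1) # layer midpoint depths [m], deepest first
t_soil <- c(274.5, 273.9, 272.8, 271.9) # layer temperatures [K], deepest first
i <- which(t_soil < 273.15)[1] # first layer (counting from the bottom) below freezing
frac <- (273.15 - t_soil[i]) / (t_soil[i - 1] - t_soil[i])
fdepth <- z_mid[i] + (z_mid[i - 1] - z_mid[i]) * frac
fdepth # about -0.48 m: the 273.15 K isotherm sits between the two midpoints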
+ fdepth <- array(0, ncol(soiltemp)) + tdepth <- array(0, ncol(soiltemp)) + for (t in 1:ncol(soiltemp)) { # time + for (d in 2:nrow(soiltemp)) { # depth + if (fdepth[t] == 0 & soiltemp[d, t] < 273.15 & soiltemp[d - 1, t] > 273.13) { + fdepth[t] <- d + } + if (tdepth[t] == 0 & soiltemp[d, t] > 273.15 & soiltemp[d - 1, t] < 273.13) { + tdepth[t] <- d + } } - if (tdepth[t] == 0 & soiltemp[d, t] > 273.15 & soiltemp[d - 1, t] < 273.13) { - tdepth[t] <- d + if (fdepth[t] > 0) { + SLZ <- c(slzdata, 0) + z1 <- (SLZ[fdepth[t] + 1] + SLZ[fdepth[t]]) / 2 + z2 <- (SLZ[fdepth[t]] + SLZ[fdepth[t] - 1]) / 2 + fdepth[t] <- z1 + (z2 - z1) * (273.15 - soiltemp[fdepth[t], t]) / + (soiltemp[fdepth[t] - 1, t] - soiltemp[fdepth[t], t]) + } + if (tdepth[t] > 0) { + SLZ <- c(slzdata, 0) + z1 <- (SLZ[tdepth[t] + 1] + SLZ[tdepth[t]]) / 2 + z2 <- (SLZ[tdepth[t]] + SLZ[tdepth[t] - 1]) / 2 + tdepth[t] <- z1 + (z2 - z1) * (273.15 - soiltemp[tdepth[t], t]) / + (soiltemp[tdepth[t] - 1, t] - soiltemp[tdepth[t], t]) } - } - if (fdepth[t] > 0) { - SLZ <- c(slzdata, 0) - z1 <- (SLZ[fdepth[t] + 1] + SLZ[fdepth[t]]) / 2 - z2 <- (SLZ[fdepth[t]] + SLZ[fdepth[t] - 1]) / 2 - fdepth[t] <- z1 + (z2 - z1) * (273.15 - soiltemp[fdepth[t], t]) / - (soiltemp[fdepth[t] - 1, t] - soiltemp[fdepth[t], t]) - } - if (tdepth[t] > 0) { - SLZ <- c(slzdata, 0) - z1 <- (SLZ[tdepth[t] + 1] + SLZ[tdepth[t]]) / 2 - z2 <- (SLZ[tdepth[t]] + SLZ[tdepth[t] - 1]) / 2 - tdepth[t] <- z1 + (z2 - z1) * (273.15 - soiltemp[tdepth[t], t]) / - (soiltemp[tdepth[t] - 1, t] - soiltemp[tdepth[t], t]) } } - } - - out <- add(fdepth, 13, row, yr) ## Fdepth - out <- add(getHdf5Data(ncT, "FMEAN_SFCW_DEPTH_PY"), 14, row, yr) ## SnowDepth (ED2 currently groups snow in to surface water) - out <- add(1 - getHdf5Data(ncT, "FMEAN_SFCW_FLIQ_PY"), 15, row, yr) ## SnowFrac (ED2 currently groups snow in to surface water) - out <- add(tdepth, 16, row, yr) ## Tdepth - out <- add(getHdf5Data(ncT, "FMEAN_ATM_CO2_PY"), 17, row, yr) ## CO2air - out <- add(getHdf5Data(ncT, "FMEAN_ATM_RLONG_PY"), 18, row, yr) ## Lwdown - out <- add(getHdf5Data(ncT, "FMEAN_ATM_PRSS_PY"), 19, row, yr) ## Psurf - out <- add(getHdf5Data(ncT, "FMEAN_ATM_SHV_PY"), 20, row, yr) ## Qair - out <- add(getHdf5Data(ncT, "FMEAN_PCPG_PY"), 21, row, yr) ## Rainf - ##out <- add(getHdf5Data(ncT, 'AVG_NIR_BEAM') + - ## getHdf5Data(ncT, 'AVG_NIR_DIFFUSE')+ - ## getHdf5Data(ncT, 'AVG_PAR_BEAM')+ - ## getHdf5Data(ncT, 'AVG_PAR_DIFFUSE'),22,row, yr) ## Swdown - ##out <- add(getHdf5Data(ncT, 'FMEAN_PAR_L_BEAM_PY')+ - ## getHdf5Data(ncT, 'FMEAN_PAR_L_DIFF_PY'),22,row, yr) ## Swdown - out <- add(getHdf5Data(ncT, "FMEAN_ATM_PAR_PY"), 22, row, yr) ## Swdown - out <- add(getHdf5Data(ncT, "FMEAN_ATM_TEMP_PY"), 23, row, yr) ## Tair - out <- add(getHdf5Data(ncT, "FMEAN_ATM_VELS_PY"), 24, row, yr) ## Wind - ## out <- add(getHdf5Data(ncT, 'FMEAN_ATM_RLONG_PY')-getHdf5Data(ncT, 'AVG_RLONGUP'),25,row, - ## yr) ## Lwnet - out <- add(-999, 25, row, yr) ## Lwnet - ## out <- add(getHdf5Data(ncT, 'AVG_SENSIBLE_GC') + getHdf5Data(ncT, - ## 'AVG_VAPOR_GC')*2272000,26,row, yr) ## Qg - out <- add(-999, 26, row, yr) ## Qg - ## out <- add(getHdf5Data(ncT, 'AVG_SENSIBLE_TOT'),27,row, yr) ## Qh - out <- add(getHdf5Data(ncT, "FMEAN_SENSIBLE_AC_PY"), 27, row, yr) ## Qh - out <- add(getHdf5Data(ncT, "FMEAN_VAPOR_LC_PY") + getHdf5Data(ncT, "FMEAN_VAPOR_WC_PY") + - getHdf5Data(ncT, "FMEAN_VAPOR_GC_PY") + getHdf5Data(ncT, "FMEAN_TRANSP_PY"), 28, row, yr) ## Qle - out <- add(-999, 29, row, yr) ## Swnet - out <- add(-999, 30, row, yr) ## RootMoist - out <- 
add(getHdf5Data(ncT, "FMEAN_TRANSP_PY"), 31, row, yr) ## Tveg - out <- add(getHdf5Data(ncT, "ZBAR"), 32, row, yr) ## WaterTableD - out <- add(-999, 33, row, yr) ## fPAR - ##lai <- matrix(apply(getHdf5Data(ncT, 'LAI_PFT'),1,sum,na.rm=TRUE),nrow=block) - ## out <- add(lai,34,row, yr) ## LAI****************** - ## out <- add(getHdf5Data(ncT, 'FMEAN_LAI_PY'),34,row, yr) ## LAI - no longer using FMEAN LAI - - ## OLD - to be deprecated - #laidata <- getHdf5Data(ncT,"LAI_PY") - #if(length(dim(laidata)) == 3){ - # out <- add(apply(laidata,3,sum),34,row,yr) - #} else { - # out <- add(-999,34,row, yr) - #} - - # code changes proposed by MCD, tested by SPS 20160607 - laidata <- getHdf5Data(ncT, "LAI_PY") - if (length(dim(laidata)) == 3) { - laidata <- apply(laidata, 3, sum) - out <- add(array(laidata, dim = length(laidata)), 34, row, yr) + + out <- add(fdepth, 13, row, yr) ## Fdepth + out <- add(getHdf5Data(ncT, "FMEAN_SFCW_DEPTH_PY"), 14, row, yr) ## SnowDepth (ED2 currently groups snow in to surface water) + out <- add(1 - getHdf5Data(ncT, "FMEAN_SFCW_FLIQ_PY"), 15, row, yr) ## SnowFrac (ED2 currently groups snow in to surface water) + out <- add(tdepth, 16, row, yr) ## Tdepth + out <- add(getHdf5Data(ncT, "FMEAN_ATM_CO2_PY"), 17, row, yr) ## CO2air + out <- add(getHdf5Data(ncT, "FMEAN_ATM_RLONG_PY"), 18, row, yr) ## Lwdown + out <- add(getHdf5Data(ncT, "FMEAN_ATM_PRSS_PY"), 19, row, yr) ## Psurf + out <- add(getHdf5Data(ncT, "FMEAN_ATM_SHV_PY"), 20, row, yr) ## Qair + out <- add(getHdf5Data(ncT, "FMEAN_PCPG_PY"), 21, row, yr) ## Rainf + ## out <- add(getHdf5Data(ncT, 'AVG_NIR_BEAM') + + ## getHdf5Data(ncT, 'AVG_NIR_DIFFUSE')+ + ## getHdf5Data(ncT, 'AVG_PAR_BEAM')+ + ## getHdf5Data(ncT, 'AVG_PAR_DIFFUSE'),22,row, yr) ## Swdown + ## out <- add(getHdf5Data(ncT, 'FMEAN_PAR_L_BEAM_PY')+ + ## getHdf5Data(ncT, 'FMEAN_PAR_L_DIFF_PY'),22,row, yr) ## Swdown + out <- add(getHdf5Data(ncT, "FMEAN_ATM_PAR_PY"), 22, row, yr) ## Swdown + out <- add(getHdf5Data(ncT, "FMEAN_ATM_TEMP_PY"), 23, row, yr) ## Tair + out <- add(getHdf5Data(ncT, "FMEAN_ATM_VELS_PY"), 24, row, yr) ## Wind + ## out <- add(getHdf5Data(ncT, 'FMEAN_ATM_RLONG_PY')-getHdf5Data(ncT, 'AVG_RLONGUP'),25,row, + ## yr) ## Lwnet + out <- add(-999, 25, row, yr) ## Lwnet + ## out <- add(getHdf5Data(ncT, 'AVG_SENSIBLE_GC') + getHdf5Data(ncT, + ## 'AVG_VAPOR_GC')*2272000,26,row, yr) ## Qg + out <- add(-999, 26, row, yr) ## Qg + ## out <- add(getHdf5Data(ncT, 'AVG_SENSIBLE_TOT'),27,row, yr) ## Qh + out <- add(getHdf5Data(ncT, "FMEAN_SENSIBLE_AC_PY"), 27, row, yr) ## Qh + out <- add(getHdf5Data(ncT, "FMEAN_VAPOR_LC_PY") + getHdf5Data(ncT, "FMEAN_VAPOR_WC_PY") + + getHdf5Data(ncT, "FMEAN_VAPOR_GC_PY") + getHdf5Data(ncT, "FMEAN_TRANSP_PY"), 28, row, yr) ## Qle + out <- add(-999, 29, row, yr) ## Swnet + out <- add(-999, 30, row, yr) ## RootMoist + out <- add(getHdf5Data(ncT, "FMEAN_TRANSP_PY"), 31, row, yr) ## Tveg + out <- add(getHdf5Data(ncT, "ZBAR"), 32, row, yr) ## WaterTableD + out <- add(-999, 33, row, yr) ## fPAR + ## lai <- matrix(apply(getHdf5Data(ncT, 'LAI_PFT'),1,sum,na.rm=TRUE),nrow=block) + ## out <- add(lai,34,row, yr) ## LAI****************** + ## out <- add(getHdf5Data(ncT, 'FMEAN_LAI_PY'),34,row, yr) ## LAI - no longer using FMEAN LAI + + ## OLD - to be deprecated + # laidata <- getHdf5Data(ncT,"LAI_PY") + # if(length(dim(laidata)) == 3){ + # out <- add(apply(laidata,3,sum),34,row,yr) + # } else { + # out <- add(-999,34,row, yr) + # } + + # code changes proposed by MCD, tested by SPS 20160607 + laidata <- getHdf5Data(ncT, "LAI_PY") + if 
(length(dim(laidata)) == 3) { + laidata <- apply(laidata, 3, sum) + out <- add(array(laidata, dim = length(laidata)), 34, row, yr) + } else { + out <- add(-999, 34, row, yr) + } + + ## fliq <- sum(getHdf5Data(ncT, 'AVG_SOIL_FRACLIQ')*dz)/-min(z) + fliq <- NA # getHdf5Data(ncT, 'FMEAN_SFCW_FLIQ_PY') + out <- add(1 - fliq, 35, row, yr) ## SMFrozFrac + out <- add(fliq, 36, row, yr) ## SMLiqFrac + ## This needs to be soil wetness, i.e. multilple levels deep + out <- add(getHdf5Data(ncT, "FMEAN_SOIL_WATER_PY"), 37, row, yr) ## SoilWater ********** + ## out <- add(sum(soiltemp*dz)/-min(z),38) ## SoilTemp + out <- add(soiltemp, 38, row, yr) ## SoilTemp + out <- add(-999, 39, row, yr) ## SoilWet + out <- add(getHdf5Data(ncT, "FMEAN_ALBEDO_PY"), 40, row, yr) ## Albedo + out <- add(getHdf5Data(ncT, "FMEAN_SFCW_TEMP_PY"), 41, row, yr) ## SnowT (ED2 currently groups snow in to surface water) + out <- add(getHdf5Data(ncT, "FMEAN_SFCW_MASS_PY"), 42, row, yr) ## SWE (ED2 currently groups snow in to surface water) + out <- add(getHdf5Data(ncT, "FMEAN_LEAF_TEMP_PY"), 43, row, yr) ## VegT + out <- add( + getHdf5Data(ncT, "FMEAN_VAPOR_LC_PY") + getHdf5Data(ncT, "FMEAN_VAPOR_WC_PY") + + getHdf5Data(ncT, "FMEAN_VAPOR_GC_PY") + getHdf5Data(ncT, "FMEAN_TRANSP_PY"), 44, + row, yr + ) ## Evap + out <- add(getHdf5Data(ncT, "FMEAN_QRUNOFF_PY"), 45, row, yr) ## Qs + out <- add(getHdf5Data(ncT, "BASEFLOW"), 46, row, yr) ## Qsb + + out <- add(getHdf5Data(ncT, "FMEAN_ROOT_RESP_PY") + getHdf5Data(ncT, "FMEAN_ROOT_GROWTH_RESP_PY") + + getHdf5Data(ncT, "FMEAN_RH_PY"), 47, row, yr) ## SoilResp + out$SLZ <- slzdata } else { - out <- add(-999, 34, row, yr) - } - - ##fliq <- sum(getHdf5Data(ncT, 'AVG_SOIL_FRACLIQ')*dz)/-min(z) - fliq <- NA #getHdf5Data(ncT, 'FMEAN_SFCW_FLIQ_PY') - out <- add(1 - fliq, 35, row, yr) ## SMFrozFrac - out <- add(fliq, 36, row, yr) ## SMLiqFrac - ## This needs to be soil wetness, i.e. 
multilple levels deep - out <- add(getHdf5Data(ncT, "FMEAN_SOIL_WATER_PY"), 37, row, yr) ## SoilWater ********** - ## out <- add(sum(soiltemp*dz)/-min(z),38) ## SoilTemp - out <- add(soiltemp, 38, row, yr) ## SoilTemp - out <- add(-999, 39, row, yr) ## SoilWet - out <- add(getHdf5Data(ncT, "FMEAN_ALBEDO_PY"), 40, row, yr) ## Albedo - out <- add(getHdf5Data(ncT, "FMEAN_SFCW_TEMP_PY"), 41, row, yr) ## SnowT (ED2 currently groups snow in to surface water) - out <- add(getHdf5Data(ncT, "FMEAN_SFCW_MASS_PY"), 42, row, yr) ## SWE (ED2 currently groups snow in to surface water) - out <- add(getHdf5Data(ncT, "FMEAN_LEAF_TEMP_PY"), 43, row, yr) ## VegT - out <- add(getHdf5Data(ncT, "FMEAN_VAPOR_LC_PY") + getHdf5Data(ncT, "FMEAN_VAPOR_WC_PY") + - getHdf5Data(ncT, "FMEAN_VAPOR_GC_PY") + getHdf5Data(ncT, "FMEAN_TRANSP_PY"), 44, - row, yr) ## Evap - out <- add(getHdf5Data(ncT, "FMEAN_QRUNOFF_PY"), 45, row, yr) ## Qs - out <- add(getHdf5Data(ncT, "BASEFLOW"), 46, row, yr) ## Qsb - - out <- add(getHdf5Data(ncT, "FMEAN_ROOT_RESP_PY") + getHdf5Data(ncT, "FMEAN_ROOT_GROWTH_RESP_PY") + - getHdf5Data(ncT, "FMEAN_RH_PY"), 47, row, yr) ## SoilResp - out$SLZ <- slzdata - - } else { - ## out <- add(getHdf5Data(ncT, 'TOTAL_AGB,1,row, yr) ## AbvGrndWood - out <- add(getHdf5Data(ncT, "AVG_BDEAD"), 1, row, yr) ## AbvGrndWood - out <- add(getHdf5Data(ncT, "AVG_PLANT_RESP"), 2, row, yr) ## AutoResp - out <- add(-999, 3, row, yr) ## CarbPools - out <- add(getHdf5Data(ncT, "AVG_CO2CAN"), 4, row, yr) ## CO2CAS - out <- add(-999, 5, row, yr) ## CropYield - out <- add(getHdf5Data(ncT, "AVG_GPP"), 6, row, yr) ## GPP - out <- add(getHdf5Data(ncT, "AVG_HTROPH_RESP"), 7, row, yr) ## HeteroResp - out <- add(-getHdf5Data(ncT, "AVG_GPP") + getHdf5Data(ncT, "AVG_PLANT_RESP") + getHdf5Data(ncT, - "AVG_HTROPH_RESP"), 8, row, yr) ## NEE - out <- add(getHdf5Data(ncT, "AVG_GPP") - getHdf5Data(ncT, "AVG_PLANT_RESP"), 9, row, - yr) ## NPP - out <- add(getHdf5Data(ncT, "AVG_HTROPH_RESP") + getHdf5Data(ncT, "AVG_PLANT_RESP"), - 10, row, yr) ## TotalResp - ## out <- add(getHdf5Data(ncT, 'AVG_BDEAD + getHdf5Data(ncT, 'AVG_BALIVE,11,row, yr) ## - ## TotLivBiom - out <- add(-999, 11, row, yr) ## TotLivBiom - out <- add(getHdf5Data(ncT, "AVG_FSC") + getHdf5Data(ncT, "AVG_STSC") + - getHdf5Data(ncT, "AVG_SSC"), 12, row, yr) ## TotSoilCarb - ## depth from surface to frozen layer - tdepth <- 0 - fdepth <- 0 - soiltemp <- getHdf5Data(ncT, "AVG_SOIL_TEMP") - if (length(dim(soiltemp)) == 3) { - fdepth <- array(0, dim = dim(soiltemp)[1:2]) - tdepth <- array(0, dim = dim(soiltemp)[1:2]) - for (t in 1:dim(soiltemp)[1]) { # time - for (p in 1:dim(soiltemp)[2]) { # polygon - for (i in dim(soiltemp)[3]:2) { # depth - if (fdepth[t, p] == 0 & soiltemp[t, p, i] < 273.15 & + ## out <- add(getHdf5Data(ncT, 'TOTAL_AGB,1,row, yr) ## AbvGrndWood + out <- add(getHdf5Data(ncT, "AVG_BDEAD"), 1, row, yr) ## AbvGrndWood + out <- add(getHdf5Data(ncT, "AVG_PLANT_RESP"), 2, row, yr) ## AutoResp + out <- add(-999, 3, row, yr) ## CarbPools + out <- add(getHdf5Data(ncT, "AVG_CO2CAN"), 4, row, yr) ## CO2CAS + out <- add(-999, 5, row, yr) ## CropYield + out <- add(getHdf5Data(ncT, "AVG_GPP"), 6, row, yr) ## GPP + out <- add(getHdf5Data(ncT, "AVG_HTROPH_RESP"), 7, row, yr) ## HeteroResp + out <- add(-getHdf5Data(ncT, "AVG_GPP") + getHdf5Data(ncT, "AVG_PLANT_RESP") + getHdf5Data( + ncT, + "AVG_HTROPH_RESP" + ), 8, row, yr) ## NEE + out <- add( + getHdf5Data(ncT, "AVG_GPP") - getHdf5Data(ncT, "AVG_PLANT_RESP"), 9, row, + yr + ) ## NPP + out <- add( + getHdf5Data(ncT, 
"AVG_HTROPH_RESP") + getHdf5Data(ncT, "AVG_PLANT_RESP"), + 10, row, yr + ) ## TotalResp + ## out <- add(getHdf5Data(ncT, 'AVG_BDEAD + getHdf5Data(ncT, 'AVG_BALIVE,11,row, yr) ## + ## TotLivBiom + out <- add(-999, 11, row, yr) ## TotLivBiom + out <- add(getHdf5Data(ncT, "AVG_FSC") + getHdf5Data(ncT, "AVG_STSC") + + getHdf5Data(ncT, "AVG_SSC"), 12, row, yr) ## TotSoilCarb + ## depth from surface to frozen layer + tdepth <- 0 + fdepth <- 0 + soiltemp <- getHdf5Data(ncT, "AVG_SOIL_TEMP") + if (length(dim(soiltemp)) == 3) { + fdepth <- array(0, dim = dim(soiltemp)[1:2]) + tdepth <- array(0, dim = dim(soiltemp)[1:2]) + for (t in 1:dim(soiltemp)[1]) { # time + for (p in 1:dim(soiltemp)[2]) { # polygon + for (i in dim(soiltemp)[3]:2) { # depth + if (fdepth[t, p] == 0 & soiltemp[t, p, i] < 273.15 & soiltemp[t, p, i - 1] > 273.13) { - fdepth[t, p] <- i - } - if (tdepth[t, p] == 0 & soiltemp[t, p, i] > 273.15 & + fdepth[t, p] <- i + } + if (tdepth[t, p] == 0 & soiltemp[t, p, i] > 273.15 & soiltemp[t, p, i - 1] < 273.13) { - tdepth[t, p] <- i + tdepth[t, p] <- i + } } - } - SLZ <- c(slzdata[t, ], 0) - z1 <- (SLZ[fdepth[t, p] + 1] + SLZ[fdepth[t, p]]) / 2 - z2 <- (SLZ[fdepth[t, p]] + SLZ[fdepth[t, p] - 1]) / 2 - if (fdepth[t, p] > 0) { - fdepth[t, p] <- z1 + (z2 - z1) * (273.15 - soiltemp[t, p, fdepth[t, p]]) / - (soiltemp[t, p, fdepth[t, p] - 1] - soiltemp[t, p, fdepth[t, p]]) - } - if (tdepth[t, p] > 0) { SLZ <- c(slzdata[t, ], 0) - z1 <- (SLZ[tdepth[t, p] + 1] + SLZ[tdepth[t, p]]) / 2 - z2 <- (SLZ[tdepth[t, p]] + SLZ[tdepth[t, p] - 1]) / 2 - tdepth[t, p] <- z1 + (z2 - z1) * (273.15 - soiltemp[t, p, tdepth[t, p]]) / - (soiltemp[t, p, tdepth[t, p] - 1] - soiltemp[t, p, tdepth[t, p]]) + z1 <- (SLZ[fdepth[t, p] + 1] + SLZ[fdepth[t, p]]) / 2 + z2 <- (SLZ[fdepth[t, p]] + SLZ[fdepth[t, p] - 1]) / 2 + if (fdepth[t, p] > 0) { + fdepth[t, p] <- z1 + (z2 - z1) * (273.15 - soiltemp[t, p, fdepth[t, p]]) / + (soiltemp[t, p, fdepth[t, p] - 1] - soiltemp[t, p, fdepth[t, p]]) + } + if (tdepth[t, p] > 0) { + SLZ <- c(slzdata[t, ], 0) + z1 <- (SLZ[tdepth[t, p] + 1] + SLZ[tdepth[t, p]]) / 2 + z2 <- (SLZ[tdepth[t, p]] + SLZ[tdepth[t, p] - 1]) / 2 + tdepth[t, p] <- z1 + (z2 - z1) * (273.15 - soiltemp[t, p, tdepth[t, p]]) / + (soiltemp[t, p, tdepth[t, p] - 1] - soiltemp[t, p, tdepth[t, p]]) + } } } - } - } else { - # no polygons, just time vs depth? - fdepth <- array(0, ncol(soiltemp)) - tdepth <- array(0, ncol(soiltemp)) - for (t in 1:ncol(soiltemp)) { # time - for (d in 2:nrow(soiltemp)) { # depth - if (fdepth[t] == 0 & soiltemp[d, t] < 273.15 & soiltemp[d - 1, t] > 273.13) { - fdepth[t] <- d + } else { + # no polygons, just time vs depth? 
+ fdepth <- array(0, ncol(soiltemp)) + tdepth <- array(0, ncol(soiltemp)) + for (t in 1:ncol(soiltemp)) { # time + for (d in 2:nrow(soiltemp)) { # depth + if (fdepth[t] == 0 & soiltemp[d, t] < 273.15 & soiltemp[d - 1, t] > 273.13) { + fdepth[t] <- d + } + if (tdepth[t] == 0 & soiltemp[d, t] > 273.15 & soiltemp[d - 1, t] < 273.13) { + tdepth[t] <- d + } } - if (tdepth[t] == 0 & soiltemp[d, t] > 273.15 & soiltemp[d - 1, t] < 273.13) { - tdepth[t] <- d + if (fdepth[t] > 0) { + SLZ <- c(slzdata, 0) + z1 <- (SLZ[fdepth[t] + 1] + SLZ[fdepth[t]]) / 2 + z2 <- (SLZ[fdepth[t]] + SLZ[fdepth[t] - 1]) / 2 + fdepth[t] <- z1 + (z2 - z1) * (273.15 - soiltemp[fdepth[t], t]) / + (soiltemp[fdepth[t] - 1, t] - soiltemp[fdepth[t], t]) + } + if (tdepth[t] > 0) { + SLZ <- c(slzdata, 0) + z1 <- (SLZ[tdepth[t] + 1] + SLZ[tdepth[t]]) / 2 + z2 <- (SLZ[tdepth[t]] + SLZ[tdepth[t] - 1]) / 2 + tdepth[t] <- z1 + (z2 - z1) * (273.15 - soiltemp[tdepth[t], t]) / + (soiltemp[tdepth[t] - 1, t] - soiltemp[tdepth[t], t]) } - } - if (fdepth[t] > 0) { - SLZ <- c(slzdata, 0) - z1 <- (SLZ[fdepth[t] + 1] + SLZ[fdepth[t]]) / 2 - z2 <- (SLZ[fdepth[t]] + SLZ[fdepth[t] - 1]) / 2 - fdepth[t] <- z1 + (z2 - z1) * (273.15 - soiltemp[fdepth[t], t]) / - (soiltemp[fdepth[t] - 1, t] - soiltemp[fdepth[t], t]) - } - if (tdepth[t] > 0) { - SLZ <- c(slzdata, 0) - z1 <- (SLZ[tdepth[t] + 1] + SLZ[tdepth[t]]) / 2 - z2 <- (SLZ[tdepth[t]] + SLZ[tdepth[t] - 1]) / 2 - tdepth[t] <- z1 + (z2 - z1) * (273.15 - soiltemp[tdepth[t], t]) / - (soiltemp[tdepth[t] - 1, t] - soiltemp[tdepth[t], t]) } } + + out <- add(fdepth, 13, row, yr) ## Fdepth + out <- add(getHdf5Data(ncT, "AVG_SNOWDEPTH"), 14, row, yr) ## SnowDepth + out <- add(1 - getHdf5Data(ncT, "AVG_SNOWFRACLIQ"), 15, row, yr) ## SnowFrac + out <- add(tdepth, 16, row, yr) ## Tdepth + out <- add(getHdf5Data(ncT, "AVG_ATM_CO2"), 17, row, yr) ## CO2air + out <- add(getHdf5Data(ncT, "AVG_RLONG"), 18, row, yr) ## Lwdown + out <- add(getHdf5Data(ncT, "AVG_PRSS"), 19, row, yr) ## Psurf + out <- add(getHdf5Data(ncT, "AVG_ATM_SHV"), 20, row, yr) ## Qair + out <- add(getHdf5Data(ncT, "AVG_PCPG"), 21, row, yr) ## Rainf + ## out <- add(getHdf5Data(ncT, 'AVG_NIR_BEAM') + + ## getHdf5Data(ncT, 'AVG_NIR_DIFFUSE')+ + ## getHdf5Data(ncT, 'AVG_PAR_BEAM')+ + ## getHdf5Data(ncT, 'AVG_PAR_DIFFUSE'),22,row, yr) ## Swdown + out <- add( + getHdf5Data(ncT, "AVG_PAR_BEAM") + getHdf5Data(ncT, "AVG_PAR_DIFFUSE"), + 22, row, yr + ) ## Swdown + out <- add(getHdf5Data(ncT, "AVG_ATM_TMP"), 23, row, yr) ## Tair + out <- add(getHdf5Data(ncT, "AVG_VELS"), 24, row, yr) ## Wind + ## out <- add(getHdf5Data(ncT, 'AVG_RLONG')-getHdf5Data(ncT, 'AVG_RLONGUP'),25,row, yr) ## Lwnet + out <- add(-999, 25, row, yr) ## Lwnet + ## out <- add(getHdf5Data(ncT, 'AVG_SENSIBLE_GC') + getHdf5Data(ncT, + ## 'AVG_VAPOR_GC')*2272000,26,row, yr) ## Qg + out <- add(-999, 26, row, yr) ## Qg + ## out <- add(getHdf5Data(ncT, 'AVG_SENSIBLE_TOT'),27,row, yr) ## Qh + out <- add(getHdf5Data(ncT, "AVG_SENSIBLE_AC"), 27, row, yr) ## Qh + out <- add(getHdf5Data(ncT, "AVG_EVAP"), 28, row, yr) ## Qle + out <- add(-999, 29, row, yr) ## Swnet + out <- add(-999, 30, row, yr) ## RootMoist + out <- add(getHdf5Data(ncT, "AVG_TRANSP"), 31, row, yr) ## Tveg + out <- add(getHdf5Data(ncT, "ZBAR"), 32, row, yr) ## WaterTableD + out <- add(-999, 33, row, yr) ## fPAR + ## lai <- matrix(apply(getHdf5Data(ncT, 'LAI_PFT'),1,sum,na.rm=TRUE),nrow=block) + ## out <- add(lai,34,row, yr) ## LAI****************** + out <- add(getHdf5Data(ncT, "LAI"), 34, row, yr) ## LAI + ## fliq <- 
sum(getHdf5Data(ncT, 'AVG_SOIL_FRACLIQ')*dz)/-min(z) + fliq <- NA # getHdf5Data(ncT, 'AVG_SOIL_FRACLIQ') + out <- add(1 - fliq, 35, row, yr) ## SMFrozFrac + out <- add(fliq, 36, row, yr) ## SMLiqFrac + ## This needs to be soil wetness, i.e. multilple levels deep + out <- add(getHdf5Data(ncT, "AVG_SOIL_WATER"), 37, row, yr) ## SoilWater ********** + ## out <- add(sum(soiltemp*dz)/-min(z),38) ## SoilTemp + out <- add(soiltemp, 38, row, yr) ## SoilTemp + out <- add(-999, 39, row, yr) ## SoilWet + out <- add(getHdf5Data(ncT, "AVG_ALBEDO"), 40, row, yr) ## Albedo + out <- add(getHdf5Data(ncT, "AVG_SNOWTEMP"), 41, row, yr) ## SnowT + out <- add(getHdf5Data(ncT, "AVG_SNOWMASS"), 42, row, yr) ## SWE + out <- add(getHdf5Data(ncT, "AVG_VEG_TEMP"), 43, row, yr) ## VegT + out <- add( + getHdf5Data(ncT, "AVG_EVAP") + getHdf5Data(ncT, "AVG_TRANSP"), 44, row, + yr + ) ## Evap + out <- add(getHdf5Data(ncT, "AVG_RUNOFF"), 45, row, yr) ## Qs + out <- add(getHdf5Data(ncT, "BASEFLOW"), 46, row, yr) ## Qsb + out <- add(getHdf5Data(ncT, "AVG_ROOT_RESP") + getHdf5Data(ncT, "AVG_ROOT_MAINTENANCE") + + getHdf5Data(ncT, "AVG_HTROPH_RESP"), 47, row, yr) ## SoilResp + out$SLZ <- slzdata } - - out <- add(fdepth, 13, row, yr) ## Fdepth - out <- add(getHdf5Data(ncT, "AVG_SNOWDEPTH"), 14, row, yr) ## SnowDepth - out <- add(1 - getHdf5Data(ncT, "AVG_SNOWFRACLIQ"), 15, row, yr) ## SnowFrac - out <- add(tdepth, 16, row, yr) ## Tdepth - out <- add(getHdf5Data(ncT, "AVG_ATM_CO2"), 17, row, yr) ## CO2air - out <- add(getHdf5Data(ncT, "AVG_RLONG"), 18, row, yr) ## Lwdown - out <- add(getHdf5Data(ncT, "AVG_PRSS"), 19, row, yr) ## Psurf - out <- add(getHdf5Data(ncT, "AVG_ATM_SHV"), 20, row, yr) ## Qair - out <- add(getHdf5Data(ncT, "AVG_PCPG"), 21, row, yr) ## Rainf - ##out <- add(getHdf5Data(ncT, 'AVG_NIR_BEAM') + - ## getHdf5Data(ncT, 'AVG_NIR_DIFFUSE')+ - ## getHdf5Data(ncT, 'AVG_PAR_BEAM')+ - ## getHdf5Data(ncT, 'AVG_PAR_DIFFUSE'),22,row, yr) ## Swdown - out <- add(getHdf5Data(ncT, "AVG_PAR_BEAM") + getHdf5Data(ncT, "AVG_PAR_DIFFUSE"), - 22, row, yr) ## Swdown - out <- add(getHdf5Data(ncT, "AVG_ATM_TMP"), 23, row, yr) ## Tair - out <- add(getHdf5Data(ncT, "AVG_VELS"), 24, row, yr) ## Wind - ##out <- add(getHdf5Data(ncT, 'AVG_RLONG')-getHdf5Data(ncT, 'AVG_RLONGUP'),25,row, yr) ## Lwnet - out <- add(-999, 25, row, yr) ## Lwnet - ## out <- add(getHdf5Data(ncT, 'AVG_SENSIBLE_GC') + getHdf5Data(ncT, - ## 'AVG_VAPOR_GC')*2272000,26,row, yr) ## Qg - out <- add(-999, 26, row, yr) ## Qg - ## out <- add(getHdf5Data(ncT, 'AVG_SENSIBLE_TOT'),27,row, yr) ## Qh - out <- add(getHdf5Data(ncT, "AVG_SENSIBLE_AC"), 27, row, yr) ## Qh - out <- add(getHdf5Data(ncT, "AVG_EVAP"), 28, row, yr) ## Qle - out <- add(-999, 29, row, yr) ## Swnet - out <- add(-999, 30, row, yr) ## RootMoist - out <- add(getHdf5Data(ncT, "AVG_TRANSP"), 31, row, yr) ## Tveg - out <- add(getHdf5Data(ncT, "ZBAR"), 32, row, yr) ## WaterTableD - out <- add(-999, 33, row, yr) ## fPAR - ##lai <- matrix(apply(getHdf5Data(ncT, 'LAI_PFT'),1,sum,na.rm=TRUE),nrow=block) - ## out <- add(lai,34,row, yr) ## LAI****************** - out <- add(getHdf5Data(ncT, "LAI"), 34, row, yr) ## LAI - ##fliq <- sum(getHdf5Data(ncT, 'AVG_SOIL_FRACLIQ')*dz)/-min(z) - fliq <- NA #getHdf5Data(ncT, 'AVG_SOIL_FRACLIQ') - out <- add(1 - fliq, 35, row, yr) ## SMFrozFrac - out <- add(fliq, 36, row, yr) ## SMLiqFrac - ## This needs to be soil wetness, i.e. 
multilple levels deep - out <- add(getHdf5Data(ncT, "AVG_SOIL_WATER"), 37, row, yr) ## SoilWater ********** - ## out <- add(sum(soiltemp*dz)/-min(z),38) ## SoilTemp - out <- add(soiltemp, 38, row, yr) ## SoilTemp - out <- add(-999, 39, row, yr) ## SoilWet - out <- add(getHdf5Data(ncT, "AVG_ALBEDO"), 40, row, yr) ## Albedo - out <- add(getHdf5Data(ncT, "AVG_SNOWTEMP"), 41, row, yr) ## SnowT - out <- add(getHdf5Data(ncT, "AVG_SNOWMASS"), 42, row, yr) ## SWE - out <- add(getHdf5Data(ncT, "AVG_VEG_TEMP"), 43, row, yr) ## VegT - out <- add(getHdf5Data(ncT, "AVG_EVAP") + getHdf5Data(ncT, "AVG_TRANSP"), 44, row, - yr) ## Evap - out <- add(getHdf5Data(ncT, "AVG_RUNOFF"), 45, row, yr) ## Qs - out <- add(getHdf5Data(ncT, "BASEFLOW"), 46, row, yr) ## Qsb - out <- add(getHdf5Data(ncT, "AVG_ROOT_RESP") + getHdf5Data(ncT, "AVG_ROOT_MAINTENANCE") + - getHdf5Data(ncT, "AVG_HTROPH_RESP"), 47, row, yr) ## SoilResp - out$SLZ <- slzdata - } - - ncdf4::nc_close(ncT) - return(out) - -} # read_T_files + ncdf4::nc_close(ncT) + + return(out) + } # read_T_files -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' Function for put -T- values to nc_var list -##' +##' ##' @param yr the year being processed ##' @param nc_var a list (potentially empty) for `ncvar4` objects to be added to ##' @param var_list list returned by [read_E_files()] @@ -733,209 +757,291 @@ put_T_values <- begins, ends, out) { - - if(!missing(begins)) { - warning("`begins` is deprecated, using `start_date` instead") - start_date <- begins - } - if(!missing(ends)) { - warning("`ends` is deprecated, using `end_date` instead") - end_date <- ends - } - if(!missing(out)) { - warning("`out` is deprecated, using `var_list` instead") - var_list <- out - } - s <- length(nc_var) - - # create out list to be modified - out <- var_list - - ## Conversion factor for umol C -> kg C - Mc <- 12.017 #molar mass of C, g/mol - umol2kg_C <- Mc * PEcAn.utils::ud_convert(1, "umol", "mol") * PEcAn.utils::ud_convert(1, "g", "kg") - yr2s <- PEcAn.utils::ud_convert(1, "s", "yr") - - # TODO - remove this function and replace with ifelse statements inline below (SPS) - conversion <- function(col, mult) { - ## make sure only to convert those values that are not -999 - out[[col]][out[[col]] != -999] <- out[[col]][out[[col]] != -999] * mult - return(out) - } - - checkTemp <- function(col) { - out[[col]][out[[col]] == 0] <- -999 - return(out) - } - - - # ----- define ncdf dimensions - #### setup output time and time bounds - ## Create a date vector that contains each day of the model run for each output year (e.g. "2001-07-15", "2001-07-16"....) 
- ## and which is the correct length for each full or partial year - output_date_vector <- output_date_vector <- - seq( - lubridate::ymd(start_date), - lubridate::ymd(end_date), - by = "day", + if (!missing(begins)) { + warning("`begins` is deprecated, using `start_date` instead") + start_date <- begins + } + if (!missing(ends)) { + warning("`ends` is deprecated, using `end_date` instead") + end_date <- ends + } + if (!missing(out)) { + warning("`out` is deprecated, using `var_list` instead") + var_list <- out + } + s <- length(nc_var) + + # create out list to be modified + out <- var_list + + ## Conversion factor for umol C -> kg C + Mc <- 12.017 # molar mass of C, g/mol + umol2kg_C <- Mc * PEcAn.utils::ud_convert(1, "umol", "mol") * PEcAn.utils::ud_convert(1, "g", "kg") + yr2s <- PEcAn.utils::ud_convert(1, "s", "yr") + + # TODO - remove this function and replace with ifelse statements inline below (SPS) + conversion <- function(col, mult) { + ## make sure only to convert those values that are not -999 + out[[col]][out[[col]] != -999] <- out[[col]][out[[col]] != -999] * mult + return(out) + } + + checkTemp <- function(col) { + out[[col]][out[[col]] == 0] <- -999 + return(out) + } + + + # ----- define ncdf dimensions + #### setup output time and time bounds + ## Create a date vector that contains each day of the model run for each output year (e.g. "2001-07-15", "2001-07-16"....) + ## and which is the correct length for each full or partial year + output_date_vector <- output_date_vector <- + seq( + lubridate::ymd(start_date), + lubridate::ymd(end_date), + by = "day", + ) + ## Calculate model output frequency per day (e.g. 0.02083333) + model_timestep_s <- length(output_date_vector) / length(out[[1]]) + iter_per_day <- round(1 / model_timestep_s) ## e.g. 48 + ## Create a timesteps vector (e.g. 0.00000000 0.02083333 0.04166667 0.06250000 0.08333333 0.10416667 ...) + timesteps <- utils::head(seq(0, 1, by = 1 / iter_per_day), -1) + ## Create a new date vector where each day is repeated by iter_per_day + ## (e.g. "2001-07-15" "2001-07-15" "2001-07-15" "2001-07-15" "2001-07-15" ...) + sub_dates <- rep(output_date_vector, each = iter_per_day) + ## Generate a vector of julian dates from sub_dates (e.g. 196 196 196 196 196 196 ...) + jdates <- lubridate::yday(sub_dates) + ## Create a fractional DOY vector using jdates, subtract by 1 to be 0 index + ## (e.g. 195.0000 195.0208 195.0417 195.0625 195.0833 195.1042) + ## which yields, e.g. as.Date(195.0000,origin="2001-01-01"), "2001-07-15" + tvals <- (jdates + timesteps) - 1 + ## Create time bounds to populate time_bounds variable + bounds <- array(data = NA, dim = c(length(tvals), 2)) + bounds[, 1] <- tvals + bounds[, 2] <- bounds[, 1] + (1 / iter_per_day) + bounds <- round(bounds, 4) # create time bounds for each timestep in t, t+1; t+1, t+2... format + #### + + t <- ncdf4::ncdim_def( + name = "time", units = paste0("days since ", yr, "-01-01 00:00:00"), + vals = tvals, + calendar = "standard", unlim = TRUE ) - ## Calculate model output frequency per day (e.g. 0.02083333) - model_timestep_s <- length(output_date_vector) / length(out[[1]]) - iter_per_day <- round(1 / model_timestep_s) ## e.g. 48 - ## Create a timesteps vector (e.g. 0.00000000 0.02083333 0.04166667 0.06250000 0.08333333 0.10416667 ...) - timesteps <- utils::head(seq(0, 1, by = 1 / iter_per_day), -1) - ## Create a new date vector where each day is repeated by iter_per_day - ## (e.g. "2001-07-15" "2001-07-15" "2001-07-15" "2001-07-15" "2001-07-15" ...) 
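# For illustration only: a minimal sketch of the fractional-day time axis
# built above, using a hypothetical two-day window at four timesteps per day
# (the patch derives iter_per_day from the length of the output instead):
dates <- seq(as.Date("2001-07-15"), as.Date("2001-07-16"), by = "day")
iter_per_day <- 4
timesteps <- utils::head(seq(0, 1, by = 1 / iter_per_day), -1) # 0.00 0.25 0.50 0.75
jdates <- lubridate::yday(rep(dates, each = iter_per_day)) # 196 196 196 196 197 ...
tvals <- (jdates + timesteps) - 1 # 0-indexed days since Jan 1: 195.00 195.25 ...
bounds <- cbind(tvals, tvals + 1 / iter_per_day) # interval endpoints per timestep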
- sub_dates <- rep(output_date_vector, each = iter_per_day) - ## Generate a vector of julian dates from sub_dates (e.g. 196 196 196 196 196 196 ...) - jdates <- lubridate::yday(sub_dates) - ## Create a fractional DOY vector using jdates, subtract by 1 to be 0 index - ## (e.g. 195.0000 195.0208 195.0417 195.0625 195.0833 195.1042) - ## which yields, e.g. as.Date(195.0000,origin="2001-01-01"), "2001-07-15" - tvals <- (jdates + timesteps) - 1 - ## Create time bounds to populate time_bounds variable - bounds <- array(data = NA, dim = c(length(tvals), 2)) - bounds[, 1] <- tvals - bounds[, 2] <- bounds[, 1] + (1 / iter_per_day) - bounds <- round(bounds, 4) # create time bounds for each timestep in t, t+1; t+1, t+2... format - #### - - t <- ncdf4::ncdim_def(name = "time", units = paste0("days since ", yr, "-01-01 00:00:00"), - vals = tvals, - calendar = "standard", unlim = TRUE) - time_interval <- ncdf4::ncdim_def(name = "hist_interval", - longname = "history time interval endpoint dimensions", - vals = 1:2, units = "") - - slzdata <- out$SLZ - dz <- diff(slzdata) - dz <- dz[dz != 0] - - zg <- ncdf4::ncdim_def("SoilLayerMidpoint", "meters", c(slzdata[1:length(dz)] + dz / 2, 0)) - - # currently unused - #dims <- list(lon = lon, lat = lat, time = t) - #dimsz <- list(lon = lon, lat = lat, time = t, nsoil = zg) - - # ----- fill list - - out <- conversion(1, PEcAn.utils::ud_convert(1, "t ha-1", "kg m-2")) ## tC/ha -> kg/m2 - nc_var[[s + 1]] <- ncdf4::ncvar_def("AbvGrndWood", units = "kg C m-2", dim = list(lon, lat, t), missval = -999, - longname = "Above ground woody biomass") - out <- conversion(2, umol2kg_C) ## umol/m2 s-1 -> kg/m2 s-1 - nc_var[[s + 2]] <- ncdf4::ncvar_def("AutoResp", units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, - longname = "Autotrophic Respiration") - nc_var[[s + 3]] <- ncdf4::ncvar_def("CarbPools", units = "kg C m-2", dim = list(lon, lat, t), missval = -999, - longname = "Size of each carbon pool") - nc_var[[s + 4]] <- ncdf4::ncvar_def("CO2CAS", units = "ppmv", dim = list(lon, lat, t), missval = -999, - longname = "CO2CAS") - nc_var[[s + 5]] <- ncdf4::ncvar_def("CropYield", units = "kg m-2", dim = list(lon, lat, t), missval = -999, - longname = "Crop Yield") - out <- conversion(6, yr2s) ## kg C m-2 yr-1 -> kg C m-2 s-1 - nc_var[[s + 6]] <- ncdf4::ncvar_def("GPP", units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, - longname = "Gross Primary Productivity") - out <- conversion(7, yr2s) ## kg C m-2 yr-1 -> kg C m-2 s-1 - nc_var[[s + 7]] <- ncdf4::ncvar_def("HeteroResp", units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, - longname = "Heterotrophic Respiration") - out <- conversion(8, yr2s) ## kg C m-2 yr-1 -> kg C m-2 s-1 - nc_var[[s + 8]] <- ncdf4::ncvar_def("NEE", units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, - longname = "Net Ecosystem Exchange") - out <- conversion(9, yr2s) ## kg C m-2 yr-1 -> kg C m-2 s-1 - nc_var[[s + 9]] <- ncdf4::ncvar_def("NPP", units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, - longname = "Net Primary Productivity") - out <- conversion(10, yr2s) ## kg C m-2 yr-1 -> kg C m-2 s-1 - nc_var[[s + 10]] <- ncdf4::ncvar_def("TotalResp", units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, - longname = "Total Respiration") - nc_var[[s + 11]] <- ncdf4::ncvar_def("TotLivBiom", units = "kg C m-2", dim = list(lon, lat, t), missval = -999, - longname = "Total living biomass") - nc_var[[s + 12]] <- ncdf4::ncvar_def("TotSoilCarb", units = "kg C m-2", dim = list(lon, lat, t), 
missval = -999, - longname = "Total Soil Carbon") - nc_var[[s + 13]] <- ncdf4::ncvar_def("Fdepth", units = "m", dim = list(lon, lat, t), missval = -999, - longname = "Frozen Thickness") - nc_var[[s + 14]] <- ncdf4::ncvar_def("SnowDepth", units = "m", dim = list(lon, lat, t), missval = -999, - longname = "Total snow depth") - nc_var[[s + 15]] <- PEcAn.utils::mstmipvar("SnowFrac", lat, lon, t, zg) # not standard - nc_var[[s + 16]] <- ncdf4::ncvar_def("Tdepth", units = "m", dim = list(lon, lat, t), missval = -999, - longname = "Active Layer Thickness") - nc_var[[s + 17]] <- ncdf4::ncvar_def("CO2air", units = "umol mol-1", dim = list(lon, lat, t), missval = -999, - longname = "Near surface CO2 concentration") - nc_var[[s + 18]] <- ncdf4::ncvar_def("LWdown", units = "W m-2", dim = list(lon, lat, t), missval = -999, - longname = "Surface incident longwave radiation") - nc_var[[s + 19]] <- ncdf4::ncvar_def("Psurf", units = "Pa", dim = list(lon, lat, t), missval = -999, - longname = "Surface pressure") - nc_var[[s + 20]] <- ncdf4::ncvar_def("Qair", units = "kg kg-1", dim = list(lon, lat, t), missval = -999, - longname = "Near surface specific humidity") - nc_var[[s + 21]] <- ncdf4::ncvar_def("Rainf", units = "kg m-2 s-1", dim = list(lon, lat, t), missval = -999, - longname = "Rainfall rate") - nc_var[[s + 22]] <- ncdf4::ncvar_def("SWdown", units = "W m-2", dim = list(lon, lat, t), missval = -999, - longname = "Surface incident shortwave radiation") - out <- checkTemp(23) - nc_var[[s + 23]] <- ncdf4::ncvar_def("Tair", units = "K", dim = list(lon, lat, t), missval = -999, - longname = "Near surface air temperature") - nc_var[[s + 24]] <- ncdf4::ncvar_def("Wind", units = "m s-1", dim = list(lon, lat, t), missval = -999, - longname = "Near surface module of the wind") - nc_var[[s + 25]] <- ncdf4::ncvar_def("LWnet", units = "W m-2", dim = list(lon, lat, t), missval = -999, - longname = "Net Longwave Radiation") - nc_var[[s + 26]] <- ncdf4::ncvar_def("Qg", units = "W m-2", dim = list(lon, lat, t), missval = -999, - longname = "Ground heat") - nc_var[[s + 27]] <- ncdf4::ncvar_def("Qh", units = "W m-2", dim = list(lon, lat, t), missval = -999, - longname = "Sensible heat") - out <- conversion(28, PEcAn.data.atmosphere::get.lv()) ## kg m-2 s-1 -> W m-2 - nc_var[[s + 28]] <- ncdf4::ncvar_def("Qle", units = "W m-2", dim = list(lon, lat, t), missval = -999, - longname = "Latent heat") - nc_var[[s + 29]] <- ncdf4::ncvar_def("SWnet", units = "W m-2", dim = list(lon, lat, t), missval = -999, - longname = "Net shortwave radiation") - nc_var[[s + 30]] <- PEcAn.utils::mstmipvar("RootMoist", lat, lon, t, zg) # not standard - nc_var[[s + 31]] <- ncdf4::ncvar_def("TVeg", units = "kg m-2 s-1", dim = list(lon, lat, t), missval = -999, - longname = "Transpiration") - nc_var[[s + 32]] <- PEcAn.utils::mstmipvar("WaterTableD", lat, lon, t, zg) # not standard - - nc_var[[s + 33]] <- ncdf4::ncvar_def("fPAR", units = "", dim = list(lon, lat, t), missval = -999, - longname = "Absorbed fraction incoming PAR") - nc_var[[s + 34]] <- ncdf4::ncvar_def("LAI", units = "m2 m-2", dim = list(lon, lat, t), missval = -999, - longname = "Leaf Area Index") - nc_var[[s + 35]] <- PEcAn.utils::mstmipvar("SMFrozFrac", lat, lon, t, zg) # not standard - nc_var[[s + 36]] <- PEcAn.utils::mstmipvar("SMLiqFrac", lat, lon, t, zg) # not standard - nc_var[[s + 37]] <- ncdf4::ncvar_def("SoilMoist", units = "kg m-2", dim = list(lon, lat, zg, t), missval = -999, - longname = "Average Layer Soil Moisture") - out <- checkTemp(38) - nc_var[[s + 38]] <- 
ncdf4::ncvar_def("SoilTemp", units = "K", dim = list(lon, lat, zg, t), missval = -999, - longname = "Average Layer Soil Temperature") - nc_var[[s + 39]] <- ncdf4::ncvar_def("SoilWet", units = "", dim = list(lon, lat, t), missval = -999, - longname = "Total Soil Wetness") - nc_var[[s + 40]] <- PEcAn.utils::mstmipvar("Albedo", lat, lon, t, zg) # not standard - out <- checkTemp(41) - nc_var[[s + 41]] <- PEcAn.utils::mstmipvar("SnowT", lat, lon, t, zg) # not standard - nc_var[[s + 42]] <- ncdf4::ncvar_def("SWE", units = "kg m-2", dim = list(lon, lat, t), missval = -999, - longname = "Snow Water Equivalent") - out <- checkTemp(43) - nc_var[[s + 43]] <- PEcAn.utils::mstmipvar("VegT", lat, lon, t, zg) # not standard - nc_var[[s + 44]] <- ncdf4::ncvar_def("Evap", units = "kg m-2 s-1", dim = list(lon, lat, t), missval = -999, - longname = "Total Evaporation") - nc_var[[s + 45]] <- ncdf4::ncvar_def("Qs", units = "kg m-2 s-1", dim = list(lon, lat, t), missval = -999, - longname = "Surface runoff") - nc_var[[s + 46]] <- ncdf4::ncvar_def("Qsb", units = "kg m-2 s-1", dim = list(lon, lat, t), missval = -999, - longname = "Subsurface runoff") - out <- conversion(47, yr2s) ## kg C m-2 yr-1 -> kg C m-2 s-1 - nc_var[[s + 47]] <- ncdf4::ncvar_def("SoilResp", units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, - longname = "Soil Respiration") - # Remove SLZ from output before finalizing list. replace with time_bounds - if(!is.null(out[["SLZ"]])){ - out[["SLZ"]] <- NULL - } - out_length <- length(out) - out[[out_length + 1]] <- c(rbind(bounds[, 1], bounds[, 2])) - nc_var[[s + (out_length + 1)]] <- ncdf4::ncvar_def(name="time_bounds", units='', - longname = "history time interval endpoints", - dim=list(time_interval,time = t), - prec = "double") - - return(list(nc_var = nc_var, out = out)) + time_interval <- ncdf4::ncdim_def( + name = "hist_interval", + longname = "history time interval endpoint dimensions", + vals = 1:2, units = "" + ) + + slzdata <- out$SLZ + dz <- diff(slzdata) + dz <- dz[dz != 0] -} # put_T_values + zg <- ncdf4::ncdim_def("SoilLayerMidpoint", "meters", c(slzdata[1:length(dz)] + dz / 2, 0)) + # currently unused + # dims <- list(lon = lon, lat = lat, time = t) + # dimsz <- list(lon = lon, lat = lat, time = t, nsoil = zg) -##-------------------------------------------------------------------------------------------------# + # ----- fill list + + out <- conversion(1, PEcAn.utils::ud_convert(1, "t ha-1", "kg m-2")) ## tC/ha -> kg/m2 + nc_var[[s + 1]] <- ncdf4::ncvar_def("AbvGrndWood", + units = "kg C m-2", dim = list(lon, lat, t), missval = -999, + longname = "Above ground woody biomass" + ) + out <- conversion(2, umol2kg_C) ## umol/m2 s-1 -> kg/m2 s-1 + nc_var[[s + 2]] <- ncdf4::ncvar_def("AutoResp", + units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, + longname = "Autotrophic Respiration" + ) + nc_var[[s + 3]] <- ncdf4::ncvar_def("CarbPools", + units = "kg C m-2", dim = list(lon, lat, t), missval = -999, + longname = "Size of each carbon pool" + ) + nc_var[[s + 4]] <- ncdf4::ncvar_def("CO2CAS", + units = "ppmv", dim = list(lon, lat, t), missval = -999, + longname = "CO2CAS" + ) + nc_var[[s + 5]] <- ncdf4::ncvar_def("CropYield", + units = "kg m-2", dim = list(lon, lat, t), missval = -999, + longname = "Crop Yield" + ) + out <- conversion(6, yr2s) ## kg C m-2 yr-1 -> kg C m-2 s-1 + nc_var[[s + 6]] <- ncdf4::ncvar_def("GPP", + units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, + longname = "Gross Primary Productivity" + ) + out <- conversion(7, yr2s) ## kg 
C m-2 yr-1 -> kg C m-2 s-1 + nc_var[[s + 7]] <- ncdf4::ncvar_def("HeteroResp", + units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, + longname = "Heterotrophic Respiration" + ) + out <- conversion(8, yr2s) ## kg C m-2 yr-1 -> kg C m-2 s-1 + nc_var[[s + 8]] <- ncdf4::ncvar_def("NEE", + units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, + longname = "Net Ecosystem Exchange" + ) + out <- conversion(9, yr2s) ## kg C m-2 yr-1 -> kg C m-2 s-1 + nc_var[[s + 9]] <- ncdf4::ncvar_def("NPP", + units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, + longname = "Net Primary Productivity" + ) + out <- conversion(10, yr2s) ## kg C m-2 yr-1 -> kg C m-2 s-1 + nc_var[[s + 10]] <- ncdf4::ncvar_def("TotalResp", + units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, + longname = "Total Respiration" + ) + nc_var[[s + 11]] <- ncdf4::ncvar_def("TotLivBiom", + units = "kg C m-2", dim = list(lon, lat, t), missval = -999, + longname = "Total living biomass" + ) + nc_var[[s + 12]] <- ncdf4::ncvar_def("TotSoilCarb", + units = "kg C m-2", dim = list(lon, lat, t), missval = -999, + longname = "Total Soil Carbon" + ) + nc_var[[s + 13]] <- ncdf4::ncvar_def("Fdepth", + units = "m", dim = list(lon, lat, t), missval = -999, + longname = "Frozen Thickness" + ) + nc_var[[s + 14]] <- ncdf4::ncvar_def("SnowDepth", + units = "m", dim = list(lon, lat, t), missval = -999, + longname = "Total snow depth" + ) + nc_var[[s + 15]] <- PEcAn.utils::mstmipvar("SnowFrac", lat, lon, t, zg) # not standard + nc_var[[s + 16]] <- ncdf4::ncvar_def("Tdepth", + units = "m", dim = list(lon, lat, t), missval = -999, + longname = "Active Layer Thickness" + ) + nc_var[[s + 17]] <- ncdf4::ncvar_def("CO2air", + units = "umol mol-1", dim = list(lon, lat, t), missval = -999, + longname = "Near surface CO2 concentration" + ) + nc_var[[s + 18]] <- ncdf4::ncvar_def("LWdown", + units = "W m-2", dim = list(lon, lat, t), missval = -999, + longname = "Surface incident longwave radiation" + ) + nc_var[[s + 19]] <- ncdf4::ncvar_def("Psurf", + units = "Pa", dim = list(lon, lat, t), missval = -999, + longname = "Surface pressure" + ) + nc_var[[s + 20]] <- ncdf4::ncvar_def("Qair", + units = "kg kg-1", dim = list(lon, lat, t), missval = -999, + longname = "Near surface specific humidity" + ) + nc_var[[s + 21]] <- ncdf4::ncvar_def("Rainf", + units = "kg m-2 s-1", dim = list(lon, lat, t), missval = -999, + longname = "Rainfall rate" + ) + nc_var[[s + 22]] <- ncdf4::ncvar_def("SWdown", + units = "W m-2", dim = list(lon, lat, t), missval = -999, + longname = "Surface incident shortwave radiation" + ) + out <- checkTemp(23) + nc_var[[s + 23]] <- ncdf4::ncvar_def("Tair", + units = "K", dim = list(lon, lat, t), missval = -999, + longname = "Near surface air temperature" + ) + nc_var[[s + 24]] <- ncdf4::ncvar_def("Wind", + units = "m s-1", dim = list(lon, lat, t), missval = -999, + longname = "Near surface module of the wind" + ) + nc_var[[s + 25]] <- ncdf4::ncvar_def("LWnet", + units = "W m-2", dim = list(lon, lat, t), missval = -999, + longname = "Net Longwave Radiation" + ) + nc_var[[s + 26]] <- ncdf4::ncvar_def("Qg", + units = "W m-2", dim = list(lon, lat, t), missval = -999, + longname = "Ground heat" + ) + nc_var[[s + 27]] <- ncdf4::ncvar_def("Qh", + units = "W m-2", dim = list(lon, lat, t), missval = -999, + longname = "Sensible heat" + ) + out <- conversion(28, PEcAn.data.atmosphere::get.lv()) ## kg m-2 s-1 -> W m-2 + nc_var[[s + 28]] <- ncdf4::ncvar_def("Qle", + units = "W m-2", dim = list(lon, lat, t), missval = 
-999, + longname = "Latent heat" + ) + nc_var[[s + 29]] <- ncdf4::ncvar_def("SWnet", + units = "W m-2", dim = list(lon, lat, t), missval = -999, + longname = "Net shortwave radiation" + ) + nc_var[[s + 30]] <- PEcAn.utils::mstmipvar("RootMoist", lat, lon, t, zg) # not standard + nc_var[[s + 31]] <- ncdf4::ncvar_def("TVeg", + units = "kg m-2 s-1", dim = list(lon, lat, t), missval = -999, + longname = "Transpiration" + ) + nc_var[[s + 32]] <- PEcAn.utils::mstmipvar("WaterTableD", lat, lon, t, zg) # not standard + + nc_var[[s + 33]] <- ncdf4::ncvar_def("fPAR", + units = "", dim = list(lon, lat, t), missval = -999, + longname = "Absorbed fraction incoming PAR" + ) + nc_var[[s + 34]] <- ncdf4::ncvar_def("LAI", + units = "m2 m-2", dim = list(lon, lat, t), missval = -999, + longname = "Leaf Area Index" + ) + nc_var[[s + 35]] <- PEcAn.utils::mstmipvar("SMFrozFrac", lat, lon, t, zg) # not standard + nc_var[[s + 36]] <- PEcAn.utils::mstmipvar("SMLiqFrac", lat, lon, t, zg) # not standard + nc_var[[s + 37]] <- ncdf4::ncvar_def("SoilMoist", + units = "kg m-2", dim = list(lon, lat, zg, t), missval = -999, + longname = "Average Layer Soil Moisture" + ) + out <- checkTemp(38) + nc_var[[s + 38]] <- ncdf4::ncvar_def("SoilTemp", + units = "K", dim = list(lon, lat, zg, t), missval = -999, + longname = "Average Layer Soil Temperature" + ) + nc_var[[s + 39]] <- ncdf4::ncvar_def("SoilWet", + units = "", dim = list(lon, lat, t), missval = -999, + longname = "Total Soil Wetness" + ) + nc_var[[s + 40]] <- PEcAn.utils::mstmipvar("Albedo", lat, lon, t, zg) # not standard + out <- checkTemp(41) + nc_var[[s + 41]] <- PEcAn.utils::mstmipvar("SnowT", lat, lon, t, zg) # not standard + nc_var[[s + 42]] <- ncdf4::ncvar_def("SWE", + units = "kg m-2", dim = list(lon, lat, t), missval = -999, + longname = "Snow Water Equivalent" + ) + out <- checkTemp(43) + nc_var[[s + 43]] <- PEcAn.utils::mstmipvar("VegT", lat, lon, t, zg) # not standard + nc_var[[s + 44]] <- ncdf4::ncvar_def("Evap", + units = "kg m-2 s-1", dim = list(lon, lat, t), missval = -999, + longname = "Total Evaporation" + ) + nc_var[[s + 45]] <- ncdf4::ncvar_def("Qs", + units = "kg m-2 s-1", dim = list(lon, lat, t), missval = -999, + longname = "Surface runoff" + ) + nc_var[[s + 46]] <- ncdf4::ncvar_def("Qsb", + units = "kg m-2 s-1", dim = list(lon, lat, t), missval = -999, + longname = "Subsurface runoff" + ) + out <- conversion(47, yr2s) ## kg C m-2 yr-1 -> kg C m-2 s-1 + nc_var[[s + 47]] <- ncdf4::ncvar_def("SoilResp", + units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, + longname = "Soil Respiration" + ) + # Remove SLZ from output before finalizing list. 
replace with time_bounds + if (!is.null(out[["SLZ"]])) { + out[["SLZ"]] <- NULL + } + out_length <- length(out) + out[[out_length + 1]] <- c(rbind(bounds[, 1], bounds[, 2])) + nc_var[[s + (out_length + 1)]] <- ncdf4::ncvar_def( + name = "time_bounds", units = "", + longname = "history time interval endpoints", + dim = list(time_interval, time = t), + prec = "double" + ) + + return(list(nc_var = nc_var, out = out)) + } # put_T_values + + +## -------------------------------------------------------------------------------------------------# ##' Function for reading -E- files ##' @@ -959,57 +1065,57 @@ put_T_values <- ##' ##' @details if \code{settings} is provided, then values for missing arguments ##' for `start_date`, `end_date`, and `pfts` will be taken from it -##' +##' ##' @return a list -##' -read_E_files <- function(yr, yfiles, h5_files, outdir, start_date, end_date, +##' +read_E_files <- function(yr, yfiles, h5_files, outdir, start_date, end_date, pfts, settings = NULL) { - PEcAn.logger::logger.info(paste0("*** Reading -E- file ***")) - + if (!is.null(settings)) { - if(!inherits(settings, "Settings")) { + if (!inherits(settings, "Settings")) { PEcAn.logger::logger.error("`settings` should be a PEcAn 'Settings' object") } - if(missing(start_date)) start_date <- settings$run$start.date - if(missing(end_date)) end_date <- settings$run$end.date - if(missing(pfts)) pfts <- extract_pfts(settings$pfts) + if (missing(start_date)) start_date <- settings$run$start.date + if (missing(end_date)) end_date <- settings$run$end.date + if (missing(pfts)) pfts <- extract_pfts(settings$pfts) } - - stopifnot(!is.null(outdir), !is.null(start_date), !is.null(end_date), - !is.null(pfts)) - + + stopifnot( + !is.null(outdir), !is.null(start_date), !is.null(end_date), + !is.null(pfts) + ) + # #extract data from a single -E- .h5 file as a tibble extract_E_file <- function(file) { - # Cohort-level variables to extract cohort_vars <- c( - "DBH", #diameter at breast height (cm) of each cohort - "AGB_CO", #cohort level above ground biomass (kgC/plant) - "MMEAN_NPPDAILY_CO", #net primary productivity (kgC/plant/yr) - "MMEAN_TRANSP_CO", #Monthly mean leaf transpiration (kg/plant/s) - "BSEEDS_CO", #seed biomass in units of (kgC/plant) - "NPLANT", #plant density (plants/m2), required for /plant -> /m2 conversion - "PFT" #pft numbers + "DBH", # diameter at breast height (cm) of each cohort + "AGB_CO", # cohort level above ground biomass (kgC/plant) + "MMEAN_NPPDAILY_CO", # net primary productivity (kgC/plant/yr) + "MMEAN_TRANSP_CO", # Monthly mean leaf transpiration (kg/plant/s) + "BSEEDS_CO", # seed biomass in units of (kgC/plant) + "NPLANT", # plant density (plants/m2), required for /plant -> /m2 conversion + "PFT" # pft numbers ) - + # Patch-level variables needed for calculations patch_vars <- c( "AREA", # fractional patch area relative to site area (unitless) - "AGE", #patch age since last disturbance - "PACO_N", #number of cohorts in each patch - "PACO_ID" #index of the first cohort of each patch. Needed for figuring out which patch each cohort belongs to + "AGE", # patch age since last disturbance + "PACO_N", # number of cohorts in each patch + "PACO_ID" # index of the first cohort of each patch. 
Needed for figuring out which patch each cohort belongs to ) - + nc <- ncdf4::nc_open(file) on.exit(ncdf4::nc_close(nc), add = FALSE) avail_cohort <- cohort_vars[cohort_vars %in% names(nc$var)] - if(length(avail_cohort) == 0) { + if (length(avail_cohort) == 0) { PEcAn.logger::logger.warn("No cohort-level variables found!") return(NULL) } avail_patch <- patch_vars[patch_vars %in% names(nc$var)] - if(length(avail_patch) == 0) { + if (length(avail_patch) == 0) { PEcAn.logger::logger.warn("No patch-level variables found!") return(NULL) } @@ -1020,13 +1126,13 @@ read_E_files <- function(yr, yfiles, h5_files, outdir, start_date, end_date, purrr::set_names(avail_cohort) %>% tibble::as_tibble() %>% dplyr::mutate(COHORT_ID = 1:dplyr::n()) - + patch_df <- purrr::map(avail_patch, function(.x) as.vector(ncdf4::ncvar_get(nc, .x))) %>% purrr::set_names(avail_patch) %>% tibble::as_tibble() %>% dplyr::mutate(PATCH_ID = 1:dplyr::n()) - + # join patch data with cohort data. PACO_N is the number of cohorts (i.e. # rows) in each patch, so I use that to figure out which rows are in which # patch. @@ -1035,24 +1141,24 @@ read_E_files <- function(yr, yfiles, h5_files, outdir, start_date, end_date, cohort_patch_id <- c(cohort_patch_id, rep(i, patch_df$PACO_N[i])) } cohort_df$PATCH_ID <- cohort_patch_id - + # Then the cohort and patch dataframes can be joined by PATCH_ID - dplyr::left_join(cohort_df, patch_df, by = "PATCH_ID") %>% + dplyr::left_join(cohort_df, patch_df, by = "PATCH_ID") %>% # Dates in filename are not valid because day is 00. Extract just year # and month and use lubridate:ym() to build date dplyr::mutate(date = stringr::str_match(basename(file), "(\\d{4}-\\d{2})-\\d{2}")[, 2] %>% lubridate::ym()) %>% dplyr::select(date, dplyr::everything()) %>% dplyr::select(-"PACO_ID") } - + # Extract data from all the .h5 files of the yr - raw <- - purrr::map(file.path(outdir, h5_files[yr == yfiles]), extract_E_file) %>% - dplyr::bind_rows() - - + raw <- + purrr::map(file.path(outdir, h5_files[yr == yfiles]), extract_E_file) %>% + dplyr::bind_rows() + + # Unit conversions - out <- raw %>% + out <- raw %>% dplyr::mutate( # Convert units of per plant to per m^2 by multiplying by plant density # (plant/m^2) @@ -1065,14 +1171,14 @@ read_E_files <- function(yr, yfiles, h5_files, outdir, start_date, end_date, ) %>% # Weighted summary # For each month (-E- file) and PFT... - dplyr::group_by(.data$date, .data$PFT) %>% + dplyr::group_by(.data$date, .data$PFT) %>% # ... get a weighted sum across cohorts and patches dplyr::summarize( dplyr::across( dplyr::any_of(c("BSEEDS_CO", "AGB_CO", "MMEAN_NPPDAILY_CO", "MMEAN_TRANSP_CO", "NPLANT")), function(.x) sum(.x * .data$AREA, na.rm = TRUE) ), - #(or a mean when it makes sense) + # (or a mean when it makes sense) DBH_mean = mean(.data$DBH, na.rm = TRUE), .groups = "drop" ) @@ -1080,60 +1186,61 @@ read_E_files <- function(yr, yfiles, h5_files, outdir, start_date, end_date, # case in ED2 output, but we need to fill in some dummy values so the # dimensions don't get screwed up later. Other code in model2netcdf.ED2 # assumes the dimensions are constant for the entire run. 
- + expected <- tidyr::expand_grid( date = unique(out$date), PFT = pfts ) - out <- + out <- dplyr::full_join(out, expected, by = c("date", "PFT")) %>% dplyr::arrange(.data$date) - + # Rename variables to PEcAn standard and convert to list n_pft <- length(unique(out$PFT)) out_list <- - out %>% - dplyr::arrange(.data$PFT, .data$date) %>% - dplyr::select(-"PFT", -"date") %>% - #output is expected to be list of matrixes with ncol == number of PFTs. - #Here, I make a tibble with matrix-columns (each data frame column is a - #n_pft-wide matrix), then convert it to a list. - dplyr::summarize(dplyr::across(dplyr::everything(), + out %>% + dplyr::arrange(.data$PFT, .data$date) %>% + dplyr::select(-"PFT", -"date") %>% + # output is expected to be list of matrixes with ncol == number of PFTs. + # Here, I make a tibble with matrix-columns (each data frame column is a + # n_pft-wide matrix), then convert it to a list. + dplyr::summarize(dplyr::across( + dplyr::everything(), function(.x) matrix(.x, ncol = n_pft) - )) %>% + )) %>% dplyr::select( - #PEcAn name #ED2 name + # PEcAn name #ED2 name "AGB_PFT" = "AGB_CO", "BSEEDS" = "BSEEDS_CO", "DBH" = "DBH_mean", "NPP_PFT" = "MMEAN_NPPDAILY_CO", "TRANSP_PFT" = "MMEAN_TRANSP_CO", "DENS" = "NPLANT" - ) %>% as.list() - - + ) %>% + as.list() + + # even if this is a SA run for soil, currently we are not reading any variable # that has a soil dimension. "soil" will be passed to read.output as pft.name # from upstream, when it's not part of the attribute it will read the sum soil.check <- grepl("soil", names(pfts)) - if(any(soil.check)){ + if (any(soil.check)) { # for now keep soil out - #TODO: print a message?? + # TODO: print a message?? pfts <- pfts[!(soil.check)] } - out_list$PFT <- sort(pfts) #named vector for matching PFT numbers to names - + out_list$PFT <- sort(pfts) # named vector for matching PFT numbers to names + return(out_list) - } ##' Put -E- values to nc_var list -##' +##' ##' Puts a select number of variables from the monthly -E- files into a `nc_var` ##' list to be written to a .nc file. -##' +##' ##' @param yr the year being processed ##' @param nc_var a list (potentially empty) for `ncvar4` objects to be added to ##' @param var_list list returned by [read_E_files()] @@ -1144,9 +1251,9 @@ read_E_files <- function(yr, yfiles, h5_files, outdir, start_date, end_date, ##' @param begins deprecated; use `start_date` instead ##' @param ends deprecated; use `end_date` instead ##' @param out deprecated; use `var_list` instead -##' +##' ##' @return a list of `ncdim4` objects -##' +##' put_E_values <- function(yr, nc_var, @@ -1158,187 +1265,185 @@ put_E_values <- begins, ends, out) { - - if(!missing(begins)) { - warning("`begins` is deprecated, using `start_date` instead") - start_date <- begins - } - if(!missing(ends)) { - warning("`ends` is deprecated, using `end_date` instead") - end_date <- ends - } - if(!missing(out)) { - warning("`out` is deprecated, using `var_list` instead") - var_list <- out - } + if (!missing(begins)) { + warning("`begins` is deprecated, using `start_date` instead") + start_date <- begins + } + if (!missing(ends)) { + warning("`ends` is deprecated, using `end_date` instead") + end_date <- ends + } + if (!missing(out)) { + warning("`out` is deprecated, using `var_list` instead") + var_list <- out + } - # Extract the PFT names and numbers for all PFTs - pfts <- var_list$PFT - - - # ----- fill list - - ##### setup output time and time bounds - ## Create a date vector that contains each month of the model run (e.g. 
- ## "2001-07-01" "2001-08-01" "2001-09-01"....) and which is the correct length - ## for each full or partial year. In the last year of the simulation, the .h5 - ## for the month in end_date will never be written, because it won't be - ## complete. - if(yr == lubridate::year(start_date)){ - date_vec_start <- lubridate::floor_date(lubridate::ymd(start_date), "month") - #TODO I should check what happens if a run starts not on the first day of the month - } else { - date_vec_start <- lubridate::make_date(year = yr, month = 1, day = 1) - } - - date_vec_end <- lubridate::floor_date(lubridate::ymd(end_date), "month") - if (yr == lubridate::year(end_date)) { - #remove the last month - date_vec_end <- date_vec_end - months(1) - } else { - date_vec_end <- lubridate::make_date(year = yr, month = 12, day = 1) - } - - output_date_vector <- - seq( - date_vec_start, - date_vec_end, - by = "month" - ) - ## Create a vector of the number of days in each month by year (e.g. 31 31 30 - ## 31 30 31) - num_days_per_month <- lubridate::days_in_month(output_date_vector) - ## Update num_days_per_month and output_date_vector if model run did not start - ## on the first day of a month e.g. "2001-07-15" "2001-08-01", 17 31 - if (lubridate::yday(start_date) != lubridate::yday(output_date_vector[1])) { - temp <- - num_days_per_month[1] - (( - lubridate::yday(start_date) - lubridate::yday(output_date_vector[1]) - )) - num_days_per_month[1] <- temp - output_date_vector[1] <- start_date - } - ## Create a vector of output month julian dates (e.g. 196 213 244 274 305 335) - jdates <- lubridate::yday(output_date_vector) - ## Create a 0 index dtime variable - dtvals <- jdates - 1 # convert to 0 index - ## Create monthly time bounds to populate dtime_bounds variable - bounds <- array(data = NA, dim = c(length(dtvals), 2)) - bounds[, 1] <- dtvals - bounds[, 2] <- bounds[, 1] + num_days_per_month - # create time bounds for each timestep in t, t+1; t+1, t+2... format - bounds <- round(bounds, 4) - - t <- - ncdf4::ncdim_def( - name = "dtime", - units = paste0("days since ", yr, "-01-01 00:00:00"), - vals = dtvals, - calendar = "standard", - unlim = TRUE - ) - time_interval <- - ncdf4::ncdim_def( - name = "hist_interval", - longname = "history time interval endpoint dimensions", - vals = 1:2, - units = "" - ) - p <- - ncdf4::ncdim_def( - name = "pft", - units = "unitless", - vals = pfts, - longname = "Plant Functional Type", - unlim = TRUE - ) - - # NOTE : the order of dimensions is going to be important for read.output. - # This was the fist case of reading pft-specific outputs at the time, but - # checking base/utils/data/standard_vars.csv "pft" should come after "time" as - # a dimension e.g. when NEE is pft-specific for some model output it will be - # the 4th dimension - # lon / lat / time / pft - # From read.output's perspective, dimension of pft will be the same for NEE - # there and DBH here - - # NOTE: the order of variables in `evars` MUST match the order in `var_list` - # output by read_E_files. Some day the whole model2netcdf.ED2 function should - # probably be re-written to combine the read_*_files and put_*_values - # functions to make this harder to accidentally screw up. 
- evars <- list( - ncdf4::ncvar_def( - "AGB_PFT", #original ED2 name: AGB_CO - units = "kgC m-2", - dim = list(lon, lat, t, p), - missval = -999, - longname = "Above ground biomass by PFT" - ), - ncdf4::ncvar_def( - "BSEEDS", #original ED2 name: BSEEDS_CO - units = "kgC m-2", - dim = list(lon, lat, t, p), - missval = -999, - longname = "Seed biomass by PFT" - ), - ncdf4::ncvar_def( - "DBH", #original ED2 name: DBH - units = "cm", - dim = list(lon, lat, t, p), - missval = -999, - longname = "Diameter at breast height by PFT" - ), - ncdf4::ncvar_def( - "NPP_PFT", #original ED2 name: MMEAN_NPPDAILY_CO - units = "KgC m-2 s-1", - dim = list(lon, lat, t, p), - missval = -999, - longname = "Net primary productivity by PFT" - ), - ncdf4::ncvar_def( - "TRANSP_PFT", #original ED2 name: MMEAN_TRANSP_CO - units = "kg m-2 s-1", - dim = list(lon, lat, t, p), - missval = -999, - longname = "Leaf transpiration by PFT" - ), - ncdf4::ncvar_def( - "DENS", #original ED2 name: NPLANT - units = "plant m-2", - dim = list(lon, lat, t, p), - missval = -999, - longname = "Plant density by PFT" - ), - - # longname of this variable will be parsed by read.output - # so that read.output has a way of accessing PFT names - ncdf4::ncvar_def( - "PFT", - units = "", - dim = list(p), - longname = paste(names(pfts), collapse = ",") - ), - ncdf4::ncvar_def( - name = "dtime_bounds", - units = "", - longname = "monthly history time interval endpoints", - dim = list(time_interval, dtime = t), - prec = "double" + # Extract the PFT names and numbers for all PFTs + pfts <- var_list$PFT + + + # ----- fill list + + ##### setup output time and time bounds + ## Create a date vector that contains each month of the model run (e.g. + ## "2001-07-01" "2001-08-01" "2001-09-01"....) and which is the correct length + ## for each full or partial year. In the last year of the simulation, the .h5 + ## for the month in end_date will never be written, because it won't be + ## complete. + if (yr == lubridate::year(start_date)) { + date_vec_start <- lubridate::floor_date(lubridate::ymd(start_date), "month") + # TODO I should check what happens if a run starts not on the first day of the month + } else { + date_vec_start <- lubridate::make_date(year = yr, month = 1, day = 1) + } + + date_vec_end <- lubridate::floor_date(lubridate::ymd(end_date), "month") + if (yr == lubridate::year(end_date)) { + # remove the last month + date_vec_end <- date_vec_end - months(1) + } else { + date_vec_end <- lubridate::make_date(year = yr, month = 12, day = 1) + } + + output_date_vector <- + seq( + date_vec_start, + date_vec_end, + by = "month" + ) + ## Create a vector of the number of days in each month by year (e.g. 31 31 30 + ## 31 30 31) + num_days_per_month <- lubridate::days_in_month(output_date_vector) + ## Update num_days_per_month and output_date_vector if model run did not start + ## on the first day of a month e.g. "2001-07-15" "2001-08-01", 17 31 + if (lubridate::yday(start_date) != lubridate::yday(output_date_vector[1])) { + temp <- + num_days_per_month[1] - (( + lubridate::yday(start_date) - lubridate::yday(output_date_vector[1]) + )) + num_days_per_month[1] <- temp + output_date_vector[1] <- start_date + } + ## Create a vector of output month julian dates (e.g. 
196 213 244 274 305 335) + jdates <- lubridate::yday(output_date_vector) + ## Create a 0 index dtime variable + dtvals <- jdates - 1 # convert to 0 index + ## Create monthly time bounds to populate dtime_bounds variable + bounds <- array(data = NA, dim = c(length(dtvals), 2)) + bounds[, 1] <- dtvals + bounds[, 2] <- bounds[, 1] + num_days_per_month + # create time bounds for each timestep in t, t+1; t+1, t+2... format + bounds <- round(bounds, 4) + + t <- + ncdf4::ncdim_def( + name = "dtime", + units = paste0("days since ", yr, "-01-01 00:00:00"), + vals = dtvals, + calendar = "standard", + unlim = TRUE + ) + time_interval <- + ncdf4::ncdim_def( + name = "hist_interval", + longname = "history time interval endpoint dimensions", + vals = 1:2, + units = "" + ) + p <- + ncdf4::ncdim_def( + name = "pft", + units = "unitless", + vals = pfts, + longname = "Plant Functional Type", + unlim = TRUE + ) + + # NOTE : the order of dimensions is going to be important for read.output. + # This was the fist case of reading pft-specific outputs at the time, but + # checking base/utils/data/standard_vars.csv "pft" should come after "time" as + # a dimension e.g. when NEE is pft-specific for some model output it will be + # the 4th dimension + # lon / lat / time / pft + # From read.output's perspective, dimension of pft will be the same for NEE + # there and DBH here + + # NOTE: the order of variables in `evars` MUST match the order in `var_list` + # output by read_E_files. Some day the whole model2netcdf.ED2 function should + # probably be re-written to combine the read_*_files and put_*_values + # functions to make this harder to accidentally screw up. + evars <- list( + ncdf4::ncvar_def( + "AGB_PFT", # original ED2 name: AGB_CO + units = "kgC m-2", + dim = list(lon, lat, t, p), + missval = -999, + longname = "Above ground biomass by PFT" + ), + ncdf4::ncvar_def( + "BSEEDS", # original ED2 name: BSEEDS_CO + units = "kgC m-2", + dim = list(lon, lat, t, p), + missval = -999, + longname = "Seed biomass by PFT" + ), + ncdf4::ncvar_def( + "DBH", # original ED2 name: DBH + units = "cm", + dim = list(lon, lat, t, p), + missval = -999, + longname = "Diameter at breast height by PFT" + ), + ncdf4::ncvar_def( + "NPP_PFT", # original ED2 name: MMEAN_NPPDAILY_CO + units = "KgC m-2 s-1", + dim = list(lon, lat, t, p), + missval = -999, + longname = "Net primary productivity by PFT" + ), + ncdf4::ncvar_def( + "TRANSP_PFT", # original ED2 name: MMEAN_TRANSP_CO + units = "kg m-2 s-1", + dim = list(lon, lat, t, p), + missval = -999, + longname = "Leaf transpiration by PFT" + ), + ncdf4::ncvar_def( + "DENS", # original ED2 name: NPLANT + units = "plant m-2", + dim = list(lon, lat, t, p), + missval = -999, + longname = "Plant density by PFT" + ), + + # longname of this variable will be parsed by read.output + # so that read.output has a way of accessing PFT names + ncdf4::ncvar_def( + "PFT", + units = "", + dim = list(p), + longname = paste(names(pfts), collapse = ",") + ), + ncdf4::ncvar_def( + name = "dtime_bounds", + units = "", + longname = "monthly history time interval endpoints", + dim = list(time_interval, dtime = t), + prec = "double" + ) ) - ) - #TODO: assure that nc_var and var_list are of same length and same order? - nc_var <- append(nc_var, evars) - var_list <- append(var_list, list(dtime_bounds = c(bounds))) - - return(list(nc_var = nc_var, out = var_list)) - -} # put_E_values + # TODO: assure that nc_var and var_list are of same length and same order? 
+ nc_var <- append(nc_var, evars) + var_list <- append(var_list, list(dtime_bounds = c(bounds))) + + return(list(nc_var = nc_var, out = var_list)) + } # put_E_values ##' Read "S" files output by ED2 -##' +##' ##' S-file contents are not written to standard netcdfs but are used by ##' read_restart from SDA's perspective it doesn't make sense to write and read ##' to ncdfs because ED restarts from history files @@ -1352,78 +1457,79 @@ put_E_values <- ##' @param ... currently unused ##' ##' @export -read_S_files <- function(sfile, outdir, pfts, pecan_names = NULL, settings = NULL, ...){ - +read_S_files <- function(sfile, outdir, pfts, pecan_names = NULL, settings = NULL, ...) { PEcAn.logger::logger.info(paste0("*** Reading -S- file ***")) if (!is.null(settings)) { - if(!inherits(settings, "Settings")) { + if (!inherits(settings, "Settings")) { PEcAn.logger::logger.error("`settings` should be a PEcAn 'Settings' object") } - if(missing(pfts)) pfts <- extract_pfts(settings$pfts) + if (missing(pfts)) pfts <- extract_pfts(settings$pfts) } # commonly used vars - if(is.null(pecan_names)) pecan_names <- c("AGB", "AbvGrndWood", "GWBI", "DBH") - + if (is.null(pecan_names)) pecan_names <- c("AGB", "AbvGrndWood", "GWBI", "DBH") + ed_varnames <- pecan_names - + # TODO: ed.var lookup function can also return deterministically related variables - + # translate pecan vars to ED vars - trans_out <- translate_vars_ed(ed_varnames) - ed_varnames <- trans_out$vars # variables to read from history files - ed_derivs <- trans_out$expr # derivations to obtain pecan standard variables - add_vars <- trans_out$addvars # these are the vars -if there are any- that won't be updated by analysis, but will be used in write_restart - ed_units <- trans_out$units # might use - - # List of vars to extract includes the requested one, plus others needed below - add_vars <- c(add_vars, "PFT", "AREA", "PACO_N", "NPLANT","DAGB_DT", "BDEAD", "DBH", - "BSTORAGE", "BALIVE", "BLEAF", "BROOT", "BSEEDS_CO", "BSAPWOODA", "BSAPWOODB") - vars <- c(ed_varnames, add_vars) - + trans_out <- translate_vars_ed(ed_varnames) + ed_varnames <- trans_out$vars # variables to read from history files + ed_derivs <- trans_out$expr # derivations to obtain pecan standard variables + add_vars <- trans_out$addvars # these are the vars -if there are any- that won't be updated by analysis, but will be used in write_restart + ed_units <- trans_out$units # might use + + # List of vars to extract includes the requested one, plus others needed below + add_vars <- c( + add_vars, "PFT", "AREA", "PACO_N", "NPLANT", "DAGB_DT", "BDEAD", "DBH", + "BSTORAGE", "BALIVE", "BLEAF", "BROOT", "BSEEDS_CO", "BSAPWOODA", "BSAPWOODB" + ) + vars <- c(ed_varnames, add_vars) + # list to collect outputs ed.dat <- list() - + nc <- ncdf4::nc_open(file.path(outdir, sfile)) allvars <- names(nc$var) - if (!is.null(vars)) allvars <- allvars[ allvars %in% vars ] - + if (!is.null(vars)) allvars <- allvars[allvars %in% vars] + for (j in seq_along(allvars)) { ed.dat[[j]] <- list() ed.dat[[j]] <- ncdf4::ncvar_get(nc, allvars[j]) } names(ed.dat) <- allvars - + ncdf4::nc_close(nc) - - + + # even if this is a SA run for soil, currently we are not reading any variable # that has a soil dimension. "soil" will be passed to read.output as pft.name # from upstream, when it's not part of the attribute it will read the sum soil.check <- grepl("soil", names(pfts)) - if(any(soil.check)){ + if (any(soil.check)) { # for now keep soil out - #TODO: print a message?? + # TODO: print a message?? 
pfts <- pfts[!(soil.check)] } - + out <- list() for (varname in pecan_names) { out[[varname]] <- array(NA, length(pfts)) } - - - # Get cohort-level variables - pft <- ed.dat$PFT - plant_dens <- ed.dat$NPLANT # Cohort stem density -- plant/m2 - dbh <- ed.dat$DBH # used in allometric eqns -- dbh - - # Get patch areas. In general patches aren't the same area, so this is needed to area-weight when averaging up to site level. Requires minor finnagling to convert patch-level AREA to a cohort-length variable. - patch_area <- ed.dat$AREA # unitless, a proportion of total site area -- one entry per patch (always add up to 1) - paco_n <- ed.dat$PACO_N # number of cohorts per patch - + + + # Get cohort-level variables + pft <- ed.dat$PFT + plant_dens <- ed.dat$NPLANT # Cohort stem density -- plant/m2 + dbh <- ed.dat$DBH # used in allometric eqns -- dbh + + # Get patch areas. In general patches aren't the same area, so this is needed to area-weight when averaging up to site level. Requires minor finnagling to convert patch-level AREA to a cohort-length variable. + patch_area <- ed.dat$AREA # unitless, a proportion of total site area -- one entry per patch (always add up to 1) + paco_n <- ed.dat$PACO_N # number of cohorts per patch + patch_index <- rep(1:length(paco_n), times = paco_n) - + # read xml to extract allometric coeffs later configfile <- paste0(gsub("/out/", "/run/", outdir), "/config.xml") pars <- XML::xmlToList(XML::xmlParse(configfile)) @@ -1431,24 +1537,22 @@ read_S_files <- function(sfile, outdir, pfts, pecan_names = NULL, settings = NUL pars[names(pars) != "pft"] <- NULL # pass pft numbers as sublist names names(pars) <- pfts - + # Aggregate for (l in seq_along(pecan_names)) { - - variable <- PEcAn.utils::convert.expr(ed_derivs[l]) # convert + variable <- PEcAn.utils::convert.expr(ed_derivs[l]) # convert expr <- variable$variable.eqn$expression - + sapply(variable$variable.eqn$variables, function(x) assign(x, ed.dat[[x]], envir = .GlobalEnv)) tmp.var <- eval(parse(text = expr)) # parse - + if (ed_units[l] %in% c("kg/m2")) { # does this always mean this is a patch-level variable w/o per-pft values? out[[pecan_names[l]]] <- NA out[[pecan_names[l]]] <- sum(tmp.var * patch_area, na.rm = TRUE) - - } else {# per-pft vars - for(k in seq_len(length(pfts))) { + } else { # per-pft vars + for (k in seq_len(length(pfts))) { ind <- (pft == pfts[k]) - + if (any(ind)) { # check for different variables/units? 
if (pecan_names[l] == "GWBI") { @@ -1457,47 +1561,43 @@ read_S_files <- function(sfile, outdir, pfts, pecan_names = NULL, settings = NUL ddbh_dt[!ind] <- 0 dagb_dt <- ed.dat$DAGB_DT dagb_dt[!ind] <- 0 - + # get b1Bl/b2Bl/dbh_adult from xml # these are in order so you can use k, but you can also extract by pft - small <- dbh <= as.numeric(pars[[k]]$dbh_adult) - ddbh_dt[small] <- as.numeric(pars[[k]]$b1Bl_small) / 2 * ddbh_dt[small] ^ as.numeric(pars[[k]]$b2Bl_small) - ddbh_dt[!small] <- as.numeric(pars[[k]]$b1Bl_large) / 2 * ddbh_dt[!small] ^ as.numeric(pars[[k]]$b2Bl_large) + small <- dbh <= as.numeric(pars[[k]]$dbh_adult) + ddbh_dt[small] <- as.numeric(pars[[k]]$b1Bl_small) / 2 * ddbh_dt[small]^as.numeric(pars[[k]]$b2Bl_small) + ddbh_dt[!small] <- as.numeric(pars[[k]]$b1Bl_large) / 2 * ddbh_dt[!small]^as.numeric(pars[[k]]$b2Bl_large) gwbi_ch <- dagb_dt - ddbh_dt - # kgC/m2/yr = kgC/plant/yr * plant/m2 + # kgC/m2/yr = kgC/plant/yr * plant/m2 plant2cohort <- gwbi_ch * plant_dens cohort2patch <- tapply(plant2cohort, list("patch" = patch_index), sum, na.rm = TRUE) out[[pecan_names[l]]][k] <- sum(cohort2patch * patch_area, na.rm = TRUE) - } else if (ed_units[l] %in% c("kgC/plant")) { pft.var <- tmp.var pft.var[!ind] <- 0 - # kgC/m2 = kgC/plant * plant/m2 + # kgC/m2 = kgC/plant * plant/m2 plant2cohort <- pft.var * plant_dens # sum cohorts to aggrete to patches cohort2patch <- tapply(plant2cohort, list("patch" = patch_index), sum, na.rm = TRUE) - # scale up to site-level - out[[pecan_names[l]]][k] <- sum(cohort2patch*patch_area, na.rm = TRUE) - + # scale up to site-level + out[[pecan_names[l]]][k] <- sum(cohort2patch * patch_area, na.rm = TRUE) } - } #any(ind)-if SHOULD THERE BE AN ELSE? DOES ED2 EVER DRIVES SOME PFTs TO EXTINCTION? - } #k-loop - - }# per-pft or not - } #l-loop - - + } # any(ind)-if SHOULD THERE BE AN ELSE? DOES ED2 EVER DRIVES SOME PFTs TO EXTINCTION? + } # k-loop + } # per-pft or not + } # l-loop + + # pass everything, unaggregated out$restart <- ed.dat - - + + return(out) - } # read_S_files #' Extract pft numbers from settings$pfts -#' +#' #' A helper function to extract a named vector of pft numbers from #' `settings$pfts`. 
Will use pft numbers in `settings` if they exist, otherwise #' it'll match using the `pftmapping` dataset @@ -1507,12 +1607,11 @@ read_S_files <- function(sfile, outdir, pfts, pecan_names = NULL, settings = NUL #' @return named numeric vector #' extract_pfts <- function(pfts) { - get_pft_num <- function(x) { pftmapping <- PEcAn.ED2::pftmapping pft_number <- x[["ed2_pft_number"]] pft_name <- x[["name"]] - if(!is.null(pft_number)) { + if (!is.null(pft_number)) { pft_number <- as.numeric(pft_number) if (!is.finite(pft_number)) { PEcAn.logger::logger.severe( @@ -1523,15 +1622,15 @@ extract_pfts <- function(pfts) { } else { pft_number <- pftmapping$ED[pftmapping$PEcAn == pft_name] } - + as.integer(pft_number) } - + # apply to all pfts in list pfts_out <- sapply(pfts, get_pft_num) names(pfts_out) <- pfts %>% sapply(`[[`, "name") - - #return named numeric vector: + + # return named numeric vector: pfts_out } @@ -1541,10 +1640,10 @@ var_put <- function(nc, varid, vals, start = NA, count = NA) { output <- utils::capture.output( ncdf4::ncvar_put(nc = nc, varid = varid, vals = vals, start = start, count = count) ) - if(length(output)!=0) { + if (length(output) != 0) { cat(paste0("With '", varid$name, "':"), output, "\n") } } -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ### EOF diff --git a/models/ed/R/modify_ed2in.R b/models/ed/R/modify_ed2in.R index 9cc843659db..7d21da319ec 100644 --- a/models/ed/R/modify_ed2in.R +++ b/models/ed/R/modify_ed2in.R @@ -1,53 +1,53 @@ #' Modify an ED2IN object #' -#' This is a convenience function for modifying an `ed2in` list object. -#' Arguments passed in all caps are assumed to be ED2IN namelist parameters and -#' are inserted directly into the `ed2in` list objects. Lowercase arguments are -#' defined explicitly (see "Parameters"), and those that do not match explicit -#' arguments will be ignored with a warning. Because the lowercase arguments -#' come with additional validity checks, they are recommended over modifying +#' This is a convenience function for modifying an `ed2in` list object. +#' Arguments passed in all caps are assumed to be ED2IN namelist parameters and +#' are inserted directly into the `ed2in` list objects. Lowercase arguments are +#' defined explicitly (see "Parameters"), and those that do not match explicit +#' arguments will be ignored with a warning. Because the lowercase arguments +#' come with additional validity checks, they are recommended over modifying #' the ED2IN file directly via uppercase arguments. For all lowercase #' arguments, the default (`NULL`) means to use whatever is currently #' in the input `ed2in`. #' -#' Namelist arguments are applied last, and will silently overwrite any +#' Namelist arguments are applied last, and will silently overwrite any #' arguments set by special case arguments. #' -#' Namelist arguments can be stored in a list and passed in via the `.dots` -#' argument (e.g. `.dots = list(SFILIN = "/path/prefix_", ...)`), or using the -#' `rlang::!!!` splicing operator. If both are provided, they will be spliced +#' Namelist arguments can be stored in a list and passed in via the `.dots` +#' argument (e.g. `.dots = list(SFILIN = "/path/prefix_", ...)`), or using the +#' `rlang::!!!` splicing operator. If both are provided, they will be spliced #' together, with the `...` taking precedence. 
#' #' For `output_types`, select one or more of the following: #' - "fast" -- Fast analysis; mostly polygon-level averages (`IFOUTPUT`) #' - "daily -- Daily means (one file per day) (`IDOUTPUT`) #' - "monthly" -- Monthly means (one file per month) (`IMOUTPUT`) -#' - "monthly_diurnal" -- Monthly means of the diurnal cycle (one file per +#' - "monthly_diurnal" -- Monthly means of the diurnal cycle (one file per #' month) (`IQOUTPUT`) #' - "annual" -- Annual (one file per year) (`IYOUTPUT`) -#' - "instant" -- Instantaneous fluxes, mostly polygon-level variables, one +#' - "instant" -- Instantaneous fluxes, mostly polygon-level variables, one #' file per year (`ITOUTPUT`) #' - "restart" -- Restart file for HISTORY runs. (`ISOUTPUT`) #' - "all" -- All output types #' #' @param ed2in list to modify #' @param ... Namelist arguments (see Description and Details) -#' @param veg_prefix Vegetation file prefix (`SFILIN`). If `lat` and `lon` are part of the prefix, -#' @param latitude Run latitude coordinate. If `veg_prefix` is also provided, -#' pass to [read_ed_veg], otherwise set in ED2IN directly. Should be omitted if +#' @param veg_prefix Vegetation file prefix (`SFILIN`). If `lat` and `lon` are part of the prefix, +#' @param latitude Run latitude coordinate. If `veg_prefix` is also provided, +#' pass to [read_ed_veg], otherwise set in ED2IN directly. Should be omitted if #' `lat` and `lon` are already part of `veg_prefix`. -#' @param longitude Run longitude coordinate. If `veg_prefix` is also provided, -#' pass to [read_ed_veg], otherwise set in ED2IN directly. Should be omitted if +#' @param longitude Run longitude coordinate. If `veg_prefix` is also provided, +#' pass to [read_ed_veg], otherwise set in ED2IN directly. Should be omitted if #' `lat` and `lon` are already part of `veg_prefix`. -#' @param met_driver Path and filename of met driver header +#' @param met_driver Path and filename of met driver header #' (`ED_MET_DRIVER_DB`) #' @param start_date Run start date (`IMONTHA`, `IDATEA`, `IYEARA`, `ITIMEA`) #' @param end_date Run end date (`IMONTHZ`, `IDATEZ`, `IYEARZ` `ITIMEZ`) -#' @param EDI_path Path to `EDI` directory, which often has the `VEG_DATABASE` +#' @param EDI_path Path to `EDI` directory, which often has the `VEG_DATABASE` #' and `THSUMS_DATABASE` files. #' @param output_types Character vector of output types (see Details) -#' @param output_dir Output directory, for `FFILOUT` (analysis) and `SFILOUT` -#'(history) files +#' @param output_dir Output directory, for `FFILOUT` (analysis) and `SFILOUT` +#' (history) files #' @param run_dir Directory in which to store run-related config files (e.g. `config.xml`). #' @param runtype ED initialization mode; either "INITIAL" or "HISTORY" #' @param run_name Give the run an informative name/description. Sets @@ -60,9 +60,9 @@ #' (`NULL`) means to use whatever is already in the current ED2IN #' file, which is usually all (1-17) of ED's PFTs. #' @param pecan_defaults Logical. If `TRUE`, set common `ED2IN` defaults. -#' @param add_if_missing Logical. If `TRUE`, all-caps arguments not found in -#'existing `ed2in` list will be added to the end. Default = `FALSE`. -#' @param check_paths Logical. If `TRUE` (default), for any parameters that +#' @param add_if_missing Logical. If `TRUE`, all-caps arguments not found in +#' existing `ed2in` list will be added to the end. Default = `FALSE`. +#' @param check_paths Logical. If `TRUE` (default), for any parameters that #' expect files, check that files exist and throw an error if they don't. 
#' @param .dots A list of `...` arguments. #' @return Modified `ed2in` list object. See [read_ed2in]. @@ -85,7 +85,6 @@ modify_ed2in <- function(ed2in, ..., add_if_missing = FALSE, check_paths = TRUE, .dots = list()) { - if (is.null(.dots)) { .dots <- list() } @@ -177,7 +176,7 @@ modify_ed2in <- function(ed2in, ..., as.numeric(strftime(start_date, "%H%M", tz = "UTC")) ed2in[["METCYC1"]] <- ed2in[["IYEARA"]] } - + if (!is.null(end_date)) { ed2in[["IYEARZ"]] <- lubridate::year(end_date) diff --git a/models/ed/R/other.helpers.ED2.R b/models/ed/R/other.helpers.ED2.R index ea36b124d17..710117ddba4 100644 --- a/models/ed/R/other.helpers.ED2.R +++ b/models/ed/R/other.helpers.ED2.R @@ -9,10 +9,10 @@ #' @param ... arguments passed on to base::list.files #' @export list.files.nodir <- function(path, ...) { - allfiles <- list.files(path, ...) - dirs <- list.dirs(path, full.names = FALSE) - outfiles <- setdiff(allfiles, dirs) - return(outfiles) + allfiles <- list.files(path, ...) + dirs <- list.dirs(path, full.names = FALSE) + outfiles <- setdiff(allfiles, dirs) + return(outfiles) } @@ -23,20 +23,18 @@ list.files.nodir <- function(path, ...) { #' var.names <- c("DBH", "AGB", "AbvGrndWood") #' translate_vars_ed(var.names) translate_vars_ed <- function(varnames) { - var.list <- add.list <- list() ed_vars <- ed_derivations <- ed_units <- rep(NA, length(varnames)) - - for(n in seq_along(varnames)){ - edvarout <- ed.var(varnames[n]) - var.list[[n]] <- edvarout$readvar + + for (n in seq_along(varnames)) { + edvarout <- ed.var(varnames[n]) + var.list[[n]] <- edvarout$readvar ed_derivations[n] <- edvarout$expr - add.list[[n]] <- edvarout$drelated - ed_units[n] <- edvarout$units + add.list[[n]] <- edvarout$drelated + ed_units[n] <- edvarout$units } - - varnames <- unique(unlist(var.list)) + + varnames <- unique(unlist(var.list)) addvarnames <- unique(unlist(add.list)) return(list(vars = varnames, addvars = addvarnames, expr = ed_derivations, units = ed_units)) } - diff --git a/models/ed/R/parse.history.R b/models/ed/R/parse.history.R index dae6430f5b8..e59f11acfd6 100644 --- a/models/ed/R/parse.history.R +++ b/models/ed/R/parse.history.R @@ -1,6 +1,6 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials +# All rights reserved. This program and the accompanying materials # are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at @@ -10,7 +10,7 @@ ##' This will generate the CSV file needed by write configs to write the ##' config.xml. This is a hack right now, all this information should be ##' in the PEcAn DB. 
-##' +##' ##' @name parse.history ##' @title Create a CSV from history.xml outputed by ED ##' @@ -20,11 +20,11 @@ ##' ##' @author Rob Kooper parse.history <- function(historyfile, outfile = "") { - hist <- XML::xmlToList(XML::xmlParse(historyfile)) - keys <- names(hist$pft) - - cat(paste(keys, collapse = ";"), sep = "\n", file = outfile, append = FALSE) - for (pft in hist) { - cat(paste(lapply(pft[keys], stringr::str_trim), collapse = ";"), sep = "\n", file = outfile, append = TRUE) - } + hist <- XML::xmlToList(XML::xmlParse(historyfile)) + keys <- names(hist$pft) + + cat(paste(keys, collapse = ";"), sep = "\n", file = outfile, append = FALSE) + for (pft in hist) { + cat(paste(lapply(pft[keys], stringr::str_trim), collapse = ";"), sep = "\n", file = outfile, append = TRUE) + } } # parse.history diff --git a/models/ed/R/read_ed2in.R b/models/ed/R/read_ed2in.R index 342baa70b95..c4d15a09413 100644 --- a/models/ed/R/read_ed2in.R +++ b/models/ed/R/read_ed2in.R @@ -10,17 +10,17 @@ read_ed2in <- function(filename) { # Extract tag-value pairs ed2in_tag_rxp <- paste0( - "^[[:blank:]]*", # Initial whitespace (does not start with a `!` comment) - "NL%([[:graph:]]+)", # Capture namelist tag (1) - "[[:blank:]]+=[[:blank:]]*", # Equals, with optional surrounding whitespace - "(", # Begin value capture (2) - "[[:digit:].-]+(,[[:blank:]]*[[:digit:].-]+)*", # Number, or number list - "|", # ...or... - "@.*?@", # Old substitution tag (e.g. @MYVALUE@) - "|", # ...or... - "'[[:graph:][:blank:]]*'", # Quoted string, or list of strings - ")", # End value capture - "[[:blank:]]*!?.*$" # Trailing whitespace and possible comments + "^[[:blank:]]*", # Initial whitespace (does not start with a `!` comment) + "NL%([[:graph:]]+)", # Capture namelist tag (1) + "[[:blank:]]+=[[:blank:]]*", # Equals, with optional surrounding whitespace + "(", # Begin value capture (2) + "[[:digit:].-]+(,[[:blank:]]*[[:digit:].-]+)*", # Number, or number list + "|", # ...or... + "@.*?@", # Old substitution tag (e.g. @MYVALUE@) + "|", # ...or... + "'[[:graph:][:blank:]]*'", # Quoted string, or list of strings + ")", # End value capture + "[[:blank:]]*!?.*$" # Trailing whitespace and possible comments ) tag_lines <- grep(ed2in_tag_rxp, raw_file, perl = TRUE) @@ -35,11 +35,11 @@ read_ed2in <- function(filename) { # Convert to a list to allow storing of multiple data types values_list <- as.list(values) - + # NOTE: code below relies on as.numeric() coercing values to NA numeric_values <- !is.na(suppressWarnings(as.numeric(values))) | - grepl("^@.*?@$", values) # Unquoted old substitutions are numeric - #check for old substitution tags + grepl("^@.*?@$", values) # Unquoted old substitutions are numeric + # check for old substitution tags if (any(grepl("^@.*?@$", values))) { PEcAn.logger::logger.warn("Old substitution tags present in ED2IN file") } @@ -86,7 +86,7 @@ read_ed2in <- function(filename) { #' Sets attributes to `NULL` before printing, so the output isn't as messy. #' #' @inheritParams base::print -#' +#' #' @export print.ed2in <- function(x, ...) { attributes(x) <- attributes(x)["names"] diff --git a/models/ed/R/read_ed_metheader.R b/models/ed/R/read_ed_metheader.R index 640a531cde1..e15ac32ce1c 100644 --- a/models/ed/R/read_ed_metheader.R +++ b/models/ed/R/read_ed_metheader.R @@ -1,9 +1,9 @@ #' Read ED meteorology header file #' -#' Read a ED_MET_DRIVER_HEADER file into a list-like object that can be +#' Read a ED_MET_DRIVER_HEADER file into a list-like object that can be #' manipulated within R. Returns a list of file formats. 
#' -#' The output is an unnamed list with each element corresponding to a single +#' The output is an unnamed list with each element corresponding to a single #' file format. Each file format contains the following elements: #' #' - `path_prefix` -- Path and prefix of files @@ -13,13 +13,13 @@ #' - `dy` -- Size of latitude grid cell #' - `xmin` -- Minimum longitude #' - `ymin` -- Minimum latitude -#' - `variables` -- Data frame of variables, with the columns described below. -#' Starred columns are required for writing. This table is left joined with +#' - `variables` -- Data frame of variables, with the columns described below. +#' Starred columns are required for writing. This table is left joined with #' [met_variable_description] and [met_flag_description]. #' - `variable` -- Variable name #' - `description` -- Variable description #' - `unit` -- Variable unit -#' - `update_frequency` -- Update frequency (seconds) or scalar values if +#' - `update_frequency` -- Update frequency (seconds) or scalar values if #' `flag=4` #' - `flag` -- Variable flags. #' - `flag_description` -- Description of variable flag @@ -38,11 +38,11 @@ #' ``` #' #' The variables in the third row are defined as follows: -#' @param filename File name (including path) of met driver header file, as +#' @param filename File name (including path) of met driver header file, as #' character -#' @param check Logical, whether or not to check file for correctness (default +#' @param check Logical, whether or not to check file for correctness (default #' = `TRUE`) -#' @param check_files Logical. If `TRUE`, perform basic diagnostics on met +#' @param check_files Logical. If `TRUE`, perform basic diagnostics on met #' files as well. #' @return List of ED met input parameters. See Details. #' @importFrom rlang .data @@ -71,7 +71,7 @@ read_ed_metheader <- function(filename, check = TRUE, check_files = TRUE) { ed_metheader <- vector("list", nvars) var_lines <- 6 for (i in seq_len(nvars)) { - block <- seq(2 + length(comment_line) + (i-1) * var_lines, length.out = var_lines) + block <- seq(2 + length(comment_line) + (i - 1) * var_lines, length.out = var_lines) sub_file <- full_file[block] path_prefix <- sub_file[1] met_files <- PEcAn.utils::match_file(path_prefix) @@ -141,7 +141,7 @@ met_variable_description <- tibble::tribble( #' Description of meteorology flags #' #' Descriptions of ED met header variable flags. -#' +#' #' `data.frame` with the following columns: #' - `flag` -- Numeric flag (in header file) #' - `flag_description` -- Description of flag diff --git a/models/ed/R/read_ed_veg.R b/models/ed/R/read_ed_veg.R index bea75ec29ea..be92815832a 100644 --- a/models/ed/R/read_ed_veg.R +++ b/models/ed/R/read_ed_veg.R @@ -5,10 +5,10 @@ #' @param path_prefix Full path and prefix to initial condition files. #' @param latitude Run latitude (default = `NULL`). If `NULL`, deduced from file name. #' @param longitude Run longitude (default = `NULL`). If `NULL`, deduced from file name. -#' @param check Whether or not to check css, pss, and site files for validity. +#' @param check Whether or not to check css, pss, and site files for validity. #' Default = `TRUE`. -#' @return List containing `css`, `pss`, and `site` objects, `latitude` and -#' `longitude`, and `orig_paths`, a list of paths to the original `css`, `pss`, +#' @return List containing `css`, `pss`, and `site` objects, `latitude` and +#' `longitude`, and `orig_paths`, a list of paths to the original `css`, `pss`, #' and `site` files. 
#' @export read_ed_veg <- function(path_prefix, latitude = NULL, longitude = NULL, @@ -65,12 +65,12 @@ read_ed_veg <- function(path_prefix, latitude = NULL, longitude = NULL, #' Parse latitude or longitude #' -#' Automatically determine latitude or longitude from an ED input filepath. If -#' the latitude/longitude regular expression isn't matched, this will throw an +#' Automatically determine latitude or longitude from an ED input filepath. If +#' the latitude/longitude regular expression isn't matched, this will throw an #' error. #' #' @param filepath Path to a css, pss, or site file -#' @param latlon Which value to retrieve, either "lat" for latitude or "lon" +#' @param latlon Which value to retrieve, either "lat" for latitude or "lon" #' for longitude #' @return Numeric value of latitude or longitude get_latlon <- function(filepath, latlon) { @@ -124,4 +124,3 @@ read_site <- function(filepath, check = TRUE, ...) { } site } - diff --git a/models/ed/R/read_restart.ED2.R b/models/ed/R/read_restart.ED2.R index f9fc9b18dba..aa20185b2e7 100644 --- a/models/ed/R/read_restart.ED2.R +++ b/models/ed/R/read_restart.ED2.R @@ -9,137 +9,158 @@ #' @param params Any parameters required for state calculations #' @examples #' \dontrun{ -#' outdir <- "~/sda-hackathon/outputs" -#' runid <- "99000000020" -#' settings_file <- "outputs/pecan.CONFIGS.xml" -#' settings <- PEcAn.settings::read.settings(settings_file) -#' forecast <- read_restart.ED2(...) +#' outdir <- "~/sda-hackathon/outputs" +#' runid <- "99000000020" +#' settings_file <- "outputs/pecan.CONFIGS.xml" +#' settings <- PEcAn.settings::read.settings(settings_file) +#' forecast <- read_restart.ED2(...) #' } -#' +#' #' @export -read_restart.ED2 <- function(outdir, +read_restart.ED2 <- function(outdir, runid, stop.time, - settings, - var.names, + settings, + var.names, params) { - # depends on code run on local or remote, currently runs locally rundir <- settings$rundir mod_outdir <- settings$modeloutdir # is there a case this is different than outdir? - - + + histfile <- get_restartfile.ED2(mod_outdir, runid, stop.time) if (is.null(histfile)) { PEcAn.logger::logger.severe("Failed to find ED2 history restart file.") } - - - pft_names <- sapply(settings$pfts, '[[', 'name') - - - # var.names <- c("AbvGrndWood", "GWBI", "TotLivBiom", "leaf_carbon_content") - histout <- read_S_files(sfile = basename(histfile), - outdir = dirname(histfile), - pft_names = pft_names, - pecan_names = var.names) - + + + pft_names <- sapply(settings$pfts, "[[", "name") + + + # var.names <- c("AbvGrndWood", "GWBI", "TotLivBiom", "leaf_carbon_content") + histout <- read_S_files( + sfile = basename(histfile), + outdir = dirname(histfile), + pft_names = pft_names, + pecan_names = var.names + ) + # unit conversions and other aggregations forecast <- list() - + for (var_name in var.names) { - # should there be a tag passed via settings to check for per pft assimilation vs totals? 
perpft <- FALSE # for now just working with totals for HF tree-ring DA - + if (var_name == "AGB") { - - forecast_tmp <- switch(perpft+1, sum(histout$AGB, na.rm = TRUE), histout$AGB) # kgC/m2 - forecast[[length(forecast)+1]] <- PEcAn.utils::ud_convert(forecast_tmp, "kg/m^2", "Mg/ha") # conv to MgC/ha - names(forecast)[length(forecast)] <- switch(perpft+1, "AGB", paste0("AGB.", pft_names)) - + forecast_tmp <- switch(perpft + 1, + sum(histout$AGB, na.rm = TRUE), + histout$AGB + ) # kgC/m2 + forecast[[length(forecast) + 1]] <- PEcAn.utils::ud_convert(forecast_tmp, "kg/m^2", "Mg/ha") # conv to MgC/ha + names(forecast)[length(forecast)] <- switch(perpft + 1, + "AGB", + paste0("AGB.", pft_names) + ) } - + if (var_name == "AGB.pft") { perpft <- TRUE - forecast_tmp <- switch(perpft+1, sum(histout$AGB, na.rm = TRUE), histout$AGB) # kgC/m2 - names(forecast_tmp) <- switch(perpft + 1, "AGB", paste0(pft_names)) - forecast[[length(forecast)+1]] <- PEcAn.utils::ud_convert(forecast_tmp, "kg/m^2", "Mg/ha") # conv to MgC/ha - names(forecast)[length(forecast)] <- "AGB.pft" + forecast_tmp <- switch(perpft + 1, + sum(histout$AGB, na.rm = TRUE), + histout$AGB + ) # kgC/m2 + names(forecast_tmp) <- switch(perpft + 1, + "AGB", + paste0(pft_names) + ) + forecast[[length(forecast) + 1]] <- PEcAn.utils::ud_convert(forecast_tmp, "kg/m^2", "Mg/ha") # conv to MgC/ha + names(forecast)[length(forecast)] <- "AGB.pft" } - + if (var_name == "TotLivBiom") { - - forecast_tmp <- switch(perpft+1, sum(histout$TotLivBiom, na.rm = TRUE), histout$TotLivBiom) # kgC/m2 - forecast[[length(forecast)+1]] <- PEcAn.utils::ud_convert(forecast_tmp, "kg/m^2", "Mg/ha") # conv to MgC/ha - names(forecast)[length(forecast)] <- switch(perpft+1, "TotLivBiom", paste0("TotLivBiom.", pft_names)) - + forecast_tmp <- switch(perpft + 1, + sum(histout$TotLivBiom, na.rm = TRUE), + histout$TotLivBiom + ) # kgC/m2 + forecast[[length(forecast) + 1]] <- PEcAn.utils::ud_convert(forecast_tmp, "kg/m^2", "Mg/ha") # conv to MgC/ha + names(forecast)[length(forecast)] <- switch(perpft + 1, + "TotLivBiom", + paste0("TotLivBiom.", pft_names) + ) } - + if (var_name == "AbvGrndWood") { - - forecast_tmp <- switch(perpft+1, sum(histout$AbvGrndWood, na.rm = TRUE), histout$AbvGrndWood) # kgC/m2 - forecast[[length(forecast)+1]] <- PEcAn.utils::ud_convert(forecast_tmp, "kg/m^2", "Mg/ha") # conv to MgC/ha - names(forecast)[length(forecast)] <- switch(perpft+1, "AbvGrndWood", paste0("AbvGrndWood.", pft_names)) - + forecast_tmp <- switch(perpft + 1, + sum(histout$AbvGrndWood, na.rm = TRUE), + histout$AbvGrndWood + ) # kgC/m2 + forecast[[length(forecast) + 1]] <- PEcAn.utils::ud_convert(forecast_tmp, "kg/m^2", "Mg/ha") # conv to MgC/ha + names(forecast)[length(forecast)] <- switch(perpft + 1, + "AbvGrndWood", + paste0("AbvGrndWood.", pft_names) + ) } - + if (var_name == "leaf_carbon_content") { - - forecast_tmp <- switch(perpft+1, sum(histout$leaf_carbon_content, na.rm = TRUE), histout$leaf_carbon_content) # kgC/m2 - forecast[[length(forecast)+1]] <- PEcAn.utils::ud_convert(forecast_tmp, "kg/m^2", "Mg/ha") # conv to MgC/ha - names(forecast)[length(forecast)] <- switch(perpft+1, "leaf_carbon_content", paste0("leaf_carbon_content.", pft_names)) - + forecast_tmp <- switch(perpft + 1, + sum(histout$leaf_carbon_content, na.rm = TRUE), + histout$leaf_carbon_content + ) # kgC/m2 + forecast[[length(forecast) + 1]] <- PEcAn.utils::ud_convert(forecast_tmp, "kg/m^2", "Mg/ha") # conv to MgC/ha + names(forecast)[length(forecast)] <- switch(perpft + 1, + "leaf_carbon_content", + 
paste0("leaf_carbon_content.", pft_names) + ) } - + if (var_name == "storage_carbon_content") { - - forecast[[length(forecast)+1]] <- switch(perpft+1, sum(histout$storage_carbon_content, na.rm = TRUE), histout$storage_carbon_content) # kgC/m2 - names(forecast)[length(forecast)] <- switch(perpft+1, "storage_carbon_content", paste0("storage_carbon_content.", pft_names)) - + forecast[[length(forecast) + 1]] <- switch(perpft + 1, + sum(histout$storage_carbon_content, na.rm = TRUE), + histout$storage_carbon_content + ) # kgC/m2 + names(forecast)[length(forecast)] <- switch(perpft + 1, + "storage_carbon_content", + paste0("storage_carbon_content.", pft_names) + ) } - - + + if (var_name == "GWBI") { - - forecast_tmp <- switch(perpft+1, sum(histout$GWBI, na.rm = TRUE), histout$GWBI) # kgC/m2/yr - forecast[[length(forecast)+1]] <- PEcAn.utils::ud_convert(forecast_tmp, "kg/m^2/yr", "Mg/ha/yr") # conv to MgC/ha/yr - names(forecast)[length(forecast)] <- switch(perpft+1, "GWBI", paste0("GWBI.", pft_names)) - + forecast_tmp <- switch(perpft + 1, + sum(histout$GWBI, na.rm = TRUE), + histout$GWBI + ) # kgC/m2/yr + forecast[[length(forecast) + 1]] <- PEcAn.utils::ud_convert(forecast_tmp, "kg/m^2/yr", "Mg/ha/yr") # conv to MgC/ha/yr + names(forecast)[length(forecast)] <- switch(perpft + 1, + "GWBI", + paste0("GWBI.", pft_names) + ) } - + if (var_name == "fast_soil_pool_carbon_content") { - - forecast[[length(forecast)+1]] <- histout$fast_soil_pool_carbon_content # kgC/m2 - names(forecast)[length(forecast)] <- "fast_soil_pool_carbon_content" - + forecast[[length(forecast) + 1]] <- histout$fast_soil_pool_carbon_content # kgC/m2 + names(forecast)[length(forecast)] <- "fast_soil_pool_carbon_content" } - + if (var_name == "structural_soil_pool_carbon_content") { - - forecast[[length(forecast)+1]] <- histout$structural_soil_pool_carbon_content # kgC/m2 - names(forecast)[length(forecast)] <- "structural_soil_pool_carbon_content" - + forecast[[length(forecast) + 1]] <- histout$structural_soil_pool_carbon_content # kgC/m2 + names(forecast)[length(forecast)] <- "structural_soil_pool_carbon_content" } - - } # var.names loop - + restart <- list() # pass certain things for write_restart to use (so that there we don't have to re-read and re-calculate stuff) # IMPORTANT NOTE: in the future, these "certain things" need to be confined to old states that will be used # to carry out deternimistic relationships, no other read/write restart should copy this logic - restart$restart <- histout$restart + restart$restart <- histout$restart restart$histfile <- histfile - + params$restart <- restart - + PEcAn.logger::logger.info("Finished --", runid) - + X_tmp <- list(X = unlist(forecast), params = params) - + return(X_tmp) - } # read_restart.ED2 - - diff --git a/models/ed/R/run_ed_singularity.R b/models/ed/R/run_ed_singularity.R index 74b69388554..e169f0c84ae 100644 --- a/models/ed/R/run_ed_singularity.R +++ b/models/ed/R/run_ed_singularity.R @@ -2,8 +2,8 @@ #' #' Uses [base::system2] to run ED or EDR via a Singularity container. #' -#' On some systems, to run Singularity properly, you will need to bind -#' additional paths. To do this, pass the arguments as a character vector to +#' On some systems, to run Singularity properly, you will need to bind +#' additional paths. To do this, pass the arguments as a character vector to #' `singularity_args`. 
For instance: #' #' ``` @@ -11,10 +11,10 @@ #' run_ed_singularity(..., singularity_args = paste("--bind", bindpaths)) #' ``` #' -#' By default, [base::system2] prints the output to the console. To store -#' standard ED output in a variable as a character vector, set `stdout = TRUE`. -#' To redirect all output to the variable, including GCC exceptions, use -#' `stderr = TRUE` (this will automatically set `stdout = TRUE` as well). +#' By default, [base::system2] prints the output to the console. To store +#' standard ED output in a variable as a character vector, set `stdout = TRUE`. +#' To redirect all output to the variable, including GCC exceptions, use +#' `stderr = TRUE` (this will automatically set `stdout = TRUE` as well). #' Output can also be redirected to a file via `stderr = "/path/to/file.log"`. #' #' @param img_path Path to Singularity container (usually a `.simg` file) diff --git a/models/ed/R/veg2model.ED2.R b/models/ed/R/veg2model.ED2.R index ac7e23d9ba5..7b13bed0b7c 100644 --- a/models/ed/R/veg2model.ED2.R +++ b/models/ed/R/veg2model.ED2.R @@ -1,5 +1,5 @@ #' Writes ED specific IC files -#' +#' #' @param veg_info object passed from write_ic includes pft matches #' @param start_date "YYYY-MM-DD" passed from write_ic #' @param new_site object passed from write_ic includes site id, lat, lon, and sitename @@ -10,109 +10,113 @@ #' @return filenames #' @export #' @author Istem Fer -veg2model.ED2 <- function(outfolder, veg_info, start_date, new_site, source, ens){ - - - lat <- as.numeric(as.character(new_site$lat)) - lon <- as.numeric(as.character(new_site$lon)) +veg2model.ED2 <- function(outfolder, veg_info, start_date, new_site, source, ens) { + lat <- as.numeric(as.character(new_site$lat)) + lon <- as.numeric(as.character(new_site$lon)) #--------------------------------------------------------------------------------------------------# # Handle file names - - start_year <- lubridate::year(start_date) + + start_year <- lubridate::year(start_date) formatnames <- c("ED2.cohort", "ED2.patch", "ED2.site") dbfilenames <- c("css.file", "pss.file", "site.file") - + file.prefix <- paste(source, start_year, ens, - get.ed.file.latlon.text(lat, lon, site.style = FALSE), sep = ".") + get.ed.file.latlon.text(lat, lon, site.style = FALSE), + sep = "." 
+ ) + + + filenames <- c( + paste0(file.prefix, ".css"), + paste0(file.prefix, ".pss"), + paste0(file.prefix, ".site") + ) + + filenames_full <- file.path(outfolder, filenames) - - filenames <- c(paste0(file.prefix, ".css"), - paste0(file.prefix, ".pss"), - paste0(file.prefix, ".site")) - - filenames_full <- file.path(outfolder, filenames) - #--------------------------------------------------------------------------------------------------# # Prepare pss - - # veg_info[[1]] either has + + # veg_info[[1]] either has # i) full pss info (FIA case) # ii) info passed from settings # iii) no info pss <- as.data.frame(veg_info[[1]], stringsAsFactors = FALSE) - + # for FIA these steps are unnecessary, it already has the pss info - if(source != "FIA"){ - if(!is.null(pss$time)){ + if (source != "FIA") { + if (!is.null(pss$time)) { time <- as.numeric(pss$time) - }else{ + } else { PEcAn.logger::logger.info("No year info passed via metadata, using start year: ", start_year) time <- start_year } - if(!is.null(pss$n.patch)){ + if (!is.null(pss$n.patch)) { n.patch <- as.numeric(pss$n.patch) - }else{ + } else { PEcAn.logger::logger.info("No patch number info passed via metadata, assuming 1 patch.") n.patch <- 1 } - if(!is.null(pss$trk)){ + if (!is.null(pss$trk)) { trk <- as.numeric(pss$trk) - }else{ + } else { PEcAn.logger::logger.info("No trk info passed via metadata, assuming 1.") trk <- 1 } - if(!is.null(pss$age)){ + if (!is.null(pss$age)) { age <- as.numeric(pss$age) - }else{ + } else { PEcAn.logger::logger.info("No stand age info passed via metadata, assuming 100.") age <- 100 } - if(!is.null(pss$area)){ + if (!is.null(pss$area)) { area <- as.numeric(pss$area) - }else{ + } else { PEcAn.logger::logger.severe("No area info passed via metadata, please provide area of your plot in m2 under 'settings$run$inputs$css$metadata$area'.") } - + pss <- data.frame(time = rep(time, n.patch), patch = seq_len(n.patch), trk = rep(trk, n.patch), age = rep(age, n.patch)) - - PEcAn.logger::logger.info(paste0("Values used in the patch file - time:", - pss$time, ", patch:", pss$patch, ", trk:", - pss$trk, ", age:", pss$age)) - + + PEcAn.logger::logger.info(paste0( + "Values used in the patch file - time:", + pss$time, ", patch:", pss$patch, ", trk:", + pss$trk, ", age:", pss$age + )) + # TODO : soils can also be here, passed from settings } - + ## fill missing data w/ defaults - pss$site <- 1 - pss$area <- 1 / n.patch + pss$site <- 1 + pss$area <- 1 / n.patch pss$water <- 0 - + # Reorder columns pss <- pss[, c("site", "time", "patch", "trk", "age", "area", "water")] - + # Add soil data: Currently uses default values, will soil_process overwrite it afterwards? 
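A compact stand-alone example of the patch (pss) table assembled above, using the same fallback defaults the function logs when no metadata is supplied (1 patch, trk = 1, age = 100); only the year is invented.

```r
# Hypothetical single-patch pss table; defaults filled in exactly as above.
n.patch <- 1
pss <- data.frame(
  time  = rep(2004, n.patch),
  patch = seq_len(n.patch),
  trk   = rep(1, n.patch),
  age   = rep(100, n.patch)
)
pss$site  <- 1
pss$area  <- 1 / n.patch
pss$water <- 0
pss <- pss[, c("site", "time", "patch", "trk", "age", "area", "water")]
pss
#   site time patch trk age area water
# 1    1 2004     1   1 100    1     0
```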
- soil <- c(1, 5, 5, 0.01, 0, 1, 1) #soil C & N pools (biogeochem) defaults (fsc,stsc,stsl,ssc,psc,msn,fsn) - soil.dat <- as.data.frame(matrix(soil, n.patch, 7, byrow = TRUE)) + soil <- c(1, 5, 5, 0.01, 0, 1, 1) # soil C & N pools (biogeochem) defaults (fsc,stsc,stsl,ssc,psc,msn,fsn) + soil.dat <- as.data.frame(matrix(soil, n.patch, 7, byrow = TRUE)) names(soil.dat) <- c("fsc", "stsc", "stsl", "ssc", "psc", "msn", "fsn") - pss <- cbind(pss, soil.dat) + pss <- cbind(pss, soil.dat) #--------------------------------------------------------------------------------------------------# # Prepare css - + obs <- veg_info[[2]] - + # remove NA rows for unmatched PFTs, this should mean dead trees only css <- obs[!is.na(obs$pft), ] - + # might further need removing dead trees by mortality status # css <- remove_dead_trees() - if(!is.null(css$Subplot)){ - css$patch <- css$Subplot - }else{ - css$patch <- 1 + if (!is.null(css$Subplot)) { + css$patch <- css$Subplot + } else { + css$patch <- 1 } # Remove rows that don't map to any patch @@ -123,32 +127,32 @@ veg2model.ED2 <- function(outfolder, veg_info, start_date, new_site, source, ens PEcAn.logger::logger.debug(paste0(nrow(css), " trees that map to selected patches.")) } - - if(is.null(css$n)){ - css$n <- 1/area + + if (is.null(css$n)) { + css$n <- 1 / area } - - if(is.null(css$cohort)){ + + if (is.null(css$cohort)) { # every tree is its own cohort, ED2 will fuse them or simulate them individually depending on max.cohort - css$cohort <- do.call("c", lapply(seq_len(n.patch), function(x) 1:sum(css$patch==x))) + css$cohort <- do.call("c", lapply(seq_len(n.patch), function(x) 1:sum(css$patch == x))) } - + inv.years <- as.numeric(unique(css$year)) # suitable years av.years <- inv.years[inv.years <= start_year] - if(length(av.years) == 0){ + if (length(av.years) == 0) { PEcAn.logger::logger.severe("No available years found in the data.") } css$time <- max(av.years) - + # filter out other years css <- css[css$year == css$time, ] - - if("DBH" %in% colnames(css)){ + + if ("DBH" %in% colnames(css)) { colnames(css)[colnames(css) == "DBH"] <- "dbh" } - - + + # Convert PFT names to ED2 Numbers pftmapping <- PEcAn.ED2::pftmapping css$pft.number <- NA @@ -158,46 +162,47 @@ veg2model.ED2 <- function(outfolder, veg_info, start_date, new_site, source, ens PEcAn.logger::logger.severe(paste0("Couldn't find an ED2 PFT number for ", as.character(css$pft[p]))) } } - - # --- Continue work formatting css - css$time[is.na(css$time)] <- start_year + + # --- Continue work formatting css + css$time[is.na(css$time)] <- start_year css$cohort[is.na(css$cohort)] <- 1:sum(is.na(css$cohort)) - css$dbh[is.na(css$dbh)] <- 1 # assign nominal small dbh to missing - density.median <- stats::median(css$n[which(css$n > 0)]) - css$n[is.na(css$n) | css$n == 0] <- density.median + css$dbh[is.na(css$dbh)] <- 1 # assign nominal small dbh to missing + density.median <- stats::median(css$n[which(css$n > 0)]) + css$n[is.na(css$n) | css$n == 0] <- density.median css$hite <- css$bdead <- css$balive <- css$lai <- 0 - - # pft.number col needs to be pft - css <- css[ , colnames(css) != "pft"] + + # pft.number col needs to be pft + css <- css[, colnames(css) != "pft"] colnames(css)[colnames(css) == "pft.number"] <- "pft" - + css <- css[, c("time", "patch", "cohort", "dbh", "hite", "pft", "n", "bdead", "balive", "lai")] #--------------------------------------------------------------------------------------------------# # Write files - + # css utils::write.table(css, filenames_full[1], quote = FALSE, row.names 
= FALSE) - + # pss utils::write.table(pss, filenames_full[2], quote = FALSE, row.names = FALSE) - + # site # hardcoded per fia2ED implemention site <- c( - "nsite 1 file_format 1", + "nsite 1 file_format 1", "sitenum area TCI elev slope aspect soil", "1 1.0 -7 100.0 0.0 0.0 3" ) - + site.file.con <- file(filenames_full[3]) writeLines(site, filenames_full[3]) close(site.file.con) - - # convert_input inserts only 1 file anyway - return(list(file = filenames_full[1], dbfile.name = filenames[1], - mimetype = "text/plain", formatname = "ED2.cohort")) + # convert_input inserts only 1 file anyway + return(list( + file = filenames_full[1], dbfile.name = filenames[1], + mimetype = "text/plain", formatname = "ED2.cohort" + )) } get.ed.file.latlon.text <- function(lat, lon, site.style = FALSE, ed.res = 1) { diff --git a/models/ed/R/write.configs.ed.R b/models/ed/R/write.configs.ed.R index a0da36fb94d..789b2484e07 100644 --- a/models/ed/R/write.configs.ed.R +++ b/models/ed/R/write.configs.ed.R @@ -1,23 +1,23 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. -# All rights reserved. This program and the accompanying materials +# All rights reserved. This program and the accompanying materials # are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ## Functions to prepare and write out ED2.2 config.xml files for MA, SA, and Ensemble runs -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# PREFIX_XML <- "\n\n" ## TODO: Update this script file to use the database for setting up ED2IN and config files -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' Convert parameters from PEcAn database default units to ED defaults ##' ##' Performs model specific unit conversions on a a list of trait values, such @@ -30,27 +30,27 @@ convert.samples.ED <- function(trait.samples) { DEFAULT.LEAF.C <- 0.48 DEFAULT.MAINTENANCE.RESPIRATION <- 1 / 2 ## convert SLA from m2 / kg leaf to m2 / kg C - + # IF: trait.samples not being a list throws an error later in the # write.config.xml.ED2 trait.samples <- as.list(trait.samples) - + if ("SLA" %in% names(trait.samples)) { sla <- as.numeric(trait.samples[["SLA"]]) - trait.samples[["SLA"]] <- sla/DEFAULT.LEAF.C + trait.samples[["SLA"]] <- sla / DEFAULT.LEAF.C } - + # for model version compatibility (q and fineroot2leaf are the same) if ("fineroot2leaf" %in% 
names(trait.samples)) { trait.samples[["q"]] <- as.numeric(trait.samples[["fineroot2leaf"]]) } - + ## convert leaf width / 1000 if ("leaf_width" %in% names(trait.samples)) { lw <- as.numeric(trait.samples[["leaf_width"]]) trait.samples[["leaf_width"]] <- lw / 1000 } - + if ("root_respiration_rate" %in% names(trait.samples)) { rrr1 <- as.numeric(trait.samples[["root_respiration_rate"]]) rrr2 <- rrr1 * DEFAULT.MAINTENANCE.RESPIRATION @@ -58,45 +58,43 @@ convert.samples.ED <- function(trait.samples) { # model version compatibility (rrr and rrf are the same) trait.samples[["root_respiration_factor"]] <- trait.samples[["root_respiration_rate"]] } - + if ("Vcmax" %in% names(trait.samples)) { vcmax <- as.numeric(trait.samples[["Vcmax"]]) trait.samples[["Vcmax"]] <- PEcAn.utils::arrhenius.scaling(vcmax, old.temp = 25, new.temp = 15) # write as Vm0 for version compatibility (Vm0 = Vcmax @ 15C) trait.samples[["Vm0"]] <- trait.samples[["Vcmax"]] - + ## Convert leaf_respiration_rate_m2 to dark_resp_factor; requires Vcmax if ("leaf_respiration_rate_m2" %in% names(trait.samples)) { leaf_resp <- as.numeric(trait.samples[["leaf_respiration_rate_m2"]]) - + ## First scale variables to 15 degC - trait.samples[["leaf_respiration_rate_m2"]] <- + trait.samples[["leaf_respiration_rate_m2"]] <- PEcAn.utils::arrhenius.scaling(leaf_resp, old.temp = 25, new.temp = 15) # convert leaf_respiration_rate_m2 to Rd0 (variable used in ED2) trait.samples[["Rd0"]] <- trait.samples[["leaf_respiration_rate_m2"]] - + ## Calculate dark_resp_factor -- Will be depreciated when moving from ## older versions of ED2 trait.samples[["dark_respiration_factor"]] <- trait.samples[["leaf_respiration_rate_m2"]] / trait.samples[["Vcmax"]] - - - } ## End dark_respiration_factor loop - } ## End Vcmax - + } ## End dark_respiration_factor loop + } ## End Vcmax + if ("plant_min_temp" %in% names(trait.samples)) { trait.samples[["plant_min_temp"]] <- PEcAn.utils::ud_convert(trait.samples[["plant_min_temp"]], "degC", "K") } # for debugging conversions save(trait.samples, file = # file.path(settings$outdir, 'trait.samples.Rdata')) - + # return converted samples return(trait.samples) } # ==================================================================================================# -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' Write ED configuration files ##' ##' Writes an xml and ED2IN config files for use with the Ecological Demography @@ -116,24 +114,22 @@ convert.samples.ED <- function(trait.samples) { ##' @export ##' @author David LeBauer, Shawn Serbin, Carl Davidson, Alexey Shiklomanov, ##' Istem Fer -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# write.config.ED2 <- function(trait.values, settings, run.id, defaults = settings$constants, check = FALSE, ...) 
{ - - jobsh <- write.config.jobsh.ED2(settings = settings, run.id = run.id) - + writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) - + ## Write ED2 config.xml file xml <- write.config.xml.ED2(defaults = defaults, settings = settings, trait.values = trait.values) - + XML::saveXML(xml, file = file.path(settings$rundir, run.id, "config.xml"), indent = TRUE, prefix = PREFIX_XML) - + startdate <- as.Date(settings$run$start.date) enddate <- as.Date(settings$run$end.date) - - ##---------------------------------------------------------------------- + + ## ---------------------------------------------------------------------- ## Edit ED2IN file for runs revision <- settings$model$revision if (is.null(revision)) { @@ -162,7 +158,7 @@ write.config.ED2 <- function(trait.values, settings, run.id, defaults = settings met_driver = settings$run$inputs$met$path, start_date = startdate, end_date = enddate, - IMETAVG = -1, # See below, + IMETAVG = -1, # See below, add_if_missing = TRUE, check_paths = check ) @@ -184,7 +180,9 @@ write.config.ED2 <- function(trait.values, settings, run.id, defaults = settings ed2in_tag %in% c("METCYC1", "METCYCF"), length(rawval) == 1 ) - if (is.null(rawval)) return(ed2in.text) + if (is.null(rawval)) { + return(ed2in.text) + } # The corresponding ED2IN tags METCYC1 and METCYCF expect a year, # so we try to extract the year from the input value here. value <- tryCatch( @@ -210,9 +208,11 @@ write.config.ED2 <- function(trait.values, settings, run.id, defaults = settings ed2in.text <- proc_met_startend(settings[[c("run", "site", "met.end")]], "METCYCF") if (is.null(settings$model$phenol.scheme)) { - PEcAn.logger::logger.error(paste0("no phenology scheme set; \n", - "need to add ", - "tag under tag in settings file")) + PEcAn.logger::logger.error(paste0( + "no phenology scheme set; \n", + "need to add ", + "tag under tag in settings file" + )) } else if (settings$model$phenol.scheme == 1) { ## Set prescribed phenology switch in ED2IN ed2in.text <- modify_ed2in( @@ -243,28 +243,28 @@ write.config.ED2 <- function(trait.values, settings, run.id, defaults = settings ## ------------- # Special parameters for SDA - # + # if (!is.null(settings$state.data.assimilation)) { # Default values sda_tags <- list( - ISOUTPUT = 3, # Save history state file - UNITSTATE = 3, # History state frequency is years - FRQSTATE = 1 # Write history file every 1 year + ISOUTPUT = 3, # Save history state file + UNITSTATE = 3, # History state frequency is years + FRQSTATE = 1 # Write history file every 1 year ) - + # Overwrite defaults with values from settings$model$ed2in_tags list - if(!is.null(settings$model$ed2in_tags)){ + if (!is.null(settings$model$ed2in_tags)) { sda_tags <- utils::modifyList(sda_tags, settings$model$ed2in_tags[names(sda_tags)]) } ed2in.text <- modify_ed2in(ed2in.text, .dots = sda_tags, add_if_missing = TRUE, check_paths = check) } - ##---------------------------------------------------------------------- + ## ---------------------------------------------------------------------- # Get prefix of filename, append to dirname. 
# Assumes pattern 'DIR/PREFIX.lat' # Slightly overcomplicated to avoid error if path name happened to contain .lat' - - + + # when pss or css not exists, case 0 if (is.null(settings$run$inputs$pss$path) | is.null(settings$run$inputs$css$path)) { ed2in.text <- modify_ed2in( @@ -298,7 +298,6 @@ write.config.ED2 <- function(trait.values, settings, run.id, defaults = settings # sites and pass same prefix name, case 3 value <- 3 } - } ed2in.text <- modify_ed2in( ed2in.text, @@ -308,7 +307,7 @@ write.config.ED2 <- function(trait.values, settings, run.id, defaults = settings check_paths = check ) } - + thsum <- settings$run$inputs$thsum$path if (!grepl("/$", thsum)) { @@ -324,8 +323,8 @@ write.config.ED2 <- function(trait.values, settings, run.id, defaults = settings add_if_missing = TRUE, check_paths = check ) - - ##---------------------------------------------------------------------- + + ## ---------------------------------------------------------------------- if (is.null(settings$host$scratchdir)) { modeloutdir <- file.path(settings$host$outdir, run.id) } else { @@ -334,13 +333,13 @@ write.config.ED2 <- function(trait.values, settings, run.id, defaults = settings ed2in.text <- modify_ed2in( ed2in.text, run_name = paste0("ED2 v", revision, " PEcAn ", run.id), - run_dir = file.path(settings$host$rundir, run.id), # For `config.xml` - output_dir = modeloutdir, # Sets analysis and history paths + run_dir = file.path(settings$host$rundir, run.id), # For `config.xml` + output_dir = modeloutdir, # Sets analysis and history paths add_if_missing = TRUE, check_paths = check ) - ##--------------------------------------------------------------------- + ## --------------------------------------------------------------------- # Use all PFTs, or just the ones configured in config.xml? all_pfts <- settings$model$all_pfts if (!is.null(all_pfts) && tolower(all_pfts) != "false") { @@ -356,7 +355,7 @@ write.config.ED2 <- function(trait.values, settings, run.id, defaults = settings # object, but I have no idea what it is. If you know, please fix this. 
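On the comment above about the `xml` object: a stand-alone illustration of pulling `<num>` values out of a `config.xml`-style document with the same `[[`/`xmlValue` accessors used in the loop that follows. The `<config><pft><num>...` structure here is an assumption for the example, not taken from the code.

```r
# Assumed structure: <config><pft><num>...</num></pft>...</config>
txt <- "<config><pft><num>6</num></pft><pft><num>9</num></pft></config>"
doc <- XML::xmlParse(txt, asText = TRUE)
pft_nodes <- XML::getNodeSet(doc, "//pft")
use_pfts <- vapply(
  pft_nodes,
  function(p) as.numeric(XML::xmlValue(p[["num"]])), # same accessor as above
  numeric(1)
)
use_pfts # 6 9
```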
use_pfts <- numeric(length(xml)) for (i in seq_along(xml)) { - use_pfts[i] <- as.numeric(XML::xmlValue(xml[[i]][['num']])) + use_pfts[i] <- as.numeric(XML::xmlValue(xml[[i]][["num"]])) } use_pfts <- use_pfts[is.finite(use_pfts)] PEcAn.logger::logger.debug( @@ -365,20 +364,23 @@ write.config.ED2 <- function(trait.values, settings, run.id, defaults = settings ) } ed2in.text <- modify_ed2in(ed2in.text, include_these_pft = use_pfts) - - ##--------------------------------------------------------------------- + + ## --------------------------------------------------------------------- # Modify any additional tags provided in settings$model$ed2in_tags custom_tags <- settings$model$ed2in_tags if (!is.null(custom_tags)) { # Convert numeric tags to numeric # Anything that isn't converted to NA via `as.numeric` is numeric - custom_tags <- lapply(custom_tags, function(x) - tryCatch(as.numeric(x), warning = function(e) x)) + custom_tags <- lapply(custom_tags, function(x) { + tryCatch(as.numeric(x), warning = function(e) x) + }) # Figure out what is a numeric vector # Look for a list of numbers like: "1,2,5" # Works for decimals, negatives, and arbitrary spacing: "1.3,2.6, -7.8 , 8.1" - numvec_rxp <- paste0("^ *-?[[:digit:]]+.?[[:digit:]]*", - "([[:space:]]*,[[:space:]]*-?[[:digit:]]+.?[[:digit:]]*)+") + numvec_rxp <- paste0( + "^ *-?[[:digit:]]+.?[[:digit:]]*", + "([[:space:]]*,[[:space:]]*-?[[:digit:]]+.?[[:digit:]]*)+" + ) are_numvec <- vapply(custom_tags, function(x) grepl(numvec_rxp, x), logical(1)) custom_tags[are_numvec] <- lapply( custom_tags[are_numvec], @@ -386,8 +388,8 @@ write.config.ED2 <- function(trait.values, settings, run.id, defaults = settings ) ed2in.text <- modify_ed2in(ed2in.text, .dots = custom_tags, add_if_missing = TRUE, check_paths = check) } - - ##---------------------------------------------------------------------- + + ## ---------------------------------------------------------------------- if (check) { check_ed2in(ed2in.text) } @@ -398,7 +400,7 @@ write.config.ED2 <- function(trait.values, settings, run.id, defaults = settings } # write.config.ED2 # ==================================================================================================# -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# #' Clear out old config and ED model run files. 
#' #' @param main.outdir ignored @@ -407,18 +409,17 @@ write.config.ED2 <- function(trait.values, settings, run.id, defaults = settings #' @export #' @author Shawn Serbin, David LeBauer, Alexey Shikomanov remove.config.ED2 <- function(main.outdir = settings$outdir, settings) { - print(" ") print("---- Removing previous ED2 config files and output before starting new run ----") print(" ") - + todelete <- dir(settings$outdir, pattern = c("/c.*", "/ED2INc.*"), recursive = TRUE, full.names = TRUE) - + if (length(todelete > 0)) { file.remove(todelete) } rm(todelete) - + ## Remove model run configs and model run log files on local/remote host if (!settings$host$name == "localhost") { ## Remove model run congfig and log files on remote host @@ -429,9 +430,9 @@ remove.config.ED2 <- function(main.outdir = settings$outdir, settings) { ed2in <- remote_ls(settings$host$rundir, "ED2INc.*") output_remote <- remote_ls(settings$host$outdir, ".") output <- file.path(settings$host$outdir, output_remote) - + if (length(config) > 0 | length(ed2in) > 0) { - todelete <- c(config, ed2in[-grep("log", ed2in)], output) ## Keep log files + todelete <- c(config, ed2in[-grep("log", ed2in)], output) ## Keep log files PEcAn.remote::remote.execute.cmd(settings$host, "rm", c("-f", todelete)) } } @@ -448,25 +449,24 @@ remove.config.ED2 <- function(main.outdir = settings$outdir, settings) { #' @return R XML object containing full ED2 XML file #' @author David LeBauer, Shawn Serbin, Carl Davidson, Alexey Shiklomanov write.config.xml.ED2 <- function(settings, trait.values, defaults = settings$constants) { - # TODO this should come from the database - + # Internal data sets stored in sysdata.RDA are used to override defaults in # config.xml. This code looks for a history dataset that matches the # "revision" number for ED2 set in settings (e.g. PEcAn.ED2:::history.r85) and # if it doesn't find it, it uses a generic file (PEcAn.ED2:::history). 
To add # a new history file, add the .csv file to models/ed/data-raw and run the # sysdata.R script in that folder - - if(is.null(settings$model$revision)) { + + if (is.null(settings$model$revision)) { PEcAn.logger::logger.debug("--- Using Generic ED2 History File") edhistory <- history } else { histfile <- paste0("history.r", settings$model$revision) edhistory <- try(eval(str2lang(histfile)), silent = TRUE) - } - - if(inherits(edhistory, "try-error")) { + } + + if (inherits(edhistory, "try-error")) { PEcAn.logger::logger.debug("--- Using Generic ED2 History File") edhistory <- history } else { @@ -475,7 +475,7 @@ write.config.xml.ED2 <- function(settings, trait.values, defaults = settings$con edtraits <- names(edhistory) pftmapping <- PEcAn.ED2::pftmapping - + ## Get ED2 specific model settings and put into output config xml file xml <- PEcAn.settings::listToXml(settings$model$config.header, "config") @@ -492,9 +492,7 @@ write.config.xml.ED2 <- function(settings, trait.values, defaults = settings$con for (i in seq_along(trait.values)) { group <- names(trait.values)[i] if (group == "env") { - ## set defaults from config.header - } else { # Make this agnostic to the way PFT names are defined in `trait.values` -- either directly as # list names or as object 'name' within each sublist is fine @@ -528,7 +526,6 @@ write.config.xml.ED2 <- function(settings, trait.values, defaults = settings$con decompositon.xml <- PEcAn.settings::listToXml(vals, "decomposition") xml <- XML::append.xmlNode(xml, decompositon.xml) - } else if (length(pft.number) == 0) { PEcAn.logger::logger.severe(glue::glue( "Unable to set PFT number automatically. ", @@ -538,7 +535,6 @@ write.config.xml.ED2 <- function(settings, trait.values, defaults = settings$con "or add the PFT to `pftmapping.csv` file in ", "`models/ed/data/pftmapping.csv`." 
)) - } else { ## Get default trait values from ED history vals <- as.list(edhistory[edhistory$num == pft.number, ]) @@ -553,17 +549,16 @@ write.config.xml.ED2 <- function(settings, trait.values, defaults = settings$con converted.defaults <- convert.samples.ED(defaults[[pft]]$constants) ## Selectively replace defaults and trait values with constants from settings - if (!is.null(converted.defaults)){ + if (!is.null(converted.defaults)) { vals <- utils::modifyList(vals, converted.defaults) } - + ## Make sure that include_pft is set to 1 - vals$include_pft = 1 + vals$include_pft <- 1 pft.xml <- PEcAn.settings::listToXml(vals, "pft") xml <- XML::append.xmlNode(xml, pft.xml) } - } } return(xml) @@ -597,11 +592,13 @@ write.config.jobsh.ED2 <- function(settings, run.id) { } else { modeloutdir <- file.path(settings$host$scratchdir, settings$workflow$id, run.id) mkdirscratch <- paste("mkdir -p", modeloutdir) - copyscratch <- paste("rsync", "-a", - paste0("\"", file.path(modeloutdir, ""), "\""), - paste0("\"", file.path(outdir, ""), "\"")) + copyscratch <- paste( + "rsync", "-a", + paste0("\"", file.path(modeloutdir, ""), "\""), + paste0("\"", file.path(outdir, ""), "\"") + ) if (is.null(settings$host$clearscratch) || is.na(as.logical(settings$host$clearscratch)) || - as.logical(settings$host$clearscratch)) { + as.logical(settings$host$clearscratch)) { clearscratch <- paste("rm", "-rf", paste0("\"", modeloutdir, "\"")) } else { clearscratch <- "# scratch is not cleared" @@ -645,10 +642,10 @@ write.config.jobsh.ED2 <- function(settings, run.id) { jobsh <- gsub("@START_DATE@", settings$run$start.date, jobsh) jobsh <- gsub("@END_DATE@", settings$run$end.date, jobsh) - + jobsh <- gsub("@OUTDIR@", outdir, jobsh) jobsh <- gsub("@RUNDIR@", rundir, jobsh) - + if (is.null(settings$model$binary_args)) { # If argument is missing but running on RabbitMQ, assume you need # -s flag. If you want to force run ED without -s, use a blank @@ -661,10 +658,10 @@ write.config.jobsh.ED2 <- function(settings, run.id) { } jobsh <- gsub("@BINARY_ARGS@", settings$model$binary_args, jobsh) jobsh <- gsub("@BINARY@", settings$model$binary, jobsh) - + pft_names <- extract_pfts(settings$pfts) pft_names <- deparse1(dput(pft_names)) jobsh <- gsub("@PFT_NAMES@", pft_names, jobsh) - + return(jobsh) } # write.config.jobsh.ED2 diff --git a/models/ed/R/write_ed2in.R b/models/ed/R/write_ed2in.R index 5abe1bf52b5..e5ffef03368 100644 --- a/models/ed/R/write_ed2in.R +++ b/models/ed/R/write_ed2in.R @@ -1,13 +1,13 @@ #' Write ED2IN list to file #' -#' This writes a ED2IN file from an `ed2in` list. Default method writes a -#' barebones file without comments. S3 method for `ed2in` objects extracts -#' comments and their locations from the object attributes (if `barebones` is +#' This writes a ED2IN file from an `ed2in` list. Default method writes a +#' barebones file without comments. S3 method for `ed2in` objects extracts +#' comments and their locations from the object attributes (if `barebones` is #' `FALSE`). #' #' @param ed2in Named list of ED2IN tag-value pairs. See [read_ed2in]. #' @param filename Target file name -#' @param custom_header Character vector for additional header comments. Each +#' @param custom_header Character vector for additional header comments. Each #' item gets its own line. #' @param barebones Logical. If `TRUE`, omit comments and only write tag-value pairs. 
#' @export @@ -28,18 +28,18 @@ write_ed2in.ed2in <- function(ed2in, filename, custom_header = character(), bare file_body <- character(nvalues + ncomments) file_body[attr(ed2in, "comment_linenos")] <- attr(ed2in, "comment_values") - file_body[attr(ed2in, "value_linenos")] <- + file_body[attr(ed2in, "value_linenos")] <- tags_values_vec[1:length(attr(ed2in, "value_linenos"))] - - #check for new tags - if(length(tags_values_vec) > length(attr(ed2in, "value_linenos"))) { - #find the $END + + # check for new tags + if (length(tags_values_vec) > length(attr(ed2in, "value_linenos"))) { + # find the $END END_line <- grep("\\$END", file_body) - 1 - new_tags <- + new_tags <- tags_values_vec[(length(attr(ed2in, "value_linenos")) + 1):length(tags_values_vec)] - #put the new tags in with $END at the end + # put the new tags in with $END at the end file_body <- c(file_body[1:END_line], new_tags, "$END") - } + } header <- c( "!=======================================", "!=======================================", diff --git a/models/ed/R/write_ed_metheader.R b/models/ed/R/write_ed_metheader.R index e93986517f9..97ce066195b 100644 --- a/models/ed/R/write_ed_metheader.R +++ b/models/ed/R/write_ed_metheader.R @@ -4,7 +4,7 @@ #' #' @param ed_metheader ED meteorlogy header object (see [read_ed_metheader]) #' @param filename Full file name (including path) of ED met header -#' @param header_line Character string for top line of output file. Default is +#' @param header_line Character string for top line of output file. Default is #' `'header'`. #' @export write_ed_metheader <- function(ed_metheader, filename, diff --git a/models/ed/R/write_ed_veg.R b/models/ed/R/write_ed_veg.R index 07a6a6cac0a..7c74338949b 100644 --- a/models/ed/R/write_ed_veg.R +++ b/models/ed/R/write_ed_veg.R @@ -1,6 +1,6 @@ #' Write ED inputs to directory #' -#' Write a complete [ED inputs object][read_ed_veg] to disk. `css`, `pss`, and +#' Write a complete [ED inputs object][read_ed_veg] to disk. `css`, `pss`, and #' `site` files are automatically named and correctly formatted. #' #' @param ed_veg ED vegetation inputs object (see [read_ed_veg]). @@ -24,9 +24,9 @@ write_ed_veg <- function(ed_veg, path_prefix) { #' #' Functions for writing css, pss, and site files from their respective objects. #' -#' Latitude and longitude coordinates will be converted directly to character, -#' without any changes to their precision. If they are `NULL` (default), the -#' function assumes that `lat` and `lon` are already in the `path_prefix`, and +#' Latitude and longitude coordinates will be converted directly to character, +#' without any changes to their precision. If they are `NULL` (default), the +#' function assumes that `lat` and `lon` are already in the `path_prefix`, and #' if they are absent, the function will throw an error. #' #' @param css css object (see [read_css]) @@ -67,9 +67,9 @@ write_site <- function(site, path_prefix, latitude = NULL, longitude = NULL) { #' Format file name for ED vegetation inputs #' -#' Adds the latitude and longitude, or checks if they are formatted correctly. -#' Then, splits the prefix into the directory and base name, appends the suffix -#' to the base name (adding a starting dot, if necessary), and returns the +#' Adds the latitude and longitude, or checks if they are formatted correctly. +#' Then, splits the prefix into the directory and base name, appends the suffix +#' to the base name (adding a starting dot, if necessary), and returns the #' filename as a character. 
#' #' @param suffix Character string of filename suffix. diff --git a/models/ed/R/write_restart.ED2.R b/models/ed/R/write_restart.ED2.R index 3a3611ad607..f91dfe68f4b 100644 --- a/models/ed/R/write_restart.ED2.R +++ b/models/ed/R/write_restart.ED2.R @@ -1,5 +1,5 @@ #' @title Write ED2 restart file from SDA results -#' +#' #' @param outdir output directory #' @param runid run id #' @param start.time Time of current assimilation step @@ -11,281 +11,270 @@ #' are deterministically related to the parameters updated by the analysis #' @param inputs new input paths updated by the SDA workflow, will be passed to #' `write.configs` -#' +#' #' @author Alexey Shiklomanov, Istem Fer #' @return TRUE if successful #' @export write_restart.ED2 <- function(outdir, runid, start.time, stop.time, settings, new.state, RENAME = TRUE, new.params, inputs) { - var.names <- settings$state.data.assimilation$state.variables %>% - purrr::map('variable.name') - + purrr::map("variable.name") + restart <- new.params$restart - + # IMPORTANT NOTE: in the future, things that are passed via "restart" list need to be confined to old states that will be used # to carry out deternimistic relationships, no other read/write restart should copy this logic - histfile <- restart$histfile # Get history restart file path - restart <- restart$restart - + histfile <- restart$histfile # Get history restart file path + restart <- restart$restart + # remote or not remote? # rundir <- settings$host$rundir - # mod_outdir <- settings$host$outdir + # mod_outdir <- settings$host$outdir rundir <- settings$rundir mod_outdir <- settings$modeloutdir # same as outdir? - - sda_datestr <- gregexpr("-S-", histfile)[[1]] - sda_suffix <- paste0("SDA.", substr(histfile, sda_datestr[1] + 3, sda_datestr[1] + 19)) - hyear <- substr(histfile, sda_datestr[1] + 3, sda_datestr[1] + 6) - + + sda_datestr <- gregexpr("-S-", histfile)[[1]] + sda_suffix <- paste0("SDA.", substr(histfile, sda_datestr[1] + 3, sda_datestr[1] + 19)) + hyear <- substr(histfile, sda_datestr[1] + 3, sda_datestr[1] + 6) + # check these dirs for local vs remote #### Backup old run files to date directory runfiles <- list.files.nodir(file.path(rundir, runid)) modoutfiles <- list.files.nodir(file.path(mod_outdir, runid), hyear) # copy only current year, otherwise it cumulatively copies everything - + dir.create(file.path(rundir, runid, sda_suffix)) dir.create(file.path(mod_outdir, runid, sda_suffix)) file.copy(file.path(rundir, runid, runfiles), - file.path(rundir, runid, sda_suffix, runfiles), - overwrite = TRUE) + file.path(rundir, runid, sda_suffix, runfiles), + overwrite = TRUE + ) file.copy(file.path(mod_outdir, runid, modoutfiles), - file.path(mod_outdir, runid, sda_suffix, modoutfiles), - overwrite = TRUE) - - - remote_histfile <- file.path(settings$host$outdir, runid, basename(histfile)) - + file.path(mod_outdir, runid, sda_suffix, modoutfiles), + overwrite = TRUE + ) + + + remote_histfile <- file.path(settings$host$outdir, runid, basename(histfile)) + #### Get common variables # PFT by cohort - pft_co <- restart$PFT - + pft_co <- restart$PFT + # Plant density plant_dens <- restart$NPLANT - + # Patch area - patch_area <- restart$AREA - + patch_area <- restart$AREA + #### Create a patch index indicator vector - paco_n <- restart$PACO_N # number of cohorts per patch + paco_n <- restart$PACO_N # number of cohorts per patch patch_index <- rep(1:length(paco_n), times = paco_n) - + # read xml to extract allometric coeffs later - configfile <- file.path(rundir, runid,"config.xml") + 
configfile <- file.path(rundir, runid, "config.xml") pars <- XML::xmlToList(XML::xmlParse(configfile)) # remove non-pft sublists - pars[names(pars)!="pft"] <- NULL - + pars[names(pars) != "pft"] <- NULL + #### Write new state to file # Default mode of H5File$new is "a", which is read + write and create file if it doesn't exist histfile_h5 <- hdf5r::H5File$new(histfile) - + for (var_name in var.names) { # var_name <- "AbvGrndWood" if (var_name == "AbvGrndWood") { - #### Disaggregate AbvGrndWood down to cohort vector - # NOTE: This is currently naive -- it just calculates the - # AbvGrndWood ratio between the old and new states and applies it to each - # cohort based on its PFT. No patch information is involved because + # NOTE: This is currently naive -- it just calculates the + # AbvGrndWood ratio between the old and new states and applies it to each + # cohort based on its PFT. No patch information is involved because # none is provided in `new.state`. - + new_tmp <- new.state[grep(var_name, names(new.state))] new_tmp <- PEcAn.utils::ud_convert(new_tmp, "Mg/ha/yr", "kg/m^2/yr") - + agb_co <- restart$AGB_CO # reaggregate old state plant2cohort <- agb_co * plant_dens cohort2patch <- tapply(plant2cohort, list("patch" = patch_index), sum, na.rm = TRUE) - - agw_ratios <- new_tmp / sum(cohort2patch*patch_area, na.rm = TRUE) - + + agw_ratios <- new_tmp / sum(cohort2patch * patch_area, na.rm = TRUE) + # when nudging a carbon pool, we need to nudge relevant pools # maybe in the future if we are assimilating these or if they're in the state matrix also check if it's in the var.names before nudging proportionally - bdead <- restart$BDEAD - #bstorage <- restart$BSTORAGE # storage is a thing in itself - bleaf <- restart$BLEAF - broot <- restart$BROOT - balive <- restart$BALIVE - bseeds <- restart$BSEEDS_CO + bdead <- restart$BDEAD + # bstorage <- restart$BSTORAGE # storage is a thing in itself + bleaf <- restart$BLEAF + broot <- restart$BROOT + balive <- restart$BALIVE + bseeds <- restart$BSEEDS_CO bsapwooda <- restart$BSAPWOODA bsapwoodb <- restart$BSAPWOODB - - new_bdead <- bdead * agw_ratios[1,1] - new_agb <- agb_co * agw_ratios[1,1] - new_bleaf <- bleaf * agw_ratios[1,1] - new_broot <- broot * agw_ratios[1,1] - new_balive <- balive * agw_ratios[1,1] - new_bseeds <- bseeds * agw_ratios[1,1] - new_bsapwooda <- bsapwooda * agw_ratios[1,1] - new_bsapwoodb <- bsapwoodb * agw_ratios[1,1] - + + new_bdead <- bdead * agw_ratios[1, 1] + new_agb <- agb_co * agw_ratios[1, 1] + new_bleaf <- bleaf * agw_ratios[1, 1] + new_broot <- broot * agw_ratios[1, 1] + new_balive <- balive * agw_ratios[1, 1] + new_bseeds <- bseeds * agw_ratios[1, 1] + new_bsapwooda <- bsapwooda * agw_ratios[1, 1] + new_bsapwoodb <- bsapwoodb * agw_ratios[1, 1] + # # if you're nudging bdead, update bstorage and dbh too # new_bstorage <- bstorage * agw_ratios[1,1] - + # what else to nudge? 
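A small numeric sketch (all values invented) of the aggregation-and-ratio logic reformatted above: cohort pools are rolled up to an area-weighted site total, compared with the analysis value, and every cohort-level pool is scaled by the same ratio.

```r
# Invented numbers; mirrors plant2cohort / cohort2patch / agw_ratios above.
agb_co     <- c(1.2, 0.8, 2.5)    # kgC per plant, by cohort
plant_dens <- c(0.05, 0.10, 0.02) # plants per m2 (NPLANT), by cohort
paco_n     <- c(2, 1)             # cohorts per patch
patch_area <- c(0.6, 0.4)         # patch area weights

patch_index  <- rep(seq_along(paco_n), times = paco_n)
plant2cohort <- agb_co * plant_dens
cohort2patch <- tapply(plant2cohort, patch_index, sum, na.rm = TRUE)
old_total    <- sum(cohort2patch * patch_area, na.rm = TRUE) # site-level kgC/m2

new_total <- 0.20                     # hypothetical analysis value, kgC/m2
ratio     <- new_total / old_total
bdead_new <- c(3.1, 2.2, 6.0) * ratio # each carbon pool gets the same scaling
```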
# soil C : FAST_SOIL_C, SLOW_SOIL_C, STRUCTURAL_SOIL_C # NPLANT - - pft_nums <- as.numeric(sapply(pars,`[[`, "num")) + + pft_nums <- as.numeric(sapply(pars, `[[`, "num")) # use ED2's allometric eqns to dtermine dbh from new bdead C2B <- 2 new_dbh <- new_bdead - for(pn in seq_along(pft_nums)){ + for (pn in seq_along(pft_nums)) { ind <- pft_co == pft_nums[pn] - + crit <- new_bdead[ind] <= pars[[pn]]$bdead_crit - new_dbh[ind][crit] = (new_bdead[ind][crit] / as.numeric(pars[[pn]]$b1Bs_small) * C2B)**(1.0/ as.numeric(pars[[pn]]$b2Bs_small)) - new_dbh[ind][!crit] = (new_bdead[ind][!crit] / as.numeric(pars[[pn]]$b1Bs_large) * C2B)**(1.0/as.numeric(pars[[pn]]$b2Bs_large)) - + new_dbh[ind][crit] <- (new_bdead[ind][crit] / as.numeric(pars[[pn]]$b1Bs_small) * C2B)**(1.0 / as.numeric(pars[[pn]]$b2Bs_small)) + new_dbh[ind][!crit] <- (new_bdead[ind][!crit] / as.numeric(pars[[pn]]$b1Bs_large) * C2B)**(1.0 / as.numeric(pars[[pn]]$b2Bs_large)) } - + # AbvGrndWood in state matrix is not per PFT but total # but leaving this bit as a reminder - + # new2old.agb_pft <- new.agb_pft / old.agb_pft - # new2old_pftnames <- gsub(paste0(var_name, ".pft."), '', + # new2old_pftnames <- gsub(paste0(var_name, ".pft."), '', # names(new2old.agb_pft)) # names(new2old.agb_pft) <- as.character(pftnums[new2old_pftnames]) # agb_co_ratios <- new2old.agb_pft[as.character(pft_co)] - - #nplant_co_plant <- restart$NPLANT - - - # Here, we adjust cohort-level AGB by adjusting the stand density + + # nplant_co_plant <- restart$NPLANT + + + # Here, we adjust cohort-level AGB by adjusting the stand density # (NPLANT) proportional to the change in biomass computed above. - #new.nplant_co_plant <- nplant_co_plant * agb_co_ratios[1,1] - # An alternative is to modify DBH and BDEAD, which requires solving - # the following allometric equation for DBH and then using ED + # new.nplant_co_plant <- nplant_co_plant * agb_co_ratios[1,1] + # An alternative is to modify DBH and BDEAD, which requires solving + # the following allometric equation for DBH and then using ED # allometric equations to recalculate BDEAD. - # - # AGB = b1L*DBH^(b2L) * (1 + qsw * agf_bs * + # + # AGB = b1L*DBH^(b2L) * (1 + qsw * agf_bs * # (h0 + a * (1-exp(b*DBH))) + b1d*DBH^(b2d) - - + + # The empty brackets (`[]`) indicate the whole vector is replaced. 
# This is necessary to overwrite an existing dataset - #histfile_h5[["NPLANT"]][] <- new.nplant_co_plant + # histfile_h5[["NPLANT"]][] <- new.nplant_co_plant histfile_h5[["BDEAD"]][] <- new_bdead - #histfile_h5[["BSTORAGE"]][] <- new_bstorage + # histfile_h5[["BSTORAGE"]][] <- new_bstorage histfile_h5[["DBH"]][] <- new_dbh - + # overwrite the keepers, not critical (these will be re-calculated within ED2) - histfile_h5[["AGB_CO"]][] <- new_agb + histfile_h5[["AGB_CO"]][] <- new_agb histfile_h5[["TOTAL_AGB"]][] <- new_tmp - - histfile_h5[["BLEAF"]][] <- new_bleaf - histfile_h5[["BROOT"]][] <- new_broot - histfile_h5[["BALIVE"]][] <- new_balive + + histfile_h5[["BLEAF"]][] <- new_bleaf + histfile_h5[["BROOT"]][] <- new_broot + histfile_h5[["BALIVE"]][] <- new_balive histfile_h5[["BSEEDS_CO"]][] <- new_bseeds histfile_h5[["BSAPWOODA"]][] <- new_bsapwooda histfile_h5[["BSAPWOODB"]][] <- new_bsapwoodb - - - - } else if(var_name == "GWBI"){ - + } else if (var_name == "GWBI") { # zero cumulative rate keepers, nothing is calculated back from these # so zeroing only the rate you're reading back is fine histfile_h5[["TOTAL_AGB_GROWTH"]][] <- 0 # zero both histfile_h5[["DDBH_DT"]][] <- rep(0, length(restart$DDBH_DT)) histfile_h5[["DAGB_DT"]][] <- rep(0, length(restart$DAGB_DT)) - - } else if(var_name == "storage_carbon_content"){ - + } else if (var_name == "storage_carbon_content") { bstorage <- restart$BSTORAGE # reaggregate old state plant2cohort <- bstorage * plant_dens cohort2patch <- tapply(plant2cohort, list("patch" = patch_index), sum, na.rm = TRUE) - - bstorage_ratio <- new.state$storage_carbon_content / sum(cohort2patch*patch_area, na.rm = TRUE) - - histfile_h5[["BSTORAGE"]][] <- bstorage*bstorage_ratio - - } else if(var_name == "fast_soil_pool_carbon_content"){ - + + bstorage_ratio <- new.state$storage_carbon_content / sum(cohort2patch * patch_area, na.rm = TRUE) + + histfile_h5[["BSTORAGE"]][] <- bstorage * bstorage_ratio + } else if (var_name == "fast_soil_pool_carbon_content") { fast_soil_c <- restart$FAST_SOIL_C - fsc_ratio <- new.state$fast_soil_pool_carbon_content / sum(fast_soil_c*patch_area) - - histfile_h5[["FAST_SOIL_C"]][] <- fast_soil_c*fsc_ratio - - } else if(var_name == "structural_soil_pool_carbon_content"){ - - structural_soil_c <- restart$STRUCTURAL_SOIL_C - structural_sc_ratio <- new.state$structural_soil_pool_carbon_content / sum(structural_soil_c*patch_area) - - histfile_h5[["STRUCTURAL_SOIL_C"]][] <- structural_soil_c*structural_sc_ratio - + fsc_ratio <- new.state$fast_soil_pool_carbon_content / sum(fast_soil_c * patch_area) + + histfile_h5[["FAST_SOIL_C"]][] <- fast_soil_c * fsc_ratio + } else if (var_name == "structural_soil_pool_carbon_content") { + structural_soil_c <- restart$STRUCTURAL_SOIL_C + structural_sc_ratio <- new.state$structural_soil_pool_carbon_content / sum(structural_soil_c * patch_area) + + histfile_h5[["STRUCTURAL_SOIL_C"]][] <- structural_soil_c * structural_sc_ratio } else { - PEcAn.logger::logger.error("Variable ", var_name, - " not currently supported", - " by write.restart.ED2") + PEcAn.logger::logger.error( + "Variable ", var_name, + " not currently supported", + " by write.restart.ED2" + ) } } - + # This closes the file and all objects related to the file. 
histfile_h5$close_all() - - # copy the history file with new states and new timestamp to remote + + # copy the history file with new states and new timestamp to remote # it's OK, because we backed up the original above - #PEcAn.remote::remote.copy.to(settings$host, histfile, remote_histfile) - + # PEcAn.remote::remote.copy.to(settings$host, histfile, remote_histfile) + ##### Modify ED2IN ed2in_path <- file.path(rundir, runid, "ED2IN") ed2in_orig <- read_ed2in(ed2in_path) - - + + ed2in_new <- modify_ed2in( ed2in_orig, start_date = lubridate::ceiling_date(start.time, "1 day"), - end_date = lubridate::ceiling_date(stop.time, "1 day"), # ED2 writes annual history files at the same month as initial - RUNTYPE = "HISTORY", + end_date = lubridate::ceiling_date(stop.time, "1 day"), # ED2 writes annual history files at the same month as initial + RUNTYPE = "HISTORY", IED_INIT_MODE = 4, SFILIN = file.path(settings$host$outdir, runid, "history") ) - - - if(settings$host$name == "localhost") check_ed2in(ed2in_new) + + + if (settings$host$name == "localhost") check_ed2in(ed2in_new) write_ed2in(ed2in_new, ed2in_path) - + # Remove old history.xml file, which job.sh looks for - file.remove(file.path(mod_outdir, runid, "history.xml")) # this is local - + file.remove(file.path(mod_outdir, runid, "history.xml")) # this is local + # read the jobsh in the rundir - jobsh <- readLines(file.path(rundir, runid, "job.sh"),-1) + jobsh <- readLines(file.path(rundir, runid, "job.sh"), -1) remote_remove_cmd <- paste0("rm -f ", file.path(settings$host$outdir, runid, "history.xml")) - jobsh[grep("@REMOVE_HISTXML@", jobsh)+1] <- remote_remove_cmd - + jobsh[grep("@REMOVE_HISTXML@", jobsh) + 1] <- remote_remove_cmd + # also update mode2netcdf.ED2 call - mod2cf_line <- grep("model2netcdf.ED2", jobsh) - mod2cf_string <- jobsh[mod2cf_line] - begin_from <- paste0("'", lubridate::year(start.time), "/") - begin_to <- paste0("'", hyear,"/") - end_from <- begin_to - end_to <- paste0("'", as.numeric(hyear)+1,"/") + mod2cf_line <- grep("model2netcdf.ED2", jobsh) + mod2cf_string <- jobsh[mod2cf_line] + begin_from <- paste0("'", lubridate::year(start.time), "/") + begin_to <- paste0("'", hyear, "/") + end_from <- begin_to + end_to <- paste0("'", as.numeric(hyear) + 1, "/") # this order matters - mod2cf_string <- gsub(end_from, end_to, mod2cf_string) # e.g. change from (...'1961/01/01', '1962/01/01'...) to (...'1961/01/01', '1963/01/01'...) - mod2cf_string <- gsub(begin_from, begin_to, mod2cf_string) # e.g. change from (...'1961/01/01', '1963/01/01'...) to (...'1962/01/01', '1963/01/01'...) + mod2cf_string <- gsub(end_from, end_to, mod2cf_string) # e.g. change from (...'1961/01/01', '1962/01/01'...) to (...'1961/01/01', '1963/01/01'...) + mod2cf_string <- gsub(begin_from, begin_to, mod2cf_string) # e.g. change from (...'1961/01/01', '1963/01/01'...) to (...'1962/01/01', '1963/01/01'...) jobsh[mod2cf_line] <- mod2cf_string - + writeLines(jobsh, file.path(rundir, runid, "job.sh")) - + PEcAn.logger::logger.info("Finished --", runid) - + return(TRUE) } # write_restart.ED2 # Phony dims: # 0 - All cohorts -# 1 - Sites +# 1 - Sites # 2 - Size classes (11) # 3 - PFTs (17) # 4 - Patches -# 5 - Months?? -# 6 - ??? +# 5 - Months?? +# 6 - ??? # 7 - Something related to height # 8 - Something to do with disturbance # 9 - Soil layers (?) 
# 10 - Something related to mortality # 11 - Canopy radiation profile - diff --git a/models/ed/data-raw/sysdata.R b/models/ed/data-raw/sysdata.R index e3ea82cdbed..4101d48b8c4 100644 --- a/models/ed/data-raw/sysdata.R +++ b/models/ed/data-raw/sysdata.R @@ -1,15 +1,14 @@ # Prep internal data for package. See https://r-pkgs.org/data.html for explanation -#read history files +# read history files hist_files <- list.files("models/ed/data-raw", pattern = "^history", full.names = TRUE) hist_list <- lapply(hist_files, read.csv2) names(hist_list) <- sub(basename(hist_files), pattern = ".csv", replacement = "") -#split up into separate R objects and dump in global environment +# split up into separate R objects and dump in global environment list2env(hist_list, envir = .GlobalEnv) -#read soil file and append to list +# read soil file and append to list soil <- read.csv2("models/ed/data-raw/soil.csv") -#save to sysdata.rda +# save to sysdata.rda save(list = c(names(hist_list), "soil"), file = "models/ed/R/sysdata.rda") - diff --git a/models/ed/inst/pecan.ed2.diag.plots.R b/models/ed/inst/pecan.ed2.diag.plots.R index 304918d03e8..d28e638ceb0 100644 --- a/models/ed/inst/pecan.ed2.diag.plots.R +++ b/models/ed/inst/pecan.ed2.diag.plots.R @@ -1,14 +1,14 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- ###################################################################################################### # Plot functions for PEcAn ED2 Diagnostics -# +# # v1 # # TODO: Finalize plots for various functions @@ -16,591 +16,734 @@ ###################################################################################################### -#====================================================================================================# +# ====================================================================================================# # Plot mean daily output -#====================================================================================================# +# ====================================================================================================# # UNDER DEVELOPMENT -plot_daily = function(model.run,in.dir,out.dir){ - - i = 1 +plot_daily <- function(model.run, in.dir, out.dir) { + i <- 1 for (year in start_year:end_year) { - - message(paste("--- PROCESSING YEAR: ",year," ---")) - + message(paste("--- PROCESSING YEAR: ", year, " ---")) + #---------------- Generate Subset Length --------------------------------------------------------# if (year == start_year) { start_day <- as.numeric(format(start_date, "%j")) } else { - start_day = 1 + start_day <- 1 } if (year == end_year) { - end_day = as.numeric(format(end_date, "%j")) + end_day <- as.numeric(format(end_date, "%j")) } else { - end_day = as.numeric(format(as.Date(sprintf("%s-12-31", year)), "%j")) + end_day <- as.numeric(format(as.Date(sprintf("%s-12-31", year)), "%j")) } - - } - - + } } # End of plot_daily #----------------------------------------------------------------------------------------------------# - 
-#====================================================================================================# + +# ====================================================================================================# # Plot mean diel function -#====================================================================================================# +# ====================================================================================================# # not implemented yet #----------------------------------------------------------------------------------------------------# -#====================================================================================================# +# ====================================================================================================# # Plot site average fluxes (i.e. "Tower" file output) -#====================================================================================================# - -site_fluxes = function(model.run,in.dir,out.dir){ +# ====================================================================================================# +site_fluxes <- function(model.run, in.dir, out.dir) { #---------------- Import prescribed pheno data, if exists -----------------------------------------# - # Info: Display prescribed phenology on diagnostic plots (if present) + # Info: Display prescribed phenology on diagnostic plots (if present) # May need to get rid of this as it is mostly ED specific - pheno = list.files(path=model.run,pattern="phenology") - if (length(pheno)==0) { - site_pheno=NA - }else{ - pheno_data = read.delim(pheno,header=F,sep="\t",skip=1) - Yr = pheno_data[,1] - GU = 1/pheno_data[,2] - LO = 1/pheno_data[,4] - site_pheno = data.frame(Year=Yr,Greenup=GU,LeafOff=LO) - print('Site Phenology Info (DoY)') + pheno <- list.files(path = model.run, pattern = "phenology") + if (length(pheno) == 0) { + site_pheno <- NA + } else { + pheno_data <- read.delim(pheno, header = F, sep = "\t", skip = 1) + Yr <- pheno_data[, 1] + GU <- 1 / pheno_data[, 2] + LO <- 1 / pheno_data[, 4] + site_pheno <- data.frame(Year = Yr, Greenup = GU, LeafOff = LO) + print("Site Phenology Info (DoY)") print(site_pheno) print("") } #--------------------------------------------------------------------------------------------------# - - i = 1 + + i <- 1 for (year in start_year:end_year) { - message(paste("--- PROCESSING YEAR: ",year," ---")) - + message(paste("--- PROCESSING YEAR: ", year, " ---")) + #---------------- Generate Subset Length --------------------------------------------------------# if (year == start_year) { start_day <- as.numeric(format(start_date, "%j")) } else { - start_day = 1 + start_day <- 1 } if (year == end_year) { - end_day = as.numeric(format(end_date, "%j")) + end_day <- as.numeric(format(end_date, "%j")) } else { - end_day = as.numeric(format(as.Date(sprintf("%s-12-31", year)), "%j")) + end_day <- as.numeric(format(as.Date(sprintf("%s-12-31", year)), "%j")) } - - polyx = start_day:end_day # <--- for plotting below - vals_day = out_day # <--- values written out per day, 86400/FRQFAST - hdflength = (vals_day*(1+end_day-start_day)) - + + polyx <- start_day:end_day # <--- for plotting below + vals_day <- out_day # <--- values written out per day, 86400/FRQFAST + hdflength <- (vals_day * (1 + end_day - start_day)) + #---------------- Init. 
Arrays ------------------------------------------------------------------# # Info: Initialize arrays for entire model run and populate with for loop (below) - GPP.AVG = rep(0,times=hdflength) - VLEAF.RESP.AVG = rep(0,times=hdflength) - LEAF.RESP.AVG = rep(0,times=hdflength) - STORAGE.RESP.AVG = rep(0,times=hdflength) - GROWTH.RESP.AVG = rep(0,times=hdflength) - ROOT.RESP.AVG = rep(0,times=hdflength) - PLANT.RESP.AVG = rep(0,times=hdflength) - HTROPH.RESP.AVG = rep(0,times=hdflength) - Reco.AVG = rep(0,times=hdflength) - NPP.AVG = rep(0,times=hdflength) - NEE.AVG = rep(0,times=hdflength) + GPP.AVG <- rep(0, times = hdflength) + VLEAF.RESP.AVG <- rep(0, times = hdflength) + LEAF.RESP.AVG <- rep(0, times = hdflength) + STORAGE.RESP.AVG <- rep(0, times = hdflength) + GROWTH.RESP.AVG <- rep(0, times = hdflength) + ROOT.RESP.AVG <- rep(0, times = hdflength) + PLANT.RESP.AVG <- rep(0, times = hdflength) + HTROPH.RESP.AVG <- rep(0, times = hdflength) + Reco.AVG <- rep(0, times = hdflength) + NPP.AVG <- rep(0, times = hdflength) + NEE.AVG <- rep(0, times = hdflength) #--------------------------------------------- # Units: [kg/m2/s] - #AVG.VAPOR.WC = rep(0,times=hdflength) # wood vapor flux. - AVG.VAPOR.LC = rep(0,times=hdflength) - AVG.VAPOR.GC = rep(0,times=hdflength) - AVG.VAPOR.AC = rep(0,times=hdflength) - AVG.TRANSP = rep(0,times=hdflength) - AVG.EVAP = rep(0,times=hdflength) + # AVG.VAPOR.WC = rep(0,times=hdflength) # wood vapor flux. + AVG.VAPOR.LC <- rep(0, times = hdflength) + AVG.VAPOR.GC <- rep(0, times = hdflength) + AVG.VAPOR.AC <- rep(0, times = hdflength) + AVG.TRANSP <- rep(0, times = hdflength) + AVG.EVAP <- rep(0, times = hdflength) # Units [kg/kg] - AVG.CAN.SHV = rep(0,times=hdflength) + AVG.CAN.SHV <- rep(0, times = hdflength) #--------------------------------------------- # Not implemented yet - #AVG.SOIL.TEMP = rep(0,times=hdflength) - #CAN.AIR.TEMP.AVG = rep(0,times=hdflength) - #SWC.AVG = rep(0,times=hdflength) - #AVG.SFCWATER.DEPTH = rep(0,times=hdflength) + # AVG.SOIL.TEMP = rep(0,times=hdflength) + # CAN.AIR.TEMP.AVG = rep(0,times=hdflength) + # SWC.AVG = rep(0,times=hdflength) + # AVG.SFCWATER.DEPTH = rep(0,times=hdflength) #------------------------------------------------------------------------------------------------# - - + + #------------------------------------------------------------------------------------------------# # Info from driver script - # dates contains YYmmdd, month (num), doy. fjday (0-1) - init = dates[1,4] - total = seq(1,hdflength,1) # <--- is this unused? - reps = hdflength/vals_day # <--- this should set the total number of days of data based on - # hdf length. E.g. 48 obs per day -- 17520/48 = 365 - dayfrac = rep(seq(deltaT,24,deltaT), each=1, times=reps) # <--- setup daily output rate for subset - # rep over total lenght (hdflength/vals) - subset = 0 # <--- initialize variable - - period = c(10.0,17.0) # <--- choose which times to average over. Can make user selectable. - - s = seq(period[1],period[2],deltaT) - subset = which(dayfrac >= period[1] & dayfrac <= period[2]) - hours = dayfrac[dayfrac >= period[2] & dayfrac <= period[1]] - aggrlist = rep(start_day:(end_day), each=length(s)) # subset list - - + # dates contains YYmmdd, month (num), doy. fjday (0-1) + init <- dates[1, 4] + total <- seq(1, hdflength, 1) # <--- is this unused? + reps <- hdflength / vals_day # <--- this should set the total number of days of data based on + # hdf length. E.g. 
48 obs per day -- 17520/48 = 365 + dayfrac <- rep(seq(deltaT, 24, deltaT), each = 1, times = reps) # <--- setup daily output rate for subset + # rep over total lenght (hdflength/vals) + subset <- 0 # <--- initialize variable + + period <- c(10.0, 17.0) # <--- choose which times to average over. Can make user selectable. + + s <- seq(period[1], period[2], deltaT) + subset <- which(dayfrac >= period[1] & dayfrac <= period[2]) + hours <- dayfrac[dayfrac >= period[2] & dayfrac <= period[1]] + aggrlist <- rep(start_day:(end_day), each = length(s)) # subset list + + #---------------- Load ED2 Model Output (hdf5) --------------------------------------------------# - filename = list.files(in.dir,full.names=TRUE, - pattern=paste('.*-T-', year, '-.*.h5', sep=''))[1] - if (is.na(filename)==1) { + filename <- list.files(in.dir, + full.names = TRUE, + pattern = paste(".*-T-", year, "-.*.h5", sep = "") + )[1] + if (is.na(filename) == 1) { break - }else{ - data <- hdf5load(filename, load = FALSE,tidy=TRUE) # LOAD ED2 OUTPUT + } else { + data <- hdf5load(filename, load = FALSE, tidy = TRUE) # LOAD ED2 OUTPUT } - var_names = summary(data) # View info about vars. For debugging - if (i==1){ - print(paste("Site Averaged Fluxes (ITOUTPUT) for ",year)) + var_names <- summary(data) # View info about vars. For debugging + if (i == 1) { + print(paste("Site Averaged Fluxes (ITOUTPUT) for ", year)) print(var_names) # Show variable names in log file print("") - #print(str(data)) + # print(str(data)) } - i=i+1 + i <- i + 1 #------------------------------------------------------------------------------------------------# - - + + #---------------- Get Phenology Information -----------------------------------------------------# - chk = which(site_pheno==year) - if (is.nan(mean(chk))==1) { - phenology = data.frame(-9999,-9999,-9999) - names(phenology)=c("Year","Greenup","LeafOff") - GS_LENGTH = NA - }else{ - phenology = site_pheno[chk,] - GS_LENGTH = phenology[,3]-phenology[,2] + chk <- which(site_pheno == year) + if (is.nan(mean(chk)) == 1) { + phenology <- data.frame(-9999, -9999, -9999) + names(phenology) <- c("Year", "Greenup", "LeafOff") + GS_LENGTH <- NA + } else { + phenology <- site_pheno[chk, ] + GS_LENGTH <- phenology[, 3] - phenology[, 2] } #------------------------------------------------------------------------------------------------# - - + + #---------------- Generate Figures --------------------------------------------------------------# umol2gc <- 1.0368 # convert to gC ######################## SETUP PLOT PARAMETERS ################################################### - cex = 1 - labcex = 2 - axiscex = 2 - maincex = 2 - linew = 1.3 # line width + cex <- 1 + labcex <- 2 + axiscex <- 2 + maincex <- 2 + linew <- 1.3 # line width ######################## ED2 OUTPUT ############################################################## # units: umol/m2/s - GPP.AVG = data$AVG.GPP[subset]*umol2gc - GPP.AVG.mn = aggregate(GPP.AVG,by=list(aggrlist),mean)[[2]] - GPP.AVG.ll = aggregate(GPP.AVG,by=list(aggrlist),min)[[2]] - GPP.AVG.ul = aggregate(GPP.AVG,by=list(aggrlist),max)[[2]] + GPP.AVG <- data$AVG.GPP[subset] * umol2gc + GPP.AVG.mn <- aggregate(GPP.AVG, by = list(aggrlist), mean)[[2]] + GPP.AVG.ll <- aggregate(GPP.AVG, by = list(aggrlist), min)[[2]] + GPP.AVG.ul <- aggregate(GPP.AVG, by = list(aggrlist), max)[[2]] #------------------------------------------------------------------------------------------------# # units: umol/m2/s - LEAF.RESP.AVG = data$AVG.LEAF.RESP[subset]*umol2gc - LEAF.RESP.AVG.mn = 
aggregate(LEAF.RESP.AVG,by=list(aggrlist),mean)[[2]] - LEAF.RESP.AVG.ll = aggregate(LEAF.RESP.AVG,by=list(aggrlist),min)[[2]] - LEAF.RESP.AVG.ul = aggregate(LEAF.RESP.AVG,by=list(aggrlist),max)[[2]] + LEAF.RESP.AVG <- data$AVG.LEAF.RESP[subset] * umol2gc + LEAF.RESP.AVG.mn <- aggregate(LEAF.RESP.AVG, by = list(aggrlist), mean)[[2]] + LEAF.RESP.AVG.ll <- aggregate(LEAF.RESP.AVG, by = list(aggrlist), min)[[2]] + LEAF.RESP.AVG.ul <- aggregate(LEAF.RESP.AVG, by = list(aggrlist), max)[[2]] #------------------------------------------------------------------------------------------------# # units: umol/m2/s - VLEAF.RESP.AVG = data$AVG.VLEAF.RESP[subset]*umol2gc - VLEAF.RESP.AVG.mn = aggregate(VLEAF.RESP.AVG,by=list(aggrlist),mean)[[2]] - VLEAF.RESP.AVG.ll = aggregate(VLEAF.RESP.AVG,by=list(aggrlist),min)[[2]] - VLEAF.RESP.AVG.ul = aggregate(VLEAF.RESP.AVG,by=list(aggrlist),max)[[2]] + VLEAF.RESP.AVG <- data$AVG.VLEAF.RESP[subset] * umol2gc + VLEAF.RESP.AVG.mn <- aggregate(VLEAF.RESP.AVG, by = list(aggrlist), mean)[[2]] + VLEAF.RESP.AVG.ll <- aggregate(VLEAF.RESP.AVG, by = list(aggrlist), min)[[2]] + VLEAF.RESP.AVG.ul <- aggregate(VLEAF.RESP.AVG, by = list(aggrlist), max)[[2]] #------------------------------------------------------------------------------------------------# # units: umol/m2/s - STORAGE.RESP.AVG = data$AVG.STORAGE.RESP[subset]*umol2gc - STORAGE.RESP.AVG.mn = aggregate(STORAGE.RESP.AVG,by=list(aggrlist),mean)[[2]] - STORAGE.RESP.AVG.ll = aggregate(STORAGE.RESP.AVG,by=list(aggrlist),min)[[2]] - STORAGE.RESP.AVG.ul = aggregate(STORAGE.RESP.AVG,by=list(aggrlist),max)[[2]] + STORAGE.RESP.AVG <- data$AVG.STORAGE.RESP[subset] * umol2gc + STORAGE.RESP.AVG.mn <- aggregate(STORAGE.RESP.AVG, by = list(aggrlist), mean)[[2]] + STORAGE.RESP.AVG.ll <- aggregate(STORAGE.RESP.AVG, by = list(aggrlist), min)[[2]] + STORAGE.RESP.AVG.ul <- aggregate(STORAGE.RESP.AVG, by = list(aggrlist), max)[[2]] #------------------------------------------------------------------------------------------------# # units: umol/m2/s - GROWTH.RESP.AVG = data$AVG.GROWTH.RESP[subset]*umol2gc - GROWTH.RESP.AVG.mn = aggregate(GROWTH.RESP.AVG,by=list(aggrlist),mean)[[2]] - GROWTH.RESP.AVG.ll = aggregate(GROWTH.RESP.AVG,by=list(aggrlist),min)[[2]] - GROWTH.RESP.AVG.ul = aggregate(GROWTH.RESP.AVG,by=list(aggrlist),max)[[2]] + GROWTH.RESP.AVG <- data$AVG.GROWTH.RESP[subset] * umol2gc + GROWTH.RESP.AVG.mn <- aggregate(GROWTH.RESP.AVG, by = list(aggrlist), mean)[[2]] + GROWTH.RESP.AVG.ll <- aggregate(GROWTH.RESP.AVG, by = list(aggrlist), min)[[2]] + GROWTH.RESP.AVG.ul <- aggregate(GROWTH.RESP.AVG, by = list(aggrlist), max)[[2]] #------------------------------------------------------------------------------------------------# # units: umol/m2/s - ROOT.RESP.AVG = data$AVG.ROOT.RESP[subset]*umol2gc - ROOT.RESP.AVG.mn = aggregate(ROOT.RESP.AVG,by=list(aggrlist),mean)[[2]] - ROOT.RESP.AVG.ll = aggregate(ROOT.RESP.AVG,by=list(aggrlist),min)[[2]] - ROOT.RESP.AVG.ul = aggregate(ROOT.RESP.AVG,by=list(aggrlist),max)[[2]] + ROOT.RESP.AVG <- data$AVG.ROOT.RESP[subset] * umol2gc + ROOT.RESP.AVG.mn <- aggregate(ROOT.RESP.AVG, by = list(aggrlist), mean)[[2]] + ROOT.RESP.AVG.ll <- aggregate(ROOT.RESP.AVG, by = list(aggrlist), min)[[2]] + ROOT.RESP.AVG.ul <- aggregate(ROOT.RESP.AVG, by = list(aggrlist), max)[[2]] #------------------------------------------------------------------------------------------------# - PLANT.RESP.AVG = data$AVG.PLANT.RESP[subset] *umol2gc - PLANT.RESP.AVG.mn = aggregate(PLANT.RESP.AVG,by=list(aggrlist),mean)[[2]] - 
PLANT.RESP.AVG.ll = aggregate(PLANT.RESP.AVG,by=list(aggrlist),min)[[2]] - PLANT.RESP.AVG.ul = aggregate(PLANT.RESP.AVG,by=list(aggrlist),max)[[2]] + PLANT.RESP.AVG <- data$AVG.PLANT.RESP[subset] * umol2gc + PLANT.RESP.AVG.mn <- aggregate(PLANT.RESP.AVG, by = list(aggrlist), mean)[[2]] + PLANT.RESP.AVG.ll <- aggregate(PLANT.RESP.AVG, by = list(aggrlist), min)[[2]] + PLANT.RESP.AVG.ul <- aggregate(PLANT.RESP.AVG, by = list(aggrlist), max)[[2]] #------------------------------------------------------------------------------------------------# - HTROPH.RESP.AVG = data$AVG.HTROPH.RESP[subset] *umol2gc - HTROPH.RESP.AVG.mn = aggregate(HTROPH.RESP.AVG,by=list(aggrlist),mean)[[2]] - HTROPH.RESP.AVG.ll = aggregate(HTROPH.RESP.AVG,by=list(aggrlist),min)[[2]] - HTROPH.RESP.AVG.ul = aggregate(HTROPH.RESP.AVG,by=list(aggrlist),max)[[2]] + HTROPH.RESP.AVG <- data$AVG.HTROPH.RESP[subset] * umol2gc + HTROPH.RESP.AVG.mn <- aggregate(HTROPH.RESP.AVG, by = list(aggrlist), mean)[[2]] + HTROPH.RESP.AVG.ll <- aggregate(HTROPH.RESP.AVG, by = list(aggrlist), min)[[2]] + HTROPH.RESP.AVG.ul <- aggregate(HTROPH.RESP.AVG, by = list(aggrlist), max)[[2]] #------------------------------------------------------------------------------------------------# - Reco.AVG.mn = (PLANT.RESP.AVG.mn + HTROPH.RESP.AVG.mn) - Reco.AVG.ll = (PLANT.RESP.AVG.ll + HTROPH.RESP.AVG.ll) - Reco.AVG.ul = (PLANT.RESP.AVG.ul + HTROPH.RESP.AVG.ul) + Reco.AVG.mn <- (PLANT.RESP.AVG.mn + HTROPH.RESP.AVG.mn) + Reco.AVG.ll <- (PLANT.RESP.AVG.ll + HTROPH.RESP.AVG.ll) + Reco.AVG.ul <- (PLANT.RESP.AVG.ul + HTROPH.RESP.AVG.ul) #------------------------------------------------------------------------------------------------# - #NPP.AVG = data$AVG.NPPDAILY[subset] *umol2gc - #NPP.AVG.mn = aggregate(NPP.AVG,by=list(aggrlist),mean)[[2]] - #NPP.AVG.ll = aggregate(NPP.AVG,by=list(aggrlist),min)[[2]] - #NPP.AVG.ul = aggregate(NPP.AVG,by=list(aggrlist),max)[[2]] - NPP.AVG.mn = (GPP.AVG.mn - PLANT.RESP.AVG.mn) - NPP.AVG.ll = (GPP.AVG.ll - PLANT.RESP.AVG.ul) - NPP.AVG.ul = (GPP.AVG.ul - PLANT.RESP.AVG.ll) + # NPP.AVG = data$AVG.NPPDAILY[subset] *umol2gc + # NPP.AVG.mn = aggregate(NPP.AVG,by=list(aggrlist),mean)[[2]] + # NPP.AVG.ll = aggregate(NPP.AVG,by=list(aggrlist),min)[[2]] + # NPP.AVG.ul = aggregate(NPP.AVG,by=list(aggrlist),max)[[2]] + NPP.AVG.mn <- (GPP.AVG.mn - PLANT.RESP.AVG.mn) + NPP.AVG.ll <- (GPP.AVG.ll - PLANT.RESP.AVG.ul) + NPP.AVG.ul <- (GPP.AVG.ul - PLANT.RESP.AVG.ll) #------------------------------------------------------------------------------------------------# - NEE.AVG.mn = -1*(GPP.AVG.mn - (PLANT.RESP.AVG.mn + HTROPH.RESP.AVG.mn)) - NEE.AVG.ll = -1*(GPP.AVG.ll - (PLANT.RESP.AVG.ul + HTROPH.RESP.AVG.ul)) - NEE.AVG.ul = -1*(GPP.AVG.ul - (PLANT.RESP.AVG.ll + HTROPH.RESP.AVG.ll)) + NEE.AVG.mn <- -1 * (GPP.AVG.mn - (PLANT.RESP.AVG.mn + HTROPH.RESP.AVG.mn)) + NEE.AVG.ll <- -1 * (GPP.AVG.ll - (PLANT.RESP.AVG.ul + HTROPH.RESP.AVG.ul)) + NEE.AVG.ul <- -1 * (GPP.AVG.ul - (PLANT.RESP.AVG.ll + HTROPH.RESP.AVG.ll)) #------------------------------------------------------------------------------------------------# # [kg/m2/s] - #AVG.VAPOR.WC = data$AVG.VAPOR.WC[subset] #polygon wood to canopy air vapor flux - #AVG.VAPOR.WC.mn = aggregate(AVG.VAPOR.WC,by=list(aggrlist),mean)[[2]] - #AVG.VAPOR.WC.ll = aggregate(AVG.VAPOR.WC,by=list(aggrlist),min)[[2]] - #AVG.VAPOR.WC.ul = aggregate(AVG.VAPOR.WC,by=list(aggrlist),max)[[2]] + # AVG.VAPOR.WC = data$AVG.VAPOR.WC[subset] #polygon wood to canopy air vapor flux + # AVG.VAPOR.WC.mn = 
aggregate(AVG.VAPOR.WC,by=list(aggrlist),mean)[[2]] + # AVG.VAPOR.WC.ll = aggregate(AVG.VAPOR.WC,by=list(aggrlist),min)[[2]] + # AVG.VAPOR.WC.ul = aggregate(AVG.VAPOR.WC,by=list(aggrlist),max)[[2]] #------------------------------------------------------------------------------------------------# # attempt to make this backwards compatible - AVG.VAPOR.LC = tryCatch(data$AVG.VAPOR.LC[subset],finally= data$AVG.VAPOR.VC[subset]) - AVG.VAPOR.LC.mn = aggregate(AVG.VAPOR.LC,by=list(aggrlist),mean)[[2]] - AVG.VAPOR.LC.ll = aggregate(AVG.VAPOR.LC,by=list(aggrlist),min)[[2]] - AVG.VAPOR.LC.ul = aggregate(AVG.VAPOR.LC,by=list(aggrlist),max)[[2]] + AVG.VAPOR.LC <- tryCatch(data$AVG.VAPOR.LC[subset], finally = data$AVG.VAPOR.VC[subset]) + AVG.VAPOR.LC.mn <- aggregate(AVG.VAPOR.LC, by = list(aggrlist), mean)[[2]] + AVG.VAPOR.LC.ll <- aggregate(AVG.VAPOR.LC, by = list(aggrlist), min)[[2]] + AVG.VAPOR.LC.ul <- aggregate(AVG.VAPOR.LC, by = list(aggrlist), max)[[2]] + #------------------------------------------------------------------------------------------------# + AVG.VAPOR.GC <- data$AVG.VAPOR.GC[subset] # polygon moisture flux ground to canopy air + AVG.VAPOR.GC.mn <- aggregate(AVG.VAPOR.GC, by = list(aggrlist), mean)[[2]] + AVG.VAPOR.GC.ll <- aggregate(AVG.VAPOR.GC, by = list(aggrlist), min)[[2]] + AVG.VAPOR.GC.ul <- aggregate(AVG.VAPOR.GC, by = list(aggrlist), max)[[2]] #------------------------------------------------------------------------------------------------# - AVG.VAPOR.GC = data$AVG.VAPOR.GC[subset] #polygon moisture flux ground to canopy air - AVG.VAPOR.GC.mn = aggregate(AVG.VAPOR.GC,by=list(aggrlist),mean)[[2]] - AVG.VAPOR.GC.ll = aggregate(AVG.VAPOR.GC,by=list(aggrlist),min)[[2]] - AVG.VAPOR.GC.ul = aggregate(AVG.VAPOR.GC,by=list(aggrlist),max)[[2]] + AVG.VAPOR.AC <- data$AVG.VAPOR.AC[subset] # polygon vapor flux atmosphere to canopy air + AVG.VAPOR.AC.mn <- aggregate(AVG.VAPOR.AC, by = list(aggrlist), mean)[[2]] + AVG.VAPOR.AC.ll <- aggregate(AVG.VAPOR.AC, by = list(aggrlist), min)[[2]] + AVG.VAPOR.AC.ul <- aggregate(AVG.VAPOR.AC, by = list(aggrlist), max)[[2]] #------------------------------------------------------------------------------------------------# - AVG.VAPOR.AC = data$AVG.VAPOR.AC[subset]#polygon vapor flux atmosphere to canopy air - AVG.VAPOR.AC.mn = aggregate(AVG.VAPOR.AC,by=list(aggrlist),mean)[[2]] - AVG.VAPOR.AC.ll = aggregate(AVG.VAPOR.AC,by=list(aggrlist),min)[[2]] - AVG.VAPOR.AC.ul = aggregate(AVG.VAPOR.AC,by=list(aggrlist),max)[[2]] - #------------------------------------------------------------------------------------------------# - AVG.TRANSP = data$AVG.TRANSP[subset]#polygon transpiration from stomata to canopy air spac - AVG.TRANSP.mn = aggregate(AVG.TRANSP,by=list(aggrlist),mean)[[2]] - AVG.TRANSP.ll = aggregate(AVG.TRANSP,by=list(aggrlist),min)[[2]] - AVG.TRANSP.ul = aggregate(AVG.TRANSP,by=list(aggrlist),max)[[2]] + AVG.TRANSP <- data$AVG.TRANSP[subset] # polygon transpiration from stomata to canopy air spac + AVG.TRANSP.mn <- aggregate(AVG.TRANSP, by = list(aggrlist), mean)[[2]] + AVG.TRANSP.ll <- aggregate(AVG.TRANSP, by = list(aggrlist), min)[[2]] + AVG.TRANSP.ul <- aggregate(AVG.TRANSP, by = list(aggrlist), max)[[2]] #------------------------------------------------------------------------------------------------# - AVG.EVAP = data$AVG.EVAP[subset] #Polygon averaged evap/dew from ground and leaves to C - AVG.EVAP.mn = aggregate(AVG.EVAP,by=list(aggrlist),mean)[[2]] - AVG.EVAP.ll = aggregate(AVG.EVAP,by=list(aggrlist),min)[[2]] - AVG.EVAP.ul = 
aggregate(AVG.EVAP,by=list(aggrlist),max)[[2]] + AVG.EVAP <- data$AVG.EVAP[subset] # Polygon averaged evap/dew from ground and leaves to C + AVG.EVAP.mn <- aggregate(AVG.EVAP, by = list(aggrlist), mean)[[2]] + AVG.EVAP.ll <- aggregate(AVG.EVAP, by = list(aggrlist), min)[[2]] + AVG.EVAP.ul <- aggregate(AVG.EVAP, by = list(aggrlist), max)[[2]] #------------------------------------------------------------------------------------------------# - AVG.CAN.SHV = data$AVG.CAN.SHV[subset] #Polygon Average Specific Humidity of Canopy Air - AVG.CAN.SHV.mn = aggregate(AVG.CAN.SHV,by=list(aggrlist),mean)[[2]] - AVG.CAN.SHV.ll = aggregate(AVG.CAN.SHV,by=list(aggrlist),min)[[2]] - AVG.CAN.SHV.ul = aggregate(AVG.CAN.SHV,by=list(aggrlist),max)[[2]] + AVG.CAN.SHV <- data$AVG.CAN.SHV[subset] # Polygon Average Specific Humidity of Canopy Air + AVG.CAN.SHV.mn <- aggregate(AVG.CAN.SHV, by = list(aggrlist), mean)[[2]] + AVG.CAN.SHV.ll <- aggregate(AVG.CAN.SHV, by = list(aggrlist), min)[[2]] + AVG.CAN.SHV.ul <- aggregate(AVG.CAN.SHV, by = list(aggrlist), max)[[2]] #------------------------------------------------------------------------------------------------# - #AVG.SOIL.TEMP = data$AVG.SOIL.TEMP[subset,1,9]-273.15 #Polygon Average Soil Temperature - #AVG.SOIL.TEMP.5cm = aggregate(AVG.SOIL.TEMP,by=list(aggrlist),mean)[[2]] - #AVG.SOIL.TEMP = data$AVG.SOIL.TEMP[subset,1,8]-273.15 #Polygon Average Soil Temperature - #AVG.SOIL.TEMP.10cm = aggregate(AVG.SOIL.TEMP,by=list(aggrlist),mean)[[2]] + # AVG.SOIL.TEMP = data$AVG.SOIL.TEMP[subset,1,9]-273.15 #Polygon Average Soil Temperature + # AVG.SOIL.TEMP.5cm = aggregate(AVG.SOIL.TEMP,by=list(aggrlist),mean)[[2]] + # AVG.SOIL.TEMP = data$AVG.SOIL.TEMP[subset,1,8]-273.15 #Polygon Average Soil Temperature + # AVG.SOIL.TEMP.10cm = aggregate(AVG.SOIL.TEMP,by=list(aggrlist),mean)[[2]] #------------------------------------------------------------------------------------------------# - #CAN.AIR.TEMP.AVG = (data$AVG.CAN.TEMP[subset])-273.15 # convert to celcius - #SWC.AVG = data$AVG.SOIL.WATER[subset,1,9] # soil moisture at 5cm + # CAN.AIR.TEMP.AVG = (data$AVG.CAN.TEMP[subset])-273.15 # convert to celcius + # SWC.AVG = data$AVG.SOIL.WATER[subset,1,9] # soil moisture at 5cm ########################################################################################################### ##################################### COMPONENT FLUXES #################################################### - pdf(paste(out.dir,"/","ED2_",year,"_Site_Avg_Fluxes.pdf",sep=""),width=12,height=11, - onefile=TRUE) - par(mfrow=c(3,2),mar=c(5,5.7,0.9,0.5),mgp=c(3.3,1.5,0),oma=c(0,0,3,0)) # B, L, T, R - - #========================================================================================================== + pdf(paste(out.dir, "/", "ED2_", year, "_Site_Avg_Fluxes.pdf", sep = ""), + width = 12, height = 11, + onefile = TRUE + ) + par(mfrow = c(3, 2), mar = c(5, 5.7, 0.9, 0.5), mgp = c(3.3, 1.5, 0), oma = c(0, 0, 3, 0)) # B, L, T, R + + # ========================================================================================================== # GPP - #========================================================================================================== - ylim = range(c(GPP.AVG.ll,GPP.AVG.ul),na.rm=TRUE) # define Y lims - plot(start_day:end_day,GPP.AVG.mn,xlab='',ylab=expression(paste(GPP," (gC",~m^{-2},")")), - ylim=ylim,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(v=phenology[,2],lty=2,lwd=1.5,col="green3") - 
abline(v=phenology[,3],lty=2,lwd=1.5,col="brown") - polygon(c(polyx, rev(polyx)), c(GPP.AVG.ul, rev(GPP.AVG.ll)), col="light gray", border="dark grey",lty=2) - lines(start_day:end_day,GPP.AVG.mn,lty=1,col="black") - points(start_day:end_day,GPP.AVG.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - if (is.nan(mean(chk))==0) { - legend("topleft",legend=c("Greenup","Leaf Off"),bty="n", - lty=2,lwd=1.5,col=c("green3","brown"),cex=2) - #text(37,max(GPP.AVG)-4,"GS Length:",cex=2) - #text(35,max(GPP.AVG)-5,paste(round(GS_LENGTH,2)," days",sep=""), + # ========================================================================================================== + ylim <- range(c(GPP.AVG.ll, GPP.AVG.ul), na.rm = TRUE) # define Y lims + plot(start_day:end_day, GPP.AVG.mn, + xlab = "", ylab = expression(paste(GPP, " (gC", ~ m^{ + -2 + }, ")")), + ylim = ylim, pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(v = phenology[, 2], lty = 2, lwd = 1.5, col = "green3") + abline(v = phenology[, 3], lty = 2, lwd = 1.5, col = "brown") + polygon(c(polyx, rev(polyx)), c(GPP.AVG.ul, rev(GPP.AVG.ll)), col = "light gray", border = "dark grey", lty = 2) + lines(start_day:end_day, GPP.AVG.mn, lty = 1, col = "black") + points(start_day:end_day, GPP.AVG.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + if (is.nan(mean(chk)) == 0) { + legend("topleft", + legend = c("Greenup", "Leaf Off"), bty = "n", + lty = 2, lwd = 1.5, col = c("green3", "brown"), cex = 2 + ) + # text(37,max(GPP.AVG)-4,"GS Length:",cex=2) + # text(35,max(GPP.AVG)-5,paste(round(GS_LENGTH,2)," days",sep=""), # cex=2 ) } - abline(h=0,lty=2,lwd=1.5,col="black") + abline(h = 0, lty = 2, lwd = 1.5, col = "black") rm(chk) - box(lwd=2.2) - #========================================================================================================== + box(lwd = 2.2) + # ========================================================================================================== # NPP - #========================================================================================================== - ylim = range(c(NPP.AVG.ll,NPP.AVG.ul),na.rm=TRUE) # define Y lims - plot(start_day:end_day,NPP.AVG.mn,xlab='',ylab=expression(paste(NPP," (gC",~m^{-2},")")), - pch=21,col="black", bg="black",ylim=ylim, - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx, rev(polyx)), c(NPP.AVG.ul, rev(NPP.AVG.ll)), - col="light gray", border="dark grey",lty=2) - lines(start_day:end_day,NPP.AVG.mn,lty=1,col="black") - points(start_day:end_day,NPP.AVG.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - #========================================================================================================== + # ========================================================================================================== + ylim <- range(c(NPP.AVG.ll, NPP.AVG.ul), na.rm = TRUE) # define Y lims + plot(start_day:end_day, NPP.AVG.mn, + xlab = "", ylab = expression(paste(NPP, " (gC", ~ m^{ + -2 + }, ")")), + pch = 21, col = "black", bg = "black", ylim = ylim, + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(NPP.AVG.ul, rev(NPP.AVG.ll)), + col = "light gray", border = "dark grey", lty = 2 + ) + lines(start_day:end_day, NPP.AVG.mn, lty = 1, 
col = "black") + points(start_day:end_day, NPP.AVG.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # ========================================================================================================== # Plant Resp - #========================================================================================================== - ylim = range(c(PLANT.RESP.AVG.ll,PLANT.RESP.AVG.ul),na.rm=TRUE) # define Y lims - plot(start_day:end_day,PLANT.RESP.AVG.mn,xlab='',ylim=ylim, - ylab=expression(paste(italic(R)[a]," (gC",~m^{-2},")")),pch=21,col="black", - bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx, rev(polyx)), c(PLANT.RESP.AVG.ul, rev(PLANT.RESP.AVG.ll)), - col="light gray", border="dark grey",lty=2) - lines(start_day:end_day,PLANT.RESP.AVG.mn,lty=1,col="black") - points(start_day:end_day,PLANT.RESP.AVG.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - #========================================================================================================== + # ========================================================================================================== + ylim <- range(c(PLANT.RESP.AVG.ll, PLANT.RESP.AVG.ul), na.rm = TRUE) # define Y lims + plot(start_day:end_day, PLANT.RESP.AVG.mn, + xlab = "", ylim = ylim, + ylab = expression(paste(italic(R)[a], " (gC", ~ m^{ + -2 + }, ")")), pch = 21, col = "black", + bg = "black", cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(PLANT.RESP.AVG.ul, rev(PLANT.RESP.AVG.ll)), + col = "light gray", border = "dark grey", lty = 2 + ) + lines(start_day:end_day, PLANT.RESP.AVG.mn, lty = 1, col = "black") + points(start_day:end_day, PLANT.RESP.AVG.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # ========================================================================================================== # Heterotrophic Resp - #========================================================================================================== - ylim = range(c(HTROPH.RESP.AVG.ll,HTROPH.RESP.AVG.ul),na.rm=TRUE) # define Y lims - plot(start_day:end_day,HTROPH.RESP.AVG.mn,xlab='',ylim=ylim, - ylab=expression(paste(italic(R)[h]," (gC",~m^{-2},")")),pch=21,col="black", - bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx, rev(polyx)), c(HTROPH.RESP.AVG.ul, rev(HTROPH.RESP.AVG.ll)), - col="light gray", border="dark grey",lty=2) - lines(start_day:end_day,HTROPH.RESP.AVG.mn,lty=1,col="black") - points(start_day:end_day,HTROPH.RESP.AVG.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - #========================================================================================================== + # ========================================================================================================== + ylim <- range(c(HTROPH.RESP.AVG.ll, HTROPH.RESP.AVG.ul), na.rm = TRUE) # define Y lims + plot(start_day:end_day, HTROPH.RESP.AVG.mn, + xlab = "", ylim = ylim, + ylab = expression(paste(italic(R)[h], " (gC", ~ m^{ + -2 + }, ")")), pch = 21, col = "black", + bg = 
"black", cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(HTROPH.RESP.AVG.ul, rev(HTROPH.RESP.AVG.ll)), + col = "light gray", border = "dark grey", lty = 2 + ) + lines(start_day:end_day, HTROPH.RESP.AVG.mn, lty = 1, col = "black") + points(start_day:end_day, HTROPH.RESP.AVG.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # ========================================================================================================== # Reco - #========================================================================================================== - ylim = range(c(Reco.AVG.ll,Reco.AVG.ul),na.rm=TRUE) - plot(start_day:end_day,Reco.AVG.mn,xlab=paste("DOY",as.character(year)),ylim=ylim, - ylab=expression(paste(italic(R)[eco.]," (gC",~m^{-2},")")), - pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx, rev(polyx)), c(Reco.AVG.ul, rev(Reco.AVG.ll)),col="light gray", border="dark grey",lty=2) - lines(start_day:end_day,Reco.AVG.mn,lty=1,col="black") - points(start_day:end_day,Reco.AVG.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - #========================================================================================================== + # ========================================================================================================== + ylim <- range(c(Reco.AVG.ll, Reco.AVG.ul), na.rm = TRUE) + plot(start_day:end_day, Reco.AVG.mn, + xlab = paste("DOY", as.character(year)), ylim = ylim, + ylab = expression(paste(italic(R)[eco.], " (gC", ~ m^{ + -2 + }, ")")), + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(Reco.AVG.ul, rev(Reco.AVG.ll)), col = "light gray", border = "dark grey", lty = 2) + lines(start_day:end_day, Reco.AVG.mn, lty = 1, col = "black") + points(start_day:end_day, Reco.AVG.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # ========================================================================================================== # NEE - #========================================================================================================== - ylim = range(c(NEE.AVG.ll,NEE.AVG.ul),na.rm=TRUE) - plot(start_day:end_day,NEE.AVG.mn,xlab=paste("DOY",as.character(year)),ylim=ylim, - ylab=expression(paste(NEE," (gC",~m^{-2},")")), - pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx, rev(polyx)), c(NEE.AVG.ul, rev(NEE.AVG.ll)),col="light gray", border="dark grey",lty=2) - lines(start_day:end_day,NEE.AVG.mn,lty=1,col="black") - points(start_day:end_day,NEE.AVG.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - + # ========================================================================================================== + ylim <- range(c(NEE.AVG.ll, NEE.AVG.ul), na.rm = TRUE) + plot(start_day:end_day, NEE.AVG.mn, + xlab = paste("DOY", as.character(year)), ylim = ylim, + ylab = expression(paste(NEE, " (gC", ~ m^{ + -2 + }, ")")), + pch = 
21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(NEE.AVG.ul, rev(NEE.AVG.ll)), col = "light gray", border = "dark grey", lty = 2) + lines(start_day:end_day, NEE.AVG.mn, lty = 1, col = "black") + points(start_day:end_day, NEE.AVG.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # add single title to plot - mtext("Site Component Fluxes", side=3, line=1, outer=TRUE, cex=1.5, font=2) - + mtext("Site Component Fluxes", side = 3, line = 1, outer = TRUE, cex = 1.5, font = 2) + ######################################## RESPIRATION COMPONENTS ########################################### - par(mfrow=c(3,2),mar=c(5,5.7,0.9,0.5),mgp=c(3.3,1.5,0),oma=c(0,0,3,0)) # B, L, T, R - #========================================================================================================== + par(mfrow = c(3, 2), mar = c(5, 5.7, 0.9, 0.5), mgp = c(3.3, 1.5, 0), oma = c(0, 0, 3, 0)) # B, L, T, R + # ========================================================================================================== # Plant resp - #========================================================================================================== - ylim = range(c(PLANT.RESP.AVG.ll,PLANT.RESP.AVG.ul),na.rm=TRUE) # define Y lims - plot(start_day:end_day,PLANT.RESP.AVG.mn,xlab='',ylim=ylim, - ylab=expression(paste(italic(R)[a]," (gC",~m^{-2},")")),pch=21,col="black", - bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) + # ========================================================================================================== + ylim <- range(c(PLANT.RESP.AVG.ll, PLANT.RESP.AVG.ul), na.rm = TRUE) # define Y lims + plot(start_day:end_day, PLANT.RESP.AVG.mn, + xlab = "", ylim = ylim, + ylab = expression(paste(italic(R)[a], " (gC", ~ m^{ + -2 + }, ")")), pch = 21, col = "black", + bg = "black", cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) polygon(c(polyx, rev(polyx)), c(PLANT.RESP.AVG.ul, rev(PLANT.RESP.AVG.ll)), - col="light gray", border="dark grey",lty=2) - lines(start_day:end_day,PLANT.RESP.AVG.mn,lty=1,col="black") - points(start_day:end_day,PLANT.RESP.AVG.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - #========================================================================================================== + col = "light gray", border = "dark grey", lty = 2 + ) + lines(start_day:end_day, PLANT.RESP.AVG.mn, lty = 1, col = "black") + points(start_day:end_day, PLANT.RESP.AVG.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # ========================================================================================================== # Leaf resp - #========================================================================================================== - ylim = range(c(LEAF.RESP.AVG.ll,LEAF.RESP.AVG.ul),na.rm=TRUE) - plot(start_day:end_day,LEAF.RESP.AVG.mn,xlab='',ylim=ylim, - ylab=expression(paste(italic(R)[leaf]," (gC",~m^{-2},")")),pch=21,col="black", - bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx,rev(polyx)),c(LEAF.RESP.AVG.ul,rev(LEAF.RESP.AVG.ll)),col="light gray", - border="dark 
grey",lty=2) - lines(start_day:end_day,LEAF.RESP.AVG.mn,lty=1,col="black") - points(start_day:end_day,LEAF.RESP.AVG.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - #========================================================================================================== + # ========================================================================================================== + ylim <- range(c(LEAF.RESP.AVG.ll, LEAF.RESP.AVG.ul), na.rm = TRUE) + plot(start_day:end_day, LEAF.RESP.AVG.mn, + xlab = "", ylim = ylim, + ylab = expression(paste(italic(R)[leaf], " (gC", ~ m^{ + -2 + }, ")")), pch = 21, col = "black", + bg = "black", cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(LEAF.RESP.AVG.ul, rev(LEAF.RESP.AVG.ll)), + col = "light gray", + border = "dark grey", lty = 2 + ) + lines(start_day:end_day, LEAF.RESP.AVG.mn, lty = 1, col = "black") + points(start_day:end_day, LEAF.RESP.AVG.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # ========================================================================================================== # Root resp - #========================================================================================================== - ylim = range(c(ROOT.RESP.AVG.ll,ROOT.RESP.AVG.ul),na.rm=TRUE) - plot(start_day:end_day,ROOT.RESP.AVG.mn,xlab='',ylim=ylim, - ylab=expression(paste(italic(R)[root]," (gC",~m^{-2},")")),pch=21,col="black", - bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx,rev(polyx)),c(ROOT.RESP.AVG.ul,rev(ROOT.RESP.AVG.ll)),col="light gray", - border="dark grey",lty=2) - lines(start_day:end_day,ROOT.RESP.AVG.mn,lty=1,col="black") - points(start_day:end_day,ROOT.RESP.AVG.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - #========================================================================================================== + # ========================================================================================================== + ylim <- range(c(ROOT.RESP.AVG.ll, ROOT.RESP.AVG.ul), na.rm = TRUE) + plot(start_day:end_day, ROOT.RESP.AVG.mn, + xlab = "", ylim = ylim, + ylab = expression(paste(italic(R)[root], " (gC", ~ m^{ + -2 + }, ")")), pch = 21, col = "black", + bg = "black", cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(ROOT.RESP.AVG.ul, rev(ROOT.RESP.AVG.ll)), + col = "light gray", + border = "dark grey", lty = 2 + ) + lines(start_day:end_day, ROOT.RESP.AVG.mn, lty = 1, col = "black") + points(start_day:end_day, ROOT.RESP.AVG.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # ========================================================================================================== # Growth Resp - #========================================================================================================== - ylim = range(c(GROWTH.RESP.AVG.ll,GROWTH.RESP.AVG.ul),na.rm=TRUE) - plot(start_day:end_day,GROWTH.RESP.AVG.mn,xlab='',ylim=ylim, - ylab=expression(paste(italic(R)[growth]," (gC",~m^{-2},")")),pch=21,col="black", 
- bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx,rev(polyx)),c(GROWTH.RESP.AVG.ul,rev(GROWTH.RESP.AVG.ll)),col="light gray", - border="dark grey",lty=2) - lines(start_day:end_day,GROWTH.RESP.AVG.mn,lty=1,col="black") - points(start_day:end_day,GROWTH.RESP.AVG.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - #========================================================================================================== + # ========================================================================================================== + ylim <- range(c(GROWTH.RESP.AVG.ll, GROWTH.RESP.AVG.ul), na.rm = TRUE) + plot(start_day:end_day, GROWTH.RESP.AVG.mn, + xlab = "", ylim = ylim, + ylab = expression(paste(italic(R)[growth], " (gC", ~ m^{ + -2 + }, ")")), pch = 21, col = "black", + bg = "black", cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(GROWTH.RESP.AVG.ul, rev(GROWTH.RESP.AVG.ll)), + col = "light gray", + border = "dark grey", lty = 2 + ) + lines(start_day:end_day, GROWTH.RESP.AVG.mn, lty = 1, col = "black") + points(start_day:end_day, GROWTH.RESP.AVG.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # ========================================================================================================== # Storage Resp - #========================================================================================================== - ylim = range(c(STORAGE.RESP.AVG.ll,STORAGE.RESP.AVG.ul),na.rm=TRUE) - plot(start_day:end_day,STORAGE.RESP.AVG.mn,xlab='',ylim=ylim, - ylab=expression(paste(italic(R)[growth]," (gC",~m^{-2},")")),pch=21,col="black", - bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx,rev(polyx)),c(STORAGE.RESP.AVG.ul,rev(STORAGE.RESP.AVG.ll)),col="light gray", - border="dark grey",lty=2) - lines(start_day:end_day,STORAGE.RESP.AVG.mn,lty=1,col="black") - points(start_day:end_day,STORAGE.RESP.AVG.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - #========================================================================================================== + # ========================================================================================================== + ylim <- range(c(STORAGE.RESP.AVG.ll, STORAGE.RESP.AVG.ul), na.rm = TRUE) + plot(start_day:end_day, STORAGE.RESP.AVG.mn, + xlab = "", ylim = ylim, + ylab = expression(paste(italic(R)[growth], " (gC", ~ m^{ + -2 + }, ")")), pch = 21, col = "black", + bg = "black", cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(STORAGE.RESP.AVG.ul, rev(STORAGE.RESP.AVG.ll)), + col = "light gray", + border = "dark grey", lty = 2 + ) + lines(start_day:end_day, STORAGE.RESP.AVG.mn, lty = 1, col = "black") + points(start_day:end_day, STORAGE.RESP.AVG.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # ========================================================================================================== # Vleaf resp - 
#========================================================================================================== - ylim = range(c(VLEAF.RESP.AVG.ll,VLEAF.RESP.AVG.ul),na.rm=TRUE) - plot(start_day:end_day,VLEAF.RESP.AVG.mn,xlab='',ylim=ylim, - ylab=expression(paste(italic(VR)[leaf]," (gC",~m^{-2},")")),pch=21,col="black", - bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx,rev(polyx)),c(VLEAF.RESP.AVG.ul,rev(VLEAF.RESP.AVG.ll)),col="light gray", - border="dark grey",lty=2) - lines(start_day:end_day,VLEAF.RESP.AVG.mn,lty=1,col="black") - points(start_day:end_day,VLEAF.RESP.AVG.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - + # ========================================================================================================== + ylim <- range(c(VLEAF.RESP.AVG.ll, VLEAF.RESP.AVG.ul), na.rm = TRUE) + plot(start_day:end_day, VLEAF.RESP.AVG.mn, + xlab = "", ylim = ylim, + ylab = expression(paste(italic(VR)[leaf], " (gC", ~ m^{ + -2 + }, ")")), pch = 21, col = "black", + bg = "black", cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(VLEAF.RESP.AVG.ul, rev(VLEAF.RESP.AVG.ll)), + col = "light gray", + border = "dark grey", lty = 2 + ) + lines(start_day:end_day, VLEAF.RESP.AVG.mn, lty = 1, col = "black") + points(start_day:end_day, VLEAF.RESP.AVG.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # Plot title - mtext("Site Component Respiration ", side=3, line=1, outer=TRUE, cex=1.5, font=2) - + mtext("Site Component Respiration ", side = 3, line = 1, outer = TRUE, cex = 1.5, font = 2) + ########################################### Energy Balance ################################################ - par(mfrow=c(3,2),mar=c(5,5.7,0.9,0.5),mgp=c(3.3,1.5,0),oma=c(0,0,3,0)) # B, L, T, R - - #========================================================================================================== + par(mfrow = c(3, 2), mar = c(5, 5.7, 0.9, 0.5), mgp = c(3.3, 1.5, 0), oma = c(0, 0, 3, 0)) # B, L, T, R + + # ========================================================================================================== # Polygon vegetation/leaf vapor flux - #========================================================================================================== - ylim = range(c(AVG.VAPOR.LC.ll,AVG.VAPOR.LC.ul),na.rm=TRUE) - plot(start_day:end_day,AVG.VAPOR.LC.mn,xlab='',ylim=ylim, - ylab=expression(paste(V.~Flux[veg~to~CAS]," (kg",~m^{-2}~s^{-1},")")),pch=21,col="black", - bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx,rev(polyx)),c(AVG.VAPOR.LC.ul,rev(AVG.VAPOR.LC.ll)),col="light gray", - border="dark grey",lty=2) - lines(start_day:end_day,AVG.VAPOR.LC.mn,lty=1,col="black") - points(start_day:end_day,AVG.VAPOR.LC.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - #========================================================================================================== + # ========================================================================================================== + ylim <- range(c(AVG.VAPOR.LC.ll, AVG.VAPOR.LC.ul), na.rm = TRUE) + plot(start_day:end_day, AVG.VAPOR.LC.mn, + xlab = "", ylim = ylim, + ylab = expression(paste(V. 
~ Flux[veg ~ to ~ CAS], " (kg", ~ m^{ + -2 + } ~ s^{ + -1 + }, ")")), pch = 21, col = "black", + bg = "black", cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(AVG.VAPOR.LC.ul, rev(AVG.VAPOR.LC.ll)), + col = "light gray", + border = "dark grey", lty = 2 + ) + lines(start_day:end_day, AVG.VAPOR.LC.mn, lty = 1, col = "black") + points(start_day:end_day, AVG.VAPOR.LC.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # ========================================================================================================== # Polygon moisture flux ground to canopy air - #========================================================================================================== - ylim = range(c(AVG.VAPOR.GC.ll,AVG.VAPOR.GC.ul),na.rm=TRUE) - plot(start_day:end_day,AVG.VAPOR.GC.mn,xlab='',ylim=ylim, - ylab=expression(paste(V.~Flux[ground~to~CAS]," (kg",~m^{-2}~s^{-1},")")),pch=21,col="black", - bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx,rev(polyx)),c(AVG.VAPOR.GC.ll,rev(AVG.VAPOR.GC.ul)),col="light gray", - border="dark grey",lty=2) - lines(start_day:end_day,AVG.VAPOR.GC.mn,lty=1,col="black") - points(start_day:end_day,AVG.VAPOR.GC.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - #========================================================================================================== + # ========================================================================================================== + ylim <- range(c(AVG.VAPOR.GC.ll, AVG.VAPOR.GC.ul), na.rm = TRUE) + plot(start_day:end_day, AVG.VAPOR.GC.mn, + xlab = "", ylim = ylim, + ylab = expression(paste(V. 
~ Flux[ground ~ to ~ CAS], " (kg", ~ m^{ + -2 + } ~ s^{ + -1 + }, ")")), pch = 21, col = "black", + bg = "black", cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(AVG.VAPOR.GC.ll, rev(AVG.VAPOR.GC.ul)), + col = "light gray", + border = "dark grey", lty = 2 + ) + lines(start_day:end_day, AVG.VAPOR.GC.mn, lty = 1, col = "black") + points(start_day:end_day, AVG.VAPOR.GC.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # ========================================================================================================== # Polygon vapor flux atmosphere to canopy air - #========================================================================================================== - ylim = range(c(AVG.VAPOR.AC.ll,AVG.VAPOR.AC.ul),na.rm=TRUE) - plot(start_day:end_day,AVG.VAPOR.AC.mn,xlab='',ylim=ylim, - ylab=expression(paste(V.~Flux[atm.~to~CAS]," (kg",~m^{-2}~s^{-1},")")),pch=21,col="black", - bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx,rev(polyx)),c(AVG.VAPOR.AC.ul,rev(AVG.VAPOR.AC.ll)),col="light gray", - border="dark grey",lty=2) - lines(start_day:end_day,AVG.VAPOR.AC.mn,lty=1,col="black") - points(start_day:end_day,AVG.VAPOR.AC.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - #========================================================================================================== + # ========================================================================================================== + ylim <- range(c(AVG.VAPOR.AC.ll, AVG.VAPOR.AC.ul), na.rm = TRUE) + plot(start_day:end_day, AVG.VAPOR.AC.mn, + xlab = "", ylim = ylim, + ylab = expression(paste(V. ~ Flux[atm. 
~ to ~ CAS], " (kg", ~ m^{ + -2 + } ~ s^{ + -1 + }, ")")), pch = 21, col = "black", + bg = "black", cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(AVG.VAPOR.AC.ul, rev(AVG.VAPOR.AC.ll)), + col = "light gray", + border = "dark grey", lty = 2 + ) + lines(start_day:end_day, AVG.VAPOR.AC.mn, lty = 1, col = "black") + points(start_day:end_day, AVG.VAPOR.AC.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # ========================================================================================================== # Polygon transpiration from stomata to canopy air spac - #========================================================================================================== - ylim = range(c(AVG.TRANSP.ll,AVG.TRANSP.ul),na.rm=TRUE) - plot(start_day:end_day,AVG.TRANSP.mn,xlab='',ylim=ylim, - ylab=expression(paste(Transpiration," (kg",~m^{-2}~s^{-1},")")),pch=21,col="black", - bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx,rev(polyx)),c(AVG.TRANSP.ul,rev(AVG.TRANSP.ll)),col="light gray", - border="dark grey",lty=2) - lines(start_day:end_day,AVG.TRANSP.mn,lty=1,col="black") - points(start_day:end_day,AVG.TRANSP.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - #========================================================================================================== + # ========================================================================================================== + ylim <- range(c(AVG.TRANSP.ll, AVG.TRANSP.ul), na.rm = TRUE) + plot(start_day:end_day, AVG.TRANSP.mn, + xlab = "", ylim = ylim, + ylab = expression(paste(Transpiration, " (kg", ~ m^{ + -2 + } ~ s^{ + -1 + }, ")")), pch = 21, col = "black", + bg = "black", cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(AVG.TRANSP.ul, rev(AVG.TRANSP.ll)), + col = "light gray", + border = "dark grey", lty = 2 + ) + lines(start_day:end_day, AVG.TRANSP.mn, lty = 1, col = "black") + points(start_day:end_day, AVG.TRANSP.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # ========================================================================================================== # Polygon averaged evap/dew from ground and leaves to C - #========================================================================================================== - ylim = range(c(AVG.EVAP.ll,AVG.EVAP.ul),na.rm=TRUE) - plot(start_day:end_day,AVG.EVAP.mn,xlab='',ylim=ylim, - ylab=expression(paste(Evaporation," (kg",~m^{-2}~s^{-1},")")),pch=21,col="black", - bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx,rev(polyx)),c(AVG.EVAP.ul,rev(AVG.EVAP.ll)),col="light gray", - border="dark grey",lty=2) - lines(start_day:end_day,AVG.EVAP.mn,lty=1,col="black") - points(start_day:end_day,AVG.EVAP.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - #========================================================================================================== + # 
========================================================================================================== + ylim <- range(c(AVG.EVAP.ll, AVG.EVAP.ul), na.rm = TRUE) + plot(start_day:end_day, AVG.EVAP.mn, + xlab = "", ylim = ylim, + ylab = expression(paste(Evaporation, " (kg", ~ m^{ + -2 + } ~ s^{ + -1 + }, ")")), pch = 21, col = "black", + bg = "black", cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(AVG.EVAP.ul, rev(AVG.EVAP.ll)), + col = "light gray", + border = "dark grey", lty = 2 + ) + lines(start_day:end_day, AVG.EVAP.mn, lty = 1, col = "black") + points(start_day:end_day, AVG.EVAP.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # ========================================================================================================== # Polygon Average Specific Humidity of Canopy Air - #========================================================================================================== - ylim = range(c(AVG.CAN.SHV.ll,AVG.CAN.SHV.ul),na.rm=TRUE) - plot(start_day:end_day,AVG.CAN.SHV.mn,xlab='',ylim=ylim, - ylab=expression(paste(Sp.Humidity[CAS]," (kg",~kg^{-1},")")),pch=21,col="black", - bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - polygon(c(polyx,rev(polyx)),c(AVG.CAN.SHV.ul,rev(AVG.CAN.SHV.ll)),col="light gray", - border="dark grey",lty=2) - lines(start_day:end_day,AVG.CAN.SHV.mn,lty=1,col="black") - points(start_day:end_day,AVG.CAN.SHV.mn,pch=21,col="black", bg="black", - cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) - abline(h=0,lty=2,lwd=1.5,col="black") - box(lwd=2.2) - #========================================================================================================== + # ========================================================================================================== + ylim <- range(c(AVG.CAN.SHV.ll, AVG.CAN.SHV.ul), na.rm = TRUE) + plot(start_day:end_day, AVG.CAN.SHV.mn, + xlab = "", ylim = ylim, + ylab = expression(paste(Sp.Humidity[CAS], " (kg", ~ kg^{ + -1 + }, ")")), pch = 21, col = "black", + bg = "black", cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + polygon(c(polyx, rev(polyx)), c(AVG.CAN.SHV.ul, rev(AVG.CAN.SHV.ll)), + col = "light gray", + border = "dark grey", lty = 2 + ) + lines(start_day:end_day, AVG.CAN.SHV.mn, lty = 1, col = "black") + points(start_day:end_day, AVG.CAN.SHV.mn, + pch = 21, col = "black", bg = "black", + cex = cex, cex.lab = labcex, cex.axis = axiscex, cex.main = maincex + ) + abline(h = 0, lty = 2, lwd = 1.5, col = "black") + box(lwd = 2.2) + # ========================================================================================================== # Polygon wood to canopy air vapor flux - #========================================================================================================== -# ylim = range(c(AVG.VAPOR.WC.ll,AVG.VAPOR.WC.ul),na.rm=TRUE) -# plot(start_day:end_day,AVG.VAPOR.WC.mn,xlab='',ylim=ylim, -# ylab=expression(paste(italic(Vapor Flux)[wood]," (kg",~m^{-2},~s^{-1}")")),pch=21,col="black", -# bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) -# polygon(c(polyx,rev(polyx)),c(AVG.VAPOR.WC.ul,rev(AVG.VAPOR.WC.ll)),col="light gray", -# border="dark grey",lty=2) -# lines(start_day:end_day,AVG.VAPOR.WC.mn,lty=1,col="black") -# points(start_day:end_day,AVG.VAPOR.WC.mn,pch=21,col="black", bg="black", -# 
cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) -# abline(h=0,lty=2,lwd=1.5,col="black") -# box(lwd=2.2) + # ========================================================================================================== + # ylim = range(c(AVG.VAPOR.WC.ll,AVG.VAPOR.WC.ul),na.rm=TRUE) + # plot(start_day:end_day,AVG.VAPOR.WC.mn,xlab='',ylim=ylim, + # ylab=expression(paste(italic(Vapor Flux)[wood]," (kg",~m^{-2},~s^{-1}")")),pch=21,col="black", + # bg="black",cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) + # polygon(c(polyx,rev(polyx)),c(AVG.VAPOR.WC.ul,rev(AVG.VAPOR.WC.ll)),col="light gray", + # border="dark grey",lty=2) + # lines(start_day:end_day,AVG.VAPOR.WC.mn,lty=1,col="black") + # points(start_day:end_day,AVG.VAPOR.WC.mn,pch=21,col="black", bg="black", + # cex=cex,cex.lab=labcex,cex.axis=axiscex,cex.main=maincex) + # abline(h=0,lty=2,lwd=1.5,col="black") + # box(lwd=2.2) # Plot title - mtext("Site Vapor Fluxes ", side=3, line=1, outer=TRUE, cex=1.5, font=2) + mtext("Site Vapor Fluxes ", side = 3, line = 1, outer = TRUE, cex = 1.5, font = 2) ##################################### MET ########################################## - - #plot(start_day:end_day,AVG.SOIL.TEMP.5cm) - #plot(start_day:end_day,AVG.SOIL.TEMP.10cm) - - #mtext("Site Soil Temperatures ", side=3, line=1, outer=TRUE, cex=1.5, font=2) - - dev.off() # Close PDF + # plot(start_day:end_day,AVG.SOIL.TEMP.5cm) + # plot(start_day:end_day,AVG.SOIL.TEMP.10cm) + + # mtext("Site Soil Temperatures ", side=3, line=1, outer=TRUE, cex=1.5, font=2) + + dev.off() # Close PDF } # END for loop } #----------------------------------------------------------------------------------------------------# @@ -608,178 +751,183 @@ site_fluxes = function(model.run,in.dir,out.dir){ #----------------------------------------------------------------------------------------------------# # Plot monthly -plot_monthly = function(model.run,in.dir,out.dir){ +plot_monthly <- function(model.run, in.dir, out.dir) { # UNDER DEVELOPMENT #--------------------------------------------------------------------------------------------------# - when = NULL - pft.names = c("C4 Grass","Early Tropical","Mid Tropical","Late Tropical" - ,"C3 Grass","North Pine","South Pine","Late Conifer" - ,"Early Temperate","Mid Temperate","Late Temperate" - ,"C3 Pasture","C3 Crop","C4 Pasture","C4 Crop","Subtropical C3 grass ", - "Araucaria","Total") - n.pft = length(pft.names) - 1 + when <- NULL + pft.names <- c( + "C4 Grass", "Early Tropical", "Mid Tropical", "Late Tropical", + "C3 Grass", "North Pine", "South Pine", "Late Conifer", + "Early Temperate", "Mid Temperate", "Late Temperate", + "C3 Pasture", "C3 Crop", "C4 Pasture", "C4 Crop", "Subtropical C3 grass ", + "Araucaria", "Total" + ) + n.pft <- length(pft.names) - 1 #--------------------------------------------------------------------------------------------------# - - + + #--------------------------------------------------------------------------------------------------# #--------------------------------------------------------------------------------------------------# - - + + #----------------------------------------------------------------------------------------------# # Loop over time. 
# #----------------------------------------------------------------------------------------------# - i = 1 # counter printing variable names to log file + i <- 1 # counter printing variable names to log file for (year in start_year:end_year) { - message(paste("--- PROCESSING YEAR: ",year," ---")) - - + message(paste("--- PROCESSING YEAR: ", year, " ---")) + + #--------------------------------------------------------------------------------------------# - if (year == start_year){ - month.begin = IMONTHA - }else{ - month.begin = 1 - } #end if - - if (year == end_year){ - month.end = IMONTHZ - }else{ - month.end = 12 - } #end if - - #n.months = (as.numeric(month.end)-as.numeric(month.begin))+1 - - n.months = -12+as.numeric(month.end)+(12-as.numeric(month.begin)+1) - nplant.pft = matrix(0,nrow=n.months,ncol=n.pft+1) - lai.pft = matrix(0,nrow=n.months,ncol=n.pft+1) - agb.pft = matrix(0,nrow=n.months,ncol=n.pft+1) - coh.area = list() - coh.age = list() - coh.dbh = list() - coh.pft = list() - coh.nplant = list() - coh.height = list() - coh.gpp = list() - coh.resp = list() - coh.npp = list() + if (year == start_year) { + month.begin <- IMONTHA + } else { + month.begin <- 1 + } # end if + + if (year == end_year) { + month.end <- IMONTHZ + } else { + month.end <- 12 + } # end if + + # n.months = (as.numeric(month.end)-as.numeric(month.begin))+1 + + n.months <- -12 + as.numeric(month.end) + (12 - as.numeric(month.begin) + 1) + nplant.pft <- matrix(0, nrow = n.months, ncol = n.pft + 1) + lai.pft <- matrix(0, nrow = n.months, ncol = n.pft + 1) + agb.pft <- matrix(0, nrow = n.months, ncol = n.pft + 1) + coh.area <- list() + coh.age <- list() + coh.dbh <- list() + coh.pft <- list() + coh.nplant <- list() + coh.height <- list() + coh.gpp <- list() + coh.resp <- list() + coh.npp <- list() #--------------------------------------------------------------------------------------------# - - j = 0 # counter for month in output + + j <- 0 # counter for month in output for (mm in month.begin:month.end) { - j = j+1 - - mth = toupper(mon2mmm(mm,lang="English")) #<--- convert month num to 3 letter name - message(paste("-------- PROCESSING MONTH: ",mth)) - - when.now = chron(dates=paste(mm,1,year,sep="/"),times=paste(0,0,0,sep=":")) - when = c(when,when.now) - + j <- j + 1 + + mth <- toupper(mon2mmm(mm, lang = "English")) #<--- convert month num to 3 letter name + message(paste("-------- PROCESSING MONTH: ", mth)) + + when.now <- chron(dates = paste(mm, 1, year, sep = "/"), times = paste(0, 0, 0, sep = ":")) + when <- c(when, when.now) + #---------------- Load ED2 Model Output (hdf5) ----------------------------------------------# - filename = list.files(in.dir,full.names=TRUE, - pattern=paste('.*-E-', year, '-.*.h5', sep=''))[1] - if (is.na(filename)==1) { + filename <- list.files(in.dir, + full.names = TRUE, + pattern = paste(".*-E-", year, "-.*.h5", sep = "") + )[1] + if (is.na(filename) == 1) { break - }else{ - data <- hdf5load(filename, load = FALSE,tidy=TRUE) # LOAD ED2 OUTPUT + } else { + data <- hdf5load(filename, load = FALSE, tidy = TRUE) # LOAD ED2 OUTPUT } - var_names = summary(data) # View info about vars. For debugging - if (i==1){ + var_names <- summary(data) # View info about vars. 
For debugging + if (i == 1) { print("Mean Monthly Output Variables (IMOUTPUT)") print(var_names) print("") } # end of complex if/then #--------------------------------------------------------------------------------------------# - - + + #------------------------------------------------------------------------------------# # Get desired PFT-level variables # #------------------------------------------------------------------------------------# - lai.pft [j,1:n.pft] = data$MMEAN.LAI.PFT + lai.pft[j, 1:n.pft] <- data$MMEAN.LAI.PFT message(data.frame(data$MMEAN.LAI.PFT)) - agb.pft [j,1:n.pft] = data$AGB.PFT + agb.pft[j, 1:n.pft] <- data$AGB.PFT #------------------------------------------------------------------------------------# - - + + #------------------------------------------------------------------------------------# # Define the global number of patches and cohorts. # #------------------------------------------------------------------------------------# - npatches.global = data$NPATCHES.GLOBAL - ncohorts.global = data$NCOHORTS.GLOBAL + npatches.global <- data$NPATCHES.GLOBAL + ncohorts.global <- data$NCOHORTS.GLOBAL #----- Find the indices for the beginning and end of each patch. --------------------# - ncohorts = diff(c(data$PACO.ID,ncohorts.global+1)) - aco = data$PACO.ID - zco = data$PACO.ID + ncohorts - 1 + ncohorts <- diff(c(data$PACO.ID, ncohorts.global + 1)) + aco <- data$PACO.ID + zco <- data$PACO.ID + ncohorts - 1 #------------------------------------------------------------------------------------# - - + + #------------------------------------------------------------------------------------# # Extend the area and age of each patch so it has the same length as the # # cohorts. # #------------------------------------------------------------------------------------# - coh.area[[j]] = rep(data$AREA,times=ncohorts) - coh.age [[j]] = rep(data$AGE ,times=ncohorts) + coh.area[[j]] <- rep(data$AREA, times = ncohorts) + coh.age[[j]] <- rep(data$AGE, times = ncohorts) #------------------------------------------------------------------------------------# - - + + #----- Grab other cohort-level variables. -------------------------------------------# - coh.pft [[j]] = data$PFT + coh.pft[[j]] <- data$PFT message(data$PFT) - coh.dbh [[j]] = data$DBH - coh.nplant [[j]] = data$NPLANT*coh.area[[j]] - coh.height [[j]] = data$HITE - coh.gpp [[j]] = data$MMEAN.GPP.CO - coh.resp [[j]] = ( data$MMEAN.LEAF.RESP.CO - + data$MMEAN.ROOT.RESP.CO - + data$MMEAN.GROWTH.RESP.CO - + data$MMEAN.STORAGE.RESP.CO - + data$MMEAN.VLEAF.RESP.CO ) - coh.npp [[j]] = coh.gpp[[j]] - coh.resp[[j]] # NPP + coh.dbh[[j]] <- data$DBH + coh.nplant[[j]] <- data$NPLANT * coh.area[[j]] + coh.height[[j]] <- data$HITE + coh.gpp[[j]] <- data$MMEAN.GPP.CO + coh.resp[[j]] <- (data$MMEAN.LEAF.RESP.CO + + data$MMEAN.ROOT.RESP.CO + + data$MMEAN.GROWTH.RESP.CO + + data$MMEAN.STORAGE.RESP.CO + + data$MMEAN.VLEAF.RESP.CO) + coh.npp[[j]] <- coh.gpp[[j]] - coh.resp[[j]] # NPP #------------------------------------------------------------------------------------# - - i=i+1 # counter for printing variable names to log file - + + i <- i + 1 # counter for printing variable names to log file } # end for loop for importing monthly data for year x - - + + #------------------------------------------------------------------------------------------# # Find which PFTs we use, and set any NA to zero (in case a PFT goes extinct). 
# #------------------------------------------------------------------------------------------# - tot = n.pft + 1 # <---- total cohort - agb.pft [,tot] = rowSums(agb.pft [,1:n.pft]) - lai.pft [,tot] = rowSums(lai.pft [,1:n.pft]) - #message(lai.pft) - #lai.pft - pft.use = which(colSums(agb.pft) > 0) + tot <- n.pft + 1 # <---- total cohort + agb.pft[, tot] <- rowSums(agb.pft[, 1:n.pft]) + lai.pft[, tot] <- rowSums(lai.pft[, 1:n.pft]) + # message(lai.pft) + # lai.pft + pft.use <- which(colSums(agb.pft) > 0) #------------------------------------------------------------------------------------------# - - - #==========================================================================================# + + + # ==========================================================================================# # Figures # - #==========================================================================================# + # ==========================================================================================# # Plot the LAI of all PFTs together. # #------------------------------------------------------------------------------------------# - pdf(paste(out.dir,"/","ED2_",year,"_Monthly_Mean_Output.pdf",sep=""),width=10,height=10, - onefile=TRUE) - + pdf(paste(out.dir, "/", "ED2_", year, "_Monthly_Mean_Output.pdf", sep = ""), + width = 10, height = 10, + onefile = TRUE + ) + #----- Find the limits and expand the range so the legend fits. ---------------------------# - lai.ylim = range(lai.pft,na.rm=TRUE) - lai.ylim[2] = lai.ylim[2] + 0.2 * (lai.ylim[2] - lai.ylim[1]) - lai.title = paste("Leaf Area Index","US-WCr",sep=" - ") # <--- Site needs to be dynamic - lai.xlab = "Month" - lai.ylab = expression(paste("LAI (",m^{2}~m^{-2},")")) #"LAI [m2/m2]" - - plot(x=when,y=lai.pft[,1],type="n",ylim=lai.ylim,xaxt="n" - ,main=lai.title,xlab=lai.xlab,ylab=lai.ylab) - - dev.off() - - - } # end for loop - - + lai.ylim <- range(lai.pft, na.rm = TRUE) + lai.ylim[2] <- lai.ylim[2] + 0.2 * (lai.ylim[2] - lai.ylim[1]) + lai.title <- paste("Leaf Area Index", "US-WCr", sep = " - ") # <--- Site needs to be dynamic + lai.xlab <- "Month" + lai.ylab <- expression(paste("LAI (", m^{ + 2 + } ~ m^{ + -2 + }, ")")) # "LAI [m2/m2]" - + plot( + x = when, y = lai.pft[, 1], type = "n", ylim = lai.ylim, xaxt = "n", + main = lai.title, xlab = lai.xlab, ylab = lai.ylab + ) + + dev.off() + } # end for loop } # end of function #----------------------------------------------------------------------------------------------------# diff --git a/models/ed/inst/pecan.ed2.diagnostics.R b/models/ed/inst/pecan.ed2.diagnostics.R index d5436e6920f..7686315c855 100644 --- a/models/ed/inst/pecan.ed2.diagnostics.R +++ b/models/ed/inst/pecan.ed2.diagnostics.R @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html @@ -15,16 +15,16 @@ # output then only plot -M-, -Y-, or -T- data. Needs to become flexible # # -- TODO: Allow choice of which output to plot? Make flexbile based on input -# -- TODO: Include some dynamic time options. E.g. Day, month, year on X-axis +# -- TODO: Include some dynamic time options. E.g. Day, month, year on X-axis # for temporal plots? 
# -- Allow for bi-variate plots? Maybe add this as another function/script file? -# -- TODO: Clean up and document code. +# -- TODO: Clean up and document code. #################################################################################################### #---------------- Close all devices and delete all variables. -------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics #--------------------------------------------------------------------------------------------------# @@ -33,22 +33,22 @@ library(hdf5) library(XML) #---------------- Import command arguments from shell ---------------------------------------------# -args = commandArgs(trailingOnly = TRUE) # import any needed arguments for the terminal -print(args) #---> Print command arguments in R output. For debugging. +args <- commandArgs(trailingOnly = TRUE) # import any needed arguments for the terminal +print(args) #---> Print command arguments in R output. For debugging. pecan.home <- Sys.getenv("PECANHOME") #<--- Import PEcAn home directory # ED2 diagnostic plot functions -source(paste(pecan.home,"rscripts/pecan.ed2.diag.plots.R",sep="")) +source(paste(pecan.home, "rscripts/pecan.ed2.diag.plots.R", sep = "")) # Time utilities. Stolen from Harvard group. -source(paste(pecan.home,"rscripts/timeutils.R",sep="")) +source(paste(pecan.home, "rscripts/timeutils.R", sep = "")) #--------------------------------------------------------------------------------------------------# #---------------- Get model run location ----------------------------------------------------------# -if (args[1]=="pwd"){ - model_run=paste(getwd(),"/",sep="") # if set as pwd -}else{ - model_run=paste(args[1],"/",sep="") # if full path given +if (args[1] == "pwd") { + model_run <- paste(getwd(), "/", sep = "") # if set as pwd +} else { + model_run <- paste(args[1], "/", sep = "") # if full path given } #--------------------------------------------------------------------------------------------------# @@ -56,27 +56,27 @@ if (args[1]=="pwd"){ #---------------- Setup script output(s) ----------------------------------------------------------# # Info: Generate output folder for diagnostic plots. Will create folder if it doesn't already # exist. -output_dir = paste(model_run,"/ED2_Diagnostics/",sep="") -if (! file.exists(output_dir)) dir.create(output_dir) +output_dir <- paste(model_run, "/ED2_Diagnostics/", sep = "") +if (!file.exists(output_dir)) dir.create(output_dir) #--------------------------------------------------------------------------------------------------# #---------------- Read ED2IN or Config File -------------------------------------------------------# -print('******************** Reading ED2IN File *********************') +print("******************** Reading ED2IN File *********************") # Info: SET THIS PART UP! 
-#if (args[]=="xml"){ -# }else{ -#} +# if (args[]=="xml"){ +# }else{ +# } # ED2IN filename -if (args[2]=="-f"){ - ED2IN_fn = grep(list.files(),pattern='ED2IN',value=TRUE) -}else{ - ED2IN_fn = args[2] +if (args[2] == "-f") { + ED2IN_fn <- grep(list.files(), pattern = "ED2IN", value = TRUE) +} else { + ED2IN_fn <- args[2] } #---- Import ED2IN file -ED2IN = readLines(ED2IN_fn) +ED2IN <- readLines(ED2IN_fn) #--------------------------------------------------------------------------------------------------# @@ -84,167 +84,169 @@ ED2IN = readLines(ED2IN_fn) # Info: INFO HERE # Temporarily turn off warning messages when readin ED2IN -options(warn=-1) +options(warn = -1) #---- Get run type (NL%RUNTYPE) -RUNTYP = grep(ED2IN,pattern='NL%RUNTYPE',value=TRUE) -indices = gregexpr("'", RUNTYP)[[1]] -RUNTYP = substr(RUNTYP,indices[1]+1, indices[2]-1) -RUNTYP = RUNTYP[1] +RUNTYP <- grep(ED2IN, pattern = "NL%RUNTYPE", value = TRUE) +indices <- gregexpr("'", RUNTYP)[[1]] +RUNTYP <- substr(RUNTYP, indices[1] + 1, indices[2] - 1) +RUNTYP <- RUNTYP[1] #---- Get run ID (NL%EXPNME) -RUNID = grep(ED2IN,pattern='NL%EXPNME',value=TRUE) -indices = gregexpr("'", RUNID)[[1]] -RUNID = substr(RUNID,indices[1]+1, indices[2]-1) +RUNID <- grep(ED2IN, pattern = "NL%EXPNME", value = TRUE) +indices <- gregexpr("'", RUNID)[[1]] +RUNID <- substr(RUNID, indices[1] + 1, indices[2] - 1) #---- Get run length info. Start month. Can also get this from XML file -IMONTHA = grep(ED2IN,pattern='NL%IMONTHA',value=TRUE) -indices = gregexpr("[0-9]", IMONTHA)[[1]] -IMONTHA = substr(IMONTHA,indices[1], indices[2]) +IMONTHA <- grep(ED2IN, pattern = "NL%IMONTHA", value = TRUE) +indices <- gregexpr("[0-9]", IMONTHA)[[1]] +IMONTHA <- substr(IMONTHA, indices[1], indices[2]) #---- Get start date -IDATEA = grep(ED2IN,pattern='NL%IDATEA',value=TRUE) -indices = gregexpr("[0-9]", IDATEA)[[1]] -IDATEA = substr(IDATEA,indices[1], indices[2]) +IDATEA <- grep(ED2IN, pattern = "NL%IDATEA", value = TRUE) +indices <- gregexpr("[0-9]", IDATEA)[[1]] +IDATEA <- substr(IDATEA, indices[1], indices[2]) #---- Get start year -IYEARA = grep(ED2IN,pattern='NL%IYEARA',value=TRUE) -indices = gregexpr("[0-9][0-9][0-9][0-9]", IYEARA)[[1]] -IYEARA = substr(IYEARA,indices[1], indices[1]+4) +IYEARA <- grep(ED2IN, pattern = "NL%IYEARA", value = TRUE) +indices <- gregexpr("[0-9][0-9][0-9][0-9]", IYEARA)[[1]] +IYEARA <- substr(IYEARA, indices[1], indices[1] + 4) #---- Get final month -IMONTHZ = grep(ED2IN,pattern='NL%IMONTHZ',value=TRUE) -indices = gregexpr("[0-9]", IMONTHZ)[[1]] -IMONTHZ = substr(IMONTHZ,indices[1], indices[2]) +IMONTHZ <- grep(ED2IN, pattern = "NL%IMONTHZ", value = TRUE) +indices <- gregexpr("[0-9]", IMONTHZ)[[1]] +IMONTHZ <- substr(IMONTHZ, indices[1], indices[2]) #---- Get final day -IDATEZ = grep(ED2IN,pattern='NL%IDATEZ',value=TRUE) -indices = gregexpr("[0-9]", IDATEZ)[[1]] -IDATEZ = substr(IDATEZ,indices[1], indices[2]) +IDATEZ <- grep(ED2IN, pattern = "NL%IDATEZ", value = TRUE) +indices <- gregexpr("[0-9]", IDATEZ)[[1]] +IDATEZ <- substr(IDATEZ, indices[1], indices[2]) #---- Get final year -IYEARZ = grep(ED2IN,pattern='NL%IYEARZ',value=TRUE) -indices = gregexpr("[0-9][0-9][0-9][0-9]", IYEARZ)[[1]] -IYEARZ = substr(IYEARZ,indices[1], indices[1]+4) +IYEARZ <- grep(ED2IN, pattern = "NL%IYEARZ", value = TRUE) +indices <- gregexpr("[0-9][0-9][0-9][0-9]", IYEARZ)[[1]] +IYEARZ <- substr(IYEARZ, indices[1], indices[1] + 4) #---- Get location info (NL%POI_LAT,NLPOI_LON). 
Prob better to get from XML file
-POI_LAT = grep(ED2IN,pattern='NL%[PS]OI_LAT',value=TRUE)
-indices = gregexpr("[0-9]", POI_LAT)[[1]]
-POI_LAT = substr(POI_LAT,indices[1], indices[length(indices)])
-
-POI_LON = grep(ED2IN,pattern='NL%[PS]OI_LON',value=TRUE)
-neg1 = gregexpr("-", POI_LON)[[1]]
-if (neg1==-1){
-  indices = gregexpr("[0-9]", POI_LON)[[1]]
-  POI_LON = substr(POI_LON,indices[1], indices[length(indices)])
-}else{
-  neg2 = substr(POI_LON,neg1[1],neg1[1])
-  indices = gregexpr("[0-9]", POI_LON)[[1]]
-  POI_LON = substr(POI_LON,indices[1], indices[length(indices)])
-  POI_LON = paste(neg2,POI_LON,sep="")
+POI_LAT <- grep(ED2IN, pattern = "NL%[PS]OI_LAT", value = TRUE)
+indices <- gregexpr("[0-9]", POI_LAT)[[1]]
+POI_LAT <- substr(POI_LAT, indices[1], indices[length(indices)])
+
+POI_LON <- grep(ED2IN, pattern = "NL%[PS]OI_LON", value = TRUE)
+neg1 <- gregexpr("-", POI_LON)[[1]]
+if (neg1 == -1) {
+  indices <- gregexpr("[0-9]", POI_LON)[[1]]
+  POI_LON <- substr(POI_LON, indices[1], indices[length(indices)])
+} else {
+  neg2 <- substr(POI_LON, neg1[1], neg1[1])
+  indices <- gregexpr("[0-9]", POI_LON)[[1]]
+  POI_LON <- substr(POI_LON, indices[1], indices[length(indices)])
+  POI_LON <- paste(neg2, POI_LON, sep = "")
 }
 
 #---- Get output type settings (i.e. I, D, E, Y, T)
 #---- Site average inst.
-IFOUTPUT = grep(ED2IN,pattern='NL%IFOUTPUT',value=TRUE)
-indices = gregexpr("[0-9]", IFOUTPUT)[[1]]
-IFOUTPUT = substr(IFOUTPUT,indices[1], indices[length(indices)])
-IFOUTPUT=as.numeric(IFOUTPUT)
-IFOUTPUT[IFOUTPUT==0]="No"
-IFOUTPUT[IFOUTPUT==0]="Yes"
+IFOUTPUT <- grep(ED2IN, pattern = "NL%IFOUTPUT", value = TRUE)
+indices <- gregexpr("[0-9]", IFOUTPUT)[[1]]
+IFOUTPUT <- substr(IFOUTPUT, indices[1], indices[length(indices)])
+IFOUTPUT <- as.numeric(IFOUTPUT)
+IFOUTPUT[IFOUTPUT == 0] <- "No"
+IFOUTPUT[IFOUTPUT == 3] <- "Yes"
 #---- Daily mean output
-IDOUTPUT = grep(ED2IN,pattern='NL%IDOUTPUT',value=TRUE)
-indices = gregexpr("[0-9]", IDOUTPUT)[[1]]
-IDOUTPUT = substr(IDOUTPUT,indices[1], indices[length(indices)])
-IDOUTPUT=as.numeric(IDOUTPUT)
-IDOUTPUT[IDOUTPUT==0]="No"
-IDOUTPUT[IDOUTPUT==3]="Yes"
+IDOUTPUT <- grep(ED2IN, pattern = "NL%IDOUTPUT", value = TRUE)
+indices <- gregexpr("[0-9]", IDOUTPUT)[[1]]
+IDOUTPUT <- substr(IDOUTPUT, indices[1], indices[length(indices)])
+IDOUTPUT <- as.numeric(IDOUTPUT)
+IDOUTPUT[IDOUTPUT == 0] <- "No"
+IDOUTPUT[IDOUTPUT == 3] <- "Yes"
 #---- Monthly mean output
-IMOUTPUT = grep(ED2IN,pattern='NL%IMOUTPUT',value=TRUE)
-indices = gregexpr("[0-9]", IMOUTPUT)[[1]]
-IMOUTPUT = substr(IMOUTPUT,indices[1], indices[length(indices)])
-IMOUTPUT=as.numeric(IMOUTPUT)
-IMOUTPUT[IMOUTPUT==0]="No"
-IMOUTPUT[IMOUTPUT==3]="Yes"
+IMOUTPUT <- grep(ED2IN, pattern = "NL%IMOUTPUT", value = TRUE)
+indices <- gregexpr("[0-9]", IMOUTPUT)[[1]]
+IMOUTPUT <- substr(IMOUTPUT, indices[1], indices[length(indices)])
+IMOUTPUT <- as.numeric(IMOUTPUT)
+IMOUTPUT[IMOUTPUT == 0] <- "No"
+IMOUTPUT[IMOUTPUT == 3] <- "Yes"
 #---- Site average flux files
-ITOUTPUT = grep(ED2IN,pattern='NL%ITOUTPUT',value=TRUE)
-indices = gregexpr("[0-9]", ITOUTPUT)[[1]]
-ITOUTPUT = substr(ITOUTPUT,indices[1], indices[length(indices)])
-ITOUTPUT=as.numeric(ITOUTPUT)
-ITOUTPUT[ITOUTPUT==0]="No"
-ITOUTPUT[ITOUTPUT==3]="Yes"
+ITOUTPUT <- grep(ED2IN, pattern = "NL%ITOUTPUT", value = TRUE)
+indices <- gregexpr("[0-9]", ITOUTPUT)[[1]]
+ITOUTPUT <- substr(ITOUTPUT, indices[1], indices[length(indices)])
+ITOUTPUT <- as.numeric(ITOUTPUT)
+ITOUTPUT[ITOUTPUT == 0] <- "No"
+ITOUTPUT[ITOUTPUT == 3] <- "Yes"
 #---- Get output frequency
(NL%FRQFAST) -FRQFAST = grep(ED2IN,pattern='NL%FRQFAST',value=TRUE) -indices = gregexpr("[0-9]", FRQFAST)[[1]] -FRQFAST = substr(FRQFAST,indices[1], indices[length(indices)]) -FRQFAST=as.numeric(FRQFAST) +FRQFAST <- grep(ED2IN, pattern = "NL%FRQFAST", value = TRUE) +indices <- gregexpr("[0-9]", FRQFAST)[[1]] +FRQFAST <- substr(FRQFAST, indices[1], indices[length(indices)]) +FRQFAST <- as.numeric(FRQFAST) #---- Get output file locations (NL%FFILOUT & NL%SFILOUT). Can also get this from XML file -FFILOUT = grep(ED2IN,pattern='NL%FFILOUT',value=TRUE) -indices = gregexpr("'", FFILOUT)[[1]] -FFILOUT = substr(FFILOUT,indices[1]+1, indices[2]-1) -indices = tail(gregexpr("/", FFILOUT)[[1]], n=1) -FFILOUT = substr(FFILOUT,1,indices) +FFILOUT <- grep(ED2IN, pattern = "NL%FFILOUT", value = TRUE) +indices <- gregexpr("'", FFILOUT)[[1]] +FFILOUT <- substr(FFILOUT, indices[1] + 1, indices[2] - 1) +indices <- tail(gregexpr("/", FFILOUT)[[1]], n = 1) +FFILOUT <- substr(FFILOUT, 1, indices) #---- Get soil flag (NL%ISOILFLG) -ISOILFLG = grep(ED2IN,pattern='NL%ISOILFLG',value=TRUE) -indices = gregexpr("[0-9]", ISOILFLG)[[1]] -ISOILFLG = substr(ISOILFLG,indices[1], indices[1]) +ISOILFLG <- grep(ED2IN, pattern = "NL%ISOILFLG", value = TRUE) +indices <- gregexpr("[0-9]", ISOILFLG)[[1]] +ISOILFLG <- substr(ISOILFLG, indices[1], indices[1]) #---- Get number of soil layers (NL%NZG) -NZG = grep(ED2IN,pattern='NL%NZG',value=TRUE) -indices = gregexpr("[0-9]", NZG)[[1]] -NZG = substr(NZG,indices[1], indices[1]) +NZG <- grep(ED2IN, pattern = "NL%NZG", value = TRUE) +indices <- gregexpr("[0-9]", NZG)[[1]] +NZG <- substr(NZG, indices[1], indices[1]) #---- Get prescribed fraction of sand and clay (NL%SLXCLAY & NL%SLXSAND) -if (ISOILFLG=="2"){ - SLXCLAY = grep(ED2IN,pattern='NL%SLXCLAY',value=TRUE) - indices = gregexpr("[0.0-1.0]", SLXCLAY)[[1]] - SLXCLAY = substr(SLXCLAY,indices[1], indices[1]+3) - - SLXSAND = grep(ED2IN,pattern='NL%SLXSAND',value=TRUE) - indices = gregexpr("[0.0-1.0]", SLXSAND)[[1]] - SLXSAND = substr(SLXSAND,indices[1], indices[1]+3) +if (ISOILFLG == "2") { + SLXCLAY <- grep(ED2IN, pattern = "NL%SLXCLAY", value = TRUE) + indices <- gregexpr("[0.0-1.0]", SLXCLAY)[[1]] + SLXCLAY <- substr(SLXCLAY, indices[1], indices[1] + 3) + + SLXSAND <- grep(ED2IN, pattern = "NL%SLXSAND", value = TRUE) + indices <- gregexpr("[0.0-1.0]", SLXSAND)[[1]] + SLXSAND <- substr(SLXSAND, indices[1], indices[1] + 3) } # TODO: Could export more info from ED2IN or eventually XML file here. Useful for review in log # file # Turn back on warning messages. 
Off for irrelevant errors when parsing ED2IN file -options(warn=0) +options(warn = 0) #--------------------------------------------------------------------------------------------------# #---------------- Display run info to the screen --------------------------------------------------# -message('') +message("") message("*********************************************************") message("---- ED2 Run Info ----") message("*********************************************************") -message('') -message(paste("---- ED2IN: ",ED2IN_fn)) -message(paste("---- Run Type: ",RUNTYP)) -message(paste("---- Run ID: ",RUNID)) -message(paste("---- Run Start: ",IMONTHA,"/",IDATEA,"/",IYEARA,sep="")) -message(paste("---- Run End: ",IMONTHZ,"/",IDATEZ,"/",IYEARZ,sep="")) -message(paste("---- Run Location: ",POI_LAT," Latitude, ", - POI_LON," Longitude",sep="")) -message(paste("---- ED2 Model Output Directory: ",model_run,FFILOUT,sep="")) -message(paste("---- Instantaneous Output: ",IFOUTPUT,sep="")) -message(paste("---- Daily Mean Output: ",IDOUTPUT,sep="")) -message(paste("---- Monthly Mean Output: ",IMOUTPUT,sep="")) -message(paste("---- Instantaneous Fluxes Output: ",ITOUTPUT,sep="")) -message(paste("---- Output Frequency: ",FRQFAST,"s",sep="")) -message('') +message("") +message(paste("---- ED2IN: ", ED2IN_fn)) +message(paste("---- Run Type: ", RUNTYP)) +message(paste("---- Run ID: ", RUNID)) +message(paste("---- Run Start: ", IMONTHA, "/", IDATEA, "/", IYEARA, sep = "")) +message(paste("---- Run End: ", IMONTHZ, "/", IDATEZ, "/", IYEARZ, sep = "")) +message(paste("---- Run Location: ", POI_LAT, " Latitude, ", + POI_LON, " Longitude", + sep = "" +)) +message(paste("---- ED2 Model Output Directory: ", model_run, FFILOUT, sep = "")) +message(paste("---- Instantaneous Output: ", IFOUTPUT, sep = "")) +message(paste("---- Daily Mean Output: ", IDOUTPUT, sep = "")) +message(paste("---- Monthly Mean Output: ", IMOUTPUT, sep = "")) +message(paste("---- Instantaneous Fluxes Output: ", ITOUTPUT, sep = "")) +message(paste("---- Output Frequency: ", FRQFAST, "s", sep = "")) +message("") message("---------------------------------------------------------") -message('') -message(paste("---- Soil Layers: ",NZG,sep="")) -message(paste("---- Soil Clay Frac.: ", SLXCLAY,sep="")) -message(paste("---- Soil Sand Frac.: ", SLXSAND,sep="")) -message('') +message("") +message(paste("---- Soil Layers: ", NZG, sep = "")) +message(paste("---- Soil Clay Frac.: ", SLXCLAY, sep = "")) +message(paste("---- Soil Sand Frac.: ", SLXSAND, sep = "")) +message("") message("*********************************************************") # can print more info here @@ -254,75 +256,81 @@ Sys.sleep(2) # pause for 2 seconds #---------------- Setup output for diagnostic plot ------------------------------------------------# -# Info: Get the time info for the run. There are probably things here that can be elliminated. +# Info: Get the time info for the run. There are probably things here that can be elliminated. ####### First and last day to include in the plots (MM/DD/YYYY). 
####### -start_date = as.Date(paste(as.numeric(IYEARA),"/",as.numeric(IMONTHA), - "/",as.numeric(IDATEA),sep=""),format="%Y/%m/%d") -start_year = format(start_date, "%Y") -start_month = format(start_date, "%m") -start_day = format(start_date, "%d") -start = paste(start_month,"/",start_day,"/",start_year,sep="") - -end_date = as.Date(paste(as.numeric(IYEARZ),"/",as.numeric(IMONTHZ), - "/",as.numeric(IDATEZ),sep=""),format="%Y/%m/%d") -end_year = format(end_date, "%Y") -end_month = format(end_date, "%m") -end_day = format(end_date, "%d") -end = paste(end_month,"/",end_day,"/",end_year,sep="") - -out_day = 86400/FRQFAST -deltaT = 24/(86400/FRQFAST) # ---> sets the number of obs per day based on FRQFAST -daterange = seq(from=as.numeric(chron(start)),to=as.numeric(chron(end)),by=deltaT/24) -daterange = chron(daterange) -n.range = length(daterange) - -n.months = (as.numeric(IYEARZ)-as.numeric(IYEARA)-1)*12+ - as.numeric(IMONTHZ)+(12-as.numeric(IMONTHA)+1) -list.mths = nummonths(daterange) -list.days = numdays(daterange) -list.mins = minutes(daterange) -frac = hms2frac(daterange) -days = days(daterange) -times1 = rep(seq(0.0,23.5,0.5),each=1,times=1) -times2 = rep(seq(0.5,24,0.5),each=1,times=1) -dates = data.frame(Date=as.Date(daterange),mon=list.mths,doy=list.days, - fjday=frac) +start_date <- as.Date(paste(as.numeric(IYEARA), "/", as.numeric(IMONTHA), + "/", as.numeric(IDATEA), + sep = "" +), format = "%Y/%m/%d") +start_year <- format(start_date, "%Y") +start_month <- format(start_date, "%m") +start_day <- format(start_date, "%d") +start <- paste(start_month, "/", start_day, "/", start_year, sep = "") + +end_date <- as.Date(paste(as.numeric(IYEARZ), "/", as.numeric(IMONTHZ), + "/", as.numeric(IDATEZ), + sep = "" +), format = "%Y/%m/%d") +end_year <- format(end_date, "%Y") +end_month <- format(end_date, "%m") +end_day <- format(end_date, "%d") +end <- paste(end_month, "/", end_day, "/", end_year, sep = "") + +out_day <- 86400 / FRQFAST +deltaT <- 24 / (86400 / FRQFAST) # ---> sets the number of obs per day based on FRQFAST +daterange <- seq(from = as.numeric(chron(start)), to = as.numeric(chron(end)), by = deltaT / 24) +daterange <- chron(daterange) +n.range <- length(daterange) + +n.months <- (as.numeric(IYEARZ) - as.numeric(IYEARA) - 1) * 12 + + as.numeric(IMONTHZ) + (12 - as.numeric(IMONTHA) + 1) +list.mths <- nummonths(daterange) +list.days <- numdays(daterange) +list.mins <- minutes(daterange) +frac <- hms2frac(daterange) +days <- days(daterange) +times1 <- rep(seq(0.0, 23.5, 0.5), each = 1, times = 1) +times2 <- rep(seq(0.5, 24, 0.5), each = 1, times = 1) +dates <- data.frame( + Date = as.Date(daterange), mon = list.mths, doy = list.days, + fjday = frac +) #--------------------------------------------------------------------------------------------------# #---------------- Call plot functions based on setup ----------------------------------------------# # Info: MORE INFO HERE -message('') -message('') +message("") +message("") model_run FFILOUT -analysis = FFILOUT #paste(model_run,FFILOUT,sep="") +analysis <- FFILOUT # paste(model_run,FFILOUT,sep="") -if (ITOUTPUT=="Yes"){ - message('---- Plotting Site Averaged Fluxes (ITOUTPUT) ----') - site_fluxes(model_run,analysis,output_dir) +if (ITOUTPUT == "Yes") { + message("---- Plotting Site Averaged Fluxes (ITOUTPUT) ----") + site_fluxes(model_run, analysis, output_dir) } # END ITOUTPUT -message('') -#if (IDOUTPUT=="Yes"){ +message("") +# if (IDOUTPUT=="Yes"){ # message('---- Plotting Mean Daily (IDOUTPUT) ----') # 
plot_daily(model_run,analysis,output_dir) -#} - - -#} +# } -#if (diel==TRUE){ - # PLOT AVG DIEL CYCLE FOR SUMMER/WINTER -#} -#message('') -#if (IMOUTPUT=="Yes"){ +# } + +# if (diel==TRUE){ +# PLOT AVG DIEL CYCLE FOR SUMMER/WINTER +# } + +# message('') +# if (IMOUTPUT=="Yes"){ # message('---- Plotting Mean Monthly (IMOUTPUT) ----') # plot_monthly(model_run,analysis,output_dir) -#} +# } # PUT CODE HERE. Egs. plot_fast.r, plot_monthly.r, etc @@ -330,7 +338,7 @@ message('') #---------------- Script complete -----------------------------------------------------------------# -message('') +message("") message("*********************************************************") message("**************** PROCESSING COMPLETE! *******************") message("*********************************************************") diff --git a/models/ed/inst/plot.hdf5.R b/models/ed/inst/plot.hdf5.R index 159aa4d8702..16e2fbc9ba7 100644 --- a/models/ed/inst/plot.hdf5.R +++ b/models/ed/inst/plot.hdf5.R @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html @@ -13,23 +13,22 @@ library(hdf5) # COMMAND LINE ARGUMENTS # ---------------------------------------------------------------------- # arguments are --args year variable -args <- commandArgs(trailingOnly = TRUE) -year <- args[1] -xvar <- 'time' -yvar <- args[2] -width <- as.numeric(args[3]) +args <- commandArgs(trailingOnly = TRUE) +year <- args[1] +xvar <- "time" +yvar <- args[2] +width <- as.numeric(args[3]) height <- as.numeric(args[4]) -png <- args[5] +png <- args[5] # ---------------------------------------------------------------------- # SETUP # ---------------------------------------------------------------------- -settings.file <- Sys.getenv('PECANSETTINGS') +settings.file <- Sys.getenv("PECANSETTINGS") settings.xml <- XML::xmlParse(settings.file) settings <- XML::xmlToList(settings.xml) -plot.hdf5(year, yvar, xvar, width, height, filename, settings) ; - +plot.hdf5(year, yvar, xvar, width, height, filename, settings) # ---------------------------------------------------------------------- # CONSTANTS # ---------------------------------------------------------------------- @@ -42,7 +41,7 @@ umol2gc <- 1.0368 # ---------------------------------------------------------------------- # PRIVATE FUNCTIONS # ---------------------------------------------------------------------- -data.fetch2 <- function(var, start=start_day, end=end_day, values=values_day, fun=mean) { +data.fetch2 <- function(var, start = start_day, end = end_day, values = values_day, fun = mean) { # get a specific set of values from the HDF data # # Args: @@ -54,7 +53,7 @@ data.fetch2 <- function(var, start=start_day, end=end_day, values=values_day, fu # # Returns: # values extracted from the hdf data - + # find the variable in the data if (is.null(data[[var]])) { useme <- sprintf("AVG_%s", var) @@ -64,17 +63,17 @@ data.fetch2 <- function(var, start=start_day, end=end_day, values=values_day, fu } else { useme <- var } - + # some precomputations - lastval <- (values_day*(1+end-start)) - aggrlist <- list(rep(start:(end), each=values_day)) - + lastval <- (values_day * (1 + end - start)) + aggrlist <- 
list(rep(start:(end), each = values_day)) + # aggregate the data - val <- aggregate(data[[useme]][1:lastval], by=aggrlist, FUN=fun)$x + val <- aggregate(data[[useme]][1:lastval], by = aggrlist, FUN = fun)$x if (length(grep("TE?MP$", useme)) != 0) { - val[val<200] <- NA + val[val < 200] <- NA } - + # get the label metadata <- attr(data[[useme]], "Metadata") if (is.null(metadata)) { @@ -92,13 +91,13 @@ data.fetch2 <- function(var, start=start_day, end=end_day, values=values_day, fu attr(val, "lbl") <- paste(title, "in", units) } } - + # done return(val) } -# -data.fetch <- function(var, start=start_day, end=end_day, values=values_day, fun=mean) { +# +data.fetch <- function(var, start = start_day, end = end_day, values = values_day, fun = mean) { # get specific dataset either by computation or from dataset # # Args: @@ -115,26 +114,26 @@ data.fetch <- function(var, start=start_day, end=end_day, values=values_day, fun attr(val, "lbl") <- "Day of the year" return(val) } else if (var == "Reco") { - PLANT_RESP <- data.fetch2("AVG_PLANT_RESP", start, end, values, fun); - HTROPH_RESP <- data.fetch2("AVG_HTROPH_RESP", start, end, values, fun); - val <- (PLANT_RESP + HTROPH_RESP) * umol2gc + PLANT_RESP <- data.fetch2("AVG_PLANT_RESP", start, end, values, fun) + HTROPH_RESP <- data.fetch2("AVG_HTROPH_RESP", start, end, values, fun) + val <- (PLANT_RESP + HTROPH_RESP) * umol2gc attr(val, "lbl") <- "unknown" return(val) } else if (var == "NPP") { - GPP <- data.fetch2("AVG_GPP", start, end, values, fun); - PLANT_RESP <- data.fetch2("AVG_PLANT_RESP", start, end, values, fun); - val <- (GPP - PLANT_RESP) * umol2gc + GPP <- data.fetch2("AVG_GPP", start, end, values, fun) + PLANT_RESP <- data.fetch2("AVG_PLANT_RESP", start, end, values, fun) + val <- (GPP - PLANT_RESP) * umol2gc attr(val, "lbl") <- "unknown" return(val) } else if (var == "NEE") { - GPP <- data.fetch2("AVG_GPP", start, end, values, fun); - PLANT_RESP <- data.fetch2("AVG_PLANT_RESP", start, end, values, fun); - HTROPH_RESP <- data.fetch2("AVG_HTROPH_RESP", start, end, values, fun); - val <- (GPP - (PLANT_RESP + HTROPH_RESP)) * umol2gc + GPP <- data.fetch2("AVG_GPP", start, end, values, fun) + PLANT_RESP <- data.fetch2("AVG_PLANT_RESP", start, end, values, fun) + HTROPH_RESP <- data.fetch2("AVG_HTROPH_RESP", start, end, values, fun) + val <- (GPP - (PLANT_RESP + HTROPH_RESP)) * umol2gc attr(val, "lbl") <- "unknown" return(val) } else { - return(data.fetch2(var, start, end, values, fun)); + return(data.fetch2(var, start, end, values, fun)) } } @@ -157,70 +156,69 @@ data.fetch <- function(var, start=start_day, end=end_day, values=values_day, fun ##' @param the height of the image generated, default is 600 pixels. ##' @param filename is the name of the file name that is geneated. ##' @param settings the pecan.xml file loaded. 
-plot.hdf5 <- function(year, yvar, xvar='time', width=800, height=600, filename, settings) { - # find out the first/last day of the plot - start_date <- as.Date(settings$run$start.date) - start_year <- format(start_date, "%Y") - end_date <- as.Date(settings$run$end.date) - end_year <- format(end_date, "%Y") - if (year == start_year) { - start_day <- as.numeric(format(start_date, "%j")) - 1 - } else { - start_day <- 0 - } - if (year == end_year) { - end_day <- as.numeric(format(end_date, "%j")) - 1 - } else { - end_day <- as.numeric(format(as.Date(sprintf("%s-12-31", year)), "%j")) - 1 - } - - # find the Tower file - filename <- list.files(settings$host$outdir, full.names=TRUE,pattern=paste('.*-T-', year, '-.*.h5', sep=''))[1] - data <- hdf5load(filename, load = FALSE) - - # compute variables - xval_mean <- data.fetch(xvar, fun=mean) - xval_max <- data.fetch(xvar, fun=max) - xval_min <- data.fetch(xvar, fun=min) - yval_mean <- data.fetch(yvar, fun=mean) - yval_max <- data.fetch(yvar, fun=max) - yval_min <- data.fetch(yvar, fun=min) - - # setup plot (needs to be done before removing of NA since that removes attr as well). - png(filename=filename, width=width, height=height) - plot.new() - title(xlab=attr(xval_mean, "lbl")) - title(ylab=attr(yval_mean, "lbl")) - if (xvar == "time") { - title(main=paste(yvar)) - } else { - title(main=paste(xvar, "VS", yvar)) - } - - # remove all NA's - removeme <- unique(c(which(is.na(xval_min)), which(is.na(yval_min)), which(is.na(xval_mean)), which(is.na(yval_mean)), which(is.na(xval_max)), which(is.na(yval_max)))) - if (length(removeme) > 0) { - xval_mean <- xval_mean[-removeme] - xval_max <- xval_max[-removeme] - xval_min <- xval_min[-removeme] - yval_mean <- yval_mean[-removeme] - yval_max <- yval_max[-removeme] - yval_min <- yval_min[-removeme] - } - - # combine - xvals <- c(xval_max, rev(xval_min)) - yvals <- c(yval_max, rev(yval_min)) - - # plot actual data - plot.window(xlim=c(min(xvals), max(xvals)), ylim=c(min(yvals), max(yvals))) - polygon(c(xval_max, rev(xval_min)), c(yval_max, rev(yval_min)), col="gray", border="black") - points(xval_mean, yval_mean, col="black", pch=20) - - # draw axis and box - axis(1) - axis(2) - box() - dev.off() -} +plot.hdf5 <- function(year, yvar, xvar = "time", width = 800, height = 600, filename, settings) { + # find out the first/last day of the plot + start_date <- as.Date(settings$run$start.date) + start_year <- format(start_date, "%Y") + end_date <- as.Date(settings$run$end.date) + end_year <- format(end_date, "%Y") + if (year == start_year) { + start_day <- as.numeric(format(start_date, "%j")) - 1 + } else { + start_day <- 0 + } + if (year == end_year) { + end_day <- as.numeric(format(end_date, "%j")) - 1 + } else { + end_day <- as.numeric(format(as.Date(sprintf("%s-12-31", year)), "%j")) - 1 + } + # find the Tower file + filename <- list.files(settings$host$outdir, full.names = TRUE, pattern = paste(".*-T-", year, "-.*.h5", sep = ""))[1] + data <- hdf5load(filename, load = FALSE) + + # compute variables + xval_mean <- data.fetch(xvar, fun = mean) + xval_max <- data.fetch(xvar, fun = max) + xval_min <- data.fetch(xvar, fun = min) + yval_mean <- data.fetch(yvar, fun = mean) + yval_max <- data.fetch(yvar, fun = max) + yval_min <- data.fetch(yvar, fun = min) + + # setup plot (needs to be done before removing of NA since that removes attr as well). 
+ png(filename = filename, width = width, height = height) + plot.new() + title(xlab = attr(xval_mean, "lbl")) + title(ylab = attr(yval_mean, "lbl")) + if (xvar == "time") { + title(main = paste(yvar)) + } else { + title(main = paste(xvar, "VS", yvar)) + } + + # remove all NA's + removeme <- unique(c(which(is.na(xval_min)), which(is.na(yval_min)), which(is.na(xval_mean)), which(is.na(yval_mean)), which(is.na(xval_max)), which(is.na(yval_max)))) + if (length(removeme) > 0) { + xval_mean <- xval_mean[-removeme] + xval_max <- xval_max[-removeme] + xval_min <- xval_min[-removeme] + yval_mean <- yval_mean[-removeme] + yval_max <- yval_max[-removeme] + yval_min <- yval_min[-removeme] + } + + # combine + xvals <- c(xval_max, rev(xval_min)) + yvals <- c(yval_max, rev(yval_min)) + + # plot actual data + plot.window(xlim = c(min(xvals), max(xvals)), ylim = c(min(yvals), max(yvals))) + polygon(c(xval_max, rev(xval_min)), c(yval_max, rev(yval_min)), col = "gray", border = "black") + points(xval_mean, yval_mean, col = "black", pch = 20) + + # draw axis and box + axis(1) + axis(2) + box() + dev.off() +} diff --git a/models/ed/inst/rewrite.config.R b/models/ed/inst/rewrite.config.R index 431d21209fa..31f99e00164 100644 --- a/models/ed/inst/rewrite.config.R +++ b/models/ed/inst/rewrite.config.R @@ -1,23 +1,32 @@ -#try(install.packages("~/Projects/pecan/models/ed/", repos=NULL)) -#try(detach('package:PEcAn.ED2', unload=TRUE)) +# try(install.packages("~/Projects/pecan/models/ed/", repos=NULL)) +# try(detach('package:PEcAn.ED2', unload=TRUE)) library(PEcAn.ED2) # Define settings list settings <- list() settings$model$revision <- "git" -settings$model$config.header <- '' +settings$model$config.header <- "" -settings$constants <- list(pft = list(name = "Optics.Temperate_Early_Hardwood", - constants = list(SLA = 999))) +settings$constants <- list(pft = list( + name = "Optics.Temperate_Early_Hardwood", + constants = list(SLA = 999) +)) # Set test trait values -trait.values <- list(pft = list(name = "Optics.Temperate_Early_Hardwood", - orient_factor = 999), - pft = list(name = "Optics.Temperate_Mid_Hardwood", - mort1 = 999), - pft = list(name = "Optics.Temperate_Late_Hardwood", - leaf_reflect_vis = 999) - ) +trait.values <- list( + pft = list( + name = "Optics.Temperate_Early_Hardwood", + orient_factor = 999 + ), + pft = list( + name = "Optics.Temperate_Mid_Hardwood", + mort1 = 999 + ), + pft = list( + name = "Optics.Temperate_Late_Hardwood", + leaf_reflect_vis = 999 + ) +) test.xml <- write.config.xml.ED2(settings, trait.values) print(test.xml) diff --git a/models/ed/inst/test.read.restart.R b/models/ed/inst/test.read.restart.R index 45912511cff..d975f997cdb 100644 --- a/models/ed/inst/test.read.restart.R +++ b/models/ed/inst/test.read.restart.R @@ -9,13 +9,15 @@ settings <- PEcAn.settings::read.settings(settings_file) stop.time <- as.POSIXlt("2004-06-07", tz = "UTC") rred <- function(runid) { - fc <- read.restart.ED2(outdir = outdir, - runid = runid, - stop.time = stop.time, - settings = settings, - var.names = "AGB", - params = NULL) - return(fc) + fc <- read.restart.ED2( + outdir = outdir, + runid = runid, + stop.time = stop.time, + settings = settings, + var.names = "AGB", + params = NULL + ) + return(fc) } forecast_list <- lapply(runid_vec, rred) diff --git a/models/ed/inst/test.timeutils.R b/models/ed/inst/test.timeutils.R index 7afda891ad2..f9ea9bd0a83 100644 --- a/models/ed/inst/test.timeutils.R +++ b/models/ed/inst/test.timeutils.R @@ -1,21 +1,24 @@ 
#------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- -test_that("time utils work",{ - eng.months <- c("jan", "feb", "mar", "apr", "may", "jun", - "jul", "aug", "sep", "oct", "nov", "dec") - port.months <- c("jan", "fev", "mar", "abr", "mai", "jun", - "jul", "ago", "set", "out", "nov", "dez") - expect_equal(mon2mmm(1), 'jan') - expect_equal(mmm2mon('jan'), 1) +test_that("time utils work", { + eng.months <- c( + "jan", "feb", "mar", "apr", "may", "jun", + "jul", "aug", "sep", "oct", "nov", "dec" + ) + port.months <- c( + "jan", "fev", "mar", "abr", "mai", "jun", + "jul", "ago", "set", "out", "nov", "dez" + ) + expect_equal(mon2mmm(1), "jan") + expect_equal(mmm2mon("jan"), 1) expect_equal(mon2mmm(1:12), eng.months) expect_equal(mon2mmm(1:12, "Portuguese"), port.months) expect_equal(mmm2mon(port.months, "Portuguese"), 1:12) expect_equal(mmm2mon(eng.months), 1:12) }) - diff --git a/models/ed/inst/test.write.restart.ED2.R b/models/ed/inst/test.write.restart.ED2.R index b0795a44dfe..4fa70926c63 100644 --- a/models/ed/inst/test.write.restart.ED2.R +++ b/models/ed/inst/test.write.restart.ED2.R @@ -10,12 +10,14 @@ settings <- PEcAn.settings::read.settings(settings_file) start.time <- as.POSIXlt("2004-06-07", tz = "UTC") stop.time <- as.POSIXlt("2004-06-10", tz = "UTC") -forecast <- read_restart.ED2(outdir = outdir, - runid = runid, - stop.time = stop.time, - settings = settings, - var.names = "AGB", - params = NULL) +forecast <- read_restart.ED2( + outdir = outdir, + runid = runid, + stop.time = stop.time, + settings = settings, + var.names = "AGB", + params = NULL +) npft <- length(forecast) @@ -23,11 +25,13 @@ set.seed(666) new.state <- rnorm(npft, forecast, 0.001) names(new.state) <- names(forecast) -write_restart <- write_restart.ED2(outdir = outdir, - runid = runid, - start.time = start.time, - stop.time = stop.time, - settings = settings, - new.state = new.state, - new.params = NULL, - inputs) +write_restart <- write_restart.ED2( + outdir = outdir, + runid = runid, + start.time = start.time, + stop.time = stop.time, + settings = settings, + new.state = new.state, + new.params = NULL, + inputs +) diff --git a/models/ed/inst/timeutils.R b/models/ed/inst/timeutils.R index c3fd6435f5a..9fd323b85c0 100644 --- a/models/ed/inst/timeutils.R +++ b/models/ed/inst/timeutils.R @@ -1,60 +1,60 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- ############################################################################################ # -# Set of time utilities for manipulating time variables -# and creating time axes for plots +# Set of time utilities for manipulating time variables +# and creating time axes for plots # ############################################################################################ -#data(time.constants) +# data(time.constants) -#==========================================================================================# -#==========================================================================================# +# ==========================================================================================# +# ==========================================================================================# # Function that determines the number of days in a given month. # #------------------------------------------------------------------------------------------# -daymax = function(month, year) { - as.integer(days_in_month(as.Date(paste(year, month, 1, sep='-')))) +daymax <- function(month, year) { + as.integer(days_in_month(as.Date(paste(year, month, 1, sep = "-")))) } # daymax -#==========================================================================================# -#==========================================================================================# +# ==========================================================================================# +# ==========================================================================================# -#==========================================================================================# -#==========================================================================================# -# +# ==========================================================================================# +# ==========================================================================================# +# #------------------------------------------------------------------------------------------# ##' convert month abbrev. to numeric ##' ##' Function that determines the number of the month given the character name. 
 ##' @title mmm2mon
 ##' @param mmm three letter character string for month
-##' @param lang currently english and portugese
-##' @return month as three letter
-mmm2mon = function(mmm,lang="English"){
-  lang = tolower(lang)
-  if (lang == "english"){
-    m3l = c("jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec")
-  }else if(lang == "portuguese"){
-    m3l = c("jan","fev","mar","abr","mai","jun","jul","ago","set","out","nov","dez")
-  }#end if
-
-  mmmloc = tolower(substring(as.character(mmm),1,3))
-  monout = match(mmmloc,m3l)
+##' @param lang currently English and Portuguese
+##' @return numeric month
+mmm2mon <- function(mmm, lang = "English") {
+  lang <- tolower(lang)
+  if (lang == "english") {
+    m3l <- c("jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec")
+  } else if (lang == "portuguese") {
+    m3l <- c("jan", "fev", "mar", "abr", "mai", "jun", "jul", "ago", "set", "out", "nov", "dez")
+  } # end if
+
+  mmmloc <- tolower(substring(as.character(mmm), 1, 3))
+  monout <- match(mmmloc, m3l)
   return(monout)
-} #end function
-#==========================================================================================#
+} # end function
+# ==========================================================================================#
-#==========================================================================================#
-#==========================================================================================#
+# ==========================================================================================#
+# ==========================================================================================#
-#==========================================================================================#
-#==========================================================================================#
+# ==========================================================================================#
+# ==========================================================================================#
 #
 #------------------------------------------------------------------------------------------#
 ##' Convert numeric month to 3-letter abbrev.
@@ -68,587 +68,618 @@ mmm2mon = function(mmm,lang="English"){
 ##' @examples
 ##' mon2mmm(1)
 ##' mon2mmm(1:3)
-mon2mmm = function(mon,lang="English"){
-  lang = tolower(lang)
-  if (lang == "english"){
-    m3l = c("jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec")
-  }else if(lang == "portuguese"){
-    m3l = c("jan","fev","mar","abr","mai","jun","jul","ago","set","out","nov","dez")
-  }#end if
-
-  monout = m3l[mon]
+mon2mmm <- function(mon, lang = "English") {
+  lang <- tolower(lang)
+  if (lang == "english") {
+    m3l <- c("jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec")
+  } else if (lang == "portuguese") {
+    m3l <- c("jan", "fev", "mar", "abr", "mai", "jun", "jul", "ago", "set", "out", "nov", "dez")
+  } # end if
+
+  monout <- m3l[mon]
   return(monout)
-} #end function
-#==========================================================================================#
-#==========================================================================================#
+} # end function
+# ==========================================================================================#
+# ==========================================================================================#
-#==========================================================================================#
-#==========================================================================================#
+# ==========================================================================================#
+# ==========================================================================================#
 ##' Convert a chron object to numeric years.
##' ##' @title numyears ##' @param when chron object ##' @return numeric year ##' @author Shawn Serbin -numyears = function(when){ - yrs = years(when) - lyrs = levels(yrs) - yrout = as.numeric(lyrs[match(yrs,lyrs)]) - return(yrout) -}#end function -#==========================================================================================# -#==========================================================================================# +numyears <- function(when) { + yrs <- years(when) + lyrs <- levels(yrs) + yrout <- as.numeric(lyrs[match(yrs, lyrs)]) + return(yrout) +} # end function +# ==========================================================================================# +# ==========================================================================================# -#==========================================================================================# -#==========================================================================================# +# ==========================================================================================# +# ==========================================================================================# # Function that converts a chron object to numeric months. # #------------------------------------------------------------------------------------------# -nummonths = function(when){ - mos = months(when) - lmos = levels(mos) - moout = match(mos,lmos) - return(moout) -}#end function -#==========================================================================================# -#==========================================================================================# +nummonths <- function(when) { + mos <- months(when) + lmos <- levels(mos) + moout <- match(mos, lmos) + return(moout) +} # end function +# ==========================================================================================# +# ==========================================================================================# -#==========================================================================================# -#==========================================================================================# +# ==========================================================================================# +# ==========================================================================================# # Function that converts a chron object to numeric days. # #------------------------------------------------------------------------------------------# -numdays = function(when){ - dys = days(when) - ldys = levels(dys) - dyout = match(dys,ldys) - return(dyout) -} #end function -#==========================================================================================# -#==========================================================================================# +numdays <- function(when) { + dys <- days(when) + ldys <- levels(dys) + dyout <- match(dys, ldys) + return(dyout) +} # end function +# ==========================================================================================# +# ==========================================================================================# -#==========================================================================================# -#==========================================================================================# +# ==========================================================================================# +# ==========================================================================================# # Function that returns the dates as characters. 
# #------------------------------------------------------------------------------------------# -chardates = function(when){ - mymonth = substring(100 + nummonths(when),2,3) - myday = substring(100 + numdays (when),2,3) - myyear = substring(10000 + numyears (when),2,5) - mydate = paste(mymonth,myday,myyear,sep="/") +chardates <- function(when) { + mymonth <- substring(100 + nummonths(when), 2, 3) + myday <- substring(100 + numdays(when), 2, 3) + myyear <- substring(10000 + numyears(when), 2, 5) + mydate <- paste(mymonth, myday, myyear, sep = "/") return(mydate) -} #end function -#==========================================================================================# -#==========================================================================================# +} # end function +# ==========================================================================================# +# ==========================================================================================# -#==========================================================================================# -#==========================================================================================# +# ==========================================================================================# +# ==========================================================================================# # Function that returns the dates as characters. # #------------------------------------------------------------------------------------------# -label.dates = function(when,add.hours=TRUE){ - mymonth = substring(100 + nummonths(when),2,3) - myday = substring(100 + numdays (when),2,3) - myyear = substring(10000 + numyears (when),2,5) - mydate = paste(myyear,mymonth,myday,sep="-") - - if (add.hours){ - mytime = paste(substring(100 + hours (when),2,3) - ,substring(100 + minutes(when),2,3) - ,substring(100 + seconds(when),2,3) - ,sep="") - mylabel = paste(mydate,mytime,sep="-") - }else{ - mylabel = mydate - }#end if +label.dates <- function(when, add.hours = TRUE) { + mymonth <- substring(100 + nummonths(when), 2, 3) + myday <- substring(100 + numdays(when), 2, 3) + myyear <- substring(10000 + numyears(when), 2, 5) + mydate <- paste(myyear, mymonth, myday, sep = "-") + + if (add.hours) { + mytime <- paste(substring(100 + hours(when), 2, 3), + substring(100 + minutes(when), 2, 3), + substring(100 + seconds(when), 2, 3), + sep = "" + ) + mylabel <- paste(mydate, mytime, sep = "-") + } else { + mylabel <- mydate + } # end if return(mylabel) -} #end function -#==========================================================================================# -#==========================================================================================# +} # end function +# ==========================================================================================# +# ==========================================================================================# -#==========================================================================================# -#==========================================================================================# +# ==========================================================================================# +# ==========================================================================================# # Function that returns the times as characters. 
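For reference, the two formatters above (chardates() and label.dates()) return zero-padded character strings. Expected output for the same chron example value, shown for illustration only:

when <- chron("07/15/2004", "10:30:00")
chardates(when)                       # "07/15/2004"
label.dates(when)                     # "2004-07-15-103000"
label.dates(when, add.hours = FALSE)  # "2004-07-15"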
# #------------------------------------------------------------------------------------------# -chartimes = function(when){ - myhour = substring(100 + hours (when),2,3) - myminu = substring(100 + minutes(when),2,3) - myseco = substring(100 + seconds(when),2,3) - mytime = paste(myhour,myminu,myseco,sep=":") +chartimes <- function(when) { + myhour <- substring(100 + hours(when), 2, 3) + myminu <- substring(100 + minutes(when), 2, 3) + myseco <- substring(100 + seconds(when), 2, 3) + mytime <- paste(myhour, myminu, myseco, sep = ":") return(mytime) -} #end function -#==========================================================================================# -#==========================================================================================# +} # end function +# ==========================================================================================# +# ==========================================================================================# -#==========================================================================================# -#==========================================================================================# +# ==========================================================================================# +# ==========================================================================================# # Function that finds the fraction of the day. # #------------------------------------------------------------------------------------------# -hms2frac = function(when){ - thishour = hours (when) - thismin = minutes (when) - thissec = seconds (when) +hms2frac <- function(when) { + thishour <- hours(when) + thismin <- minutes(when) + thissec <- seconds(when) - elapsed = thishour / day.hr + thismin / day.min + thissec / day.sec - return(elapsed) -}#end function -#==========================================================================================# -#==========================================================================================# + elapsed <- thishour / day.hr + thismin / day.min + thissec / day.sec + return(elapsed) +} # end function +# ==========================================================================================# +# ==========================================================================================# -#==========================================================================================# -#==========================================================================================# +# ==========================================================================================# +# ==========================================================================================# # Function that finds the numeric version of the days. 
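chartimes() and hms2frac() complement the date helpers; hms2frac() relies on the package constants day.hr, day.min, and day.sec (24, 1440, and 86400). A sketch of the expected values (illustrative only; dayofyear() is the function restyled just below):

when <- chron("07/15/2004", "18:00:00")
chartimes(when)  # "18:00:00"
hms2frac(when)   # 0.75   (18 h into a 24 h day)
dayofyear(when)  # 197.75 (day 197 of the leap year 2004, plus the 0.75 day fraction)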
# #------------------------------------------------------------------------------------------# -dayofyear = function(when){ - offdays = c(0, 31,59,90,120,151,181,212,243,273,304,334,365) +dayofyear <- function(when) { + offdays <- c(0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) + + thisday <- numdays(when) + thismonth <- nummonths(when) + thisyear <- numyears(when) + thisfrac <- hms2frac(when) - thisday = numdays (when) - thismonth = nummonths(when) - thisyear = numyears (when) - thisfrac = hms2frac (when) - - addone = as.integer(thismonth > 2 & lubridate::leap_year(when)) + addone <- as.integer(thismonth > 2 & lubridate::leap_year(when)) - doy = thisday + offdays[thismonth] + addone + thisfrac - return(doy) -} #end function -#==========================================================================================# -#==========================================================================================# + doy <- thisday + offdays[thismonth] + addone + thisfrac + return(doy) +} # end function +# ==========================================================================================# +# ==========================================================================================# -#==========================================================================================# -#==========================================================================================# +# ==========================================================================================# +# ==========================================================================================# # This function appends several time-related variables for a given data frame. # #------------------------------------------------------------------------------------------# -alltimes = function(datin,lon,lat,ed21=TRUE,zeronight=FALSE,meanval=FALSE,imetavg=1 - ,nmean=120,...){ - #------ Copy the input data frame, and call the other functions. -----------------------# - datout = datin - datout$year = numyears (datout$when) - datout$month = nummonths(datout$when) - datout$day = numdays (datout$when) - datout$hour = hours (datout$when) - datout$minu = minutes (datout$when) - datout$today = dates (datout$when) - datout$tomonth = chron(paste(datout$month,1,datout$year,sep="/")) - datout$doy = dayofyear(datout$when) - zenith = ed.zen (when=datout$when,lon=lon,lat=lat,ed21=ed21 - ,zeronight=zeronight,meanval=meanval,imetavg=imetavg - ,nmean=nmean,...) - datout$cosz = zenith$cosz - datout$sunhgt = zenith$hgt - datout$nighttime = zenith$night - datout$daytime = zenith$day - datout$twilight = (! zenith$night) & (! zenith$day) - datout$notdaytime = ! zenith$day - - return(datout) -}#end function -#==========================================================================================# -#==========================================================================================# - - - - - - -#==========================================================================================# -#==========================================================================================# +alltimes <- function( + datin, lon, lat, ed21 = TRUE, zeronight = FALSE, meanval = FALSE, imetavg = 1, + nmean = 120, ...) { + #------ Copy the input data frame, and call the other functions. 
-----------------------# + datout <- datin + datout$year <- numyears(datout$when) + datout$month <- nummonths(datout$when) + datout$day <- numdays(datout$when) + datout$hour <- hours(datout$when) + datout$minu <- minutes(datout$when) + datout$today <- dates(datout$when) + datout$tomonth <- chron(paste(datout$month, 1, datout$year, sep = "/")) + datout$doy <- dayofyear(datout$when) + zenith <- ed.zen( + when = datout$when, lon = lon, lat = lat, ed21 = ed21, + zeronight = zeronight, meanval = meanval, imetavg = imetavg, + nmean = nmean, ... + ) + datout$cosz <- zenith$cosz + datout$sunhgt <- zenith$hgt + datout$nighttime <- zenith$night + datout$daytime <- zenith$day + datout$twilight <- (!zenith$night) & (!zenith$day) + datout$notdaytime <- !zenith$day + + return(datout) +} # end function +# ==========================================================================================# +# ==========================================================================================# + + + + + + +# ==========================================================================================# +# ==========================================================================================# # List of trimestral seasons. # #------------------------------------------------------------------------------------------# -season <<- function(when,add.year=FALSE){ - - - #----- Get the year and month. ---------------------------------------------------------# - year = numyears (when) - mon = nummonths(when) - #---------------------------------------------------------------------------------------# +season <<- function(when, add.year = FALSE) { + #----- Get the year and month. ---------------------------------------------------------# + year <- numyears(when) + mon <- nummonths(when) + #---------------------------------------------------------------------------------------# - #----- We don't give summer/winter, instead we make generic season names. --------------# - sidx = c( 4, 4, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4) - #---------------------------------------------------------------------------------------# + #----- We don't give summer/winter, instead we make generic season names. --------------# + sidx <- c(4, 4, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4) + #---------------------------------------------------------------------------------------# - #---------------------------------------------------------------------------------------# - # Assign the season names depending on the month and year. # - #---------------------------------------------------------------------------------------# - if (add.year){ - #----- Add year before the season. --------------------------------------------------# - seasout = paste(year ,substring(100+sidx[mon ],2,3),sep="") - #------------------------------------------------------------------------------------# + #---------------------------------------------------------------------------------------# + # Assign the season names depending on the month and year. # + #---------------------------------------------------------------------------------------# + if (add.year) { + #----- Add year before the season. --------------------------------------------------# + seasout <- paste(year, substring(100 + sidx[mon], 2, 3), sep = "") + #------------------------------------------------------------------------------------# - #----- December, January, and February have two years. 
------------------------------# - mm1 = mon %in% c(1,2) - seasout[mm1] = paste(year[mm1]-1,substring(100+sidx[mon[mm1]],2,3),sep="") - #------------------------------------------------------------------------------------# - }else{ - #----- No year to be tagged. --------------------------------------------------------# - seasout = sidx[mon] - #------------------------------------------------------------------------------------# - }#end if - #---------------------------------------------------------------------------------------# + #----- December, January, and February have two years. ------------------------------# + mm1 <- mon %in% c(1, 2) + seasout[mm1] <- paste(year[mm1] - 1, substring(100 + sidx[mon[mm1]], 2, 3), sep = "") + #------------------------------------------------------------------------------------# + } else { + #----- No year to be tagged. --------------------------------------------------------# + seasout <- sidx[mon] + #------------------------------------------------------------------------------------# + } # end if + #---------------------------------------------------------------------------------------# - #----- Return variable. ----------------------------------------------------------------# - return(seasout) - #---------------------------------------------------------------------------------------# -}#end for -#==========================================================================================# -#==========================================================================================# -#==========================================================================================# -#==========================================================================================# + #----- Return variable. ----------------------------------------------------------------# + return(seasout) + #---------------------------------------------------------------------------------------# +} # end for +# ==========================================================================================# +# ==========================================================================================# +# ==========================================================================================# +# ==========================================================================================# # This function creates a pretty time scale. It is loosely based on pretty, but here # # we make extensive use of the chron functions, and define the suitable scales in a # # different way as time has a non-decimal scale. # # The result is a list containing the levels, and nice labels for the plots. # #------------------------------------------------------------------------------------------# -pretty.time = function(when,n=10,...){ - - #----- Find the 1st and last time. -----------------------------------------------------# - whena = min(when,na.rm=TRUE) - whenz = max(when,na.rm=TRUE) - - #----- Table for accepted bases for months, hours, minutes, and seconds. ---------------# - base.months = c(1,2,3,4,6) - base.days = c(1,2,4,7,15) - base.hours = c(1,2,3,4,6,12) - base.minsec = c(1,2,5,10,15,20,30) - - #----- Convert time to seconds. --------------------------------------------------------# - when.sec = as.numeric(when) * day.sec - - #---------------------------------------------------------------------------------------# - # Find a first guess of the step size, so we decide whether to use years, months, # - # days, hours, minutes, or seconds. 
# - #---------------------------------------------------------------------------------------# - wstep.1st = mean(diff(pretty(when.sec,n))) - - if (wstep.1st == 0){ - myunit=NA - #---- Whatever, use just what comes out from the regular pretty. --------------------# - vlevels = chron(pretty(when,n)) - vlabels = as.character(vlevels) - padj = rep(0,times=length(vlabels)) - }else if(wstep.1st / yr.sec > 0.8){ - myunit="years" - #------------------------------------------------------------------------------------# - # Years are the best scale for the plots. # - #------------------------------------------------------------------------------------# - yrrange = numyears(when) - vlevels = pretty(yrrange,n) - vlevels = dates(x=paste(1,1,vlevels,sep="/")) - - vlabels = paste(months(vlevels),years(vlevels),sep="-") - padj = rep(0,times=length(vlabels)) - }else if(wstep.1st / (30. * day.sec) > 0.8){ - myunit="months" - #------------------------------------------------------------------------------------# - # Months are the best scale for the plots. # - #------------------------------------------------------------------------------------# - #----- Find the time step that is the closest to the base. --------------------------# - wstep = wstep.1st / (30. * day.sec) - whichbase = base.months[which.min(abs(wstep-base.months))] - - #----- Find the list of years to plot. ----------------------------------------------# - allyears = numyears(when) - yeara = min(allyears,na.rm=TRUE) - yearz = max(allyears,na.rm=TRUE)+1 - vlevels = seq.dates(from = paste(1,1,yeara,sep="/") - ,to = paste(1,1,yearz,sep="/") - ,by = "months") - mon1st = nummonths(vlevels) - monlevs = seq(from=1,to=12,by=whichbase) - - #----- Find the limits that will keep the labels not too far from the data. ---------# - wlaba = dates(paste(nummonths(whena),1,numyears(whena),sep="/")) - monz = nummonths(whenz) %% 12 + 1 - yearz = numyears(whenz) + as.integer(monz == 1) - wlabz = dates(paste(monz,1,yearz,sep="/")) - sel = ( mon1st %in% monlevs - & vlevels >= min(wlaba,na.rm=TRUE) - & vlevels <= max(wlabz,na.rm=TRUE) ) - vlevels = dates(vlevels[sel],out.format="m/d/year") - vlabels = paste(months(vlevels),years(vlevels),sep="-") - padj = rep(0,times=length(vlabels)) - - }else if(wstep.1st / day.sec > 0.8){ - myunit="days" - #------------------------------------------------------------------------------------# - # Days are the best scale for the plots, but we keep them tethered to months, # - # even if the grid becomes slightly irregular. # - #------------------------------------------------------------------------------------# - #----- Find the time step that is the closest to the base. --------------------------# - wstep = wstep.1st / day.sec - whichbase = base.days[which.min(abs(wstep-base.days))] - #----- Find the list of years to plot. ----------------------------------------------# - allyears = numyears(when) - yeara = min(allyears,na.rm=TRUE) - yearz = max(allyears,na.rm=TRUE)+1 - #------------------------------------------------------------------------------------# - # Impose the list of months to be from January to December, we will trim the # - # numbers later. # - #------------------------------------------------------------------------------------# - montha = 1 - monthz = 12 - #------------------------------------------------------------------------------------# - # Impose the list of months to be from January to December, we will trim the # - # numbers later. 
# - #------------------------------------------------------------------------------------# - daylevs=seq(from=1,to=31-whichbase+1,by=whichbase) - #----- First guess for the levels. --------------------------------------------------# - vlevels = seq.dates(from = paste(1,1,yeara,sep="/") - ,to = paste(1,1,yearz,sep="/") - ,by = "days") - day1st = numdays(vlevels) - - #----- Find the limits that will keep the labels not too far from the data. ---------# - wlaba = dates(whena) - dayz = numdays(whenz) %% daymax(nummonths(whenz),numyears(whenz)) + 1 - monz = 1 + (nummonths(whenz) - 1 + as.integer(dayz==1)) %% 12 - yearz = numyears(whenz) + as.integer(monz == 1) - wlabz = dates(paste(monz,dayz,yearz,sep="/")) - sel = ( day1st %in% daylevs - & vlevels >= min(wlaba,na.rm=TRUE) - & vlevels <= max(wlabz,na.rm=TRUE) ) - vlevels = dates(vlevels[sel],out.format="m/d/y") - vlabels = paste(months(vlevels),days(vlevels),sep="/") - - padj = rep(0,times=length(vlabels)) - - sel = vlevels == vlevels[1] | (numdays(vlevels) == 1 & nummonths(vlevels) == 1) - vlabels[sel] = paste(months(vlevels[sel]),"/",days(vlevels[sel]),"\n" - ,years(vlevels[sel]),sep="") - padj[sel] = 0.5 - }else if(wstep.1st / hr.sec > 0.8){ - myunit="hours" - #------------------------------------------------------------------------------------# - # Hours are the best scale for the plots. # - #------------------------------------------------------------------------------------# - #----- Find the time step that is the closest to the base. --------------------------# - wstep = wstep.1st / hr.sec - whichbase = base.hours[which.min(abs(wstep-base.hours))] - #----- Find the list of days to plot. -----------------------------------------------# - when1st = dates(min(when ,na.rm=TRUE)) - whenlast = dates(max(when+1,na.rm=TRUE)) - mydates = seq.dates(from=when1st,to=whenlast,by="days") - mytimes = times(seq(from=0,to=day.sec-1,by=whichbase*hr.sec)) / day.sec - ndates = length(mydates) - ntimes = length(mytimes) - #----- First guess for the levels. --------------------------------------------------# - vlevels = chron(dates=rep(x=mydates,each=ntimes),times=rep(x=mytimes,times=ndates)) - wlaba = chron(dates=paste(nummonths(whena),numdays(whena),numyears(whena),sep="/"), - times=paste(hours(whena),0,0,sep=":")) - hourz = (hours(whenz) + 1) %% 24 - d2831 = daymax(nummonths(whenz),numyears(whenz)) - dayz = (numdays(whenz) - 1 + as.integer(hourz == 0)) %% d2831 + 1 - monz = (nummonths(whenz) - 1 + as.integer(dayz == 1)) %% 12 + 1 - yearz = numyears(whenz) + as.integer(monz == 1) - wlabz = chron(dates=paste(monz,dayz,yearz,sep="/"),times=paste(hourz,0,0,sep=":")) - sel = ( vlevels >= min(wlaba,na.rm=TRUE) - & vlevels <= max(wlabz,na.rm=TRUE) ) - - #------------------------------------------------------------------------------------# - # Make the labels, and put day and month information only on the first time of # - # the day, and all information in the second time, and the first time of the year. # - #------------------------------------------------------------------------------------# - vlabels = paste(substring(100+hours(vlevels),2,3) - ,substring(100+minutes(vlevels),2,3),sep=":") - padj = rep(0,times=length(vlabels)) - #----- First time of the day. -------------------------------------------------------# - sel = hours(vlevels) == 0 - vlabels[sel] = paste(substring(100+hours(vlevels[sel]),2,3),":" - ,substring(100+minutes(vlevels[sel]),2,3),"\n" - ,months(vlevels[sel]),"-",days(vlevels[sel]),sep="") - padj[sel] = 0.5 - #----- First time of the year. 
------------------------------------------------------# - sel = ( vlevels == vlevels[1] - | ( nummonths(vlevels) == 1 & numdays(vlevels) == 1 - & hours(vlevels) == 0 )) - vlabels[sel] = paste(substring(100+hours(vlevels[sel]),2,3),":" - ,substring(100+minutes(vlevels[sel]),2,3),"\n" - ,months(vlevels[sel]),"-",days(vlevels[sel]),"\n" - ,years(vlevels[sel]),sep="") - padj[sel] = 0.5 - #------------------------------------------------------------------------------------# - - - }else if(wstep.1st / min.sec > 0.8){ - myunit="minutes" - #------------------------------------------------------------------------------------# - # Minutes are the best scale for the plots. # - #------------------------------------------------------------------------------------# - #----- Find the time step that is the closest to the base. --------------------------# - wstep = wstep.1st / min.sec - whichbase = base.minsec[which.min(abs(wstep-base.minsec))] - #----- Find the list of days to plot. -----------------------------------------------# - when1st = dates(min(when ,na.rm=TRUE)) - whenlast = dates(max(when+1,na.rm=TRUE)) - mydates = seq.dates(from=when1st,to=whenlast,by="days") - mytimes = times(seq(from=0,to=day.sec-1,by=whichbase*min.sec)) / day.sec - ndates = length(mydates) - ntimes = length(mytimes) - #----- First guess for the levels. --------------------------------------------------# - vlevels = chron(dates=rep(x=mydates,each=ntimes),times=rep(x=mytimes,times=ndates)) - - wlaba = chron(dates=paste(nummonths(whena),numdays(whena),numyears(whena),sep="/"), - times=paste(hours(whena),minutes(whena),0,sep=":")) - minz = (minutes(whenz) + 1) %% 60 - hourz = (hours(whenz) + as.integer(minz == 0)) %% 24 - d2831 = daymax(nummonths(whenz),numyears(whenz)) - dayz = (numdays(whenz) - 1 + as.integer(hourz == 0)) %% d2831 + 1 - monz = (nummonths(whenz) - 1 + as.integer(dayz == 1)) %% 12 + 1 - yearz = numyears(whenz) + as.integer(monz == 1) - wlabz = chron(dates=paste(monz,dayz,yearz,sep="/") - ,times=paste(hourz,minz,0,sep=":")) - sel = ( vlevels >= min(wlaba,na.rm=TRUE) - & vlevels <= max(wlabz,na.rm=TRUE) ) - - #------------------------------------------------------------------------------------# - # Make the labels, and put day and month information only on the first time of # - # the day, and all information in the second time, and the first time of the year. # - #------------------------------------------------------------------------------------# - vlabels = paste(substring(100+hours(vlevels),2,3) - ,substring(100+minutes(vlevels),2,3),sep=":") - padj = rep(0,times=length(vlabels)) - #----- First time of the day. -------------------------------------------------------# - sel = hours(vlevels) == 0 & minutes(vlevels) == 0 - vlabels[sel] = paste(substring(100+hours(vlevels[sel]),2,3),":" - ,substring(100+minutes(vlevels[sel]),2,3),"\n" - ,months(vlevels[sel]),"-",days(vlevels[sel]),sep="") - padj[sel] = 0.5 - #----- First time of the year. 
------------------------------------------------------# - sel = ( vlevels == vlevels[1] - | ( nummonths(vlevels) == 1 & numdays(vlevels) == 1 - & hours(vlevels) == 0 & minutes(vlevels) == 0)) - vlabels[sel] = paste(substring(100+hours(vlevels[sel]),2,3),":" - ,substring(100+minutes(vlevels[sel]),2,3),"\n" - ,months(vlevels[sel]),"-",days(vlevels[sel]),"\n" - ,years(vlevels[sel]),sep="") - padj[sel] = 0.5 - #------------------------------------------------------------------------------------# - - - - }else{ - myunit="seconds" - #------------------------------------------------------------------------------------# - # Minutes are the best scale for the plots. # - #------------------------------------------------------------------------------------# - #----- Find the time step that is the closest to the base. --------------------------# - wstep = wstep.1st - whichbase = base.minsec[which.min(abs(wstep-base.minsec))] - #----- Find the list of days to plot. -----------------------------------------------# - when1st = dates(min(when ,na.rm=TRUE)) - whenlast = dates(max(when+1,na.rm=TRUE)) - mydates = seq.dates(from=when1st,to=whenlast,by="days") - mytimes = times(seq(from=0,to=day.sec-1,by=whichbase)) / day.sec - ndates = length(mydates) - ntimes = length(mytimes) - #----- First guess for the levels. --------------------------------------------------# - vlevels = chron(dates=rep(x=mydates,each=ntimes),times=rep(x=mytimes,times=ndates)) - - wlaba = chron(dates=paste(nummonths(whena),numdays(whena),numyears(whena),sep="/"), - times=paste(hours(whena),minutes(whena),seconds(whena),sep=":")) - secz = (seconds(whenz) + 1) %% 60 - minz = (minutes(whenz) + as.integer(secz == 0)) %% 60 - hourz = (hours(whenz) + as.integer(minz == 0)) %% 24 - d2831 = daymax(nummonths(whenz),numyears(whenz)) - dayz = (numdays(whenz) - 1 + as.integer(hourz == 0)) %% d2831 + 1 - monz = (nummonths(whenz) - 1 + as.integer(dayz == 1)) %% 12 + 1 - yearz = numyears(whenz) + as.integer(monz == 1) - wlabz = chron(dates=paste(monz,dayz,yearz,sep="/") - ,times=paste(hourz,minz,secz,sep=":")) - sel = ( vlevels >= min(wlaba,na.rm=TRUE) - & vlevels <= max(wlabz,na.rm=TRUE) ) - - #------------------------------------------------------------------------------------# - # Make the labels, and put day and month information only on the first time of # - # the day, and all information in the second time, and the first time of the year. # - #------------------------------------------------------------------------------------# - vlabels = paste(substring(100+hours(vlevels),2,3) - ,substring(100+minutes(vlevels),2,3) - ,substring(100+seconds(vlevels),2,3),sep=":") - padj = rep(0,times=length(vlabels)) - #----- First time of the day. -------------------------------------------------------# - sel = hours(vlevels) == 0 & minutes(vlevels) == 0 & seconds(vlevels) == 0 - vlabels[sel] = paste(substring(100+hours(vlevels[sel]),2,3),":" - ,substring(100+minutes(vlevels[sel]),2,3),":" - ,substring(100+seconds(vlevels[sel]),2,3),"\n" - ,months(vlevels[sel]),"-",days(vlevels[sel]),sep="") - padj[sel] = 0.5 - #----- First time of the year. 
------------------------------------------------------# - sel = ( vlevels == vlevels[1] - | ( nummonths(vlevels) == 1 & numdays(vlevels) == 1 - & hours(vlevels) == 0 & minutes(vlevels) == 0 - & seconds(vlevels) == 0)) - vlabels[sel] = paste(substring(100+hours(vlevels[sel]),2,3),":" - ,substring(100+minutes(vlevels[sel]),2,3),":" - ,substring(100+seconds(vlevels[sel]),2,3),"\n" - ,months(vlevels[sel]),"-",days(vlevels[sel]),"\n" - ,years(vlevels[sel]),sep="") - padj[sel] = 0.5 - #------------------------------------------------------------------------------------# - }#end if - - vresult=list(levels=vlevels,labels=vlabels,n=length(vlevels),scale=myunit,padj=padj) - return(vresult) -}#end function -#==========================================================================================# -#==========================================================================================# - - - - - - -#==========================================================================================# -#==========================================================================================# +pretty.time <- function(when, n = 10, ...) { + #----- Find the 1st and last time. -----------------------------------------------------# + whena <- min(when, na.rm = TRUE) + whenz <- max(when, na.rm = TRUE) + + #----- Table for accepted bases for months, hours, minutes, and seconds. ---------------# + base.months <- c(1, 2, 3, 4, 6) + base.days <- c(1, 2, 4, 7, 15) + base.hours <- c(1, 2, 3, 4, 6, 12) + base.minsec <- c(1, 2, 5, 10, 15, 20, 30) + + #----- Convert time to seconds. --------------------------------------------------------# + when.sec <- as.numeric(when) * day.sec + + #---------------------------------------------------------------------------------------# + # Find a first guess of the step size, so we decide whether to use years, months, # + # days, hours, minutes, or seconds. # + #---------------------------------------------------------------------------------------# + wstep.1st <- mean(diff(pretty(when.sec, n))) + + if (wstep.1st == 0) { + myunit <- NA + #---- Whatever, use just what comes out from the regular pretty. --------------------# + vlevels <- chron(pretty(when, n)) + vlabels <- as.character(vlevels) + padj <- rep(0, times = length(vlabels)) + } else if (wstep.1st / yr.sec > 0.8) { + myunit <- "years" + #------------------------------------------------------------------------------------# + # Years are the best scale for the plots. # + #------------------------------------------------------------------------------------# + yrrange <- numyears(when) + vlevels <- pretty(yrrange, n) + vlevels <- dates(x = paste(1, 1, vlevels, sep = "/")) + + vlabels <- paste(months(vlevels), years(vlevels), sep = "-") + padj <- rep(0, times = length(vlabels)) + } else if (wstep.1st / (30. * day.sec) > 0.8) { + myunit <- "months" + #------------------------------------------------------------------------------------# + # Months are the best scale for the plots. # + #------------------------------------------------------------------------------------# + #----- Find the time step that is the closest to the base. --------------------------# + wstep <- wstep.1st / (30. * day.sec) + whichbase <- base.months[which.min(abs(wstep - base.months))] + + #----- Find the list of years to plot. 
----------------------------------------------# + allyears <- numyears(when) + yeara <- min(allyears, na.rm = TRUE) + yearz <- max(allyears, na.rm = TRUE) + 1 + vlevels <- seq.dates( + from = paste(1, 1, yeara, sep = "/"), + to = paste(1, 1, yearz, sep = "/"), + by = "months" + ) + mon1st <- nummonths(vlevels) + monlevs <- seq(from = 1, to = 12, by = whichbase) + + #----- Find the limits that will keep the labels not too far from the data. ---------# + wlaba <- dates(paste(nummonths(whena), 1, numyears(whena), sep = "/")) + monz <- nummonths(whenz) %% 12 + 1 + yearz <- numyears(whenz) + as.integer(monz == 1) + wlabz <- dates(paste(monz, 1, yearz, sep = "/")) + sel <- (mon1st %in% monlevs & + vlevels >= min(wlaba, na.rm = TRUE) & + vlevels <= max(wlabz, na.rm = TRUE)) + vlevels <- dates(vlevels[sel], out.format = "m/d/year") + vlabels <- paste(months(vlevels), years(vlevels), sep = "-") + padj <- rep(0, times = length(vlabels)) + } else if (wstep.1st / day.sec > 0.8) { + myunit <- "days" + #------------------------------------------------------------------------------------# + # Days are the best scale for the plots, but we keep them tethered to months, # + # even if the grid becomes slightly irregular. # + #------------------------------------------------------------------------------------# + #----- Find the time step that is the closest to the base. --------------------------# + wstep <- wstep.1st / day.sec + whichbase <- base.days[which.min(abs(wstep - base.days))] + #----- Find the list of years to plot. ----------------------------------------------# + allyears <- numyears(when) + yeara <- min(allyears, na.rm = TRUE) + yearz <- max(allyears, na.rm = TRUE) + 1 + #------------------------------------------------------------------------------------# + # Impose the list of months to be from January to December, we will trim the # + # numbers later. # + #------------------------------------------------------------------------------------# + montha <- 1 + monthz <- 12 + #------------------------------------------------------------------------------------# + # Impose the list of months to be from January to December, we will trim the # + # numbers later. # + #------------------------------------------------------------------------------------# + daylevs <- seq(from = 1, to = 31 - whichbase + 1, by = whichbase) + #----- First guess for the levels. --------------------------------------------------# + vlevels <- seq.dates( + from = paste(1, 1, yeara, sep = "/"), + to = paste(1, 1, yearz, sep = "/"), + by = "days" + ) + day1st <- numdays(vlevels) + + #----- Find the limits that will keep the labels not too far from the data. 
---------# + wlaba <- dates(whena) + dayz <- numdays(whenz) %% daymax(nummonths(whenz), numyears(whenz)) + 1 + monz <- 1 + (nummonths(whenz) - 1 + as.integer(dayz == 1)) %% 12 + yearz <- numyears(whenz) + as.integer(monz == 1) + wlabz <- dates(paste(monz, dayz, yearz, sep = "/")) + sel <- (day1st %in% daylevs & + vlevels >= min(wlaba, na.rm = TRUE) & + vlevels <= max(wlabz, na.rm = TRUE)) + vlevels <- dates(vlevels[sel], out.format = "m/d/y") + vlabels <- paste(months(vlevels), days(vlevels), sep = "/") + + padj <- rep(0, times = length(vlabels)) + + sel <- vlevels == vlevels[1] | (numdays(vlevels) == 1 & nummonths(vlevels) == 1) + vlabels[sel] <- paste(months(vlevels[sel]), "/", days(vlevels[sel]), "\n", + years(vlevels[sel]), + sep = "" + ) + padj[sel] <- 0.5 + } else if (wstep.1st / hr.sec > 0.8) { + myunit <- "hours" + #------------------------------------------------------------------------------------# + # Hours are the best scale for the plots. # + #------------------------------------------------------------------------------------# + #----- Find the time step that is the closest to the base. --------------------------# + wstep <- wstep.1st / hr.sec + whichbase <- base.hours[which.min(abs(wstep - base.hours))] + #----- Find the list of days to plot. -----------------------------------------------# + when1st <- dates(min(when, na.rm = TRUE)) + whenlast <- dates(max(when + 1, na.rm = TRUE)) + mydates <- seq.dates(from = when1st, to = whenlast, by = "days") + mytimes <- times(seq(from = 0, to = day.sec - 1, by = whichbase * hr.sec)) / day.sec + ndates <- length(mydates) + ntimes <- length(mytimes) + #----- First guess for the levels. --------------------------------------------------# + vlevels <- chron(dates = rep(x = mydates, each = ntimes), times = rep(x = mytimes, times = ndates)) + wlaba <- chron( + dates = paste(nummonths(whena), numdays(whena), numyears(whena), sep = "/"), + times = paste(hours(whena), 0, 0, sep = ":") + ) + hourz <- (hours(whenz) + 1) %% 24 + d2831 <- daymax(nummonths(whenz), numyears(whenz)) + dayz <- (numdays(whenz) - 1 + as.integer(hourz == 0)) %% d2831 + 1 + monz <- (nummonths(whenz) - 1 + as.integer(dayz == 1)) %% 12 + 1 + yearz <- numyears(whenz) + as.integer(monz == 1) + wlabz <- chron(dates = paste(monz, dayz, yearz, sep = "/"), times = paste(hourz, 0, 0, sep = ":")) + sel <- (vlevels >= min(wlaba, na.rm = TRUE) & + vlevels <= max(wlabz, na.rm = TRUE)) + + #------------------------------------------------------------------------------------# + # Make the labels, and put day and month information only on the first time of # + # the day, and all information in the second time, and the first time of the year. # + #------------------------------------------------------------------------------------# + vlabels <- paste(substring(100 + hours(vlevels), 2, 3), + substring(100 + minutes(vlevels), 2, 3), + sep = ":" + ) + padj <- rep(0, times = length(vlabels)) + #----- First time of the day. -------------------------------------------------------# + sel <- hours(vlevels) == 0 + vlabels[sel] <- paste(substring(100 + hours(vlevels[sel]), 2, 3), ":", + substring(100 + minutes(vlevels[sel]), 2, 3), "\n", + months(vlevels[sel]), "-", days(vlevels[sel]), + sep = "" + ) + padj[sel] <- 0.5 + #----- First time of the year. 
------------------------------------------------------# + sel <- (vlevels == vlevels[1] | + (nummonths(vlevels) == 1 & numdays(vlevels) == 1 & + hours(vlevels) == 0)) + vlabels[sel] <- paste(substring(100 + hours(vlevels[sel]), 2, 3), ":", + substring(100 + minutes(vlevels[sel]), 2, 3), "\n", + months(vlevels[sel]), "-", days(vlevels[sel]), "\n", + years(vlevels[sel]), + sep = "" + ) + padj[sel] <- 0.5 + #------------------------------------------------------------------------------------# + } else if (wstep.1st / min.sec > 0.8) { + myunit <- "minutes" + #------------------------------------------------------------------------------------# + # Minutes are the best scale for the plots. # + #------------------------------------------------------------------------------------# + #----- Find the time step that is the closest to the base. --------------------------# + wstep <- wstep.1st / min.sec + whichbase <- base.minsec[which.min(abs(wstep - base.minsec))] + #----- Find the list of days to plot. -----------------------------------------------# + when1st <- dates(min(when, na.rm = TRUE)) + whenlast <- dates(max(when + 1, na.rm = TRUE)) + mydates <- seq.dates(from = when1st, to = whenlast, by = "days") + mytimes <- times(seq(from = 0, to = day.sec - 1, by = whichbase * min.sec)) / day.sec + ndates <- length(mydates) + ntimes <- length(mytimes) + #----- First guess for the levels. --------------------------------------------------# + vlevels <- chron(dates = rep(x = mydates, each = ntimes), times = rep(x = mytimes, times = ndates)) + + wlaba <- chron( + dates = paste(nummonths(whena), numdays(whena), numyears(whena), sep = "/"), + times = paste(hours(whena), minutes(whena), 0, sep = ":") + ) + minz <- (minutes(whenz) + 1) %% 60 + hourz <- (hours(whenz) + as.integer(minz == 0)) %% 24 + d2831 <- daymax(nummonths(whenz), numyears(whenz)) + dayz <- (numdays(whenz) - 1 + as.integer(hourz == 0)) %% d2831 + 1 + monz <- (nummonths(whenz) - 1 + as.integer(dayz == 1)) %% 12 + 1 + yearz <- numyears(whenz) + as.integer(monz == 1) + wlabz <- chron( + dates = paste(monz, dayz, yearz, sep = "/"), + times = paste(hourz, minz, 0, sep = ":") + ) + sel <- (vlevels >= min(wlaba, na.rm = TRUE) & + vlevels <= max(wlabz, na.rm = TRUE)) + + #------------------------------------------------------------------------------------# + # Make the labels, and put day and month information only on the first time of # + # the day, and all information in the second time, and the first time of the year. # + #------------------------------------------------------------------------------------# + vlabels <- paste(substring(100 + hours(vlevels), 2, 3), + substring(100 + minutes(vlevels), 2, 3), + sep = ":" + ) + padj <- rep(0, times = length(vlabels)) + #----- First time of the day. -------------------------------------------------------# + sel <- hours(vlevels) == 0 & minutes(vlevels) == 0 + vlabels[sel] <- paste(substring(100 + hours(vlevels[sel]), 2, 3), ":", + substring(100 + minutes(vlevels[sel]), 2, 3), "\n", + months(vlevels[sel]), "-", days(vlevels[sel]), + sep = "" + ) + padj[sel] <- 0.5 + #----- First time of the year. 
------------------------------------------------------# + sel <- (vlevels == vlevels[1] | + (nummonths(vlevels) == 1 & numdays(vlevels) == 1 & + hours(vlevels) == 0 & minutes(vlevels) == 0)) + vlabels[sel] <- paste(substring(100 + hours(vlevels[sel]), 2, 3), ":", + substring(100 + minutes(vlevels[sel]), 2, 3), "\n", + months(vlevels[sel]), "-", days(vlevels[sel]), "\n", + years(vlevels[sel]), + sep = "" + ) + padj[sel] <- 0.5 + #------------------------------------------------------------------------------------# + } else { + myunit <- "seconds" + #------------------------------------------------------------------------------------# + # Minutes are the best scale for the plots. # + #------------------------------------------------------------------------------------# + #----- Find the time step that is the closest to the base. --------------------------# + wstep <- wstep.1st + whichbase <- base.minsec[which.min(abs(wstep - base.minsec))] + #----- Find the list of days to plot. -----------------------------------------------# + when1st <- dates(min(when, na.rm = TRUE)) + whenlast <- dates(max(when + 1, na.rm = TRUE)) + mydates <- seq.dates(from = when1st, to = whenlast, by = "days") + mytimes <- times(seq(from = 0, to = day.sec - 1, by = whichbase)) / day.sec + ndates <- length(mydates) + ntimes <- length(mytimes) + #----- First guess for the levels. --------------------------------------------------# + vlevels <- chron(dates = rep(x = mydates, each = ntimes), times = rep(x = mytimes, times = ndates)) + + wlaba <- chron( + dates = paste(nummonths(whena), numdays(whena), numyears(whena), sep = "/"), + times = paste(hours(whena), minutes(whena), seconds(whena), sep = ":") + ) + secz <- (seconds(whenz) + 1) %% 60 + minz <- (minutes(whenz) + as.integer(secz == 0)) %% 60 + hourz <- (hours(whenz) + as.integer(minz == 0)) %% 24 + d2831 <- daymax(nummonths(whenz), numyears(whenz)) + dayz <- (numdays(whenz) - 1 + as.integer(hourz == 0)) %% d2831 + 1 + monz <- (nummonths(whenz) - 1 + as.integer(dayz == 1)) %% 12 + 1 + yearz <- numyears(whenz) + as.integer(monz == 1) + wlabz <- chron( + dates = paste(monz, dayz, yearz, sep = "/"), + times = paste(hourz, minz, secz, sep = ":") + ) + sel <- (vlevels >= min(wlaba, na.rm = TRUE) & + vlevels <= max(wlabz, na.rm = TRUE)) + + #------------------------------------------------------------------------------------# + # Make the labels, and put day and month information only on the first time of # + # the day, and all information in the second time, and the first time of the year. # + #------------------------------------------------------------------------------------# + vlabels <- paste(substring(100 + hours(vlevels), 2, 3), + substring(100 + minutes(vlevels), 2, 3), + substring(100 + seconds(vlevels), 2, 3), + sep = ":" + ) + padj <- rep(0, times = length(vlabels)) + #----- First time of the day. -------------------------------------------------------# + sel <- hours(vlevels) == 0 & minutes(vlevels) == 0 & seconds(vlevels) == 0 + vlabels[sel] <- paste(substring(100 + hours(vlevels[sel]), 2, 3), ":", + substring(100 + minutes(vlevels[sel]), 2, 3), ":", + substring(100 + seconds(vlevels[sel]), 2, 3), "\n", + months(vlevels[sel]), "-", days(vlevels[sel]), + sep = "" + ) + padj[sel] <- 0.5 + #----- First time of the year. 
------------------------------------------------------# + sel <- (vlevels == vlevels[1] | + (nummonths(vlevels) == 1 & numdays(vlevels) == 1 & + hours(vlevels) == 0 & minutes(vlevels) == 0 & + seconds(vlevels) == 0)) + vlabels[sel] <- paste(substring(100 + hours(vlevels[sel]), 2, 3), ":", + substring(100 + minutes(vlevels[sel]), 2, 3), ":", + substring(100 + seconds(vlevels[sel]), 2, 3), "\n", + months(vlevels[sel]), "-", days(vlevels[sel]), "\n", + years(vlevels[sel]), + sep = "" + ) + padj[sel] <- 0.5 + #------------------------------------------------------------------------------------# + } # end if + + vresult <- list(levels = vlevels, labels = vlabels, n = length(vlevels), scale = myunit, padj = padj) + return(vresult) +} # end function +# ==========================================================================================# +# ==========================================================================================# + + + + + + +# ==========================================================================================# +# ==========================================================================================# # List with the names of months and seasons. # #------------------------------------------------------------------------------------------# -mlist <<- c("January","February","March","April","May","June","July","August" - ,"September","October","November","December") -season.list <<- c("MAM","JJA","SON","DJF") -#==========================================================================================# -#==========================================================================================# +mlist <<- c( + "January", "February", "March", "April", "May", "June", "July", "August", + "September", "October", "November", "December" +) +season.list <<- c("MAM", "JJA", "SON", "DJF") +# ==========================================================================================# +# ==========================================================================================# diff --git a/models/ed/scripts/get.model.output.ed.R b/models/ed/scripts/get.model.output.ed.R index 207354d9c54..8133cf4b3b7 100644 --- a/models/ed/scripts/get.model.output.ed.R +++ b/models/ed/scripts/get.model.output.ed.R @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html @@ -20,46 +20,65 @@ # } else { # settings.file <- Sys.getenv("PECANSETTINGS") # ## settings.file <- commandArgs(trailingOnly=TRUE) -# } -# +# } +# # settings.xml <- xmlParse(settings.file) # settings <- XML::xmlToList(settings.xml) ### TODO: Update this code to work within new PEcAn organization. Remove hard coded paths to source. Fix ensemble.Rdata and sensitivity.Rdata filenames to include ensemble id. -get.model.output.ed <- function(){ - +get.model.output.ed <- function() { # This should no longer be needed. Depreciate. 
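Stepping back to pretty.time(), whose restyled body ends above: the returned list is meant to feed a base-graphics time axis. A usage sketch, illustrative only and not part of the patch, where when is assumed to be a chron vector and y a numeric series of the same length:

pt <- pretty.time(when, n = 8)
plot(when, y, type = "l", xaxt = "n", xlab = "", ylab = "")
axis(side = 1, at = pt$levels, labels = pt$labels, padj = pt$padj)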
- #if(!is.null(settings$Rlib)){ .libPaths(settings$Rlib)} - if(settings$host$name == 'localhost'){ - send.files <- function(x){ - file.copy(from = x, - to = settings$outdir, - overwrite = TRUE) + # if(!is.null(settings$Rlib)){ .libPaths(settings$Rlib)} + if (settings$host$name == "localhost") { + send.files <- function(x) { + file.copy( + from = x, + to = settings$outdir, + overwrite = TRUE + ) } - lapply( c(paste(settings$pecanDir,c('R/utils.R', 'R/model.specific.R', - 'rscripts/read.output.R'),sep=""), - paste(settings$outdir, 'samples.Rdata', sep = '')), send.files) + lapply(c( + paste(settings$pecanDir, c( + "R/utils.R", "R/model.specific.R", + "rscripts/read.output.R" + ), sep = ""), + paste(settings$outdir, "samples.Rdata", sep = "") + ), send.files) setwd(settings$outdir) - source('read.output.R') + source("read.output.R") } else { ## if not on localhost - send.files <- function(filename){ - rsync(from = filename, - to = paste(settings$host$name, ':',settings$host$outdir, sep = '')) - } - lapply(c(paste(settings$outdir, 'samples.Rdata ', sep = ''), - paste(settings$pecanDir, c('R/utils.R', - 'R/model.specific.R', - 'rscripts/read.output.R'),sep = '')), - send.files) - system(paste("ssh -T", settings$host$name, "'", - "cd", settings$host$outdir, "; R --vanilla < read.output.R'")) - - rsync(from = paste(settings$host$name, ':', settings$host$outdir, 'ensemble.Rdata', sep=''), - to = settings$outdir) - rsync(from = paste(settings$host$name, ':', settings$host$outdir, 'sensitivity.Rdata', sep=''), - to = settings$outdir) + send.files <- function(filename) { + rsync( + from = filename, + to = paste(settings$host$name, ":", settings$host$outdir, sep = "") + ) } + lapply( + c( + paste(settings$outdir, "samples.Rdata ", sep = ""), + paste(settings$pecanDir, c( + "R/utils.R", + "R/model.specific.R", + "rscripts/read.output.R" + ), sep = "") + ), + send.files + ) + system(paste( + "ssh -T", settings$host$name, "'", + "cd", settings$host$outdir, "; R --vanilla < read.output.R'" + )) + + rsync( + from = paste(settings$host$name, ":", settings$host$outdir, "ensemble.Rdata", sep = ""), + to = settings$outdir + ) + rsync( + from = paste(settings$host$name, ":", settings$host$outdir, "sensitivity.Rdata", sep = ""), + to = settings$outdir + ) + } } ## debugging ## source('rscripts/get.model.output.R', echo = TRUE, print.eval = TRUE, verbose = TRUE) diff --git a/models/ed/scripts/read.output.ed.R b/models/ed/scripts/read.output.ed.R index eacd7da1486..ed81c795f69 100644 --- a/models/ed/scripts/read.output.ed.R +++ b/models/ed/scripts/read.output.ed.R @@ -1,59 +1,61 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------# -##' +##' ##' @name read.output.ed ##' @title Extract ED2 model output for analysis ##' ##' @import PEcAn.utils ##' @export -read.output.ed <- function(){ +read.output.ed <- function() { ### OLD CODE THAT NEEDS TO BE UPDATED. 
SPS - ### Rkelly: For one, need to fix ensemble.Rdata and sensitivity.Rdata filenames to include ensemble id. + ### Rkelly: For one, need to fix ensemble.Rdata and sensitivity.Rdata filenames to include ensemble id. sensitivity.output <- list() - ensemble.output <- list() - + ensemble.output <- list() + start.year <- ifelse(is.null(settings$sensitivity.analysis$start.year), - NA, settings$sensitivity.analysis$start.year) - end.year <- ifelse(is.null(settings$sensitivity.analysis$end.year), - NA, settings$sensitivity.analysis$end.year) - - if('sensitivity.analysis' %in% names(settings)) { - - for(pft.name in names(trait.samples)){ - + NA, settings$sensitivity.analysis$start.year + ) + end.year <- ifelse(is.null(settings$sensitivity.analysis$end.year), + NA, settings$sensitivity.analysis$end.year + ) + + if ("sensitivity.analysis" %in% names(settings)) { + for (pft.name in names(trait.samples)) { traits <- names(trait.samples[[pft.name]]) quantiles.str <- rownames(sa.samples[[pft.name]]) - quantiles.str <- quantiles.str[which(quantiles.str != '50')] - quantiles <- as.numeric(quantiles.str)/100 - + quantiles.str <- quantiles.str[which(quantiles.str != "50")] + quantiles <- as.numeric(quantiles.str) / 100 + sensitivity.output[[pft.name]] <- read.sa.output(traits, - quantiles, - outdir = getwd(), - pft.name=pft.name, - start.year, - end.year) + quantiles, + outdir = getwd(), + pft.name = pft.name, + start.year, + end.year + ) } - save(sensitivity.output, file = file.path(outdir, 'sensitivity.Rdata')) + save(sensitivity.output, file = file.path(outdir, "sensitivity.Rdata")) } - - if('ensemble' %in% names(settings)) { + + if ("ensemble" %in% names(settings)) { ensemble.output <- read.ensemble.output(settings$ensemble$size, - outdir = getwd(), - start.year, - end.year) - save(ensemble.output, file = file.path(outdir, 'ensemble.Rdata')) + outdir = getwd(), + start.year, + end.year + ) + save(ensemble.output, file = file.path(outdir, "ensemble.Rdata")) } } -#==================================================================================================# +# ==================================================================================================# #################################################################################################### -### EOF. End of R script file. -#################################################################################################### \ No newline at end of file +### EOF. End of R script file. +#################################################################################################### diff --git a/models/ed/scripts/write.configs.rscript.R b/models/ed/scripts/write.configs.rscript.R index d75e2778b1d..c6b3616958d 100644 --- a/models/ed/scripts/write.configs.rscript.R +++ b/models/ed/scripts/write.configs.rscript.R @@ -1,35 +1,37 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- ### LOAD SETTINGS ### library(XML) -if(interactive()){ - user <- Sys.getenv('USER') - if(user == 'ed'){ - settings.file = '~/in/ebifarm/fast/ebifarm.pavi.xml' - } else if(user == 'davids14') { - settings.file = '~/pecan/atqasuk.xml' +if (interactive()) { + user <- Sys.getenv("USER") + if (user == "ed") { + settings.file <- "~/in/ebifarm/fast/ebifarm.pavi.xml" + } else if (user == "davids14") { + settings.file <- "~/pecan/atqasuk.xml" } else { - paste('please specify settings file in write.configs.R') + paste("please specify settings file in write.configs.R") } } else { settings.file <- Sys.getenv("PECANSETTINGS") } settings.xml <- XML::xmlParse(settings.file) settings <- XML::xmlToList(settings.xml) -outdir <- settings$outdir +outdir <- settings$outdir host <- settings$host -if(!is.null(settings$Rlib)){ .libPaths(settings$Rlib)} +if (!is.null(settings$Rlib)) { + .libPaths(settings$Rlib) +} library(PEcAn) -pft.names <- unlist(xpathApply(settings.xml, '//pfts//pft//name', xmlValue)) -outdirs <- unlist(xpathApply(settings.xml, '//pfts//pft//outdir', xmlValue)) +pft.names <- unlist(xpathApply(settings.xml, "//pfts//pft//name", xmlValue)) +outdirs <- unlist(xpathApply(settings.xml, "//pfts//pft//outdir", xmlValue)) trait.samples <- list() sa.samples <- list() @@ -37,59 +39,62 @@ ensemble.samples <- list() env.samples <- list() ## Remove existing config files -if(FALSE){ - todelete <- dir(paste(settings$pfts$pft$outdir, 'out/', sep = ''), - c('ED2INc.*','c.*'), - recursive=TRUE, full.names = TRUE) - if(length(todelete>0)) file.remove(todelete) - - filename.root <- get.run.id('c.','*') - - if(host$name == 'localhost'){ - if(length(dir(host$rundir, pattern = filename.root)) > 0) { +if (FALSE) { + todelete <- dir(paste(settings$pfts$pft$outdir, "out/", sep = ""), + c("ED2INc.*", "c.*"), + recursive = TRUE, full.names = TRUE + ) + if (length(todelete > 0)) file.remove(todelete) + + filename.root <- get.run.id("c.", "*") + + if (host$name == "localhost") { + if (length(dir(host$rundir, pattern = filename.root)) > 0) { todelete <- dir(host$outdir, - pattern = paste(filename.root, "*[^log]", sep = ''), - recursive=TRUE, full.names = TRUE) + pattern = paste(filename.root, "*[^log]", sep = ""), + recursive = TRUE, full.names = TRUE + ) file.remove(todelete) } } else { - files <- system(paste("ssh ", host$name, " 'ls ", host$rundir, "*", filename.root, "*'", sep = ''), intern = TRUE) - if(length(files) > 0 ) { - todelete <- files[-grep('log', files)] + files <- system(paste("ssh ", host$name, " 'ls ", host$rundir, "*", filename.root, "*'", sep = ""), intern = TRUE) + if (length(files) > 0) { + todelete <- files[-grep("log", files)] system(paste("ssh -T ", host$name, - " 'for f in ", paste(todelete, collapse = ' '),"; do rm $f; done'",sep='')) + " 'for f in ", paste(todelete, collapse = " "), "; do rm $f; done'", + sep = "" + )) } } } ## Load PFT priors and posteriors -for (i in seq(pft.names)){ - - load(paste(outdirs[i], 'prior.distns.Rdata', sep='')) - - if("trait.mcmc.Rdata" %in% dir(outdirs)) { - load(paste(outdirs[i], 'trait.mcmc.Rdata', sep='')) +for (i in seq(pft.names)) { + load(paste(outdirs[i], "prior.distns.Rdata", sep = "")) + 
+ if ("trait.mcmc.Rdata" %in% dir(outdirs)) { + load(paste(outdirs[i], "trait.mcmc.Rdata", sep = "")) } - + pft.name <- pft.names[i] - + ## when no ma for a trait, sample from prior ## trim all chains to shortest mcmc chain, else 20000 samples - if(exists('trait.mcmc')) { + if (exists("trait.mcmc")) { traits <- names(trait.mcmc) samples.num <- min(sapply(trait.mcmc, function(x) nrow(as.matrix(x)))) } else { traits <- NA samples.num <- 20000 } - + priors <- rownames(prior.distns) for (prior in priors) { if (prior %in% traits) { - samples <- as.matrix(trait.mcmc[[prior]][,'beta.o']) + samples <- as.matrix(trait.mcmc[[prior]][, "beta.o"]) } else { - samples <- get.sample(prior.distns[prior,], samples.num) + samples <- get.sample(prior.distns[prior, ], samples.num) } trait.samples[[pft.name]][[prior]] <- samples } @@ -100,45 +105,56 @@ for (i in seq(pft.names)){ ## write SENSITIVITY ANALYSIS -if('sensitivity.analysis' %in% names(settings)) { +if ("sensitivity.analysis" %in% names(settings)) { quantiles <- get.quantiles(settings$sensitivity.analysis$quantiles) - if( is.null(settings$sensitivity.analysis)) { - print(paste('sensitivity analysis settings are NULL')) + if (is.null(settings$sensitivity.analysis)) { + print(paste("sensitivity analysis settings are NULL")) } else { - sa.samples <- get.sa.sample.list(trait.samples, - env.samples, - quantiles) - write.sa.configs(settings$pfts, sa.samples, - host, outdir, settings) + sa.samples <- get.sa.sample.list( + trait.samples, + env.samples, + quantiles + ) + write.sa.configs( + settings$pfts, sa.samples, + host, outdir, settings + ) } } ## Write ENSEMBLE -if('ensemble' %in% names(settings) && settings$ensemble$size > 0) { - ## subset the trait.samples to ensemble size using Halton sequence +if ("ensemble" %in% names(settings) && settings$ensemble$size > 0) { + ## subset the trait.samples to ensemble size using Halton sequence ensemble.samples <- get.ensemble.samples(settings$ensemble$size, trait.samples, env.samples) - write.ensemble.configs(settings$pfts, ensemble.samples, - host, outdir, settings) + write.ensemble.configs( + settings$pfts, ensemble.samples, + host, outdir, settings + ) } save(ensemble.samples, trait.samples, sa.samples, settings, - file = paste(outdir, 'samples.Rdata', sep='')) + file = paste(outdir, "samples.Rdata", sep = "") +) ## Make outdirectory, send samples to outdir -if(host$name == 'localhost'){ - if(!host$outdir == outdir) { +if (host$name == "localhost") { + if (!host$outdir == outdir) { dir.create(host$outdir) - file.copy(from = paste(outdir, 'samples.Rdata', sep=''), - to = paste(host$outdir, 'samples.Rdata', sep = ''), - overwrite = TRUE) + file.copy( + from = paste(outdir, "samples.Rdata", sep = ""), + to = paste(host$outdir, "samples.Rdata", sep = ""), + overwrite = TRUE + ) } -} else { - mkdir.cmd <- paste("'if ! ls ", host$outdir, " > /dev/null ; then mkdir -p ", host$outdir," ; fi'",sep='') +} else { + mkdir.cmd <- paste("'if ! 
ls ", host$outdir, " > /dev/null ; then mkdir -p ", host$outdir, " ; fi'", sep = "") system(paste("ssh", host$name, mkdir.cmd)) - system(paste('rsync -routi ', paste(outdir, 'samples.Rdata', sep=''), - paste(host$name, ':', host$outdir, sep=''))) + system(paste( + "rsync -routi ", paste(outdir, "samples.Rdata", sep = ""), + paste(host$name, ":", host$outdir, sep = "") + )) } diff --git a/models/ed/tests/test_ed_integration.R b/models/ed/tests/test_ed_integration.R index 96c271f7566..0039d2ef86c 100644 --- a/models/ed/tests/test_ed_integration.R +++ b/models/ed/tests/test_ed_integration.R @@ -1,7 +1,6 @@ -if(FALSE){ +if (FALSE) { devtools::load_all("models/ed") - + commandArgs <- function(...) "~/pecan/tests/pecan64.ed.xml" source("~/pecan/web/workflow.R") } - diff --git a/models/ed/tests/testthat.R b/models/ed/tests/testthat.R index 241e0a79814..caa7193b12a 100644 --- a/models/ed/tests/testthat.R +++ b/models/ed/tests/testthat.R @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html diff --git a/models/ed/tests/testthat/test-put_ET_files.R b/models/ed/tests/testthat/test-put_ET_files.R index fd173e00041..4e773e1ead6 100644 --- a/models/ed/tests/testthat/test-put_ET_files.R +++ b/models/ed/tests/testthat/test-put_ET_files.R @@ -2,7 +2,7 @@ testdir <- tempfile() dir.create(testdir) withr::defer(unlink(testdir, recursive = TRUE)) unzip("data/outdir.zip", exdir = testdir) -#for interactive use: +# for interactive use: # unzip("models/ed/tests/testthat/data/outdir.zip", exdir = testdir) e_file <- "analysis-E-2004-07-00-000000-g01.h5" @@ -35,11 +35,13 @@ var_list_T <- sitelat <- settings$run$site$lat sitelon <- settings$run$site$lon lat <- ncdf4::ncdim_def("lat", "degrees_north", - vals = as.numeric(sitelat), - longname = "station_latitude") + vals = as.numeric(sitelat), + longname = "station_latitude" +) lon <- ncdf4::ncdim_def("lon", "degrees_east", - vals = as.numeric(sitelon), - longname = "station_longitude") + vals = as.numeric(sitelon), + longname = "station_longitude" +) test_that("put_E_values() runs", { @@ -74,7 +76,7 @@ test_that("put_T_values() runs", { test_that("put_E_values() outputs match", { - e_list <- + e_list <- put_E_values( yr = year, nc_var = list(), @@ -87,6 +89,6 @@ test_that("put_E_values() outputs match", { expect_equal(names(e_list$out), c("AGB_PFT", "BSEEDS", "DBH", "NPP_PFT", "TRANSP_PFT", "DENS", "PFT", "dtime_bounds")) }) -#TODO: test if all vars are in output -#TODO: test if dimensions are correct and consistent -#TODO: test behavior when nc_var is not empty \ No newline at end of file +# TODO: test if all vars are in output +# TODO: test if dimensions are correct and consistent +# TODO: test behavior when nc_var is not empty diff --git a/models/ed/tests/testthat/test-read_ET_files.R b/models/ed/tests/testthat/test-read_ET_files.R index ef7fd6c0ce8..a6262aa8fcc 100644 --- a/models/ed/tests/testthat/test-read_ET_files.R +++ b/models/ed/tests/testthat/test-read_ET_files.R @@ -2,7 +2,7 @@ testdir <- tempfile() dir.create(testdir) withr::defer(unlink(testdir, recursive = TRUE)) unzip("data/outdir.zip", exdir = testdir) -#for interactive use: +# for interactive use: 
# unzip("models/ed/tests/testthat/data/outdir.zip", exdir = testdir) e_file <- "analysis-E-2004-07-00-000000-g01.h5" @@ -16,26 +16,26 @@ settings$outdir <- file.path(testdir, "outdir") test_that("read E files without ED2 pft number", { pfts_without_number <- list( pft = list( - name = 'SetariaWT', + name = "SetariaWT", ed2_pft_number = "1" ), pft = list( - name = 'ebifarm.c3grass' + name = "ebifarm.c3grass" ) ) - settings2<-settings + settings2 <- settings settings2$pfts <- pfts_without_number result <- read_E_files( yr = 2004, yfiles = 2004, - h5_files = e_file, + h5_files = e_file, outdir = file.path(settings$outdir, "out", "ENS-00001-76"), settings = settings ) expect_type(result, "list") - expect_equal(length(result), 7) #TODO: expectation of number of variables will have to change - #TODO: better test would be to check for specific variables in output + expect_equal(length(result), 7) # TODO: expectation of number of variables will have to change + # TODO: better test would be to check for specific variables in output }) test_that("read E files without settings arg and with ED2 pft number", { @@ -44,7 +44,7 @@ test_that("read E files without settings arg and with ED2 pft number", { read_E_files( yr = 2004, yfiles = 2004, - h5_files = e_file, + h5_files = e_file, outdir = file.path(settings$outdir, "out", "ENS-00001-76"), start_date = "2004/07/01", end_date = "2004/08/01", @@ -62,7 +62,7 @@ test_that("read E files without only settings arg", { yr = 2004, yfiles = 2004, outdir = file.path(settings$outdir, "out", "ENS-00001-76"), - h5_files = e_file, + h5_files = e_file, settings = settings ) expect_type(result, "list") @@ -83,4 +83,3 @@ test_that("read_T_files() runs", { "list" ) }) - diff --git a/models/ed/tests/testthat/test-read_S_files.R b/models/ed/tests/testthat/test-read_S_files.R index 9f75e00aa89..f1d480fc9e4 100644 --- a/models/ed/tests/testthat/test-read_S_files.R +++ b/models/ed/tests/testthat/test-read_S_files.R @@ -2,7 +2,7 @@ testdir <- tempfile() dir.create(testdir) withr::defer(unlink(testdir, recursive = TRUE)) unzip("data/outdir.zip", exdir = testdir) -#for interactive use: +# for interactive use: # unzip("models/ed/tests/testthat/data/outdir.zip", exdir = testdir) s_file <- "history-S-2004-07-01-000000-g01.h5" diff --git a/models/ed/tests/testthat/test.met2model.R b/models/ed/tests/testthat/test.met2model.R index 7b51c65eb68..43f5abbd5ef 100644 --- a/models/ed/tests/testthat/test.met2model.R +++ b/models/ed/tests/testthat/test.met2model.R @@ -1,9 +1,10 @@ test_that("Met conversion runs without error", { - outdir <- tempfile() #creates a directory + outdir <- tempfile() # creates a directory withr::defer(unlink(outdir, recursive = TRUE)) - + nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" @@ -11,4 +12,4 @@ test_that("Met conversion runs without error", { result <- met2model.ED2(in.path, in.prefix, outdir, start_date, end_date) expect_s3_class(result, "data.frame") expect_true(file.exists(result[["file"]][[1]])) -}) \ No newline at end of file +}) diff --git a/models/ed/tests/testthat/test.model2netcdf.ED2.R b/models/ed/tests/testthat/test.model2netcdf.ED2.R index 64ca5b24c0a..24805219df9 100644 --- a/models/ed/tests/testthat/test.model2netcdf.ED2.R +++ b/models/ed/tests/testthat/test.model2netcdf.ED2.R @@ -1,58 +1,60 @@ library(stringr) -#set up tempdir +# set up tempdir testdir <- tempfile() dir.create(testdir) 
withr::defer(unlink(testdir, recursive = TRUE)) unzip("data/outdir.zip", exdir = testdir) -#for interactive use: +# for interactive use: # unzip("models/ed/tests/testthat/data/outdir.zip", exdir = testdir) settings <- PEcAn.settings::read.settings(file.path(testdir, "outdir", "pecan_checked.xml")) settings$outdir <- file.path(testdir, "outdir") -#not to be confused with outdir +# not to be confused with outdir outdir <- file.path(settings$outdir, "out", "ENS-00001-76") test_that("model2netcdf.ED2 runs without error", { - - #hacky way to check for errors b/c PEcAn.logger errors are non-standard and - #not captured by testthat::expect_message() or expect_error() + # hacky way to check for errors b/c PEcAn.logger errors are non-standard and + # not captured by testthat::expect_message() or expect_error() x <- capture.output( - model2netcdf.ED2(outdir = outdir, - settings = settings), + model2netcdf.ED2( + outdir = outdir, + settings = settings + ), type = "message" ) expect_false(any(str_detect(x, "ERROR"))) }) -#remove .nc file +# remove .nc file file.remove(list.files(outdir, "*.nc", full.names = TRUE)) -#run function to create outputs +# run function to create outputs model2netcdf.ED2(outdir = outdir, settings = settings) test_that("a valid .nc file is produced for each corresponding ED2 output", { - h5_T_files <- dir(outdir, pattern = "-T-.*.h5") nc_files <- dir(outdir, pattern = ".nc$") nc_var_files <- dir(outdir, pattern = ".nc.var$") - + expect_equal(length(h5_T_files), length(nc_files)) expect_equal(length(h5_T_files), length(nc_var_files)) - h5years <- str_extract(h5_T_files, "\\d{4}") - ncyears <- str_extract(nc_files, "\\d{4}") + h5years <- str_extract(h5_T_files, "\\d{4}") + ncyears <- str_extract(nc_files, "\\d{4}") expect_equal(as.numeric(ncyears), as.numeric(h5years)) ncvaryears <- str_extract(nc_var_files, "\\d{4}") expect_equal(as.numeric(ncvaryears), as.numeric(h5years)) }) -test_that("read.output extracts data from nc file",{ +test_that("read.output extracts data from nc file", { vars <- c("GPP", "NEE", "DOC_flux", "Evap", "TVeg", "Qsb", "Rainf") x <- - PEcAn.utils::read.output(runid = runid, - outdir = outdir, - variables = vars) + PEcAn.utils::read.output( + runid = runid, + outdir = outdir, + variables = vars + ) expect_true(all(names(x) %in% vars)) }) @@ -68,14 +70,16 @@ test_that("nc files have correct attributes", { testthat::local_edition(3) expect_s3_class(tmp.nc, "ncdf4") time <- ncdf4::ncvar_get(tmp.nc, "time") - gpp <- ncdf4::ncvar_get(tmp.nc, "GPP") + gpp <- ncdf4::ncvar_get(tmp.nc, "GPP") expect_equal(length(gpp), length(time)) expect_equal(ncdf4::ncvar_get(tmp.nc, "lat"), - as.numeric(settings$run$site$lat), - ignore_attr = TRUE) + as.numeric(settings$run$site$lat), + ignore_attr = TRUE + ) expect_equal(ncdf4::ncvar_get(tmp.nc, "lon"), - as.numeric(settings$run$site$lon), - ignore_attr = TRUE) + as.numeric(settings$run$site$lon), + ignore_attr = TRUE + ) }) test_that("both PFTs are found in nc files", { @@ -90,27 +94,25 @@ test_that("dimenstions have MsTMIP standard units", { expect_true(grepl("days since", dims$time$units)) }) -test_that("variables have MsTMIP standard units",{ +test_that("variables have MsTMIP standard units", { testthat::local_edition(3) data(standard_vars, package = "PEcAn.utils") - #exclude dimensions + # exclude dimensions std_var_names <- !standard_vars$Variable.Name %in% c("lat", "lon", "time") - - #drop any vars not in standard_vars - std_vars <- purrr::keep(vars, ~.x[["name"]] %in% std_var_names) - - #make dataframes for comparison - 
x <- std_vars %>% purrr::map_chr(~.x[["units"]]) + + # drop any vars not in standard_vars + std_vars <- purrr::keep(vars, ~ .x[["name"]] %in% std_var_names) + + # make dataframes for comparison + x <- std_vars %>% purrr::map_chr(~ .x[["units"]]) out_units <- data.frame(Variable.Name = names(x), Units = as.character(x)) std_units <- standard_vars[standard_vars$Variable.Name %in% names(x), c("Variable.Name", "Units")] - - #check for equality + + # check for equality expect_equal( dplyr::arrange(out_units, Variable.Name), dplyr::arrange(std_units, Variable.Name), ignore_attr = TRUE ) }) - - diff --git a/models/ed/tests/testthat/test.no_storage_resp.R b/models/ed/tests/testthat/test.no_storage_resp.R index b9c6935e2bd..6ad26254190 100644 --- a/models/ed/tests/testthat/test.no_storage_resp.R +++ b/models/ed/tests/testthat/test.no_storage_resp.R @@ -1,4 +1,3 @@ - l <- data(package = "PEcAn.ED2") histfiles <- grep("history", l$results[, "Item"], value = TRUE) myenv <- new.env() diff --git a/models/ed/tests/testthat/test.write.configs.ed.R b/models/ed/tests/testthat/test.write.configs.ed.R index 9609c739edd..9dd118cc709 100644 --- a/models/ed/tests/testthat/test.write.configs.ed.R +++ b/models/ed/tests/testthat/test.write.configs.ed.R @@ -1,7 +1,7 @@ ## #------------------------------------------------------------------------------- ## # Copyright (c) 2012 University of Illinois, NCSA. ## # All rights reserved. This program and the accompanying materials -## # are made available under the terms of the +## # are made available under the terms of the ## # University of Illinois/NCSA Open Source License ## # which accompanies this distribution, and is available at ## # http://opensource.ncsa.illinois.edu/license.html @@ -51,12 +51,15 @@ defaults <- test_that("convert.samples.ED works as expected", { testthat::local_edition(3) - expect_equal(convert.samples.ED(c("Vcmax" = 1))[["Vcmax"]], - 0.7052557) + expect_equal( + convert.samples.ED(c("Vcmax" = 1))[["Vcmax"]], + 0.7052557 + ) expect_equal(convert.samples.ED(c("plant_min_temp" = 0))[["plant_min_temp"]], 273.15) expect_equal(convert.samples.ED(c("root_respiration_rate" = 1))[["root_respiration_factor"]], - 0.35263, - tolerance = 1e-5) + 0.35263, + tolerance = 1e-5 + ) }) testdir <- tempfile() @@ -67,7 +70,7 @@ unzip("data/outdir.zip", exdir = testdir) outdir <- file.path(testdir, "outdir") test_that("write.config.jobsh.ED2() writes correct model2netcdf.ED2() args", { - settings <- + settings <- PEcAn.settings::read.settings(file.path(outdir, "pecan_checked.xml")) settings$outdir <- outdir job.sh <- write.config.jobsh.ED2(settings, run.id = "test_run") @@ -76,7 +79,7 @@ test_that("write.config.jobsh.ED2() writes correct model2netcdf.ED2() args", { }) test_that("write.config.jobsh.ED2() works with long list of PFTs", { - settings <- + settings <- PEcAn.settings::read.settings(file.path(outdir, "pecan_checked.xml")) more_pfts <- list( pft = list(name = "tempconif", ed2_pft_number = 7), @@ -84,7 +87,7 @@ test_that("write.config.jobsh.ED2() works with long list of PFTs", { pft = list(name = "temperate.Early_Hardwood", ed2_pft_number = 9), pft = list(name = "temperate.North_Mid_Hardwood", ed2_pft_number = 10), pft = list(name = "temperate.Late_Hardwood", ed2_pft_number = 11) - ) + ) settings$pfts <- append(settings$pfts, more_pfts) job.sh <- write.config.jobsh.ED2(settings, run.id = "test_run") expect <- deparse1(dput(extract_pfts(settings$pfts))) @@ -92,21 +95,21 @@ test_that("write.config.jobsh.ED2() works with long list of PFTs", { }) test_that("New 
ED2IN tags get added at bottom of file", { - #1. read in pecan.xml in data/pecan_checked.xml + # 1. read in pecan.xml in data/pecan_checked.xml settings <- PEcAn.settings::read.settings("data/pecan_checked.xml") - #for debugging: + # for debugging: # settings <- PEcAn.settings::read.settings("models/ed/tests/testthat/data/pecan_checked.xml") - - #2. Set rundir to tempdir + + # 2. Set rundir to tempdir rundir <- tempfile() dir.create(rundir) on.exit(unlink(rundir, recursive = TRUE)) settings$rundir <- rundir run.id <- "ENS-00001-76" dir.create(file.path(rundir, run.id)) - #3. add arbitrary ed2in_tag to settings list + # 3. add arbitrary ed2in_tag to settings list settings$model$ed2in_tags$NEW_TAG <- "0" - #4. run write.config.ED2 + # 4. run write.config.ED2 trait.values <- list( SetariaWT = structure( @@ -138,7 +141,7 @@ test_that("New ED2IN tags get added at bottom of file", { class = "data.frame" ) ) - + defaults <- list( pft = list( @@ -160,43 +163,43 @@ test_that("New ED2IN tags get added at bottom of file", { type = "message" ) PEcAn.logger::logger.setLevel(old_level) - - - #5. check if new tag exists + + + # 5. check if new tag exists ed2in_out <- read_ed2in(file.path(rundir, run.id, "ED2IN")) expect_equal(ed2in_out$NEW_TAG, 0) - - #check that info is printed + + # check that info is printed expect_true(any(stringr::str_detect(x, "NEW_TAG"))) - - #check that last non-comment line of ED2IN is "$END" - #TODO someone better at regex could do this more efficiently + + # check that last non-comment line of ED2IN is "$END" + # TODO someone better at regex could do this more efficiently lines <- trimws(readLines(file.path(rundir, run.id, "ED2IN"))) not_comments <- lines[stringr::str_detect(lines, "^!", negate = TRUE)] not_spaces <- not_comments[stringr::str_detect(not_comments, ".+")] expect_equal(not_spaces[length(not_spaces)], "$END") - - #6. compare to template + + # 6. compare to template # ed2in_template <- read_ed2in(system.file(settings$model$edin, package = "PEcAn.ED2")) # Not sure what to expect regarding tag names or number of tags relative to template }) test_that("write.config.xml.ED2() uses correct history file", { - #1. read in pecan.xml in data/pecan_checked.xml + # 1. read in pecan.xml in data/pecan_checked.xml settings <- PEcAn.settings::read.settings("data/pecan_checked.xml") - #for debugging: + # for debugging: # settings <- PEcAn.settings::read.settings("models/ed/tests/testthat/data/pecan_checked.xml") - - #2. Set rundir to tempdir + + # 2. Set rundir to tempdir rundir <- tempfile() dir.create(rundir) on.exit(unlink(rundir, recursive = TRUE)) settings$rundir <- rundir run.id <- "ENS-00001-76" dir.create(file.path(rundir, run.id)) - #3. set revision to 81 + # 3. 
set revision to 81 settings$model$revision <- "81" x <- capture.output( @@ -207,9 +210,8 @@ test_that("write.config.xml.ED2() uses correct history file", { ), type = "message" ) - + expect_true(any(stringr::str_detect(x, "history.r81"))) - }) @@ -218,7 +220,7 @@ test_that("write.config.xml.ED2() uses correct history file", { ## run = list(host = list(name = "ebi-cluster.igb.illinois.edu", ## rundir = "/home/scratch/tmp/", ## outdir = "/home/scratch/tmp/"))) - + ## system("ssh ebi-cluster.igb.illinois.edu 'touch /home/scratch/tmp/c.foo'") ## expect_output(remove.config.ED2(main.outdir = settings$outdir, settings = settings), ## "/home/scratch/tmp/c.foo") diff --git a/models/fates/R/met2model.FATES.R b/models/fates/R/met2model.FATES.R index 0398ff7a547..3c8e5dd8605 100755 --- a/models/fates/R/met2model.FATES.R +++ b/models/fates/R/met2model.FATES.R @@ -1,8 +1,7 @@ - # R Code to convert NetCDF CF met files into NetCDF FATES met files. ##' met2model wrapper for FATES -##' +##' ##' @title met2model for FATES ##' @export ##' @param in.path location on disk where inputs are stored @@ -17,139 +16,155 @@ ##' @param ... additional arguments, currently ignored ##' @importFrom ncdf4 ncvar_get ncdim_def ncatt_get ncvar_put -met2model.FATES <- function(in.path,in.prefix,outfolder,start_date,end_date,lst=0,lat, lon, overwrite = FALSE, verbose = FALSE, ...) { +met2model.FATES <- function(in.path, in.prefix, outfolder, start_date, end_date, lst = 0, lat, lon, overwrite = FALSE, verbose = FALSE, ...) { # General Structure- FATES Uses Netcdf so we need to rename vars, split files from years into months, and generate the header file # Get Met file from inpath. # Loop over years (Open nc.file,rename vars,change dimensions as needed,close/save .nc file) # close - # defining temporal dimension needs to be figured out. If we configure FATES to use same tstep then we may not need to change dimensions + # defining temporal dimension needs to be figured out. 
If we configure FATES to use same tstep then we may not need to change dimensions insert <- function(ncout, name, unit, data, dim) { - var <- ncdf4::ncvar_def(name, unit, dim = dim, missval = as.numeric(1.0e36), verbose = verbose) + var <- ncdf4::ncvar_def(name, unit, dim = dim, missval = as.numeric(1.0e36), verbose = verbose) ncout <- ncdf4::ncvar_add(ncout, var) ncdf4::ncvar_put(nc = ncout, varid = name, vals = data) return(invisible(ncout)) } - + ## Create output directory - if (!file.exists(outfolder)){ + if (!file.exists(outfolder)) { dir.create(outfolder) } - + ## Process start, end dates start_date <- as.POSIXlt(start_date, tz = "UTC", origin = "1700-01-01") - end_date <- as.POSIXlt(end_date, tz = "UTC", origin = "1700-01-01") + end_date <- as.POSIXlt(end_date, tz = "UTC", origin = "1700-01-01") start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) + end_year <- lubridate::year(end_date) ## Build met for (year in start_year:end_year) { - ## Process time - base_time <- difftime(paste0(year,"-01-01"),"1700-01-01", units="days") ## days of the year - if (lubridate::leap_year(year)){ # True + base_time <- difftime(paste0(year, "-01-01"), "1700-01-01", units = "days") ## days of the year + if (lubridate::leap_year(year)) { # True sm <- c(0, 31, 58, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) - } - else { + } else { sm <- c(0, 31, 59, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) } in.file <- file.path(in.path, paste(in.prefix, year, "nc", sep = ".")) if (file.exists(in.file)) { - ## Open netcdf file nc <- ncdf4::nc_open(in.file) ## extract variables. These need to be read in and converted to CLM names (all units are correct) - time <- ncdf4::ncvar_get(nc, "time") - LATIXY <- ncdf4::ncvar_get(nc, "latitude") - LONGXY <- ncdf4::ncvar_get(nc, "longitude") - FLDS <- ncdf4::ncvar_get(nc, "surface_downwelling_longwave_flux_in_air") ## W/m2 - FSDS <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") ## W/m2 - PRECTmms <- ncdf4::ncvar_get(nc, "precipitation_flux") ## kg/m2/s -> mm/s (same val, diff name) - PSRF <- ncdf4::ncvar_get(nc, "air_pressure") ## Pa - QBOT <- ncdf4::ncvar_get(nc, "specific_humidity") ## g/g -> kg/kg - TBOT <- ncdf4::ncvar_get(nc, "air_temperature") ## K - WIND <- sqrt(ncdf4::ncvar_get(nc, "eastward_wind") ^ 2 + ncdf4::ncvar_get(nc, "northward_wind") ^ 2) ## m/s - + time <- ncdf4::ncvar_get(nc, "time") + LATIXY <- ncdf4::ncvar_get(nc, "latitude") + LONGXY <- ncdf4::ncvar_get(nc, "longitude") + FLDS <- ncdf4::ncvar_get(nc, "surface_downwelling_longwave_flux_in_air") ## W/m2 + FSDS <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") ## W/m2 + PRECTmms <- ncdf4::ncvar_get(nc, "precipitation_flux") ## kg/m2/s -> mm/s (same val, diff name) + PSRF <- ncdf4::ncvar_get(nc, "air_pressure") ## Pa + QBOT <- ncdf4::ncvar_get(nc, "specific_humidity") ## g/g -> kg/kg + TBOT <- ncdf4::ncvar_get(nc, "air_temperature") ## K + WIND <- sqrt(ncdf4::ncvar_get(nc, "eastward_wind")^2 + ncdf4::ncvar_get(nc, "northward_wind")^2) ## m/s + ## CREATE MONTHLY FILES for (mo in 1:12) { - if (((year==start_year) & (mo<lubridate::month(start_date))) | ((year==end_year) & (mo>lubridate::month(end_date)))){ + if (((year == start_year) & (mo < lubridate::month(start_date))) | ((year == end_year) & (mo > lubridate::month(end_date)))) { next - } - else { + } else { # slice - tsel <- which(time > base_time+sm[mo] & time <= base_time+sm[mo+1]) + tsel <- which(time > base_time + sm[mo] & time <= base_time + sm[mo + 1]) print(mo) - if (length(tsel)!=0){ + if (length(tsel) != 0) { # define dim - lat.dim 
<- ncdf4::ncdim_def(name = "lat", units = "", vals = 1:1, create_dimvar=FALSE) - lon.dim <- ncdf4::ncdim_def(name = "lon", units = "", vals = 1:1, create_dimvar=FALSE) - time.dim <- ncdf4::ncdim_def(name = "time", units = "", vals = 1:length(time[tsel]),create_dimvar = TRUE, calendar="standard", unlim = FALSE) #left to CTSM automatically transfer - scalar.dim <- ncdf4::ncdim_def(name="scalar", units = "", vals = 1:1) - dim <- list(time.dim, lat.dim, lon.dim) - + lat.dim <- ncdf4::ncdim_def(name = "lat", units = "", vals = 1:1, create_dimvar = FALSE) + lon.dim <- ncdf4::ncdim_def(name = "lon", units = "", vals = 1:1, create_dimvar = FALSE) + time.dim <- ncdf4::ncdim_def(name = "time", units = "", vals = 1:length(time[tsel]), create_dimvar = TRUE, calendar = "standard", unlim = FALSE) # left to CTSM automatically transfer + scalar.dim <- ncdf4::ncdim_def(name = "scalar", units = "", vals = 1:1) + dim <- list(time.dim, lat.dim, lon.dim) + # LATITUDE - var_lat <- ncdf4::ncvar_def(name = "LATIXY", units = "degree_north", - dim = list(lat.dim, lon.dim), missval = as.numeric(-9999)) + var_lat <- ncdf4::ncvar_def( + name = "LATIXY", units = "degree_north", + dim = list(lat.dim, lon.dim), missval = as.numeric(-9999) + ) # LONGITUDE - var_long <- ncdf4::ncvar_def(name = "LONGXY", units = "degree_east", - dim = list(lat.dim, lon.dim), missval = as.numeric(-9999)) + var_long <- ncdf4::ncvar_def( + name = "LONGXY", units = "degree_east", + dim = list(lat.dim, lon.dim), missval = as.numeric(-9999) + ) # time - var_time <- ncdf4::ncvar_def(name = "time", units = "days since 1700-01-01", prec = "float", - dim = list(time.dim), missval = as.numeric(-9999)) + var_time <- ncdf4::ncvar_def( + name = "time", units = "days since 1700-01-01", prec = "float", + dim = list(time.dim), missval = as.numeric(-9999) + ) # EDGEE - var_E <- ncdf4::ncvar_def(name = "EDGEE", units = "degrees_east", - dim = list(scalar.dim, lat.dim, lon.dim), missval = as.numeric(-9999)) + var_E <- ncdf4::ncvar_def( + name = "EDGEE", units = "degrees_east", + dim = list(scalar.dim, lat.dim, lon.dim), missval = as.numeric(-9999) + ) # EDGEW edge for resolution , edge-central 0.005, # PEcAn provide range of grid? 
- var_W <- ncdf4::ncvar_def(name = "EDGEW", units = "degrees_west", - dim = list(scalar.dim, lat.dim, lon.dim), missval = as.numeric(-9999)) + var_W <- ncdf4::ncvar_def( + name = "EDGEW", units = "degrees_west", + dim = list(scalar.dim, lat.dim, lon.dim), missval = as.numeric(-9999) + ) # EDGES - var_S <- ncdf4::ncvar_def(name = "EDGES", units = "degrees_south", - dim = list(scalar.dim, lat.dim, lon.dim), missval = as.numeric(-9999)) + var_S <- ncdf4::ncvar_def( + name = "EDGES", units = "degrees_south", + dim = list(scalar.dim, lat.dim, lon.dim), missval = as.numeric(-9999) + ) # EDGEN - var_N <- ncdf4::ncvar_def(name = "EDGEN", units = "degrees_north", - dim = list(scalar.dim, lat.dim, lon.dim), missval = as.numeric(-9999)) - + var_N <- ncdf4::ncvar_def( + name = "EDGEN", units = "degrees_north", + dim = list(scalar.dim, lat.dim, lon.dim), missval = as.numeric(-9999) + ) + ## SAPERATELY CREATE FILES - put_var <- function(ncout){ - ncdf4::ncvar_put(nc = ncout, varid = "LATIXY", vals = LATIXY) #same with FATES + put_var <- function(ncout) { + ncdf4::ncvar_put(nc = ncout, varid = "LATIXY", vals = LATIXY) # same with FATES ncdf4::ncvar_put(nc = ncout, varid = "LONGXY", vals = LONGXY) - ncdf4::ncvar_put(nc = ncout, varid = "EDGEE", vals = LONGXY+0.005) - ncdf4::ncvar_put(nc = ncout, varid = "EDGEW", vals = LONGXY-0.005) - ncdf4::ncvar_put(nc = ncout, varid = "EDGES", vals = LATIXY-0.005) - ncdf4::ncvar_put(nc = ncout, varid = "EDGEN", vals = LATIXY+0.005) + ncdf4::ncvar_put(nc = ncout, varid = "EDGEE", vals = LONGXY + 0.005) + ncdf4::ncvar_put(nc = ncout, varid = "EDGEW", vals = LONGXY - 0.005) + ncdf4::ncvar_put(nc = ncout, varid = "EDGES", vals = LATIXY - 0.005) + ncdf4::ncvar_put(nc = ncout, varid = "EDGEN", vals = LATIXY + 0.005) } ## Precipitation - outfile_prec <- file.path(outfolder, paste0("Prec", formatC(year, width = 4, flag = "0"), "-", - formatC(mo, width = 2, flag = "0"), ".nc")) + outfile_prec <- file.path(outfolder, paste0( + "Prec", formatC(year, width = 4, flag = "0"), "-", + formatC(mo, width = 2, flag = "0"), ".nc" + )) if (file.exists(outfile_prec) & overwrite == FALSE) { next } - ncout_prec <- ncdf4::nc_create(outfile_prec, vars = list(var_lat,var_long,var_E,var_W,var_S,var_N), verbose = verbose) + ncout_prec <- ncdf4::nc_create(outfile_prec, vars = list(var_lat, var_long, var_E, var_W, var_S, var_N), verbose = verbose) put_var(ncout_prec) ## precipitation_flux ncout_prec <- insert(ncout_prec, "PRECTmms", "mm/s", PRECTmms[tsel], dim) ncdf4::nc_close(ncout_prec) ## Solar - outfile_slr <- file.path(outfolder, paste0("Slr", formatC(year, width = 4, flag = "0"), "-", - formatC(mo, width = 2, flag = "0"), ".nc")) + outfile_slr <- file.path(outfolder, paste0( + "Slr", formatC(year, width = 4, flag = "0"), "-", + formatC(mo, width = 2, flag = "0"), ".nc" + )) if (file.exists(outfile_slr) & overwrite == FALSE) { next } - ncout_slr <- ncdf4::nc_create(outfile_slr, vars = list(var_lat,var_long,var_E,var_W,var_S,var_N), verbose = verbose) + ncout_slr <- ncdf4::nc_create(outfile_slr, vars = list(var_lat, var_long, var_E, var_W, var_S, var_N), verbose = verbose) put_var(ncout_slr) ## surface_downwelling_shortwave_flux_in_air ncout_slr <- insert(ncout_slr, "FSDS", "W m-2", FSDS[tsel], dim) ncdf4::nc_close(ncout_slr) - + ## Temerature and humidity - outfile_tem <- file.path(outfolder, paste0("Tem", formatC(year, width = 4, flag = "0"), "-", - formatC(mo, width = 2, flag = "0"), ".nc")) + outfile_tem <- file.path(outfolder, paste0( + "Tem", formatC(year, width = 4, flag = "0"), "-", + 
formatC(mo, width = 2, flag = "0"), ".nc" + )) if (file.exists(outfile_tem) & overwrite == FALSE) { next } - ncout_tem <- ncdf4::nc_create(outfile_tem, vars = list(var_lat,var_long,var_E,var_W,var_S,var_N), verbose = verbose) + ncout_tem <- ncdf4::nc_create(outfile_tem, vars = list(var_lat, var_long, var_E, var_W, var_S, var_N), verbose = verbose) put_var(ncout_tem) ## surface_downwelling_longwave_flux_in_air ncout_tem <- insert(ncout_tem, "FLDS", "W m-2", FLDS[tsel], dim) @@ -161,22 +176,24 @@ met2model.FATES <- function(in.path,in.prefix,outfolder,start_date,end_date,lst= ncout_tem <- insert(ncout_tem, "TBOT", "K", TBOT[tsel], dim) ## eastward_wind & northward_wind ncout_tem <- insert(ncout_tem, "WIND", "m/s", WIND[tsel], dim) - ncdf4::nc_close(ncout_tem) + ncdf4::nc_close(ncout_tem) } } } - ncdf4::nc_close(nc) + ncdf4::nc_close(nc) } ## end input file } ## end year loop over met files - results <- data.frame(file = paste0(outfolder, "/"), - host = c(PEcAn.remote::fqdn()), - mimetype = c("application/x-netcdf"), - formatname = c("CLM met"), - startdate = c(start_date), - enddate = c(end_date), - dbfile.name = "", - stringsAsFactors = FALSE) - + results <- data.frame( + file = paste0(outfolder, "/"), + host = c(PEcAn.remote::fqdn()), + mimetype = c("application/x-netcdf"), + formatname = c("CLM met"), + startdate = c(start_date), + enddate = c(end_date), + dbfile.name = "", + stringsAsFactors = FALSE + ) + PEcAn.logger::logger.info("Done with met2model.FATES") return(invisible(results)) } # met2model.FATES diff --git a/models/fates/R/model2netcdf.FATES.R b/models/fates/R/model2netcdf.FATES.R index 382c23056f1..d951c6d8410 100644 --- a/models/fates/R/model2netcdf.FATES.R +++ b/models/fates/R/model2netcdf.FATES.R @@ -1,4 +1,3 @@ - ##' @name model2netcdf.FATES ##' @title Code to convert FATES netcdf output into into CF standard ##' @@ -9,8 +8,8 @@ ##' @param end_date End time of the simulation, not string ##' @param vars_names Names of Selected variables in PEcAn format, (e.g. 
c("","")) ##' @param pfts a named vector of PFT numbers where the names are PFT names -##' -##' @examples +##' +##' @examples ##' \dontrun{ ##' example.output <- system.file("case.clm2.h0.2004-01-01-00000.nc",package="PEcAn.FATES") ##' model2netcdf.FATES(outdir="~/",sitelat, sitelon, start_date, end_date, vars_names, pfts) @@ -18,150 +17,161 @@ ##' ##' @author Michael Dietze, Shawn Serbin ## modified Yucong Hu 22/07/24 -##' +##' ##' @export -model2netcdf.FATES <- function(outdir, sitelat, sitelon, start_date, end_date, vars_names, pfts){ +model2netcdf.FATES <- function(outdir, sitelat, sitelon, start_date, end_date, vars_names, pfts) { ## Tips: matched_var could be expanded for more selected variables matched_var <- tibble::tribble( ~fatesname, ~pecanname, ~pecanunits, ~longname, - "FATES_GPP_PF","GPP","kgC m-2 s-1","Gross Primary Productivity", - "FATES_NPP_PF","NPP","kg m-2 yr-1", "Total PFT-level NPP in kg carbon per m2 land area per second", - "NEE","NEE","kgC m-2 s-1", "Net Ecosystem Exchange of carbon, includes fire and hrv_xsmrpool", - "TLAI","LAI","m2 m-2","Total projected leaf area index", - "ER","TotalResp","kgC m-2 s-1","Total Respiration", - "AR","AutoResp","kgC m-2 s-1","Autotrophic respiration (MR + GR)", - "HR","HeteroResp","kgC m-2 s-1","Total heterotrophic respiration", - "SR","SoilResp","kgC m-2 s-1","Total soil respiration (HR + root resp)", - "Qle","Evap","kgC m-2 s-1","Total evaporation", - "QVEGT","Transp","kg m-2 s-1","Canopy transpiration") - - ## Update unit, dimension and - var_update <- function(out,oldname,newname,nc_month,nc_month_names,newunits=NULL,long_name=NULL){ - if (oldname %in% nc_month_names) { + "FATES_GPP_PF", "GPP", "kgC m-2 s-1", "Gross Primary Productivity", + "FATES_NPP_PF", "NPP", "kg m-2 yr-1", "Total PFT-level NPP in kg carbon per m2 land area per second", + "NEE", "NEE", "kgC m-2 s-1", "Net Ecosystem Exchange of carbon, includes fire and hrv_xsmrpool", + "TLAI", "LAI", "m2 m-2", "Total projected leaf area index", + "ER", "TotalResp", "kgC m-2 s-1", "Total Respiration", + "AR", "AutoResp", "kgC m-2 s-1", "Autotrophic respiration (MR + GR)", + "HR", "HeteroResp", "kgC m-2 s-1", "Total heterotrophic respiration", + "SR", "SoilResp", "kgC m-2 s-1", "Total soil respiration (HR + root resp)", + "Qle", "Evap", "kgC m-2 s-1", "Total evaporation", + "QVEGT", "Transp", "kg m-2 s-1", "Canopy transpiration" + ) + ## Update unit, dimension and + var_update <- function(out, oldname, newname, nc_month, nc_month_names, newunits = NULL, long_name = NULL) { + if (oldname %in% nc_month_names) { ## define units of variables - oldunits <- ncdf4::ncatt_get(nc_month,oldname,"units")$value - if (oldunits=="gC/m^2/s") oldunits <- "gC m-2 s-1" - if (oldname=="TLAI") oldunits <- "m2 m-2" # delete old unit ='none' - if (is.null(newunits)) newunits = oldunits - + oldunits <- ncdf4::ncatt_get(nc_month, oldname, "units")$value + if (oldunits == "gC/m^2/s") oldunits <- "gC m-2 s-1" + if (oldname == "TLAI") oldunits <- "m2 m-2" # delete old unit ='none' + if (is.null(newunits)) newunits <- oldunits + ## check pft dimensions d_name <- c() - for (i in (nc_month$var[[oldname]]$dim)){ - d_name <- append(d_name, i$name) + for (i in (nc_month$var[[oldname]]$dim)) { + d_name <- append(d_name, i$name) } - if (any(grepl('pft',d_name))){ + if (any(grepl("pft", d_name))) { dimension <- xypt # include fates_levpft - }else{ + } else { dimension <- xyt # only xyt - } + } ## transpose dimensions into (,t) - if (d_name[length(d_name)]=='time'){ - dat_0 <- ncdf4::ncvar_get(nc_month,oldname) 
# time at the tail of dims - dat.new <- PEcAn.utils::misc.convert(dat_0,oldunits,newunits) # convert data units + if (d_name[length(d_name)] == "time") { + dat_0 <- ncdf4::ncvar_get(nc_month, oldname) # time at the tail of dims + dat.new <- PEcAn.utils::misc.convert(dat_0, oldunits, newunits) # convert data units } - newvar <- ncdf4::ncvar_def(name = newname, units = newunits, longname=long_name, dim = dimension) - - ## adding target variables into out - if(is.null(out)) { - out <- list(var <- list(),dat <- list(), dimm<-list()) + newvar <- ncdf4::ncvar_def(name = newname, units = newunits, longname = long_name, dim = dimension) + + ## adding target variables into out + if (is.null(out)) { + out <- list(var <- list(), dat <- list(), dimm <- list()) out$var[[1]] <- newvar out$dat[[1]] <- dat.new - out$dimm[[1]]<- length(dimension) + out$dimm[[1]] <- length(dimension) } else { i <- length(out$var) + 1 out$var[[i]] <- newvar out$dat[[i]] <- dat.new - out$dimm[[i]]<- length(dimension) + out$dimm[[i]] <- length(dimension) } - return(out) + return(out) } } - + ## Get files and years - files <- dir(outdir, "*clm2.h0.*.nc", full.names = TRUE) # currently specific to clm2.h0 files + files <- dir(outdir, "*clm2.h0.*.nc", full.names = TRUE) # currently specific to clm2.h0 files start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) start_month <- lubridate::month(start_date) end_month <- lubridate::month(end_date) ## Loop over years - for (year in start_year:end_year){ + for (year in start_year:end_year) { oname <- file.path(dirname(files[1]), paste0(year, ".nc")) out <- NULL - + ## monthly write files - for (mo in 1:12){ - if (((year == start_year) & mo < start_month) | ((year == end_year) & mo > end_month)){ + for (mo in 1:12) { + if (((year == start_year) & mo < start_month) | ((year == end_year) & mo > end_month)) { next ## skip unselected months - } - else{ - if (mo < 10){ - month_file <- paste0(gsub("h0.*.nc","",files[1]),"h0.",year,"-0",mo,".nc") - }else{ - month_file <- paste0(gsub("h0.*.nc","",files[1]),"h0.",year,"-",mo,".nc") + } else { + if (mo < 10) { + month_file <- paste0(gsub("h0.*.nc", "", files[1]), "h0.", year, "-0", mo, ".nc") + } else { + month_file <- paste0(gsub("h0.*.nc", "", files[1]), "h0.", year, "-", mo, ".nc") } nc_month <- ncdf4::nc_open(month_file) # read monthly output file of FATES model nc_month_names <- names(nc_month$var) ## create time bounds to populate time_bounds variable iteratively var_bound <- ncdf4::ncvar_get(nc_month, "time_bounds") # start,end day of month - - ## define dimensions - t <- ncdf4::ncdim_def(name = "time", units = "days since 1700-01-01 00:00:00", - vals = as.double(1.0:1.0), calendar = "noleap", unlim = TRUE) - time_interval <- ncdf4::ncdim_def(name = "hist_interval", - longname = "history time interval endpoint dimensions",vals = 1:2, units = "") + + ## define dimensions + t <- ncdf4::ncdim_def( + name = "time", units = "days since 1700-01-01 00:00:00", + vals = as.double(1.0:1.0), calendar = "noleap", unlim = TRUE + ) + time_interval <- ncdf4::ncdim_def( + name = "hist_interval", + longname = "history time interval endpoint dimensions", vals = 1:2, units = "" + ) lat <- ncdf4::ncdim_def("lat", "degrees_north", vals = as.double(1.0:1.0), longname = "coordinate_latitude") - lon <- ncdf4::ncdim_def("lon", "degrees_east", vals = as.double(1.0:1.0), longname = "coordinate_longitude") - pft <- ncdf4::ncdim_def('pft', '', vals=1:12, longname = "FATES pft number") - xyt <- list(lon, lat, t) + lon <- ncdf4::ncdim_def("lon", 
"degrees_east", vals = as.double(1.0:1.0), longname = "coordinate_longitude") + pft <- ncdf4::ncdim_def("pft", "", vals = 1:12, longname = "FATES pft number") + xyt <- list(lon, lat, t) xypt <- list(lon, lat, pft, t) - + ## write monthly files with start(1,1,i) - for (var_s in vars_names){ + for (var_s in vars_names) { matched_ind <- which(matched_var$pecanname == var_s) - out <- var_update(out, matched_var$fatesname[matched_ind],matched_var$pecanname[matched_ind], - nc_month,nc_month_names,matched_var$pecanunits[matched_ind],matched_var$longname[matched_ind]) + out <- var_update( + out, matched_var$fatesname[matched_ind], matched_var$pecanname[matched_ind], + nc_month, nc_month_names, matched_var$pecanunits[matched_ind], matched_var$longname[matched_ind] + ) } - out$var[[length(out$var) + 1]] <- ncdf4::ncvar_def(name="time_bounds", units='', - longname = "history time interval endpoints", dim=list(time_interval,t), prec = "double") - out$dat[[length(out$dat) + 1]] <- c(rbind(var_bound[1], var_bound[2])) #start, end days of the year + out$var[[length(out$var) + 1]] <- ncdf4::ncvar_def( + name = "time_bounds", units = "", + longname = "history time interval endpoints", dim = list(time_interval, t), prec = "double" + ) + out$dat[[length(out$dat) + 1]] <- c(rbind(var_bound[1], var_bound[2])) # start, end days of the year out$dimm[[length(out$dimm) + 1]] <- 2 ## define vars - if (((year != start_year) & (mo == 1)) | ((year == start_year) & (mo == start_month))){ + if (((year != start_year) & (mo == 1)) | ((year == start_year) & (mo == start_month))) { ncout <- ncdf4::nc_create(oname, out$var) # create yearly nc file - time_var <- ncdf4::ncvar_def(name = "time", units = "days since 1700-01-01 00:00:00",longname = "time", dim = list(t), prec = "double") - lat_var <- ncdf4::ncvar_def(name = "lat", units = "degrees_north", longname = "coordinate_latitude", dim = list(lat), prec = "double") - lon_var <- ncdf4::ncvar_def(name = "lon", units = "degrees_east", longname = "coordinate_longitude", dim = list(lon), prec = "double") - + time_var <- ncdf4::ncvar_def(name = "time", units = "days since 1700-01-01 00:00:00", longname = "time", dim = list(t), prec = "double") + lat_var <- ncdf4::ncvar_def(name = "lat", units = "degrees_north", longname = "coordinate_latitude", dim = list(lat), prec = "double") + lon_var <- ncdf4::ncvar_def(name = "lon", units = "degrees_east", longname = "coordinate_longitude", dim = list(lon), prec = "double") + ncdf4::ncvar_put(ncout, lat_var, sitelat, start = c(1)) ncdf4::ncvar_put(ncout, lon_var, sitelon, start = c(1)) } - ## put time and vars - ncdf4::ncvar_put(ncout, time_var, mean(var_bound), start=c(mo), count=c(1)) + ## put time and vars + ncdf4::ncvar_put(ncout, time_var, mean(var_bound), start = c(mo), count = c(1)) for (i in seq_along(out$var)) { - if(out$dimm[[i]]==4){ # xypt - ncdf4::ncvar_put(ncout, out$var[[i]], out$dat[[i]], start=c(1,1,1,mo), count=c(1,1,12,1)) - }else if (out$dimm[[i]]==3) { # xyt - ncdf4::ncvar_put(ncout, out$var[[i]], out$dat[[i]], start=c(1,1,mo)) - }else{ # time_bounds - ncdf4::ncvar_put(ncout, out$var[[i]], out$dat[[i]], start=c(1,mo)) + if (out$dimm[[i]] == 4) { # xypt + ncdf4::ncvar_put(ncout, out$var[[i]], out$dat[[i]], start = c(1, 1, 1, mo), count = c(1, 1, 12, 1)) + } else if (out$dimm[[i]] == 3) { # xyt + ncdf4::ncvar_put(ncout, out$var[[i]], out$dat[[i]], start = c(1, 1, mo)) + } else { # time_bounds + ncdf4::ncvar_put(ncout, out$var[[i]], out$dat[[i]], start = c(1, mo)) } - } + } } - } ## monthly convert variable into PEcAn 
format + } ## monthly convert variable into PEcAn format } ## extract variable and long names to VAR file for PEcAn vis - utils::write.table(sapply(ncout$var, function(x) { x$longname }), - file = paste0(oname, ".var"), - col.names = FALSE, - row.names = TRUE, - quote = FALSE) + utils::write.table( + sapply(ncout$var, function(x) { + x$longname + }), + file = paste0(oname, ".var"), + col.names = FALSE, + row.names = TRUE, + quote = FALSE + ) try(ncdf4::nc_close(ncout)) ## end of year for loop -} ## model2netcdf.FATES \ No newline at end of file +} ## model2netcdf.FATES diff --git a/models/fates/R/recurse.create.R b/models/fates/R/recurse.create.R index c274f34a871..84efe3d9d25 100644 --- a/models/fates/R/recurse.create.R +++ b/models/fates/R/recurse.create.R @@ -1,4 +1,3 @@ - ##' @name recurse.create ##' @title recurse.create ##' @description recursively follow the file structure in 'ins' and create all the same folders in 'path' as well as symbolic links to all the file. This is done, rather than creating a symbolic link to the whole structure, so individual files can later be unlinked and replaced with different files/links. @@ -11,19 +10,19 @@ recurse.create <- function(path, ins) { if (length(files) == 0) { return() } - is.dir <- dir.exists(files) ## determine which files are actually folders + is.dir <- dir.exists(files) ## determine which files are actually folders ## create links to true files curr <- files[!is.dir] for (i in seq_along(curr)) { file.symlink(curr[i], file.path(path, basename(curr[i]))) } - + down <- files[is.dir] for (i in seq_along(down)) { ndir <- file.path(path, basename(down[i])) ## create folders dir.create(ndir) - + ## recurse to fill folders recurse.create(ndir, down[i]) } diff --git a/models/fates/R/write.configs.FATES.R b/models/fates/R/write.configs.FATES.R index 5b8fabb6a6f..077609b0778 100644 --- a/models/fates/R/write.configs.FATES.R +++ b/models/fates/R/write.configs.FATES.R @@ -9,827 +9,1020 @@ ##' @return none ##' @export ##' @author Mike Dietze, Shawn Serbin -write.config.FATES <- function(defaults, trait.values, settings, run.id){ - - ## site information - site <- settings$run$site - site.id <- as.numeric(site$id) - - # find out where things are - local.rundir <- file.path(settings$rundir, run.id) ## this is on local machine for staging - rundir <- file.path(settings$host$rundir, run.id) ## this is on remote machine for execution - casedir <- file.path(rundir,"case") - outdir <- file.path(settings$host$outdir, run.id) - refcase <- settings$model$binary - bld <- file.path(refcase,"bld") - binary <- file.path(bld,"cesm.exe") - indir <- file.path(rundir,"input") ## input directory - default <- settings$run$inputs$default$path ## reference inputs file structure - site_name <- paste0(site.id %/% 1000000000, "-", site.id %% 1000000000) - - ## DATES - ## CLM is a bit odd and takes a start date and length, so we need to precompute - ## this needs to be generalized to fractional years, but accounting for 365 day year - start_date <- as.Date(settings$run$start.date) - end_date <- as.Date(settings$run$end.date) - stop_n <- as.numeric(end_date - start_date, units="days") - PEcAn.utils::n_leap_day(start_date,end_date) + 1 - - ##-----------------------------------------------------------------------## - ## ## - ## INPUTS ## - ## ## - ##-----------------------------------------------------------------------## - - ## SITE INFO --> DOMAIN FILE (lat/lon) - # - should also store this in the refcase directory for PEcAn so we can grab from there, and not the PEcAn 
package - gridres <- 0.125 ## ultimately this should be a variable - lat <- as.numeric(site$lat) - lon <- (as.numeric(site$lon) + 360) %% 360 ## make sure coords in 0-360 range, not negative - domain.default <- system.file("domain.lnd.1x1pt-brazil_navy.090715.nc",package="PEcAn.FATES") - domain.file <- file.path(local.rundir,paste0("domain.lnd.",site_name,".nc")) - file.copy(domain.default,domain.file) - domain.nc <- ncdf4::nc_open(domain.file,write=TRUE) - ncdf4::ncvar_put(nc=domain.nc, varid='xc', vals=lon) - ncdf4::ncvar_put(nc=domain.nc, varid='yc', vals=lat) - ncdf4::ncvar_put(nc=domain.nc, varid='xv', vals=lon+c(-1,1,1,-1)*gridres) - ncdf4::ncvar_put(nc=domain.nc, varid='yv', vals=lat+c(-1,-1,1,1)*gridres) - ncdf4::ncvar_put(nc=domain.nc, varid='area', vals=(2*gridres*pi/180)^2) - ncdf4::nc_close(domain.nc) - - ## SURF - should also store this in the refcase directory for PEcAn so we can grab from there, and not the PEcAn package - surf.default <- system.file("surfdata_1x1_brazil_16pfts_Irrig_CMIP6_simyr2000_c171214.nc",package = "PEcAn.FATES") - surf.file <- file.path(local.rundir,paste0("surfdata_",site_name,"_simyr2000.nc")) - file.copy(surf.default,surf.file) - Sys.chmod(surf.file) - surf.nc <- ncdf4::nc_open(surf.file,write=TRUE) - ncdf4::ncvar_put(nc=surf.nc, varid='LONGXY', vals=lon) - ncdf4::ncvar_put(nc=surf.nc, varid='LATIXY', vals=lat) - ncdf4::nc_close(surf.nc) - - ## MET HEADERS - if(!is.null(settings$run$inputs$met)){ - - ## DATM HEADER: datm_atm_in - datm <- readLines(con=system.file("datm_atm_in.template",package = "PEcAn.FATES"),n=-1) - datm <- gsub('@DOMAIN@', file.path(indir,"share/domains/domain.clm",basename(domain.file)), datm) - datm <- gsub('@START_YEAR@',lubridate::year(start_date), datm) - datm <- gsub('@END_YEAR@',lubridate::year(end_date), datm) - writeLines(datm, con=file.path(local.rundir, "datm_atm_in")) - - ## DATM STREAM MET - met <- readLines(con=system.file("datm.streams.txt.PEcAn_met.template",package = "PEcAn.FATES"),n=-1) - met <- gsub('@INDIR@',indir, met) - #domain.file.name <- paste0("domain.lnd.",site_name,".nc") - #met <- gsub('@DOMAIN@',domain.file.name, met) # attempting to provide correct domain file name - met <- gsub('@MET_PATH@',settings$run$inputs$met$path, met) - met.files <- dir(settings$run$inputs$met$path,"*.nc") - met <- gsub('@MET_FILES@',paste(met.files,collapse = "\n "), met) - writeLines(met, con=file.path(local.rundir, "datm.streams.txt.PEcAn_met")) - - } -# ... need to set this up so that if MET is blank it can run with default CLM met -# ... fill in this template, the met template, and then have jobs.sh put them in the right place. -# ... 
Test, then adjust DB to have met required - - - ##-----------------------------------------------------------------------## - ## ## - ## JOB.SH ## - ## ## - ##-----------------------------------------------------------------------## - -# create launch script (which will create symlink) - if (!is.null(settings$model$jobtemplate) && file.exists(settings$model$jobtemplate)) { - jobsh <- readLines(con=settings$model$jobtemplate, n=-1) - } else { - jobsh <- readLines(con=system.file("template.job", package = "PEcAn.FATES"), n=-1) - } - - # create host specific setttings - hostsetup <- "" - if (!is.null(settings$model$prerun)) { - hostsetup <- paste(hostsetup, sep="\n", paste(settings$model$prerun, collapse="\n")) - } - if (!is.null(settings$host$prerun)) { - hostsetup <- paste(hostsetup, sep="\n", paste(settings$host$prerun, collapse="\n")) - } - - hostteardown <- "" - if (!is.null(settings$model$postrun)) { - hostteardown <- paste(hostteardown, sep="\n", paste(settings$model$postrun, collapse="\n")) - } - if (!is.null(settings$host$postrun)) { - hostteardown <- paste(hostteardown, sep="\n", paste(settings$host$postrun, collapse="\n")) - } - -# create job.sh - jobsh <- gsub('@HOST_SETUP@', hostsetup, jobsh) - jobsh <- gsub('@HOST_TEARDOWN@', hostteardown, jobsh) - - ## Machine configs - # ./create_newcase -case @CASEDIR@ -res 1x1_brazil -compset ICLM45ED -mach @MACHINE@ -compiler @COMPILER@ -project @PROJECT@ - if (!is.null(settings$model$machine)) { - machine <- paste(settings$model$machine, collapse="\n") - } else { - machine <- "eddi" - } - jobsh <- gsub('@MACHINE@', machine, jobsh) - if (!is.null(settings$model$compiler)) { - compiler <- paste(settings$model$compiler, collapse="\n") - } else { - compiler <- "gnu" - } - jobsh <- gsub('@COMPILER@', compiler, jobsh) - if (!is.null(settings$model$resolution)) { - resolution <- paste(settings$model$resolution, collapse="\n") - } else { - resolution <- "1x1_brazil" - } - jobsh <- gsub('@RES@', resolution, jobsh) - if (!is.null(settings$model$compset)) { - compset <- paste(settings$model$compset, collapse="\n") - } else { - compset <- "I2000Clm50FatesGs" - } - jobsh <- gsub('@COMPSET@', compset, jobsh) - if (!is.null(settings$model$project)) { - project <- paste(settings$model$project, collapse="\n") - } else { - project <- "pecan" - } - jobsh <- gsub('@PROJECT@', project, jobsh) - - ## PATHS - jobsh <- gsub('@RUNDIR@', rundir, jobsh) - jobsh <- gsub('@CASEDIR@', casedir, jobsh) - jobsh <- gsub('@OUTDIR@', outdir, jobsh) - jobsh <- gsub('@REFCASE@', refcase, jobsh) - jobsh <- gsub('@BLD@', bld, jobsh) - jobsh <- gsub('@BINARY@', binary, jobsh) - jobsh <- gsub('@INDIR@', indir, jobsh) - jobsh <- gsub('@DEFAULT@', default, jobsh) - jobsh <- gsub('@SITE_NAME@', site_name, jobsh) - - ## DATES -> ENV_RUN - jobsh <- gsub('@START_DATE@', start_date, jobsh) - jobsh <- gsub('@STOP_N@', stop_n, jobsh) - jobsh <- gsub('@RUN_ID@', run.id, jobsh) - - ## MET --> DATM -# jobsh <- gsub('@SITE_MET@', settings$run$inputs$met$path, jobsh) - ## FOR FIRST STEP, CAN USE DEFAULT - - writeLines(jobsh, con=file.path(settings$rundir, run.id, "job.sh")) - Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) -# -# ## Write PARAMETER file - - ## COPY AND OPEN DEFAULT PARAMETER FILES - # TODO: update this to read param files (CLM and FATES) out of the refcase directory, not the PEcAn package - # TODO: update to allow it to pick between CLM4.5 and CLM5 parameter set based on refcase, user selection - ## See issue https://github.com/PecanProject/pecan/issues/1008 - # CLM - 
#clm.param.default <- system.file("clm5_params.c171117.nc",package="PEcAn.FATES") - #clm.param.file <- file.path(local.rundir,paste0("clm_params.",run.id,".nc")) - clm.param.default <- file.path(refcase,"clm5_params.c171117.nc") # probably need to allow custom param file names here (pecan.xml?) - clm.param.file <- file.path(local.rundir,paste0("clm_params.",run.id,".nc")) - file.copy(clm.param.default,clm.param.file) - clm.param.nc <- ncdf4::nc_open(clm.param.file,write=TRUE) - - # FATES - #fates.param.default <- system.file("fates_params_2troppftclones.c171018_sps.nc",package="PEcAn.FATES") - # above is a temporary param file corrected for the tropics by lowering freezing tolerace parameters - fates.param.default <- file.path(refcase,"fates_params_2troppftclones.c171018_sps.nc") # probably need to allow custom param file names here (pecan.xml?) - fates.param.file <- file.path(local.rundir,paste0("fates_params.",run.id,".nc")) - file.copy(fates.param.default,fates.param.file) - fates.param.nc <- ncdf4::nc_open(fates.param.file,write=TRUE) - - ## Loop over PFTS - npft <- length(trait.values) - PEcAn.logger::logger.debug(npft) - PEcAn.logger::logger.debug(dim(trait.values)) - PEcAn.logger::logger.debug(names(trait.values)) - #pftnames <- stringr::str_trim(tolower(ncvar_get(param.nc,"pftname"))) - pftnames <- stringr::str_trim(tolower(ncdf4::ncvar_get(clm.param.nc,"pftname"))) - PEcAn.logger::logger.debug(paste0("CLM PFT names: "),pftnames) - for (i in seq_len(npft)) { - pft <- trait.values[[i]] - print(c("PFT",i)) - PEcAn.logger::logger.info(pft) - pft.name <- names(trait.values)[i] - if(is.null(pft.name) | is.na(pft.name)){ - PEcAn.logger::logger.error("pft.name missing") - } else { - PEcAn.logger::logger.info(paste("PFT =",pft.name)) - PEcAn.logger::logger.debug(paste0("fates-clm PFT number: ",which(pftnames==pft.name))) - } - if(pft.name == 'env') next ## HACK, need to remove env from default - - ## Match PFT name to COLUMN - ipft <- match(tolower(pft.name),pftnames) - PEcAn.logger::logger.debug(paste0("ipft: ",ipft)) - - if(is.na(ipft)){ - PEcAn.logger::logger.severe(paste("Unmatched PFT",pft.name, - "in FATES. PEcAn does not yet support non-default PFTs for this model")) - } - - # hard code hack until we can use more than 2 pfts in FATES - ipft <- 2 - PEcAn.logger::logger.debug(paste0("*** PFT number hard-coded to ", ipft," in fates. This will be updated when FATES allows more PFTs")) - - ## Special variables used in conversions -# leafC <- pft['leafC']/100 ## percent to proportion - leafC <- NA - if(is.na(leafC)) leafC <- 0.48 - - # determine photo pathway - photo_flag <- ncdf4::ncvar_get(fates.param.nc,varid="fates_c3psn", start = ipft, count = 1) - PEcAn.logger::logger.debug(paste0("Photosynthesis pathway flag value: ", photo_flag)) - - ## Loop over VARIABLES - for (v in seq_along(pft)) { - var <- names(pft)[v] - - ## THESE NEED SOME FOLLOW UP - - ### ----- Leaf physiological parameters - # Vcmax - if(var == "Vcmax"){ - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_vcmax25top', start = ipft, count = 1, - vals=pft[v]) ## (umol CO2 m-2 s-1) - } - # Ball-Berry slope - if(var == "stomatal_slope.BB"){ - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_BB_slope', start = ipft, count = 1, - vals=pft[v]) - } - - # Ball-Berry intercept - c3. We need to figure out how to set either C3 or C4 values? Based on the PFT? - # TODO: allow setting this for C3 and/or C4 PFTs - # right now, each are just one dimension, will need to revist this if this changes. 
- if(var == "cuticular_cond"){ - if (photo_flag==0) { - PEcAn.logger::logger.debug("** Setting C4 cuticular conductance value") - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_bbopt_c4', start = 1, count = 1, - vals=pft[v]) - } else if (photo_flag==1) { - PEcAn.logger::logger.debug("** Setting C3 cuticular conductance value") - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_bbopt_c3', start = 1, count = 1, - vals=pft[v]) - } else { - PEcAn.logger::logger.warn(" ** FATES photosynthesis pathway flag not set. cuticular conductance not set **") - } - } - - ## missing from params.nc - # if(var == "cuticular_cond"){ - # gH2O_per_mol <- 18.01528 - # ncvar_put(nc=param.nc, varid='gsmin', start = ipft, count = 1, - # vals=pft[v]*gH2O_per_mol*1e-12) ### umol H2O m-2 s-1 -> [m s-1] - # } - - # T response params - modified Arrhenius params for Vcmax, Jmax, and TPU - # -- NOT YET IMPLEMENTED IN BETYdb. FATES params: - # fates_vcmaxha, fates_jmaxha, fates_tpuha, fates_vcmaxhd, fates_jmaxhd, fates_tpuhd, - # fates_vcmaxse, fates_jmaxse, fates_tpuse - - # Ha activation energy for vcmax - FATES units: J/mol - if(var == "Ha_Modified_Arrhenius_Vcmax"){ - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_vcmaxha', start = ipft, count = 1, - vals=pft[v]*1000) ## convert from kj/mol to J/mol (FATES units) - } - - # Hd deactivation energy for vcmax - FATES units: J/mol - if(var == "Hd_Modified_Arrhenius_Vcmax"){ - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_vcmaxhd', start = ipft, count = 1, - vals=pft[v]*1000) ## convert from kj/mol to J/mol (FATES units) - } - - # Ha activation energy for Jmax - FATES units: J/mol - if(var == "Ha_Modified_Arrhenius_Jmax"){ - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_jmaxha', start = ipft, count = 1, - vals=pft[v]*1000) ## convert from kj/mol to J/mol (FATES units) - } - - # Hd deactivation energy for Jmax - FATES units: J/mol - if(var == "Hd_Modified_Arrhenius_Jmax"){ - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_jmaxhd', start = ipft, count = 1, - vals=pft[v]*1000) ## convert from kj/mol to J/mol (FATES units) - } - - # deltaS Vcmax - BETY units:J/mol/K; FATES units: J/mol/K - if(var == "deltaS_Vcmax"){ - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_vcmaxse', start = ipft, count = 1, - vals=pft[v]) ## convert from kj/mol to J/mol (FATES units) - } - # deltaS Jmax - BETY units:J/mol/K; FATES units: J/mol/K - if(var == "deltaS_Jmax"){ - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_jmaxse', start = ipft, count = 1, - vals=pft[v]) ## convert from kj/mol to J/mol (FATES units) - } - ### ----- Leaf physiological parameters - - - ### These variable names (from ED2) should updated in BETY to be more generic - ## missing from params.nc -# if(var == "mort3"){ -# ncvar_put(nc=param.nc, varid='background_mort_rate', start = ipft, count = 1, -# vals=pft[v]) -# } - if(var == "r_fract"){ ## Fraction of carbon balance remaining after maintenance costs have been met that is dedicated to seed production. [0-1] - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_seed_alloc', start = ipft, count = 1, - vals=pft[v]) - } - ## This one is currently allpft level but should be pft level - no longer in FATES params, what was this changed to? 
- if(var == "agf_bs"){ ## The fraction of sapwood and structural biomass that is above ground [0-1] - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_allom_agb_frac', start = ipft, count = 1, - vals=pft[v]) - } - - ## PFT-level variables - if(var == "seed_rain_kgC"){ ## External seed rain from outside site (non-mass conserving) ; - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_seed_rain', start = ipft, count = 1, - vals=pft[v]) - } -## missing from params.nc -# if(var == "cuticular_cond"){ -# gH2O_per_mol <- 18.01528 -# ncvar_put(nc=param.nc, varid='gsmin', start = ipft, count = 1, -# vals=pft[v]*gH2O_per_mol*1e-12) ### umol H2O m-2 s-1 -> [m s-1] -# } - if(var == "DBH_at_HTMAX"){ ## note in FATES parameter list about switching to HTMAX - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_allom_dbh_maxheight', start = ipft, count = 1, - vals=pft[v]) ## [cm] - } - if(var == "growth_resp_factor"){ ## r_growth = grperc * (gpp+r_maint) fates_grperc:long_name = "Growth respiration factor" ; - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_grperc', start = ipft, count = 1, - vals=pft[v]) - } - if(var == "SLA"){ ## default 0.012 - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_slatop', start = ipft, count = 1, - vals=PEcAn.utils::ud_convert(pft[v],"m2 kg-1","m2 g-1")/leafC) - } - if(var == "leaf_turnover_rate"){ ## fates_leaf_long:long_name = "Leaf longevity (ie turnover timescale)" ; - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_leaf_long', start = ipft, count = 1, - vals=1/pft[v]) ## leaf_long = 1/leaf_turnover_rate, 1/years -> years - } - if(var == "root_turnover_rate"){ ## fates_root_long:long_name = "root longevity (alternatively, turnover time)" ; - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_root_long', start = ipft, count = 1, - vals=1/pft[v]) ## root_long = 1/root_turnover_rate, 1/years -> years - } - if(var == "c2n_leaf"){ - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_leafcn', start = ipft, count = 1, - vals=pft[v]) - } - if(var == "fineroot2leaf"){ #"Allocation parameter: new fine root C per new leaf C" units = "gC/gC" - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_froot_leaf', start = ipft, count = 1, - vals=pft[v]) - } - - # if(var == "sapwood_ratio"){ # leaf to sapwood area ratio. IS THIS NOW fates_sapwood_ratio(fates_pft)?? - # ncvar_put(nc=fates.param.nc, varid='latosa', start = ipft, count = 1, - # vals=PEcAn.utils::ud_convert(pft[v],"m2 m-2","m2 cm-2")) - # } - - # leaf to sapwood area ratio. 
This is the INTERCEPT parameter in FATES - # [sserbin@modex paramdata]$ ncdump fates_params_2troppftclones.c171018.nc | grep latosa - # double fates_allom_latosa_int(fates_pft) ; - # fates_allom_latosa_int:long_name = "Leaf area to sap area ratio, intercept [m2/cm2]" ; - #fates_allom_latosa_int:units = "ratio" ; - # double fates_allom_latosa_slp(fates_pft) ; - # fates_allom_latosa_slp:long_name = "Leaf area to sap area ratio, slope (optional)" ; - # fates_allom_latosa_slp:units = "unitless" ; - # fates_allom_latosa_int = 0.001, 0.001 ; - # fates_allom_latosa_slp = 0, 0 ; - if(var == "sapwood_ratio"){ - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_allom_latosa_int', start = ipft, count = 1, - vals=PEcAn.utils::ud_convert(pft[v],"m2 m-2","m2 cm-2")) - } - if(var == "leaf_width"){ # Characteristic leaf dimension use for aerodynamic resistance - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_dleaf', start = ipft, count = 1, - vals=PEcAn.utils::ud_convert(pft[v],"mm","m")) - #PEcAn.logger::logger.debug(paste0("fates_dleaf: ",PEcAn.utils::ud_convert(pft[v],"mm","m"))) # temp debugging - } - ## Currently not in param.nc file despite being on NGEE-T parameter list - # if(var == "nonlocal_dispersal"){ # Place-holder parameter for important seed dispersal parameters - # ncvar_put(nc=param.nc, varid='seed_dispersal_x', start = ipft, count = 1, - # vals=pft[v]) - # } - if(var == "hgt_min"){ # the minimum height (ie starting height) of a newly recruited plant" ; - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_hgt_min', start = ipft, count = 1, - vals=pft[v]) - } - if(var == "leaf_reflect_nir"){ # Leaf reflectance: near-IR [0-1] - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_rholnir', start = ipft, count = 1, - vals=pft[v]) - } - if(var == "leaf_reflect_vis"){ # Leaf reflectance: visible [0-1] - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_rholvis', start = ipft, count = 1, - vals=pft[v]) - } - if(var == "wood_reflect_nir"){ # Stem reflectance: near-IR [0-1] - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_rhosnir', start = ipft, count = 1, - vals=pft[v]) - } - - if(var == "wood_reflect_vis"){ # Stem reflectance: visible [0-1] - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_rhosvis', start = ipft, count = 1, - vals=pft[v]) - } - if(var == "leaf_trans_nir"){ # Leaf transmittance: near-IR - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_taulnir', start = ipft, count = 1, - vals=pft[v]) - } - if(var == "leaf_trans_vis"){ # Leaf transmittance: visible pft - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_taulvis', start = ipft, count = 1, - vals=pft[v]) - } - if(var == "wood_trans_nir"){ # Stem transmittance: near-IR - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_tausnir', start = ipft, count = 1, - vals=pft[v]) - } - if(var == "wood_trans_vis"){ # Stem transmittance: visible - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_tausvis', start = ipft, count = 1, - vals=pft[v]) - } - if(var == "orient_factor"){ # Leaf/stem orientation index [-0/4 SOM 1 - REMOVED FROM FATES PARAMS? - ncdf4::ncvar_put(nc=fates.param.nc, varid='rf_l1s1_bgc', start = 1, count = 1, - vals=pft[v]) - } - if(var == "rf_l2s1_bgc"){ ## respiration fraction litter 2 to SOM 1 - REMOVED FROM FATES PARAMS? - ncdf4::ncvar_put(nc=fates.param.nc, varid='rf_l2s1_bgc', start = 1, count = 1, - vals=pft[v]) - } - if(var == "rf_l3s2_bgc"){ ## respiration fraction from litter 3 to SOM 2 - REMOVED FROM FATES PARAMS? 
- ncdf4::ncvar_put(nc=fates.param.nc, varid='rf_l3s2_bgc', start = 1, count = 1, - vals=pft[v]) - } - if(var == "rf_s2s1_bgc"){ ## respiration fraction SOM 2 to SOM 1 - REMOVED FROM FATES PARAMS? - ncdf4::ncvar_put(nc=fates.param.nc, varid='rf_s2s1_bgc', start = 1, count = 1, - vals=pft[v]) - } - if(var == "rf_s2s3_bgc"){ ## Respiration fraction for SOM 2 -> SOM 3 - REMOVED FROM FATES PARAMS? - ncdf4::ncvar_put(nc=fates.param.nc, varid='rf_s2s3_bgc', start = 1, count = 1, - vals=pft[v]) - } - if(var == "rf_s3s1_bgc"){ ## respiration fraction SOM 3 to SOM 1 - REMOVED FROM FATES PARAMS? - ncdf4::ncvar_put(nc=fates.param.nc, varid='rf_s3s1_bgc', start = 1, count = 1, - vals=pft[v]) - } - if(var == "Q10_frozen_soil"){ ## Separate q10 for frozen soil respiration rates - REMOVED FROM FATES PARAMS? - ncdf4::ncvar_put(nc=fates.param.nc, varid='froz_q10', start = 1, count = 1, - vals=pft[v]) - } - - ## NONE indexed - ## -- FIRE - if(var == "max_fire_duration"){ ## maximum duration of fire none hours - # fates_max_durat:long_name = "spitfire parameter, fire maximum duration, Equation 14 Thonicke et al 2010" - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_max_durat',vals=pft[v]) - } - if(var == "nfires"){ ## The number of fires initiated per m2 per year, from lightning and humans - # fates_nignitions:long_name = "number of daily ignitions (nfires = nignitions*FDI*area_scaling)" - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_nignitions',vals=pft[v]) - } - if(var == "fuel_energy"){ ## energy content of fuel [kj kg-1] - # fates_fuel_energy:long_name = "pitfire parameter, heat content of fuel" - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_fuel_energy',vals=pft[v]) - } - if(var == "fuel_particle_density"){ ## particle density of fuel [kg m-3] - # fates_part_dens:long_name = "spitfire parameter, oven dry particle density, Table A1 Thonicke et al 2010" - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_part_dens',vals=pft[v]) - } - if(var == "durat_slope"){ ## SPITFIRE: change in fire duration with fire danger index. from Canadian Forest Service - # fates_durat_slope:long_name = "spitfire parameter, fire max duration slope, Equation 14 Thonicke et al 2010" - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_durat_slope',vals=pft[v]) - } - if(var == "miner_damp"){ ## SPITFIRE mineral dampening coefficient - # fates_miner_damp:long_name = "spitfire parameter, mineral-dampening coefficient EQ A1 Thonicke et al 2010 " - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_miner_damp',vals=pft[v]) - } - if(var == "fuel_minerals"){ ## mineral content of fuel - # fates_miner_total:long_name = "spitfire parameter, total mineral content, Table A1 Thonicke et al 2010" - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_miner_total',vals=pft[v]) - } - if(var == "alpha_scorch_height"){ ## SPITFIRE scorch height parameter - # fates_alpha_SH:long_name = "spitfire parameter, alpha scorch height, Equation 16 Thonicke et al 2010" - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_alpha_SH',vals=pft[v]) - } - if(var == "fdi_a"){ ## SPITFIRE Constant in calculation of dewpoint for Fire Danger Index (FDI) - # fates_fdi_a:long_name = "spitfire parameter (unknown) " - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_fdi_a',vals=pft[v]) - } - if(var == "fdi_alpha"){ ## SPITFIRE Constant in calculation of dewpoint for Fire Danger Index (FDI) - # fates_fdi_alpha:long_name = "spitfire parameter, EQ 7 Venevsky et al. GCB 2002,(modified EQ 8 Thonicke et al. 
2010) " - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_fdi_alpha',vals=pft[v]) - } - if(var == "fdi_b"){ ## SPITFIRE Constant in calculation of dewpoint for Fire Danger Index (FDI) - # fates_fdi_b:long_name = "spitfire parameter (unknown) " - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_fdi_b',vals=pft[v]) - } - ## -- CANOPY - #if(var == "canopy_max_spread"){ ## Maximum allowable "dynamic ratio of dbh to canopy area" for cohorts in closed canopies. - [cm/m2] - # ncdf4::ncvar_put(nc=fates.param.nc, varid='maxspread',vals=pft[v]) - #} - # - #if(var == "canopy_min_spread"){ ## Minimum allowable "dynamic ratio of dbh to canopy area" for cohorts in closed canopies. - [cm/m2] - # ncdf4::ncvar_put(nc=fates.param.nc, varid='minspread',vals=pft[v]) - #} - - ## LITTERCLASS indexed (Size:6) - ## MCD: skipping for now until there's demonstrated demand because it requires expanding every variable out into VARNAME_[1..n] - # low_moisture_C Intercept (constant) of fuel moisture to burned fraction term for drier fuel litterclass - # low_moisture_S Slope of fuel moisture to burned fraction term for drier fuel litterclass - # max_decomp Maximum decomposition rate of litter in the absence of moisture or temperature stress, per fuel class litterclass y-1 - # mid_moisture Parameter of burned fraction term. Below this 'low' constants apply, above this, 'mid' constants apply, litterclass - # mid_moisture_C Intercept (constant) of fuel moisture to burned fraction term for wetter fuel litterclass - # min_moisture Parameter of burned fraction term. Below this value all litter is burned by a fire. Above, 'low' constants apply litterclass - # FBD Fuel Bulk Density of fuel class litterclass kg m-3 - # alpha_FMC Parameter of function relating fuel moisture content to meteorological fire danger index litterclass - # SAV Surface Area to Volume Ratio of fuel class litterclass cm-1 - - ## NCWD dimensioned Size:4 - if(var == "CWD_frac1"){ ##Fraction of coarse woody debris (CWD) that is moved into each of the four woody fuel classes - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_CWD_frac', start = 1, count = 1, - vals=pft[v]) - } - if(var == "CWD_frac2"){ - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_CWD_frac', start = 2, count = 1, - vals=pft[v]) - } - if(var == "CWD_frac3"){ - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_CWD_frac', start = 3, count = 1, - vals=pft[v]) - } - if(var == "CWD_frac4"){ - ncdf4::ncvar_put(nc=fates.param.nc, varid='fates_CWD_frac', start = 4, count = 1, - vals=pft[v]) - } - - - } ## end loop over VARIABLES - } ## end loop over PFTs - #ncdf4::nc_close(param.nc) - ncdf4::nc_close(clm.param.nc) - ncdf4::nc_close(fates.param.nc) - -# ## Write SETTINGS file -# +write.config.FATES <- function(defaults, trait.values, settings, run.id) { + ## site information + site <- settings$run$site + site.id <- as.numeric(site$id) + + # find out where things are + local.rundir <- file.path(settings$rundir, run.id) ## this is on local machine for staging + rundir <- file.path(settings$host$rundir, run.id) ## this is on remote machine for execution + casedir <- file.path(rundir, "case") + outdir <- file.path(settings$host$outdir, run.id) + refcase <- settings$model$binary + bld <- file.path(refcase, "bld") + binary <- file.path(bld, "cesm.exe") + indir <- file.path(rundir, "input") ## input directory + default <- settings$run$inputs$default$path ## reference inputs file structure + site_name <- paste0(site.id %/% 1000000000, "-", site.id %% 1000000000) + + ## DATES + ## CLM is a bit odd and takes 
a start date and length, so we need to precompute + ## this needs to be generalized to fractional years, but accounting for 365 day year + start_date <- as.Date(settings$run$start.date) + end_date <- as.Date(settings$run$end.date) + stop_n <- as.numeric(end_date - start_date, units = "days") - PEcAn.utils::n_leap_day(start_date, end_date) + 1 + + ## -----------------------------------------------------------------------## + ## ## + ## INPUTS ## + ## ## + ## -----------------------------------------------------------------------## + + ## SITE INFO --> DOMAIN FILE (lat/lon) + # - should also store this in the refcase directory for PEcAn so we can grab from there, and not the PEcAn package + gridres <- 0.125 ## ultimately this should be a variable + lat <- as.numeric(site$lat) + lon <- (as.numeric(site$lon) + 360) %% 360 ## make sure coords in 0-360 range, not negative + domain.default <- system.file("domain.lnd.1x1pt-brazil_navy.090715.nc", package = "PEcAn.FATES") + domain.file <- file.path(local.rundir, paste0("domain.lnd.", site_name, ".nc")) + file.copy(domain.default, domain.file) + domain.nc <- ncdf4::nc_open(domain.file, write = TRUE) + ncdf4::ncvar_put(nc = domain.nc, varid = "xc", vals = lon) + ncdf4::ncvar_put(nc = domain.nc, varid = "yc", vals = lat) + ncdf4::ncvar_put(nc = domain.nc, varid = "xv", vals = lon + c(-1, 1, 1, -1) * gridres) + ncdf4::ncvar_put(nc = domain.nc, varid = "yv", vals = lat + c(-1, -1, 1, 1) * gridres) + ncdf4::ncvar_put(nc = domain.nc, varid = "area", vals = (2 * gridres * pi / 180)^2) + ncdf4::nc_close(domain.nc) + + ## SURF - should also store this in the refcase directory for PEcAn so we can grab from there, and not the PEcAn package + surf.default <- system.file("surfdata_1x1_brazil_16pfts_Irrig_CMIP6_simyr2000_c171214.nc", package = "PEcAn.FATES") + surf.file <- file.path(local.rundir, paste0("surfdata_", site_name, "_simyr2000.nc")) + file.copy(surf.default, surf.file) + Sys.chmod(surf.file) + surf.nc <- ncdf4::nc_open(surf.file, write = TRUE) + ncdf4::ncvar_put(nc = surf.nc, varid = "LONGXY", vals = lon) + ncdf4::ncvar_put(nc = surf.nc, varid = "LATIXY", vals = lat) + ncdf4::nc_close(surf.nc) + + ## MET HEADERS + if (!is.null(settings$run$inputs$met)) { + ## DATM HEADER: datm_atm_in + datm <- readLines(con = system.file("datm_atm_in.template", package = "PEcAn.FATES"), n = -1) + datm <- gsub("@DOMAIN@", file.path(indir, "share/domains/domain.clm", basename(domain.file)), datm) + datm <- gsub("@START_YEAR@", lubridate::year(start_date), datm) + datm <- gsub("@END_YEAR@", lubridate::year(end_date), datm) + writeLines(datm, con = file.path(local.rundir, "datm_atm_in")) + + ## DATM STREAM MET + met <- readLines(con = system.file("datm.streams.txt.PEcAn_met.template", package = "PEcAn.FATES"), n = -1) + met <- gsub("@INDIR@", indir, met) + # domain.file.name <- paste0("domain.lnd.",site_name,".nc") + # met <- gsub('@DOMAIN@',domain.file.name, met) # attempting to provide correct domain file name + met <- gsub("@MET_PATH@", settings$run$inputs$met$path, met) + met.files <- dir(settings$run$inputs$met$path, "*.nc") + met <- gsub("@MET_FILES@", paste(met.files, collapse = "\n "), met) + writeLines(met, con = file.path(local.rundir, "datm.streams.txt.PEcAn_met")) + } + # ... need to set this up so that if MET is blank it can run with default CLM met + # ... fill in this template, the met template, and then have jobs.sh put them in the right place. + # ... 
Test, then adjust DB to have met required + + + ## -----------------------------------------------------------------------## + ## ## + ## JOB.SH ## + ## ## + ## -----------------------------------------------------------------------## + + # create launch script (which will create symlink) + if (!is.null(settings$model$jobtemplate) && file.exists(settings$model$jobtemplate)) { + jobsh <- readLines(con = settings$model$jobtemplate, n = -1) + } else { + jobsh <- readLines(con = system.file("template.job", package = "PEcAn.FATES"), n = -1) + } + + # create host specific setttings + hostsetup <- "" + if (!is.null(settings$model$prerun)) { + hostsetup <- paste(hostsetup, sep = "\n", paste(settings$model$prerun, collapse = "\n")) + } + if (!is.null(settings$host$prerun)) { + hostsetup <- paste(hostsetup, sep = "\n", paste(settings$host$prerun, collapse = "\n")) + } + + hostteardown <- "" + if (!is.null(settings$model$postrun)) { + hostteardown <- paste(hostteardown, sep = "\n", paste(settings$model$postrun, collapse = "\n")) + } + if (!is.null(settings$host$postrun)) { + hostteardown <- paste(hostteardown, sep = "\n", paste(settings$host$postrun, collapse = "\n")) + } + + # create job.sh + jobsh <- gsub("@HOST_SETUP@", hostsetup, jobsh) + jobsh <- gsub("@HOST_TEARDOWN@", hostteardown, jobsh) + + ## Machine configs + # ./create_newcase -case @CASEDIR@ -res 1x1_brazil -compset ICLM45ED -mach @MACHINE@ -compiler @COMPILER@ -project @PROJECT@ + if (!is.null(settings$model$machine)) { + machine <- paste(settings$model$machine, collapse = "\n") + } else { + machine <- "eddi" + } + jobsh <- gsub("@MACHINE@", machine, jobsh) + if (!is.null(settings$model$compiler)) { + compiler <- paste(settings$model$compiler, collapse = "\n") + } else { + compiler <- "gnu" + } + jobsh <- gsub("@COMPILER@", compiler, jobsh) + if (!is.null(settings$model$resolution)) { + resolution <- paste(settings$model$resolution, collapse = "\n") + } else { + resolution <- "1x1_brazil" + } + jobsh <- gsub("@RES@", resolution, jobsh) + if (!is.null(settings$model$compset)) { + compset <- paste(settings$model$compset, collapse = "\n") + } else { + compset <- "I2000Clm50FatesGs" + } + jobsh <- gsub("@COMPSET@", compset, jobsh) + if (!is.null(settings$model$project)) { + project <- paste(settings$model$project, collapse = "\n") + } else { + project <- "pecan" + } + jobsh <- gsub("@PROJECT@", project, jobsh) + + ## PATHS + jobsh <- gsub("@RUNDIR@", rundir, jobsh) + jobsh <- gsub("@CASEDIR@", casedir, jobsh) + jobsh <- gsub("@OUTDIR@", outdir, jobsh) + jobsh <- gsub("@REFCASE@", refcase, jobsh) + jobsh <- gsub("@BLD@", bld, jobsh) + jobsh <- gsub("@BINARY@", binary, jobsh) + jobsh <- gsub("@INDIR@", indir, jobsh) + jobsh <- gsub("@DEFAULT@", default, jobsh) + jobsh <- gsub("@SITE_NAME@", site_name, jobsh) + + ## DATES -> ENV_RUN + jobsh <- gsub("@START_DATE@", start_date, jobsh) + jobsh <- gsub("@STOP_N@", stop_n, jobsh) + jobsh <- gsub("@RUN_ID@", run.id, jobsh) + + ## MET --> DATM + # jobsh <- gsub('@SITE_MET@', settings$run$inputs$met$path, jobsh) + ## FOR FIRST STEP, CAN USE DEFAULT + + writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) + Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) + # + # ## Write PARAMETER file + + ## COPY AND OPEN DEFAULT PARAMETER FILES + # TODO: update this to read param files (CLM and FATES) out of the refcase directory, not the PEcAn package + # TODO: update to allow it to pick between CLM4.5 and CLM5 parameter set based on refcase, user selection + ## See issue 
https://github.com/PecanProject/pecan/issues/1008 + # CLM + # clm.param.default <- system.file("clm5_params.c171117.nc",package="PEcAn.FATES") + # clm.param.file <- file.path(local.rundir,paste0("clm_params.",run.id,".nc")) + clm.param.default <- file.path(refcase, "clm5_params.c171117.nc") # probably need to allow custom param file names here (pecan.xml?) + clm.param.file <- file.path(local.rundir, paste0("clm_params.", run.id, ".nc")) + file.copy(clm.param.default, clm.param.file) + clm.param.nc <- ncdf4::nc_open(clm.param.file, write = TRUE) + + # FATES + # fates.param.default <- system.file("fates_params_2troppftclones.c171018_sps.nc",package="PEcAn.FATES") + # above is a temporary param file corrected for the tropics by lowering freezing tolerace parameters + fates.param.default <- file.path(refcase, "fates_params_2troppftclones.c171018_sps.nc") # probably need to allow custom param file names here (pecan.xml?) + fates.param.file <- file.path(local.rundir, paste0("fates_params.", run.id, ".nc")) + file.copy(fates.param.default, fates.param.file) + fates.param.nc <- ncdf4::nc_open(fates.param.file, write = TRUE) + + ## Loop over PFTS + npft <- length(trait.values) + PEcAn.logger::logger.debug(npft) + PEcAn.logger::logger.debug(dim(trait.values)) + PEcAn.logger::logger.debug(names(trait.values)) + # pftnames <- stringr::str_trim(tolower(ncvar_get(param.nc,"pftname"))) + pftnames <- stringr::str_trim(tolower(ncdf4::ncvar_get(clm.param.nc, "pftname"))) + PEcAn.logger::logger.debug(paste0("CLM PFT names: "), pftnames) + for (i in seq_len(npft)) { + pft <- trait.values[[i]] + print(c("PFT", i)) + PEcAn.logger::logger.info(pft) + pft.name <- names(trait.values)[i] + if (is.null(pft.name) | is.na(pft.name)) { + PEcAn.logger::logger.error("pft.name missing") + } else { + PEcAn.logger::logger.info(paste("PFT =", pft.name)) + PEcAn.logger::logger.debug(paste0("fates-clm PFT number: ", which(pftnames == pft.name))) + } + if (pft.name == "env") next ## HACK, need to remove env from default + + ## Match PFT name to COLUMN + ipft <- match(tolower(pft.name), pftnames) + PEcAn.logger::logger.debug(paste0("ipft: ", ipft)) + + if (is.na(ipft)) { + PEcAn.logger::logger.severe(paste( + "Unmatched PFT", pft.name, + "in FATES. PEcAn does not yet support non-default PFTs for this model" + )) + } + + # hard code hack until we can use more than 2 pfts in FATES + ipft <- 2 + PEcAn.logger::logger.debug(paste0("*** PFT number hard-coded to ", ipft, " in fates. This will be updated when FATES allows more PFTs")) + + ## Special variables used in conversions + # leafC <- pft['leafC']/100 ## percent to proportion + leafC <- NA + if (is.na(leafC)) leafC <- 0.48 + + # determine photo pathway + photo_flag <- ncdf4::ncvar_get(fates.param.nc, varid = "fates_c3psn", start = ipft, count = 1) + PEcAn.logger::logger.debug(paste0("Photosynthesis pathway flag value: ", photo_flag)) + + ## Loop over VARIABLES + for (v in seq_along(pft)) { + var <- names(pft)[v] + + ## THESE NEED SOME FOLLOW UP + + ### ----- Leaf physiological parameters + # Vcmax + if (var == "Vcmax") { + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_vcmax25top", start = ipft, count = 1, + vals = pft[v] + ) ## (umol CO2 m-2 s-1) + } + # Ball-Berry slope + if (var == "stomatal_slope.BB") { + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_BB_slope", start = ipft, count = 1, + vals = pft[v] + ) + } + + # Ball-Berry intercept - c3. We need to figure out how to set either C3 or C4 values? Based on the PFT? 
+      # TODO: allow setting this for C3 and/or C4 PFTs
+      # right now, each is just one dimension; will need to revisit this if this changes.
+      if (var == "cuticular_cond") {
+        if (photo_flag == 0) {
+          PEcAn.logger::logger.debug("** Setting C4 cuticular conductance value")
+          ncdf4::ncvar_put(
+            nc = fates.param.nc, varid = "fates_bbopt_c4", start = 1, count = 1,
+            vals = pft[v]
+          )
+        } else if (photo_flag == 1) {
+          PEcAn.logger::logger.debug("** Setting C3 cuticular conductance value")
+          ncdf4::ncvar_put(
+            nc = fates.param.nc, varid = "fates_bbopt_c3", start = 1, count = 1,
+            vals = pft[v]
+          )
+        } else {
+          PEcAn.logger::logger.warn(" ** FATES photosynthesis pathway flag not set. cuticular conductance not set **")
+        }
+      }
+
+      ## missing from params.nc
+      # if(var == "cuticular_cond"){
+      #   gH2O_per_mol <- 18.01528
+      #   ncvar_put(nc=param.nc, varid='gsmin', start = ipft, count = 1,
+      #             vals=pft[v]*gH2O_per_mol*1e-12) ### umol H2O m-2 s-1 -> [m s-1]
+      # }
+
+      # T response params - modified Arrhenius params for Vcmax, Jmax, and TPU
+      # -- NOT YET IMPLEMENTED IN BETYdb. FATES params:
+      # fates_vcmaxha, fates_jmaxha, fates_tpuha, fates_vcmaxhd, fates_jmaxhd, fates_tpuhd,
+      # fates_vcmaxse, fates_jmaxse, fates_tpuse
+
+      # Ha activation energy for vcmax - FATES units: J/mol
+      if (var == "Ha_Modified_Arrhenius_Vcmax") {
+        ncdf4::ncvar_put(
+          nc = fates.param.nc, varid = "fates_vcmaxha", start = ipft, count = 1,
+          vals = pft[v] * 1000
+        ) ## convert from kj/mol to J/mol (FATES units)
+      }
+
+      # Hd deactivation energy for vcmax - FATES units: J/mol
+      if (var == "Hd_Modified_Arrhenius_Vcmax") {
+        ncdf4::ncvar_put(
+          nc = fates.param.nc, varid = "fates_vcmaxhd", start = ipft, count = 1,
+          vals = pft[v] * 1000
+        ) ## convert from kj/mol to J/mol (FATES units)
+      }
+
+      # Ha activation energy for Jmax - FATES units: J/mol
+      if (var == "Ha_Modified_Arrhenius_Jmax") {
+        ncdf4::ncvar_put(
+          nc = fates.param.nc, varid = "fates_jmaxha", start = ipft, count = 1,
+          vals = pft[v] * 1000
+        ) ## convert from kj/mol to J/mol (FATES units)
+      }
+
+      # Hd deactivation energy for Jmax - FATES units: J/mol
+      if (var == "Hd_Modified_Arrhenius_Jmax") {
+        ncdf4::ncvar_put(
+          nc = fates.param.nc, varid = "fates_jmaxhd", start = ipft, count = 1,
+          vals = pft[v] * 1000
+        ) ## convert from kj/mol to J/mol (FATES units)
+      }
+
+      # deltaS Vcmax - BETY units:J/mol/K; FATES units: J/mol/K
+      if (var == "deltaS_Vcmax") {
+        ncdf4::ncvar_put(
+          nc = fates.param.nc, varid = "fates_vcmaxse", start = ipft, count = 1,
+          vals = pft[v]
+        ) ## convert from kj/mol to J/mol (FATES units)
+      }
+      # deltaS Jmax - BETY units:J/mol/K; FATES units: J/mol/K
+      if (var == "deltaS_Jmax") {
+        ncdf4::ncvar_put(
+          nc = fates.param.nc, varid = "fates_jmaxse", start = ipft, count = 1,
+          vals = pft[v]
+        ) ## convert from kj/mol to J/mol (FATES units)
+      }
+      ### ----- Leaf physiological parameters
+
+
+      ### These variable names (from ED2) should be updated in BETY to be more generic
+      ## missing from params.nc
+      # if(var == "mort3"){
+      #   ncvar_put(nc=param.nc, varid='background_mort_rate', start = ipft, count = 1,
+      #             vals=pft[v])
+      # }
+      if (var == "r_fract") { ## Fraction of carbon balance remaining after maintenance costs have been met that is dedicated to seed production. [0-1]
+        ncdf4::ncvar_put(
+          nc = fates.param.nc, varid = "fates_seed_alloc", start = ipft, count = 1,
+          vals = pft[v]
+        )
+      }
+      ## This one is currently allpft level but should be pft level - no longer in FATES params, what was this changed to?
+ if (var == "agf_bs") { ## The fraction of sapwood and structural biomass that is above ground [0-1] + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_allom_agb_frac", start = ipft, count = 1, + vals = pft[v] + ) + } + + ## PFT-level variables + if (var == "seed_rain_kgC") { ## External seed rain from outside site (non-mass conserving) ; + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_seed_rain", start = ipft, count = 1, + vals = pft[v] + ) + } + ## missing from params.nc + # if(var == "cuticular_cond"){ + # gH2O_per_mol <- 18.01528 + # ncvar_put(nc=param.nc, varid='gsmin', start = ipft, count = 1, + # vals=pft[v]*gH2O_per_mol*1e-12) ### umol H2O m-2 s-1 -> [m s-1] + # } + if (var == "DBH_at_HTMAX") { ## note in FATES parameter list about switching to HTMAX + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_allom_dbh_maxheight", start = ipft, count = 1, + vals = pft[v] + ) ## [cm] + } + if (var == "growth_resp_factor") { ## r_growth = grperc * (gpp+r_maint) fates_grperc:long_name = "Growth respiration factor" ; + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_grperc", start = ipft, count = 1, + vals = pft[v] + ) + } + if (var == "SLA") { ## default 0.012 + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_slatop", start = ipft, count = 1, + vals = PEcAn.utils::ud_convert(pft[v], "m2 kg-1", "m2 g-1") / leafC + ) + } + if (var == "leaf_turnover_rate") { ## fates_leaf_long:long_name = "Leaf longevity (ie turnover timescale)" ; + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_leaf_long", start = ipft, count = 1, + vals = 1 / pft[v] + ) ## leaf_long = 1/leaf_turnover_rate, 1/years -> years + } + if (var == "root_turnover_rate") { ## fates_root_long:long_name = "root longevity (alternatively, turnover time)" ; + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_root_long", start = ipft, count = 1, + vals = 1 / pft[v] + ) ## root_long = 1/root_turnover_rate, 1/years -> years + } + if (var == "c2n_leaf") { + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_leafcn", start = ipft, count = 1, + vals = pft[v] + ) + } + if (var == "fineroot2leaf") { # "Allocation parameter: new fine root C per new leaf C" units = "gC/gC" + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_froot_leaf", start = ipft, count = 1, + vals = pft[v] + ) + } + + # if(var == "sapwood_ratio"){ # leaf to sapwood area ratio. IS THIS NOW fates_sapwood_ratio(fates_pft)?? + # ncvar_put(nc=fates.param.nc, varid='latosa', start = ipft, count = 1, + # vals=PEcAn.utils::ud_convert(pft[v],"m2 m-2","m2 cm-2")) + # } + + # leaf to sapwood area ratio. 
This is the INTERCEPT parameter in FATES + # [sserbin@modex paramdata]$ ncdump fates_params_2troppftclones.c171018.nc | grep latosa + # double fates_allom_latosa_int(fates_pft) ; + # fates_allom_latosa_int:long_name = "Leaf area to sap area ratio, intercept [m2/cm2]" ; + # fates_allom_latosa_int:units = "ratio" ; + # double fates_allom_latosa_slp(fates_pft) ; + # fates_allom_latosa_slp:long_name = "Leaf area to sap area ratio, slope (optional)" ; + # fates_allom_latosa_slp:units = "unitless" ; + # fates_allom_latosa_int = 0.001, 0.001 ; + # fates_allom_latosa_slp = 0, 0 ; + if (var == "sapwood_ratio") { + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_allom_latosa_int", start = ipft, count = 1, + vals = PEcAn.utils::ud_convert(pft[v], "m2 m-2", "m2 cm-2") + ) + } + if (var == "leaf_width") { # Characteristic leaf dimension use for aerodynamic resistance + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_dleaf", start = ipft, count = 1, + vals = PEcAn.utils::ud_convert(pft[v], "mm", "m") + ) + # PEcAn.logger::logger.debug(paste0("fates_dleaf: ",PEcAn.utils::ud_convert(pft[v],"mm","m"))) # temp debugging + } + ## Currently not in param.nc file despite being on NGEE-T parameter list + # if(var == "nonlocal_dispersal"){ # Place-holder parameter for important seed dispersal parameters + # ncvar_put(nc=param.nc, varid='seed_dispersal_x', start = ipft, count = 1, + # vals=pft[v]) + # } + if (var == "hgt_min") { # the minimum height (ie starting height) of a newly recruited plant" ; + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_hgt_min", start = ipft, count = 1, + vals = pft[v] + ) + } + if (var == "leaf_reflect_nir") { # Leaf reflectance: near-IR [0-1] + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_rholnir", start = ipft, count = 1, + vals = pft[v] + ) + } + if (var == "leaf_reflect_vis") { # Leaf reflectance: visible [0-1] + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_rholvis", start = ipft, count = 1, + vals = pft[v] + ) + } + if (var == "wood_reflect_nir") { # Stem reflectance: near-IR [0-1] + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_rhosnir", start = ipft, count = 1, + vals = pft[v] + ) + } + + if (var == "wood_reflect_vis") { # Stem reflectance: visible [0-1] + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_rhosvis", start = ipft, count = 1, + vals = pft[v] + ) + } + if (var == "leaf_trans_nir") { # Leaf transmittance: near-IR + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_taulnir", start = ipft, count = 1, + vals = pft[v] + ) + } + if (var == "leaf_trans_vis") { # Leaf transmittance: visible pft + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_taulvis", start = ipft, count = 1, + vals = pft[v] + ) + } + if (var == "wood_trans_nir") { # Stem transmittance: near-IR + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_tausnir", start = ipft, count = 1, + vals = pft[v] + ) + } + if (var == "wood_trans_vis") { # Stem transmittance: visible + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_tausvis", start = ipft, count = 1, + vals = pft[v] + ) + } + if (var == "orient_factor") { # Leaf/stem orientation index [-0/4 SOM 1 - REMOVED FROM FATES PARAMS? + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "rf_l1s1_bgc", start = 1, count = 1, + vals = pft[v] + ) + } + if (var == "rf_l2s1_bgc") { ## respiration fraction litter 2 to SOM 1 - REMOVED FROM FATES PARAMS? 
+ ncdf4::ncvar_put( + nc = fates.param.nc, varid = "rf_l2s1_bgc", start = 1, count = 1, + vals = pft[v] + ) + } + if (var == "rf_l3s2_bgc") { ## respiration fraction from litter 3 to SOM 2 - REMOVED FROM FATES PARAMS? + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "rf_l3s2_bgc", start = 1, count = 1, + vals = pft[v] + ) + } + if (var == "rf_s2s1_bgc") { ## respiration fraction SOM 2 to SOM 1 - REMOVED FROM FATES PARAMS? + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "rf_s2s1_bgc", start = 1, count = 1, + vals = pft[v] + ) + } + if (var == "rf_s2s3_bgc") { ## Respiration fraction for SOM 2 -> SOM 3 - REMOVED FROM FATES PARAMS? + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "rf_s2s3_bgc", start = 1, count = 1, + vals = pft[v] + ) + } + if (var == "rf_s3s1_bgc") { ## respiration fraction SOM 3 to SOM 1 - REMOVED FROM FATES PARAMS? + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "rf_s3s1_bgc", start = 1, count = 1, + vals = pft[v] + ) + } + if (var == "Q10_frozen_soil") { ## Separate q10 for frozen soil respiration rates - REMOVED FROM FATES PARAMS? + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "froz_q10", start = 1, count = 1, + vals = pft[v] + ) + } + + ## NONE indexed + ## -- FIRE + if (var == "max_fire_duration") { ## maximum duration of fire none hours + # fates_max_durat:long_name = "spitfire parameter, fire maximum duration, Equation 14 Thonicke et al 2010" + ncdf4::ncvar_put(nc = fates.param.nc, varid = "fates_max_durat", vals = pft[v]) + } + if (var == "nfires") { ## The number of fires initiated per m2 per year, from lightning and humans + # fates_nignitions:long_name = "number of daily ignitions (nfires = nignitions*FDI*area_scaling)" + ncdf4::ncvar_put(nc = fates.param.nc, varid = "fates_nignitions", vals = pft[v]) + } + if (var == "fuel_energy") { ## energy content of fuel [kj kg-1] + # fates_fuel_energy:long_name = "pitfire parameter, heat content of fuel" + ncdf4::ncvar_put(nc = fates.param.nc, varid = "fates_fuel_energy", vals = pft[v]) + } + if (var == "fuel_particle_density") { ## particle density of fuel [kg m-3] + # fates_part_dens:long_name = "spitfire parameter, oven dry particle density, Table A1 Thonicke et al 2010" + ncdf4::ncvar_put(nc = fates.param.nc, varid = "fates_part_dens", vals = pft[v]) + } + if (var == "durat_slope") { ## SPITFIRE: change in fire duration with fire danger index. 
from Canadian Forest Service + # fates_durat_slope:long_name = "spitfire parameter, fire max duration slope, Equation 14 Thonicke et al 2010" + ncdf4::ncvar_put(nc = fates.param.nc, varid = "fates_durat_slope", vals = pft[v]) + } + if (var == "miner_damp") { ## SPITFIRE mineral dampening coefficient + # fates_miner_damp:long_name = "spitfire parameter, mineral-dampening coefficient EQ A1 Thonicke et al 2010 " + ncdf4::ncvar_put(nc = fates.param.nc, varid = "fates_miner_damp", vals = pft[v]) + } + if (var == "fuel_minerals") { ## mineral content of fuel + # fates_miner_total:long_name = "spitfire parameter, total mineral content, Table A1 Thonicke et al 2010" + ncdf4::ncvar_put(nc = fates.param.nc, varid = "fates_miner_total", vals = pft[v]) + } + if (var == "alpha_scorch_height") { ## SPITFIRE scorch height parameter + # fates_alpha_SH:long_name = "spitfire parameter, alpha scorch height, Equation 16 Thonicke et al 2010" + ncdf4::ncvar_put(nc = fates.param.nc, varid = "fates_alpha_SH", vals = pft[v]) + } + if (var == "fdi_a") { ## SPITFIRE Constant in calculation of dewpoint for Fire Danger Index (FDI) + # fates_fdi_a:long_name = "spitfire parameter (unknown) " + ncdf4::ncvar_put(nc = fates.param.nc, varid = "fates_fdi_a", vals = pft[v]) + } + if (var == "fdi_alpha") { ## SPITFIRE Constant in calculation of dewpoint for Fire Danger Index (FDI) + # fates_fdi_alpha:long_name = "spitfire parameter, EQ 7 Venevsky et al. GCB 2002,(modified EQ 8 Thonicke et al. 2010) " + ncdf4::ncvar_put(nc = fates.param.nc, varid = "fates_fdi_alpha", vals = pft[v]) + } + if (var == "fdi_b") { ## SPITFIRE Constant in calculation of dewpoint for Fire Danger Index (FDI) + # fates_fdi_b:long_name = "spitfire parameter (unknown) " + ncdf4::ncvar_put(nc = fates.param.nc, varid = "fates_fdi_b", vals = pft[v]) + } + ## -- CANOPY + # if(var == "canopy_max_spread"){ ## Maximum allowable "dynamic ratio of dbh to canopy area" for cohorts in closed canopies. - [cm/m2] + # ncdf4::ncvar_put(nc=fates.param.nc, varid='maxspread',vals=pft[v]) + # } + # + # if(var == "canopy_min_spread"){ ## Minimum allowable "dynamic ratio of dbh to canopy area" for cohorts in closed canopies. - [cm/m2] + # ncdf4::ncvar_put(nc=fates.param.nc, varid='minspread',vals=pft[v]) + # } + + ## LITTERCLASS indexed (Size:6) + ## MCD: skipping for now until there's demonstrated demand because it requires expanding every variable out into VARNAME_[1..n] + # low_moisture_C Intercept (constant) of fuel moisture to burned fraction term for drier fuel litterclass + # low_moisture_S Slope of fuel moisture to burned fraction term for drier fuel litterclass + # max_decomp Maximum decomposition rate of litter in the absence of moisture or temperature stress, per fuel class litterclass y-1 + # mid_moisture Parameter of burned fraction term. Below this 'low' constants apply, above this, 'mid' constants apply, litterclass + # mid_moisture_C Intercept (constant) of fuel moisture to burned fraction term for wetter fuel litterclass + # min_moisture Parameter of burned fraction term. Below this value all litter is burned by a fire. 
Above, 'low' constants apply litterclass + # FBD Fuel Bulk Density of fuel class litterclass kg m-3 + # alpha_FMC Parameter of function relating fuel moisture content to meteorological fire danger index litterclass + # SAV Surface Area to Volume Ratio of fuel class litterclass cm-1 + + ## NCWD dimensioned Size:4 + if (var == "CWD_frac1") { ## Fraction of coarse woody debris (CWD) that is moved into each of the four woody fuel classes + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_CWD_frac", start = 1, count = 1, + vals = pft[v] + ) + } + if (var == "CWD_frac2") { + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_CWD_frac", start = 2, count = 1, + vals = pft[v] + ) + } + if (var == "CWD_frac3") { + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_CWD_frac", start = 3, count = 1, + vals = pft[v] + ) + } + if (var == "CWD_frac4") { + ncdf4::ncvar_put( + nc = fates.param.nc, varid = "fates_CWD_frac", start = 4, count = 1, + vals = pft[v] + ) + } + } ## end loop over VARIABLES + } ## end loop over PFTs + # ncdf4::nc_close(param.nc) + ncdf4::nc_close(clm.param.nc) + ncdf4::nc_close(fates.param.nc) + + # ## Write SETTINGS file + # } #---------------------------------------------------------------------------------------------------------------------# ### EOF diff --git a/models/fates/inst/model2netcdf_test.R b/models/fates/inst/model2netcdf_test.R index 1946413aa64..6a797cc8117 100644 --- a/models/fates/inst/model2netcdf_test.R +++ b/models/fates/inst/model2netcdf_test.R @@ -1,16 +1,16 @@ #---------------- Close all devices and delete all variables. -------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files dlm <- .Platform$file.sep # <--- What is the platform specific delimiter? 
#--------------------------------------------------------------------------------------------------# outdir <- "/data/sserbin/Modeling/FATES/output/" -start_date <- '2004-01-01 00:00:00' -end_date <- '2005-12-31 00:00:00' +start_date <- "2004-01-01 00:00:00" +end_date <- "2005-12-31 00:00:00" sitelat <- 42.56341 sitelon <- 289.1853 year <- "2004" library(PEcAn.FATES) -model2netcdf.FATES(outdir="/data/sserbin/Modeling/FATES/output/",sitelat=42.56341,sitelon=289.1853,start_date="2004-01-01 00:00:00",end_date="2005-12-31 00:00:00") \ No newline at end of file +model2netcdf.FATES(outdir = "/data/sserbin/Modeling/FATES/output/", sitelat = 42.56341, sitelon = 289.1853, start_date = "2004-01-01 00:00:00", end_date = "2005-12-31 00:00:00") diff --git a/models/fates/tests/testthat.R b/models/fates/tests/testthat.R index 508e1f9653f..e6b3ef876cb 100644 --- a/models/fates/tests/testthat.R +++ b/models/fates/tests/testthat.R @@ -2,4 +2,4 @@ library(testthat) library(PEcAn.utils) PEcAn.logger::logger.setQuitOnSevere(FALSE) -#test_check("PEcAn.FATES") +# test_check("PEcAn.FATES") diff --git a/models/fates/tests/testthat/test.met2model.R b/models/fates/tests/testthat/test.met2model.R index 0e03baed7da..a14ba3c4000 100644 --- a/models/fates/tests/testthat/test.met2model.R +++ b/models/fates/tests/testthat/test.met2model.R @@ -6,7 +6,8 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("Met conversion runs without error", { nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" diff --git a/models/gday/R/met2model.GDAY.R b/models/gday/R/met2model.GDAY.R index c38338f2aba..bab81257aea 100644 --- a/models/gday/R/met2model.GDAY.R +++ b/models/gday/R/met2model.GDAY.R @@ -9,9 +9,9 @@ ##' @title met2model.GDAY ##' @description ##' Function to convert NetCDF met files in PEcAn-CF format into GDAY met driver files. -##' This function is an R wrapper to the python script "generate_forcing_data.py" -##' in the inst/ folder. The python script supports arguments to generate sub-daily (30 min) -##' weather data as well as soil temperature from 6 day running mean. These arguments are +##' This function is an R wrapper to the python script "generate_forcing_data.py" +##' in the inst/ folder. The python script supports arguments to generate sub-daily (30 min) +##' weather data as well as soil temperature from 6 day running mean. These arguments are ##' hard-coded in this function to generate daily GDAY files without soil temperature. ##' @export ##' @param in.path location on disk where inputs are stored @@ -27,9 +27,8 @@ ##' @return generates GDAY formatted met file as a side affect, returns file metadata ##' that will be inserted into database ##' @author Martin De Kauwe, Tony Gardella -met2model.GDAY <- function(in.path, in.prefix, outfolder, start_date, end_date, +met2model.GDAY <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, ...) { - ## GDAY driver format (.csv): ## ## Daily: year (-), doy (-; NB. leap years), tair (deg C), @@ -41,24 +40,27 @@ met2model.GDAY <- function(in.path, in.prefix, outfolder, start_date, end_date, ## 30min: year (-), doy (-; NB. 
leap years), hod (-), rainfall (mm 30 min-1), ## par (umol m-2 s-1), tair (deg C), tsoil (deg C), vpd (kPa), ## co2 (ppm), ndep (t ha-1 30 min-1), wind (m-2 s-1), press (kPa) - + start_date <- as.POSIXlt(start_date, tz = "UTC") - end_date <- as.POSIXlt(end_date, tz = "UTC") - out.file <- paste(in.prefix, strptime(start_date, "%Y-%m-%d"), - strptime(end_date, "%Y-%m-%d"), - sep = ".") + end_date <- as.POSIXlt(end_date, tz = "UTC") + out.file <- paste(in.prefix, strptime(start_date, "%Y-%m-%d"), + strptime(end_date, "%Y-%m-%d"), + sep = "." + ) out.file.full <- file.path(outfolder, out.file) - + ## file metadata to be entered into database - results <- data.frame(file = out.file.full, - host = PEcAn.remote::fqdn(), - mimetype = "text/csv", - formatname = "GDAY-met", - startdate = start_date, - enddate = end_date, - dbfile.name = out.file, - stringsAsFactors = FALSE) - + results <- data.frame( + file = out.file.full, + host = PEcAn.remote::fqdn(), + mimetype = "text/csv", + formatname = "GDAY-met", + startdate = start_date, + enddate = end_date, + dbfile.name = out.file, + stringsAsFactors = FALSE + ) + if (file.exists(out.file.full) && !overwrite) { PEcAn.logger::logger.debug("File '", out.file.full, "' already exists, skipping to next file.") return(invisible(results)) @@ -68,23 +70,25 @@ met2model.GDAY <- function(in.path, in.prefix, outfolder, start_date, end_date, if (!file.exists(outfolder)) { dir.create(outfolder) } - + ## set arguments to generate_forcing_data.py script - site <- in.prefix - fpath <- in.path - outfile_tag <- out.file.full - sub_daily <- "false" # Make 30-min file vs. Day, stick with day for now - tsoil_run_mean <- "false" # Generate Tsoil from 7-day running mean or not - - command <- "python3" - path2script <- system.file("generate_forcing_data.py", package = "PEcAn.GDAY") - + site <- in.prefix + fpath <- in.path + outfile_tag <- out.file.full + sub_daily <- "false" # Make 30-min file vs. 
Day, stick with day for now + tsoil_run_mean <- "false" # Generate Tsoil from 7-day running mean or not + + command <- "python3" + path2script <- system.file("generate_forcing_data.py", package = "PEcAn.GDAY") + ## construct command line argument - all_args <- paste(command, path2script, site, fpath, outfile_tag, sub_daily, - tsoil_run_mean) + all_args <- paste( + command, path2script, site, fpath, outfile_tag, sub_daily, + tsoil_run_mean + ) ## call conversion script system(all_args, ignore.stdout = FALSE, ignore.stderr = TRUE) - + return(invisible(results)) } # met2model.GDAY diff --git a/models/gday/R/model2netcdf.GDAY.R b/models/gday/R/model2netcdf.GDAY.R index 60abe96bf6f..8c0f10ae34d 100644 --- a/models/gday/R/model2netcdf.GDAY.R +++ b/models/gday/R/model2netcdf.GDAY.R @@ -13,99 +13,100 @@ ##' @importFrom ncdf4 ncvar_def ncdim_def nc_create ncvar_put nc_close model2netcdf.GDAY <- function(outdir, sitelat, sitelon, start_date, end_date) { - - G_2_KG <- 0.001 TONNES_PER_HA_TO_G_M2 <- 100 THA_2_KG_M2 <- TONNES_PER_HA_TO_G_M2 * 0.001 - + ### Read in model output in GDAY format GDAY.output <- utils::read.csv(file.path(outdir, "gday_out.csv"), header = TRUE, sep = ",", skip = 1) GDAY.output.dims <- dim(GDAY.output) - + ### Determine number of years and output timestep days <- as.Date(start_date):as.Date(end_date) year <- strftime(as.Date(days, origin = "1970-01-01"), "%Y") num.years <- length(unique(year)) years <- unique(year) timestep.s <- 86400 - + ### Loop over years in GDAY output to create separate netCDF outputs for (y in years) { if (file.exists(file.path(outdir, paste(y, "nc", sep = ".")))) { next } - + ## Subset data for processing sub.GDAY.output <- subset(GDAY.output, year == y) sub.GDAY.output.dims <- dim(sub.GDAY.output) - + ## Setup outputs for netCDF file in appropriate units output <- list() - + ## standard variables: C-Fluxes output[[1]] <- (sub.GDAY.output[, "auto_resp"] * THA_2_KG_M2) / timestep.s output[[2]] <- (sub.GDAY.output[, "hetero_resp"] * THA_2_KG_M2) / timestep.s output[[3]] <- (sub.GDAY.output[, "auto_resp"] + sub.GDAY.output[, "hetero_resp"] * - THA_2_KG_M2) / timestep.s + THA_2_KG_M2) / timestep.s output[[4]] <- (sub.GDAY.output[, "gpp"] * THA_2_KG_M2) / timestep.s output[[5]] <- (sub.GDAY.output[, "nep"] * -1 * THA_2_KG_M2) / timestep.s output[[6]] <- (sub.GDAY.output[, "npp"] * THA_2_KG_M2) / timestep.s - + ## standard variables: C-State output[[7]] <- (sub.GDAY.output[, "stem"] + sub.GDAY.output[, "branch"] * THA_2_KG_M2) / timestep.s output[[8]] <- (sub.GDAY.output[, "soilc"] * THA_2_KG_M2) / timestep.s output[[9]] <- (sub.GDAY.output[, "lai"]) - + ## standard variables: water fluxes output[[10]] <- (sub.GDAY.output[, "et"]) / timestep.s output[[11]] <- (sub.GDAY.output[, "transpiration"]) / timestep.s - + # ******************** Declare netCDF variables ********************# - t <- ncdim_def(name = "time", units = paste0("days since ", y, "-01-01 00:00:00"), - vals = 1:nrow(sub.GDAY.output), - calendar = "standard", unlim = TRUE) + t <- ncdim_def( + name = "time", units = paste0("days since ", y, "-01-01 00:00:00"), + vals = 1:nrow(sub.GDAY.output), + calendar = "standard", unlim = TRUE + ) lat <- ncdim_def("lat", "degrees_north", vals = as.numeric(sitelat), longname = "station_latitude") lon <- ncdim_def("lon", "degrees_east", vals = as.numeric(sitelon), longname = "station_longitude") - + ## ***** Need to dynamically update the UTC offset here ***** - + for (i in seq_along(output)) { - if (length(output[[i]]) == 0) + if (length(output[[i]]) == 0) { 
output[[i]] <- rep(-999, length(t$vals)) + } } - + dims <- list(lon = lon, lat = lat, time = t) - + nc_var <- list() ## C-Fluxes - nc_var[[1]] <- PEcAn.utils::to_ncvar("AutoResp",dims) + nc_var[[1]] <- PEcAn.utils::to_ncvar("AutoResp", dims) nc_var[[2]] <- PEcAn.utils::to_ncvar("HeteroResp", dims) - nc_var[[3]] <- PEcAn.utils::to_ncvar("TotalResp",dims) + nc_var[[3]] <- PEcAn.utils::to_ncvar("TotalResp", dims) nc_var[[4]] <- PEcAn.utils::to_ncvar("GPP", dims) nc_var[[5]] <- PEcAn.utils::to_ncvar("NEE", dims) nc_var[[6]] <- PEcAn.utils::to_ncvar("NPP", dims) - + ## C-State nc_var[[7]] <- PEcAn.utils::to_ncvar("AbvGrndWood", dims) nc_var[[8]] <- PEcAn.utils::to_ncvar("TotSoilCarb", dims) nc_var[[9]] <- PEcAn.utils::to_ncvar("LAI", dims) - + ## Water fluxes nc_var[[10]] <- PEcAn.utils::to_ncvar("Evap", dims) nc_var[[11]] <- PEcAn.utils::to_ncvar("TVeg", dims) - - #nc_var[[6]] <- PEcAn.utils::to_ncvar("LeafLitter", "kgC/m2/s", list(lon,lat,t), -999 ) - #nc_var[[7]] <- PEcAn.utils::to_ncvar("WoodyLitter", "kgC/m2/s", list(lon,lat,t), -999) - #nc_var[[8]] <- PEcAn.utils::to_ncvar("RootLitter", "kgC/m2/s", list(lon,lat,t), -999) - #nc_var[[9]] <- PEcAn.utils::to_ncvar("LeafBiomass", "kgC/m2", list(lon,lat,t), -999) - #nc_var[[10]] <- PEcAn.utils::to_ncvar("WoodBiomass", "kgC/m2", list(lon,lat,t), -999) - #nc_var[[11]] <- PEcAn.utils::to_ncvar("RootBiomass", "kgC/m2", list(lon,lat,t), -999) - #nc_var[[12]] <- PEcAn.utils::to_ncvar("LitterBiomass", "kgC/m2", list(lon,lat,t), -999) - #nc_var[[13]] <- PEcAn.utils::to_ncvar("SoilC", "kgC/m2", list(lon,lat,t), -999) - + + # nc_var[[6]] <- PEcAn.utils::to_ncvar("LeafLitter", "kgC/m2/s", list(lon,lat,t), -999 ) + # nc_var[[7]] <- PEcAn.utils::to_ncvar("WoodyLitter", "kgC/m2/s", list(lon,lat,t), -999) + # nc_var[[8]] <- PEcAn.utils::to_ncvar("RootLitter", "kgC/m2/s", list(lon,lat,t), -999) + # nc_var[[9]] <- PEcAn.utils::to_ncvar("LeafBiomass", "kgC/m2", list(lon,lat,t), -999) + # nc_var[[10]] <- PEcAn.utils::to_ncvar("WoodBiomass", "kgC/m2", list(lon,lat,t), -999) + # nc_var[[11]] <- PEcAn.utils::to_ncvar("RootBiomass", "kgC/m2", list(lon,lat,t), -999) + # nc_var[[12]] <- PEcAn.utils::to_ncvar("LitterBiomass", "kgC/m2", list(lon,lat,t), -999) + # nc_var[[13]] <- PEcAn.utils::to_ncvar("SoilC", "kgC/m2", list(lon,lat,t), -999) + # ******************** Declare netCDF variables ********************# - + ### Output netCDF data nc <- nc_create(file.path(outdir, paste(y, "nc", sep = ".")), nc_var) varfile <- file(file.path(outdir, paste(y, "nc", "var", sep = ".")), "w") @@ -115,5 +116,5 @@ model2netcdf.GDAY <- function(outdir, sitelat, sitelon, start_date, end_date) { } close(varfile) nc_close(nc) - } ### End of year loop + } ### End of year loop } # model2netcdf.GDAY diff --git a/models/gday/R/write.config.GDAY.R b/models/gday/R/write.config.GDAY.R index cd7267327c0..12551ae7688 100644 --- a/models/gday/R/write.config.GDAY.R +++ b/models/gday/R/write.config.GDAY.R @@ -12,89 +12,88 @@ ##' @return configuration file for GDAY for given run ##' @export ##' @author Martin De Kauwe -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# write.config.GDAY <- function(defaults, trait.values, settings, run.id) { - - # find out where to write run/ouput - rundir <- file.path(settings$host$rundir, as.character(run.id)) - outdir <- file.path(settings$host$outdir, as.character(run.id)) - if (is.null(settings$host$qsub) && 
(settings$host$name == "localhost")) { - rundir <- file.path(settings$rundir, as.character(run.id)) - outdir <- file.path(settings$modeloutdir, as.character(run.id)) - } - #----------------------------------------------------------------------- - # create launch script (which will create symlink) - if (!is.null(settings$model$jobtemplate) && file.exists(settings$model$jobtemplate)) { - jobsh <- readLines(con = settings$model$jobtemplate, n = -1) - } else { - jobsh <- readLines(con = system.file("template.job", package = "PEcAn.GDAY"), n = -1) - } - - # create host specific setttings - hostsetup <- "" - if (!is.null(settings$model$prerun)) { - hostsetup <- paste(hostsetup, sep = "\n", paste(settings$model$prerun, collapse = "\n")) - } - if (!is.null(settings$host$prerun)) { - hostsetup <- paste(hostsetup, sep = "\n", paste(settings$host$prerun, collapse = "\n")) - } - - hostteardown <- "" - if (!is.null(settings$model$postrun)) { - hostteardown <- paste(hostteardown, sep = "\n", paste(settings$model$postrun, collapse = "\n")) - } - if (!is.null(settings$host$postrun)) { - hostteardown <- paste(hostteardown, sep = "\n", paste(settings$host$postrun, collapse = "\n")) - } - - - # Write out base param file to run directory - params <- readLines(con = system.file("base_start.cfg", package = "PEcAn.GDAY"), n = -1) - writeLines(params, con = file.path(settings$rundir, run.id, "base_start.cfg")) - - - # Create Python Run script - runpy<- readLines(con = system.file("run_simulations.py", package = "PEcAn.GDAY"), n = -1) - - executable <- settings$model$binary - gday_path <- strsplit(executable, "src")[[1]] - scripts <- paste(gday_path[1],"scripts",sep = "") - - runpy <- gsub("@PATH_SCRIPTS@",scripts,runpy) - runpy <- gsub("@PATHTOGDAY@",executable,runpy) - runpy <- gsub("@PATH_PARAMS@",rundir, runpy) - - exp <- strsplit(settings$run$input$met$path, split = "/")[[1]] - exp_tag <- exp[length(exp)] - met_path <- strsplit(settings$run$input$met$path,exp_tag)[[1]] - - runpy <- gsub("@SITE_MET@", met_path , runpy) - runpy <- gsub("@RUNDIR@", rundir, runpy) - runpy <- gsub("@LATITUDE@", settings$run$site$lat,runpy) - - - runpy <- gsub("@SITE@",exp_tag,runpy) - - writeLines(runpy, con = file.path(settings$rundir, run.id, "run_simulations.py")) - Sys.chmod(file.path(settings$rundir, run.id, "run_simulations.py")) - - - # create job.sh - jobsh <- gsub("@HOST_SETUP@", hostsetup, jobsh) - jobsh <- gsub("@HOST_TEARDOWN@", hostteardown, jobsh) - - jobsh <- gsub("@SITE_LAT@", settings$run$site$lat, jobsh) - jobsh <- gsub("@SITE_LON@", settings$run$site$lon, jobsh) - jobsh <- gsub("@SITE_MET@", settings$run$input$met$path, jobsh) - - jobsh <- gsub("@START_DATE@", settings$run$start.date, jobsh) - jobsh <- gsub("@END_DATE@", settings$run$end.date, jobsh) - - jobsh <- gsub("@OUTDIR@", outdir, jobsh) - jobsh <- gsub("@RUNDIR@", rundir, jobsh) - - jobsh <- gsub("@BINARY@", settings$model$binary, jobsh) - - writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) - Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) + # find out where to write run/ouput + rundir <- file.path(settings$host$rundir, as.character(run.id)) + outdir <- file.path(settings$host$outdir, as.character(run.id)) + if (is.null(settings$host$qsub) && (settings$host$name == "localhost")) { + rundir <- file.path(settings$rundir, as.character(run.id)) + outdir <- file.path(settings$modeloutdir, as.character(run.id)) + } + #----------------------------------------------------------------------- + # create launch script (which will create 
symlink) + if (!is.null(settings$model$jobtemplate) && file.exists(settings$model$jobtemplate)) { + jobsh <- readLines(con = settings$model$jobtemplate, n = -1) + } else { + jobsh <- readLines(con = system.file("template.job", package = "PEcAn.GDAY"), n = -1) + } + + # create host specific settings + hostsetup <- "" + if (!is.null(settings$model$prerun)) { + hostsetup <- paste(hostsetup, sep = "\n", paste(settings$model$prerun, collapse = "\n")) + } + if (!is.null(settings$host$prerun)) { + hostsetup <- paste(hostsetup, sep = "\n", paste(settings$host$prerun, collapse = "\n")) + } + + hostteardown <- "" + if (!is.null(settings$model$postrun)) { + hostteardown <- paste(hostteardown, sep = "\n", paste(settings$model$postrun, collapse = "\n")) + } + if (!is.null(settings$host$postrun)) { + hostteardown <- paste(hostteardown, sep = "\n", paste(settings$host$postrun, collapse = "\n")) + } + + + # Write out base param file to run directory + params <- readLines(con = system.file("base_start.cfg", package = "PEcAn.GDAY"), n = -1) + writeLines(params, con = file.path(settings$rundir, run.id, "base_start.cfg")) + + + # Create Python Run script + runpy <- readLines(con = system.file("run_simulations.py", package = "PEcAn.GDAY"), n = -1) + + executable <- settings$model$binary + gday_path <- strsplit(executable, "src")[[1]] + scripts <- paste(gday_path[1], "scripts", sep = "") + + runpy <- gsub("@PATH_SCRIPTS@", scripts, runpy) + runpy <- gsub("@PATHTOGDAY@", executable, runpy) + runpy <- gsub("@PATH_PARAMS@", rundir, runpy) + + exp <- strsplit(settings$run$input$met$path, split = "/")[[1]] + exp_tag <- exp[length(exp)] + met_path <- strsplit(settings$run$input$met$path, exp_tag)[[1]] + + runpy <- gsub("@SITE_MET@", met_path, runpy) + runpy <- gsub("@RUNDIR@", rundir, runpy) + runpy <- gsub("@LATITUDE@", settings$run$site$lat, runpy) + + + runpy <- gsub("@SITE@", exp_tag, runpy) + + writeLines(runpy, con = file.path(settings$rundir, run.id, "run_simulations.py")) + Sys.chmod(file.path(settings$rundir, run.id, "run_simulations.py")) + + + # create job.sh + jobsh <- gsub("@HOST_SETUP@", hostsetup, jobsh) + jobsh <- gsub("@HOST_TEARDOWN@", hostteardown, jobsh) + + jobsh <- gsub("@SITE_LAT@", settings$run$site$lat, jobsh) + jobsh <- gsub("@SITE_LON@", settings$run$site$lon, jobsh) + jobsh <- gsub("@SITE_MET@", settings$run$input$met$path, jobsh) + + jobsh <- gsub("@START_DATE@", settings$run$start.date, jobsh) + jobsh <- gsub("@END_DATE@", settings$run$end.date, jobsh) + + jobsh <- gsub("@OUTDIR@", outdir, jobsh) + jobsh <- gsub("@RUNDIR@", rundir, jobsh) + + jobsh <- gsub("@BINARY@", settings$model$binary, jobsh) + + writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) + Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) } # write.config.GDAY diff --git a/models/gday/tests/testthat.R b/models/gday/tests/testthat.R index b2350127a7c..a5a13c309ef 100644 --- a/models/gday/tests/testthat.R +++ b/models/gday/tests/testthat.R @@ -2,4 +2,4 @@ library(testthat) library(PEcAn.utils) PEcAn.logger::logger.setQuitOnSevere(FALSE) -#test_check("PEcAn.GDAY") +# test_check("PEcAn.GDAY") diff --git a/models/gday/tests/testthat/test.met2model.R b/models/gday/tests/testthat/test.met2model.R index 83498dc3356..900a78803f9 100644 --- a/models/gday/tests/testthat/test.met2model.R +++ b/models/gday/tests/testthat/test.met2model.R @@ -6,7 +6,8 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("Met conversion runs without error", { nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package 
= "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" diff --git a/models/jules/R/model2netcdf.JULES.R b/models/jules/R/model2netcdf.JULES.R index 733bef29e1b..b5abfdf2fd0 100755 --- a/models/jules/R/model2netcdf.JULES.R +++ b/models/jules/R/model2netcdf.JULES.R @@ -1,5 +1,5 @@ ##' Convert MODEL output into the PEcAn standard -##' +##' ##' @name model2netcdf.JULES ##' @title Code to convert JULES output into netCDF format ##' @param outdir Location of model output @@ -9,96 +9,101 @@ model2netcdf.JULES <- function(outdir) { files <- dir(outdir, pattern = ".nc$", full.names = TRUE) dumps <- files[grep(pattern = "dump", files)] statics <- files[grep(pattern = "STATIC", files)] - files <- setdiff(setdiff(files, dumps),statics) - + files <- setdiff(setdiff(files, dumps), statics) + print(files) for (fname in files) { print(fname) nc <- ncdf4::nc_open(fname, write = TRUE) ## extract variable and long names - utils::write.table(sapply(nc$var, function(x) { x$longname }), - file = paste0(fname, ".var"), - col.names = FALSE, - row.names = TRUE, - quote = FALSE) - + utils::write.table( + sapply(nc$var, function(x) { + x$longname + }), + file = paste0(fname, ".var"), + col.names = FALSE, + row.names = TRUE, + quote = FALSE + ) + vars <- names(nc[["var"]]) # Check that frac is reported - if("frac_grid" %in% vars){ + if ("frac_grid" %in% vars) { frac <- ncdf4::ncvar_get(nc, "frac_grid") } else { PEcAn.logger::logger.warn("Surface type fraction is not an output and thus other outputs may not be parseable") } - - base_dims <- vapply(nc$var[["GPP"]]$dim, `[[`, character(1), "name")# This isn't the best example, except that GPP is the default with read.output (and it is not reported by surface type) - for(i in seq_along(vars)){ + + base_dims <- vapply(nc$var[["GPP"]]$dim, `[[`, character(1), "name") # This isn't the best example, except that GPP is the default with read.output (and it is not reported by surface type) + for (i in seq_along(vars)) { var <- vars[i] - nonstd_var <- nrow(PEcAn.utils::standard_vars[which(PEcAn.utils::standard_vars$Variable.Name == var),]) == 0 + nonstd_var <- nrow(PEcAn.utils::standard_vars[which(PEcAn.utils::standard_vars$Variable.Name == var), ]) == 0 dims <- vapply(nc$var[["GPP"]]$dim, `[[`, character(1), "name") - diff_dims <- setdiff(dims,base_dims) - if(length(diff_dims) > 0){# ie more than just x, y, time + diff_dims <- setdiff(dims, base_dims) + if (length(diff_dims) > 0) { # ie more than just x, y, time PEcAn.logger::logger.warn("Variable", vars[i], "has additional dimension", diff_dims, "attempting to aggregate and/or select appropriate data") - - if(diff_dims %in% c("pft","type")){ + + if (diff_dims %in% c("pft", "type")) { # Value reported for multiple surface types (or PFTs) # Sum over all types, weighted by frac - + x_raw <- ncdf4::ncvar_get(nc, var) - x <- matrix(0,nrow(x_raw),ncol(x_raw)) - for(j in 1:nrow(x_raw)){ - x[j,] <- x_raw[j,] * frac[j,] + x <- matrix(0, nrow(x_raw), ncol(x_raw)) + for (j in 1:nrow(x_raw)) { + x[j, ] <- x_raw[j, ] * frac[j, ] } x <- colSums(x) - - }else if(diff_dims == "soil"){ + } else if (diff_dims == "soil") { # Value reported for multiple soil layers # Select a default layer? Or integrate? 
- - x <- ncdf4::ncvar_get(nc, vars[i])[1,] # THIS IS A PLACEHOLDER - - }else{PEcAn.logger::logger.error("Can't properly convert", vars[i])} - + + x <- ncdf4::ncvar_get(nc, vars[i])[1, ] # THIS IS A PLACEHOLDER + } else { + PEcAn.logger::logger.error("Can't properly convert", vars[i]) + } + # If non-standard variable, we need to save the variable info for later - if(nonstd_var) var_dump <- nc$var[[var]] - + if (nonstd_var) var_dump <- nc$var[[var]] + # Have to delete the variable from the nc file # and add it over again because the dimensions have changed cmd <- sprintf("ncks -O -x -v %s %s %s", var, fname, fname) system(cmd) ncdf4::nc_close(nc) nc <- ncdf4::nc_open(fname, write = TRUE) - # Check did the variable get deleted + # Check did the variable get deleted if (!(var %in% names(nc[["var"]]))) { PEcAn.logger::logger.debug(var, "successfully removed from", fname) } - dim = list(time = nc$dim$time, x = nc$dim$x, y = nc$dim$y) + dim <- list(time = nc$dim$time, x = nc$dim$x, y = nc$dim$y) - if(nonstd_var){ - nc_var <- ncdf4::ncvar_def(var, units = var_dump$units, dim = list(time = nc$dim$time), - missval = var_dump$missval, - longname = var_dump$longname) - }else{ + if (nonstd_var) { + nc_var <- ncdf4::ncvar_def(var, + units = var_dump$units, dim = list(time = nc$dim$time), + missval = var_dump$missval, + longname = var_dump$longname + ) + } else { nc_var <- PEcAn.utils::to_ncvar(var, dim) } - + ncdf4::ncvar_add(nc, nc_var) ncdf4::nc_close(nc) nc <- ncdf4::nc_open(fname, write = TRUE) # Why do I have to close and reopen it? ncdf4::ncvar_put(nc, nc_var, x) - - } + } } ## JULES time is in seconds; convert to DOY time <- ncdf4::ncvar_get(nc, "time") / 86400 ncdf4::ncvar_put(nc, "time", time) ncdf4::nc_close(nc) - dir.create(file.path(outdir,"dump")) - for(dump in dumps){ - file.rename(dump, file.path(dirname(dump),"dump",basename(dump))) + dir.create(file.path(outdir, "dump")) + for (dump in dumps) { + file.rename(dump, file.path(dirname(dump), "dump", basename(dump))) } - dir.create(file.path(outdir,"STATIC")) - for(static in statics){ - file.rename(static, file.path(dirname(static),"dump",basename(static))) + dir.create(file.path(outdir, "STATIC")) + for (static in statics) { + file.rename(static, file.path(dirname(static), "dump", basename(static))) } } } # model2netcdf.JULES diff --git a/models/jules/R/write.config.JULES.R b/models/jules/R/write.config.JULES.R index a4843cc7a29..c45a14fc2ae 100644 --- a/models/jules/R/write.config.JULES.R +++ b/models/jules/R/write.config.JULES.R @@ -15,7 +15,7 @@ ##' \dontrun{ ##' write.config.JULES(defaults, trait.values, settings, run.id) ##' } -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# write.config.JULES <- function(defaults, trait.values, settings, run.id) { # constants molH2O_to_grams <- 18.01528 @@ -27,7 +27,7 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { # find out where to write run/output rundir <- file.path(settings$host$rundir, run.id) outdir <- file.path(settings$host$outdir, run.id) - local.outdir <- file.path(settings$outdir,run.id) + local.outdir <- file.path(settings$outdir, run.id) local.rundir <- file.path(settings$rundir, run.id) #----------------------------------------------------------------------- @@ -77,7 +77,7 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { ## ------------------ Detect time step of met data 
------------------ nchar.path <- nchar(settings$run$inputs$met$path) - if(substring(settings$run$inputs$met$path,nchar.path)=="/"){ + if (substring(settings$run$inputs$met$path, nchar.path) == "/") { prefix <- "" met.regexp <- NULL met.dir <- settings$run$inputs$met$path @@ -85,51 +85,58 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { met.regexp <- prefix <- basename(settings$run$inputs$met$path) met.dir <- dirname(settings$run$inputs$met$path) } - if(nchar(prefix)>0) prefix <- paste0(prefix,".") - if(run.local){ - dt <- detect.timestep(met.dir,met.regexp,start_date) + if (nchar(prefix) > 0) prefix <- paste0(prefix, ".") + if (run.local) { + dt <- detect.timestep(met.dir, met.regexp, start_date) } else { - rmt.cmd <- paste0("PEcAn.JULES::detect.timestep(met.dir='", - met.dir,"', met.regexp='",met.regexp, - "', start_date= '",start_date,"')") - dt <- PEcAn.remote::remote.execute.R(script=rmt.cmd,host=settings$host,verbose=TRUE) + rmt.cmd <- paste0( + "PEcAn.JULES::detect.timestep(met.dir='", + met.dir, "', met.regexp='", met.regexp, + "', start_date= '", start_date, "')" + ) + dt <- PEcAn.remote::remote.execute.R(script = rmt.cmd, host = settings$host, verbose = TRUE) } ## -------------------- END DETECT TIMESTEP -------------------- ## PEcAn SPIN-UP: symlink met files, change start date. - if(!is.null(settings$spin)){ - if(run.local){ + if (!is.null(settings$spin)) { + if (run.local) { ## run local - start_date <- PEcAn.data.atmosphere::spin.met(dirname(settings$run$inputs$met$path), - prefix, - settings$run$site$met.start, - settings$run$site$met.end, - settings$spin$nyear, - settings$spin$nsample, - settings$spin$resample, - start_date) + start_date <- PEcAn.data.atmosphere::spin.met( + dirname(settings$run$inputs$met$path), + prefix, + settings$run$site$met.start, + settings$run$site$met.end, + settings$spin$nyear, + settings$spin$nsample, + settings$spin$resample, + start_date + ) } else { ## run spin for remote met - rmt.cmd <- paste0("PEcAn.data.atmosphere::spin.met(in.path ='", - dirname(settings$run$inputs$met$path), - "', in.prefix ='",prefix, - "', start_date = '",settings$run$site$met.start, - "', end_date = '",settings$run$site$met.end, - "', nyear = ",settings$spin$nyear, - ", nsample = ",settings$spin$nsample, - ", resample = ",settings$spin$resample, - ", run_start_date = '",start_date, - "')") - start_date <- PEcAn.remote::remote.execute.R(script=rmt.cmd,host=settings$host,verbose=TRUE) + rmt.cmd <- paste0( + "PEcAn.data.atmosphere::spin.met(in.path ='", + dirname(settings$run$inputs$met$path), + "', in.prefix ='", prefix, + "', start_date = '", settings$run$site$met.start, + "', end_date = '", settings$run$site$met.end, + "', nyear = ", settings$spin$nyear, + ", nsample = ", settings$spin$nsample, + ", resample = ", settings$spin$resample, + ", run_start_date = '", start_date, + "')" + ) + start_date <- PEcAn.remote::remote.execute.R(script = rmt.cmd, host = settings$host, verbose = TRUE) } - } ## end spin + } ## end spin ## set up date strings start_char <- format(as.Date(start_date), "%F %H:%M:%S") - year_char <- strsplit(start_char,"-")[[1]][1] - if(nchar(year_char)<4){ - start_char <- paste0(formatC(as.numeric(year_char),width = 4, format = "d", flag = "0"), - substr(start_char,nchar(year_char)+1,nchar(start_char)) + year_char <- strsplit(start_char, "-")[[1]][1] + if (nchar(year_char) < 4) { + start_char <- paste0( + formatC(as.numeric(year_char), width = 4, format = "d", flag = "0"), + substr(start_char, nchar(year_char) + 1, 
nchar(start_char)) ) } end_char <- format(as.Date(settings$run$end.date), "%F %H:%M:%S") @@ -141,7 +148,7 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { drive.text <- readLines(con = drive.file, n = -1) drive.text <- gsub("@MET_START@", start_char, drive.text) drive.text <- gsub("@MET_END@", met_end_char, drive.text) - drive.text <- gsub("@SITE_MET@", file.path(dirname(settings$run$inputs$met$path),prefix), drive.text) + drive.text <- gsub("@SITE_MET@", file.path(dirname(settings$run$inputs$met$path), prefix), drive.text) drive.text <- gsub("@DT@", as.numeric(dt), drive.text) writeLines(drive.text, con = drive.file) @@ -153,44 +160,44 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { writeLines(timesteps.text, con = timesteps.file) ## Edit PRESCRIBED_DATA.NML to add CO2 data - if("co2" %in% tolower(names(settings$run$inputs))){ + if ("co2" %in% tolower(names(settings$run$inputs))) { pd.file <- file.path(local.rundir, "prescribed_data.nml") pd.text <- readLines(con = pd.file, n = -1) ## SPIN the CO2 file - if(!is.null(settings$spin)){ - dt.co2 = PEcAn.utils::ud_convert(as.numeric(as.Date(settings$run$end.date)- - as.Date(settings$run$start.date)),"days","years") - co2.dat <- utils::read.table(settings$run$inputs$co2$path,header=FALSE) - co2.per.year <- round(nrow(co2.dat)/dt.co2) + if (!is.null(settings$spin)) { + dt.co2 <- PEcAn.utils::ud_convert(as.numeric(as.Date(settings$run$end.date) - + as.Date(settings$run$start.date)), "days", "years") + co2.dat <- utils::read.table(settings$run$inputs$co2$path, header = FALSE) + co2.per.year <- round(nrow(co2.dat) / dt.co2) ## as first pass, just repeat the whole sequence. Not doing resampling. Not worrying about how to loop the file - co2.dat <- c(as.vector(co2.dat[seq_len(as.numeric(settings$spin$nyear)*co2.per.year+1),]),unlist(co2.dat)) + co2.dat <- c(as.vector(co2.dat[seq_len(as.numeric(settings$spin$nyear) * co2.per.year + 1), ]), unlist(co2.dat)) - co2.local <- file.path(local.rundir,basename(settings$run$inputs$co2$path)) - utils::write.table(co2.dat,file = co2.local,col.names = FALSE,row.names = FALSE) - if(run.local){ + co2.local <- file.path(local.rundir, basename(settings$run$inputs$co2$path)) + utils::write.table(co2.dat, file = co2.local, col.names = FALSE, row.names = FALSE) + if (run.local) { settings$run$inputs$co2$path <- co2.local } else { - co2.remote <- file.path(rundir,basename(settings$run$inputs$co2$path)) + co2.remote <- file.path(rundir, basename(settings$run$inputs$co2$path)) settings$run$inputs$co2$path <- co2.remote } - PEcAn.logger::logger.debug("co2.local",co2.local,length(co2.dat)) + PEcAn.logger::logger.debug("co2.local", co2.local, length(co2.dat)) } ## add CO2 file pdn <- length(pd.text) - pd.text[pdn+1] <- paste0("") - pd.text[pdn+2] <- paste0("&JULES_PRESCRIBED_DATASET") - pd.text[pdn+3] <- paste0("data_start = '",start_char,"',") - pd.text[pdn+4] <- paste0("data_end = '",end_char,"',") - pd.text[pdn+5] <- paste0("data_period=-1") - pd.text[pdn+6] <- paste0("file='",settings$run$inputs$co2$path,"',") - pd.text[pdn+7] <- paste0("nvars=1") - pd.text[pdn+8] <- paste0("var='co2_mmr'") - pd.text[pdn+9] <- paste0("interp='i'") - pd.text[pdn+10] <- paste0("/") + pd.text[pdn + 1] <- paste0("") + pd.text[pdn + 2] <- paste0("&JULES_PRESCRIBED_DATASET") + pd.text[pdn + 3] <- paste0("data_start = '", start_char, "',") + pd.text[pdn + 4] <- paste0("data_end = '", end_char, "',") + pd.text[pdn + 5] <- paste0("data_period=-1") + pd.text[pdn + 6] <- paste0("file='", 
settings$run$inputs$co2$path, "',") + pd.text[pdn + 7] <- paste0("nvars=1") + pd.text[pdn + 8] <- paste0("var='co2_mmr'") + pd.text[pdn + 9] <- paste0("interp='i'") + pd.text[pdn + 10] <- paste0("/") # EXAMPLE # &JULES_PRESCRIBED_DATASET @@ -208,9 +215,9 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { # / ## update n_datasets - nd_i <- grep("n_datasets",pd.text) - pd_nd <- as.numeric(sub(",","",strsplit(pd.text[nd_i],"=")[[1]][2])) - pd.text[nd_i] = paste0("n_datasets=",pd_nd+1,",") + nd_i <- grep("n_datasets", pd.text) + pd_nd <- as.numeric(sub(",", "", strsplit(pd.text[nd_i], "=")[[1]][2])) + pd.text[nd_i] <- paste0("n_datasets=", pd_nd + 1, ",") writeLines(pd.text, con = pd.file) } @@ -227,52 +234,56 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { output.text <- readLines(con = output.file, n = -1) output.text <- gsub("@RUNID@", run.id, output.text) output.text <- gsub("@OUTDIR@", outdir, output.text) - if(useTRIFFID){ + if (useTRIFFID) { ## find rows in output.nml - out_nvar_i <- grep("nvars",output.text) - out_varname_i <- grep("var_name",output.text) - out_var_i <- grep("var",output.text) - out_var_i <- out_var_i[which(!(out_var_i %in% c(out_nvar_i,out_varname_i)))] ## process of elimination - out_type_i <- grep("output_type",output.text) + out_nvar_i <- grep("nvars", output.text) + out_varname_i <- grep("var_name", output.text) + out_var_i <- grep("var", output.text) + out_var_i <- out_var_i[which(!(out_var_i %in% c(out_nvar_i, out_varname_i)))] ## process of elimination + out_type_i <- grep("output_type", output.text) len <- nchar(trimws(output.text)) ## update number of variables - out_nvar <- as.numeric(sub(",","",strsplit(output.text[out_nvar_i],"=")[[1]][2])) - output.text[out_nvar_i] = paste0("nvars = ",out_nvar+3,",") - output.text[out_type_i] = paste0("output_type = ",out_nvar+3,"*'M',") + out_nvar <- as.numeric(sub(",", "", strsplit(output.text[out_nvar_i], "=")[[1]][2])) + output.text[out_nvar_i] <- paste0("nvars = ", out_nvar + 3, ",") + output.text[out_type_i] <- paste0("output_type = ", out_nvar + 3, "*'M',") ## add to out_varname - k <- which(rev((len > 0)[1:(out_type_i-1)]))[1] ## how many lines back is previous block - output.text[out_type_i-k] <- paste0(output.text[out_type_i-k], - " 'Fcomp', 'TotLivBio_PFT', 'Height',") + k <- which(rev((len > 0)[1:(out_type_i - 1)]))[1] ## how many lines back is previous block + output.text[out_type_i - k] <- paste0( + output.text[out_type_i - k], + " 'Fcomp', 'TotLivBio_PFT', 'Height'," + ) ## add extra output variables - k <- which(rev((len > 0)[1:(out_varname_i-1)]))[1] ## how many lines back is previous block - output.text[out_varname_i-k] <- paste0(output.text[out_varname_i-k], - " 'frac', 'c_veg', 'canht',") + k <- which(rev((len > 0)[1:(out_varname_i - 1)]))[1] ## how many lines back is previous block + output.text[out_varname_i - k] <- paste0( + output.text[out_varname_i - k], + " 'frac', 'c_veg', 'canht'," + ) } writeLines(output.text, con = output.file) ## Edit ANCILLARIES.NML tile frac soil physical parameters [[OPTIONAL]] - if("soil" %in% names(settings$run$inputs)){ + if ("soil" %in% names(settings$run$inputs)) { ## open soil file soil <- settings$run$inputs$soil nc.soil <- ncdf4::nc_open(soil$path) ## extract JULES variables in.soil <- list() - in.soil[['b']] <- ncdf4::ncvar_get(nc.soil,"soil_hydraulic_b") + in.soil[["b"]] <- ncdf4::ncvar_get(nc.soil, "soil_hydraulic_b") # sathh - in.soil[['satcon']] <- 
ncdf4::ncvar_get(nc.soil,"soil_hydraulic_conductivity_at_saturation") - in.soil[['satcon']] <- PEcAn.utils::ud_convert(in.soil[['satcon']],"m s-1","mm s-1") - in.soil[['sm_sat']] <- ncdf4::ncvar_get(nc.soil,"volume_fraction_of_water_in_soil_at_saturation") - #sm_crit - in.soil[['sm_wilt']] <- ncdf4::ncvar_get(nc.soil,"volume_fraction_of_condensed_water_in_soil_at_wilting_point") - hcap <- ncdf4::ncvar_get(nc.soil,"soil_thermal_capacity") ## J/kg/K - bulk <- ncdf4::ncvar_get(nc.soil,"soil_bulk_density") ## kg m-3 - in.soil[['hcap']] <- hcap * bulk ## J/kg/K * kg m-3 -> J m-3 K-1 - in.soil[['hcon']] <- ncdf4::ncvar_get(nc.soil,"soil_thermal_conductivity") ## W m-1 K-1 - in.soil[['albsoil']] <- ncdf4::ncvar_get(nc.soil,"soil_albedo") + in.soil[["satcon"]] <- ncdf4::ncvar_get(nc.soil, "soil_hydraulic_conductivity_at_saturation") + in.soil[["satcon"]] <- PEcAn.utils::ud_convert(in.soil[["satcon"]], "m s-1", "mm s-1") + in.soil[["sm_sat"]] <- ncdf4::ncvar_get(nc.soil, "volume_fraction_of_water_in_soil_at_saturation") + # sm_crit + in.soil[["sm_wilt"]] <- ncdf4::ncvar_get(nc.soil, "volume_fraction_of_condensed_water_in_soil_at_wilting_point") + hcap <- ncdf4::ncvar_get(nc.soil, "soil_thermal_capacity") ## J/kg/K + bulk <- ncdf4::ncvar_get(nc.soil, "soil_bulk_density") ## kg m-3 + in.soil[["hcap"]] <- hcap * bulk ## J/kg/K * kg m-3 -> J m-3 K-1 + in.soil[["hcon"]] <- ncdf4::ncvar_get(nc.soil, "soil_thermal_conductivity") ## W m-1 K-1 + in.soil[["albsoil"]] <- ncdf4::ncvar_get(nc.soil, "soil_albedo") ncdf4::nc_close(nc.soil) ## open namelist @@ -280,64 +291,61 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { anc.text <- readLines(con = anc.file, n = -1) ## parse variable names - const_val_i <- grep("const_val",anc.text) - const_val <- strsplit(strsplit(anc.text[const_val_i],"=")[[1]][2],",")[[1]] - soil_var_i <- grep("^var",anc.text) - soil_var <- strsplit(strsplit(anc.text[soil_var_i],"=")[[1]][2],",")[[1]] - soil_var <- gsub("'","",soil_var) + const_val_i <- grep("const_val", anc.text) + const_val <- strsplit(strsplit(anc.text[const_val_i], "=")[[1]][2], ",")[[1]] + soil_var_i <- grep("^var", anc.text) + soil_var <- strsplit(strsplit(anc.text[soil_var_i], "=")[[1]][2], ",")[[1]] + soil_var <- gsub("'", "", soil_var) ## substitute in new values - for(i in seq_along(soil_var)){ - k = which(names(in.soil) == soil_var[i]) - if(length(k)==1){ + for (i in seq_along(soil_var)) { + k <- which(names(in.soil) == soil_var[i]) + if (length(k) == 1) { const_val[i] <- in.soil[[k]][1] ## for now only use surface values - ## need to figure out how to set depth profile later + ## need to figure out how to set depth profile later } } ## insert back into text - anc.text[const_val_i] <- paste0("const_val=",paste(const_val,sep = "",collapse = ","),",") + anc.text[const_val_i] <- paste0("const_val=", paste(const_val, sep = "", collapse = ","), ",") writeLines(anc.text, con = anc.file) - } ## end ancillary ## PARSE JULES_VEGETATION.NML some of these settings affect which parameter settings are used - veg.file <- file.path(local.rundir, "jules_vegetation.nml") - veg.text <- readLines(con = veg.file, n = -1) + veg.file <- file.path(local.rundir, "jules_vegetation.nml") + veg.text <- readLines(con = veg.file, n = -1) l_trait_phys <- grep("l_trait_phys", veg.text) if (length(l_trait_phys) > 0) { l_trait_phys <- grepl("true", veg.text[l_trait_phys], ignore.case = TRUE) } else { - l_trait_phys <- FALSE ## default value + l_trait_phys <- FALSE ## default value } ## Turn on TRIFFID?? 
- if(useTRIFFID){ + if (useTRIFFID) { + l_triffid <- grep("l_triffid", veg.text) + veg.text[l_triffid] <- sub("false", "true", veg.text[l_triffid]) - l_triffid <- grep("l_triffid",veg.text) - veg.text[l_triffid] <- sub("false",'true',veg.text[l_triffid]) - - l_trif_eq <- grep("l_trif_eq",veg.text) - if(length(l_trif_eq) == 0){ + l_trif_eq <- grep("l_trif_eq", veg.text) + if (length(l_trif_eq) == 0) { veg.text[length(veg.text)] <- "l_trif_eq=.false.," - veg.text[length(veg.text)+1] <- "/" + veg.text[length(veg.text) + 1] <- "/" } else { - veg.text[l_trif_eq] <- sub("true",'false',veg.text[l_triffid]) # set to FALSE + veg.text[l_trif_eq] <- sub("true", "false", veg.text[l_triffid]) # set to FALSE } - l_veg_compete <- grep("l_veg_compete",veg.text) - if(length(l_veg_compete) == 0){ + l_veg_compete <- grep("l_veg_compete", veg.text) + if (length(l_veg_compete) == 0) { veg.text[length(veg.text)] <- "l_veg_compete=.true.," - veg.text[length(veg.text)+1] <- "/" + veg.text[length(veg.text) + 1] <- "/" } else { - veg.text[l_veg_compete] <- sub('false',"true",veg.text[l_triffid]) # set to TRUE + veg.text[l_veg_compete] <- sub("false", "true", veg.text[l_triffid]) # set to TRUE } - l_triffid_period <- grep("l_triffid_period",veg.text) - if(length(l_triffid_period) == 0){ + l_triffid_period <- grep("l_triffid_period", veg.text) + if (length(l_triffid_period) == 0) { veg.text[length(veg.text)] <- "triffid_period=10," - veg.text[length(veg.text)+1] <- "/" + veg.text[length(veg.text) + 1] <- "/" } ## no else because right now not adjusting dynamically - } writeLines(veg.text, con = veg.file) @@ -362,7 +370,7 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { ## parse into matrix of current defaults defaults <- utils::read.csv(textConnection(defaults), header = FALSE) - defaults <- defaults[, -ncol(defaults)] ## remove extra column created by NML line ending comma + defaults <- defaults[, -ncol(defaults)] ## remove extra column created by NML line ending comma rownames(defaults) <- variables colnames(defaults) <- c("DEC", "EV", "C3", "C4", "SH")[1:ncol(defaults)] @@ -401,12 +409,11 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { pft <- trait.values[[i]] for (v in seq_along(pft)) { - ## convert names and units see JULES variable definitions at ## http://jules-lsm.github.io/vn4.2/namelists/pft_params.nml.html var <- names(pft)[v] if (var == "height") { - names(pft)[v] <- "canht_ft_io" ## Canopy height, JULES: meters NOTE: prognostic if TRIFFID is on; BETY: meters + names(pft)[v] <- "canht_ft_io" ## Canopy height, JULES: meters NOTE: prognostic if TRIFFID is on; BETY: meters } ## c3_io ## C3 photosynthesis = 1; C4 = 0; BETY: unmatched ## orient_io ## leaf angle distribution. 0 - Spherical. 
1 - Horizontal; BETY: unmatched @@ -421,14 +428,14 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { ## albsnf_maxu_io ## Upper bound for the snow-free albedo for large LAI, when scaled to match input obs ## albsnf_maxl_io ## Lower bound for the snow-free albedo for large LAI, when scaled to match input obs if (var == "quantum_efficiency") { - names(pft)[v] <- "alpha_io" ## JULES: mol CO2 per mol PAR photons; BETY: fraction + names(pft)[v] <- "alpha_io" ## JULES: mol CO2 per mol PAR photons; BETY: fraction } if (var == "leaf_reflect_nir") { - names(pft)[v] <- "alnir_io" ## Leaf reflection coefficient for NIR + names(pft)[v] <- "alnir_io" ## Leaf reflection coefficient for NIR } ## alniru_io ## Upper limit for the leaf reflection coefficient for NIR ## alnirl_io ## Lower limit for the leaf reflection coefficient for NIR - if (var == "leaf_reflect_par") { + if (var == "leaf_reflect_par") { names(pft)[v] <- "alpar_io" } ## alparu_io ## Upper limit for the leaf reflection coefficient for VIS @@ -454,11 +461,11 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { if (var == "cuticular_cond") { ## Minimum leaf conductance for H2O (m s-1) -> m3/m2 s-1, BETY: umol H2O m-2 s-1 names(pft)[v] <- "glmin_io" - pft[v] <- pft[v] * molH2O_to_grams * 1e-12 # 10^-6 mol/umol * 18 g/mol * 1kg(= 1 mm)/1000g * 1m/1000mm + pft[v] <- pft[v] * molH2O_to_grams * 1e-12 # 10^-6 mol/umol * 18 g/mol * 1kg(= 1 mm)/1000g * 1m/1000mm } ## infil_f_io ## Infiltration enhancement factor if (var == "extinction_coefficient\t") { - names(pft)[v] <- "kext_io" ## Light extinction coefficient - used with Beer’s Law for light absorption through tile canopies + names(pft)[v] <- "kext_io" ## Light extinction coefficient - used with Beer’s Law for light absorption through tile canopies } ## kpar_io ## PAR Extinction coefficient (m2 leaf / m2 ground) ## neff_io ## Scale factor relating Vcmax with leaf nitrogen concentration @@ -473,24 +480,24 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { ## omniru_io ## Upper limit for the leaf scattering coefficient for NIR ## omnirl_io ## Lower limit for the leaf scattering coefficient for NIR if (var == "growth_resp_factor") { - names(pft)[v] <- "r_grow_io" ## Growth respiration fraction (fraction of NPP = GPP - Ra) + names(pft)[v] <- "r_grow_io" ## Growth respiration fraction (fraction of NPP = GPP - Ra) } ## rootd_ft_io ## Root depth e-folding length assuming exponential model (meters). BETY: m2 kg-1 if (var == "SLA") { if (l_trait_phys) { - names(pft)[v] <- "lma_io" ## Leaf mass per unit area (kgLeaf m-2). + names(pft)[v] <- "lma_io" ## Leaf mass per unit area (kgLeaf m-2). pft[v] <- 1 / pft[v] } else { - names(pft)[v] <- "xsigl_io" ## Specific density of leaf carbon (kg C/m2 leaf). + names(pft)[v] <- "xsigl_io" ## Specific density of leaf carbon (kg C/m2 leaf). pft[v] <- leafC / pft[v] } } ## tleaf_of_io ## Temperature below which leaves are dropped (K). if (var == "pstemp_min") { - names(pft)[v] <- "tlow_io" ## Lower temperature for photosynthesis (deg C); BETY: degrees C + names(pft)[v] <- "tlow_io" ## Lower temperature for photosynthesis (deg C); BETY: degrees C } if (var == "pstemp_max") { - names(pft)[v] <- "tupp_io" ## Upper temperature for photosynthesis (deg C) + names(pft)[v] <- "tupp_io" ## Upper temperature for photosynthesis (deg C) } if (var == "emis_v") { ## Surface emissivity. 
BETY: per mil @@ -522,23 +529,26 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { ## detect any unmatched variables mch <- which(rownames(defaults) == names(pft[v])) if (length(mch) != 1) { - PEcAn.logger::logger.warn("unmatched parameter in write.configs.JULES", names(pft[v]), "in PFT", - names(trait.values)[i]) + PEcAn.logger::logger.warn( + "unmatched parameter in write.configs.JULES", names(pft[v]), "in PFT", + names(trait.values)[i] + ) } else { ## insert into defaults table -# defaults[mch, i] <- pft[v] ## STATIC allows pft reorder - defaults[mch, pft.id[i]] <- pft[v] ## TRIFFID enforces pft order + # defaults[mch, i] <- pft[v] ## STATIC allows pft reorder + defaults[mch, pft.id[i]] <- pft[v] ## TRIFFID enforces pft order } - } ## end loop over parameters - } ## end loop over PFTs + } ## end loop over parameters + } ## end loop over PFTs ## write out new file - write(pft.text[1], pft.file) ## Header + write(pft.text[1], pft.file) ## Header for (i in seq_len(nrow(defaults))) { write(paste0(rownames(defaults)[i], "=", paste(defaults[i, ], collapse = ","), ","), pft.file, - append = TRUE) + append = TRUE + ) } - write(pft.text[length(pft.text)], pft.file, append = TRUE) ## Footer + write(pft.text[length(pft.text)], pft.file, append = TRUE) ## Footer ## set npft to the value needed for surface type definition npft <- max(c(npft, 5)) @@ -548,22 +558,22 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { ## Edit jules_surface_types.nml to set correct number of PFTS ## Edit INITIAL_CONDITIONS.NML soil carbon LAI - if(useTRIFFID){ + if (useTRIFFID) { ic.file <- file.path(local.rundir, "initial_conditions.nml") ic.text <- readLines(con = ic.file, n = -1) ## update number of variables - ic_nvar_i <- grep("nvars",ic.text) - ic_nvar <- as.numeric(sub(",","",strsplit(ic.text[ic_nvar_i],"=")[[1]][2])) - ic.text[ic_nvar_i] <- paste0("nvars = ",ic_nvar+2,",") + ic_nvar_i <- grep("nvars", ic.text) + ic_nvar <- as.numeric(sub(",", "", strsplit(ic.text[ic_nvar_i], "=")[[1]][2])) + ic.text[ic_nvar_i] <- paste0("nvars = ", ic_nvar + 2, ",") ## update use_file - use_file <- grep("use_file",ic.text) - ic.text[use_file] <- paste0(ic.text[use_file],".true.,.true.,") + use_file <- grep("use_file", ic.text) + ic.text[use_file] <- paste0(ic.text[use_file], ".true.,.true.,") ## update var - ic_var <- grep("^var=",ic.text) - ic.text[ic_var] <- paste0(ic.text[ic_var],",'canht','frac',") + ic_var <- grep("^var=", ic.text) + ic.text[ic_var] <- paste0(ic.text[ic_var], ",'canht','frac',") ## write namelist writeLines(ic.text, con = ic.file) @@ -571,7 +581,7 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { ## also load and parse IC dat file ic.dat <- file.path(local.rundir, "initial_conditions.dat") ic.text <- readLines(con = ic.dat, n = -1) - ic.text[2] <- paste(ic.text[2]," 5.0 5.0 0.5 0.5 0.5 0.2 0.2 0.2 0.2 0.2 0.0 0.0 0.0 0.0") + ic.text[2] <- paste(ic.text[2], " 5.0 5.0 0.5 0.5 0.5 0.2 0.2 0.2 0.2 0.2 0.0 0.0 0.0 0.0") writeLines(ic.text, con = ic.dat) } } # write.config.JULES @@ -588,9 +598,9 @@ write.config.JULES <- function(defaults, trait.values, settings, run.id) { #' @return a difftime object #' @export #' -detect.timestep <- function(met.dir,met.regexp,start_date){ +detect.timestep <- function(met.dir, met.regexp, start_date) { met.file <- dir(met.dir, pattern = met.regexp, full.names = TRUE)[1] - PEcAn.logger::logger.info("Detect timestep:",met.dir,met.regexp) + PEcAn.logger::logger.info("Detect timestep:", met.dir, 
met.regexp) met.header <- system(paste("ncdump -h ", met.file), intern = TRUE) id <- grep("time:delta_t", met.header) if (length(id) > 0) { @@ -618,7 +628,7 @@ detect.timestep <- function(met.dir,met.regexp,start_date){ if (length(tlen) > 0) { tlen <- as.numeric(gsub(pattern = "[^[:digit:]]", "", met.header[tlen])) diy <- PEcAn.utils::days_in_year(lubridate::year(as.Date(start_date))) - dt <- 86400 / round(tlen/(diy)) + dt <- 86400 / round(tlen / (diy)) } else { print(c("write.config.JULES timestep not detected", dt)) dt <- 1800 diff --git a/models/jules/tests/testthat.R b/models/jules/tests/testthat.R index ba6e49d00b7..f10d7883e11 100644 --- a/models/jules/tests/testthat.R +++ b/models/jules/tests/testthat.R @@ -2,4 +2,4 @@ library(testthat) library(PEcAn.utils) PEcAn.logger::logger.setQuitOnSevere(FALSE) -#test_check("PEcAn.JULES") +# test_check("PEcAn.JULES") diff --git a/models/jules/tests/testthat/test.met2model.R b/models/jules/tests/testthat/test.met2model.R index 83971a807f3..49de573027d 100644 --- a/models/jules/tests/testthat/test.met2model.R +++ b/models/jules/tests/testthat/test.met2model.R @@ -7,7 +7,8 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("Met conversion runs without error", { skip("Not implemented") nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" diff --git a/models/ldndc/R/met2model.LDNDC.R b/models/ldndc/R/met2model.LDNDC.R index cdf29faf245..fdc2346a38e 100644 --- a/models/ldndc/R/met2model.LDNDC.R +++ b/models/ldndc/R/met2model.LDNDC.R @@ -1,5 +1,4 @@ - -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' Converts a met CF file to a model specific met file. The input ##' files are calld /.YYYY.cf ##' @@ -15,254 +14,243 @@ ##' @return invisible(results) ##' @export ##' @author Henri Kajasilta -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# met2model.LDNDC <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, ...) { - # Logger info PEcAn.logger::logger.info("START met2model.LDNDC") - + # Years start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - - + + # Check the nc-files nc_file <- list.files(in.path)[grep("*.nc", list.files(in.path))] - + # Set-up the outfile that will be returned as results out.file <- paste(in.prefix, format(as.Date(start_date), "%Y-%m-%d"), - format(as.Date(end_date), "%Y-%m-%d"), - "txt", - sep = ".") - + format(as.Date(end_date), "%Y-%m-%d"), + "txt", + sep = "." 
+ ) + out.file.full <- file.path(outfolder, out.file) - + # Results - results <- data.frame(file = out.file.full, - host = PEcAn.remote::fqdn(), - mimetype = "text/plain", - formatname = "LDNDC_Climate", - startdate = start_date, - enddate = end_date, - dbfile.name = out.file, - stringsAsFactors = FALSE) - - - - if(length(nc_file) == 0){ + results <- data.frame( + file = out.file.full, + host = PEcAn.remote::fqdn(), + mimetype = "text/plain", + formatname = "LDNDC_Climate", + startdate = start_date, + enddate = end_date, + dbfile.name = out.file, + stringsAsFactors = FALSE + ) + + + + if (length(nc_file) == 0) { PEcAn.logger::logger.severe("Based on the given file path, nc-files was not found") } - - - for(year in start_year:end_year){ - + + + for (year in start_year:end_year) { # Year PEcAn.logger::logger.info(year) - - + + old.file <- file.path(in.path, paste0(in.prefix, ".", year, ".nc")) - - - if(file.exists(old.file)){ - + + + if (file.exists(old.file)) { ## Open netCDF file nc <- ncdf4::nc_open(old.file) - + # Data points are relational to this date units <- nc$dim$time$units - + # Check that the simulation doesn't take place before there are data points - if(year == start_year & PEcAn.utils::datetime2cf(start_date, units, tz = "UTC") < 0){ + if (year == start_year & PEcAn.utils::datetime2cf(start_date, units, tz = "UTC") < 0) { PEcAn.logger::logger.severe("Data in the met drivers seem not to be available for given start date specified in simulation. Consider applying a later start date") } - - + + # Convert the time fractions to be seconds by starting from the date in file's units sec <- nc$dim$time$vals sec <- PEcAn.utils::ud_convert(sec, unlist(strsplit(units, " "))[1], "seconds") - - + + # Calculate the time steps # 3600 * 24 = 86 400 (seconds in a day) - tstep <- 86400/(sec[2]- sec[1]) - - - - + tstep <- 86400 / (sec[2] - sec[1]) + + + + ## Determine the simulation days and calculate the start and end indexes ## by using start_index and end_index functions. These functions are found ## at the end of this file - - if(year != start_year & year != end_year){ - + + if (year != start_year & year != end_year) { # Simulation days simu_days <- 1:PEcAn.utils::days_in_year(year) - + # Whole year, from first to last index ind_s <- 1 ind_e <- which.max(sec) - - }else if(year == start_year & year != end_year){ - + } else if (year == start_year & year != end_year) { # Simulation days - simu_days <- seq(lubridate::yday(start_date),PEcAn.utils::days_in_year(year)) - + simu_days <- seq(lubridate::yday(start_date), PEcAn.utils::days_in_year(year)) + # Check the function to see how the start index is calculated ind_s <- start_index(units, start_date, sec) ind_e <- which.max(sec) - - }else if(year != start_year & year == end_year){ - + } else if (year != start_year & year == end_year) { # Simulation days - simu_days <- seq(1,lubridate::yday(end_date)) - + simu_days <- seq(1, lubridate::yday(end_date)) + # Check the function to see how the end index is calculated ind_s <- 1 ind_e <- end_index(units, start_date, end_date, sec, tstep) - - }else{ - + } else { # Simulation days simu_days <- seq(lubridate::yday(start_date), lubridate::yday(end_date)) - + # Need to calculate both first and last index by using functions ind_s <- start_index(units, start_date, sec) ind_e <- end_index(units, start_date, end_date, sec, tstep) - } - + # Based on the simulation days, this is year, day and subday info # for model to use. 
- y <- rep(year, length(simu_days)*tstep) #year - d <- rep(simu_days, each = tstep) #day - subd <- rep(1:tstep, length(simu_days)) #subday - + y <- rep(year, length(simu_days) * tstep) # year + d <- rep(simu_days, each = tstep) # day + subd <- rep(1:tstep, length(simu_days)) # subday + # On above the starting and ending indexes has been determined. # Here those are just collapsed. ind <- ind_s:ind_e - - - + + + ## Info for climate file that model is using ## # Latitude and longitude lat <- ncdf4::ncvar_get(nc, "latitude") lon <- ncdf4::ncvar_get(nc, "longitude") - - + + # Average air temperature - Tair <-ncdf4::ncvar_get(nc, "air_temperature")[ind] ## in Kelvin + Tair <- ncdf4::ncvar_get(nc, "air_temperature")[ind] ## in Kelvin tavg <- PEcAn.utils::ud_convert(Tair, "K", "degC") - + # Wind speed wind <- try(ncdf4::ncvar_get(nc, "wind_speed"))[ind] if (!is.numeric(wind)) { U <- ncdf4::ncvar_get(nc, "eastward_wind")[ind] V <- ncdf4::ncvar_get(nc, "northward_wind")[ind] - wind <- sqrt(U ^ 2 + V ^ 2) + wind <- sqrt(U^2 + V^2) PEcAn.logger::logger.info("wind_speed absent; calculated from eastward_wind and northward_wind") } - #wind <- try(ncdf4::ncvar_get(nc, "wind_speed"))[ind] - + # wind <- try(ncdf4::ncvar_get(nc, "wind_speed"))[ind] + # Precipation prec <- ncdf4::ncvar_get(nc, "precipitation_flux")[ind] - + # Global radiation - grad <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air")[ind] ## in W/m2 - + grad <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air")[ind] ## in W/m2 + # Air pressure Pa ---> mbar, needs to be converted press <- ncdf4::ncvar_get(nc, "air_pressure")[ind] press <- PEcAn.utils::ud_convert(press, "Pa", "millibar") - + # Vapor Pressure Deficit Pa ---> kPa, needs to be converted, check below - VPD <- try(ncdf4::ncvar_get(nc, "water_vapor_saturation_deficit"))[ind] ## in Pa + VPD <- try(ncdf4::ncvar_get(nc, "water_vapor_saturation_deficit"))[ind] ## in Pa if (!is.numeric(VPD)) { - # Fetch these values in order to construct VPD - Qair <-ncdf4::ncvar_get(nc, "specific_humidity")[ind] #humidity (kg/kg) - SVP <- PEcAn.utils::ud_convert(PEcAn.data.atmosphere::get.es(tavg), "millibar", "Pa") ## Saturation vapor pressure - + Qair <- ncdf4::ncvar_get(nc, "specific_humidity")[ind] # humidity (kg/kg) + SVP <- PEcAn.utils::ud_convert(PEcAn.data.atmosphere::get.es(tavg), "millibar", "Pa") ## Saturation vapor pressure + # VPD calculated, if not directly found from the nc-file VPD <- SVP * (1 - PEcAn.data.atmosphere::qair2rh(Qair, tavg, press)) PEcAn.logger::logger.info("water_vapor_saturation_deficit absent; VPD calculated from Qair, Tair, and SVP (saturation vapor pressure) ") } VPD <- PEcAn.utils::ud_convert(VPD, "Pa", "kPa") # Pa ---> kPa - + # Relative humidity (%) rhum <- ncdf4::ncvar_get(nc, "relative_humidity")[ind] - + # Close connection after all necessary values have been fetch ncdf4::nc_close(nc) - + # Gather the vectors to dataframe - data <- as.data.frame(do.call("cbind", list(y = y, d = d, s = subd, - prec = prec*86400/tstep, #mm - tavg = tavg, #degC - grad = grad, #W m-2 - vpd = VPD, #kPa - wind = wind, #m s-1 - press = press))) #mbar - + data <- as.data.frame(do.call("cbind", list( + y = y, d = d, s = subd, + prec = prec * 86400 / tstep, # mm + tavg = tavg, # degC + grad = grad, # W m-2 + vpd = VPD, # kPa + wind = wind, # m s-1 + press = press + ))) # mbar + # Write prefix before the actual data - if(year == start_year){ - + if (year == start_year) { # General information before the daily/subdaily climate data - prefix_global <- 
paste0('\t time = "', start_date, '/', tstep, '"') + prefix_global <- paste0('\t time = "', start_date, "/", tstep, '"') prefix_climate <- paste0('\t id = "', 0, '"') prefix_latitude <- paste0('\t latitude = "', lat, '"') prefix_longitude <- paste0('\t longitude = "', lon, '"') - + data_prefix <- paste("%global", prefix_global, # global includes the global time, but this is already got - #from elsewhere and not necessary here(?). - "%climate", prefix_climate, - "%attributes", prefix_latitude, prefix_longitude, - "%data \n", sep = "\n") - + # from elsewhere and not necessary here(?). + "%climate", prefix_climate, + "%attributes", prefix_latitude, prefix_longitude, + "%data \n", + sep = "\n" + ) + # Write prefix information before the data cat(data_prefix, file = file.path(outfolder, out.file)) - + # For the first year, keep col.names as TRUE - readr::write_delim(x = data, file = file.path(outfolder, out.file), - delim = "\t", append = T, quote = "none") - - - }else{ + readr::write_delim( + x = data, file = file.path(outfolder, out.file), + delim = "\t", append = T, quote = "none" + ) + } else { # For the other years, col.names are FALSE - readr::write_delim(x = data, file = file.path(outfolder, out.file), - delim = "\t", col_names = F, append = T) + readr::write_delim( + x = data, file = file.path(outfolder, out.file), + delim = "\t", col_names = F, append = T + ) } - - - - } else{ + } else { PEcAn.logger::logger.severe(paste(old.file, "file does not exist")) } - - - } return(invisible(results)) - } # met2model.LDNDC # Calculates the start index. This function determines the difference between # netcdf file's starting date and simulation's starting date and converting that # difference to seconds. Returns +1 index based on the matching seconds. -start_index <- function(units, start_date, sec){ - timediff <-round((PEcAn.utils::datetime2cf(start_date, units, tz = "UTC"))*86400) - if(timediff == 0){ +start_index <- function(units, start_date, sec) { + timediff <- round((PEcAn.utils::datetime2cf(start_date, units, tz = "UTC")) * 86400) + if (timediff == 0) { return(1) - }else{ + } else { return(which(sec == timediff)) } } -end_index <- function(units, start_date, end_date, sec, tstep){ - #if(lubridate::year(start_date) == lubridate::year(end_date)){ - timediff <- round((PEcAn.utils::datetime2cf(end_date, units, tz = "UTC")+1)*86400) - return(which(sec == (timediff-86400/tstep))) +end_index <- function(units, start_date, end_date, sec, tstep) { + # if(lubridate::year(start_date) == lubridate::year(end_date)){ + timediff <- round((PEcAn.utils::datetime2cf(end_date, units, tz = "UTC") + 1) * 86400) + return(which(sec == (timediff - 86400 / tstep))) } diff --git a/models/ldndc/R/model2netcdf.LDNDC.R b/models/ldndc/R/model2netcdf.LDNDC.R index 2c9b5f89b1e..c2190e10a5b 100644 --- a/models/ldndc/R/model2netcdf.LDNDC.R +++ b/models/ldndc/R/model2netcdf.LDNDC.R @@ -1,7 +1,6 @@ - -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' Convert LDNDC output into the NACP Intercomparison format (ALMA using netCDF) -##' +##' ##' @name model2netcdf.LDNDC ##' @title Code to convert LDNDC's output into netCDF format ##' @@ -18,202 +17,228 @@ ##' ##' @author Henri Kajasilta model2netcdf.LDNDC <- function(outdir, sitelat, sitelon, start_date, end_date, delete.raw = FALSE) { - - # File path to Output directory wherein the raw model results are located + # File 
path to Output directory wherein the raw model results are located output_dir <- file.path(outdir, "Output") - + #### Something to check that data has same timesteps. Either take all of the necessary # files as a sub-daily or if not possible, then choose the daily option Subdailyfiles <- list.files(output_dir)[grep("*.-subdaily", list.files(output_dir))] - + # Test, if required files are all available subdaily - ##---- Currently meant to work only with subdaily timesteps ----## - if(all(c("physiology-subdaily.txt") %in% Subdailyfiles)){ + ## ---- Currently meant to work only with subdaily timesteps ----## + if (all(c("physiology-subdaily.txt") %in% Subdailyfiles)) { PEcAn.logger::logger.info("Files with sub-daily timesteps found: ", Subdailyfiles) - + # Physiology data: LAI, Photosynthesis rate physiology <- subset(read.csv(file.path(output_dir, "physiology-subdaily.txt"), header = T, sep = "\t"), - select = c("datetime", "species", "lai", "dC_co2_upt.kgCm.2.", "dC_maintenance_resp.kgCm.2.", - "dC_transport_resp.kgCm.2.", "dC_growth_resp.kgCm.2.", "DW_below.kgDWm.2.", "DW_above.kgDWm.2.")) - - - soilchemistry <- subset(read.csv(file.path(output_dir, "soilchemistry-subdaily.txt"), header = T, sep ="\t"), - select = c("datetime", "sC_co2_hetero.kgCm.2.")) - + select = c( + "datetime", "species", "lai", "dC_co2_upt.kgCm.2.", "dC_maintenance_resp.kgCm.2.", + "dC_transport_resp.kgCm.2.", "dC_growth_resp.kgCm.2.", "DW_below.kgDWm.2.", "DW_above.kgDWm.2." + ) + ) + + + soilchemistry <- subset(read.csv(file.path(output_dir, "soilchemistry-subdaily.txt"), header = T, sep = "\t"), + select = c("datetime", "sC_co2_hetero.kgCm.2.") + ) + # Soil moisture information - watercycle <- subset(read.csv(file.path(output_dir, "watercycle-subdaily.txt"), header = T, sep ="\t"), - select = c("datetime", "soilwater_10cm...", "soilwater_30cm...")) - - + watercycle <- subset(read.csv(file.path(output_dir, "watercycle-subdaily.txt"), header = T, sep = "\t"), + select = c("datetime", "soilwater_10cm...", "soilwater_30cm...") + ) + + # Harvest - harvest <- subset(read.csv(file.path(output_dir, "report-harvest.txt"), header = T, sep ="\t"), - select = c("datetime", "dC_fru_export.kgCha.1.", "dC_fol_export.kgCha.1.", "dC_frt_export.kgCha.1.", - "dC_lst_above_export.kgCha.1.", "dC_lst_below_export.kgCha.1.", "dC_dst_above_export.kgCha.1.", - "dC_dst_below_export.kgCha.1.", "dC_straw_export.kgCha.1.")) - harvest$total <- rowSums(harvest[,-1]) - harvest <- harvest[,c("datetime", "total")] - + harvest <- subset(read.csv(file.path(output_dir, "report-harvest.txt"), header = T, sep = "\t"), + select = c( + "datetime", "dC_fru_export.kgCha.1.", "dC_fol_export.kgCha.1.", "dC_frt_export.kgCha.1.", + "dC_lst_above_export.kgCha.1.", "dC_lst_below_export.kgCha.1.", "dC_dst_above_export.kgCha.1.", + "dC_dst_below_export.kgCha.1.", "dC_straw_export.kgCha.1." 
+ ) + ) + harvest$total <- rowSums(harvest[, -1]) + harvest <- harvest[, c("datetime", "total")] + # Cut - cut <- subset(read.csv(paste(output_dir, "report-cut.txt", sep = "/"), header = T, sep ="\t"), - select = c("datetime", "dC_fru_export.kgCha.1.", "dC_fol_export.kgCha.1.", "dC_dfol_export.kgCha.1.", - "dC_lst_export.kgCha.1.", "dC_dst_export.kgCha.1.", "dC_frt_export.kgCha.1.")) - - cut$total <- rowSums(cut[,-1]) - cut <- cut[,c("datetime", "total")] - - } else{ + cut <- subset(read.csv(paste(output_dir, "report-cut.txt", sep = "/"), header = T, sep = "\t"), + select = c( + "datetime", "dC_fru_export.kgCha.1.", "dC_fol_export.kgCha.1.", "dC_dfol_export.kgCha.1.", + "dC_lst_export.kgCha.1.", "dC_dst_export.kgCha.1.", "dC_frt_export.kgCha.1." + ) + ) + + cut$total <- rowSums(cut[, -1]) + cut <- cut[, c("datetime", "total")] + } else { PEcAn.logger::logger.severe("Subdaily output files not found, check the configurations for the LDNDC runs") } - + # This approach should be more reliable compared to previous since just choose one unique datetime # and the last one will be the "all", if there are several species on the field - physiology <- physiology[!duplicated(physiology$datetime, fromLast = T),] - - + physiology <- physiology[!duplicated(physiology$datetime, fromLast = T), ] + + # Combine harvest and cut as one event - harvest <- rbind(harvest, cut) %>% dplyr::group_by(.data$datetime) %>% dplyr::summarise(harvest_carbon_flux = sum(.data$total)/10000) %>% + harvest <- rbind(harvest, cut) %>% + dplyr::group_by(.data$datetime) %>% + dplyr::summarise(harvest_carbon_flux = sum(.data$total) / 10000) %>% as.data.frame() - + # Temporary solution to get "no visible binding" note off from the variables: 'Date', 'Year' and 'Day' Date <- Year <- Day <- Step <- NULL - + ## Merge subdaily-files - ldndc.raw.out <- merge(physiology, soilchemistry, by = 'datetime', all = TRUE) - ldndc.raw.out <- merge(ldndc.raw.out, watercycle, by = 'datetime', all = TRUE) - ldndc.raw.out <- merge(ldndc.raw.out, harvest, by = 'datetime', all = TRUE) - + ldndc.raw.out <- merge(physiology, soilchemistry, by = "datetime", all = TRUE) + ldndc.raw.out <- merge(ldndc.raw.out, watercycle, by = "datetime", all = TRUE) + ldndc.raw.out <- merge(ldndc.raw.out, harvest, by = "datetime", all = TRUE) + ldndc.out <- ldndc.raw.out %>% - dplyr:: mutate(Date = format(as.POSIXct(.data$datetime, format = "%Y-%m-%d")), .keep = "unused") %>% - dplyr::slice(1:(dplyr::n()-1)) %>% # Removing one extra line in output - dplyr::mutate(Year = lubridate::year(Date), Day = as.numeric(strftime(Date, format = "%j")), - Step = rep(0:(length(which(Date %in% unique(Date)[1]))-1),len = length(Date))) %>% - dplyr::select("Year", "Day", "Step", "lai", "dC_maintenance_resp.kgCm.2.", "dC_transport_resp.kgCm.2.", - "dC_growth_resp.kgCm.2.", "dC_co2_upt.kgCm.2.", "sC_co2_hetero.kgCm.2.", - "DW_below.kgDWm.2.", "DW_above.kgDWm.2.", "soilwater_10cm...", - "soilwater_30cm...", "harvest_carbon_flux") - - - + dplyr::mutate(Date = format(as.POSIXct(.data$datetime, format = "%Y-%m-%d")), .keep = "unused") %>% + dplyr::slice(1:(dplyr::n() - 1)) %>% # Removing one extra line in output + dplyr::mutate( + Year = lubridate::year(Date), Day = as.numeric(strftime(Date, format = "%j")), + Step = rep(0:(length(which(Date %in% unique(Date)[1])) - 1), len = length(Date)) + ) %>% + dplyr::select( + "Year", "Day", "Step", "lai", "dC_maintenance_resp.kgCm.2.", "dC_transport_resp.kgCm.2.", + "dC_growth_resp.kgCm.2.", "dC_co2_upt.kgCm.2.", "sC_co2_hetero.kgCm.2.", + 
"DW_below.kgDWm.2.", "DW_above.kgDWm.2.", "soilwater_10cm...", + "soilwater_30cm...", "harvest_carbon_flux" + ) + + + ## Check that the data match, based on the years we want simu_years <- unique(ldndc.out$Year) - + year_seq <- seq(lubridate::year(start_date), lubridate::year(end_date)) - - + + ## Output is given sub-daily at this point --- Will see, if this will change later ## timesteps in a day - out_day <- sum(ldndc.out$Year == simu_years[1] & - ldndc.out$Day == unique(ldndc.out$Day)[1], - na.rm = T) - - timestep.s <- 86400/out_day - + out_day <- sum( + ldndc.out$Year == simu_years[1] & + ldndc.out$Day == unique(ldndc.out$Day)[1], + na.rm = T + ) + + timestep.s <- 86400 / out_day + ## Loop over years in output to create separate netCDF outputs - for(y in year_seq){ + for (y in year_seq) { # if file exist and overwrite is F, then move on to the next - + print(paste("---- Prosessing year: ", y)) - + # Subset data for prosessing sub.ldndc.out <- subset(ldndc.out, Year == y) - - - - + + + + # Generate start/end dates for processing if (y == strftime(start_date, "%Y")) { begin_date <- lubridate::yday(start_date) } else { begin_date <- 1 } - + if (y == strftime(end_date, "%Y")) { end_d <- lubridate::yday(end_date) } else { end_d <- PEcAn.utils::days_in_year(y) } - + ## Subset the years we are interested in sub.ldndc.out <- subset(sub.ldndc.out, Day >= begin_date & Day <= end_d) - + # Create the tvals that are used in nc-files - tvals <- sub.ldndc.out[["Day"]] + sub.ldndc.out[["Step"]] /out_day -1 - - + tvals <- sub.ldndc.out[["Day"]] + sub.ldndc.out[["Step"]] / out_day - 1 + + ## Outputs need to be an appropriate units, this can be done here output <- list() - + # LAI output[[1]] <- ifelse(!is.na(sub.ldndc.out$lai), sub.ldndc.out$lai, 0) - + # Photosynthesis rate - GPP - GPP <- ifelse(!is.na(sub.ldndc.out$dC_co2_upt.kgCm.2.), sub.ldndc.out$dC_co2_upt.kgCm.2./timestep.s, 0) + GPP <- ifelse(!is.na(sub.ldndc.out$dC_co2_upt.kgCm.2.), sub.ldndc.out$dC_co2_upt.kgCm.2. / timestep.s, 0) output[[2]] <- GPP - + # Autotrophic respiration Autotrophic <- ifelse(!is.na((sub.ldndc.out$dC_maintenance_resp.kgCm.2. + sub.ldndc.out$dC_transport_resp.kgCm.2. + sub.ldndc.out$dC_growth_resp.kgCm.2.)), - (sub.ldndc.out$dC_maintenance_resp.kgCm.2. + sub.ldndc.out$dC_transport_resp.kgCm.2. + sub.ldndc.out$dC_growth_resp.kgCm.2.)/timestep.s, 0) + (sub.ldndc.out$dC_maintenance_resp.kgCm.2. + sub.ldndc.out$dC_transport_resp.kgCm.2. + sub.ldndc.out$dC_growth_resp.kgCm.2.) / timestep.s, 0 + ) output[[3]] <- Autotrophic - + # Heterotrophic respiration - Heterotrophic <- sub.ldndc.out$sC_co2_hetero.kgCm.2./timestep.s + Heterotrophic <- sub.ldndc.out$sC_co2_hetero.kgCm.2. 
/ timestep.s output[[4]] <- Heterotrophic - + # Total respiration output[[5]] <- Autotrophic + Heterotrophic - + # NPP output[[6]] <- GPP - Autotrophic - + # NEE output[[7]] <- ifelse(!is.na(Autotrophic), Autotrophic, 0) + Heterotrophic - GPP - + # Soilmoisture at 10 cm output[[8]] <- c(t(data.frame(sub.ldndc.out$soilwater_10cm..., sub.ldndc.out$soilwater_30cm...))) - + # Aboveground biomass - output[[9]] <- ifelse(!is.na(sub.ldndc.out$DW_above.kgDWm.2.), sub.ldndc.out$DW_above.kgDWm.2., 0)/timestep.s - + output[[9]] <- ifelse(!is.na(sub.ldndc.out$DW_above.kgDWm.2.), sub.ldndc.out$DW_above.kgDWm.2., 0) / timestep.s + # Belowground biomass # Using constant 0.45 to calculate the C from dry matter output[[10]] <- ifelse(!is.na(sub.ldndc.out$DW_below.kgDWm.2.), sub.ldndc.out$DW_below.kgDWm.2., 0) * 0.45 / timestep.s - + harvest <- ifelse(!is.na(sub.ldndc.out$harvest_carbon_flux), sub.ldndc.out$harvest_carbon_flux, 0) * 0.45 / timestep.s output[[11]] <- harvest - - + + #### Declare netCDF variables #### - t <- ncdf4::ncdim_def(name = "time", - longname = "time", - units = paste0("days since ", y, "-01-01 00:00:00"), #00:00:00 - vals = tvals, - calendar = "standard", - unlim = TRUE) - - - lat <- ncdf4::ncdim_def("lat", "degrees_north", vals = as.numeric(sitelat), - longname = "station_latitude") - lon <- ncdf4::ncdim_def("lon", "degrees_east", vals = as.numeric(sitelon), - longname = "station_longitude") - - + t <- ncdf4::ncdim_def( + name = "time", + longname = "time", + units = paste0("days since ", y, "-01-01 00:00:00"), # 00:00:00 + vals = tvals, + calendar = "standard", + unlim = TRUE + ) + + + lat <- ncdf4::ncdim_def("lat", "degrees_north", + vals = as.numeric(sitelat), + longname = "station_latitude" + ) + lon <- ncdf4::ncdim_def("lon", "degrees_east", + vals = as.numeric(sitelon), + longname = "station_longitude" + ) + + depth <- ncdf4::ncdim_def("depth", "m", vals = c(.10, .30)) - + dims <- list(lon = lon, lat = lat, time = t) dims_added <- list(lon = lon, lat = lat, depth = depth, time = t) - - #dims_daily <- list(lon = lon, lat = lat, time = t_daily) - time_interval <- ncdf4::ncdim_def(name = "hist_interval", - longname="history time interval endpoint dimensions", - vals = 1:2, units="") - - - + + # dims_daily <- list(lon = lon, lat = lat, time = t_daily) + time_interval <- ncdf4::ncdim_def( + name = "hist_interval", + longname = "history time interval endpoint dimensions", + vals = 1:2, units = "" + ) + + + ## Declare netCDF variables ## nc_var <- list() - + # Subdaily values nc_var[[1]] <- PEcAn.utils::to_ncvar("LAI", dims) nc_var[[2]] <- PEcAn.utils::to_ncvar("GPP", dims) @@ -222,41 +247,45 @@ model2netcdf.LDNDC <- function(outdir, sitelat, sitelon, start_date, end_date, d nc_var[[5]] <- PEcAn.utils::to_ncvar("TotalResp", dims) nc_var[[6]] <- PEcAn.utils::to_ncvar("NPP", dims) nc_var[[7]] <- PEcAn.utils::to_ncvar("NEE", dims) - + # Soilwater nc_var[[8]] <- PEcAn.utils::to_ncvar("SoilMoist", dims_added) - + # Biomass aboveground and belowground - nc_var[[9]] <- ncdf4::ncvar_def("AGB", units = "kg C m-2", dim = dims, missval = -999, - longname = "above ground biomass") - nc_var[[10]] <- ncdf4::ncvar_def("below_ground_carbon_content", units = "kg C m-2", dim = dims, missval = -999, - longname = "below ground biomass") - - nc_var[[length(nc_var)+1]] <- ncdf4::ncvar_def("harvest_carbon_flux", units = "kg m-2", dim = dims, missval = -999, - longname = "biomass of harvested organs") - + nc_var[[9]] <- ncdf4::ncvar_def("AGB", + units = "kg C m-2", dim = dims, missval = -999, + longname = 
"above ground biomass" + ) + nc_var[[10]] <- ncdf4::ncvar_def("below_ground_carbon_content", + units = "kg C m-2", dim = dims, missval = -999, + longname = "below ground biomass" + ) + + nc_var[[length(nc_var) + 1]] <- ncdf4::ncvar_def("harvest_carbon_flux", + units = "kg m-2", dim = dims, missval = -999, + longname = "biomass of harvested organs" + ) + # Daily values - # nc_var[[7]] <- PEcAn.utils::to_ncvar("LAI_Daily", dims_daily) - - + # nc_var[[7]] <- PEcAn.utils::to_ncvar("LAI_Daily", dims_daily) + + ## Output netCDF data nc <- ncdf4::nc_create(file.path(outdir, paste(y, "nc", sep = ".")), nc_var) varfile <- file(file.path(outdir, paste(y, "nc", "var", sep = ".")), "w") ncdf4::ncatt_put(nc, "time", "bounds", "time_bounds", prec = NA) - - - - for(i in seq_along(nc_var)){ + + + + for (i in seq_along(nc_var)) { ncdf4::ncvar_put(nc, nc_var[[i]], output[[i]]) cat(paste(nc_var[[i]]$name, nc_var[[i]]$longname), file = varfile, sep = "\n") } close(varfile) ncdf4::nc_close(nc) - } # Delete the raw results if (delete.raw) { - unlink(output_dir, recursive=TRUE) + unlink(output_dir, recursive = TRUE) } - } # model2netcdf.LDNDC diff --git a/models/ldndc/R/write.config.LDNDC.R b/models/ldndc/R/write.config.LDNDC.R index 6f13bd29ae2..80bc7474425 100644 --- a/models/ldndc/R/write.config.LDNDC.R +++ b/models/ldndc/R/write.config.LDNDC.R @@ -1,5 +1,4 @@ - -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' Writes a LDNDC config file. ##' ##' Requires a pft xml object, a list of trait values for a single model run, @@ -14,33 +13,32 @@ ##' @return configuration file for LDNDC for given run ##' @export ##' @author Henri Kajasilta -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# write.config.LDNDC <- function(defaults, trait.values, settings, run.id) { - - MinPackReq <- "1.35" # Current version 1.35 - - + + # Create Schedule time - if(!is.null(settings$run$start.date) & !is.null(settings$run$end.date)){ - + if (!is.null(settings$run$start.date) & !is.null(settings$run$end.date)) { steps <- 48 # Hard-coded for now - ScheduleTime <- paste0(format(as.POSIXlt(settings$run$start.date), "%Y-%m-%d"), "/", - steps, " -> ", as.Date(format(as.POSIXlt(settings$run$end.date), "%Y-%m-%d"))+1) - # One extra day added to the end day of simulations, because the simulations will stop to - # the first timestep in a given end day. As a result we got our real end date simulated - # and one extra output line from the day after. - # This one extra output line is taken out it model2netcdf to not include extra values - # in netcdf file + ScheduleTime <- paste0( + format(as.POSIXlt(settings$run$start.date), "%Y-%m-%d"), "/", + steps, " -> ", as.Date(format(as.POSIXlt(settings$run$end.date), "%Y-%m-%d")) + 1 + ) + # One extra day added to the end day of simulations, because the simulations will stop to + # the first timestep in a given end day. As a result we got our real end date simulated + # and one extra output line from the day after. 
+ # This one extra output line is taken out it model2netcdf to not include extra values + # in netcdf file } - + # Find out where to write run/ouput dirs rundir <- file.path(settings$host$rundir, run.id) outdir <- file.path(settings$host$outdir, run.id) - + # Source - if(!is.null(settings$run$inputs$met$path)){ + if (!is.null(settings$run$inputs$met$path)) { # For climate data MetPath <- settings$run$inputs$met$path # Info for project file from which directory to read the inputs @@ -48,40 +46,42 @@ write.config.LDNDC <- function(defaults, trait.values, settings, run.id) { # Raw model outputs are written into own directory OutputPrefix <- file.path(outdir, "Output/") } - - + + # Add groundwater file, if it is available for site # Not obligatory file for model run - if(!is.null(settings$run$inputs$groundwater$path1)){ - GroundWater = '' + if (!is.null(settings$run$inputs$groundwater$path1)) { + GroundWater <- '' groundwaterfile <- readLines(con = file.path(settings$run$inputs$groundwater$path1)) writeLines(groundwaterfile, con = file.path(settings$rundir, run.id, "groundwater.txt")) - }else{GroundWater = ""} - - - - + } else { + GroundWater <- "" + } + + + + #----------------------------------------------------------------------- ## Fill .ldndc template with the given settings projectfile <- readLines(con = system.file("project.ldndc", package = "PEcAn.LDNDC"), n = -1) - - + + # Changes - projectfile <- gsub('@PackMinVerReq@', MinPackReq, projectfile) - - projectfile <- gsub('@ScheduleTime@', ScheduleTime, projectfile) - - projectfile <- gsub('@SourcePrefix@', SourcePrefix, projectfile) - projectfile <- gsub('@OutputPrefix@', OutputPrefix, projectfile) - - projectfile <- gsub('@Groundwater@', GroundWater, projectfile) - + projectfile <- gsub("@PackMinVerReq@", MinPackReq, projectfile) + + projectfile <- gsub("@ScheduleTime@", ScheduleTime, projectfile) + + projectfile <- gsub("@SourcePrefix@", SourcePrefix, projectfile) + projectfile <- gsub("@OutputPrefix@", OutputPrefix, projectfile) + + projectfile <- gsub("@Groundwater@", GroundWater, projectfile) + # Write project file to rundir writeLines(projectfile, con = file.path(settings$rundir, run.id, "project.ldndc")) - - - - + + + + #----------------------------------------------------------------------- # Create launch script (which will create symlink) if (!is.null(settings$model$jobtemplate) && file.exists(settings$model$jobtemplate)) { @@ -89,7 +89,7 @@ write.config.LDNDC <- function(defaults, trait.values, settings, run.id) { } else { jobsh <- readLines(con = system.file("template.job", package = "PEcAn.LDNDC"), n = -1) } - + # Create host specific settings hostsetup <- "" if (!is.null(settings$model$prerun)) { @@ -98,7 +98,7 @@ write.config.LDNDC <- function(defaults, trait.values, settings, run.id) { if (!is.null(settings$host$prerun)) { hostsetup <- paste(hostsetup, sep = "\n", paste(settings$host$prerun, collapse = "\n")) } - + hostteardown <- "" if (!is.null(settings$model$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$model$postrun, collapse = "\n")) @@ -106,60 +106,60 @@ write.config.LDNDC <- function(defaults, trait.values, settings, run.id) { if (!is.null(settings$host$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$host$postrun, collapse = "\n")) } - - - + + + # Create job.sh based on the given settings jobsh <- gsub("@HOST_SETUP@", hostsetup, jobsh) jobsh <- gsub("@HOST_TEARDOWN@", hostteardown, jobsh) - + jobsh <- gsub("@SITE_LAT@", settings$run$site$lat, jobsh) jobsh <- 
gsub("@SITE_LON@", settings$run$site$lon, jobsh) - + jobsh <- gsub("@START_DATE@", settings$run$start.date, jobsh) jobsh <- gsub("@END_DATE@", settings$run$end.date, jobsh) - + jobsh <- gsub("@OUTDIR@", outdir, jobsh) jobsh <- gsub("@RUNDIR@", rundir, jobsh) jobsh <- gsub("@METPATH@", MetPath, jobsh) - + # LDNDC binaries in this server are located here. This binary also points to model own configurations. jobsh <- gsub("@BINARY@", paste(settings$model$binary, paste0(rundir, "/project.ldndc")), jobsh) - - if(is.null(settings$model$delete.raw)){ + + if (is.null(settings$model$delete.raw)) { settings$model$delete.raw <- FALSE } - + jobsh <- gsub("@DELETE.RAW@", settings$model$delete.raw, jobsh) - + # Write job.sh file to rundir writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) Sys.chmod(file.path(rundir, "job.sh")) # Permissions - - - + + + ## ----- Preparing the setup file ----- ## - + ## Setup file -- This may differ based on the site properties and the ecosystem we are simulating setupfile <- readLines(con = system.file("setup_template.xml", package = "PEcAn.LDNDC"), n = -1) - + ## Timemode # Timemode currently supports only subdaily timemode <- "subdaily" setupfile <- gsub("@timemode@", timemode, setupfile) - - + + ## Elevation, latitude and longitude setupfile <- gsub("@elevation@", "10", setupfile) setupfile <- gsub("@latitude@", settings$run$site$lat, setupfile) setupfile <- gsub("@longitude@", settings$run$site$lon, setupfile) - - + + ## Check the site id site_id <- settings$run$site$id - - - + + + ## Handle the setups, when working with grass, crop and forest fields # Possibly to hard code species to the list, this differentiation is done only # for the purpose of separating the setups between forest and grassland/crops @@ -169,1629 +169,1622 @@ write.config.LDNDC <- function(defaults, trait.values, settings, run.id) { pfts_grasscrops <- c("barley", "oat", "triticale", "timothy", "meadow", "soil") pfts_forest <- c("pipy") pfts_run <- NULL - for(pft_names in 1:length(settings$pfts)){ + for (pft_names in 1:length(settings$pfts)) { pfts_run <- c(pfts_run, settings$pfts[[pft_names]]$name) } - - + + # Setup file created for grass and crop simulations: - if(all(pfts_run %in% pfts_grasscrops)){ - + if (all(pfts_run %in% pfts_grasscrops)) { ## Modules # Microclimate module setupfile <- gsub("@microclimate@", "canopyecm", setupfile) - + # Watercycle module and option setupfile <- gsub("@watercycle@", "watercycledndc", setupfile) setupfile <- gsub("@pevapotrans@", "penman", setupfile) - + # Airchemistry module setupfile <- gsub("@airchemistry@", "airchemistrydndc", setupfile) - + # Physiology module setupfile <- gsub("@physiology@", "plamox", setupfile) setupfile <- gsub("@plantfamilies@", "crops grass", setupfile) - + # Soil modules and options setupfile <- gsub("@soilchemistry@", "metrx", setupfile) - + # Report setupfile <- gsub("@reportarable@", "", setupfile) - + # Write the populated setup file as an xml-file writeLines(setupfile, con = file.path(settings$rundir, run.id, "setup.xml")) - } - + # Setup file created for forest simulations - else if(all(pfts_run %in% pfts_forest)){ - + else if (all(pfts_run %in% pfts_forest)) { ## Modules # Microclimate module setupfile <- gsub("@microclimate@", "canopyecm", setupfile) - + # Watercycle module and option setupfile <- gsub("@watercycle@", "echy", setupfile) setupfile <- gsub("@pevapotrans@", "penman", setupfile) - + # Airchemistry module setupfile <- gsub("@airchemistry@", "airchemistrydndc", setupfile) - + # Physiology 
module setupfile <- gsub("@physiology@", "psim", setupfile) - + # Soil modules and options setupfile <- gsub("@soilchemistry@", "metrx", setupfile) - + # Report setupfile <- gsub("@reportarable@", "\n", setupfile) - + # Write the populated setup file as an xml-file writeLines(setupfile, con = file.path(settings$rundir, run.id, "setup.xml")) - - } - + # Given pfts were not among the supported species - else{ + else { PEcAn.logger::logger.severe("Given species are not currently supported. This can be fixed by updating the write.config.LDNDC.R file.") } - - + + ## ----- Fetching other site specific file templates ----- ## - + ### Event, site and airchemistry files ### - + # Fetch event file from the given path, this might be modified, if initial # conditions are given, check the part of handling initial conditions later on eventsfile <- readLines(con = file.path(settings$run$inputs$events$path1)) - + # Fetch default site file. Will also be populated based on the given initial conditions sitefile <- readLines(con = system.file("site_template.xml", package = "PEcAn.LDNDC"), n = -1) - + # Use airchemistry file, which represents Finland - if(!is.null(settings$run$inputs$airchemistry$path1)){ + if (!is.null(settings$run$inputs$airchemistry$path1)) { airchemistryfile <- readLines(con = file.path(settings$run$inputs$airchemistry$path1)) - } else{ + } else { airchemistryfile <- readLines(con = system.file("airchemistry.txt", package = "PEcAn.LDNDC"), n = -1) } - - + + #### write run-specific PFT parameters here #### Get parameters being handled by PEcAn # For species, read the speciesparameters template speciesparfile <- readLines(con = system.file("speciesparameter_template.xml", package = "PEcAn.LDNDC"), n = -1) - + # For site (parameters), read the siteparameters template siteparfile <- readLines(con = system.file("siteparameters_template.xml", package = "PEcAn.LDNDC"), n = -1) - - + + #---------------------- - + ## Set-up the necessary files in to the run directory so ## model is able to function properly. Later on, these ## files should be populated with initial values. - + # Species and Siteparameters b.2 <- "" h.2 <- "" - + species_par_values <- list() - + for (pft in seq_along(trait.values)) { - pft.traits <- unlist(trait.values[[pft]]) pft.names <- names(pft.traits) - - + + # Number at the beginning refers to the number of species parameters in LDNDC guide book. # NOTE! LDNDC Userguide has been updated later on so the numbering can be a little bit off compared # to the latest version. 
# First there is name in LDNDC and the second is name in BETY database - - - #8 NDFLUSH - + + + # 8 NDFLUSH - if ("ndflush" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #9 NDMORTA - + + # 9 NDMORTA - if ("ndmorta" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #10 DLEAFSHED - + + # 10 DLEAFSHED - if ("dleafshed" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #12 AEJM J/mol - + + # 12 AEJM J/mol - if ("aejm" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #13 AEKC J/mol - + + # 13 AEKC J/mol - if ("aekc" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #14 AEKO J/mol - + + # 14 AEKO J/mol - if ("aeko" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #15 AERD J/mol - + + # 15 AERD J/mol - if ("aerd" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #16 AEVC J/mol - + + # 16 AEVC J/mol - if ("aevc" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #17 AEVO J/mol - + + # 17 AEVO J/mol - if ("aevo" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #18 ALB (-) - SW_albedo (-) + + # 18 ALB (-) - SW_albedo (-) if ("SW_albedo" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #21 AMAXA (-) - + + # 21 AMAXA (-) - if ("amaxa" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #22 AMAXB (-) - Amax (-) + + # 22 AMAXB (-) - Amax (-) if ("Amax" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #23 AMAXFRAC - + + # 23 AMAXFRAC - if ("amaxfrac" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #24 BASEFOLRESPFRAC - + + # 24 BASEFOLRESPFRAC - if ("basefolrespfrac" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #25 CB - + + # 25 CB - if ("cb" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #26 CDAMP - + + # 26 CDAMP - if ("cdamp" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #27 CL_P1 - + + # 27 CL_P1 - if ("cl_p1" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #28 CL_P2 - + + # 28 CL_P2 - if ("cl_p2" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #32 CELLULOSE - + + # 32 CELLULOSE - if ("cellulose" %in% pft.names) { - b.2 <- 
paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #34 CHILL_UNITS - + + # 34 CHILL_UNITS - if ("chill_units" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #35 CHILL_TEMP_MAX - + + # 35 CHILL_TEMP_MAX - if ("chill_temp_max" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #36 CT_IS - + + # 36 CT_IS - if ("ct_is" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #37 CT_MT - + + # 37 CT_MT - if ("ct_mt" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #38 DBRANCH kg/m3 - + + # 38 DBRANCH kg/m3 - if ("dbranch" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #39 DF_EXP - + + # 39 DF_EXP - if ("df_exp" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #40 DF_LIMIT m2/ha - + + # 40 DF_LIMIT m2/ha - if ("df_limit" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #41 DFOL - leaf_density + + # 41 DFOL - leaf_density if ("leaf_density" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #42 DFRTOPT - + + # 42 DFRTOPT - if ("dfrtopt" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #43 DIAMMAX (m) - stem_diameter (cm) + + # 43 DIAMMAX (m) - stem_diameter (cm) if ("stem_diameter" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0( + "\t\t\t\t \n" + ), collapse = "") } - - #44 DOC_RESP_RATIO - coarseRootExudation + + # 44 DOC_RESP_RATIO - coarseRootExudation if ("coarseRootExudation" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse = "") } - - #45 DRAGC - + + # 45 DRAGC - if ("dragc" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse = "") } - - #46 DSAP - + + # 46 DSAP - if ("dsap" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse = "") } - - #47 DS_IS J/mol K - + + # 47 DS_IS J/mol K - if ("ds_is" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse = "") } - - #48 DS_MT J/mol K - + + # 48 DS_MT J/mol K - if ("ds_mt" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse = "") } - - #49 DVPD1 - + + # 49 DVPD1 - if ("dvpd1" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse = "") } - - #50 DVPD2 - + + # 50 DVPD2 - if ("dvpd2" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse = "") } - - #54 EF_OVOC ug/gDW h - + + # 54 EF_OVOC ug/gDW h - if ("ef_ovoc" %in% pft.names) { 
- b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse = "") } - - #55 EXPL_NH4 - + + # 55 EXPL_NH4 - if ("expl_nh4" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse = "") } - - #56 EXPL_NO3 - + + # 56 EXPL_NO3 - if ("expl_no3" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse = "") } - - #57 EXP_ROOT_DISTRIBUTION - + + # 57 EXP_ROOT_DISTRIBUTION - if ("exp_root_distribution" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t\t \n"), collapse = "") } - - #58 EXT - extinction_coefficient_diffuse + + # 58 EXT - extinction_coefficient_diffuse if ("extinction_coefficient_diffuse" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #59 FAGE - + + # 59 FAGE - if ("fage" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #62 FFACMAX - + + # 62 FFACMAX - if ("ffacmax" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #78 FOLRELGROMAX - + + # 78 FOLRELGROMAX - if ("folrelgromax" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #79 FRACTION_ROOT - root_biomass_fraction + + # 79 FRACTION_ROOT - root_biomass_fraction if ("root_biomass_fraction" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #80 FRACTION_FRUIT - + + # 80 FRACTION_FRUIT - if ("fraction_fruit" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #81 FRACTION_FOLIAGE - + + # 81 FRACTION_FOLIAGE - if ("fraction_foliage" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #82 FRET_N - + + # 82 FRET_N - if ("fret_n" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #86 FRTALLOC_REL - + + # 86 FRTALLOC_REL - if ("frtalloc_rel" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #87 FRTLOSS_SCALE - + + # 87 FRTLOSS_SCALE - if ("frtloss_scale" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #88 FYIELD - + + # 88 FYIELD - if ("fyield" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #89 GDD_BASE_TEMPERATURE (C) - gdd_tbase (C) + + # 89 GDD_BASE_TEMPERATURE (C) - gdd_tbase (C) if ("gdd_tbase" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #90 GDD_MAX_TEMPERATURE - gdd_tmax + + # 90 GDD_MAX_TEMPERATURE - gdd_tmax if ("gdd_tmax" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #91 GDD_EMERGENCE - + + # 91 GDD_EMERGENCE - if ("gdd_emergence" %in% 
pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #92 GDD_STEM_ELONGATION - + + # 92 GDD_STEM_ELONGATION - if ("gdd_stem_elongation" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #93 GDD_FLOWERING - + + # 93 GDD_FLOWERING - if ("gdd_flowering" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - + # #94 GDD_GRAIN_FILLING - GRAIN FILLING RELATIVE TO FLOWERING # if ("gdd_grain_filling" %in% pft.names) { # b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") # } - - #95 GDD_MATURITY - + + # 95 GDD_MATURITY - if ("gdd_maturity" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #96 GDDFOLEND - + + # 96 GDDFOLEND - if ("gddfolend" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #97 GDDFOLSTART - + + # 97 GDDFOLSTART - if ("gddfolstart" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #100 GGDPS_B (umol L-1 s-1) - + + # 100 GGDPS_B (umol L-1 s-1) - if ("ggdps_b" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #101 GSMAX (mmolH2O m-2 s-1) - + + # 101 GSMAX (mmolH2O m-2 s-1) - if ("gsmax" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #102 GSMIN (mmolH2O m-2 s-1) - + + # 102 GSMIN (mmolH2O m-2 s-1) - if ("gsmin" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #103 GZRTZ - + + # 103 GZRTZ - if ("gzrtz" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #104 H2OREF_A - + + # 104 H2OREF_A - if ("h2oref_a" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #107 H2OREF_GS - + + # 107 H2OREF_GS - if ("h2oref_gs" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #109 HALFSAT - + + # 109 HALFSAT - if ("halfsat" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #110 HA_IS (J mol-1) - + + # 110 HA_IS (J mol-1) - if ("ha_is" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #111 HA_MT (J mol-1) - + + # 111 HA_MT (J mol-1) - if ("ha_mt" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #112 HD_IS (J mol-1) - + + # 112 HD_IS (J mol-1) - if ("hd_is" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #113 HDJ - + + # 113 HDJ - if ("hdj" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- 
paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #114 HD_EXP - + + # 114 HD_EXP - if ("hd_exp" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #115 HD_MAX (m m-1) - + + # 115 HD_MAX (m m-1) - if ("hd_max" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #116 HD_MIN (m m-1) - + + # 116 HD_MIN (m m-1) - if ("hd_min" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #117 HD_MT (J mol-1) - + + # 117 HD_MT (J mol-1) - if ("hd_mt" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #118 HREF - + + # 118 HREF - if ("href" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #119 INI_N_FIX - + + # 119 INI_N_FIX - if ("ini_n_fix" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #120 KC25 (mmol mol-1 mbar-1)- + + # 120 KC25 (mmol mol-1 mbar-1)- if ("kc25" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #121 KM20 - + + # 121 KM20 - if ("km20" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #126 K_MM_NITROGEN_UPTAKE - + + # 126 K_MM_NITROGEN_UPTAKE - if ("k_mm_nitrogen_uptake" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #127 KO25 - + + # 127 KO25 - if ("ko25" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #128 KRC_WOOD - + + # 128 KRC_WOOD - if ("krc_wood" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #129 LIGNIN - + + # 129 LIGNIN - if ("lignin" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #130 MAINTENANCE_TEMP_REF - + + # 130 MAINTENANCE_TEMP_REF - if ("maintenance_temp_ref" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #131 MC_LEAF - + + # 131 MC_LEAF - if ("mc_leaf" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #132 MC_STEM - + + # 132 MC_STEM - if ("mc_stem" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #133 MC_ROOT - + + # 133 MC_ROOT - if ("mc_root" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #134 MC_STORAGE - + + # 134 MC_STORAGE - if ("mc_storage" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #135 MFOLOPT - + + # 135 MFOLOPT - if ("mfolopt" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - 
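## A minimal sketch of the pattern repeated in the species-parameter blocks above and below:
## each BETY trait found in `pft.names` appends one LDNDC species-parameter line to the
## accumulator string `b.2`; traits that are absent are simply skipped so LDNDC defaults apply.
## The helper name and the XML element layout shown here are illustrative assumptions only;
## the actual attribute string comes from the LDNDC speciesparameters format used by the
## surrounding code and is not reproduced here.
append_species_par <- function(b.2, pft.traits, pft.names, bety_name, ldndc_name) {
  if (bety_name %in% pft.names) {
    value <- pft.traits[which(pft.names == bety_name)]
    # hypothetical element layout, assuming one parameter element per trait
    b.2 <- paste(b.2,
      paste0("\t\t\t\t<par name=\"", ldndc_name, "\" value=\"", value, "\" />\n"),
      collapse = ""
    )
  }
  b.2
}
# e.g. b.2 <- append_species_par(b.2, pft.traits, pft.names, "SLAMAX", "SLAMAX")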
#136 M_FRUIT_OPT - + + # 136 M_FRUIT_OPT - if ("m_fruit_opt" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #139 MUE_IS (s-1) - + + # 139 MUE_IS (s-1) - if ("mue_is" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #140 MUE_MT (s-1) - + + # 140 MUE_MT (s-1) - if ("mue_mt" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #141 MWFM - + + # 141 MWFM - if ("mwfm" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #143 NC_FOLIAGE_MIN - + + # 143 NC_FOLIAGE_MIN - if ("nc_foliage_min" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #144 NC_FOLIAGE_MAX - + + # 144 NC_FOLIAGE_MAX - if ("nc_foliage_max" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #145 NCFOLOPT (kg kg-1) - + + # 145 NCFOLOPT (kg kg-1) - if ("ncfolopt" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #146 NC_FINEROOTS_MAX - + + # 146 NC_FINEROOTS_MAX - if ("nc_fineroots_max" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #147 NC_FINEROOTS_MIN - + + # 147 NC_FINEROOTS_MIN - if ("nc_fineroots_min" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #148 NC_FRUIT_MAX - + + # 148 NC_FRUIT_MAX - if ("nc_fruit_max" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #149 NC_FRUIT_MIN - + + # 149 NC_FRUIT_MIN - if ("nc_fruit_min" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #150 NC_STRUCTURAL_TISSUE_MAX - + + # 150 NC_STRUCTURAL_TISSUE_MAX - if ("nc_structural_tissue_max" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #151 NC_STRUCTURAL_TISSUE_MIN - + + # 151 NC_STRUCTURAL_TISSUE_MIN - if ("nc_structural_tissue_min" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #152 NCSAPOPT (kg kg-1) - + + # 152 NCSAPOPT (kg kg-1) - if ("ncsapopt" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #153 N_DEF_FACTOR - + + # 153 N_DEF_FACTOR - if ("n_def_factor" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #154 N_DEMAND_VEG - + + # 154 N_DEMAND_VEG - if ("n_demand_veg" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #155 N_DEMAND_REPROD - + + # 155 N_DEMAND_REPROD - if ("n_demand_reprod" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #156 NFIX_CEFF - + + # 156 
NFIX_CEFF - if ("nfix_ceff" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #157 NFIX_TMAX - + + # 157 NFIX_TMAX - if ("nfix_tmax" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #158 NFIX_TOPT - + + # 158 NFIX_TOPT - if ("nfix_topt" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #159 NFIX_TMIN - + + # 159 NFIX_TMIN - if ("nfix_tmin" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #160 NFIX_W - + + # 160 NFIX_W - if ("nfix_w" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #161 NFIX_RATE (kg N kg-1 DM-1 d-1) - + + # 161 NFIX_RATE (kg N kg-1 DM-1 d-1) - if ("nfix_rate" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #163 PEXS - + + # 163 PEXS - if ("pexs" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #164 PFL - + + # 164 PFL - if ("pfl" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #165 PSL - + + # 165 PSL - if ("psl" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #167 PSNTMAX (C) - pstemp_max (C) + + # 167 PSNTMAX (C) - pstemp_max (C) if ("pstemp_max" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #168 PSNTMIN (C) - pstemp_min (C) + + # 168 PSNTMIN (C) - pstemp_min (C) if ("pstemp_min" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #169 PSNTOPT (C) - psnTOpt (C) + + # 169 PSNTOPT (C) - psnTOpt (C) if ("psnTOpt" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #170 QHRD - + + # 170 QHRD - if ("qhrd" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #171 QJVC - + + # 171 QJVC - if ("qjvc" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #172 QRD25 (umol m-2 s-1) - + + # 172 QRD25 (umol m-2 s-1) - if ("qrd25" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #173 QRF - + + # 173 QRF - if ("qrf" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #174 QSF_P1 (m2 cm-2) - + + # 174 QSF_P1 (m2 cm-2) - if ("qsf_p1" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #175 QSF_P2 (m2 cm-2) - + + # 175 QSF_P2 (m2 cm-2) - if ("qsf_p2" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #176 QVOVC - + + # 176 QVOVC - if 
("qvovc" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #177 QWODFOLMIN - + + # 177 QWODFOLMIN - if ("qwodfolmin" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #178 RBUDDEM - + + # 178 RBUDDEM - if ("rbuddem" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - - #179 RESP - resp + + + # 179 RESP - resp if ("resp" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #180 RESPQ10 - leaf_respiration_Q10 + + # 180 RESPQ10 - leaf_respiration_Q10 if ("leaf_respiration_Q10" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #181 ROOTMRESPFRAC - + + # 181 ROOTMRESPFRAC - if ("rootmrespfrac" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #182 RS_CONDUCT - + + # 182 RS_CONDUCT - if ("rs_conduct" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #183 SCALE_I - + + # 183 SCALE_I - if ("scale_i" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #184 SCALE_M - + + # 184 SCALE_M - if ("scale_m" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #185 SDJ - + + # 185 SDJ - if ("sdj" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #186 SENESCENCE_AGE - + + # 186 SENESCENCE_AGE - if ("senescence_age" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #187 SENESCENCE_DROUGHT - + + # 187 SENESCENCE_DROUGHT - if ("senescence_drought" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") # In order to have zeros + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") # In order to have zeros } - - #188 SENESCENCE_FROST - + + # 188 SENESCENCE_FROST - if ("senescence_frost" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #?? SENESCSTART - + + # ?? 
SENESCSTART - if ("senescstart" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #192 SHOOT_STIMULATION_REPROD - + + # 192 SHOOT_STIMULATION_REPROD - if ("shoot_stimulation_reprod" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #193 SLAMAX (m2 kg-1) - SLAMAX (m2 kg-1) + + # 193 SLAMAX (m2 kg-1) - SLAMAX (m2 kg-1) if ("SLAMAX" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #194 SLAMIN (m2 kg-1) - SLAMIN (m2 kg-1) + + # 194 SLAMIN (m2 kg-1) - SLAMIN (m2 kg-1) if ("SLAMIN" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #195 SLADECLINE - + + # 195 SLADECLINE - if ("sladecline" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #196 SLOPE_GSA - + + # 196 SLOPE_GSA - if ("slope_gsa" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #197 SLOPE_GSCO2 - + + # 197 SLOPE_GSCO2 - if ("slope_gsco2" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #198 SLOPE_GSH2O - + + # 198 SLOPE_GSH2O - if ("slope_gsh2o" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #199 SLOPE_NC - + + # 199 SLOPE_NC - if ("slope_nc" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #200 TAP_P1 - + + # 200 TAP_P1 - if ("tap_p1" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #201 TAP_P2 - + + # 201 TAP_P2 - if ("tap_p2" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #202 TAP_P3 - + + # 202 TAP_P3 - if ("tap_p3" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #203 TAU - + + # 203 TAU - if ("tau" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #204 THETA - + + # 204 THETA - if ("theta" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #205 TLIMIT - + + # 205 TLIMIT - if ("tlimit" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #206 TOFRTBAS - + + # 206 TOFRTBAS - if ("tofrtbas" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #207 TOSAPMAX - + + # 207 TOSAPMAX - if ("tosapmax" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #208 UCMAX (kgN m-2 leaf area) - + + # 208 UCMAX (kgN m-2 leaf area) - if ("ucmax" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - 
#210 US_NH4 (kgN kg-1 fine root dry weight day-1) - + + # 210 US_NH4 (kgN kg-1 fine root dry weight day-1) - if ("us_nh4" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #211 US_NH4MYC (kgN kg-1 fine root dry weight day-1) - + + # 211 US_NH4MYC (kgN kg-1 fine root dry weight day-1) - if ("us_nh4myc" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #213 US_NO3 (kgN kg-1 fine root dry weight day-1) - + + # 213 US_NO3 (kgN kg-1 fine root dry weight day-1) - if ("us_no3" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #214 US_NO3MYC (kgN kg-1 fine root dry weight day-1) - + + # 214 US_NO3MYC (kgN kg-1 fine root dry weight day-1) - if ("us_no3myc" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #215 VCFACT - + + # 215 VCFACT - if ("vcfact" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #216 VCMAX25 - + + # 216 VCMAX25 - if ("vcmax25" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #217 VPDREF (kPa) - + + # 217 VPDREF (kPa) - if ("vpdref" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #219 WOODMRESPA - + + # 219 WOODMRESPA - if ("woodmrespa" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #220 WUECMAX - wuecmax + + # 220 WUECMAX - wuecmax if ("wuecmax" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #221 WUECMIN - wuecmin + + # 221 WUECMIN - wuecmin if ("wuecmin" %in% pft.names) { # CHECK THAT THE VALUE IS NOT OVER MAX wuecmin_val <- ifelse(pft.traits[which(pft.names == "wuecmin")] > pft.traits[which(pft.names == "wuecmax")], pft.traits[which(pft.names == "wuecmax")], pft.traits[which(pft.names == "wuecmin")]) - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #222 ZRTMC - + + # 222 ZRTMC - if ("zrtmc" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - - #??? HEIGHT_MAX - + + # ??? HEIGHT_MAX - if ("height_max" %in% pft.names) { - b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse="") + b.2 <- paste(b.2, paste0("\t\t\t\t \n"), collapse = "") } - + ## SITEPARAMETERS # Number at the beginning refers to the number of site parameters in LDNDC guide book. 
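## The site-parameter blocks below follow the same accumulate-if-present pattern as the
## species parameters above, appending lines to `h.2` instead of `b.2`. The one
## non-mechanical step in the species section is the wuecmax/wuecmin consistency check;
## a minimal equivalent sketch, assuming both traits are single sampled values
## (the numbers below are hypothetical), is a simple clamp:
wuecmax_val <- 6.5
wuecmin_val <- 7.2
wuecmin_val <- min(wuecmin_val, wuecmax_val) # keep minimum WUE at or below the maximum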
- - - #58 EVALIM + + + # 58 EVALIM if ("evalim" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #82 GROUNDWATER_NUTRIENT_RENEWAL - + + # 82 GROUNDWATER_NUTRIENT_RENEWAL - if ("groundwater_nutrient_renewal" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #121 METRX_AMAX - + + # 121 METRX_AMAX - if ("metrx_amax" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #122 METRX_AMAX_ALGAE - + + # 122 METRX_AMAX_ALGAE - if ("metrx_amax_algae" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #123 METRX_BETA_LITTER_TYPE - + + # 123 METRX_BETA_LITTER_TYPE - if ("metrx_beta_litter_type" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #124 METRX_BIOSYNTH_EFF - + + # 124 METRX_BIOSYNTH_EFF - if ("metrx_biosynth_eff" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #125 METRX_CN_ALGAE - + + # 125 METRX_CN_ALGAE - if ("metrx_cn_algae" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #127 METRX_CN_MIC_MAX - + + # 127 METRX_CN_MIC_MAX - if ("metrx_cn_mic_max" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #128 METRX_CN_MIC_MIN - + + # 128 METRX_CN_MIC_MIN - if ("metrx_cn_mic_min" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #129 METRX_CO2_PROD_DECOMP - + + # 129 METRX_CO2_PROD_DECOMP - if ("metrx_co2_prod_decomp" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #130 METRX_D_EFF_REDUCTION - + + # 130 METRX_D_EFF_REDUCTION - if ("metrx_d_eff_reduction" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #131 METRX_F_CHEMODENIT_PH_ONEILL_1 - + + # 131 METRX_F_CHEMODENIT_PH_ONEILL_1 - if ("metrx_f_chemodenit_ph_oneill_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #132 METRX_F_CHEMODENIT_PH_ONEILL_2 - + + # 132 METRX_F_CHEMODENIT_PH_ONEILL_2 - if ("metrx_f_chemodenit_ph_oneill_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #133 METRX_F_CHEMODENIT_PH_ONEILL_3 - + + # 133 METRX_F_CHEMODENIT_PH_ONEILL_3 - if ("metrx_f_chemodenit_ph_oneill_3" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #134 METRX_F_CHEMODENIT_T_EXP_1 - + + # 134 METRX_F_CHEMODENIT_T_EXP_1 - if ("metrx_f_chemodenit_t_exp_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #135 METRX_F_CHEMODENIT_T_EXP_2 - + + # 135 METRX_F_CHEMODENIT_T_EXP_2 - if ("metrx_f_chemodenit_t_exp_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #136 METRX_F_DECOMP_M_WEIBULL_1 - + 
+ # 136 METRX_F_DECOMP_M_WEIBULL_1 - if ("metrx_f_decomp_m_weibull_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #137 METRX_F_DECOMP_M_WEIBULL_2 - + + # 137 METRX_F_DECOMP_M_WEIBULL_2 - if ("metrx_f_decomp_m_weibull_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #138 METRX_F_DECOMP_M_WEIBULL_3 - + + # 138 METRX_F_DECOMP_M_WEIBULL_3 - if ("metrx_f_decomp_m_weibull_3" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #139 METRX_F_CH4_OXIDATION_T_EXP_1 - + + # 139 METRX_F_CH4_OXIDATION_T_EXP_1 - if ("metrx_f_ch4_oxidation_t_exp_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #140 METRX_F_CH4_OXIDATION_T_EXP_2 - + + # 140 METRX_F_CH4_OXIDATION_T_EXP_2 - if ("metrx_f_ch4_oxidation_t_exp_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #141 METRX_F_CH4_PRODUCTION_T_EXP_1 - + + # 141 METRX_F_CH4_PRODUCTION_T_EXP_1 - if ("metrx_f_ch4_production_t_exp_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #142 METRX_F_CH4_PRODUCTION_T_EXP_2 - + + # 142 METRX_F_CH4_PRODUCTION_T_EXP_2 - if ("metrx_f_ch4_production_t_exp_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #143 METRX_F_DECOMP_T_EXP_1 - + + # 143 METRX_F_DECOMP_T_EXP_1 - if ("metrx_f_decomp_t_exp_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #144 METRX_F_DECOMP_T_EXP_2 - + + # 144 METRX_F_DECOMP_T_EXP_2 - if ("metrx_f_decomp_t_exp_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #145 METRX_F_DECOMP_CLAY_1 - + + # 145 METRX_F_DECOMP_CLAY_1 - if ("metrx_f_decomp_clay_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #146 METRX_F_DECOMP_CLAY_2 - + + # 146 METRX_F_DECOMP_CLAY_2 - if ("metrx_f_decomp_clay_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #147 METRX_F_DENIT_N2_MIN - + + # 147 METRX_F_DENIT_N2_MIN - if ("metrx_f_denit_n2_min" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #148 METRX_F_DENIT_N2_MAX - + + # 148 METRX_F_DENIT_N2_MAX - if ("metrx_f_denit_n2_max" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #149 METRX_F_DENIT_NO - + + # 149 METRX_F_DENIT_NO - if ("metrx_f_denit_no" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #150 METRX_F_DENIT_PH_EXP_1 - + + # 150 METRX_F_DENIT_PH_EXP_1 - if ("metrx_f_denit_ph_exp_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #151 METRX_F_DENIT_PH_EXP_2 - + + # 151 METRX_F_DENIT_PH_EXP_2 - if ("metrx_f_denit_ph_exp_2" %in% pft.names) { - h.2 <- paste(h.2, 
paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #152 METRX_F_DENIT_M_WEIBULL_1 - + + # 152 METRX_F_DENIT_M_WEIBULL_1 - if ("metrx_f_denit_m_weibull_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #153 METRX_F_DENIT_M_WEIBULL_2 - + + # 153 METRX_F_DENIT_M_WEIBULL_2 - if ("metrx_f_denit_m_weibull_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #154 METRX_F_N_ALGAE - + + # 154 METRX_F_N_ALGAE - if ("metrx_f_n_algae" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #155 METRX_F_N_CH4_OXIDATION - + + # 155 METRX_F_N_CH4_OXIDATION - if ("metrx_f_n_ch4_oxidation" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #156 METRX_F_NIT_NO_M_EXP_1 - + + # 156 METRX_F_NIT_NO_M_EXP_1 - if ("metrx_f_nit_no_m_exp_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #157 METRX_F_NIT_NO_M_EXP_2 - + + # 157 METRX_F_NIT_NO_M_EXP_2 - if ("metrx_f_nit_no_m_exp_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #158 METRX_F_NIT_NO_T_EXP_1 - + + # 158 METRX_F_NIT_NO_T_EXP_1 - if ("metrx_f_nit_no_t_exp_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #159 METRX_F_NIT_NO_T_EXP_2 - + + # 159 METRX_F_NIT_NO_T_EXP_2 - if ("metrx_f_nit_no_t_exp_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #160 METRX_F_NIT_NO_PH_LIN_1 - + + # 160 METRX_F_NIT_NO_PH_LIN_1 - if ("metrx_f_nit_no_ph_lin_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #161 METRX_F_NIT_NO_PH_LIN_2 - + + # 161 METRX_F_NIT_NO_PH_LIN_2 - if ("metrx_f_nit_no_ph_lin_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #162 METRX_F_NIT_N2O_M_WEIBULL_1 - + + # 162 METRX_F_NIT_N2O_M_WEIBULL_1 - if ("metrx_f_nit_n2o_m_weibull_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #163 METRX_F_NIT_N2O_M_WEIBULL_2 - + + # 163 METRX_F_NIT_N2O_M_WEIBULL_2 - if ("metrx_f_nit_n2o_m_weibull_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #164 METRX_F_NIT_N2O_M_WEIBULL_3 - + + # 164 METRX_F_NIT_N2O_M_WEIBULL_3 - if ("metrx_f_nit_n2o_m_weibull_3" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #165 METRX_F_NIT_N2O_T_EXP_1 - + + # 165 METRX_F_NIT_N2O_T_EXP_1 - if ("metrx_f_nit_n2o_t_exp_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #166 METRX_F_NIT_N2O_T_EXP_2 - + + # 166 METRX_F_NIT_N2O_T_EXP_2 - if ("metrx_f_nit_n2o_t_exp_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #167 METRX_F_NIT_PH_ONEILL_1 - + + # 167 
METRX_F_NIT_PH_ONEILL_1 - if ("metrx_f_nit_ph_oneill_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #168 METRX_F_NIT_PH_ONEILL_2 - + + # 168 METRX_F_NIT_PH_ONEILL_2 - if ("metrx_f_nit_ph_oneill_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #169 METRX_F_NIT_PH_ONEILL_3 - + + # 169 METRX_F_NIT_PH_ONEILL_3 - if ("metrx_f_nit_ph_oneill_3" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #170 METRX_FE_REDUCTION - + + # 170 METRX_FE_REDUCTION - if ("metrx_fe_reduction" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #171 METRX_FRAC_FE_CH4_PROD - + + # 171 METRX_FRAC_FE_CH4_PROD - if ("metrx_frac_fe_ch4_prod" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #172 METRX_MAX_DEPTH_DENIT - + + # 172 METRX_MAX_DEPTH_DENIT - if ("metrx_max_depth_denit" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #173 METRX_MIC_EFF - + + # 173 METRX_MIC_EFF - if ("metrx_mic_eff" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #174 METRX_MIC_EFF_METANE_OX - + + # 174 METRX_MIC_EFF_METANE_OX - if ("metrx_mic_eff_metane_ox" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #175 METRX_MUEMAX_C_ALGAE - + + # 175 METRX_MUEMAX_C_ALGAE - if ("metrx_muemax_c_algae" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #176 METRX_MUEMAX_C_CH4_OX - + + # 176 METRX_MUEMAX_C_CH4_OX - if ("metrx_muemax_c_ch4_ox" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #177 METRX_MUEMAX_C_CH4_PROD - + + # 177 METRX_MUEMAX_C_CH4_PROD - if ("metrx_muemax_c_ch4_prod" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #178 METRX_MUEMAX_C_DENIT - + + # 178 METRX_MUEMAX_C_DENIT - if ("metrx_muemax_c_denit" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #179 METRX_MUEMAX_C_FERM - + + # 179 METRX_MUEMAX_C_FERM - if ("metrx_muemax_c_ferm" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #180 METRX_MUEMAX_C_NIT - + + # 180 METRX_MUEMAX_C_NIT - if ("metrx_muemax_c_nit" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #181 METRX_MUEMAX_C_FE_RED - + + # 181 METRX_MUEMAX_C_FE_RED - if ("metrx_muemax_c_fe_red" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #182 METRX_MUEMAX_H2_CH4_PROD - + + # 182 METRX_MUEMAX_H2_CH4_PROD - if ("metrx_muemax_h2_ch4_prod" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #183 METRX_MUEMAX_N_ASSI - + + # 183 METRX_MUEMAX_N_ASSI - if 
("metrx_muemax_n_assi" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #184 METRX_NITRIFY_MAX - + + # 184 METRX_NITRIFY_MAX - if ("metrx_nitrify_max" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #185 METRX_KF_NIT_NO - + + # 185 METRX_KF_NIT_NO - if ("metrx_kf_nit_no" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #186 METRX_KF_NIT_N2O - + + # 186 METRX_KF_NIT_N2O - if ("metrx_kf_nit_n2o" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #187 METRX_K_O2_CH4_PROD - + + # 187 METRX_K_O2_CH4_PROD - if ("metrx_k_o2_ch4_prod" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #188 METRX_K_O2_FE_RED - + + # 188 METRX_K_O2_FE_RED - if ("metrx_k_o2_fe_red" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #189 METRX_KF_FE_FE_RED - + + # 189 METRX_KF_FE_FE_RED - if ("metrx_kf_fe_fe_red" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #190 METRX_KMM_AC_CH4_PROD - + + # 190 METRX_KMM_AC_CH4_PROD - if ("metrx_kmm_ac_ch4_prod" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #191 METRX_KMM_AC_FE_RED - + + # 191 METRX_KMM_AC_FE_RED - if ("metrx_kmm_ac_fe_red" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #192 METRX_KMM_H2_FE_RED - + + # 192 METRX_KMM_H2_FE_RED - if ("metrx_kmm_h2_fe_red" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #193 METRX_KMM_C_DENIT - + + # 193 METRX_KMM_C_DENIT - if ("metrx_kmm_c_denit" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #194 METRX_KMM_CH4_CH4_OX - + + # 194 METRX_KMM_CH4_CH4_OX - if ("metrx_kmm_ch4_ch4_ox" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #195 METRX_KMM_C_MIC - + + # 195 METRX_KMM_C_MIC - if ("metrx_kmm_c_mic" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #196 METRX_KMM_O2_NIT - + + # 196 METRX_KMM_O2_NIT - if ("metrx_kmm_o2_nit" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #197 METRX_KMM_H2_FERM - + + # 197 METRX_KMM_H2_FERM - if ("metrx_kmm_h2_ferm" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #198 METRX_KMM_H2_CH4_PROD - + + # 198 METRX_KMM_H2_CH4_PROD - if ("metrx_kmm_h2_ch4_prod" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #201 METRX_KMM_N_DENIT - + + # 201 METRX_KMM_N_DENIT - if ("metrx_kmm_n_denit" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #202 METRX_KMM_N_MIC 
- + + # 202 METRX_KMM_N_MIC - if ("metrx_kmm_n_mic" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #203 METRX_KMM_NH4_NIT - + + # 203 METRX_KMM_NH4_NIT - if ("metrx_kmm_nh4_nit" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #204 METRX_KMM_NO2_NIT - + + # 204 METRX_KMM_NO2_NIT - if ("metrx_kmm_no2_nit" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #205 METRX_KMM_O2_CH4_OX - + + # 205 METRX_KMM_O2_CH4_OX - if ("metrx_kmm_o2_ch4_ox" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #206 METRX_KMM_FE_OX - + + # 206 METRX_KMM_FE_OX - if ("metrx_kmm_fe_ox" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #207 METRX_KMM_PH_INCREASE_FROM_UREA - + + # 207 METRX_KMM_PH_INCREASE_FROM_UREA - if ("metrx_kmm_ph_increase_from_urea" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #208 METRX_KR_ANVF_DIFF_GAS - + + # 208 METRX_KR_ANVF_DIFF_GAS - if ("metrx_kr_anvf_diff_gas" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #209 METRX_KR_ANVF_DIFF_LIQ - + + # 209 METRX_KR_ANVF_DIFF_LIQ - if ("metrx_kr_anvf_diff_liq" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #210 METRX_KR_DC_ALGAE - + + # 210 METRX_KR_DC_ALGAE - if ("metrx_kr_dc_algae" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #211 METRX_KR_DC_AORG - + + # 211 METRX_KR_DC_AORG - if ("metrx_kr_dc_aorg" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #212 METRX_KR_DC_CEL - + + # 212 METRX_KR_DC_CEL - if ("metrx_kr_dc_cel" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #213 METRX_KR_DC_HUM_0 - + + # 213 METRX_KR_DC_HUM_0 - if ("metrx_kr_dc_hum_0" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #214 METRX_KR_DC_HUM_1 - + + # 214 METRX_KR_DC_HUM_1 - if ("metrx_kr_dc_hum_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #215 METRX_KR_DC_HUM_2 - + + # 215 METRX_KR_DC_HUM_2 - if ("metrx_kr_dc_hum_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #216 METRX_KR_DC_LIG - + + # 216 METRX_KR_DC_LIG - if ("metrx_kr_dc_lig" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #217 METRX_KR_DC_RAW_LITTER - + + # 217 METRX_KR_DC_RAW_LITTER - if ("metrx_kr_dc_raw_litter" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #218 METRX_KR_DC_SOL - + + # 218 METRX_KR_DC_SOL - if ("metrx_kr_dc_sol" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, 
paste0("\t\t \n"), collapse = "") } - - #219 METRX_KR_DC_WOOD - + + # 219 METRX_KR_DC_WOOD - if ("metrx_kr_dc_wood" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #220 METRX_KR_DENIT_CHEMO - + + # 220 METRX_KR_DENIT_CHEMO - if ("metrx_kr_denit_chemo" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #221 METRX_KR_FRAC_FRAG_ABOVE - + + # 221 METRX_KR_FRAC_FRAG_ABOVE - if ("metrx_kr_frac_frag_above" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #223 METRX_KR_OX_FE - + + # 223 METRX_KR_OX_FE - if ("metrx_kr_ox_fe" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #224 METRX_KR_HU_AORG_HUM_0 - + + # 224 METRX_KR_HU_AORG_HUM_0 - if ("metrx_kr_hu_aorg_hum_0" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #225 METRX_KR_HU_AORG_HUM_1 - + + # 225 METRX_KR_HU_AORG_HUM_1 - if ("metrx_kr_hu_aorg_hum_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #227 METRX_KR_HU_SOL - + + # 227 METRX_KR_HU_SOL - if ("metrx_kr_hu_sol" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #229 METRX_KR_HU_HUM_0 - + + # 229 METRX_KR_HU_HUM_0 - if ("metrx_kr_hu_hum_0" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #230 METRX_KR_HU_HUM_1 - + + # 230 METRX_KR_HU_HUM_1 - if ("metrx_kr_hu_hum_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #231 METRX_KR_HU_LIG - + + # 231 METRX_KR_HU_LIG - if ("metrx_kr_hu_lig" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #233 METRX_KR_REDUCTION_CN - + + # 233 METRX_KR_REDUCTION_CN - if ("metrx_kr_reduction_cn" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #234 METRX_KR_REDUCTION_ANVF - + + # 234 METRX_KR_REDUCTION_ANVF - if ("metrx_kr_reduction_anvf" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #235 METRX_KR_REDUCTION_CONIFEROUS - + + # 235 METRX_KR_REDUCTION_CONIFEROUS - if ("metrx_kr_reduction_coniferous" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #236 METRX_LIG_HUMIFICATION - + + # 236 METRX_LIG_HUMIFICATION - if ("metrx_lig_humification" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #237 METRX_RET_HUMUS - + + # 237 METRX_RET_HUMUS - if ("metrx_ret_humus" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #238 METRX_RET_LITTER - + + # 238 METRX_RET_LITTER - if ("metrx_ret_litter" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #239 METRX_RET_MICROBES - + + # 239 METRX_RET_MICROBES - if 
("metrx_ret_microbes" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #240 METRX_TILL_STIMULATION_1 - + + # 240 METRX_TILL_STIMULATION_1 - if ("metrx_till_stimulation_1" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #241 METRX_TILL_STIMULATION_2 - + + # 241 METRX_TILL_STIMULATION_2 - if ("metrx_till_stimulation_2" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #242 METRX_V_EBULLITION - + + # 242 METRX_V_EBULLITION - if ("metrx_v_ebullition" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #303 RETDOC - + + # 303 RETDOC - if ("retdoc" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #304 RETNO3 - + + # 304 RETNO3 - if ("retno3" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #317 TEXP - + + # 317 TEXP - if ("texp" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - + + # SOILWATER RELATED - - #2 BY_PASSF - + + # 2 BY_PASSF - if ("by_passf" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #69 FPERCOL - + + # 69 FPERCOL - if ("fpercol" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #80 FRUNOFF - + + # 80 FRUNOFF - if ("frunoff" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #84 IMPEDANCE_PAR - + + # 84 IMPEDANCE_PAR - if ("impedance_par" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #307 ROOT_DEPENDENT_TRANS - + + # 307 ROOT_DEPENDENT_TRANS - if ("root_dependent_trans" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - + # 349 WCDNDC_EVALIM_FRAC_WCMIN if ("wcdndc_evalim_frac_wcmin" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - #335 WCDNDC_INCREASE_POT_EVAPOTRANS - + + # 335 WCDNDC_INCREASE_POT_EVAPOTRANS - if ("wcdndc_increase_pot_evapotrans" %in% pft.names) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - - - - - + + + + + # Assigning pft values species_par_values[names(trait.values)[pft]] <- b.2 - + b.2 <- "" - - } - + ## INITIAL SOIL CONDITIONS soil_layer <- list() - - + + ## Soil layers, if not external files are given - if(is.null(settings$run$inputs$poolinitcond$path)){ - + if (is.null(settings$run$inputs$poolinitcond$path)) { # Set different layers, which will be used based on the soil data that is available # For example, if we have soil data for top layer, then that will be used instead of soil_layer_1 soil_layer[1] <- '' @@ -1801,322 +1794,322 @@ write.config.LDNDC <- function(defaults, trait.values, settings, run.id) { soil_layer[5] <- '' soil_layer[6] <- '' soil_layer[7] <- '' - + soil_layer_values <- paste(soil_layer, collapse = "\n \t") } - + ## One soil layer is given - 
else if(!is.null(settings$run$inputs$poolinitcond$path)){ + else if (!is.null(settings$run$inputs$poolinitcond$path)) { # Set empty soil_all_block <- NULL - + # Reading soil file soil_IC_list <- PEcAn.data.land::pool_ic_netcdf2list(settings$run$inputs$poolinitcond$path) - - - + + + ## --- Initial conditions for the site --- ## - + # Before moving on to writing the site file, check the siteparameter and site initial conditions - + ## Siteparameter file - #300 RCNM - + # 300 RCNM - # C:N ratio of humus if ("c2n_humus" %in% names(soil_IC_list$vals)) { - h.2 <- paste(h.2, paste0("\t\t \n"), collapse="") + h.2 <- paste(h.2, paste0("\t\t \n"), collapse = "") } - + ## Event file # Populate the events file, if there are placeholders for initial biomasses or fractional cover # Initial biomass in the field - if(any(grepl("@InitialBiomass@", eventsfile))){ + if (any(grepl("@InitialBiomass@", eventsfile))) { if ("AGB" %in% names(soil_IC_list$vals)) { - initialbiomass <- round( PEcAn.utils::ud_convert(unlist(soil_IC_list$vals["AGB"])[[1]], "kg m-2", "kg ha-1"), 1 ) - } - else{ + initialbiomass <- round(PEcAn.utils::ud_convert(unlist(soil_IC_list$vals["AGB"])[[1]], "kg m-2", "kg ha-1"), 1) + } else { initialbiomass <- 100 } # Fill in the value eventsfile <- gsub("@InitialBiomass@", paste0("'", initialbiomass, "'"), eventsfile) } # Fractional cover of the plants - if(any(grepl("@FractionalCover@", eventsfile))){ + if (any(grepl("@FractionalCover@", eventsfile))) { if ("fractional_cover" %in% names(soil_IC_list$vals)) { fractionalcover <- unlist(soil_IC_list$vals["fractional_cover"])[[1]] - } - else{ + } else { fractionalcover <- 0.5 } # Fill in the value eventsfile <- gsub("@FractionalCover@", paste0("'", fractionalcover, "'"), eventsfile) } - - + + ## Site file (general) # Soil use history - if(any(grepl("@Info_Use_History@", sitefile))){ - if ("history" %in% names(soil_IC_list$vals)){ + if (any(grepl("@Info_Use_History@", sitefile))) { + if ("history" %in% names(soil_IC_list$vals)) { soil_use_history <- unlist(soil_IC_list$vals["history"])[[1]] - } - else{ + } else { soil_use_history <- "arable" } sitefile <- gsub("@Info_Use_History@", paste0("'", soil_use_history, "'"), sitefile) } - + # Soil type - if(any(grepl("@Soil_Type@", sitefile))){ - if ("soil_type" %in% names(soil_IC_list$vals)){ + if (any(grepl("@Soil_Type@", sitefile))) { + if ("soil_type" %in% names(soil_IC_list$vals)) { soil_type <- unlist(soil_IC_list$vals["soil_type"])[[1]] - } - else{ + } else { soil_type <- "SALO" } sitefile <- gsub("@Soil_Type@", paste0("'", soil_type, "'"), sitefile) } - + # Litter height - if(any(grepl("@Litter_Height@", sitefile))){ - if ("litter_height" %in% names(soil_IC_list$vals)){ + if (any(grepl("@Litter_Height@", sitefile))) { + if ("litter_height" %in% names(soil_IC_list$vals)) { litter_height <- unlist(soil_IC_list$vals["litter_height"])[[1]] - } - else{ + } else { litter_height <- "0.0" } sitefile <- gsub("@Litter_Height@", paste0("'", litter_height, "'"), sitefile) } - + ## -- Layers -- ## - + # Check how many depth layers are given and the depth of each depth <- soil_IC_list$dims$depth layer_count <- length(depth) # Check which stratums are given for the layers layer_div <- soil_IC_list$vals$stratum - - - for(depth_level in 1:layer_count){ - + + + for (depth_level in 1:layer_count) { soil_one_block <- NULL # Discretization -- every soil layer is further divided into several sublayers; the layer that contains these # sublayers is here called a block.
In LDNDC it is not recommended to use layers that are too thin, so each block is still # divided into sublayers that are not too thick. - - - + + + # For 1st level - if(depth_level == 1){ + if (depth_level == 1) { disk <- depth[depth_level] * 1000 / layer_div[depth_level] } # For the rest of the layers, depth is reported as cumulative, but LDNDC uses thickness - else{ - disk <- (depth[depth_level] - depth[depth_level-1]) * 1000 / layer_div[depth_level] + else { + disk <- (depth[depth_level] - depth[depth_level - 1]) * 1000 / layer_div[depth_level] } - - for(disk_level in 1:layer_div[depth_level]){ - - + + for (disk_level in 1:layer_div[depth_level]) { # Start creating a soil layer soil_layer_values <- paste0(" \n") - + # Add one individual layer to the block soil_one_block <- paste(soil_one_block, soil_layer_values) } - + # Combine the previous block of layers with this one and note that the "layer" changes, which indicates that new # parameter values have been used - if(depth_level != layer_count){ + if (depth_level != layer_count) { soil_all_block <- paste(soil_all_block, soil_one_block, "\n \n") } else { soil_all_block <- paste(soil_all_block, soil_one_block, "\n") } } - + # If fewer than seven layer blocks are initialised, use the default ones for the bottom # if(depth_level < 6){ # soil_combine <- paste(soil_all_block, "\t\t", paste(soil_layer[-c(1:depth_level)], collapse = "\n \t\t")) # } # else{ soil_combine <- soil_all_block - #} - - } - - else{ + # } + } else { PEcAn.logger::logger.severe("More than one soil path given: only one soil path is supported") } - - + + ## Writing and saving species- and siteparameters + initial soil conditions speciesparfile_pfts <- NULL - + # Populate the speciesparameters after the info has been read from the priors - for(pftn in pfts_run){ + for (pftn in pfts_run) { ## Crops ## # Barley - if(pftn == "barley"){ - speciesparfile_pfts <- paste0(speciesparfile_pfts, - "\t\t\t \n", - species_par_values["barley"][[1]], - "\t\t\t \n\n") + if (pftn == "barley") { + speciesparfile_pfts <- paste0( + speciesparfile_pfts, + "\t\t\t \n", + species_par_values["barley"][[1]], + "\t\t\t \n\n" + ) } # Oat - if(pftn == "oat"){ - speciesparfile_pfts <- paste0(speciesparfile_pfts, - "\t\t\t \n", - species_par_values["oat"][[1]], - "\t\t\t \n\n") + if (pftn == "oat") { + speciesparfile_pfts <- paste0( + speciesparfile_pfts, + "\t\t\t \n", + species_par_values["oat"][[1]], + "\t\t\t \n\n" + ) } # Triticale - if(pftn == "triticale"){ - speciesparfile_pfts <- paste0(speciesparfile_pfts, - "\t\t\t \n", - species_par_values["triticale"][[1]], - "\t\t\t \n\n") + if (pftn == "triticale") { + speciesparfile_pfts <- paste0( + speciesparfile_pfts, + "\t\t\t \n", + species_par_values["triticale"][[1]], + "\t\t\t \n\n" + ) } ## Grass # Timothy - if(pftn == "timothy"){ - speciesparfile_pfts <- paste0(speciesparfile_pfts, - "\t\t\t \n", - species_par_values["timothy"][[1]], - "\t\t\t \n\n") + if (pftn == "timothy") { + speciesparfile_pfts <- paste0( + speciesparfile_pfts, + "\t\t\t \n", + species_par_values["timothy"][[1]], + "\t\t\t \n\n" + ) } - + # Meadow - if(pftn == "meadow"){ - speciesparfile_pfts <- paste0(speciesparfile_pfts, - "\t\t\t \n", - species_par_values["meadow"][[1]], - "\t\t\t \n\n") + if (pftn == "meadow") { + speciesparfile_pfts <- paste0( + speciesparfile_pfts, + "\t\t\t \n", + species_par_values["meadow"][[1]], + "\t\t\t \n\n" + ) } - + ## Forest # Pipy, need to check the correct name for this wood species - if(pftn == "pipy"){ - speciesparfile_pfts <- paste0(speciesparfile_pfts, - "\t\t\t
\n", - species_par_values["pipy"][[1]], - "\t\t\t \n\n") + if (pftn == "pipy") { + speciesparfile_pfts <- paste0( + speciesparfile_pfts, + "\t\t\t \n", + species_par_values["pipy"][[1]], + "\t\t\t \n\n" + ) } } - - + + # Combine the speciesparameter info speciesparfile <- gsub("@Info@", speciesparfile_pfts, speciesparfile) - - + + # Write to a new xml-file, which will be used on a run. Every simulation run will have # their own set of speciesparameters values writeLines(speciesparfile, con = file.path(settings$rundir, run.id, "speciesparameters.xml")) - + ## Write events to a new xml file writeLines(eventsfile, con = file.path(settings$rundir, run.id, "events.xml")) - - + + # Handle the populating of siteparameters siteparfile <- gsub("@Info@", h.2, siteparfile) - + # Write siteparameters writeLines(siteparfile, con = file.path(settings$rundir, run.id, "siteparameters.xml")) - - - + + + # Populate sitefile layer info with given parameter sitefile <- gsub("@Info_Surface_Layer@", soil_combine, sitefile) - + # Write soil conditions writeLines(sitefile, con = file.path(settings$rundir, run.id, "site.xml")) - + # Write airchemistry file (not modified anywhere) writeLines(airchemistryfile, con = file.path(settings$rundir, run.id, "airchemistry.txt")) - - - + + + #------------------------ - } # write.config.LDNDC diff --git a/models/ldndc/tests/testthat.R b/models/ldndc/tests/testthat.R index f44dabc6ffb..8052bcf138b 100644 --- a/models/ldndc/tests/testthat.R +++ b/models/ldndc/tests/testthat.R @@ -2,4 +2,4 @@ library(testthat) library(PEcAn.utils) PEcAn.logger::logger.setQuitOnSevere(FALSE) -#test_check("PEcAn.ModelName") +# test_check("PEcAn.ModelName") diff --git a/models/ldndc/tests/testthat/test.met2model.R b/models/ldndc/tests/testthat/test.met2model.R index af32dd93baf..ac61e9bfb78 100644 --- a/models/ldndc/tests/testthat/test.met2model.R +++ b/models/ldndc/tests/testthat/test.met2model.R @@ -7,7 +7,8 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("Met conversion runs without error", { skip("This is a template test that will not run. To run it, remove this `skip` call.") nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" diff --git a/models/linkages/R/met2model.LINKAGES.R b/models/linkages/R/met2model.LINKAGES.R index f73a2008fd6..65300544d6d 100644 --- a/models/linkages/R/met2model.LINKAGES.R +++ b/models/linkages/R/met2model.LINKAGES.R @@ -17,38 +17,38 @@ #' met2model.LINKAGES <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, ...) 
{ - start_date <- as.POSIXlt(start_date, tz = "GMT") end_date <- as.POSIXlt(end_date, tz = "GMT") out.file <- file.path(outfolder, "climate.Rdata") # out.file <- file.path(outfolder, paste(in.prefix, strptime(start_date, '%Y-%m-%d'), # strptime(end_date, '%Y-%m-%d'), 'dat', sep='.')) - + # get start/end year since inputs are specified on year basis - # use years to check if met data contains all of the necessary years + # use years to check if met data contains all of the necessary years start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - results <- data.frame(file = c(out.file), - host = c(PEcAn.remote::fqdn()), - mimetype = c("text/plain"), - formatname = c("LINKAGES meteorology"), - startdate = c(start_date), - enddate = c(end_date), - dbfile.name = "climate.Rdata", - stringsAsFactors = FALSE) + results <- data.frame( + file = c(out.file), + host = c(PEcAn.remote::fqdn()), + mimetype = c("text/plain"), + formatname = c("LINKAGES meteorology"), + startdate = c(start_date), + enddate = c(end_date), + dbfile.name = "climate.Rdata", + stringsAsFactors = FALSE + ) print("internal results") print(results) if (file.exists(out.file) && !overwrite) { - # get year span for current data file load(out.file) - data_start = min(rownames(temp.mat)) - data_end = max(rownames(temp.mat)) - + data_start <- min(rownames(temp.mat)) + data_end <- max(rownames(temp.mat)) + # check to see if needed years fall into the current data year span; if not, rewrite - if ((data_start <= start_year) & (data_end >= end_year)){ + if ((data_start <= start_year) & (data_end >= end_year)) { PEcAn.logger::logger.debug("File '", out.file, "' already exists, skipping to next file.") return(invisible(results)) } @@ -64,11 +64,11 @@ met2model.LINKAGES <- function(in.path, in.prefix, outfolder, start_date, end_da year <- sprintf("%04d", seq(start_year, end_year, 1)) month <- sprintf("%02d", seq(1, 12, 1)) - nyear <- length(year) # number of years to simulate + nyear <- length(year) # number of years to simulate month_matrix_precip <- matrix(NA, nyear, 12) - - if(nchar(in.prefix)>0 & substr(in.prefix,nchar(in.prefix),nchar(in.prefix)) != ".") in.prefix = paste0(in.prefix,".") + + if (nchar(in.prefix) > 0 & substr(in.prefix, nchar(in.prefix), nchar(in.prefix)) != ".") in.prefix <- paste0(in.prefix, ".") for (i in seq_len(nyear)) { year_txt <- formatC(year[i], width = 4, format = "d", flag = "0") @@ -80,12 +80,12 @@ met2model.LINKAGES <- function(in.path, in.prefix, outfolder, start_date, end_da sec <- PEcAn.utils::ud_convert(sec, unlist(strsplit(ncin$dim$time$units, " "))[1], "seconds") dt <- PEcAn.utils::seconds_in_year(as.numeric(year[i])) / length(sec) tstep <- 86400 / dt - - # adjust vector depending on the time step of data + + # adjust vector depending on the time step of data # assumes evenly-spaced measurements DOY_vec_hr <- c(1, c(32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 365) * as.integer(tstep)) - ncprecipf <- ncdf4::ncvar_get(ncin, "precipitation_flux") # units are kg m-2 s-1 + ncprecipf <- ncdf4::ncvar_get(ncin, "precipitation_flux") # units are kg m-2 s-1 for (m in 1:12) { month_matrix_precip[i, m] <- (sum(ncprecipf[DOY_vec_hr[m]:(DOY_vec_hr[m + 1] - 1)]) * dt / 10) } @@ -96,17 +96,16 @@ met2model.LINKAGES <- function(in.path, in.prefix, outfolder, start_date, end_da month_matrix_temp_mean <- matrix(NA, nyear, 12) for (i in seq_len(nyear)) { - year_txt <- formatC(year[i], width = 4, format = "d", flag = "0") infile <- file.path(in.path, paste0(in.prefix, year_txt, ".nc")) ncin <- 
ncdf4::nc_open(infile) # print(ncin) - nctemp <- ncdf4::ncvar_get(ncin, "air_temperature") #units are kg m-2 s-1 + nctemp <- ncdf4::ncvar_get(ncin, "air_temperature") # units are kg m-2 s-1 for (m in 1:12) { month_matrix_temp_mean[i, m] <- (mean(nctemp[DOY_vec_hr[m]:(DOY_vec_hr[m + 1] - 1)]) - - 273.15) #sub daily to monthly + 273.15) # sub daily to monthly } ncdf4::nc_close(ncin) if (i %% 100 == 0) { diff --git a/models/linkages/R/model2netcdf.LINKAGES.R b/models/linkages/R/model2netcdf.LINKAGES.R index 68afa4ecd6f..5fc3eff7c84 100644 --- a/models/linkages/R/model2netcdf.LINKAGES.R +++ b/models/linkages/R/model2netcdf.LINKAGES.R @@ -15,108 +15,112 @@ model2netcdf.LINKAGES <- function(outdir, sitelat, sitelon, start_date = NULL, end_date = NULL, pft_names = NULL) { # , PFTs) { logger.severe('NOT IMPLEMENTED') - + ### Read in model output in linkages format load(file.path(outdir, "linkages.out.Rdata")) # linkages.output.dims <- dim(linkages.output) - + ### Determine number of years and output timestep - + start_year <- as.numeric(strftime(start_date, "%Y")) end_year <- as.numeric(strftime(end_date, "%Y")) years <- start_year:end_year - + # IF : no need this, write.configs --> job.sh --> will pass model2netcsf.LINKAGES pft_names # if(is.null(pft_names)){ # pft_names <- as.character(1:length(agb.pft[, 1, 1])) # } - + ### Loop over years in linkages output to create separate netCDF outputs for (y in seq_along(years)) { if (file.exists(file.path(outdir, paste(years[y], "nc", sep = ".")))) { next } - print(paste("---- Processing year: ", years[y])) # turn on for debugging - + print(paste("---- Processing year: ", years[y])) # turn on for debugging + ## Subset data for processing sub.linkages.output <- subset(linkages.output, year == y) ## sub.linkages.output.dims <- dim(sub.linkages.output) - + # sub.linkages.pft <- subset(linkages.pft, year == y) sub.linkages.pft.dims <- # dim(sub.linkages.pft) - + ## Setup outputs for netCDF file in appropriate units output <- list() ## standard variables: Carbon Pools - output[[1]] <- ag.biomass[y, ] # Above Ground Biomass in kgC/m2 - output[[2]] <- ag.biomass[y, ] # Total Live Biomass in kgC/m2 (no distinction from AGB in linkages) - output[[3]] <- total.soil.carbon[y, ] # TotSoilCarb in kgC/m2 - output[[4]] <- c(ag.biomass[y, ], total.soil.carbon[y, ], leaf.litter[y, ], area[y, ]) # Carb Pools in kgC/m2 - output[[5]] <- c("AGB", "Soil Organic Matter", "Leaf Litter", "LAI") # poolname - output[[6]] <- ag.npp[y, ] # GWBI = NPP in linkages - output[[7]] <- hetero.resp[y, ] # HeteroResp in kgC/m^2/s - output[[8]] <- ag.npp[y, ] # NPP = GWBI in linkages - output[[9]] <- nee[y, ] # NEE #possibly questionable - output[[10]] <- et[y, ] # Evap in kg/m^2/s + output[[1]] <- ag.biomass[y, ] # Above Ground Biomass in kgC/m2 + output[[2]] <- ag.biomass[y, ] # Total Live Biomass in kgC/m2 (no distinction from AGB in linkages) + output[[3]] <- total.soil.carbon[y, ] # TotSoilCarb in kgC/m2 + output[[4]] <- c(ag.biomass[y, ], total.soil.carbon[y, ], leaf.litter[y, ], area[y, ]) # Carb Pools in kgC/m2 + output[[5]] <- c("AGB", "Soil Organic Matter", "Leaf Litter", "LAI") # poolname + output[[6]] <- ag.npp[y, ] # GWBI = NPP in linkages + output[[7]] <- hetero.resp[y, ] # HeteroResp in kgC/m^2/s + output[[8]] <- ag.npp[y, ] # NPP = GWBI in linkages + output[[9]] <- nee[y, ] # NEE #possibly questionable + output[[10]] <- et[y, ] # Evap in kg/m^2/s output[[11]] <- agb.pft[, y, ] output[[12]] <- f.comp[, y] - output[[13]] <- area[y, ] #LAI - output[[14]] <- water[y, ] #soil 
moisture - output[[15]] <- abvgroundwood.biomass[y,] #AbvGroundWood just wood no leaves - output[[16]] <- seq_along(pft_names) - + output[[13]] <- area[y, ] # LAI + output[[14]] <- water[y, ] # soil moisture + output[[15]] <- abvgroundwood.biomass[y, ] # AbvGroundWood just wood no leaves + output[[16]] <- seq_along(pft_names) + # ******************** Declare netCDF variables ********************# - dim.t <- ncdf4::ncdim_def(name = "time", - units = paste0("days since ", years[y], "-01-01 00:00:00"), - vals = 0, calendar = "standard", - unlim = TRUE) + dim.t <- ncdf4::ncdim_def( + name = "time", + units = paste0("days since ", years[y], "-01-01 00:00:00"), + vals = 0, calendar = "standard", + unlim = TRUE + ) dim.lat <- ncdf4::ncdim_def("lat", "degrees_north", vals = as.numeric(sitelat), longname = "station_latitude") dim.lon <- ncdf4::ncdim_def("lon", "degrees_east", vals = as.numeric(sitelon), longname = "station_longitude") dim.string <- ncdf4::ncdim_def("names", "", 1:24, create_dimvar = FALSE) dim.cpools <- ncdf4::ncdim_def("cpools", "", vals = 1:4, longname = "Carbon Pools") dim.cpools1 <- ncdf4::ncdim_def("cpools", "", vals = 1:4, longname = "Carbon Pools", create_dimvar = FALSE) - #dim.pfts <- ncdim_def("pfts", "", vals = 1:nrow(agb.pft), longname = "PFTs", create_dimvar = FALSE) + # dim.pfts <- ncdim_def("pfts", "", vals = 1:nrow(agb.pft), longname = "PFTs", create_dimvar = FALSE) dim.pfts <- ncdf4::ncdim_def(name = "pft", units = "unitless", vals = 1:length(agb.pft[, 1, 1]), longname = "Plant Functional Type", unlim = TRUE) - - + + for (i in seq_along(output)) { - if (length(output[[i]]) == 0) + if (length(output[[i]]) == 0) { output[[i]] <- rep(-999, length(dim.t$vals)) + } } - + dims <- list(lon = dim.lon, lat = dim.lat, time = dim.t) - + nc_var <- list() - nc_var[[1]] <- PEcAn.utils::to_ncvar("AGB", dims) - nc_var[[2]] <- PEcAn.utils::to_ncvar("TotLivBiom", dims) - nc_var[[3]] <- PEcAn.utils::to_ncvar("TotSoilCarb", dims) - nc_var[[4]] <- ncdf4::ncvar_def("CarbPools", "kgC/m2", list(dim.cpools, dim.lat, dim.lon, dim.t), -999) - nc_var[[5]] <- ncdf4::ncvar_def("poolnames", units = "", dim = list(dim.string, dim.cpools1), longname = "Carbon Pool Names", prec = "char") - nc_var[[6]] <- ncdf4::ncvar_def("GWBI", "kgC/m2", list(dim.lat, dim.lon, dim.t), -999) - nc_var[[7]] <- ncdf4::ncvar_def("HeteroResp", "kgC/m2/s", list(dim.lat, dim.lon, dim.t), -999) - nc_var[[8]] <- ncdf4::ncvar_def("NPP", "kgC/m2/s", list(dim.lat, dim.lon, dim.t), -999) - nc_var[[9]] <- ncdf4::ncvar_def("NEE", "kgC/m2/s", list(dim.lat, dim.lon, dim.t), -999) + nc_var[[1]] <- PEcAn.utils::to_ncvar("AGB", dims) + nc_var[[2]] <- PEcAn.utils::to_ncvar("TotLivBiom", dims) + nc_var[[3]] <- PEcAn.utils::to_ncvar("TotSoilCarb", dims) + nc_var[[4]] <- ncdf4::ncvar_def("CarbPools", "kgC/m2", list(dim.cpools, dim.lat, dim.lon, dim.t), -999) + nc_var[[5]] <- ncdf4::ncvar_def("poolnames", units = "", dim = list(dim.string, dim.cpools1), longname = "Carbon Pool Names", prec = "char") + nc_var[[6]] <- ncdf4::ncvar_def("GWBI", "kgC/m2", list(dim.lat, dim.lon, dim.t), -999) + nc_var[[7]] <- ncdf4::ncvar_def("HeteroResp", "kgC/m2/s", list(dim.lat, dim.lon, dim.t), -999) + nc_var[[8]] <- ncdf4::ncvar_def("NPP", "kgC/m2/s", list(dim.lat, dim.lon, dim.t), -999) + nc_var[[9]] <- ncdf4::ncvar_def("NEE", "kgC/m2/s", list(dim.lat, dim.lon, dim.t), -999) nc_var[[10]] <- ncdf4::ncvar_def("Evap", "kg/m2/s", list(dim.lat, dim.lon, dim.t), -999) nc_var[[11]] <- ncdf4::ncvar_def("AGB.pft", "kgC/m2", list(dim.lat, dim.lon, dim.t, dim.pfts), 
-999) nc_var[[12]] <- ncdf4::ncvar_def("Fcomp", "kgC/kgC", list(dim.lat, dim.lon, dim.t, dim.pfts), -999) nc_var[[13]] <- ncdf4::ncvar_def("LAI", "m2/m2", list(dim.lat, dim.lon, dim.t), -999) nc_var[[14]] <- ncdf4::ncvar_def("SoilMoist", "m2/m2", list(dim.lat, dim.lon, dim.t), -999) nc_var[[15]] <- ncdf4::ncvar_def("AbvGrndWood", "kgC/m2", list(dim.lat, dim.lon, dim.t), -999) - nc_var[[16]] <- ncdf4::ncvar_def("PFT", units = "", dim = list(dim.pfts), - longname = paste(pft_names, collapse=",")) - + nc_var[[16]] <- ncdf4::ncvar_def("PFT", + units = "", dim = list(dim.pfts), + longname = paste(pft_names, collapse = ",") + ) + # ******************** Declare netCDF variables ********************# - + ### Output netCDF data nc <- ncdf4::nc_create(file.path(outdir, paste(formatC(years[y], width = 4, format = "d", flag = "0"), "nc", sep = ".")), nc_var) varfile <- file(file.path(outdir, paste(formatC(years[y], width = 4, format = "d", flag = "0"), "nc", "var", sep = ".")), "w") - - for (i in seq_along(nc_var)) { + + for (i in seq_along(nc_var)) { print(i) ncdf4::ncvar_put(nc, nc_var[[i]], output[[i]]) cat(paste(nc_var[[i]]$name, nc_var[[i]]$longname), file = varfile, sep = "\n") } close(varfile) ncdf4::nc_close(nc) - - } ### End of year loop + } ### End of year loop } # model2netcdf.LINKAGES diff --git a/models/linkages/R/read_restart.LINKAGES.R b/models/linkages/R/read_restart.LINKAGES.R index 2fa8d820ba4..9f74fa6b0bd 100644 --- a/models/linkages/R/read_restart.LINKAGES.R +++ b/models/linkages/R/read_restart.LINKAGES.R @@ -13,50 +13,49 @@ #' @export #' read_restart.LINKAGES <- function(outdir, runid, stop.time, settings, var.names = NULL, params = NULL) { - # Read ensemble output - ens <- PEcAn.utils::read.output(runid = runid, - outdir = file.path(outdir, runid), - start.year = lubridate::year(stop.time), - end.year = lubridate::year(stop.time), - variables = var.names, pft.name = unlist(sapply(settings$pfts,'[[', "name"))) # change to just 'AGB' for plot level biomass - if(!is.na(ens)){ - # Add PFT name to variable if applicable - pft.names <- numeric(length(settings$pfts)) - for (i in seq_along(settings$pfts)) { - pft.names[i] <- settings$pfts[i]$pft$name - } - #ens.pft.names <- grep("pft", names(ens)) - #names(ens[[grep("pft", names(ens))]]) <- pft.names - - forecast <- list() - - if ("Fcomp" %in% var.names) { - forecast[[length(forecast)+1]] <- ens$AGB.pft #already has C #* unit.conv - names(forecast[[length(forecast)]]) <- paste0('Fcomp.',pft.names) - } + ens <- PEcAn.utils::read.output( + runid = runid, + outdir = file.path(outdir, runid), + start.year = lubridate::year(stop.time), + end.year = lubridate::year(stop.time), + variables = var.names, pft.name = unlist(sapply(settings$pfts, "[[", "name")) + ) # change to just 'AGB' for plot level biomass + if (!is.na(ens)) { + # Add PFT name to variable if applicable + pft.names <- numeric(length(settings$pfts)) + for (i in seq_along(settings$pfts)) { + pft.names[i] <- settings$pfts[i]$pft$name + } + # ens.pft.names <- grep("pft", names(ens)) + # names(ens[[grep("pft", names(ens))]]) <- pft.names - if ("AGB.pft" %in% var.names) { - forecast[[length(forecast)+1]] <- ens$AGB.pft #already has C #* unit.conv - names(forecast[[length(forecast)]]) <- paste0('AGB.pft.',pft.names) - } + forecast <- list() - if ("TotSoilCarb" %in% var.names) { - forecast[[length(forecast)+1]] <- ens$TotSoilCarb #PEcAn.utils::ud_convert(ens$TotSoilCarb, "kg/m^2", "Mg/ha") #* unit.conv - names(forecast[[length(forecast)]]) <- c("TotSoilCarb") - } + if ("Fcomp" %in% 
var.names) { + forecast[[length(forecast) + 1]] <- ens$AGB.pft # already has C #* unit.conv + names(forecast[[length(forecast)]]) <- paste0("Fcomp.", pft.names) + } + if ("AGB.pft" %in% var.names) { + forecast[[length(forecast) + 1]] <- ens$AGB.pft # already has C #* unit.conv + names(forecast[[length(forecast)]]) <- paste0("AGB.pft.", pft.names) + } - }else{ + if ("TotSoilCarb" %in% var.names) { + forecast[[length(forecast) + 1]] <- ens$TotSoilCarb # PEcAn.utils::ud_convert(ens$TotSoilCarb, "kg/m^2", "Mg/ha") #* unit.conv + names(forecast[[length(forecast)]]) <- c("TotSoilCarb") + } + } else { forecast <- list() if ("AGB.pft" %in% var.names) { - forecast[[length(forecast)+1]] <- rep(NA,length(settings$pfts)) + forecast[[length(forecast) + 1]] <- rep(NA, length(settings$pfts)) } if ("Fcomp" %in% var.names) { - forecast[[length(forecast)+1]] <- rep(NA,length(settings$pfts)) #already has C #* unit.conv + forecast[[length(forecast) + 1]] <- rep(NA, length(settings$pfts)) # already has C #* unit.conv } if ("TotSoilCarb" %in% var.names) { - forecast[[length(forecast)+1]] <- NA + forecast[[length(forecast) + 1]] <- NA } } # Put forecast into vector diff --git a/models/linkages/R/sample.IC.LINKAGES.R b/models/linkages/R/sample.IC.LINKAGES.R index a1d5b18e1fd..042e866d53e 100644 --- a/models/linkages/R/sample.IC.LINKAGES.R +++ b/models/linkages/R/sample.IC.LINKAGES.R @@ -1,16 +1,20 @@ sample.IC.LINKAGES <- function(ne, state, year = NULL) { ## g C * m-2 ground area in wood (above-ground + roots) - biomass_tsca = ifelse(rep("biomass_tsca" %in% names(state), ne), - state$biomass_tsca[1, sample.int(ncol(state$biomass_tsca),ne), 1] * 0.1, ## unit Mg/ha ->kg/m2 - stats::runif(ne, 0, 14000)) ## prior - biomass_acsa3 = ifelse(rep("biomass_acsa3" %in% names(state), ne), - state$biomass_acsa3[1, sample.int(ncol(state$biomass_acsa3), ne), 1] * 0.1, ## unit Mg/ha ->kg/m2 - stats::runif(ne, 0, 14000)) ## prior - biomass_beal2 = ifelse(rep("biomass_beal2" %in% names(state),ne), - state$biomass_beal2[1, sample.int(ncol(state$biomass_beal2),ne), 1] * 0.1, ## unit Mg/ha ->kg/m2 - stats::runif(ne, 0, 14000)) ## prior - biomass_thoc2 = ifelse(rep("biomass_thoc2" %in% names(state),ne), - state$biomass_thoc2[1, sample.int(ncol(state$biomass_thoc2), ne), 1] * 0.1, ## unit Mg/ha ->kg/m2 - stats::runif(ne, 0, 14000)) ## prior + biomass_tsca <- ifelse(rep("biomass_tsca" %in% names(state), ne), + state$biomass_tsca[1, sample.int(ncol(state$biomass_tsca), ne), 1] * 0.1, ## unit Mg/ha ->kg/m2 + stats::runif(ne, 0, 14000) + ) ## prior + biomass_acsa3 <- ifelse(rep("biomass_acsa3" %in% names(state), ne), + state$biomass_acsa3[1, sample.int(ncol(state$biomass_acsa3), ne), 1] * 0.1, ## unit Mg/ha ->kg/m2 + stats::runif(ne, 0, 14000) + ) ## prior + biomass_beal2 <- ifelse(rep("biomass_beal2" %in% names(state), ne), + state$biomass_beal2[1, sample.int(ncol(state$biomass_beal2), ne), 1] * 0.1, ## unit Mg/ha ->kg/m2 + stats::runif(ne, 0, 14000) + ) ## prior + biomass_thoc2 <- ifelse(rep("biomass_thoc2" %in% names(state), ne), + state$biomass_thoc2[1, sample.int(ncol(state$biomass_thoc2), ne), 1] * 0.1, ## unit Mg/ha ->kg/m2 + stats::runif(ne, 0, 14000) + ) ## prior return(data.frame(biomass_tsca, biomass_acsa3, biomass_beal2, biomass_thoc2)) } # sample.IC.LINKAGES diff --git a/models/linkages/R/spinup.LINKAGES.R b/models/linkages/R/spinup.LINKAGES.R index cff880fcf5a..e4b84de8107 100644 --- a/models/linkages/R/spinup.LINKAGES.R +++ b/models/linkages/R/spinup.LINKAGES.R @@ -1,12 +1,12 @@ spinup.LINKAGES <- function(start.year, 
end.year, temp.mat, precip.mat, paleon = NULL) { if (is.null(paleon)) { - paleon <- TRUE # Why not just have `paleon = TRUE` above? + paleon <- TRUE # Why not just have `paleon = TRUE` above? } if (paleon) { spin.num <- 20 spin.length <- 500 start.year <- start.year - spin.length - + temp.mat <- rbind(temp.mat[rep(1:spin.num, length = spin.length), ], temp.mat) precip.mat <- rbind(precip.mat[rep(1:spin.num, length = spin.length), ], precip.mat) nyear <- nrow(temp.mat) @@ -15,15 +15,17 @@ spinup.LINKAGES <- function(start.year, end.year, temp.mat, precip.mat, paleon = start.year <- start.year - spin.num year <- seq(start.year, end.year, 1) nyear <- length(year) - + temp.mat <- rbind(temp.mat[1:spin.num, ], temp.mat) precip.mat <- rbind(precip.mat[1:spin.num, ], precip.mat) } - + ### Add some sort of test for steady state or not - - return(list(start.year = start.year, - nyear = nyear, - temp.mat = temp.mat, - precip.mat = precip.mat)) + + return(list( + start.year = start.year, + nyear = nyear, + temp.mat = temp.mat, + precip.mat = precip.mat + )) } # spinup.LINKAGES diff --git a/models/linkages/R/split_inputs.LINKAGES.R b/models/linkages/R/split_inputs.LINKAGES.R index 24e7d68051e..cfcc0454341 100644 --- a/models/linkages/R/split_inputs.LINKAGES.R +++ b/models/linkages/R/split_inputs.LINKAGES.R @@ -14,7 +14,5 @@ #' @export #' split_inputs.LINKAGES <- function(settings, start.time, stop.time, inputs) { - return(inputs) - } # split_inputs.LINKAGES diff --git a/models/linkages/R/write.config.LINKAGES.R b/models/linkages/R/write.config.LINKAGES.R index 43c300a33ca..cb8097f6e9c 100644 --- a/models/linkages/R/write.config.LINKAGES.R +++ b/models/linkages/R/write.config.LINKAGES.R @@ -20,9 +20,8 @@ #' @export #' @author Ann Raiho, Betsy Cowdery #' -write.config.LINKAGES <- function(defaults = NULL, trait.values, settings, run.id, +write.config.LINKAGES <- function(defaults = NULL, trait.values, settings, run.id, restart = NULL, spinup = FALSE, inputs = NULL, IC = NULL) { - # 850-869 repeated to fill 1000 years if (is.null(restart)) { restart <- FALSE # why not have restart default to FALSE above? @@ -30,201 +29,198 @@ write.config.LINKAGES <- function(defaults = NULL, trait.values, settings, run.i if (is.null(spinup)) { spinup <- FALSE # why not have spinup default to FALSE above? } - - ##TO DO add restart file as IC for HF + + ## TO DO add restart file as IC for HF # find out where to write run/ouput rundir <- file.path(settings$host$rundir, run.id) - if (!file.exists(rundir)) { # why not use `dir.exists`? + if (!file.exists(rundir)) { # why not use `dir.exists`? dir.create(rundir) } outdir <- file.path(settings$host$outdir, run.id) - if (!file.exists(outdir)) { # why not use `dir.exists`? + if (!file.exists(outdir)) { # why not use `dir.exists`? 
dir.create(outdir) } - + #----------------------------------------------------------------------- - #TO DO: need to change to date because sometimes this runs two years when it shouldn't + # TO DO: need to change to date because sometimes this runs two years when it shouldn't start.year <- as.numeric(strftime(settings$run$start.date, "%Y")) end.year <- as.numeric(strftime(settings$run$end.date, "%Y")) year <- seq(start.year, end.year, 1) - + iplot <- 1 nyear <- length(year) max.ind <- 1500 plat <- abs(as.numeric(settings$run$site$lat)) - + bgs <- 120 egs <- 273 - + texture <- utils::read.csv(system.file("texture.csv", package = "PEcAn.LINKAGES")) - + dbcon <- PEcAn.DB::db.open(settings$database$bety) on.exit(PEcAn.DB::db.close(dbcon), add = TRUE) - - if("soil" %in% names(settings$run$inputs)){ + + if ("soil" %in% names(settings$run$inputs)) { ## open soil file soil <- settings$run$inputs$soil nc.soil <- ncdf4::nc_open(soil$path) - + ## extract LINKAGES variables - fc <- ncdf4::ncvar_get(nc.soil,"volume_fraction_of_water_in_soil_at_field_capacity") * 100 - dry <- ncdf4::ncvar_get(nc.soil,"volume_fraction_of_condensed_water_in_soil_at_wilting_point") * 100 - if(length(fc) > 1) fc <- mean(fc) - if(length(dry) > 1) dry <- mean(dry) + fc <- ncdf4::ncvar_get(nc.soil, "volume_fraction_of_water_in_soil_at_field_capacity") * 100 + dry <- ncdf4::ncvar_get(nc.soil, "volume_fraction_of_condensed_water_in_soil_at_wilting_point") * 100 + if (length(fc) > 1) fc <- mean(fc) + if (length(dry) > 1) dry <- mean(dry) ncdf4::nc_close(nc.soil) - - }else{ + } else { soils <- PEcAn.DB::db.query(paste("SELECT soil,som,sand_pct,clay_pct,soilnotes FROM sites WHERE id =", settings$run$site$id), - con = dbcon) + con = dbcon + ) + + soil.dat <- PEcAn.data.land::soil_params(sand = soils$sand_pct / 100, clay = soils$clay_pct / 100, silt = 100 - soils$sand_pct - soils$clay_pct) - soil.dat <- PEcAn.data.land::soil_params(sand = soils$sand_pct/100, clay = soils$clay_pct/100, silt = 100 - soils$sand_pct - soils$clay_pct) - fc <- soil.dat$volume_fraction_of_water_in_soil_at_field_capacity * 100 dry <- soil.dat$volume_fraction_of_condensed_water_in_soil_at_wilting_point * 100 - - if(is.na(fc)) fc = 5 - if(is.na(dry)) dry = 5 + + if (is.na(fc)) fc <- 5 + if (is.na(dry)) dry <- 5 } - fdat <- utils::read.csv(system.file("fdat.csv", package = "linkages"), header = FALSE) #litter quality parameters + fdat <- utils::read.csv(system.file("fdat.csv", package = "linkages"), header = FALSE) # litter quality parameters clat <- utils::read.csv(system.file("clat.csv", package = "linkages"), header = FALSE) load(system.file("switch.mat.Rdata", package = "linkages")) - if(!is.null(inputs)){ + if (!is.null(inputs)) { climate_file <- inputs$met$path load(climate_file) - }else{ + } else { climate_file <- settings$run$inputs$met$path - load(climate_file) + load(climate_file) } - - temp.mat <- matrix(temp.mat[which(rownames(temp.mat)%in%start.year:end.year),],ncol=12,byrow=F) - precip.mat <- matrix(precip.mat[which(rownames(precip.mat)%in%start.year:end.year),],ncol=12,byrow=F) - + + temp.mat <- matrix(temp.mat[which(rownames(temp.mat) %in% start.year:end.year), ], ncol = 12, byrow = F) + precip.mat <- matrix(precip.mat[which(rownames(precip.mat) %in% start.year:end.year), ], ncol = 12, byrow = F) + basesc <- 74 basesn <- 1.64 - spp.params.default <- utils::read.csv(system.file("spp_matrix.csv", package = "linkages")) # default spp.params + spp.params.default <- utils::read.csv(system.file("spp_matrix.csv", package = "linkages")) # default 
spp.params nspec <- length(settings$pfts) spp.params.save <- numeric(nspec) for (i in seq_len(nspec)) { spp.params.save[i] <- which(spp.params.default[, 1] %in% settings$pfts[i]$pft$name) } spp.params <- spp.params.default[spp.params.save, ] - + ### Create species parameter matrix with correct PFTs trait.values$`Hemlock(Tsuga Canadensis)`$ ### group will be each spp. if (!is.null(trait.values)) { for (group in names(trait.values)) { - if (group == "env" | any(settings$run$inputs$met$source == 'PalEONregional')) { - + if (group == "env" | any(settings$run$inputs$met$source == "PalEONregional")) { ## leave defaults - } else { ## copy values - # IF: not sure what's going on here but I had to have this hack to overwrite params below - # should come back to this - if(is.null(dim(trait.values[[group]]))){ - vals <- as.data.frame(t(trait.values[[group]])) - }else{ - vals <- as.data.frame(trait.values[[group]]) - } - - if ("SLA" %in% names(vals)) { - sla_use <- (1/vals$SLA)*1000 - sla_use[sla_use>5000] <- stats::rnorm(1,4000,100) - spp.params[spp.params$Spp_Name == group, ]$FWT <- sla_use - ## If change here need to change in write_restart as well - } - - # replace defaults with traits - #new.params.locs <- which(names(spp.params) %in% names(vals)) - #new.vals.locs <- which(names(vals) %in% names(spp.params)) - #spp.params[which(spp.params$Spp_Name == group), new.params.locs] <- vals[new.vals.locs] - - # conversion of some traits to match what LINKAGES needs Going to have to look up this paper - # Botkin 1972 Some Ecological Consequences of a computer model of forest growth - if ("HTMAX" %in% names(vals) & "DBHMAX" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$B2 <- 2 * (((vals$HTMAX * 100) - 137) / - (vals$DBHMAX * 100)) - spp.params[spp.params$Spp_Name == group, ]$B3 <- (vals$HTMAX * 100 - 137) / (vals$DBHMAX * 100^2) - } - - if ("root2shoot" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$RTST <- vals$root2shoot - } - - # if ("leaf_longevity" %in% names(vals)) { - # spp.params[spp.params$Spp_Name == group, ]$FRT <- vals$leaf_longevity - # } - - if ("DMAX" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$DMAX <- vals$DMAX - } - if ("DMIN" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$DMIN <- vals$DMIN - } - if ("AGEMX" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$AGEMX <- vals$AGEMX - } - - if ("Gmax" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$G <- vals$Gmax - } - if ("SPRTND" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$SPRTND <- vals$SPRTND - } - if ("SPRTMN" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$SPRTMN <- vals$SPRTMN - } - if ("SPRTMX" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$SPRTMX <- vals$SPRTMX - } - if ("MPLANT" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$MPLANT <- vals$MPLANT - } - if ("D3" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$D3 <- vals$D3 - } - if ("FROST" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$FROST <- vals$FROST - } - if ("CM1" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$CM1 <- vals$CM1 - } - if ("CM2" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$CM2 <- vals$CM2 - } - if ("CM3" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$CM3 <- vals$CM3 - } - if ("CM4" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$CM4 <- vals$CM4 - } - if ("CM5" %in% names(vals)) { - 
spp.params[spp.params$Spp_Name == group, ]$CM5 <- vals$CM5 - } - - if ("SLTA" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$SLTA <- vals$SLTA - } - if ("SLTB" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$SLTB <- vals$SLTB - } - if ("FRT" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$FRT <- vals$FRT - } - if ("TL" %in% names(vals)) { - spp.params[spp.params$Spp_Name == group, ]$TL <- ceiling(vals$TL) - } + # IF: not sure what's going on here but I had to have this hack to overwrite params below + # should come back to this + if (is.null(dim(trait.values[[group]]))) { + vals <- as.data.frame(t(trait.values[[group]])) + } else { + vals <- as.data.frame(trait.values[[group]]) + } + + if ("SLA" %in% names(vals)) { + sla_use <- (1 / vals$SLA) * 1000 + sla_use[sla_use > 5000] <- stats::rnorm(1, 4000, 100) + spp.params[spp.params$Spp_Name == group, ]$FWT <- sla_use + ## If change here need to change in write_restart as well + } + + # replace defaults with traits + # new.params.locs <- which(names(spp.params) %in% names(vals)) + # new.vals.locs <- which(names(vals) %in% names(spp.params)) + # spp.params[which(spp.params$Spp_Name == group), new.params.locs] <- vals[new.vals.locs] + + # conversion of some traits to match what LINKAGES needs Going to have to look up this paper + # Botkin 1972 Some Ecological Consequences of a computer model of forest growth + if ("HTMAX" %in% names(vals) & "DBHMAX" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$B2 <- 2 * (((vals$HTMAX * 100) - 137) / + (vals$DBHMAX * 100)) + spp.params[spp.params$Spp_Name == group, ]$B3 <- (vals$HTMAX * 100 - 137) / (vals$DBHMAX * 100^2) + } + + if ("root2shoot" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$RTST <- vals$root2shoot + } + + # if ("leaf_longevity" %in% names(vals)) { + # spp.params[spp.params$Spp_Name == group, ]$FRT <- vals$leaf_longevity + # } + + if ("DMAX" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$DMAX <- vals$DMAX + } + if ("DMIN" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$DMIN <- vals$DMIN + } + if ("AGEMX" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$AGEMX <- vals$AGEMX + } + if ("Gmax" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$G <- vals$Gmax + } + if ("SPRTND" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$SPRTND <- vals$SPRTND + } + if ("SPRTMN" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$SPRTMN <- vals$SPRTMN + } + if ("SPRTMX" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$SPRTMX <- vals$SPRTMX + } + if ("MPLANT" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$MPLANT <- vals$MPLANT + } + if ("D3" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$D3 <- vals$D3 + } + if ("FROST" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$FROST <- vals$FROST + } + if ("CM1" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$CM1 <- vals$CM1 + } + if ("CM2" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$CM2 <- vals$CM2 + } + if ("CM3" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$CM3 <- vals$CM3 + } + if ("CM4" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$CM4 <- vals$CM4 + } + if ("CM5" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$CM5 <- vals$CM5 + } + + if ("SLTA" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$SLTA <- vals$SLTA + } + 
if ("SLTB" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$SLTB <- vals$SLTB + } + if ("FRT" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$FRT <- vals$FRT + } + if ("TL" %in% names(vals)) { + spp.params[spp.params$Spp_Name == group, ]$TL <- ceiling(vals$TL) } } } - + } + switch.mat <- switch.mat[spp.params.save, ] - + if (spinup) { spinup.out <- spinup.LINKAGES(start.year, end.year, temp.mat, precip.mat) start.year <- spinup.out$start.year @@ -232,16 +228,20 @@ write.config.LINKAGES <- function(defaults = NULL, trait.values, settings, run.i nyear <- spinup.out$nyear temp.mat <- spinup.out$temp.mat precip.mat <- spinup.out$precip.mat - settings$run$start.date <- paste0(spinup.out$start.year, - strftime(settings$run$start.date, "/%m/%d")) + settings$run$start.date <- paste0( + spinup.out$start.year, + strftime(settings$run$start.date, "/%m/%d") + ) } - + input <- file.path(settings$rundir, run.id, "linkages.input.Rdata") - - save(iplot, nyear, nspec, fc, dry, bgs, egs, max.ind, plat, temp.mat, - precip.mat, spp.params, switch.mat, fdat, clat, basesc, basesn, - start.year, end.year, file = input) - + + save(iplot, nyear, nspec, fc, dry, bgs, egs, max.ind, plat, temp.mat, + precip.mat, spp.params, switch.mat, fdat, clat, basesc, basesn, + start.year, end.year, + file = input + ) + if (restart) { restartfile <- file.path(settings$rundir, run.id, "linkages.restart.Rdata") } else { @@ -254,7 +254,7 @@ write.config.LINKAGES <- function(defaults = NULL, trait.values, settings, run.i } else { jobsh <- readLines(con = system.file("template.job", package = "PEcAn.LINKAGES"), n = -1) } - + # create host specific setttings hostsetup <- "" if (!is.null(settings$model$prerun)) { @@ -263,7 +263,7 @@ write.config.LINKAGES <- function(defaults = NULL, trait.values, settings, run.i if (!is.null(settings$host$prerun)) { hostsetup <- paste(hostsetup, sep = "\n", paste(settings$host$prerun, collapse = "\n")) } - + hostteardown <- "" if (!is.null(settings$model$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$model$postrun, collapse = "\n")) @@ -271,27 +271,27 @@ write.config.LINKAGES <- function(defaults = NULL, trait.values, settings, run.i if (!is.null(settings$host$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$host$postrun, collapse = "\n")) } - + # create job.sh jobsh <- gsub("@HOST_SETUP@", hostsetup, jobsh) jobsh <- gsub("@HOST_TEARDOWN@", hostteardown, jobsh) - + jobsh <- gsub("@SITE_LAT@", settings$run$site$lat, jobsh) jobsh <- gsub("@SITE_LON@", settings$run$site$lon, jobsh) jobsh <- gsub("@SITE_MET@", settings$run$inputs$met$path, jobsh) - + jobsh <- gsub("@START_DATE@", settings$run$start.date, jobsh) jobsh <- gsub("@END_DATE@", settings$run$end.date, jobsh) - + jobsh <- gsub("@OUTDIR@", outdir, jobsh) jobsh <- gsub("@RUNDIR@", rundir, jobsh) - + jobsh <- gsub("@INPUT@", input, jobsh) jobsh <- gsub("@RESTART@", restart, jobsh) if (restart) { jobsh <- gsub("@RESTARTFILE@", restartfile, jobsh) } - + pft_names <- unlist(sapply(settings$pfts, `[[`, "name")) pft_names <- paste0("pft_names = c('", paste(pft_names, collapse = "','"), "')") jobsh <- gsub("@PFT_NAMES@", pft_names, jobsh) diff --git a/models/linkages/R/write_restart.LINKAGES.R b/models/linkages/R/write_restart.LINKAGES.R index 8e0b442048a..0c321161c87 100644 --- a/models/linkages/R/write_restart.LINKAGES.R +++ b/models/linkages/R/write_restart.LINKAGES.R @@ -1,7 +1,7 @@ ##' @title write_restart.LINKAGES ##' @name write_restart.LINKAGES ##' @author Ann 
Raiho \email{araiho@@nd.edu} -##' +##' ##' @param outdir output directory ##' @param runid run ID ##' @param start.time,stop.time year that is being read @@ -11,79 +11,79 @@ ##' @param new.params updated parameter values to write. ## Format is named list with each entry matching a PFT ##' @param inputs passed on to `write.config.LINKAGES()` -##' +##' ##' @description Write restart files for LINKAGES -##' +##' ##' @return NONE ##' @export -##' +##' write_restart.LINKAGES <- function(outdir, runid, start.time, stop.time, - settings, new.state, + settings, new.state, RENAME = TRUE, new.params, inputs) { - ### TO DO : needs to be vectorized to improve SDA speed for runs that are longer than 50 years ### TO DO : distance matrix needs fixing - + ### Removing negative numbers because biomass can't be negative ### new.state[new.state < 0] <- 0 - + names.keep <- names(new.state) new.state <- as.matrix(new.state) names(new.state) <- names.keep - + new.state.save <- new.state - - if(any(grep('Fcomp',names.keep))){ + + if (any(grep("Fcomp", names.keep))) { new.state <- new.state.save[grep("Fcomp", names(new.state.save))] new.state.other <- new.state.save[grep("Fcomp", names(new.state.save), invert = TRUE)] } - - if(any(grep('AGB.pft',names.keep))){ + + if (any(grep("AGB.pft", names.keep))) { new.state <- new.state.save[grep("AGB.pft", names(new.state.save))] new.state.other <- new.state.save[grep("AGB.pft", names(new.state.save), invert = TRUE)] } - + variables <- names(new.state) ### Going to need to change this... ### Get some expert opinion N <- length(new.state) distance.matrix <- matrix(1, N, N) for (i in seq_len(N)) { - distance.matrix[i, ] <- sample(c(seq(0, N-1, 1)), size = N) - if(which(distance.matrix[i,]==0)!=i){ - distance.matrix[i,which(distance.matrix[i,]==0)] <- distance.matrix[i,i] - distance.matrix[i,i] <- 0 - } + distance.matrix[i, ] <- sample(c(seq(0, N - 1, 1)), size = N) + if (which(distance.matrix[i, ] == 0) != i) { + distance.matrix[i, which(distance.matrix[i, ] == 0)] <- distance.matrix[i, i] + distance.matrix[i, i] <- 0 + } } - #diag(distance.matrix) <- 0 - - if(FALSE){ - distance.matrix <- rbind(c(0, 1, 4, 3, 2, 6, 5, 8, 7, 9, 10, 11, 12, 13, 14), - c(5, 0, 3, 4, 8, 1, 2, 7, 6, 9, 10, 11, 12, 13, 14), - c(5, 3, 0, 1, 8, 4, 2, 7, 6, 9, 10, 11, 12, 13, 14), - c(6, 2, 1, 0, 8, 4, 3, 7, 5, 9, 10, 11, 12, 13, 14), - c(2, 7, 5, 4, 0, 8, 6, 1, 3, 9, 10, 11, 12, 13, 14), - c(6, 1, 3, 4, 8, 0, 2, 7, 5, 9, 10, 11, 12, 13, 14), - c(5, 3, 1, 2, 8, 6, 0, 7, 4, 9, 10, 11, 12, 13, 14), - c(3, 6, 4, 5, 1, 7, 8, 0, 2, 9, 10, 11, 12, 13, 14), - c(1, 5, 3, 2, 7, 6, 4, 8, 0, 9, 10, 11, 12, 13, 14), - c(3, 6, 4, 5, 1, 7, 8, 9, 2, 0, 10, 11, 12, 13, 14), - c(3, 6, 4, 5, 1, 7, 8, 10, 2, 9, 0, 11, 12, 13, 14), - c(3, 6, 4, 5, 1, 7, 8, 11, 2, 9, 10, 0, 12, 13, 14), - c(3, 6, 4, 5, 1, 7, 8, 12, 2, 9, 10, 11, 0, 13, 14), - c(3, 6, 4, 5, 1, 7, 8, 13, 2, 9, 10, 11, 12, 0, 14), - c(3, 6, 4, 5, 1, 7, 8, 14, 2, 9, 10, 11, 12, 13, 0)) - + # diag(distance.matrix) <- 0 + + if (FALSE) { + distance.matrix <- rbind( + c(0, 1, 4, 3, 2, 6, 5, 8, 7, 9, 10, 11, 12, 13, 14), + c(5, 0, 3, 4, 8, 1, 2, 7, 6, 9, 10, 11, 12, 13, 14), + c(5, 3, 0, 1, 8, 4, 2, 7, 6, 9, 10, 11, 12, 13, 14), + c(6, 2, 1, 0, 8, 4, 3, 7, 5, 9, 10, 11, 12, 13, 14), + c(2, 7, 5, 4, 0, 8, 6, 1, 3, 9, 10, 11, 12, 13, 14), + c(6, 1, 3, 4, 8, 0, 2, 7, 5, 9, 10, 11, 12, 13, 14), + c(5, 3, 1, 2, 8, 6, 0, 7, 4, 9, 10, 11, 12, 13, 14), + c(3, 6, 4, 5, 1, 7, 8, 0, 2, 9, 10, 11, 12, 13, 14), + c(1, 5, 3, 2, 7, 6, 4, 8, 0, 9, 10, 11, 12, 13, 14), + c(3, 6, 
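# A minimal sketch of the placeholder distance matrix built above: each row is
# a random permutation of 0:(N-1), then patched so the diagonal (a species'
# distance to itself) is 0. N = 4 is a hypothetical number of state variables.
set.seed(42)
N <- 4
dm <- t(sapply(seq_len(N), function(i) sample(seq(0, N - 1, 1), size = N)))
for (i in seq_len(N)) {
  j <- which(dm[i, ] == 0)
  if (j != i) {
    dm[i, j] <- dm[i, i]
    dm[i, i] <- 0
  }
}
dm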
4, 5, 1, 7, 8, 9, 2, 0, 10, 11, 12, 13, 14), + c(3, 6, 4, 5, 1, 7, 8, 10, 2, 9, 0, 11, 12, 13, 14), + c(3, 6, 4, 5, 1, 7, 8, 11, 2, 9, 10, 0, 12, 13, 14), + c(3, 6, 4, 5, 1, 7, 8, 12, 2, 9, 10, 11, 0, 13, 14), + c(3, 6, 4, 5, 1, 7, 8, 13, 2, 9, 10, 11, 12, 0, 14), + c(3, 6, 4, 5, 1, 7, 8, 14, 2, 9, 10, 11, 12, 13, 0) + ) } - #distance.matrix <- rbind(c(0,3,1,2), c(3,0,2,1), c(1,2,0,3), c(2,1,3,0)) - + # distance.matrix <- rbind(c(0,3,1,2), c(3,0,2,1), c(1,2,0,3), c(2,1,3,0)) + ## HACK - spp.params.default <- utils::read.csv(system.file("spp_matrix.csv", package = "linkages")) #default spp.params + spp.params.default <- utils::read.csv(system.file("spp_matrix.csv", package = "linkages")) # default spp.params nspec <- length(settings$pfts) spp.params.save <- numeric(nspec) for (i in seq_len(nspec)) { spp.params.save[i] <- which(spp.params.default[, 1] %in% settings$pfts[i]$pft$name) } - + spp.params <- spp.params.default[spp.params.save, ] biomass_spp_params <- function(new.params, default.params, pft) { if ("SLTA" %in% names(new.params[[as.character(pft)]])) { @@ -97,9 +97,9 @@ write_restart.LINKAGES <- function(outdir, runid, start.time, stop.time, sltb <- default.params[default.params$Spp_Name == pft, ]$SLTB } if ("SLA" %in% names(new.params[[as.character(pft)]])) { - sla_use <- (1/new.params[[as.character(pft)]]$SLA)*1000 - sla_use[sla_use>5000] <- stats::rnorm(1,4000,100) - fwt <- sla_use#(1 / new.params[[as.character(pft)]]$SLA) * 10000 + sla_use <- (1 / new.params[[as.character(pft)]]$SLA) * 1000 + sla_use[sla_use > 5000] <- stats::rnorm(1, 4000, 100) + fwt <- sla_use # (1 / new.params[[as.character(pft)]]$SLA) * 10000 } else { fwt <- default.params[default.params$Spp_Name == pft, ]$FWT } @@ -110,20 +110,20 @@ write_restart.LINKAGES <- function(outdir, runid, start.time, stop.time, } return(list(slta = slta, sltb = sltb, fwt = fwt, frt = frt)) } # biomass_spp_params - + biomass_function <- function(dbh, spp.biomass.params) { # kg/tree - 0.1193 * dbh ^ 2.393 + - ((spp.biomass.params$slta + spp.biomass.params$sltb * dbh) / 2) ^ 2 * - 3.14 * spp.biomass.params$fwt * spp.biomass.params$frt * 0.001 + 0.1193 * dbh^2.393 + + ((spp.biomass.params$slta + spp.biomass.params$sltb * dbh) / 2)^2 * + 3.14 * spp.biomass.params$fwt * spp.biomass.params$frt * 0.001 } # biomass_function - + merit <- function(dbh, b_obs, spp.biomass.params) { - (b_obs - biomass_function(dbh, spp.biomass.params)) ^ 2 + (b_obs - biomass_function(dbh, spp.biomass.params))^2 } # merit - + ## HACK - + # skip ensemble member if no file availible outfile <- file.path(outdir, runid, "linkages.out.Rdata") if (!file.exists(outfile)) { @@ -133,117 +133,119 @@ write_restart.LINKAGES <- function(outdir, runid, start.time, stop.time, } } print(paste0("runid = ", runid)) - + # load output load(outfile) - - ntrees <- ntrees.kill[, ncol(ntrees.kill), 1] # number of trees - - if(sum(ntrees)==0) { - #reloads spin up if theres nothing in the output file - print('No survivors. Reusing spinup.') - load(file.path(outdir, runid,list.files(file.path(outdir, runid))[grep(list.files(file.path(outdir, runid)),pattern='linkages')][1])) - ntrees <- ntrees.kill[, ncol(ntrees.kill), 1] # number of trees - + + ntrees <- ntrees.kill[, ncol(ntrees.kill), 1] # number of trees + + if (sum(ntrees) == 0) { + # reloads spin up if theres nothing in the output file + print("No survivors. 
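# A minimal sketch of the merit()/optimize() pattern defined above: given a
# target per-tree biomass b_obs (kg), find the dbh whose predicted biomass
# matches it. The allometry parameters are hypothetical, not from
# spp_matrix.csv; the (1 / 833) * 0.48 factor applied elsewhere presumably
# converts a per-tree value on the plot to kgC/m2, per the inline comments.
p <- list(slta = 0.8, sltb = 0.03, fwt = 2000, frt = 1)
biomass_fn <- function(dbh, p) {
  0.1193 * dbh^2.393 + ((p$slta + p$sltb * dbh) / 2)^2 * 3.14 * p$fwt * p$frt * 0.001
}
b_obs <- 150 # kg/tree, hypothetical target implied by the analysis state
dbh_hat <- stats::optimize(function(d) (b_obs - biomass_fn(d, p))^2, c(1, 200))$minimum
c(dbh = dbh_hat, biomass = biomass_fn(dbh_hat, p)) # biomass is ~ b_obs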
Reusing spinup.") + load(file.path(outdir, runid, list.files(file.path(outdir, runid))[grep(list.files(file.path(outdir, runid)), pattern = "linkages")][1])) + ntrees <- ntrees.kill[, ncol(ntrees.kill), 1] # number of trees } - - nspec <- length(settings$pfts) + + nspec <- length(settings$pfts) ncohrt <- ncohrt - tyl <- tyl - C.mat <- C.mat - - nogro <- as.vector(nogro.save[, ncol(nogro.save), 1]) ## no growth indicator - ksprt <- matrix(0, 1, nspec) ## kill sprout indicator ## LOOK INTO THIS - iage <- as.vector(iage.save[, ncol(iage.save), 1]) # individual age - - dbh <- as.vector(dbh.save[, ncol(dbh.save), 1]) - + tyl <- tyl + C.mat <- C.mat + + nogro <- as.vector(nogro.save[, ncol(nogro.save), 1]) ## no growth indicator + ksprt <- matrix(0, 1, nspec) ## kill sprout indicator ## LOOK INTO THIS + iage <- as.vector(iage.save[, ncol(iage.save), 1]) # individual age + + dbh <- as.vector(dbh.save[, ncol(dbh.save), 1]) + n.index <- c(rep(1, ntrees[1])) for (i in 2:length(settings$pfts)) { n.index <- c(n.index, rep(i, ntrees[i])) } - - if(max(dbh) < 20){ # if all trees are small than large trees are 95th percentile otherwise trees bigger than 5 cm + + if (max(dbh) < 20) { # if all trees are small than large trees are 95th percentile otherwise trees bigger than 5 cm large.trees <- which(dbh >= (max(dbh) / 1.05)) - }else{ + } else { large.trees <- which(dbh >= 20) } - + large.trees <- which(dbh > 0) - + for (s in seq_along(settings$pfts)) { ntrees[s] <- length(which(n.index[large.trees] == s)) } - + n.index <- n.index[large.trees] - + dbh <- dbh[large.trees] iage <- iage[large.trees] nogro <- nogro[large.trees] - + new.ntrees <- numeric(length(settings$pfts)) - - print(paste0("ntrees (large trees) =", ntrees)) #these are the large trees - + + print(paste0("ntrees (large trees) =", ntrees)) # these are the large trees + ##### This takes the average individual biomass of each species from the model and computes how many ##### individuals you should keep to match the biomass estimated from the data. Still have to correct ##### for the total species biomass in the next step. 
- + ind.biomass <- numeric(sum(ntrees)) - + # calculate biomass of each individual for (j in seq_len(sum(ntrees))) { # slta <- spp.params$SLTA[n.index[j]] sltb <- spp.params$SLTB[n.index[j]] fwt <- # spp.params$FWT[n.index[j]] frt <- spp.params$FRT[n.index[j]] pft <- spp.params$Spp_Name[n.index[j]] - spp.biomass.params <- biomass_spp_params(new.params = new.params, - default.params = spp.params.default, - pft = pft) - ind.biomass[j] <- biomass_function(dbh[j], spp.biomass.params) * (1 / 833) * 0.48 # changing units to be kgC/m^2 + spp.biomass.params <- biomass_spp_params( + new.params = new.params, + default.params = spp.params.default, + pft = pft + ) + ind.biomass[j] <- biomass_function(dbh[j], spp.biomass.params) * (1 / 833) * 0.48 # changing units to be kgC/m^2 } - - data2 <- data.frame(ind.biomass = ind.biomass, - n.index = n.index) - mean.biomass.spp <- stats::aggregate(ind.biomass ~ n.index, mean, data = data2) # calculate mean individual biomass for each species - #browser() + + data2 <- data.frame( + ind.biomass = ind.biomass, + n.index = n.index + ) + mean.biomass.spp <- stats::aggregate(ind.biomass ~ n.index, mean, data = data2) # calculate mean individual biomass for each species + # browser() # calculate number of individuals needed to match new.state for (s in seq_along(settings$pfts)) { - if (ntrees[s] > 0) { - fix_adjust <- new.state[s]/mean.biomass.spp[mean.biomass.spp[, 1] == s, 2] # number of individuals needed to agree with new.state + fix_adjust <- new.state[s] / mean.biomass.spp[mean.biomass.spp[, 1] == s, 2] # number of individuals needed to agree with new.state } else { for (r in 1:(length(settings$pfts) - 1)) { - s.select <- which(distance.matrix[s, ] == r) # select a new spp. to clone from + s.select <- which(distance.matrix[s, ] == r) # select a new spp. to clone from if (ntrees[s.select] > 0) { break } } fix_adjust <- new.state[s] / mean.biomass.spp[mean.biomass.spp[, 1] == s.select, 2] } - new.ntrees[s] <- as.numeric(ceiling(fix_adjust-.01)) #new number of ind. of each species - if(new.ntrees[s]>200&!is.na(new.ntrees[s])){ - new.ntrees[s] = sample(size = 1, x = 50:150) - } + new.ntrees[s] <- as.numeric(ceiling(fix_adjust - .01)) # new number of ind. of each species + if (new.ntrees[s] > 200 & !is.na(new.ntrees[s])) { + new.ntrees[s] <- sample(size = 1, x = 50:150) + } print(s) } - - #making sure to stick with density dependence rules in linkages (< 198 trees per 800/m^2) - #someday we could think about estimating this parameter from data - if(sum(new.ntrees,na.rm = T) > 198) new.ntrees <- round((new.ntrees / sum(new.ntrees)) * stats::runif(1,195,198)) - + + # making sure to stick with density dependence rules in linkages (< 198 trees per 800/m^2) + # someday we could think about estimating this parameter from data + if (sum(new.ntrees, na.rm = T) > 198) new.ntrees <- round((new.ntrees / sum(new.ntrees)) * stats::runif(1, 195, 198)) + print(paste0("new.ntrees =", new.ntrees)) - + new.n.index <- c(rep(1, new.ntrees[1])) for (i in 2:length(settings$pfts)) { new.n.index <- c(new.n.index, rep(i, new.ntrees[i])) } - + n.ind <- 200 - + dbh.temp <- numeric(n.ind) iage.temp <- numeric(n.ind) nogro.temp <- numeric(n.ind) - + # sample from individuals to construct new states for (s in seq_len(nspec)) { if (new.ntrees[s] == 0) { @@ -256,33 +258,37 @@ write_restart.LINKAGES <- function(outdir, runid, start.time, stop.time, if (new.ntrees[s] > ntrees[s] & ntrees[s] >= 1) { # new are greater than the old of the same spp. 
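# A minimal sketch of the individual-count adjustment above, with hypothetical
# numbers: the target biomass per PFT from the analysis (kgC/m2), divided by
# the mean per-individual biomass, gives how many individuals to carry forward,
# capped by the LINKAGES density-dependence rule of fewer than 198 trees per plot.
target_biomass <- c(2.4, 0.9, 0.3)      # kgC/m2 per PFT (hypothetical new.state)
mean_ind_biomass <- c(0.05, 0.03, 0.04) # kgC/m2 per individual (hypothetical)
new_ntrees <- ceiling(target_biomass / mean_ind_biomass - 0.01)
if (sum(new_ntrees, na.rm = TRUE) > 198) {
  new_ntrees <- round(new_ntrees / sum(new_ntrees) * stats::runif(1, 195, 198))
}
new_ntrees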
and there are old trees to clone print('new are # greater than the old of the same spp. and there are old trees of same spp. to clone') - select <- c(which(n.index == s), - sample(size = (new.ntrees[s] - ntrees[s]), x = which(n.index == s), replace = TRUE)) + select <- c( + which(n.index == s), + sample(size = (new.ntrees[s] - ntrees[s]), x = which(n.index == s), replace = TRUE) + ) } else { # print(paste0('clone needed for spp. ',s)) for (r in 1:(length(settings$pfts) - 1)) { - s.select <- which(distance.matrix[s, ] == r) #select a new spp. to clone from + s.select <- which(distance.matrix[s, ] == r) # select a new spp. to clone from # print(paste0('r =',r)) if (ntrees[s.select] > 0) { break } } # print(s.select) - select <- sample(size = as.numeric(new.ntrees[s]), - x = which(n.index == s.select), - replace = TRUE) + select <- sample( + size = as.numeric(new.ntrees[s]), + x = which(n.index == s.select), + replace = TRUE + ) } } dbh.temp[which(new.n.index == s)] <- dbh[select] iage.temp[which(new.n.index == s)] <- iage[select] nogro.temp[which(new.n.index == s)] <- nogro[select] } - + # fix dbh of sampled individuals to match new.state - nl <- 1 ## individual counter - b_calc <- numeric(length(settings$pfts)) #biomass of sampled trees - b_calc1 <- numeric(length(settings$pfts)) #biomass of sampled trees - bcorr <- numeric(length(settings$pfts)) #biomass correction factor to new.state + nl <- 1 ## individual counter + b_calc <- numeric(length(settings$pfts)) # biomass of sampled trees + b_calc1 <- numeric(length(settings$pfts)) # biomass of sampled trees + bcorr <- numeric(length(settings$pfts)) # biomass correction factor to new.state b_obs <- numeric(sum(new.ntrees)) for (s in seq_len(nspec)) { if (new.ntrees[s] == 0) { @@ -290,50 +296,56 @@ write_restart.LINKAGES <- function(outdir, runid, start.time, stop.time, } nu <- nl + new.ntrees[s] - 1 pft <- unique(spp.params$Spp_Name[new.n.index[nl:nu]]) - spp.biomass.params <- biomass_spp_params(new.params = new.params, - default.params = spp.params.default, - pft = pft) - b_calc[s] <- sum(biomass_function(dbh.temp[nl:nu], - spp.biomass.params = spp.biomass.params)) * (1 / 833) * 0.48 # changing units to be kgC/m^2 - - bcorr[s] <- new.state[s] / b_calc[s] #calculate biomass correction - + spp.biomass.params <- biomass_spp_params( + new.params = new.params, + default.params = spp.params.default, + pft = pft + ) + b_calc[s] <- sum(biomass_function(dbh.temp[nl:nu], + spp.biomass.params = spp.biomass.params + )) * (1 / 833) * 0.48 # changing units to be kgC/m^2 + + bcorr[s] <- new.state[s] / b_calc[s] # calculate biomass correction + if (length(pft) > 1) { stop("error too many pfts assigned") } - - b_obs[nl:nu] <- biomass_function(dbh.temp[nl:nu], - spp.biomass.params = spp.biomass.params) * as.numeric(bcorr[s]) + + b_obs[nl:nu] <- biomass_function(dbh.temp[nl:nu], + spp.biomass.params = spp.biomass.params + ) * as.numeric(bcorr[s]) bMax <- 200 for (j in nl:nu) { dbh.temp[j] <- stats::optimize( merit, c(1, bMax), b_obs = b_obs[j], - spp.biomass.params = spp.biomass.params)$minimum + spp.biomass.params = spp.biomass.params + )$minimum } b_calc1[s] <- sum(biomass_function(dbh.temp[nl:nu], - spp.biomass.params = spp.biomass.params)) * (1 / 833) * 0.48 + spp.biomass.params = spp.biomass.params + )) * (1 / 833) * 0.48 nl <- nu + 1 } - + dbh <- dbh.temp iage <- iage.temp - nogro <- nogro.temp # numeric(200)#hack - - #nogro[nogro < 1] <- 1 - + nogro <- nogro.temp # numeric(200)#hack + + # nogro[nogro < 1] <- 1 + ntrees <- new.ntrees - + # 
print(dbh[1:ntrees[1]]) - + # translate agb to dbh - - #dbh_spp[s] <- optimize(merit, c(0,200))$minimum bcorr = new.state[i,] / + + # dbh_spp[s] <- optimize(merit, c(0,200))$minimum bcorr = new.state[i,] / # agb.pft[,ncol(agb.pft),1] *(bcorr[s]/ntrees[s]) dbh.temp1[j] <- optimize(merit, # c(0,200))$minimum - + # for(n in 1:nspec){ slta <- spp.params$SLTA[n] sltb <- spp.params$SLTB[n] fwt <- # spp.params$FWT[n] frt <- spp.params$FRT[n] if (agb.pft[n,ncol(agb.pft),1]==0 & # new.state[i,n]>0){ abg.pft.temp <- sum(distance.matrix[,n]%*%t(agb.pft[n,ncol(agb.pft),1])) @@ -341,45 +353,54 @@ write_restart.LINKAGES <- function(outdir, runid, start.time, stop.time, # dbh[sum(ntrees[1:n])-1] for(j in 1:ntrees.temp){ b_obs <- # biomass_function(dbh[j],slta=slta,sltb=sltb,fwt=fwt,frt=frt)*bcorr[n] dbh.temp[j] <- # optimize(merit, c(0,200),b_obs=b_obs)$minimum } } nu <- nl + ntrees[n] - 1 nl <- nu + 1 } - + ##### SOIL if ("TotSoilCarb" %in% names(new.state.other)) { leaf.sum <- sum(tyl[1:12]) * 0.48 - if(new.state.other["TotSoilCarb"] > 1000) new.state.other["TotSoilCarb"] = stats::rnorm(1,1000,10) + if (new.state.other["TotSoilCarb"] > 1000) new.state.other["TotSoilCarb"] <- stats::rnorm(1, 1000, 10) soil.org.mat <- new.state.other["TotSoilCarb"] - leaf.sum soil.corr <- soil.org.mat / (sum(C.mat[C.mat[1:ncohrt, 5], 1]) * 0.48) - #if(soil.corr > 1) soil.corr <- 1 + # if(soil.corr > 1) soil.corr <- 1 C.mat[C.mat[1:ncohrt, 5], 1] <- C.mat[C.mat[1:ncohrt, 5], 1] * as.numeric(soil.corr) - C.mat[is.na(C.mat[1:ncohrt,1]),1] <- 0 - C.mat[C.mat[1:ncohrt,1] < 0,1] <- 0 + C.mat[is.na(C.mat[1:ncohrt, 1]), 1] <- 0 + C.mat[C.mat[1:ncohrt, 1] < 0, 1] <- 0 } - + if (RENAME) { - file.rename(file.path(settings$rundir, runid, "linkages.restart.Rdata"), - file.path(settings$rundir, runid, paste0(start.time, "linkages.restart.Rdata"))) # save original output + file.rename( + file.path(settings$rundir, runid, "linkages.restart.Rdata"), + file.path(settings$rundir, runid, paste0(start.time, "linkages.restart.Rdata")) + ) # save original output } restart.file <- file.path(settings$rundir, runid, "linkages.restart.Rdata") sprintf("%s", restart.file) - - + + save(dbh, tyl, ntrees, nogro, ksprt, iage, C.mat, ncohrt, file = restart.file) - + # make a new settings with the right years min start date and end date - fail in informative way settings$run$start.date <- paste0( formatC(lubridate::year(start.time + 1), width = 4, format = "d", flag = "0"), - "/01/01") + "/01/01" + ) settings$run$end.date <- paste0( formatC(lubridate::year(stop.time), width = 4, format = "d", flag = "0"), - "/12/31") + "/12/31" + ) do.call(write.config.LINKAGES, - args = list(trait.values = new.params, settings = settings, run.id = runid, - restart = TRUE, spinup = FALSE, inputs = inputs)) + args = list( + trait.values = new.params, settings = settings, run.id = runid, + restart = TRUE, spinup = FALSE, inputs = inputs + ) + ) # save original output if (RENAME) { - file.rename(file.path(outdir, runid, "linkages.out.Rdata"), - file.path(outdir, runid, paste0(start.time, "linkages.out.Rdata"))) + file.rename( + file.path(outdir, runid, "linkages.out.Rdata"), + file.path(outdir, runid, paste0(start.time, "linkages.out.Rdata")) + ) } } # write_restart.LINKAGES diff --git a/models/linkages/inst/LINKAGES.lyford.SDA.R b/models/linkages/inst/LINKAGES.lyford.SDA.R index 0518504f062..b78f31cfb95 100644 --- a/models/linkages/inst/LINKAGES.lyford.SDA.R +++ b/models/linkages/inst/LINKAGES.lyford.SDA.R @@ -6,11 +6,11 @@ options(warn = 1, keep.source = TRUE, error = 
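# A minimal sketch of the TotSoilCarb rescaling above, with hypothetical values:
# the analysis soil C target, minus this year's leaf-litter C, is spread over
# the existing soil cohorts through a single multiplicative correction
# (0.48 appears to convert organic matter to carbon, as in the other conversions).
target_soil_c <- 9.5           # kgC/m2, as in new.state.other["TotSoilCarb"]
leaf_sum <- 0.4                # kgC/m2, leaf litter already accounted for
cohort_om <- c(3.0, 2.5, 4.1)  # soil cohort organic matter (hypothetical)
soil_corr <- (target_soil_c - leaf_sum) / (sum(cohort_om) * 0.48)
cohort_om * soil_corr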
quote({ })) status.start <- function(name) { - cat(paste(name, format(Sys.time(), "%F %T"), sep="\t"), file=file.path(settings$outdir, "STATUS"), append=TRUE) + cat(paste(name, format(Sys.time(), "%F %T"), sep = "\t"), file = file.path(settings$outdir, "STATUS"), append = TRUE) } -status.end <- function(status="DONE") { - cat(paste("", format(Sys.time(), "%F %T"), status, "\n", sep="\t"), file=file.path(settings$outdir, "STATUS"), append=TRUE) +status.end <- function(status = "DONE") { + cat(paste("", format(Sys.time(), "%F %T"), status, "\n", sep = "\t"), file = file.path(settings$outdir, "STATUS"), append = TRUE) } #---------------- Load libraries. -----------------------------------------------------------------# @@ -63,66 +63,68 @@ library(PEcAn.LINKAGES) # 1961/01/01 # 2010/12/31 # - - #---------------- Load PEcAn settings file. -------------------------------------------------------# - # Open and read in settings file for PEcAn run. - settings <- read.settings("pecan.SDA.xml") + +#---------------- Load PEcAn settings file. -------------------------------------------------------# +# Open and read in settings file for PEcAn run. +settings <- read.settings("pecan.SDA.xml") #--------------------------------------------------------------------------------------------------# #---------------- Load data. -------------------------------------------------------# -load('~/linkages_lyford_summary_v6.Rdata') +load("~/linkages_lyford_summary_v6.Rdata") row.keep <- list() -spp.params.default <- read.csv(system.file("spp_matrix.csv", package = "linkages")) #default spp.params #this doesn't work unless linkages is in my home directory +spp.params.default <- read.csv(system.file("spp_matrix.csv", package = "linkages")) # default spp.params #this doesn't work unless linkages is in my home directory -for(i in 1:15){ - row.keep[[i]]<-grep(rownames(ab_mat)[i],spp.params.default[,2])[1] +for (i in 1:15) { + row.keep[[i]] <- grep(rownames(ab_mat)[i], spp.params.default[, 2])[1] } -dist.mat<-spp.params.default[unlist(row.keep),] -dist.mat<-dist.mat[-9,] -rownames(dist.mat)<-dist.mat[,1] -dist(dist.mat,method="canberra") +dist.mat <- spp.params.default[unlist(row.keep), ] +dist.mat <- dist.mat[-9, ] +rownames(dist.mat) <- dist.mat[, 1] +dist(dist.mat, method = "canberra") -new.names <- spp.params.default[unlist(row.keep),1] -new.names[2] <- spp.params.default[18,1] +new.names <- spp.params.default[unlist(row.keep), 1] +new.names[2] <- spp.params.default[18, 1] rm.spp <- which(is.na(new.names)) -new.names<-new.names[-rm.spp] +new.names <- new.names[-rm.spp] -ab_mat<-ab_mat[-rm.spp,] +ab_mat <- ab_mat[-rm.spp, ] -rownames(ab_mat)<- paste0("AGB.pft.",new.names) +rownames(ab_mat) <- paste0("AGB.pft.", new.names) obs.mean <- list() -for(i in 1:ncol(ab_mat)){ - obs.mean[[i]] <- ab_mat[,i] +for (i in 1:ncol(ab_mat)) { + obs.mean[[i]] <- ab_mat[, i] } -cov_array<-cov_array[-rm.spp,-rm.spp,] -colnames(cov_array)<-new.names -rownames(cov_array)<-new.names +cov_array <- cov_array[-rm.spp, -rm.spp, ] +colnames(cov_array) <- new.names +rownames(cov_array) <- new.names obs.cov <- list() -for(i in 1:dim(cov_array)[3]){ - obs.cov[[i]] <- cov_array[,,i] +for (i in 1:dim(cov_array)[3]) { + obs.cov[[i]] <- cov_array[, , i] } -years<-1960:2014 -names(obs.mean) <- paste0(years,'/12/31') - -for(i in 1:14){ - plot(ab_mat[i,],typ='l',main=new.names[i]) - lines(ab_mat[i,]*cov_array[i,i,]) - lines(ab_mat[i,]*cov_array[i,i,]) - lines(ab_mat[i,]*(cov_array[i,i,]+1)) +years <- 1960:2014 +names(obs.mean) <- paste0(years, "/12/31") + +for 
(i in 1:14) { + plot(ab_mat[i, ], typ = "l", main = new.names[i]) + lines(ab_mat[i, ] * cov_array[i, i, ]) + lines(ab_mat[i, ] * cov_array[i, i, ]) + lines(ab_mat[i, ] * (cov_array[i, i, ] + 1)) } #---------------- Build Initial Conditions ----------------------------------------------------------------------# status.start("IC") -#ne = as.numeric(settings$state.data.assimilation$n.ensemble) -IC = matrix(NA,as.numeric(settings$state.data.assimilation$n.ensemble),length(settings$pfts)) -#sample.IC.SIPNET(ne,state,year=1) +# ne = as.numeric(settings$state.data.assimilation$n.ensemble) +IC <- matrix(NA, as.numeric(settings$state.data.assimilation$n.ensemble), length(settings$pfts)) +# sample.IC.SIPNET(ne,state,year=1) status.end() #--------------- Assimilation -------------------------------------------------------# status.start("EnKF") -sda.enkf(settings=settings, obs.mean = obs.mean, - obs.cov = obs.cov, IC = IC, Q = NULL) -status.end() \ No newline at end of file +sda.enkf( + settings = settings, obs.mean = obs.mean, + obs.cov = obs.cov, IC = IC, Q = NULL +) +status.end() diff --git a/models/linkages/inst/MIP_workflow_addn.R b/models/linkages/inst/MIP_workflow_addn.R index ce4fd8859ec..8ba1ff7727b 100644 --- a/models/linkages/inst/MIP_workflow_addn.R +++ b/models/linkages/inst/MIP_workflow_addn.R @@ -1,49 +1,57 @@ +settings.file <- "/Users/paleolab/pecan/models/linkages/inst/linkages.xml" -settings.file = "/Users/paleolab/pecan/models/linkages/inst/linkages.xml" +file.copy( + paste0("/Users/paleolab/Linkages/met2model_output_v4.2/", site, "/", "climate.txt"), + paste0("/Users/paleolab/pecan/models/linkages/inst/", site, "/run/ENS-00001/") +) -file.copy(paste0("/Users/paleolab/Linkages/met2model_output_v4.2/",site,"/","climate.txt"), - paste0("/Users/paleolab/pecan/models/linkages/inst/",site,"/run/ENS-00001/")) +PFTs <- c("Acer", "betula", "carya", "castanea dentata", "fagus grandifolia", "picea", "pinus", "tsuga canadensis", "quercus") -PFTs = c("Acer","betula","carya","castanea dentata","fagus grandifolia","picea","pinus","tsuga canadensis","quercus") - -#Harvard Forest Howland Forest UNDERC Billy's Lake Deming Lake Minden Bog +# Harvard Forest Howland Forest UNDERC Billy's Lake Deming Lake Minden Bog #-72.18 -68.73 -89.53 -94.58 -95.17 -82.83 -#42.54 45.25 46.22 46.28 47.17 43.61 - -site = "PMB" -sitelat = 43.61 -sitelon = -82.83 -all_spp_params = read.csv("/Users/paleolab/Linkages/spp_matrix.csv") -pick_spp = c("ash","beech","birch","elm","hemlock","maple","oak","pine","tamarack") -PFTs = as.character(all_spp_params[which(all_spp_params$Spp_Name%in%pick_spp),1]) -outdir = paste0("/Users/paleolab/Linkages/met2model_output_v4.2/",site) -model2netcdf.LINKAGES(PFTs = PFTs, outdir = outdir, sitelat = sitelat, sitelon = sitelon, start_date=NULL, end_date=NULL,force=FALSE) - -met2model.LINKAGES(in.path = "/Users/paleolab/Linkages/phase1a_met_drivers_v4.2/PUN", - in.prefix = "PUN", - outfolder = "/Users/paleolab/linkages/met2model_output_v4.2/PUN/", - start_date = "0850/01/01", - end_date = "2010/12/31", - overwrite=FALSE,verbose=FALSE) -met2model.LINKAGES(in.path = "/Users/paleolab/Linkages/phase1a_met_drivers_v4.2/PMB", - in.prefix = "PMB", - outfolder = "/Users/paleolab/linkages/met2model_output_v4.2/PMB", - start_date = "0850/01/01", - end_date = "2010/12/31", - overwrite=FALSE,verbose=FALSE) -met2model.LINKAGES(in.path = "/Users/paleolab/Linkages/phase1a_met_drivers_v4.2/PHA", - in.prefix = "PHA", - outfolder = "/Users/paleolab/linkages/met2model_output_v4.2/PHA", - start_date = 
"0850/01/01", - end_date = "2010/12/31", - overwrite=FALSE,verbose=FALSE) -met2model.LINKAGES(in.path = "/Users/paleolab/Linkages/phase1a_met_drivers_v4.2/PHO", - in.prefix = "PHO", - outfolder = "/Users/paleolab/linkages/met2model_output_v4.2/PHO", - start_date = "0850/01/01", - end_date = "2010/12/31", - overwrite=FALSE,verbose=FALSE) +# 42.54 45.25 46.22 46.28 47.17 43.61 + +site <- "PMB" +sitelat <- 43.61 +sitelon <- -82.83 +all_spp_params <- read.csv("/Users/paleolab/Linkages/spp_matrix.csv") +pick_spp <- c("ash", "beech", "birch", "elm", "hemlock", "maple", "oak", "pine", "tamarack") +PFTs <- as.character(all_spp_params[which(all_spp_params$Spp_Name %in% pick_spp), 1]) +outdir <- paste0("/Users/paleolab/Linkages/met2model_output_v4.2/", site) +model2netcdf.LINKAGES(PFTs = PFTs, outdir = outdir, sitelat = sitelat, sitelon = sitelon, start_date = NULL, end_date = NULL, force = FALSE) + +met2model.LINKAGES( + in.path = "/Users/paleolab/Linkages/phase1a_met_drivers_v4.2/PUN", + in.prefix = "PUN", + outfolder = "/Users/paleolab/linkages/met2model_output_v4.2/PUN/", + start_date = "0850/01/01", + end_date = "2010/12/31", + overwrite = FALSE, verbose = FALSE +) +met2model.LINKAGES( + in.path = "/Users/paleolab/Linkages/phase1a_met_drivers_v4.2/PMB", + in.prefix = "PMB", + outfolder = "/Users/paleolab/linkages/met2model_output_v4.2/PMB", + start_date = "0850/01/01", + end_date = "2010/12/31", + overwrite = FALSE, verbose = FALSE +) +met2model.LINKAGES( + in.path = "/Users/paleolab/Linkages/phase1a_met_drivers_v4.2/PHA", + in.prefix = "PHA", + outfolder = "/Users/paleolab/linkages/met2model_output_v4.2/PHA", + start_date = "0850/01/01", + end_date = "2010/12/31", + overwrite = FALSE, verbose = FALSE +) +met2model.LINKAGES( + in.path = "/Users/paleolab/Linkages/phase1a_met_drivers_v4.2/PHO", + in.prefix = "PHO", + outfolder = "/Users/paleolab/linkages/met2model_output_v4.2/PHO", + start_date = "0850/01/01", + end_date = "2010/12/31", + overwrite = FALSE, verbose = FALSE +) write.config.LINKAGES(settings = settings, run.id = run.id) - diff --git a/models/linkages/inst/output.visualization.LINKAGES.R b/models/linkages/inst/output.visualization.LINKAGES.R index 15de6c095bc..8b9669dfe80 100644 --- a/models/linkages/inst/output.visualization.LINKAGES.R +++ b/models/linkages/inst/output.visualization.LINKAGES.R @@ -1,43 +1,42 @@ -if(FALSE){ -### set working directory to output file. - -outdir = "/Users/paleolab/pecan/pecan//PEcAn_LINKAGES_PMB/out/ENS-00001/" -link = as.matrix(read.csv(paste0(outdir,"OUT.csv"),head=FALSE)) +if (FALSE) { + ### set working directory to output file. 
-x=seq(0,1150,50) #change to match output interval in OUT.csv -ecol_proc=link[1:24,] -colnames(ecol_proc) = c("year","num stems","ag biomass","leaf litter","leaf litter N","ag npp","avail n","humus C:N","soil co2-c","soil OM","aet") -ecol_proc_cis = link[25:48,] + outdir <- "/Users/paleolab/pecan/pecan//PEcAn_LINKAGES_PMB/out/ENS-00001/" + link <- as.matrix(read.csv(paste0(outdir, "OUT.csv"), head = FALSE)) -par(mfrow=c(3,3)) -for(i in 2:10){ - plot(x,ecol_proc[,i],typ="l",ylim=c(min(ecol_proc[,i]-ecol_proc_cis[,i]),max(ecol_proc[,i]+ecol_proc_cis[,i])),main=colnames(ecol_proc)[i],ylab=NA,xlab="Year") - lines(x,ecol_proc[,i]-ecol_proc_cis[,i],lty=3,col="blue") - lines(x,ecol_proc[,i]+ecol_proc_cis[,i],lty=3,col="blue") - #abline(v=ipolat_nums,lty=4) -} + x <- seq(0, 1150, 50) # change to match output interval in OUT.csv + ecol_proc <- link[1:24, ] + colnames(ecol_proc) <- c("year", "num stems", "ag biomass", "leaf litter", "leaf litter N", "ag npp", "avail n", "humus C:N", "soil co2-c", "soil OM", "aet") + ecol_proc_cis <- link[25:48, ] -tree_choices = as.matrix(read.csv("/Users/paleolab/pecan/models/linkages/tests/testthat/LINKAGES_tree_choices.csv",header=FALSE)) -tree_names = tree_choices[link[(2*(nrow(link)-2)/4)+2,2:11],1] -biomass=link[((2*(nrow(link)-2)/4)+3):((3*(nrow(link)-2)/4)+2),] -colnames(biomass) = c("Year",tree_names) -biomass_cis = link[((3*(nrow(link)-2)/4)+3):nrow(link),] + par(mfrow = c(3, 3)) + for (i in 2:10) { + plot(x, ecol_proc[, i], typ = "l", ylim = c(min(ecol_proc[, i] - ecol_proc_cis[, i]), max(ecol_proc[, i] + ecol_proc_cis[, i])), main = colnames(ecol_proc)[i], ylab = NA, xlab = "Year") + lines(x, ecol_proc[, i] - ecol_proc_cis[, i], lty = 3, col = "blue") + lines(x, ecol_proc[, i] + ecol_proc_cis[, i], lty = 3, col = "blue") + # abline(v=ipolat_nums,lty=4) + } -par(mfrow=c(1,2)) -plot(x,biomass[,2],type="l",lwd=4,main=NA,xlab="Years",ylab="Average Biomass",ylim=c(0,max(test_biomass[,2:11]))) -lines(x,biomass[,3],col="red",lwd=4) -lines(x,biomass[,4],col="yellow",lwd=4) -lines(x,biomass[,5],col="blue",lwd=4) -lines(x,biomass[,6],col="green",lwd=4) -lines(x,biomass[,7],col="purple",lwd=4) -lines(x,biomass[,8],col="gray",lwd=4) -lines(x,biomass[,9],col="orange",lwd=4) -lines(x,biomass[,10],col="lightblue",lwd=4) -lines(x,biomass[,11],col="pink",lwd=4) -plot.new() -legend("center",c(colnames(biomass[,2:11])),lwd=rep(4,9),lty=rep(1,9),col=c("black","red","yellow","blue","green","purple","gray","orange","lightblue","pink"),xpd=TRUE) + tree_choices <- as.matrix(read.csv("/Users/paleolab/pecan/models/linkages/tests/testthat/LINKAGES_tree_choices.csv", header = FALSE)) + tree_names <- tree_choices[link[(2 * (nrow(link) - 2) / 4) + 2, 2:11], 1] + biomass <- link[((2 * (nrow(link) - 2) / 4) + 3):((3 * (nrow(link) - 2) / 4) + 2), ] + colnames(biomass) <- c("Year", tree_names) + biomass_cis <- link[((3 * (nrow(link) - 2) / 4) + 3):nrow(link), ] -#to check to see if the model2netCDF was working -#nc_open(paste0(outdir,"1.nc")) + par(mfrow = c(1, 2)) + plot(x, biomass[, 2], type = "l", lwd = 4, main = NA, xlab = "Years", ylab = "Average Biomass", ylim = c(0, max(test_biomass[, 2:11]))) + lines(x, biomass[, 3], col = "red", lwd = 4) + lines(x, biomass[, 4], col = "yellow", lwd = 4) + lines(x, biomass[, 5], col = "blue", lwd = 4) + lines(x, biomass[, 6], col = "green", lwd = 4) + lines(x, biomass[, 7], col = "purple", lwd = 4) + lines(x, biomass[, 8], col = "gray", lwd = 4) + lines(x, biomass[, 9], col = "orange", lwd = 4) + lines(x, biomass[, 10], col = "lightblue", lwd = 
4) + lines(x, biomass[, 11], col = "pink", lwd = 4) + plot.new() + legend("center", c(colnames(biomass[, 2:11])), lwd = rep(4, 9), lty = rep(1, 9), col = c("black", "red", "yellow", "blue", "green", "purple", "gray", "orange", "lightblue", "pink"), xpd = TRUE) -} \ No newline at end of file + # to check to see if the model2netCDF was working + # nc_open(paste0(outdir,"1.nc")) +} diff --git a/models/linkages/inst/remove_files.R b/models/linkages/inst/remove_files.R index 0c879e4c79d..c349926dfc8 100644 --- a/models/linkages/inst/remove_files.R +++ b/models/linkages/inst/remove_files.R @@ -1,14 +1,14 @@ +for (i in 1:nens) { + # file.remove(file.path(outdir,run.id[[i]],"linkages.out.Rdata")) + # file.remove(file.path(outdir,run.id[[i]],"1860linkages.out.Rdata")) -for(i in 1:nens){ - #file.remove(file.path(outdir,run.id[[i]],"linkages.out.Rdata")) - #file.remove(file.path(outdir,run.id[[i]],"1860linkages.out.Rdata")) - - file.rename(file.path(outdir,run.id[[i]],"1910linkages.out.Rdata"), - file.path(outdir,run.id[[i]],"linkages.out.Rdata")) #save original output - for(t in 1:15){ - file.remove(file.path(outdir,run.id[[i]],paste0(total.time[t],".nc"))) - file.remove(file.path(outdir,run.id[[i]],paste0(total.time[t],".nc.var"))) - file.remove(file.path(outdir,run.id[[i]],paste0(total.time[t],"linkages.out.Rdata"))) + file.rename( + file.path(outdir, run.id[[i]], "1910linkages.out.Rdata"), + file.path(outdir, run.id[[i]], "linkages.out.Rdata") + ) # save original output + for (t in 1:15) { + file.remove(file.path(outdir, run.id[[i]], paste0(total.time[t], ".nc"))) + file.remove(file.path(outdir, run.id[[i]], paste0(total.time[t], ".nc.var"))) + file.remove(file.path(outdir, run.id[[i]], paste0(total.time[t], "linkages.out.Rdata"))) } - } diff --git a/models/linkages/inst/setup_da.R b/models/linkages/inst/setup_da.R index 312218e46e8..7161662ff28 100644 --- a/models/linkages/inst/setup_da.R +++ b/models/linkages/inst/setup_da.R @@ -1,7 +1,9 @@ -#plots a confidence interval around an x-y plot (e.g. a timeseries) -ciEnvelope <- function(x,ylo,yhi,...){ - polygon(cbind(c(x, rev(x), x[1]), c(ylo, rev(yhi), - ylo[1])), border = NA,...) +# plots a confidence interval around an x-y plot (e.g. a timeseries) +ciEnvelope <- function(x, ylo, yhi, ...) { + polygon(cbind(c(x, rev(x), x[1]), c( + ylo, rev(yhi), + ylo[1] + )), border = NA, ...) } #--------------------------------------------------------------------------------# @@ -12,96 +14,100 @@ options(warn = 1, keep.source = TRUE, error = quote({ })) status.start <- function(name) { - cat(paste(name, format(Sys.time(), "%F %T"), sep="\t"), file=file.path(settings$outdir, "STATUS"), append=TRUE) + cat(paste(name, format(Sys.time(), "%F %T"), sep = "\t"), file = file.path(settings$outdir, "STATUS"), append = TRUE) } -status.end <- function(status="DONE") { - cat(paste("", format(Sys.time(), "%F %T"), status, "\n", sep="\t"), file=file.path(settings$outdir, "STATUS"), append=TRUE) +status.end <- function(status = "DONE") { + cat(paste("", format(Sys.time(), "%F %T"), status, "\n", sep = "\t"), file = file.path(settings$outdir, "STATUS"), append = TRUE) } #---------------- Load libraries. 
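# A short usage sketch for ciEnvelope() as defined above: shade a +/- 1 band
# around a hypothetical time series, then overlay the series itself.
x <- 1:50
y <- cumsum(rnorm(50))
plot(x, y, type = "n", xlab = "time", ylab = "state")
ciEnvelope(x, y - 1, y + 1, col = "lightgrey")
lines(x, y, lwd = 2)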
-----------------------------------------------------------------# library(PEcAn.all) library(PEcAnAssimSequential) -#library(PEcAn.visualization) +# library(PEcAn.visualization) library(mvtnorm) library(rjags) library(reshape2) library(PEcAn.LINKAGES) #--------------------------------------------------------------------------------------------------# # -source('~/pecan/modules/assim.sequential/R/sda.enkf.R') +source("~/pecan/modules/assim.sequential/R/sda.enkf.R") ######### sipnet settings <- read.settings("/fs/data2/output//PEcAn_1000002613/pecan.SDA.xml") settings$ensemble$size <- 50 -settings$state.data.assimilation$n.ensemble<- 100 +settings$state.data.assimilation$n.ensemble <- 100 load(file.path(settings$outdir, "samples.Rdata")) -pick.trait.params <- c(names(ensemble.samples[[1]]),names(ensemble.samples[[2]])) +pick.trait.params <- c(names(ensemble.samples[[1]]), names(ensemble.samples[[2]])) obs.mean <- list() -for(i in 1:10) { - obs.mean[[i]]<-c(100+i, 5+i) - names(obs.mean[[i]])<-c("NPP",'plantWood') - } +for (i in 1:10) { + obs.mean[[i]] <- c(100 + i, 5 + i) + names(obs.mean[[i]]) <- c("NPP", "plantWood") +} obs.cov <- list() -for(i in 1:10) obs.cov[[i]]<- diag(c(.1,.08)) +for (i in 1:10) obs.cov[[i]] <- diag(c(.1, .08)) -sda.enkf(settings=settings, obs.mean = obs.mean, - obs.cov = obs.cov, IC = IC, Q = NULL) +sda.enkf( + settings = settings, obs.mean = obs.mean, + obs.cov = obs.cov, IC = IC, Q = NULL +) ######### linkages settings <- read.settings("/fs/data2/output//PEcAn_1000002229/pecan.xml") settings$ensemble$size <- 30 -IC = matrix(NA,as.numeric(settings$ensemble$size),length(settings$pfts)) -settings$run$start.date <-"1960/01/01" -settings$run$end.date <-"1960/12/31" -settings$ensemble$start.date <-"1960/01/01" -settings$ensemble$end.date <-"1960/12/31" -variables <- c("AGB.pft","TotSoilCarb") +IC <- matrix(NA, as.numeric(settings$ensemble$size), length(settings$pfts)) +settings$run$start.date <- "1960/01/01" +settings$run$end.date <- "1960/12/31" +settings$ensemble$start.date <- "1960/01/01" +settings$ensemble$end.date <- "1960/12/31" +variables <- c("AGB.pft", "TotSoilCarb") processvar <- TRUE pick.trait.params <- c("G") -spp.params.default <- read.csv(system.file("spp_matrix.csv", package = "linkages")) #default spp.params #this doesn't work unless linkages is in my home directory -sample_parameters=TRUE +spp.params.default <- read.csv(system.file("spp_matrix.csv", package = "linkages")) # default spp.params #this doesn't work unless linkages is in my home directory +sample_parameters <- TRUE ################################################## load("/home/araiho/linkages_lyford_summary.Rdata") row.keep <- list() -for(i in 1:15){ - row.keep[[i]]<-grep(rownames(ab_mat)[i],spp.params.default[,2])[1] +for (i in 1:15) { + row.keep[[i]] <- grep(rownames(ab_mat)[i], spp.params.default[, 2])[1] } -dist.mat<-spp.params.default[unlist(row.keep),] -dist.mat<-dist.mat[-9,] -rownames(dist.mat)<-dist.mat[,1] -dist(dist.mat,method="canberra") +dist.mat <- spp.params.default[unlist(row.keep), ] +dist.mat <- dist.mat[-9, ] +rownames(dist.mat) <- dist.mat[, 1] +dist(dist.mat, method = "canberra") -new.names <- spp.params.default[unlist(row.keep),1] -new.names[2] <- spp.params.default[18,1] +new.names <- spp.params.default[unlist(row.keep), 1] +new.names[2] <- spp.params.default[18, 1] rm.spp <- which(is.na(new.names)) -new.names<-new.names[-rm.spp] +new.names <- new.names[-rm.spp] -ab_mat<-ab_mat[-rm.spp,] +ab_mat <- ab_mat[-rm.spp, ] -rownames(ab_mat)<- paste0("AGB.pft.",new.names) 
+rownames(ab_mat) <- paste0("AGB.pft.", new.names) obs.mean <- list() -for(i in 1:ncol(ab_mat)){ - obs.mean[[i]] <- ab_mat[,i] +for (i in 1:ncol(ab_mat)) { + obs.mean[[i]] <- ab_mat[, i] } -cov_array<-cov_array[-rm.spp,-rm.spp,] -colnames(cov_array)<-new.names -rownames(cov_array)<-new.names +cov_array <- cov_array[-rm.spp, -rm.spp, ] +colnames(cov_array) <- new.names +rownames(cov_array) <- new.names obs.cov <- list() -for(i in 1:dim(cov_array)[3]){ - obs.cov[[i]] <- cov_array[,,i] +for (i in 1:dim(cov_array)[3]) { + obs.cov[[i]] <- cov_array[, , i] } -sda.enkf(settings=settings,obs.mean = obs.mean, - obs.cov = obs.cov, pick.trait.params = c("G"), - given.process.variance = NULL) +sda.enkf( + settings = settings, obs.mean = obs.mean, + obs.cov = obs.cov, pick.trait.params = c("G"), + given.process.variance = NULL +) -################################################## \ No newline at end of file +################################################## diff --git a/models/linkages/tests/testthat/test.met2model.LINKAGES.R b/models/linkages/tests/testthat/test.met2model.LINKAGES.R index b961449ebcf..5d79e60c4c8 100644 --- a/models/linkages/tests/testthat/test.met2model.LINKAGES.R +++ b/models/linkages/tests/testthat/test.met2model.LINKAGES.R @@ -6,7 +6,8 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("Met conversion runs without error", { nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" @@ -16,15 +17,13 @@ test_that("Met conversion runs without error", { expect_true(file.exists(result[["file"]][[1]])) }) -if(FALSE){ - - start.year = 850 - end.year = 2010 - site = "PUN" - - in.path = paste0("/Users/paleolab/Linkages/phase1a_met_drivers_v4.1/",site,"/") - outfolder = paste0("/Users/paleolab/Linkages/met2model_output/",site,"/") - +if (FALSE) { + start.year <- 850 + end.year <- 2010 + site <- "PUN" + + in.path <- paste0("/Users/paleolab/Linkages/phase1a_met_drivers_v4.1/", site, "/") + outfolder <- paste0("/Users/paleolab/Linkages/met2model_output/", site, "/") + met2model.LINKAGES(site = site, in.path = in.path, outfolder = outfolder, start.year = start.year, end.year = end.year) - } diff --git a/models/lpjguess/R/met2model.LPJGUESS.R b/models/lpjguess/R/met2model.LPJGUESS.R index ec898e203fb..20ace2a4aed 100644 --- a/models/lpjguess/R/met2model.LPJGUESS.R +++ b/models/lpjguess/R/met2model.LPJGUESS.R @@ -20,8 +20,6 @@ ##' @importFrom ncdf4 ncvar_get ncvar_def ncdim_def ncatt_get ncatt_put nc_close met2model.LPJGUESS <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, ...) 
{ - - print("START met2model.LPJGUESS") start_date <- as.POSIXlt(start_date, tz = "UTC") end_date <- as.POSIXlt(end_date, tz = "UTC") @@ -29,31 +27,37 @@ met2model.LPJGUESS <- function(in.path, in.prefix, outfolder, start_date, end_da end_year <- lubridate::year(end_date) year <- sprintf("%04d", seq(start_year, end_year, 1)) - nyear <- length(year) #number of years to simulate + nyear <- length(year) # number of years to simulate ## LPJ-GUESS looks for different input files for different climate variables out.file <- out.files.full <- list() var.names <- c("tmp", "pre", "cld") n.var <- length(var.names) - long.names <- c("air_temperature", - "precipitation_flux", - "surface_downwelling_shortwave_flux_in_air") + long.names <- c( + "air_temperature", + "precipitation_flux", + "surface_downwelling_shortwave_flux_in_air" + ) for (i in seq_len(n.var)) { out.file[[i]] <- paste(in.prefix, sprintf("%04d", start_year), end_year, var.names[[i]], - "nc", sep = ".") + "nc", + sep = "." + ) } for (i in seq_len(n.var)) { out.files.full[[i]] <- file.path(outfolder, out.file[[i]]) } - results <- data.frame(file = unlist(out.files.full), - host = PEcAn.remote::fqdn(), - mimetype = "application/x-netcdf", - formatname = "lpj-guess.metfile", - startdate = start_date, - enddate = end_date, - dbfile.name = unlist(out.file), - stringsAsFactors = FALSE) + results <- data.frame( + file = unlist(out.files.full), + host = PEcAn.remote::fqdn(), + mimetype = "application/x-netcdf", + formatname = "lpj-guess.metfile", + startdate = start_date, + enddate = end_date, + dbfile.name = unlist(out.file), + stringsAsFactors = FALSE + ) print("internal results") print(results) @@ -75,8 +79,9 @@ met2model.LPJGUESS <- function(in.path, in.prefix, outfolder, start_date, end_da ## calculate time step from the time-dimension length, check for leap year tstep <- ifelse(ncin[[1]]$dim$time$len %% 365 == 0, - ncin[[1]]$dim$time$len / 365, - ncin[[1]]$dim$time$len / 366) + ncin[[1]]$dim$time$len / 365, + ncin[[1]]$dim$time$len / 366 + ) ## read climate data nc.tmp <- lapply(ncin, ncdf4::ncvar_get, long.names[1]) @@ -99,20 +104,22 @@ met2model.LPJGUESS <- function(in.path, in.prefix, outfolder, start_date, end_da ## write climate data define dimensions - latdim <- ncdf4::ncdim_def(name = "lat", "degrees_north", as.double(lat)) - londim <- ncdf4::ncdim_def(name = "lon", "degrees_east", as.double(lon)) + latdim <- ncdf4::ncdim_def(name = "lat", "degrees_north", as.double(lat)) + londim <- ncdf4::ncdim_def(name = "lon", "degrees_east", as.double(lon)) timedim <- ncdf4::ncdim_def("time", paste0("days since ", start_year - 1, "-12-31", sep = ""), as.double(c(1:length(unlist(tmp.list))))) fillvalue <- 9.96920996838687e+36 for (n in seq_len(n.var)) { # define variable - var.def <- ncdf4::ncvar_def(name = var.names[n], - units = var.units[n], - dim = (list(londim, latdim, timedim)), - fillvalue, long.names[n], - verbose = verbose, - prec = "float") + var.def <- ncdf4::ncvar_def( + name = var.names[n], + units = var.units[n], + dim = (list(londim, latdim, timedim)), + fillvalue, long.names[n], + verbose = verbose, + prec = "float" + ) # create netCD file for LPJ-GUESS ncfile <- ncdf4::nc_create(out.files.full[[n]], vars = var.def, force_v4 = TRUE) diff --git a/models/lpjguess/R/model2netcdf.LPJGUESS.R b/models/lpjguess/R/model2netcdf.LPJGUESS.R index c5202833104..cb16667dcc4 100644 --- a/models/lpjguess/R/model2netcdf.LPJGUESS.R +++ b/models/lpjguess/R/model2netcdf.LPJGUESS.R @@ -1,5 +1,5 @@ ##' Convert LPJ-GUESS output to netCDF -##' +##' ##' 
@name model2netcdf.LPJGUESS ##' @title Function to convert LPJ-GUESS model output to standard netCDF format ##' @@ -12,92 +12,92 @@ ##' ##' @author Istem Fer model2netcdf.LPJGUESS <- function(outdir, sitelat, sitelon, start_date, end_date) { - ### Read in model output in LPJ-GUESS format lpjguess.out.files <- list.files(outdir, pattern = "\\.out$") - + if (length(lpjguess.out.files) == 0) { PEcAn.logger::logger.error("No output files found at ", outdir) } - + lpjguess.output <- lapply(file.path(outdir, lpjguess.out.files), read.table, header = TRUE, sep = "") # n.outputs <- length(lpjguess.output) m.to.s <- 2592000 - + years <- seq(lubridate::year(start_date), lubridate::year(end_date)) - + ### Unit conversions - + # mgpp 'monthly gross primary production' in kgC/m2/month to GPP kgC/m2/s if ("mgpp.out" %in% lpjguess.out.files) { gpp <- lpjguess.output[[which(lpjguess.out.files == "mgpp.out")]][, 4:15] / m.to.s } - + # mnpp 'monthly net primary production' in kgC/m2/month to NPP kgC/m2/s if ("mnpp.out" %in% lpjguess.out.files) { npp <- lpjguess.output[[which(lpjguess.out.files == "mnpp.out")]][, 4:15] / m.to.s } - + # mra 'monthly autotrophic respiration' in kgC/m2/month to AutoResp kgC/m2/s if ("mra.out" %in% lpjguess.out.files) { arp <- lpjguess.output[[which(lpjguess.out.files == "mra.out")]][, 4:15] / m.to.s } - + # mrh 'monthly heterotrophic respiration' in kgC/m2/month to HeteroResp kgC/m2/s if ("mrh.out" %in% lpjguess.out.files) { hrp <- lpjguess.output[[which(lpjguess.out.files == "mrh.out")]][, 4:15] / m.to.s } - + # mnee 'monthly net ecosystem C exchange' in kgC/m2/month to NEE kgC/m2/s if ("mnee.out" %in% lpjguess.out.files) { nee <- lpjguess.output[[which(lpjguess.out.files == "mnee.out")]][, 4:15] / m.to.s } - + # mlai 'monthly Leaf Area Index' in m2/m2 to LAI m2/m2 if ("mnee.out" %in% lpjguess.out.files) { lai <- lpjguess.output[[which(lpjguess.out.files == "mlai.out")]][, 4:15] } - + ### Loop over years in LPJ-GUESS output to create separate netCDF outputs for (y in years) { - if (file.exists(file.path(outdir, paste(y, "nc", sep = ".")))) { next } - + print(paste("---- Processing year: ", y)) - + ## Set up outputs for netCDF file in appropriate units - + ## TODO: generalize for all possible outputs, both yearly and monthly - + output <- list() - output[[1]] <- gpp[which(years == y), ] # GPP in kgC/m2/s - output[[2]] <- npp[which(years == y), ] # NPP in kgC/m2/s - output[[3]] <- arp[which(years == y), ] # AutoResp in kgC/m2/s - output[[4]] <- hrp[which(years == y), ] # HeteroResp in kgC/m2/s - output[[5]] <- nee[which(years == y), ] # NEE in kgC/m2/s - output[[6]] <- lai[which(years == y), ] # LAI in m2/m2 - - if(lubridate::leap_year(y)){ + output[[1]] <- gpp[which(years == y), ] # GPP in kgC/m2/s + output[[2]] <- npp[which(years == y), ] # NPP in kgC/m2/s + output[[3]] <- arp[which(years == y), ] # AutoResp in kgC/m2/s + output[[4]] <- hrp[which(years == y), ] # HeteroResp in kgC/m2/s + output[[5]] <- nee[which(years == y), ] # NEE in kgC/m2/s + output[[6]] <- lai[which(years == y), ] # LAI in m2/m2 + + if (lubridate::leap_year(y)) { month_days <- c(001, 032, 061, 092, 122, 153, 183, 214, 245, 275, 306, 336) } else { month_days <- c(001, 032, 060, 091, 121, 152, 182, 213, 244, 274, 305, 335) } # ******************** Declare netCDF dimensions and variables ********************# - t <- ncdf4::ncdim_def(name = "time", - units = paste0("days since ", y, "-01-01 00:00:00"), - month_days, - calendar = "standard", - unlim = TRUE) + t <- ncdf4::ncdim_def( + name = "time", + units = 
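# The conversion used below: LPJ-GUESS monthly fluxes (kgC/m2/month) are divided
# by the number of seconds in a 30-day month to give the standard kgC/m2/s.
# The example GPP value is hypothetical.
m.to.s <- 30 * 24 * 60 * 60 # 2592000 s
gpp_month <- 0.25           # kgC/m2/month
gpp_month / m.to.s          # ~9.6e-08 kgC/m2/s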
paste0("days since ", y, "-01-01 00:00:00"), + month_days, + calendar = "standard", + unlim = TRUE + ) lat <- ncdf4::ncdim_def("lat", "degrees_north", vals = as.numeric(sitelat), longname = "station_latitude") lon <- ncdf4::ncdim_def("lon", "degrees_east", vals = as.numeric(sitelon), longname = "station_longitude") - + mstmipvar <- PEcAn.utils::mstmipvar - + dims <- list(lon = lon, lat = lat, time = t) - + nc_var <- list() nc_var[[1]] <- PEcAn.utils::to_ncvar("GPP", dims) nc_var[[2]] <- PEcAn.utils::to_ncvar("NPP", dims) @@ -105,9 +105,9 @@ model2netcdf.LPJGUESS <- function(outdir, sitelat, sitelon, start_date, end_date nc_var[[4]] <- PEcAn.utils::to_ncvar("HeteroResp", dims) nc_var[[5]] <- PEcAn.utils::to_ncvar("NEE", dims) nc_var[[6]] <- PEcAn.utils::to_ncvar("LAI", dims) - + # ******************** Declare netCDF variables ********************# - + ### Output netCDF data nc <- ncdf4::nc_create(file.path(outdir, paste(y, "nc", sep = ".")), nc_var) varfile <- file(file.path(outdir, paste(y, "nc", "var", sep = ".")), "w") @@ -118,5 +118,5 @@ model2netcdf.LPJGUESS <- function(outdir, sitelat, sitelon, start_date, end_date } close(varfile) ncdf4::nc_close(nc) - } ### End of year loop + } ### End of year loop } # model2netcdf.LPJGUESS diff --git a/models/lpjguess/R/readStateBinary.LPJGUESS.R b/models/lpjguess/R/readStateBinary.LPJGUESS.R index ffba146ac52..e4070ab05ae 100644 --- a/models/lpjguess/R/readStateBinary.LPJGUESS.R +++ b/models/lpjguess/R/readStateBinary.LPJGUESS.R @@ -10,328 +10,327 @@ ##' @export ##' @param out.path location on disk where model run outputs are stored ##' @param npft number of pfts specified in instruction file -##' @return Patchpft_list state variables common to all individuals of a particular PFT +##' @return Patchpft_list state variables common to all individuals of a particular PFT ##' @author Istem Fer -readStateBinary <- function(out.path, npft){ +readStateBinary <- function(out.path, npft) { # test path - out.path = "/fs/data2/output/PEcAn_1000002393/out/1000458390" + out.path <- "/fs/data2/output/PEcAn_1000002393/out/1000458390" setwd(out.path) - - + + Patchpft_list <- list() Vegetation_list <- list() Individual_list <- list() Soil_list <- list() Sompool_list <- list() - SompoolCent_list <- list() + SompoolCent_list <- list() Fluxes_list <- list() - + # open connection to the binary state file zz <- file("0.state", "rb") - + ##################### Class : Climate ##################### Climate <- list() - + # mean air temperature today (deg C) Climate$temp <- readBin(zz, double(), 1, size = 8) - + # total daily net downward shortwave solar radiation today (J/m2/day) Climate$rad <- readBin(zz, double(), 1, size = 8) - + # total daily photosynthetically-active radiation today (J/m2/day) Climate$par <- readBin(zz, double(), 1, size = 8) - + # precipitation today (mm) Climate$prec <- readBin(zz, double(), 1, size = 8) - + # day length today (h) Climate$daylength <- readBin(zz, double(), 1, size = 8) - + # atmospheric ambient CO2 concentration today (ppmv) Climate$co2 <- readBin(zz, double(), 1, size = 8) - + # latitude (degrees; +=north, -=south) Climate$lat <- readBin(zz, double(), 1, size = 8) - + # Insolation today, see also instype Climate$insol <- readBin(zz, double(), 1, size = 8) - + # Type of insolation Climate$instype <- readBin(zz, integer(), 1, size = 4) - + # equilibrium evapotranspiration today (mm/day) Climate$eet <- readBin(zz, double(), 1, size = 8) - + # mean temperature for the last 31 days (deg C) Climate$mtemp <- readBin(zz, double(), 1, size = 
8) - + # mean of lowest mean monthly temperature for the last 20 years (deg C) Climate$mtemp_min20 <- readBin(zz, double(), 1, size = 8) - + # mean of highest mean monthly temperature for the last 20 years (deg C) Climate$mtemp_max20 <- readBin(zz, double(), 1, size = 8) - + # highest mean monthly temperature for the last 12 months (deg C) Climate$mtemp_max <- readBin(zz, double(), 1, size = 8) - - # accumulated growing degree day sum on 5 degree base + + # accumulated growing degree day sum on 5 degree base Climate$gdd5 <- readBin(zz, double(), 1, size = 8) - + # total gdd5 (accumulated) for this year (reset 1 January) Climate$agdd5 <- readBin(zz, double(), 1, size = 8) - + # number of days with temperatures <5 deg C bytes[18,1] Climate$chilldays <- readBin(zz, integer(), 1, size = 4) - + # true if chill day count may be reset by temperature fall below 5 deg C Climate$ifsensechill <- readBin(zz, logical(), 1, size = 1) - + # Respiration response to today's air temperature incorporating damping of Q10 due to temperature acclimation (Lloyd & Taylor 1994) Climate$gtemp <- readBin(zz, double(), 1, size = 8) - + # daily temperatures for the last 31 days (deg C) Climate$dtemp_31 <- readBin(zz, double(), 31, size = 8) - + Climate$unk1 <- readBin(zz, double(), 1, size = 8) Climate$unk2 <- readBin(zz, logical(), 1, size = 1) - + # minimum monthly temperatures for the last 20 years (deg C) Climate$mtemp_min_20 <- readBin(zz, double(), 20, size = 8) - + # maximum monthly temperatures for the last 20 years (deg C) - bytes[25,1] Climate$mtemp_max_20 <- readBin(zz, double(), 20, size = 8) - + # minimum monthly temperature for the last 12 months (deg C) Climate$mtemp_min <- readBin(zz, double(), 1, size = 8) - + # mean of monthly temperatures for the last 12 months (deg C) Climate$atemp_mean <- readBin(zz, double(), 1, size = 8) - - - - + + + + # Saved parameters used by function daylengthinsoleet Climate$sinelat <- readBin(zz, double(), 1, size = 8) Climate$cosinelat <- readBin(zz, double(), 1, size = 8) - + Climate$qo <- readBin(zz, double(), 365, size = 8) Climate$u <- readBin(zz, double(), 365, size = 8) Climate$v <- readBin(zz, double(), 365, size = 8) Climate$hh <- readBin(zz, double(), 365, size = 8) Climate$sinehh <- readBin(zz, double(), 365, size = 8) Climate$daylength_save <- readBin(zz, double(), 365, size = 8) - + # indicates whether saved values exist for this day - bytes[36,1] - Climate$doneday <- readBin(zz, logical(), 365, size = 1) - - + Climate$doneday <- readBin(zz, logical(), 365, size = 1) + + # /// diurnal temperature range, used in daily/monthly BVOC (deg C) # double dtr; - # + # # /// Sub-daily temperature (deg C) (\see temp) # std::vector temps; - # + # # /// Sub-daily insolation (\see insol) # std::vector insols; - # + # # /// Sub-daily PAR (\see par) # std::vector pars; - # + # # /// Sub-daily net downward shortwave solar radiation (\see rad) # std::vector rads; - # + # # /// Sub-daily respiration response (\see gtemp) # std::vector gtemps; - - + + # annual nitrogen deposition (kgN/m2/year) Climate$andep <- readBin(zz, double(), 1, size = 8) # daily nitrogen deposition (kgN/m2) Climate$dndep <- readBin(zz, double(), 1, size = 8) - - + + # /// annual nitrogen fertilization (kgN/m2/year) - Climate$anfert<- readBin(zz, double(), 1, size = 8) + Climate$anfert <- readBin(zz, double(), 1, size = 8) # /// daily nitrogen fertilization (kgN/m2/year) Climate$dnfert <- readBin(zz, double(), 1, size = 8) - + ##################### Class : Gridcell ##################### Gridcell <- list() 
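# A minimal round-trip sketch of the readBin() pattern used throughout this
# reader: the LPJ-GUESS state file is a flat sequence of fixed-width fields, so
# every value is read with the matching type and byte size (double = 8, int = 4,
# logical/bool = 1). The values written here are hypothetical.
tmp <- tempfile()
zz <- file(tmp, "wb")
writeBin(c(15.2, 3.7), zz, size = 8)  # e.g. temp and rad
writeBin(1L, zz, size = 4)            # e.g. instype
close(zz)

zz <- file(tmp, "rb")
temp    <- readBin(zz, double(), 1, size = 8)
rad     <- readBin(zz, double(), 1, size = 8)
instype <- readBin(zz, integer(), 1, size = 4)
close(zz)
unlink(tmp)
c(temp, rad, instype)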
- - Gridcell$landcoverfrac <- readBin(zz, double(), 6, size = 8) - Gridcell$landcoverfrac_old <- readBin(zz, double(), 6, size = 8) - Gridcell$LC_updated <- readBin(zz, logical(), 1, size = 1) + + Gridcell$landcoverfrac <- readBin(zz, double(), 6, size = 8) + Gridcell$landcoverfrac_old <- readBin(zz, double(), 6, size = 8) + Gridcell$LC_updated <- readBin(zz, logical(), 1, size = 1) Gridcell$seed <- readBin(zz, integer(), 1, size = 8) # C type long - + ##################### Class : Gridcellpft ##################### Gridcellpft <- list() - + # annual degree day sum above threshold damaging temperature - Gridcellpft$addtw <- rep(NA,npft) # npft : number of PFTs - # Michaelis-Menten kinetic parameters - Gridcellpft$Km <- rep(NA,npft) - - for(p in 1:npft){ + Gridcellpft$addtw <- rep(NA, npft) # npft : number of PFTs + # Michaelis-Menten kinetic parameters + Gridcellpft$Km <- rep(NA, npft) + + for (p in 1:npft) { Gridcellpft$addtw[p] <- readBin(zz, double(), 1, size = 8) Gridcellpft$Km[p] <- readBin(zz, double(), 1, size = 8) } - - + + ##################### Class : Stand ##################### Stand <- list() - + Stand$unk1 <- readBin(zz, integer(), 1, size = 4) # ??? - Stand$unk2 <- readBin(zz, integer(), 1, size = 4) # ??? - + Stand$unk2 <- readBin(zz, integer(), 1, size = 4) # ??? + ##################### Class : Standpft ##################### Standpft <- list() - Standpft$cmass_repr <- rep(NA,npft) - Standpft$anetps_ff_max <- rep(NA,npft) - Standpft$fpc_total <- rep(NA,npft) - Standpft$active <- rep(NA,npft) - - for(p in 1:npft){ + Standpft$cmass_repr <- rep(NA, npft) + Standpft$anetps_ff_max <- rep(NA, npft) + Standpft$fpc_total <- rep(NA, npft) + Standpft$active <- rep(NA, npft) + + for (p in 1:npft) { # net C allocated to reproduction for this PFT in all patches of this stand this year (kgC/m2) Standpft$cmass_repr[p] <- readBin(zz, double(), 1, size = 8) - + # maximum value of Patchpft::anetps_ff for this PFT in this stand so far in the simulation (kgC/m2/year) Standpft$anetps_ff_max[p] <- readBin(zz, double(), 1, size = 8) - + # FPC sum for this PFT as average for stand Standpft$fpc_total[p] <- readBin(zz, double(), 1, size = 8) - + # Is this PFT allowed to grow in this stand? 
Standpft$active[p] <- readBin(zz, logical(), 1, size = 1) - } - + } + # number of patches to loop around - nofpatch <- readBin(zz, integer(), 1, size = 4) - - for(pat in 1:nofpatch){ + nofpatch <- readBin(zz, integer(), 1, size = 4) + + for (pat in 1:nofpatch) { Patchpft_list[[pat]] <- getClass_Patchpft() Vegetation_list[[pat]] <- getClass_Vegetation() Individual_list[[pat]] <- getClass_Individual(Vegetation_list[[pat]]$indv) Soil_list[[pat]] <- getClass_Soil() Sompool_list[[pat]] <- getClass_Sompool() - SompoolCent_list[[pat]] <- getClass_SompoolCent() + SompoolCent_list[[pat]] <- getClass_SompoolCent() Fluxes_list[[pat]] <- getClass_Fluxes() } - + # close connection to the binary state file close(zz) - + return(Patchpft_list) } ##################### Class : Patchpft ##################### -getClass_Patchpft <- function(){ - +getClass_Patchpft <- function() { Patchpft <- list() - + # potential annual net assimilation (leaf-level net photosynthesis) at forest floor (kgC/m2/year) - Patchpft$anetps_ff <- rep(NA,npft) - + Patchpft$anetps_ff <- rep(NA, npft) + # water stress parameter (0-1 range; 1=minimum stress) - Patchpft$wscal <- rep(NA,npft) - + Patchpft$wscal <- rep(NA, npft) + # running sum (converted to annual mean) for wscal - Patchpft$wscal_mean <- rep(NA,npft) - - + Patchpft$wscal_mean <- rep(NA, npft) + + # potential annual net assimilation at forest floor averaged over establishment interval (kgC/m2/year) - Patchpft$anetps_ff_est <- rep(NA,npft) - + Patchpft$anetps_ff_est <- rep(NA, npft) + # first-year value of anetps_ff_est - Patchpft$anetps_ff_est_initial <- rep(NA,npft) - - + Patchpft$anetps_ff_est_initial <- rep(NA, npft) + + # annual mean wscal averaged over establishment interval - Patchpft$wscal_mean_est <- rep(NA,npft) - - # vegetation phenological state (fraction of potential leaf cover), updated daily - Patchpft$phen <- rep(NA,npft) - - # annual sum of daily fractional leaf cover - Patchpft$aphen <- rep(NA,npft) - + Patchpft$wscal_mean_est <- rep(NA, npft) + + # vegetation phenological state (fraction of potential leaf cover), updated daily + Patchpft$phen <- rep(NA, npft) + + # annual sum of daily fractional leaf cover + Patchpft$aphen <- rep(NA, npft) + # whether PFT can establish in this patch under current conditions - Patchpft$establish <- rep(NA,npft) - + Patchpft$establish <- rep(NA, npft) + # running total for number of saplings of this PFT to establish (cohort mode) - Patchpft$nsapling <- rep(NA,npft) - + Patchpft$nsapling <- rep(NA, npft) + # leaf-derived litter for PFT on modelled area basis (kgC/m2) - Patchpft$litter_leaf <- rep(NA,npft) - + Patchpft$litter_leaf <- rep(NA, npft) + # fine root-derived litter for PFT on modelled area basis (kgC/m2) - Patchpft$litter_root <- rep(NA,npft) - + Patchpft$litter_root <- rep(NA, npft) + # sapwood-derived litter for PFT on modelled area basis (kgC/m2) - Patchpft$litter_sap <- rep(NA,npft) - + Patchpft$litter_sap <- rep(NA, npft) + # year's sapwood-derived litter for PFT on modelled area basis (kgC/m2) - #Patchpft$litter_sap_year <- rep(NA,npft) - + # Patchpft$litter_sap_year <- rep(NA,npft) + # heartwood-derived litter for PFT on modelled area basis (kgC/m2) - Patchpft$litter_heart <- rep(NA,npft) - + Patchpft$litter_heart <- rep(NA, npft) + # year's heartwood-derived litter for PFT on modelled area basis (kgC/m2) - #Patchpft$litter_heart_year <- rep(NA,npft) - + # Patchpft$litter_heart_year <- rep(NA,npft) + # litter derived from allocation to reproduction for PFT on modelled area basis (kgC/m2) - Patchpft$litter_repr 
<- rep(NA,npft) - - + Patchpft$litter_repr <- rep(NA, npft) + + # non-FPC-weighted canopy conductance value for PFT under water-stress conditions (mm/s) - Patchpft$gcbase <- rep(NA,npft) - + Patchpft$gcbase <- rep(NA, npft) + # daily value of the above variable (mm/s) - Patchpft$gcbase_day <- rep(NA,npft) - + Patchpft$gcbase_day <- rep(NA, npft) + # two are extra, don't know which though # evapotranspirational "supply" function for this PFT today (mm/day) - Patchpft$wsupply <- rep(NA,npft) - Patchpft$wsupply_leafon <- rep(NA,npft) - + Patchpft$wsupply <- rep(NA, npft) + Patchpft$wsupply_leafon <- rep(NA, npft) + # fractional uptake of water from each soil layer today - Patchpft$fwuptake <- matrix(NA,nrow=npft,ncol=2) - - + Patchpft$fwuptake <- matrix(NA, nrow = npft, ncol = 2) + + # whether water-stress conditions for this PFT - Patchpft$wstress <- rep(NA,npft) + Patchpft$wstress <- rep(NA, npft) # daily version of the above variable - Patchpft$wstress_day <- rep(NA,npft) - + Patchpft$wstress_day <- rep(NA, npft) + # carbon depository for long-lived products like wood - Patchpft$harvested_products_slow <- rep(NA,npft) - - + Patchpft$harvested_products_slow <- rep(NA, npft) + + # # leaf-derived nitrogen litter for PFT on modelled area basis (kgN/m2) - Patchpft$nmass_litter_leaf <- rep(NA,npft) - + Patchpft$nmass_litter_leaf <- rep(NA, npft) + # # root-derived nitrogen litter for PFT on modelled area basis (kgN/m2) - Patchpft$nmass_litter_root <- rep(NA,npft) - + Patchpft$nmass_litter_root <- rep(NA, npft) + # # sapwood-derived nitrogen litter for PFT on modelled area basis (kgN/m2) - Patchpft$nmass_litter_sap <- rep(NA,npft) - + Patchpft$nmass_litter_sap <- rep(NA, npft) + # # year's sapwood-derived nitrogen litter for PFT on modelled area basis (kgN/m2) # Patchpft$nmass_litter_sap_year <- rep(NA,npft) - # + # # # heartwood-derived nitrogen litter for PFT on modelled area basis (kgN/m2) - Patchpft$nmass_litter_heart <- rep(NA,npft) - # + Patchpft$nmass_litter_heart <- rep(NA, npft) + # # # year's heartwood-derived nitrogen litter for PFT on modelled area basis (kgN/m2) # Patchpft$nmass_litter_heart_year <- rep(NA,npft) - - + + # nitrogen depository for long-lived products like wood - Patchpft$harvested_products_slow_nmass <- rep(NA,npft) - - - for(p in 1:npft){ + Patchpft$harvested_products_slow_nmass <- rep(NA, npft) + + + for (p in 1:npft) { Patchpft$anetps_ff[p] <- readBin(zz, double(), 1, size = 8) Patchpft$wscal[p] <- readBin(zz, double(), 1, size = 8) Patchpft$wscal_mean[p] <- readBin(zz, double(), 1, size = 8) @@ -347,449 +346,446 @@ getClass_Patchpft <- function(){ Patchpft$litter_sap[p] <- readBin(zz, double(), 1, size = 8) # Patchpft$litter_sap_year[p] <- readBin(zz, double(), 1, size = 8) Patchpft$litter_heart[p] <- readBin(zz, double(), 1, size = 8) - #Patchpft$litter_heart_year[p] <- readBin(zz, double(), 1, size = 8) + # Patchpft$litter_heart_year[p] <- readBin(zz, double(), 1, size = 8) Patchpft$litter_repr[p] <- readBin(zz, double(), 1, size = 8) - Patchpft$gcbase[p] <- readBin(zz, double(), 1, size = 8) - Patchpft$gcbase_day[p] <- readBin(zz, double(), 1, size = 8) - Patchpft$wsupply[p] <- readBin(zz, double(), 1, size = 8) - Patchpft$wsupply_leafon[p] <- readBin(zz, double(), 1, size = 8) - Patchpft$fwuptake[p,] <- readBin(zz, double(), 2, size = 8) - Patchpft$wstress[p] <- readBin(zz, logical(), 1, size = 1) - Patchpft$wstress_day[p] <- readBin(zz, logical(), 1, size = 1) + Patchpft$gcbase[p] <- readBin(zz, double(), 1, size = 8) + Patchpft$gcbase_day[p] <- readBin(zz, 
double(), 1, size = 8) + Patchpft$wsupply[p] <- readBin(zz, double(), 1, size = 8) + Patchpft$wsupply_leafon[p] <- readBin(zz, double(), 1, size = 8) + Patchpft$fwuptake[p, ] <- readBin(zz, double(), 2, size = 8) + Patchpft$wstress[p] <- readBin(zz, logical(), 1, size = 1) + Patchpft$wstress_day[p] <- readBin(zz, logical(), 1, size = 1) Patchpft$harvested_products_slow[p] <- readBin(zz, double(), 1, size = 8) Patchpft$nmass_litter_leaf[p] <- readBin(zz, double(), 1, size = 8) Patchpft$nmass_litter_root[p] <- readBin(zz, double(), 1, size = 8) Patchpft$nmass_litter_sap[p] <- readBin(zz, double(), 1, size = 8) - # Patchpft$nmass_litter_sap_year[p] <- readBin(zz, double(), 1, size = 8) - Patchpft$nmass_litter_heart[p] <- readBin(zz, double(), 1, size = 8) + # Patchpft$nmass_litter_sap_year[p] <- readBin(zz, double(), 1, size = 8) + Patchpft$nmass_litter_heart[p] <- readBin(zz, double(), 1, size = 8) # Patchpft$nmass_litter_heart_year[p] <- readBin(zz, double(), 1, size = 8) Patchpft$harvested_products_slow_nmass[p] <- readBin(zz, double(), 1, size = 8) } - + return(Patchpft) } ##################### Class : Vegetation ##################### -getClass_Vegetation <- function(){ - +getClass_Vegetation <- function() { Vegetation <- list() Vegetation$indv <- readBin(zz, integer(), 1, size = 4) - + return(Vegetation) } ##################### Class : Individual ##################### -getClass_Individual <- function(nind){ # nind <- Vegetation$indv - +getClass_Individual <- function(nind) { # nind <- Vegetation$indv + Individual <- list() - + # id code (0-based, sequential) - Individual$id <- rep(NA,nind) - + Individual$id <- rep(NA, nind) + # leaf C biomass on modelled area basis (kgC/m2) - Individual$cmass_leaf <- rep(NA,nind) - + Individual$cmass_leaf <- rep(NA, nind) + # fine root C biomass on modelled area basis (kgC/m2) - Individual$cmass_root <- rep(NA,nind) - + Individual$cmass_root <- rep(NA, nind) + # sapwood C biomass on modelled area basis (kgC/m2) - Individual$cmass_sap <- rep(NA,nind) - + Individual$cmass_sap <- rep(NA, nind) + # heartwood C biomass on modelled area basis (kgC/m2) - Individual$cmass_heart <- rep(NA,nind) - + Individual$cmass_heart <- rep(NA, nind) + # C "debt" (retrospective storage) (kgC/m2) - Individual$cmass_debt <- rep(NA,nind) - + Individual$cmass_debt <- rep(NA, nind) + # foliar projective cover (FPC) under full leaf cover as fraction of modelled area - Individual$fpc <- rep(NA,nind) - + Individual$fpc <- rep(NA, nind) + # fraction of PAR absorbed by foliage over projective area today, taking account of leaf phenological state - Individual$fpar <- rep(NA,nind) - + Individual$fpar <- rep(NA, nind) + # average density of individuals over patch (indiv/m2) - Individual$densindiv <- rep(NA,nind) - + Individual$densindiv <- rep(NA, nind) + # vegetation phenological state (fraction of potential leaf cover) - Individual$phen <- rep(NA,nind) - + Individual$phen <- rep(NA, nind) + # annual sum of daily fractional leaf cover - Individual$aphen <- rep(NA,nind) - + Individual$aphen <- rep(NA, nind) + # annual number of days with full leaf cover) (raingreen PFTs only; reset on 1 January) - Individual$aphen_raingreen <- rep(NA,nind) - + Individual$aphen_raingreen <- rep(NA, nind) + # accumulated NPP over modelled area (kgC/m2/year) - Individual$anpp <- rep(NA,nind) - + Individual$anpp <- rep(NA, nind) + # actual evapotranspiration over projected area (mm/day) - Individual$aet <- rep(NA,nind) - + Individual$aet <- rep(NA, nind) + # annual actual evapotranspiration over projected 
area (mm/year) - Individual$aaet <- rep(NA,nind) - + Individual$aaet <- rep(NA, nind) + # leaf to root mass ratio - Individual$ltor <- rep(NA,nind) - + Individual$ltor <- rep(NA, nind) + # plant height (m) - Individual$height <- rep(NA,nind) - + Individual$height <- rep(NA, nind) + # plant crown area (m2) - Individual$crownarea <- rep(NA,nind) - + Individual$crownarea <- rep(NA, nind) + # increment in fpc since last simulation year - Individual$deltafpc <- rep(NA,nind) - + Individual$deltafpc <- rep(NA, nind) + # bole height, i.e. height above ground of bottom of crown cylinder (m) - Individual$boleht <- rep(NA,nind) - + Individual$boleht <- rep(NA, nind) + # patch-level lai for this individual or cohort (function fpar) - Individual$lai <- rep(NA,nind) - + Individual$lai <- rep(NA, nind) + # patch-level lai for cohort in current vertical layer (function fpar) - Individual$lai_layer <- rep(NA,nind) - + Individual$lai_layer <- rep(NA, nind) + # individual leaf area index (individual and cohort modes only) - Individual$lai_indiv <- rep(NA,nind) - + Individual$lai_indiv <- rep(NA, nind) + # growth efficiency (NPP/leaf area) for each of the last five simulation years (kgC/m2/yr) - Individual$greff_5 <- matrix(NA,nrow=nind, ncol=5) - + Individual$greff_5 <- matrix(NA, nrow = nind, ncol = 5) + # individual/cohort age (years) - Individual$age <- rep(NA,nind) - + Individual$age <- rep(NA, nind) + # monthly LAI (including phenology component) - Individual$mlai <- matrix(NA,nrow=nind, ncol=12) - + Individual$mlai <- matrix(NA, nrow = nind, ncol = 12) + # FPAR assuming full leaf cover for all vegetation - Individual$fpar_leafon <- rep(NA,nind) - + Individual$fpar_leafon <- rep(NA, nind) + # LAI for current layer in canopy (cohort/individual mode; see function fpar) - Individual$lai_leafon_layer <- rep(NA,nind) - + Individual$lai_leafon_layer <- rep(NA, nind) + # interception associated with this individual today (patch basis) - Individual$intercep <- rep(NA,nind) - + Individual$intercep <- rep(NA, nind) + # accumulated mean fraction of potential leaf cover - Individual$phen_mean <- rep(NA,nind) - + Individual$phen_mean <- rep(NA, nind) + # whether individual subject to water stress - Individual$wstress <- rep(NA,nind) - + Individual$wstress <- rep(NA, nind) + # Whether this individual is truly alive. 
- Individual$alive <- rep(NA,nind) - + Individual$alive <- rep(NA, nind) + # bvoc # isoprene production (mg C m-2 d-1) - Individual$iso <- rep(NA,nind) - + Individual$iso <- rep(NA, nind) + # monoterpene production (mg C m-2 d-1) - Individual$mon <- rep(NA,nind) - + Individual$mon <- rep(NA, nind) + # monoterpene storage pool (mg C m-2) - Individual$monstor <- rep(NA,nind) - + Individual$monstor <- rep(NA, nind) + # isoprene seasonality factor (-) - Individual$fvocseas <- rep(NA,nind) - + Individual$fvocseas <- rep(NA, nind) + # leaf N biomass on modelled area basis (kgC/m2) - Individual$nmass_leaf <- rep(NA,nind) - - # root N biomass on modelled area basis (kgC/m2) - Individual$nmass_root <- rep(NA,nind) - - # sap N biomass on modelled area basis (kgC/m2) - Individual$nmass_sap <- rep(NA,nind) - + Individual$nmass_leaf <- rep(NA, nind) + + # root N biomass on modelled area basis (kgC/m2) + Individual$nmass_root <- rep(NA, nind) + + # sap N biomass on modelled area basis (kgC/m2) + Individual$nmass_sap <- rep(NA, nind) + # heart N biomass on modelled area basis (kgC/m2) - Individual$nmass_heart <- rep(NA,nind) - + Individual$nmass_heart <- rep(NA, nind) + # leaf nitrogen that is photosyntetic active - Individual$nactive <- rep(NA,nind) - + Individual$nactive <- rep(NA, nind) + # Nitrogen extinction scalar - Individual$nextin <- rep(NA,nind) - + Individual$nextin <- rep(NA, nind) + # long-term storage of labile nitrogen - Individual$nstore_longterm <- rep(NA,nind) - + Individual$nstore_longterm <- rep(NA, nind) + # storage of labile nitrogen - Individual$nstore_labile <- rep(NA,nind) - + Individual$nstore_labile <- rep(NA, nind) + # daily total nitrogen demand - Individual$ndemand <- rep(NA,nind) - + Individual$ndemand <- rep(NA, nind) + # fraction of individual nitrogen demand available for uptake - Individual$fnuptake <- rep(NA,nind) - + Individual$fnuptake <- rep(NA, nind) + # annual nitrogen uptake - Individual$anuptake <- rep(NA,nind) - + Individual$anuptake <- rep(NA, nind) + # maximum size of nitrogen storage - Individual$max_n_storage <- rep(NA,nind) - + Individual$max_n_storage <- rep(NA, nind) + # scales annual npp to maximum nitrogen storage - Individual$scale_n_storage <- rep(NA,nind) - + Individual$scale_n_storage <- rep(NA, nind) + # annual nitrogen limitation on vmax - Individual$avmaxnlim <- rep(NA,nind) - + Individual$avmaxnlim <- rep(NA, nind) + # annual optimal leaf C:N ratio - Individual$cton_leaf_aopt <- rep(NA,nind) - + Individual$cton_leaf_aopt <- rep(NA, nind) + # annual average leaf C:N ratio - Individual$cton_leaf_aavr <- rep(NA,nind) - + Individual$cton_leaf_aavr <- rep(NA, nind) + # plant mobile nitrogen status - Individual$cton_status <- rep(NA,nind) - + Individual$cton_status <- rep(NA, nind) + # total carbon in compartments before growth - Individual$cmass_veg <- rep(NA,nind) - + Individual$cmass_veg <- rep(NA, nind) + # total nitrogen in compartments before growth - Individual$nmass_veg <- rep(NA,nind) - + Individual$nmass_veg <- rep(NA, nind) + # whether individual subject to nitrogen stress - Individual$nstress <- rep(NA,nind) - + Individual$nstress <- rep(NA, nind) + # daily leaf nitrogen demand calculated from Vmax (kgN/m2) - Individual$leafndemand <- rep(NA,nind) - + Individual$leafndemand <- rep(NA, nind) + # daily root nitrogen demand based on leafndemand - Individual$rootndemand <- rep(NA,nind) - + Individual$rootndemand <- rep(NA, nind) + # daily sap wood nitrogen demand based on leafndemand - Individual$sapndemand <- rep(NA,nind) - + 
Individual$sapndemand <- rep(NA, nind) + # daily labile nitrogen demand based on npp - Individual$storendemand <- rep(NA,nind) - + Individual$storendemand <- rep(NA, nind) + # leaf fraction of total nitrogen demand - Individual$leaffndemand <- rep(NA,nind) - + Individual$leaffndemand <- rep(NA, nind) + # root fraction of total nitrogen demand - Individual$rootfndemand <- rep(NA,nind) - + Individual$rootfndemand <- rep(NA, nind) + # sap fraction of total nitrogen demand - Individual$sapfndemand <- rep(NA,nind) - + Individual$sapfndemand <- rep(NA, nind) + # store fraction of total nitrogen demand - Individual$storefndemand <- rep(NA,nind) - + Individual$storefndemand <- rep(NA, nind) + # daily leaf nitrogen demand over possible uptake (storage demand) - Individual$leafndemand_store <- rep(NA,nind) - + Individual$leafndemand_store <- rep(NA, nind) + # daily root nitrogen demand over possible uptake (storage demand) - Individual$rootndemand_store <- rep(NA,nind) - + Individual$rootndemand_store <- rep(NA, nind) + # Number of days with non-negligible phenology this month - Individual$nday_leafon <- rep(NA,nind) - Individual$unk <- rep(NA,nind) - - - for(i in 1:nind){ - + Individual$nday_leafon <- rep(NA, nind) + Individual$unk <- rep(NA, nind) + + + for (i in 1:nind) { Individual$id[i] <- readBin(zz, integer(), 1, size = 4) Individual$cmass_leaf[i] <- readBin(zz, double(), 1, size = 8) Individual$cmass_root[i] <- readBin(zz, double(), 1, size = 8) - Individual$cmass_sap[i] <- readBin(zz, double(), 1, size = 8) + Individual$cmass_sap[i] <- readBin(zz, double(), 1, size = 8) Individual$cmass_heart[i] <- readBin(zz, double(), 1, size = 8) Individual$cmass_debt[i] <- readBin(zz, double(), 1, size = 8) - Individual$fpc[i] <- readBin(zz, double(), 1, size = 8) - Individual$fpar[i] <- readBin(zz, double(), 1, size = 8) - Individual$densindiv[i] <- readBin(zz, double(), 1, size = 8) - Individual$phen[i] <- readBin(zz, double(), 1, size = 8) - Individual$aphen[i] <- readBin(zz, double(), 1, size = 8) - Individual$aphen_raingreen[i] <- readBin(zz, integer(), 1, size = 4) - Individual$anpp[i] <- readBin(zz, double(), 1, size = 8) - Individual$aet[i] <- readBin(zz, double(), 1, size = 8) - Individual$aaet[i] <- readBin(zz, double(), 1, size = 8) - Individual$ltor[i] <- readBin(zz, double(), 1, size = 8) - Individual$height[i] <- readBin(zz, double(), 1, size = 8) - Individual$crownarea[i] <- readBin(zz, double(), 1, size = 8) - Individual$deltafpc[i] <- readBin(zz, double(), 1, size = 8) - Individual$boleht[i] <- readBin(zz, double(), 1, size = 8) - Individual$lai[i] <- readBin(zz, double(), 1, size = 8) - Individual$lai_layer[i] <- readBin(zz, double(), 1, size = 8) - Individual$lai_indiv[i] <- readBin(zz, double(), 1, size = 8) - Individual$greff_5[i,] <- readBin(zz, double(), 5, size = 8) - - #not sure about this - Individual$unk[i] <- readBin(zz, double(), 1, size = 8) - - Individual$alogical[i] <- readBin(zz, logical(), 1, size = 1) - Individual$age[i] <- readBin(zz, double(), 1, size = 8) - Individual$mlai[i,] <- readBin(zz, double(), 12, size = 8) - - Individual$fpar_leafon[i] <- readBin(zz, double(), 1, size = 8) - Individual$lai_leafon_layer[i] <- readBin(zz, double(), 1, size = 8) - Individual$intercep[i] <- readBin(zz, double(), 1, size = 8) - Individual$phen_mean[i] <- readBin(zz, double(), 1, size = 8) - Individual$wstress[i] <- readBin(zz, logical(), 1, size = 1) - Individual$alive[i] <- readBin(zz, logical(), 1, size = 1) - Individual$iso[i] <- readBin(zz, double(), 1, size = 8) - 
Individual$mon[i] <- readBin(zz, double(), 1, size = 8) - Individual$monstor[i] <- readBin(zz, double(), 1, size = 8) - Individual$fvocseas[i] <- readBin(zz, double(), 1, size = 8) + Individual$fpc[i] <- readBin(zz, double(), 1, size = 8) + Individual$fpar[i] <- readBin(zz, double(), 1, size = 8) + Individual$densindiv[i] <- readBin(zz, double(), 1, size = 8) + Individual$phen[i] <- readBin(zz, double(), 1, size = 8) + Individual$aphen[i] <- readBin(zz, double(), 1, size = 8) + Individual$aphen_raingreen[i] <- readBin(zz, integer(), 1, size = 4) + Individual$anpp[i] <- readBin(zz, double(), 1, size = 8) + Individual$aet[i] <- readBin(zz, double(), 1, size = 8) + Individual$aaet[i] <- readBin(zz, double(), 1, size = 8) + Individual$ltor[i] <- readBin(zz, double(), 1, size = 8) + Individual$height[i] <- readBin(zz, double(), 1, size = 8) + Individual$crownarea[i] <- readBin(zz, double(), 1, size = 8) + Individual$deltafpc[i] <- readBin(zz, double(), 1, size = 8) + Individual$boleht[i] <- readBin(zz, double(), 1, size = 8) + Individual$lai[i] <- readBin(zz, double(), 1, size = 8) + Individual$lai_layer[i] <- readBin(zz, double(), 1, size = 8) + Individual$lai_indiv[i] <- readBin(zz, double(), 1, size = 8) + Individual$greff_5[i, ] <- readBin(zz, double(), 5, size = 8) + + # not sure about this + Individual$unk[i] <- readBin(zz, double(), 1, size = 8) + + Individual$alogical[i] <- readBin(zz, logical(), 1, size = 1) + Individual$age[i] <- readBin(zz, double(), 1, size = 8) + Individual$mlai[i, ] <- readBin(zz, double(), 12, size = 8) + + Individual$fpar_leafon[i] <- readBin(zz, double(), 1, size = 8) + Individual$lai_leafon_layer[i] <- readBin(zz, double(), 1, size = 8) + Individual$intercep[i] <- readBin(zz, double(), 1, size = 8) + Individual$phen_mean[i] <- readBin(zz, double(), 1, size = 8) + Individual$wstress[i] <- readBin(zz, logical(), 1, size = 1) + Individual$alive[i] <- readBin(zz, logical(), 1, size = 1) + Individual$iso[i] <- readBin(zz, double(), 1, size = 8) + Individual$mon[i] <- readBin(zz, double(), 1, size = 8) + Individual$monstor[i] <- readBin(zz, double(), 1, size = 8) + Individual$fvocseas[i] <- readBin(zz, double(), 1, size = 8) Individual$nmass_leaf[i] <- readBin(zz, double(), 1, size = 8) Individual$nmass_root[i] <- readBin(zz, double(), 1, size = 8) - Individual$nmass_sap[i] <- readBin(zz, double(), 1, size = 8) - Individual$nmass_heart[i] <- readBin(zz, double(), 1, size = 8) - Individual$nactive[i] <- readBin(zz, double(), 1, size = 8) - Individual$nextin[i] <- readBin(zz, double(), 1, size = 8) - Individual$nstore_longterm[i] <- readBin(zz, double(), 1, size = 8) - Individual$nstore_labile[i] <- readBin(zz, double(), 1, size = 8) - Individual$ndemand[i] <- readBin(zz, double(), 1, size = 8) - Individual$fnuptake[i] <- readBin(zz, double(), 1, size = 8) - Individual$anuptake[i] <- readBin(zz, double(), 1, size = 8) - Individual$max_n_storage[i] <- readBin(zz, double(), 1, size = 8) - Individual$scale_n_storage[i] <- readBin(zz, double(), 1, size = 8) - Individual$avmaxnlim[i] <- readBin(zz, double(), 1, size = 8) - Individual$cton_leaf_aopt[i] <- readBin(zz, double(), 1, size = 8) - Individual$cton_leaf_aavr[i] <- readBin(zz, double(), 1, size = 8) - Individual$cton_status[i] <- readBin(zz, double(), 1, size = 8) - Individual$cmass_veg[i] <- readBin(zz, double(), 1, size = 8) - Individual$nmass_veg[i] <- readBin(zz, double(), 1, size = 8) - Individual$nstress[i] <- readBin(zz, logical(), 1, size = 1) - Individual$leafndemand[i] <- readBin(zz, double(), 1, size = 8) 
- Individual$rootndemand[i] <- readBin(zz, double(), 1, size = 8) - Individual$sapndemand[i] <- readBin(zz, double(), 1, size = 8) - Individual$storendemand[i] <- readBin(zz, double(), 1, size = 8) - Individual$leaffndemand[i] <- readBin(zz, double(), 1, size = 8) - Individual$rootfndemand[i] <- readBin(zz, double(), 1, size = 8) - Individual$sapfndemand[i] <- readBin(zz, double(), 1, size = 8) - Individual$storefndemand[i] <- readBin(zz, double(), 1, size = 8) - Individual$leafndemand_store[i] <- readBin(zz, double(), 1, size = 8) - Individual$rootndemand_store[i] <- readBin(zz, double(), 1, size = 8) - Individual$nday_leafon[i] <- readBin(zz, integer(), 1, size = 4) - + Individual$nmass_sap[i] <- readBin(zz, double(), 1, size = 8) + Individual$nmass_heart[i] <- readBin(zz, double(), 1, size = 8) + Individual$nactive[i] <- readBin(zz, double(), 1, size = 8) + Individual$nextin[i] <- readBin(zz, double(), 1, size = 8) + Individual$nstore_longterm[i] <- readBin(zz, double(), 1, size = 8) + Individual$nstore_labile[i] <- readBin(zz, double(), 1, size = 8) + Individual$ndemand[i] <- readBin(zz, double(), 1, size = 8) + Individual$fnuptake[i] <- readBin(zz, double(), 1, size = 8) + Individual$anuptake[i] <- readBin(zz, double(), 1, size = 8) + Individual$max_n_storage[i] <- readBin(zz, double(), 1, size = 8) + Individual$scale_n_storage[i] <- readBin(zz, double(), 1, size = 8) + Individual$avmaxnlim[i] <- readBin(zz, double(), 1, size = 8) + Individual$cton_leaf_aopt[i] <- readBin(zz, double(), 1, size = 8) + Individual$cton_leaf_aavr[i] <- readBin(zz, double(), 1, size = 8) + Individual$cton_status[i] <- readBin(zz, double(), 1, size = 8) + Individual$cmass_veg[i] <- readBin(zz, double(), 1, size = 8) + Individual$nmass_veg[i] <- readBin(zz, double(), 1, size = 8) + Individual$nstress[i] <- readBin(zz, logical(), 1, size = 1) + Individual$leafndemand[i] <- readBin(zz, double(), 1, size = 8) + Individual$rootndemand[i] <- readBin(zz, double(), 1, size = 8) + Individual$sapndemand[i] <- readBin(zz, double(), 1, size = 8) + Individual$storendemand[i] <- readBin(zz, double(), 1, size = 8) + Individual$leaffndemand[i] <- readBin(zz, double(), 1, size = 8) + Individual$rootfndemand[i] <- readBin(zz, double(), 1, size = 8) + Individual$sapfndemand[i] <- readBin(zz, double(), 1, size = 8) + Individual$storefndemand[i] <- readBin(zz, double(), 1, size = 8) + Individual$leafndemand_store[i] <- readBin(zz, double(), 1, size = 8) + Individual$rootndemand_store[i] <- readBin(zz, double(), 1, size = 8) + Individual$nday_leafon[i] <- readBin(zz, integer(), 1, size = 4) } - + return(Individual) } ##################### Class : Soil ##################### -getClass_Soil <- function(){ +getClass_Soil <- function() { Soil <- list() - + # water content of soil layers [0=upper layer] as fraction of available water holding capacity; - Soil$wcont <- readBin(zz, double(), 2, size = 8) - + Soil$wcont <- readBin(zz, double(), 2, size = 8) + # DLE - the average wcont over the growing season, for each soil layer - Soil$awcont <- readBin(zz, double(), 2, size = 8) - - # water content of sublayer of upper soil layer for which evaporation from the bare soil surface is possible - Soil$wcont_evap <- readBin(zz, double(), 1, size = 8) - + Soil$awcont <- readBin(zz, double(), 2, size = 8) + + # water content of sublayer of upper soil layer for which evaporation from the bare soil surface is possible + Soil$wcont_evap <- readBin(zz, double(), 1, size = 8) + # daily water content in upper soil layer for each day of year - 
Soil$dwcontupper <- readBin(zz, double(), 365, size = 8) - + Soil$dwcontupper <- readBin(zz, double(), 365, size = 8) + # mean water content in upper soil layer for last month - Soil$mwcontupper <- readBin(zz, double(), 1, size = 8) - + Soil$mwcontupper <- readBin(zz, double(), 1, size = 8) + # stored snow as average over modelled area (mm rainfall equivalents) - Soil$snowpack <- readBin(zz, double(), 1, size = 8) - + Soil$snowpack <- readBin(zz, double(), 1, size = 8) + # total runoff today (mm/day) Soil$runoff <- readBin(zz, double(), 1, size = 8) - + # soil temperature today at 0.25 m depth (deg C) Soil$temp <- readBin(zz, double(), 1, size = 8) - + # daily temperatures for the last month (deg C) Soil$dtemp <- readBin(zz, double(), 31, size = 8) - + # mean soil temperature for the last month (deg C) Soil$mtemp <- readBin(zz, double(), 1, size = 8) Soil$gtemp <- readBin(zz, double(), 1, size = 8) - + # soil organic matter (SOM) pool with c. 1000 yr turnover (kgC/m2) Soil$cpool_slow <- readBin(zz, double(), 1, size = 8) - + # soil organic matter (SOM) pool with c. 33 yr turnover (kgC/m2) Soil$cpool_fast <- readBin(zz, double(), 1, size = 8) - + # mean annual litter decomposition (kgC/m2/yr) Soil$decomp_litter_mean <- readBin(zz, double(), 1, size = 8) - + # mean value of decay constant for fast SOM fraction Soil$k_soilfast_mean <- readBin(zz, double(), 1, size = 8) - + # mean value of decay constant for slow SOM fraction Soil$k_soilslow_mean <- readBin(zz, double(), 1, size = 8) - + # Parameters used by function soiltemp and updated monthly Soil$alag <- readBin(zz, double(), 1, size = 8) Soil$exp_alag <- readBin(zz, double(), 1, size = 8) - + # water content of soil layers [0=upper layer] as fraction of available water holding capacity # double mwcont[12][NSOILLAYER] Soil$mwcont <- readBin(zz, double(), 24, size = 8) - + # daily water content in lower soil layer for each day of year Soil$dwcontlower <- readBin(zz, double(), 365, size = 8) - + # mean water content in lower soil layer for last month Soil$mwcontlower <- readBin(zz, double(), 1, size = 8) - + # rainfall and snowmelt today (mm) Soil$rain_melt <- readBin(zz, double(), 1, size = 8) - + # upper limit for percolation (mm) Soil$max_rain_melt <- readBin(zz, double(), 1, size = 8) - + # whether to percolate today Soil$percolate <- readBin(zz, logical(), 1, size = 1) - + return(Soil) } ##################### Class : Sompool ##################### -getClass_Sompool <- function(){ +getClass_Sompool <- function() { Sompool <- list() npools <- 12 - + # C mass in pool kgC/m2 - Sompool$cmass <- rep(NA,npools) - + Sompool$cmass <- rep(NA, npools) + # Nitrogen mass in pool kgN/m2 - Sompool$nmass <- rep(NA,npools) - + Sompool$nmass <- rep(NA, npools) + # (potential) decrease in C following decomposition today (kgC/m2) - Sompool$cdec <- rep(NA,npools) - + Sompool$cdec <- rep(NA, npools) + # (potential) decrease in nitrogen following decomposition today (kgN/m2) - Sompool$ndec <- rep(NA,npools) - + Sompool$ndec <- rep(NA, npools) + # daily change in carbon and nitrogen - Sompool$delta_cmass <- rep(NA,npools) - Sompool$delta_nmass <- rep(NA,npools) - + Sompool$delta_cmass <- rep(NA, npools) + Sompool$delta_nmass <- rep(NA, npools) + # lignin fractions - Sompool$ligcfrac <- rep(NA,npools) - + Sompool$ligcfrac <- rep(NA, npools) + # fraction of pool remaining after decomposition - Sompool$fracremain <- rep(NA,npools) - + Sompool$fracremain <- rep(NA, npools) + # nitrogen to carbon ratio - Sompool$ntoc <- rep(NA,npools) - + Sompool$ntoc <- rep(NA, 
npools) + # Fire # soil litter moisture flammability threshold (fraction of AWC) - Sompool$litterme <- rep(NA,npools) - + Sompool$litterme <- rep(NA, npools) + # soil litter fire resistance (0-1) - Sompool$fireresist <- rep(NA,npools) - + Sompool$fireresist <- rep(NA, npools) + # Fast SOM spinup variables # monthly mean fraction of carbon pool remaining after decomposition - Sompool$mfracremain_mean <- matrix(NA,nrow=npools, ncol=12) - - for(n in 1:npools){ - Sompool$cmass[n] <- readBin(zz, double(), 1, size = 8) + Sompool$mfracremain_mean <- matrix(NA, nrow = npools, ncol = 12) + + for (n in 1:npools) { + Sompool$cmass[n] <- readBin(zz, double(), 1, size = 8) Sompool$nmass[n] <- readBin(zz, double(), 1, size = 8) Sompool$cdec[n] <- readBin(zz, double(), 1, size = 8) Sompool$ndec[n] <- readBin(zz, double(), 1, size = 8) @@ -800,78 +796,78 @@ getClass_Sompool <- function(){ Sompool$ntoc[n] <- readBin(zz, double(), 1, size = 8) Sompool$litterme[n] <- readBin(zz, double(), 1, size = 8) Sompool$fireresist[n] <- readBin(zz, double(), 1, size = 8) - Sompool$mfracremain_mean[n,] <- readBin(zz, double(), 12, size = 8) + Sompool$mfracremain_mean[n, ] <- readBin(zz, double(), 12, size = 8) } return(Sompool) } ##################### Class : Sompool, CENTURY ##################### -getClass_SompoolCent <- function(){ +getClass_SompoolCent <- function() { SompoolCent <- list() - + # daily percolation (mm) SompoolCent$dperc <- readBin(zz, double(), 1, size = 8) - + # fraction of decayed organic nitrogen leached each day; SompoolCent$orgleachfrac <- readBin(zz, double(), 1, size = 8) - + # soil mineral nitrogen pool (kgN/m2) SompoolCent$nmass_avail <- readBin(zz, double(), 1, size = 8) - + # soil nitrogen input (kgN/m2) SompoolCent$ninput <- readBin(zz, double(), 1, size = 8) - + # annual sum of nitrogen mineralisation SompoolCent$anmin <- readBin(zz, double(), 1, size = 8) - + # annual sum of nitrogen immobilisation SompoolCent$animmob <- readBin(zz, double(), 1, size = 8) - + # annual leaching from available nitrogen pool - SompoolCent$aminleach <- readBin(zz, double(), 1, size = 8) - + SompoolCent$aminleach <- readBin(zz, double(), 1, size = 8) + # annual leaching of organics from active nitrogen pool SompoolCent$aorgleach <- readBin(zz, double(), 1, size = 8) - - # total annual nitrogen fixation + + # total annual nitrogen fixation SompoolCent$anfix <- readBin(zz, double(), 1, size = 8) - + # calculated annual mean nitrogen fixation SompoolCent$anfix_calc <- readBin(zz, double(), 1, size = 8) - + # annual nitrogen fixation SompoolCent$anfix_mean <- readBin(zz, double(), 1, size = 8) - + # stored nitrogen deposition in snowpack SompoolCent$snowpack_nmass <- readBin(zz, double(), 1, size = 8) - + # years at which to begin documenting for calculation of Century equilibrium SompoolCent$solvesomcent_beginyr <- readBin(zz, integer(), 1, size = 4) - + # years at which to end documentation and start calculation of Century equilibrium SompoolCent$solvesomcent_endyr <- readBin(zz, integer(), 1, size = 4) - + # Cumulative litter pools for one year. 
SompoolCent$solvesom <- readBin(zz, double(), 1, size = 8) - + # monthly fraction of available mineral nitrogen taken up SompoolCent$fnuptake_mean <- readBin(zz, double(), 12, size = 8) - + # monthly fraction of organic carbon/nitrogen leached SompoolCent$morgleach_mean <- readBin(zz, double(), 12, size = 8) - + # monthly fraction of available mineral nitrogen leached SompoolCent$mminleach_mean <- readBin(zz, double(), 12, size = 8) - + return(SompoolCent) } ##################### Class : Fluxes ##################### -getClass_Fluxes <- function(){ +getClass_Fluxes <- function() { Fluxes <- list() - #f.bytes <- bytes[1570:1667,] + # f.bytes <- bytes[1570:1667,] Fluxes$f1 <- readBin(zz, double(), 1, size = 8) Fluxes$f2 <- readBin(zz, double(), 1, size = 8) Fluxes$f3 <- readBin(zz, double(), 1, size = 8) @@ -970,72 +966,69 @@ getClass_Fluxes <- function(){ Fluxes$f96 <- readBin(zz, double(), 12, size = 8) Fluxes$f97 <- readBin(zz, double(), 12, size = 8) Fluxes$f98 <- readBin(zz, double(), 1, size = 8) - + return(Fluxes) - + # # Fluxes stored as totals for the whole patch # Fluxes$PerPatchFluxType <- list() - # + # # # Carbon flux to atmosphere from burnt vegetation and litter (kgC/m2) - # Fluxes$PerPatchFluxType$FIREC - # + # Fluxes$PerPatchFluxType$FIREC + # # # Carbon flux to atmosphere from soil respiration (kgC/m2) # Fluxes$PerPatchFluxType$SOILC - # + # # # Flux from atmosphere to vegetation associated with establishment (kgC/m2) - # Fluxes$PerPatchFluxType$ESTC - # + # Fluxes$PerPatchFluxType$ESTC + # # # Flux to atmosphere from consumed harvested products (kgC/m2) - # Fluxes$PerPatchFluxType$HARVESTC - # + # Fluxes$PerPatchFluxType$HARVESTC + # # # Flux to atmosphere from consumed harvested products (kgN/m2) - # Fluxes$PerPatchFluxType$HARVESTN - # + # Fluxes$PerPatchFluxType$HARVESTN + # # # NH3 flux to atmosphere from fire - # Fluxes$PerPatchFluxType$NH3_FIRE - # - # # NO flux to atmosphere from fire - # Fluxes$PerPatchFluxType$NO_FIRE - # + # Fluxes$PerPatchFluxType$NH3_FIRE + # + # # NO flux to atmosphere from fire + # Fluxes$PerPatchFluxType$NO_FIRE + # # # NO2 flux to atmosphere from fire - # Fluxes$PerPatchFluxType$NO2_FIRE - # - # # N2O flux to atmosphere from fire - # Fluxes$PerPatchFluxType$N2O_FIRE - # - # # N2 flux to atmosphere from fire - # Fluxes$PerPatchFluxType$N2_FIRE - # + # Fluxes$PerPatchFluxType$NO2_FIRE + # + # # N2O flux to atmosphere from fire + # Fluxes$PerPatchFluxType$N2O_FIRE + # + # # N2 flux to atmosphere from fire + # Fluxes$PerPatchFluxType$N2_FIRE + # # # N flux from soil # Fluxes$PerPatchFluxType$N_SOIL - # + # # # Reproduction costs - # Fluxes$PerPatchFluxType$REPRC - # + # Fluxes$PerPatchFluxType$REPRC + # # # Number of types, must be last # Fluxes$PerPatchFluxType$NPERPATCHFLUXTYPES # 12 - # + # # # Fluxes stored per pft # Fluxes$PerPFTFluxType <- list() - # + # # # NPP (kgC/m2) # Fluxes$PerPFTFluxType$NPP - # + # # # GPP (kgC/m2) # Fluxes$PerPFTFluxType$GPP - # + # # # Autotrophic respiration (kgC/m2) # Fluxes$PerPFTFluxType$RA - # + # # # Isoprene (mgC/m2) # Fluxes$PerPFTFluxType$ISO - # + # # # Monoterpene (mgC/m2) # Fluxes$PerPFTFluxType$MON - # + # # # Number of types, must be last # Fluxes$PerPFTFluxType$NPERPFTFLUXTYPES # 5 } - - - diff --git a/models/lpjguess/R/write.config.LPJGUESS.R b/models/lpjguess/R/write.config.LPJGUESS.R index 43251103d1c..c9d2d64b956 100644 --- a/models/lpjguess/R/write.config.LPJGUESS.R +++ b/models/lpjguess/R/write.config.LPJGUESS.R @@ -13,7 +13,6 @@ ##' @export ##' @author Istem Fer, Tony Gardella 
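# write.config.LPJGUESS below fills the job-script template read via
# system.file("template.job", package = "PEcAn.LPJGUESS") by plain string
# substitution: each @TAG@ placeholder is replaced with a value from the PEcAn
# settings using gsub(). A small sketch of that pattern with a made-up two-line
# template (the tags actually substituted further down include @RUNDIR@,
# @BINARY@, @INSFILE@ and @OUTDIR@; the template text here is an assumption,
# not the shipped template.job):
#   template <- c("cd @RUNDIR@", "@BINARY@ @INSFILE@ > @OUTDIR@/logfile.txt")
#   values   <- list("@RUNDIR@" = "/tmp/run/1", "@BINARY@" = "guess",
#                    "@INSFILE@" = "guess.ins", "@OUTDIR@" = "/tmp/out/1")
#   for (tag in names(values)) {
#     template <- gsub(tag, values[[tag]], template, fixed = TRUE)
#   }
#   writeLines(template)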
write.config.LPJGUESS <- function(defaults, trait.values, settings, run.id) { - # find out where to write run/ouput rundir <- file.path(settings$host$rundir, run.id) if (!file.exists(rundir)) { @@ -23,11 +22,11 @@ write.config.LPJGUESS <- function(defaults, trait.values, settings, run.id) { if (!file.exists(outdir)) { dir.create(outdir) } - + #----------------------------------------------------------------------- # write LPJ-GUESS specific instruction file settings <- write.insfile.LPJGUESS(settings, trait.values, rundir, outdir, run.id) - + #----------------------------------------------------------------------- # create launch script (which will create symlink) if (!is.null(settings$model$jobtemplate) && file.exists(settings$model$jobtemplate)) { @@ -35,7 +34,7 @@ write.config.LPJGUESS <- function(defaults, trait.values, settings, run.id) { } else { jobsh <- readLines(con = system.file("template.job", package = "PEcAn.LPJGUESS"), n = -1) } - + # create host specific setttings hostsetup <- "" if (!is.null(settings$model$prerun)) { @@ -44,7 +43,7 @@ write.config.LPJGUESS <- function(defaults, trait.values, settings, run.id) { if (!is.null(settings$host$prerun)) { hostsetup <- paste(hostsetup, sep = "\n", paste(settings$host$prerun, collapse = "\n")) } - + hostteardown <- "" if (!is.null(settings$model$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$model$postrun, collapse = "\n")) @@ -52,23 +51,23 @@ write.config.LPJGUESS <- function(defaults, trait.values, settings, run.id) { if (!is.null(settings$host$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$host$postrun, collapse = "\n")) } - + # create job.sh jobsh <- gsub("@HOST_SETUP@", hostsetup, jobsh) jobsh <- gsub("@HOST_TEARDOWN@", hostteardown, jobsh) - + jobsh <- gsub("@SITE_LAT@", settings$run$site$lat, jobsh) jobsh <- gsub("@SITE_LON@", settings$run$site$lon, jobsh) - + jobsh <- gsub("@START_DATE@", settings$run$start.date, jobsh) jobsh <- gsub("@END_DATE@", settings$run$end.date, jobsh) - + jobsh <- gsub("@OUTDIR@", outdir, jobsh) jobsh <- gsub("@RUNDIR@", rundir, jobsh) - + jobsh <- gsub("@BINARY@", settings$model$binary, jobsh) jobsh <- gsub("@INSFILE@", settings$model$insfile, jobsh) - + writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) } # write.config.LPJGUESS @@ -85,100 +84,100 @@ write.config.LPJGUESS <- function(defaults, trait.values, settings, run.id) { #' @return settings Updated list #' @author Istem Fer write.insfile.LPJGUESS <- function(settings, trait.values, rundir, outdir, run.id) { - - guessins <- readLines(con = system.file("template.ins", package = "PEcAn.LPJGUESS"), n = -1) + guessins <- readLines(con = system.file("template.ins", package = "PEcAn.LPJGUESS"), n = -1) paramsins <- readLines(con = system.file("pecan.ins", package = "PEcAn.LPJGUESS"), n = -1) - pftindx <- 152:222 # should grab automatically - pftblock <- paramsins[pftindx] # lines with pft params - + pftindx <- 152:222 # should grab automatically + pftblock <- paramsins[pftindx] # lines with pft params + # create the grid indices file grid.file <- file.path(settings$host$rundir, "gridind.txt") - gridind <- readLines(con = system.file("gridind.txt", package = "PEcAn.LPJGUESS"), n = -1) + gridind <- readLines(con = system.file("gridind.txt", package = "PEcAn.LPJGUESS"), n = -1) writeLines(gridind, grid.file) - guessins <- gsub("@GRID_FILE@", grid.file, guessins) - - pft_names <- sapply(settings$pfts, `[[`,"name") - 
load(system.file("lpjguess_params.Rdata",package = "PEcAn.LPJGUESS")) - + guessins <- gsub("@GRID_FILE@", grid.file, guessins) + + pft_names <- sapply(settings$pfts, `[[`, "name") + load(system.file("lpjguess_params.Rdata", package = "PEcAn.LPJGUESS")) + # name and unit conversion trait.values <- pecan2lpjguess(trait.values) - + # these are strings, should they be passed via xml? # e.g. defaults lifeform=tree phenology=evergreen leafphysiognomy=broadleaf landcover=natural pathway=c3 noprior_params <- c("lifeform", "phenology", "leafphysiognomy", "landcover", "pathway") - - write2pftblock <- vector("list", length(settings$pfts)) + + write2pftblock <- vector("list", length(settings$pfts)) # write params with values from trait.values for (i in seq_along(settings$pfts)) { - - write2pftblock[[i]] <- pftblock - write2pftblock[[i]] <- gsub(paste0("@pft@"), pft_names[i], write2pftblock[[i]]) - - warning_list <- list() - - # pass param values - # IMPORTANT : Ideally all params should have priors on them! Currently the defaults are only for a tropical broadleaved evergreen pft - for(t in seq_along(lpjguess_param_list)){ - trait_name <- names(lpjguess_param_list)[t] - if(trait_name != "pft" & !(trait_name %in% noprior_params)){ - if(trait_name %in% names(trait.values[[i]])){ # pass sample - - pecan_sample <- trait.values[[i]][[trait_name]] - - if(trait_name == "rootdist"){ # convert from ratio to fractions - lower_layer_fraction = 1/(pecan_sample+1) - upper_layer_fraction = 1 - lower_layer_fraction - pecan_sample <- paste(upper_layer_fraction, lower_layer_fraction) - } - - if(trait_name == "wooddens"){ # convert from relative density to sapwood and heartwood density (kgC/m3) - pecan_sample <- pecan_sample*997 # density of water - } - - write2pftblock[[i]] <- gsub(paste0("@", trait_name, "@"), pecan_sample, write2pftblock[[i]]) - }else{ # use default - write2pftblock[[i]] <- gsub(paste0("@", trait_name, "@"), lpjguess_param_list[[trait_name]], write2pftblock[[i]]) - warning_list[[trait_name]] <- trait_name + write2pftblock[[i]] <- pftblock + write2pftblock[[i]] <- gsub(paste0("@pft@"), pft_names[i], write2pftblock[[i]]) + + warning_list <- list() + + # pass param values + # IMPORTANT : Ideally all params should have priors on them! 
Currently the defaults are only for a tropical broadleaved evergreen pft + for (t in seq_along(lpjguess_param_list)) { + trait_name <- names(lpjguess_param_list)[t] + if (trait_name != "pft" & !(trait_name %in% noprior_params)) { + if (trait_name %in% names(trait.values[[i]])) { # pass sample + + pecan_sample <- trait.values[[i]][[trait_name]] + + if (trait_name == "rootdist") { # convert from ratio to fractions + lower_layer_fraction <- 1 / (pecan_sample + 1) + upper_layer_fraction <- 1 - lower_layer_fraction + pecan_sample <- paste(upper_layer_fraction, lower_layer_fraction) } - } - } - - # handle the no prior params - for(t in seq_along(noprior_params)){ - trait_name <- noprior_params[t] - if(!is.null(settings$pfts[[i]][[trait_name]])){ # specified in xml - write2pftblock[[i]] <- gsub(paste0("@", trait_name, "@"), paste0("'", settings$pfts[[i]][[trait_name]], "'"), write2pftblock[[i]]) - }else{ #pass the default, add to warning - write2pftblock[[i]] <- gsub(paste0("@", trait_name, "@"), paste0("'", lpjguess_param_list[[trait_name]], "'"), write2pftblock[[i]]) + + if (trait_name == "wooddens") { # convert from relative density to sapwood and heartwood density (kgC/m3) + pecan_sample <- pecan_sample * 997 # density of water + } + + write2pftblock[[i]] <- gsub(paste0("@", trait_name, "@"), pecan_sample, write2pftblock[[i]]) + } else { # use default + write2pftblock[[i]] <- gsub(paste0("@", trait_name, "@"), lpjguess_param_list[[trait_name]], write2pftblock[[i]]) warning_list[[trait_name]] <- trait_name } } - - PEcAn.logger::logger.warn("***You have not specified the following parameters for your PFT,", pft_names[i],"- Be aware that the defaults may not work well for you.***", unlist(warning_list)) - } #end of pft-loop - + } + + # handle the no prior params + for (t in seq_along(noprior_params)) { + trait_name <- noprior_params[t] + if (!is.null(settings$pfts[[i]][[trait_name]])) { # specified in xml + write2pftblock[[i]] <- gsub(paste0("@", trait_name, "@"), paste0("'", settings$pfts[[i]][[trait_name]], "'"), write2pftblock[[i]]) + } else { # pass the default, add to warning + write2pftblock[[i]] <- gsub(paste0("@", trait_name, "@"), paste0("'", lpjguess_param_list[[trait_name]], "'"), write2pftblock[[i]]) + warning_list[[trait_name]] <- trait_name + } + } + + PEcAn.logger::logger.warn("***You have not specified the following parameters for your PFT,", pft_names[i], "- Be aware that the defaults may not work well for you.***", unlist(warning_list)) + } # end of pft-loop + # erase the placeholder, write back the pft blocks - paramsins <- paramsins[-pftindx] + paramsins <- paramsins[-pftindx] paramsins <- c(paramsins, unlist(write2pftblock)) - - + + # write clim file names - + tmp.file <- settings$run$inputs$met$path pre.file <- gsub(".tmp.nc", ".pre.nc", tmp.file) cld.file <- gsub(".tmp.nc", ".cld.nc", tmp.file) - + guessins <- gsub("@TEMP_FILE@", tmp.file, guessins) guessins <- gsub("@PREC_FILE@", pre.file, guessins) guessins <- gsub("@INSOL_FILE@", cld.file, guessins) - + # create and write CO2 file start.year <- lubridate::year(settings$run$start.date) end.year <- lubridate::year(settings$run$end.date) n.year <- length(start.year:end.year) - co2.file <- file.path(settings$rundir, - paste0("co2.", sprintf("%04d", start.year), ".", end.year, ".txt")) - + co2.file <- file.path( + settings$rundir, + paste0("co2.", sprintf("%04d", start.year), ".", end.year, ".txt") + ) + # for pre-industrial values just use 280 ppm if (end.year < 1850) { CO2 <- data.frame(start.year:end.year, rep(280, 
n.year)) @@ -196,16 +195,16 @@ write.insfile.LPJGUESS <- function(settings, trait.values, rundir, outdir, run.i } write.table(CO2, file = co2.file, row.names = FALSE, col.names = FALSE, sep = "\t", eol = "\n") guessins <- gsub("@CO2_FILE@", co2.file, guessins) - + # write soil file path soil.file <- settings$run$inputs$soil$path guessins <- gsub("@SOIL_FILE@", soil.file, guessins) - + settings$model$insfile <- file.path(settings$rundir, run.id, "guess.ins") - + writeLines(paramsins, con = file.path(settings$rundir, run.id, "params.ins")) writeLines(guessins, con = file.path(settings$rundir, run.id, "guess.ins")) - + return(settings) } # write.insfile.LPJGUESS @@ -216,35 +215,34 @@ write.insfile.LPJGUESS <- function(settings, trait.values, rundir, outdir, run.i #' @param trait.values trait.values, list #' @return translated list #' @author Istem Fer -pecan2lpjguess <- function(trait.values){ - +pecan2lpjguess <- function(trait.values) { # TODO :match all lpjguess and pecan names vartable <- tibble::tribble( - ~pecanname, ~lpjguessname, ~pecanunits, ~lpjguessunits, - "root_turnover_rate", "turnover_root", NA, NA, + ~pecanname, ~lpjguessname, ~pecanunits, ~lpjguessunits, + "root_turnover_rate", "turnover_root", NA, NA, "sapwood_turnover_rate", "turnover_sap", NA, NA, "leaf_turnover_rate", "turnover_leaf", NA, NA, "SLA", "sla", NA, NA, - "ci2ca", "lambda_max", NA, NA, + "ci2ca", "lambda_max", NA, NA, "Emax", "emax", NA, NA, "reprfrac", "reprfrac", NA, NA, "water_stress_threshold", "wscal_min", NA, NA, "drought_tolerance", "drought_tolerance", NA, NA, - "turnover_harv_prod", "turnover_harv_prod", NA, NA, + "turnover_harv_prod", "turnover_harv_prod", NA, NA, "crownarea_max", "crownarea_max", NA, NA, - "ltor_max", "ltor_max", NA, NA, + "ltor_max", "ltor_max", NA, NA, "root_dist_ratio", "rootdist", NA, NA, - "k_allom2", "k_allom2", NA, NA, - "k_allom3", "k_allom3", NA, NA, - "k_rp", "k_rp", NA, NA, - "wood_density", "wooddens", NA, NA, + "k_allom2", "k_allom2", NA, NA, + "k_allom3", "k_allom3", NA, NA, + "k_rp", "k_rp", NA, NA, + "wood_density", "wooddens", NA, NA, "c2n_fineroot", "cton_root", NA, NA, - "c2n_sapwood", "cton_sap", NA, NA, + "c2n_sapwood", "cton_sap", NA, NA, "nup2root_max", "nuptoroot", NA, NA, - "km_volume", "km_volume", NA, NA, + "km_volume", "km_volume", NA, NA, "growth_resp_factor", "respcoeff", NA, NA, "kest_repr", "kest_repr", NA, NA, - "kest_bg", "kest_bg", NA, NA, + "kest_bg", "kest_bg", NA, NA, "kest_pres", "kest_pres", NA, NA, "k_chilla", "k_chilla", NA, NA, "k_chillb", "k_chillb", NA, NA, @@ -253,15 +251,15 @@ pecan2lpjguess <- function(trait.values){ "harv_eff", "harv_eff", NA, NA, "res_outtake", "res_outtake", NA, NA, "harvest_slow_frac", "harvest_slow_frac", NA, NA, - "fnstorage", "fnstorage", NA, NA, + "fnstorage", "fnstorage", NA, NA, "GDD", "phengdd5ramp", NA, NA, "est_max", "est_max", NA, NA, "parff_min", "parff_min", NA, NA, "alphar", "alphar", NA, NA, - "greff_min", "greff_min", NA, NA, + "greff_min", "greff_min", NA, NA, "k_allom1", "k_allom1", NA, NA, - "k_latosa", "k_latosa", NA, NA, - "gcmin", "gmin", "m s-1", "mm s-1", + "k_latosa", "k_latosa", NA, NA, + "gcmin", "gmin", "m s-1", "mm s-1", "intc", "intc", NA, NA, "ga", "ga", NA, NA, "tcmin_surv", "tcmin_surv", NA, NA, @@ -271,7 +269,7 @@ pecan2lpjguess <- function(trait.values){ "gdd5min_est", "gdd5min_est", NA, NA, "pstemp_min", "pstemp_min", NA, NA, "pstemp_low", "pstemp_low", NA, NA, - "pstemp_high", "pstemp_high", NA, NA, + "pstemp_high", "pstemp_high", NA, NA, "pstemp_max", "pstemp_max", NA, NA, 
"leaf_longevity", "leaflong", NA, NA, "longevity", "longevity", NA, NA, @@ -279,26 +277,29 @@ pecan2lpjguess <- function(trait.values){ "eps_iso", "eps_iso", NA, NA, "seas_iso", "seas_iso", NA, NA, "eps_mon", "eps_mon", NA, NA, - "storfrac_mon", "storfrac_mon", NA, NA) - - trait.values <- lapply(trait.values, function(x){ + "storfrac_mon", "storfrac_mon", NA, NA + ) + + trait.values <- lapply(trait.values, function(x) { names(x) <- vartable$lpjguessname[match(names(x), vartable$pecanname)] return(x) }) - + # TODO : unit conversions? toconvert <- vartable$lpjguessname[!is.na(vartable$lpjguessunits)] - trait.values <- lapply(trait.values, function(x){ - canconvert <- toconvert[toconvert %in% names(x)] - if(length(canconvert) != 0){ - for(c in seq_along(canconvert)){ - x[,names(x) == canconvert[c]] <- PEcAn.utils::ud_convert(x[,names(x) == canconvert[c]], - vartable$pecanunits[vartable$lpjguessname == canconvert[c]], - vartable$lpjguessunits[vartable$lpjguessname == canconvert[c]]) + trait.values <- lapply(trait.values, function(x) { + canconvert <- toconvert[toconvert %in% names(x)] + if (length(canconvert) != 0) { + for (c in seq_along(canconvert)) { + x[, names(x) == canconvert[c]] <- PEcAn.utils::ud_convert( + x[, names(x) == canconvert[c]], + vartable$pecanunits[vartable$lpjguessname == canconvert[c]], + vartable$lpjguessunits[vartable$lpjguessname == canconvert[c]] + ) } } return(x) }) - + return(trait.values) -} +} diff --git a/models/lpjguess/tests/testthat.R b/models/lpjguess/tests/testthat.R index 481a3f96014..f55808f4fad 100644 --- a/models/lpjguess/tests/testthat.R +++ b/models/lpjguess/tests/testthat.R @@ -2,4 +2,4 @@ library(testthat) library(PEcAn.utils) PEcAn.logger::logger.setQuitOnSevere(FALSE) -#test_check("PEcAn.LPJGUESS") +# test_check("PEcAn.LPJGUESS") diff --git a/models/lpjguess/tests/testthat/test.met2model.R b/models/lpjguess/tests/testthat/test.met2model.R index 4f76266bf7f..e57bbb13700 100644 --- a/models/lpjguess/tests/testthat/test.met2model.R +++ b/models/lpjguess/tests/testthat/test.met2model.R @@ -6,7 +6,8 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("Met conversion runs without error", { nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" diff --git a/models/maat/R/met2model.MAAT.R b/models/maat/R/met2model.MAAT.R index c7609bad305..f7e0f4922c7 100644 --- a/models/maat/R/met2model.MAAT.R +++ b/models/maat/R/met2model.MAAT.R @@ -1,4 +1,3 @@ - ## R Code to convert NetCDF CF met files into MAAT model met files ## If files already exist in 'Outfolder', the default function is NOT to overwrite them and only @@ -8,7 +7,7 @@ # leaf_user_met prefix PREFIX_XML <- "\n" -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ##' met2model wrapper for MAAT ##' ##' @name met2model.MAAT @@ -20,7 +19,7 @@ PREFIX_XML <- "\n" ##' @param end_date the end date of the data to be downloaded (will only use the year part of the date) ##' @param overwrite should existing files be overwritten ##' @param verbose should the function be very verbose -##' @param leap_year Enforce Leap-years? If set to TRUE, will require leap years to have 366 days. +##' @param leap_year Enforce Leap-years? If set to TRUE, will require leap years to have 366 days. 
##' If set to false, will require all years to have 365 days. Default = TRUE. ##' @param ... additional arguments, currently ignored ##' @export @@ -28,27 +27,29 @@ PREFIX_XML <- "\n" ##' met2model.MAAT <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, leap_year = TRUE, ...) { - PEcAn.logger::logger.info("START met2model.MAAT") start_date <- as.POSIXlt(start_date, tz = "GMT") end_date <- as.POSIXlt(end_date, tz = "GMT") out.file <- paste(in.prefix, - strptime(start_date, "%Y-%m-%d"), - strptime(end_date, "%Y-%m-%d"), - "csv", - sep = ".") + strptime(start_date, "%Y-%m-%d"), + strptime(end_date, "%Y-%m-%d"), + "csv", + sep = "." + ) out.file.full <- file.path(outfolder, out.file) - results <- data.frame(file = out.file.full, - host = PEcAn.remote::fqdn(), - mimetype = "text/csv", - formatname = "MAAT meteorology", - startdate = start_date, - enddate = end_date, - dbfile.name = out.file, - stringsAsFactors = FALSE) + results <- data.frame( + file = out.file.full, + host = PEcAn.remote::fqdn(), + mimetype = "text/csv", + formatname = "MAAT meteorology", + startdate = start_date, + enddate = end_date, + dbfile.name = out.file, + stringsAsFactors = FALSE + ) print("internal results") print(results) @@ -68,12 +69,11 @@ met2model.MAAT <- function(in.path, in.prefix, outfolder, start_date, end_date, start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) day_secs <- PEcAn.utils::ud_convert(1, "day", "seconds") - + ## loop over files ## TODO need to filter out the data that is not inside start_date, end_date for (year in start_year:end_year) { - - PEcAn.logger::logger.info(paste0("Processing year: ",year)) + PEcAn.logger::logger.info(paste0("Processing year: ", year)) ncdf.file <- file.path(in.path, paste(in.prefix, year, "nc", sep = ".")) if (file.exists(ncdf.file)) { @@ -88,35 +88,35 @@ met2model.MAAT <- function(in.path, in.prefix, outfolder, start_date, end_date, dt <- PEcAn.utils::seconds_in_year(year, leap_year) / length(sec) tstep <- round(day_secs / dt) - dt <- day_secs / tstep + dt <- day_secs / tstep ### extract required MAAT driver variables names(nc$var) - lat <- ncdf4::ncvar_get(nc, "latitude") - lon <- ncdf4::ncvar_get(nc, "longitude") - + lat <- ncdf4::ncvar_get(nc, "latitude") + lon <- ncdf4::ncvar_get(nc, "longitude") + # Air temperature - Tair <- ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin + Tair <- ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin Tair_C <- PEcAn.utils::ud_convert(Tair, "K", "degC") ## in degC - + # Precipitation - Rain <- ncdf4::ncvar_get(nc, "precipitation_flux") ## 'kg/m^2/s' - + Rain <- ncdf4::ncvar_get(nc, "precipitation_flux") ## 'kg/m^2/s' + # Get atmospheric pressure - Atm_press <- ncdf4::ncvar_get(nc,"air_pressure") ## in Pa + Atm_press <- ncdf4::ncvar_get(nc, "air_pressure") ## in Pa # get humidity vars - RH_perc <- try(ncdf4::ncvar_get(nc, "relative_humidity"), silent = TRUE) ## RH Percentage - Qair <- try(ncdf4::ncvar_get(nc, "specific_humidity"), silent = TRUE) #humidity (kg/kg) - SVP <- PEcAn.utils::ud_convert(PEcAn.data.atmosphere::get.es(Tair_C), "millibar", "Pa") ## Saturation vapor pressure - VPD <- try(ncdf4::ncvar_get(nc, "water_vapor_saturation_deficit"), silent = TRUE) ## in Pa + RH_perc <- try(ncdf4::ncvar_get(nc, "relative_humidity"), silent = TRUE) ## RH Percentage + Qair <- try(ncdf4::ncvar_get(nc, "specific_humidity"), silent = TRUE) # humidity (kg/kg) + SVP <- PEcAn.utils::ud_convert(PEcAn.data.atmosphere::get.es(Tair_C), "millibar", "Pa") ## 
Saturation vapor pressure + VPD <- try(ncdf4::ncvar_get(nc, "water_vapor_saturation_deficit"), silent = TRUE) ## in Pa if (!is.numeric(VPD)) { VPD <- SVP * (1 - PEcAn.data.atmosphere::qair2rh(Qair, Tair_C)) PEcAn.logger::logger.info("water_vapor_saturation_deficit absent; VPD calculated from Qair, Tair, and SVP (saturation vapor pressure) ") } VPD_kPa <- PEcAn.utils::ud_convert(VPD, "Pa", "kPa") - e_a <- SVP - VPD # AirVP + e_a <- SVP - VPD # AirVP if (!is.numeric(RH_perc)) { - RH_perc <- PEcAn.data.atmosphere::qair2rh(Qair, Tair_C,Atm_press) + RH_perc <- PEcAn.data.atmosphere::qair2rh(Qair, Tair_C, Atm_press) } # get windspeed @@ -124,26 +124,25 @@ met2model.MAAT <- function(in.path, in.prefix, outfolder, start_date, end_date, if (!is.numeric(ws) | length(unique(ws)) == 1) { U <- ncdf4::ncvar_get(nc, "eastward_wind") V <- ncdf4::ncvar_get(nc, "northward_wind") - ws <- sqrt(U ^ 2 + V ^ 2) ## m/s + ws <- sqrt(U^2 + V^2) ## m/s PEcAn.logger::logger.info("wind_speed absent; calculated from eastward_wind and northward_wind") } - + # get radiation - SW <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") ## in W/m2 - PAR <- try(ncdf4::ncvar_get(nc, "surface_downwelling_photosynthetic_photon_flux_in_air") * 1e+06, silent = TRUE) ## mol/m2/s to umols/m2/s + SW <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") ## in W/m2 + PAR <- try(ncdf4::ncvar_get(nc, "surface_downwelling_photosynthetic_photon_flux_in_air") * 1e+06, silent = TRUE) ## mol/m2/s to umols/m2/s if (!is.numeric(PAR)) { - PAR <- SW * 2.114 #W/m2 TO umol/m2/s + PAR <- SW * 2.114 # W/m2 TO umol/m2/s } # get CO2 (if exists) CO2 <- try(ncdf4::ncvar_get(nc, "mole_fraction_of_carbon_dioxide_in_air"), silent = TRUE) useCO2 <- is.numeric(CO2) if (useCO2) { - CO2 <- CO2 * 1e+06 ## convert from mole fraction (kg/kg) to ppm + CO2 <- CO2 * 1e+06 ## convert from mole fraction (kg/kg) to ppm } ncdf4::nc_close(nc) - } else { print("Skipping to next year") next @@ -151,23 +150,23 @@ met2model.MAAT <- function(in.path, in.prefix, outfolder, start_date, end_date, ## build time variables (year, month, day of year) skip <- FALSE - nyr <- floor(length(sec) * dt / 86400 / 365) - yr <- NULL - doy <- NULL - hr <- NULL + nyr <- floor(length(sec) * dt / 86400 / 365) + yr <- NULL + doy <- NULL + hr <- NULL asec <- sec for (y in seq(year, year + nyr - 1)) { diy <- PEcAn.utils::days_in_year(y, leap_year) ytmp <- rep(y, PEcAn.utils::ud_convert(diy / dt, "days", "seconds")) dtmp <- rep(seq_len(diy), each = day_secs / dt) if (is.null(yr)) { - yr <- ytmp + yr <- ytmp doy <- dtmp - hr <- rep(NA, length(dtmp)) + hr <- rep(NA, length(dtmp)) } else { - yr <- c(yr, ytmp) + yr <- c(yr, ytmp) doy <- c(doy, dtmp) - hr <- c(hr, rep(NA, length(dtmp))) + hr <- c(hr, rep(NA, length(dtmp))) } rng <- length(doy) - length(ytmp):1 + 1 if (!all(rng >= 0)) { @@ -176,49 +175,53 @@ met2model.MAAT <- function(in.path, in.prefix, outfolder, start_date, end_date, break } asec[rng] <- asec[rng] - asec[rng[1]] - hr[rng] <- (asec[rng] - (dtmp - 1) * day_secs) / day_secs * 24 + hr[rng] <- (asec[rng] - (dtmp - 1) * day_secs) / day_secs * 24 } # Time output variable - time <- as.POSIXct(asec, tz = "UTC", origin = paste0(year,"-01-01")) + time <- as.POSIXct(asec, tz = "UTC", origin = paste0(year, "-01-01")) # output matrix n <- length(Tair) if (useCO2) { - tmp <- cbind.data.frame(Time = time[1:n], - Year = yr[1:n], - DOY = doy[1:n], - Hour = hr[1:n], - Frac_day = frac.day[1:n], - Timestep_frac = rep(dt/day_secs, n), - CO2 = CO2, - Tair_degC = Tair_C, - Prec_mm = 
Rain * dt, # converts from mm/s to mm umols/m2/s - Atm_press_Pa = Atm_press, - RH_perc = RH_perc, - VPD_kPa = VPD_kPa, - PAR_umols_m2_s = PAR, - Windspeed_m_s = ws) + tmp <- cbind.data.frame( + Time = time[1:n], + Year = yr[1:n], + DOY = doy[1:n], + Hour = hr[1:n], + Frac_day = frac.day[1:n], + Timestep_frac = rep(dt / day_secs, n), + CO2 = CO2, + Tair_degC = Tair_C, + Prec_mm = Rain * dt, # converts from mm/s to mm umols/m2/s + Atm_press_Pa = Atm_press, + RH_perc = RH_perc, + VPD_kPa = VPD_kPa, + PAR_umols_m2_s = PAR, + Windspeed_m_s = ws + ) } else { - tmp <- cbind.data.frame(Time = time[1:n], - Year = yr[1:n], - DOY = doy[1:n], - Hour = hr[1:n], - Frac_day = frac.day[1:n], - Timestep_frac = rep(dt/day_secs, n), - Tair_degC = Tair_C, - Prec_mm = Rain * dt, # converts from mm/s to mm umols/m2/s - Atm_press_Pa = Atm_press, - RH_perc = RH_perc, - VPD_kPa = VPD_kPa, - PAR_umols_m2_s = PAR, - Windspeed_m_s = ws) + tmp <- cbind.data.frame( + Time = time[1:n], + Year = yr[1:n], + DOY = doy[1:n], + Hour = hr[1:n], + Frac_day = frac.day[1:n], + Timestep_frac = rep(dt / day_secs, n), + Tair_degC = Tair_C, + Prec_mm = Rain * dt, # converts from mm/s to mm umols/m2/s + Atm_press_Pa = Atm_press, + RH_perc = RH_perc, + VPD_kPa = VPD_kPa, + PAR_umols_m2_s = PAR, + Windspeed_m_s = ws + ) } ## quick error check, sometimes get a NA in the last hr ?? NEEDED? hr.na <- which(is.na(tmp[, 3])) if (length(hr.na) > 0) { - tmp[hr.na, 3] <- tmp[hr.na - 1, 3] + dt/day_secs * 24 + tmp[hr.na, 3] <- tmp[hr.na - 1, 3] + dt / day_secs * 24 } if (is.null(out)) { @@ -226,10 +229,9 @@ met2model.MAAT <- function(in.path, in.prefix, outfolder, start_date, end_date, } else { out <- rbind(out, tmp) } - } ## end loop over years + } ## end loop over years if (!is.null(out)) { - ## write met csv output # write.table(out,out.file.full,quote = FALSE,sep='\t',row.names=FALSE,col.names=FALSE) utils::write.csv(out, out.file.full, row.names = FALSE) @@ -248,22 +250,26 @@ met2model.MAAT <- function(in.path, in.prefix, outfolder, start_date, end_date, # Create leaf_user_met.xml if (useCO2) { - leaf_user_met_list <- list(leaf = list(env = list(time = "'Time'", temp = "'Tair_degC'", par = "'PAR_umols_m2_s'",vpd="'VPD_kPa'", - atm_press="'Atm_press_Pa'",ca_conc="'CO2'",wind="'Windspeed_m_s'"))) + leaf_user_met_list <- list(leaf = list(env = list( + time = "'Time'", temp = "'Tair_degC'", par = "'PAR_umols_m2_s'", vpd = "'VPD_kPa'", + atm_press = "'Atm_press_Pa'", ca_conc = "'CO2'", wind = "'Windspeed_m_s'" + ))) } else { - leaf_user_met_list <- list(leaf = list(env = list(time = "'Time'", temp = "'Tair_degC'", par = "'PAR_umols_m2_s'",vpd="'VPD_kPa'", - atm_press="'Atm_press_Pa'",wind="'Windspeed_m_s'"))) + leaf_user_met_list <- list(leaf = list(env = list( + time = "'Time'", temp = "'Tair_degC'", par = "'PAR_umols_m2_s'", vpd = "'VPD_kPa'", + atm_press = "'Atm_press_Pa'", wind = "'Windspeed_m_s'" + ))) } leaf_user_met_xml <- PEcAn.settings::listToXml(leaf_user_met_list, "met_data_translator") # output XML file XML::saveXML(leaf_user_met_xml, - file = file.path(outfolder, "leaf_user_met.xml"), - indent = TRUE, - prefix = PREFIX_XML) + file = file.path(outfolder, "leaf_user_met.xml"), + indent = TRUE, + prefix = PREFIX_XML + ) return(invisible(results)) - } else { print("NO MET TO OUTPUT") return(invisible(NULL)) diff --git a/models/maat/R/model2netcdf.MAAT.R b/models/maat/R/model2netcdf.MAAT.R index dd1a21a9080..1b25e4ae0ee 100755 --- a/models/maat/R/model2netcdf.MAAT.R +++ b/models/maat/R/model2netcdf.MAAT.R @@ -7,36 +7,38 @@ ##' @param 
sitelon Longitude of the site ##' @param start_date Start time of the simulation ##' @param end_date End time of the simulation -##' -##' +##' +##' ##' @export ##' @author Shawn Serbin, Anthony Walker, Alexey Shiklomanov ##' model2netcdf.MAAT <- function(rundir, outdir, sitelat = -999, sitelon = -999, start_date = NULL, end_date = NULL) { - # setup constants day_secs <- PEcAn.utils::ud_convert(1, "day", "seconds") - + # setup helper function var_update <- function(data, out, oldname, newname, oldunits, newunits = NULL, missval = -999, longname, ncdims) { # ifelse is no longer working as expected, so now we have this function to deal with any Inf values f_sort <- function(s) { - if (!is.finite(s)) -999 - else tryCatch(PEcAn.utils::misc.convert(s, oldunits, newunits), error = function(e) s) + if (!is.finite(s)) { + -999 + } else { + tryCatch(PEcAn.utils::misc.convert(s, oldunits, newunits), error = function(e) s) + } } ## define variable - if(is.null(newunits)) newunits = oldunits + if (is.null(newunits)) newunits <- oldunits newvar <- ncdf4::ncvar_def(name = newname, units = newunits, dim = ncdims, missval = missval, longname = longname) ## convert data dat <- data - if (newname %in% c("Year","FracJulianDay")) { + if (newname %in% c("Year", "FracJulianDay")) { PEcAn.logger::logger.info(paste0("Skipping conversion for: ", newname)) dat.new <- dat } else { dat.new <- apply(as.matrix(dat, length(dat), 1), 1, f_sort) } ## prep for writing - if(is.null(out)) { + if (is.null(out)) { out <- list(var = list(), dat = list()) out$var[[1]] <- newvar out$dat[[1]] <- dat.new @@ -47,15 +49,15 @@ model2netcdf.MAAT <- function(rundir, outdir, sitelat = -999, sitelon = -999, st } return(out) } - + ### look for leaf_user_met.xml file - met_exists <- file.exists(file.path(rundir,"leaf_user_met.xml")) + met_exists <- file.exists(file.path(rundir, "leaf_user_met.xml")) - ## TODO: Clean up and make this function more elegant. In particular, refactor such that its easier to + ## TODO: Clean up and make this function more elegant. In particular, refactor such that its easier to ## manage output variables when running with/without met drivers vs have two separate processing paths below - + ### Read in model output in MAAT format - maat.out.file <- file.path(outdir, list.files(outdir,'*.csv$')) # updated to handle mod_mimic runs + maat.out.file <- file.path(outdir, list.files(outdir, "*.csv$")) # updated to handle mod_mimic runs maat.output <- utils::read.csv(maat.out.file, header = TRUE, sep = ",") maat.output.dims <- dim(maat.output) @@ -63,137 +65,176 @@ model2netcdf.MAAT <- function(rundir, outdir, sitelat = -999, sitelon = -999, st start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) num_years <- length(start_year:end_year) - timezone <- "UTC" # should be set based on met drivers and what time units they are in. Ugh - + timezone <- "UTC" # should be set based on met drivers and what time units they are in. Ugh + if (met_exists) { # ** maat.dates assumes UTC, is this correct? what if input met is in a local TZ?? 
need to revist this ** maat_run_start_date <- format(lubridate::as_datetime(maat.output$time, tz = timezone)[1], "%Y-%m-%d %H:%M:%S") - maat_dates <- strptime(maat.output$time, format = "%Y-%m-%d", tz = timezone) + maat_dates <- strptime(maat.output$time, format = "%Y-%m-%d", tz = timezone) } else { maat_run_start_date <- format(lubridate::as_datetime(start_date, tz = timezone)[1], "%Y-%m-%d %H:%M:%S") } - + ### setup nc file lat/long lat <- ncdf4::ncdim_def("lat", "degrees_north", vals = as.numeric(sitelat), longname = "station_latitude") lon <- ncdf4::ncdim_def("lon", "degrees_east", vals = as.numeric(sitelon), longname = "station_longitude") - + ### Setup outputs for netCDF file in appropriate units for (year in seq(start_year, end_year)) { - if (file.exists(file.path(outdir, paste(year, "nc", sep = "."))) ) { + if (file.exists(file.path(outdir, paste(year, "nc", sep = ".")))) { PEcAn.logger::logger.debug(paste("---- Output year", year, "already exists.")) - next ## skip, model output already present. + next ## skip, model output already present. } - + PEcAn.logger::logger.info(paste("---- Processing MAAT output year: ", year)) - + if (met_exists) { - ## Subset data for processing sub.maat.output <- subset(maat.output, lubridate::year(maat_dates) == year) sub.maat.dates <- lubridate::as_date(sub.maat.output$time) sub.maat.doy <- lubridate::yday(sub.maat.dates) sub.maat.output.dims <- dim(sub.maat.output) - dims <- dim(subset(sub.maat.output, - strptime(time, format = "%Y-%m-%d", tz=timezone) == - seq(strptime(sub.maat.dates[1], format = "%Y-%m-%d", tz = timezone), by = "days", length = 1))) + dims <- dim(subset( + sub.maat.output, + strptime(time, format = "%Y-%m-%d", tz = timezone) == + seq(strptime(sub.maat.dates[1], format = "%Y-%m-%d", tz = timezone), by = "days", length = 1) + )) timestep.s <- day_secs / dims[1] # e.g. 1800 = 30 minute timesteps dayfrac <- 1 / dims[1] day.steps <- head(seq(0, 1, by = dayfrac), -1) - + # setup netCDF time variable for year - maat_run_start_by_year <- format(lubridate::as_datetime(sub.maat.dates, tz =timezone)[1], "%Y-%m-%d %H:%M:%S") + maat_run_start_by_year <- format(lubridate::as_datetime(sub.maat.dates, tz = timezone)[1], "%Y-%m-%d %H:%M:%S") tvals <- (sub.maat.doy - 1) + day.steps bounds <- array(data = NA_real_, dim = c(length(tvals), 2)) - bounds[,1] <- tvals - bounds[,2] <- bounds[,1] + dayfrac - t <- ncdf4::ncdim_def(name = "time", units = paste0("days since ", maat_run_start_by_year), - vals = tvals, calendar = "standard", - unlim = TRUE) # standard calendar for leap years? Also need to be sure we update cal depending on leap/no leap - time_interval <- ncdf4::ncdim_def(name = "hist_interval", - longname = "history time interval endpoint dimensions", - vals = 1:2, units = "") - + bounds[, 1] <- tvals + bounds[, 2] <- bounds[, 1] + dayfrac + t <- ncdf4::ncdim_def( + name = "time", units = paste0("days since ", maat_run_start_by_year), + vals = tvals, calendar = "standard", + unlim = TRUE + ) # standard calendar for leap years? 
Also need to be sure we update cal depending on leap/no leap + time_interval <- ncdf4::ncdim_def( + name = "hist_interval", + longname = "history time interval endpoint dimensions", + vals = 1:2, units = "" + ) + ### Parse MAAT output - #output <- list() # create empty output + # output <- list() # create empty output output <- NULL ncdims <- list(lon, lat, t) out.year <- as.numeric(rep(year, sub.maat.output.dims[1])) - output <- var_update(out.year, output, "Year", "Year", oldunits = "YYYY", newunits = NULL, missval = -999, - longname="Simulation Year", ncdims=ncdims) - output <- var_update(tvals, output, "FracJulianDay", "FracJulianDay", oldunits = "Frac DOY", newunits = NULL, missval = -999, - longname="Fraction of Julian Date", ncdims=ncdims) - output <- var_update(sub.maat.output$A, output, "A", "assimilation_rate", oldunits = "umol C m-2 s-1", newunits = "kg C m-2 s-1", missval = -999, - longname = "Leaf assimilation rate", ncdims = ncdims) - output <- var_update(sub.maat.output$rd, output, "rd", "leaf_respiration", oldunits = "umol C m-2 s-1", newunits = "kg C m-2 s-1", missval = -999, - longname = "Leaf Respiration Rate", ncdims = ncdims) - output <- var_update((1 / (sub.maat.output$rs)), output, "gs", "stomatal_conductance", oldunits = "mol H2O m-2 s-1", - newunits = "kg H2O m-2 s-1", missval = -999, longname = "Leaf Stomatal Conductance", ncdims = ncdims) - output <- var_update(sub.maat.output$ci, output, "ci", "Ci", oldunits = "Pa", - newunits = "Pa", missval = -999, longname = "Leaf Internal CO2 Concentration", ncdims = ncdims) - output <- var_update(sub.maat.output$cc, output, "cc", "Cc", oldunits = "Pa", - newunits = "Pa", missval = -999, longname = "Leaf Mesophyll CO2 Concentration", ncdims = ncdims) - + output <- var_update(out.year, output, "Year", "Year", + oldunits = "YYYY", newunits = NULL, missval = -999, + longname = "Simulation Year", ncdims = ncdims + ) + output <- var_update(tvals, output, "FracJulianDay", "FracJulianDay", + oldunits = "Frac DOY", newunits = NULL, missval = -999, + longname = "Fraction of Julian Date", ncdims = ncdims + ) + output <- var_update(sub.maat.output$A, output, "A", "assimilation_rate", + oldunits = "umol C m-2 s-1", newunits = "kg C m-2 s-1", missval = -999, + longname = "Leaf assimilation rate", ncdims = ncdims + ) + output <- var_update(sub.maat.output$rd, output, "rd", "leaf_respiration", + oldunits = "umol C m-2 s-1", newunits = "kg C m-2 s-1", missval = -999, + longname = "Leaf Respiration Rate", ncdims = ncdims + ) + output <- var_update((1 / (sub.maat.output$rs)), output, "gs", "stomatal_conductance", + oldunits = "mol H2O m-2 s-1", + newunits = "kg H2O m-2 s-1", missval = -999, longname = "Leaf Stomatal Conductance", ncdims = ncdims + ) + output <- var_update(sub.maat.output$ci, output, "ci", "Ci", + oldunits = "Pa", + newunits = "Pa", missval = -999, longname = "Leaf Internal CO2 Concentration", ncdims = ncdims + ) + output <- var_update(sub.maat.output$cc, output, "cc", "Cc", + oldunits = "Pa", + newunits = "Pa", missval = -999, longname = "Leaf Mesophyll CO2 Concentration", ncdims = ncdims + ) + ## put in time_bounds before writing out new nc file - #length(output$var) - output$var[[length(output$var) + 1]] <- ncdf4::ncvar_def(name = "time_bounds", units = "", - longname = "history time interval endpoints", - dim = list(time_interval, time = t), - prec = "double") + # length(output$var) + output$var[[length(output$var) + 1]] <- ncdf4::ncvar_def( + name = "time_bounds", units = "", + longname = "history time interval 
endpoints", + dim = list(time_interval, time = t), + prec = "double" + ) output$dat[[length(output$dat) + 1]] <- c(rbind(bounds[, 1], bounds[, 2])) ## !!TODO: ADD MORE MAAT OUTPUTS HERE!! ## - } else { - t <- ncdf4::ncdim_def(name = "time", units = paste0("days since ", maat_run_start_date), - vals = 1, calendar = "standard", - unlim = TRUE) # standard calendar for leap years? Also need to be sure we update cal depending on leap/no leap - bounds <- array(data = NA_real_, dim = c(1,2)) - bounds[,1] <- 0 - bounds[,2] <- 1 - time_interval <- ncdf4::ncdim_def(name = "hist_interval", - longname = "history time interval endpoint dimensions", - vals = 1:2, units = "") - + t <- ncdf4::ncdim_def( + name = "time", units = paste0("days since ", maat_run_start_date), + vals = 1, calendar = "standard", + unlim = TRUE + ) # standard calendar for leap years? Also need to be sure we update cal depending on leap/no leap + bounds <- array(data = NA_real_, dim = c(1, 2)) + bounds[, 1] <- 0 + bounds[, 2] <- 1 + time_interval <- ncdf4::ncdim_def( + name = "hist_interval", + longname = "history time interval endpoint dimensions", + vals = 1:2, units = "" + ) + output <- NULL - ncdims <- list(lon, lat, t) - output <- var_update(sub.maat.output$A, output, "A", "assimilation_rate", oldunits = "umol C m-2 s-1", newunits = "kg C m-2 s-1", missval = -999, - longname = "Leaf assimilation rate", ncdims = ncdims) - output <- var_update(maat.output$rd, output, "rd", "leaf_respiration", oldunits = "umol C m-2 s-1", newunits = "kg C m-2 s-1", missval = -999, - longname = "Leaf Respiration Rate", ncdims = ncdims) - output <- var_update((1 / (maat.output$rs)), output, "gs", "stomatal_conductance", oldunits = "mol H2O m-2 s-1", - newunits = "kg H2O m-2 s-1", missval = -999, longname = "Leaf Stomatal Conductance", ncdims = ncdims) - output <- var_update(maat.output$ci, output, "ci", "Ci", oldunits = "Pa", - newunits = "Pa", missval = -999, longname = "Leaf Internal CO2 Concentration", ncdims = ncdims) - output <- var_update(maat.output$cc, output, "cc", "Cc", oldunits = "Pa", - newunits = "Pa", missval = -999, longname = "Leaf Mesophyll CO2 Concentration", ncdims = ncdims) - + ncdims <- list(lon, lat, t) + output <- var_update(sub.maat.output$A, output, "A", "assimilation_rate", + oldunits = "umol C m-2 s-1", newunits = "kg C m-2 s-1", missval = -999, + longname = "Leaf assimilation rate", ncdims = ncdims + ) + output <- var_update(maat.output$rd, output, "rd", "leaf_respiration", + oldunits = "umol C m-2 s-1", newunits = "kg C m-2 s-1", missval = -999, + longname = "Leaf Respiration Rate", ncdims = ncdims + ) + output <- var_update((1 / (maat.output$rs)), output, "gs", "stomatal_conductance", + oldunits = "mol H2O m-2 s-1", + newunits = "kg H2O m-2 s-1", missval = -999, longname = "Leaf Stomatal Conductance", ncdims = ncdims + ) + output <- var_update(maat.output$ci, output, "ci", "Ci", + oldunits = "Pa", + newunits = "Pa", missval = -999, longname = "Leaf Internal CO2 Concentration", ncdims = ncdims + ) + output <- var_update(maat.output$cc, output, "cc", "Cc", + oldunits = "Pa", + newunits = "Pa", missval = -999, longname = "Leaf Mesophyll CO2 Concentration", ncdims = ncdims + ) + ## put in time_bounds before writing out new nc file - output$var[[length(output$var) + 1]] <- ncdf4::ncvar_def(name="time_bounds", units="", - longname = "history time interval endpoints", - dim = list(time_interval, time = t), - prec = "double") + output$var[[length(output$var) + 1]] <- ncdf4::ncvar_def( + name = "time_bounds", units = "", + 
longname = "history time interval endpoints", + dim = list(time_interval, time = t), + prec = "double" + ) output$dat[[length(output$dat) + 1]] <- c(rbind(bounds[, 1], bounds[, 2])) - ## !!TODO: ADD MORE MAAT OUTPUTS HERE!! ## + ## !!TODO: ADD MORE MAAT OUTPUTS HERE!! ## } - + ## write netCDF data ncout <- ncdf4::nc_create(file.path(outdir, paste(year, "nc", sep = ".")), output$var) - ncdf4::ncatt_put(ncout, "time", "bounds", "time_bounds", prec=NA) + ncdf4::ncatt_put(ncout, "time", "bounds", "time_bounds", prec = NA) for (i in seq_along(output$var)) { - #print(i) # for debugging + # print(i) # for debugging ncdf4::ncvar_put(ncout, output$var[[i]], output$dat[[i]]) } - + ## extract variable and long names to VAR file for PEcAn vis - utils::write.table(sapply(ncout$var, function(x) { x$longname }), - file = file.path(outdir, paste(year, "nc.var", sep = ".")), - col.names = FALSE, - row.names = TRUE, - quote = FALSE) - + utils::write.table( + sapply(ncout$var, function(x) { + x$longname + }), + file = file.path(outdir, paste(year, "nc.var", sep = ".")), + col.names = FALSE, + row.names = TRUE, + quote = FALSE + ) + # close netCDF file try(ncdf4::nc_close(ncout)) - - } ## Year loop + } ## Year loop } # model2netcdf.MAAT -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ## EOF diff --git a/models/maat/R/write.config.MAAT.R b/models/maat/R/write.config.MAAT.R index 825cefe061f..7a8dea13ec2 100644 --- a/models/maat/R/write.config.MAAT.R +++ b/models/maat/R/write.config.MAAT.R @@ -1,9 +1,9 @@ -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ## Functions to prepare and write out MAAT model xml files for MA, SA, and Ensemble runs PREFIX_XML <- "\n" -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# -##------------------------------------------------------------------------------------------------# +## ------------------------------------------------------------------------------------------------# ##' Convert samples for MAAT ##' ##' convert parameters and parameter names from PEcAn database default units/names with MAAT @@ -17,32 +17,31 @@ PREFIX_XML <- "\n" ##' @export ##' @author Shawn Serbin, Anthony Walker convert.samples.MAAT <- function(trait.samples, runid) { - ### Convert object if (is.list(trait.samples)) { trait.samples <- as.data.frame(trait.samples) } - + ### first rename variables trait.names <- colnames(trait.samples) - trait.names[trait.names == "leaf_respiration_rate_m2"] <- "atref.rd" - trait.names[trait.names == "Vcmax"] <- "atref.vcmax" - trait.names[trait.names == "Jmax"] <- "atref.jmax" - trait.names[trait.names == "Ev_Arrhenius"] <- "Ha.vcmax" # Arrhenius activation energy - trait.names[trait.names == "Ej_Arrhenius"] <- "Ha.jmax" # Arrhenius activation energy - trait.names[trait.names == "Ha_Modified_Arrhenius_Vcmax"] <- "Ha.vcmax" # !!TODO: Allow for the same prior to update both Vcmax and Jmax - trait.names[trait.names == "Hd_Modified_Arrhenius_Vcmax"] <- "Hd.vcmax" # !!TODO: Allow for the same prior to update both Vcmax and Jmax - trait.names[trait.names == "Ha_Modified_Arrhenius_Jmax"] <- "Ha.jmax" # !!TODO: Allow 
for the same prior to update both Vcmax and Jmax - trait.names[trait.names == "Hd_Modified_Arrhenius_Jmax"] <- "Hd.jmax" # !!TODO: Allow for the same prior to update both Vcmax and Jmax - trait.names[trait.names == "cuticular_cond"] <- "g0" # Medlyn and ball-berry min conductance value (i.e. g0, or the intercept of A/gs relationship) - trait.names[trait.names == "stomatal_slope"] <- "g1_leuning" - trait.names[trait.names == "stomatal_slope.g1"] <- "g1_medlyn" - trait.names[trait.names == "stomatal_slope.BB"] <- "g1_ball" - trait.names[trait.names == "f_frac"] <- "f" - trait.names[trait.names == "theta"] <- "theta_j" # curvature of J quadratic in Farqhuar & Wong 1984 (unitless) - trait.names[trait.names == "leaf_respiration_Q10"] <- "q10.rd" # Q10 of Rd (unitless) + trait.names[trait.names == "leaf_respiration_rate_m2"] <- "atref.rd" + trait.names[trait.names == "Vcmax"] <- "atref.vcmax" + trait.names[trait.names == "Jmax"] <- "atref.jmax" + trait.names[trait.names == "Ev_Arrhenius"] <- "Ha.vcmax" # Arrhenius activation energy + trait.names[trait.names == "Ej_Arrhenius"] <- "Ha.jmax" # Arrhenius activation energy + trait.names[trait.names == "Ha_Modified_Arrhenius_Vcmax"] <- "Ha.vcmax" # !!TODO: Allow for the same prior to update both Vcmax and Jmax + trait.names[trait.names == "Hd_Modified_Arrhenius_Vcmax"] <- "Hd.vcmax" # !!TODO: Allow for the same prior to update both Vcmax and Jmax + trait.names[trait.names == "Ha_Modified_Arrhenius_Jmax"] <- "Ha.jmax" # !!TODO: Allow for the same prior to update both Vcmax and Jmax + trait.names[trait.names == "Hd_Modified_Arrhenius_Jmax"] <- "Hd.jmax" # !!TODO: Allow for the same prior to update both Vcmax and Jmax + trait.names[trait.names == "cuticular_cond"] <- "g0" # Medlyn and ball-berry min conductance value (i.e. 
g0, or the intercept of A/gs relationship)
+  trait.names[trait.names == "stomatal_slope"] <- "g1_leuning"
+  trait.names[trait.names == "stomatal_slope.g1"] <- "g1_medlyn"
+  trait.names[trait.names == "stomatal_slope.BB"] <- "g1_ball"
+  trait.names[trait.names == "f_frac"] <- "f"
+  trait.names[trait.names == "theta"] <- "theta_j" # curvature of J quadratic in Farquhar & Wong 1984 (unitless)
+  trait.names[trait.names == "leaf_respiration_Q10"] <- "q10.rd" # Q10 of Rd (unitless)
   colnames(trait.samples) <- trait.names
-
+
   ### Conversions -- change to only use if Collatz, should also provide standard Rd output
   if ("atref.rd" %in% names(trait.samples)) {
     ## Calculate dark_resp_factor - rd as a proportion of Vcmax, Williams & Flannagan 1998 ~ 0.1
@@ -65,11 +64,11 @@ convert.samples.MAAT <- function(trait.samples, runid) {
     ## Convert from kJ mol-1 to J mol-1
     trait.samples$Hd.jmax <- PEcAn.utils::ud_convert(trait.samples$Hd.jmax, "kJ", "J")
   }
-  if ("leaf_reflect_vis" %in% names(trait.samples) & "leaf_trans_vis" %in% names(trait.samples) ){
-    leaf_abs <- 1-(trait.samples[["leaf_reflect_vis"]]+trait.samples[["leaf_trans_vis"]])
+  if ("leaf_reflect_vis" %in% names(trait.samples) & "leaf_trans_vis" %in% names(trait.samples)) {
+    leaf_abs <- 1 - (trait.samples[["leaf_reflect_vis"]] + trait.samples[["leaf_trans_vis"]])
     trait.samples[["a"]] <- leaf_abs
-    remove <- which(colnames(trait.samples)=="leaf_trans_vis" | colnames(trait.samples)=="leaf_reflect_vis")
-    trait.samples <- trait.samples[,-remove]
+    remove <- which(colnames(trait.samples) == "leaf_trans_vis" | colnames(trait.samples) == "leaf_reflect_vis")
+    trait.samples <- trait.samples[, -remove]
   }
   if ("leaf_width" %in% names(trait.samples)) {
     ## Convert from mm to m
@@ -79,17 +78,17 @@ convert.samples.MAAT <- function(trait.samples, runid) {
     ## Convert from umol H2O m-2 s-1 to mol m-2s-1
     trait.samples$g0 <- PEcAn.utils::ud_convert(trait.samples$g0, "umol H2O m-2 s-1", "mol H2O m-2 s-1")
   }
-
-  # for debugging conversions
-  #save(trait.samples, file = file.path(settings$host$outdir,runid,'trait.samples.Rdata'))
-
+
+  # for debugging conversions
+  # save(trait.samples, file = file.path(settings$host$outdir,runid,'trait.samples.Rdata'))
+
   ### Return trait.samples as modified by function
   return(trait.samples)
 } # convert.samples.MAAT
-##-------------------------------------------------------------------------------------------------#
+## -------------------------------------------------------------------------------------------------#

-##-------------------------------------------------------------------------------------------------#
+## -------------------------------------------------------------------------------------------------#
 ##' Writes a MAAT config file.
 ##'
 ##' Requires a pft xml object, a list of trait values for a single model run,
@@ -104,11 +103,10 @@ convert.samples.MAAT <- function(trait.samples, runid) {
 ##' @author Shawn Serbin, Anthony Walker, Rob Kooper, Chris Black
 ##'
 write.config.MAAT <- function(defaults = NULL, trait.values, settings, run.id) {
-
   # function needed to nest parameters appropriately in the output MAAT XML. See below
-  nest_entries <- function(x, pattern, new_name = pattern){
+  nest_entries <- function(x, pattern, new_name = pattern) {
     matches <- grepl(pattern, names(x))
-    if(!any(matches)){
+    if (!any(matches)) {
       return(x)
     }
     nested <- stats::setNames(x[matches], gsub(pattern, "", names(x[matches])))
@@ -116,136 +114,150 @@ write.config.MAAT <- function(defaults = NULL, trait.values, settings, run.id) {
     x[[new_name]] <- nested
     x
   }
-
+
   # find out where to write run/output
   rundir <- file.path(settings$host$rundir, run.id)
   outdir <- file.path(settings$host$outdir, run.id)
-
-  ### Move model files to run dirs. Use built-in MAAT script setup_MAAT_project.bs --- May need to revise this with
+
+  ### Move model files to run dirs. Use built-in MAAT script setup_MAAT_project.bs --- May need to revise this with
   ### latest MAAT v1.0 and changes within. This script no longer completely fits within the PEcAn logic. May be better
   ### to manually move/link needed script files within PEcAn and not use any built-in MAAT bash scripts.
   maat_mod_obj <- as.character(settings$model$config$mod_obj)
-  settings$model$config$mod_obj <- NULL # remove from final MAAT *_user_static.xml MAAT file
-  system2(file.path(settings$model$binary, "run_scripts/setup_MAAT_project.bs"),
-          c(maat_mod_obj, rundir, file.path(settings$model$binary, "run_scripts"),
-            file.path(settings$model$binary, "src")))
+  settings$model$config$mod_obj <- NULL # remove from final MAAT *_user_static.xml MAAT file
+  system2(
+    file.path(settings$model$binary, "run_scripts/setup_MAAT_project.bs"),
+    c(
+      maat_mod_obj, rundir, file.path(settings$model$binary, "run_scripts"),
+      file.path(settings$model$binary, "src")
+    )
+  )
   # remove leaf_user_dynamic.xml from rundir since PEcAn is not currently using dynamic variables (for now, revisit later as-needed)
   # see: https://github.com/walkeranthonyp/MAAT/issues/8 for reference
-  unlink(file.path(rundir,"leaf_user_dynamic.xml"), recursive = FALSE)
+  unlink(file.path(rundir, "leaf_user_dynamic.xml"), recursive = FALSE)
   # remove leaf_user_met.xml file if running without met drivers. 
Look for this file during model2netCDF step to select processing path if (is.null(settings$run$inputs$met)) { - unlink(file.path(rundir,"leaf_user_met.xml"), recursive = FALSE) + unlink(file.path(rundir, "leaf_user_met.xml"), recursive = FALSE) } # below is now required given that MAAT logic no longer moves or links to the run_MAAT.R script file run_maat_script <- file.path(settings$model$binary, "src", "run_MAAT.R") - + ### Parse config options to XML if (!is.null(settings$model$config$mod_mimic)) { - PEcAn.logger::logger.info(paste0("Running with model mimic: ",settings$model$config$mod_mimic)) + PEcAn.logger::logger.info(paste0("Running with model mimic: ", settings$model$config$mod_mimic)) mod_mimic <- as.character(settings$model$config$mod_mimic) settings$model$config$mod_mimic <- NULL xml <- PEcAn.settings::listToXml(settings$model$config, "default") } else { PEcAn.logger::logger.info("*** Model mimic not selected ***") - mod_mimic <- 'NULL' + mod_mimic <- "NULL" xml <- PEcAn.settings::listToXml(settings$model$config, "default") } - + ### Run rename and conversion function on PEcAn trait values PEcAn.logger::logger.info("*** Convert input trait values to MAAT parameters and units ***") - traits <- convert.samples.MAAT(trait.samples = trait.values[[settings$pfts$pft$name]],runid=run.id) + traits <- convert.samples.MAAT(trait.samples = trait.values[[settings$pfts$pft$name]], runid = run.id) # below for debugging - #save(traits, file = file.path(settings$host$outdir,run.id,'trait.samples.converted.Rdata')) - + # save(traits, file = file.path(settings$host$outdir,run.id,'trait.samples.converted.Rdata')) + ### Convert traits to list # with MAAT v1.0 we need to generate nested lists # create full nested list and convert to MAAT XML format traits <- as.list(traits) traits.list <- list() - maat_param_prefix_list <- list(param=c("Ha.","Hd.","atref.","reftemp.","Topt.","deltaS.","a_deltaS_t.","b_deltaS_t.","q10.","a_q10_t.", - "b_q10_t.","tupp_cox.","tlow_cox.","exp_cox."), - xml=c("Ha","Hd","atref","reftemp","Topt","deltaS","a_deltaS_t","b_deltaS_t","q10","a_q10_t", - "b_q10_t","tupp_cox","tlow_cox","exp_cox")) + maat_param_prefix_list <- list( + param = c( + "Ha.", "Hd.", "atref.", "reftemp.", "Topt.", "deltaS.", "a_deltaS_t.", "b_deltaS_t.", "q10.", "a_q10_t.", + "b_q10_t.", "tupp_cox.", "tlow_cox.", "exp_cox." + ), + xml = c( + "Ha", "Hd", "atref", "reftemp", "Topt", "deltaS", "a_deltaS_t", "b_deltaS_t", "q10", "a_q10_t", + "b_q10_t", "tupp_cox", "tlow_cox", "exp_cox" + ) + ) q <- 1 for (p in seq(seq_along(1:length(maat_param_prefix_list$param)))) { - if (q==1) { + if (q == 1) { traits.list <- nest_entries(traits, paste0(maat_param_prefix_list$param[p]), paste0(maat_param_prefix_list$xml[p])) } else { traits.list <- nest_entries(traits.list, paste0(maat_param_prefix_list$param[p]), paste0(maat_param_prefix_list$xml[p])) } - q <- q+1 + q <- q + 1 } traits.xml <- PEcAn.settings::listToXml(traits.list, "pars") - rm(p,q) - + rm(p, q) + ### Finalize XML xml[[1]] <- XML::addChildren(xml[[1]], traits.xml) - + ### Save final XML stack as a properly formatted MAAT parameter/option XML file - XML::saveXML(xml, - file = file.path(settings$rundir, run.id, "leaf_user_static.xml"), - indent = TRUE, - prefix = PREFIX_XML) - + XML::saveXML(xml, + file = file.path(settings$rundir, run.id, "leaf_user_static.xml"), + indent = TRUE, + prefix = PREFIX_XML + ) + ### Setup job.sh script to run MAAT model if (is.null(settings$run$inputs$met)) { PEcAn.logger::logger.info("-- No met selected. 
Running without a met driver --") - jobsh <- paste0("#!/bin/bash\n","Rscript ",run_maat_script," ", - "\"srcdir <- ","'",file.path(settings$model$binary, "src"),"'","\""," ", - "\"pdir <- ","'",rundir,"'","\""," ","\"mod_obj <- ","'",maat_mod_obj,"'","\""," ", - "\"xml<-T","\""," ","\"uq<-F","\""," ", - "\"factorial<-F","\""," ","\"mod_mimic<-",mod_mimic,"\""," ", - "\"odir <- ","'",outdir,"'","\""," > ",rundir, - "/logfile.txt","\n",'echo "', - ' library(PEcAn.MAAT); model2netcdf.MAAT(', - "'",rundir,"',","'",outdir,"',", - settings$run$site$lat,",", - settings$run$site$lon,", '", - settings$run$start.date,"', '", - settings$run$end.date,"') ", - '" | R --vanilla') - - # Run with met drivers + jobsh <- paste0( + "#!/bin/bash\n", "Rscript ", run_maat_script, " ", + "\"srcdir <- ", "'", file.path(settings$model$binary, "src"), "'", "\"", " ", + "\"pdir <- ", "'", rundir, "'", "\"", " ", "\"mod_obj <- ", "'", maat_mod_obj, "'", "\"", " ", + "\"xml<-T", "\"", " ", "\"uq<-F", "\"", " ", + "\"factorial<-F", "\"", " ", "\"mod_mimic<-", mod_mimic, "\"", " ", + "\"odir <- ", "'", outdir, "'", "\"", " > ", rundir, + "/logfile.txt", "\n", 'echo "', + " library(PEcAn.MAAT); model2netcdf.MAAT(", + "'", rundir, "',", "'", outdir, "',", + settings$run$site$lat, ",", + settings$run$site$lon, ", '", + settings$run$start.date, "', '", + settings$run$end.date, "') ", + '" | R --vanilla' + ) + + # Run with met drivers } else if (!is.null(settings$run$inputs$met)) { - ## temporary fix for #2064 - #met.dir <- dirname(settings$run$inputs$met$path) + # met.dir <- dirname(settings$run$inputs$met$path) met.dir <- dirname(as.character(settings$run$inputs$met$path)) - #met.file <- basename(settings$run$inputs$met$path) + # met.file <- basename(settings$run$inputs$met$path) met.file <- basename(as.character(settings$run$inputs$met$path)) - - file.copy(file.path(met.dir, list.files(met.dir, "*.xml")), - rundir, - overwrite = TRUE, - recursive = FALSE, - copy.mode = TRUE, - copy.date = TRUE) - + + file.copy(file.path(met.dir, list.files(met.dir, "*.xml")), + rundir, + overwrite = TRUE, + recursive = FALSE, + copy.mode = TRUE, + copy.date = TRUE + ) + PEcAn.logger::logger.info("-- Met selected. 
Running with a met driver --") - PEcAn.logger::logger.info(paste0("Running with met: ",met.file)) - jobsh <- paste0("#!/bin/bash\n","Rscript ",run_maat_script," ", - "\"srcdir <- ","'",file.path(settings$model$binary, "src"),"'","\""," ", - "\"pdir <- ","'",rundir,"'","\""," ","\"mod_obj <- ","'",maat_mod_obj,"'","\""," ", - "\"xml<-T","\""," ","\"uq<-F","\""," ", - "\"factorial<-F","\""," ","\"mod_mimic<-",mod_mimic,"\""," ", - "\"odir <- ","'",outdir,"'","\""," ","\"mdir <- ","'",met.dir,"'", - "\""," ","\"metdata <- ","'",met.file,"'","\""," > ",rundir, - "/logfile.txt","\n",'echo "', - ' library(PEcAn.MAAT); model2netcdf.MAAT(', - "'",rundir,"',","'",outdir,"',", - settings$run$site$lat,",", - settings$run$site$lon,", '", - settings$run$start.date,"', '", - settings$run$end.date,"') ", - '" | R --vanilla') - } #End if/else - + PEcAn.logger::logger.info(paste0("Running with met: ", met.file)) + jobsh <- paste0( + "#!/bin/bash\n", "Rscript ", run_maat_script, " ", + "\"srcdir <- ", "'", file.path(settings$model$binary, "src"), "'", "\"", " ", + "\"pdir <- ", "'", rundir, "'", "\"", " ", "\"mod_obj <- ", "'", maat_mod_obj, "'", "\"", " ", + "\"xml<-T", "\"", " ", "\"uq<-F", "\"", " ", + "\"factorial<-F", "\"", " ", "\"mod_mimic<-", mod_mimic, "\"", " ", + "\"odir <- ", "'", outdir, "'", "\"", " ", "\"mdir <- ", "'", met.dir, "'", + "\"", " ", "\"metdata <- ", "'", met.file, "'", "\"", " > ", rundir, + "/logfile.txt", "\n", 'echo "', + " library(PEcAn.MAAT); model2netcdf.MAAT(", + "'", rundir, "',", "'", outdir, "',", + settings$run$site$lat, ",", + settings$run$site$lon, ", '", + settings$run$start.date, "', '", + settings$run$end.date, "') ", + '" | R --vanilla' + ) + } # End if/else + # Write the job.sh script writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) - } # write.config.MAAT -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# ## EOF diff --git a/models/maat/inst/generate_maat_met_drivers_ngee.R b/models/maat/inst/generate_maat_met_drivers_ngee.R index 7d4fee47bf3..782232cc3cd 100644 --- a/models/maat/inst/generate_maat_met_drivers_ngee.R +++ b/models/maat/inst/generate_maat_met_drivers_ngee.R @@ -5,9 +5,9 @@ #---------------- Close all devices and delete all variables. 
-------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files library(lubridate) #--------------------------------------------------------------------------------------------------# @@ -15,9 +15,9 @@ library(lubridate) #--------------------------------------------------------------------------------------------------# ### Load met driver data -met_path <- file.path('/Volumes/data/Model_Data/sites/PA-SLZ/NGEETropics_source/') -met_drivers <- read.csv(file = file.path(met_path,'SanLorenzo_Sherman_met_drivers_2008-2017.csv'), header=T) -met_output_path <- file.path('/Volumes/data/Model_Data/sites/PA-SLZ/MAAT_drivers/') +met_path <- file.path("/Volumes/data/Model_Data/sites/PA-SLZ/NGEETropics_source/") +met_drivers <- read.csv(file = file.path(met_path, "SanLorenzo_Sherman_met_drivers_2008-2017.csv"), header = T) +met_output_path <- file.path("/Volumes/data/Model_Data/sites/PA-SLZ/MAAT_drivers/") ## Options site_name <- "PA-SLZ" @@ -32,23 +32,23 @@ names(met_drivers) head(met_drivers) date_range <- unique(lubridate::year(met_drivers$Date_UTC_start)) -met_yr_subset <- c(2015,2016) +met_yr_subset <- c(2015, 2016) MAAT_Time <- lubridate::as_datetime(met_drivers$Date_UTC_start, tz = "UTC") head(MAAT_Time) met_years <- lubridate::year(MAAT_Time) met_drivers$Time <- MAAT_Time -met_drivers$PAR_umols_m2_s <- met_drivers$SR..W.m2.*2.114 +met_drivers$PAR_umols_m2_s <- met_drivers$SR..W.m2. * 2.114 met_drivers$Tair_degC <- met_drivers$Ta..oC. met_drivers$RH_perc <- met_drivers$RH.... met_drivers$VPD_kPa <- PEcAn.data.atmosphere::get.vpd(met_drivers$RH...., met_drivers$Tair_degC) / 10 -met_drivers$Prec_mm <- met_drivers$Rain..mm.min.*30 # converting to mm per 30 min period +met_drivers$Prec_mm <- met_drivers$Rain..mm.min. 
* 30 # converting to mm per 30 min period # get additional variables -if (pressure){ - met_drivers$Press_Pa <- PEcAn.utils::ud_convert(met_drivers$BP_hPa, "mmHg", "Pa") # need to match this with source, when availible +if (pressure) { + met_drivers$Press_Pa <- PEcAn.utils::ud_convert(met_drivers$BP_hPa, "mmHg", "Pa") # need to match this with source, when availible } else { - met_drivers$Press_Pa <- rep(101325,length(met_drivers$Time)) + met_drivers$Press_Pa <- rep(101325, length(met_drivers$Time)) } if (wind) { @@ -56,8 +56,8 @@ if (wind) { } # subset -if (met_yr_subset[2]-met_yr_subset[1] != 0 ) { - met_driver_subset <- subset(met_drivers, met_years %in% seq(met_yr_subset[1], met_yr_subset[2],1)) +if (met_yr_subset[2] - met_yr_subset[1] != 0) { + met_driver_subset <- subset(met_drivers, met_years %in% seq(met_yr_subset[1], met_yr_subset[2], 1)) } else { met_driver_subset <- subset(met_drivers, met_years == met_yr_subset[1]) } @@ -65,38 +65,43 @@ met_years <- lubridate::year(met_driver_subset$Time) # finalize if (wind) { - output_met_driver <- cbind.data.frame(Time = met_driver_subset$Time, - Year = met_years, - DOY = lubridate::yday(met_driver_subset$Time), - Hour = strftime(met_driver_subset$Time,"%H:%M:%S", tz="UTC"), - Tair_degC = met_driver_subset$Tair_degC, - Prec_mm = met_driver_subset$Prec_mm, - Atm_press_Pa = met_driver_subset$Press_Pa, - RH_perc = met_driver_subset$RH_perc, - VPD_kPa = met_driver_subset$VPD_kPa, - PAR_umols_m2_s = met_driver_subset$PAR_umols_m2_s, - Windspeed_m_s = met_driver_subset$Windspeed_m_s - ) - - leaf_user_met_list <- list(leaf = list(env = list(time = "'Time'", temp = "'Tair_degC'", par = "'PAR_umols_m2_s'",vpd="'VPD_kPa'", - atm_press="'Atm_press_Pa'",wind="'Windspeed_m_s'"))) - + output_met_driver <- cbind.data.frame( + Time = met_driver_subset$Time, + Year = met_years, + DOY = lubridate::yday(met_driver_subset$Time), + Hour = strftime(met_driver_subset$Time, "%H:%M:%S", tz = "UTC"), + Tair_degC = met_driver_subset$Tair_degC, + Prec_mm = met_driver_subset$Prec_mm, + Atm_press_Pa = met_driver_subset$Press_Pa, + RH_perc = met_driver_subset$RH_perc, + VPD_kPa = met_driver_subset$VPD_kPa, + PAR_umols_m2_s = met_driver_subset$PAR_umols_m2_s, + Windspeed_m_s = met_driver_subset$Windspeed_m_s + ) + + leaf_user_met_list <- list(leaf = list(env = list( + time = "'Time'", temp = "'Tair_degC'", par = "'PAR_umols_m2_s'", vpd = "'VPD_kPa'", + atm_press = "'Atm_press_Pa'", wind = "'Windspeed_m_s'" + ))) } else { - output_met_driver <- cbind.data.frame(Time = met_driver_subset$Time, - Year = met_years, - DOY = lubridate::yday(met_driver_subset$Time), - Hour = strftime(met_driver_subset$Time,"%H:%M:%S", tz="UTC"), - Tair_degC = met_driver_subset$Tair_degC, - Prec_mm = met_driver_subset$Prec_mm, - Atm_press_Pa = met_driver_subset$Press_Pa, - RH_perc = met_driver_subset$RH_perc, - VPD_kPa = met_driver_subset$VPD_kPa, - PAR_umols_m2_s = met_driver_subset$PAR_umols_m2_s - ) - - leaf_user_met_list <- list(leaf = list(env = list(time = "'Time'", temp = "'Tair_degC'", - par = "'PAR_umols_m2_s'",vpd="'VPD_kPa'", - atm_press="'Atm_press_Pa'"))) + output_met_driver <- cbind.data.frame( + Time = met_driver_subset$Time, + Year = met_years, + DOY = lubridate::yday(met_driver_subset$Time), + Hour = strftime(met_driver_subset$Time, "%H:%M:%S", tz = "UTC"), + Tair_degC = met_driver_subset$Tair_degC, + Prec_mm = met_driver_subset$Prec_mm, + Atm_press_Pa = met_driver_subset$Press_Pa, + RH_perc = met_driver_subset$RH_perc, + VPD_kPa = met_driver_subset$VPD_kPa, + PAR_umols_m2_s = 
met_driver_subset$PAR_umols_m2_s + ) + + leaf_user_met_list <- list(leaf = list(env = list( + time = "'Time'", temp = "'Tair_degC'", + par = "'PAR_umols_m2_s'", vpd = "'VPD_kPa'", + atm_press = "'Atm_press_Pa'" + ))) } leaf_user_met_xml <- PEcAn.settings::listToXml(leaf_user_met_list, "met_data_translator") @@ -106,14 +111,18 @@ leaf_user_met_xml <- PEcAn.settings::listToXml(leaf_user_met_list, "met_data_tra #--------------------------------------------------------------------------------------------------# ### Create output met for MAAT -write.csv(output_met_driver, - file = file.path(met_output_path,paste0(site_name,"_NGEET_",met_yr_subset[1],"_", - met_yr_subset[2],"_UTC.csv")),row.names = F) +write.csv(output_met_driver, + file = file.path(met_output_path, paste0( + site_name, "_NGEET_", met_yr_subset[1], "_", + met_yr_subset[2], "_UTC.csv" + )), row.names = F +) ### output XML file PREFIX_XML <- "\n" XML::saveXML(leaf_user_met_xml, - file = file.path(met_output_path, "leaf_user_met.xml"), - indent = TRUE, - prefix = PREFIX_XML) + file = file.path(met_output_path, "leaf_user_met.xml"), + indent = TRUE, + prefix = PREFIX_XML +) #--------------------------------------------------------------------------------------------------# -### EOF \ No newline at end of file +### EOF diff --git a/models/maat/inst/generate_maat_met_drivers_ngee_pa-bar.R b/models/maat/inst/generate_maat_met_drivers_ngee_pa-bar.R index 98954ea46f8..0a75dee4c8c 100644 --- a/models/maat/inst/generate_maat_met_drivers_ngee_pa-bar.R +++ b/models/maat/inst/generate_maat_met_drivers_ngee_pa-bar.R @@ -11,9 +11,9 @@ #---------------- Close all devices and delete all variables. -------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files library(lubridate) #--------------------------------------------------------------------------------------------------# @@ -21,9 +21,9 @@ library(lubridate) #--------------------------------------------------------------------------------------------------# ### Load met driver data -met_path <- file.path('/Volumes/data/Model_Data/sites/PA-Bar/NGEETropics_source/') -met_drivers <- read.csv(file = file.path(met_path,'BCI_met_drivers_2003_2016.csv'), header=T) -met_output_path <- file.path('/Volumes/data/Model_Data/sites/PA-Bar/MAAT_drivers/') +met_path <- file.path("/Volumes/data/Model_Data/sites/PA-Bar/NGEETropics_source/") +met_drivers <- read.csv(file = file.path(met_path, "BCI_met_drivers_2003_2016.csv"), header = T) +met_output_path <- file.path("/Volumes/data/Model_Data/sites/PA-Bar/MAAT_drivers/") ## Options site_name <- "PA-Bar" @@ -37,24 +37,24 @@ wind <- TRUE names(met_drivers) head(met_drivers) -MAAT_Time <- lubridate::mdy_hm(as.character(met_drivers$Date_UTC_start), tz="UTC") +MAAT_Time <- lubridate::mdy_hm(as.character(met_drivers$Date_UTC_start), tz = "UTC") date_range <- unique(lubridate::year(MAAT_Time)) head(MAAT_Time) -met_yr_subset <- c(2015,2016) +met_yr_subset <- c(2015, 2016) met_years <- lubridate::year(MAAT_Time) met_drivers$Time <- MAAT_Time -met_drivers$PAR_umols_m2_s <- met_drivers$SR_W_m2.*2.114 +met_drivers$PAR_umols_m2_s <- met_drivers$SR_W_m2. * 2.114 met_drivers$Tair_degC <- met_drivers$Temp_o_C. met_drivers$RH_perc <- met_drivers$RH_. 
met_drivers$VPD_kPa <- PEcAn.data.atmosphere::get.vpd(met_drivers$RH_perc, met_drivers$Tair_degC) / 10 -met_drivers$Prec_mm <- met_drivers$RA_mm_d/24 # converting to mm per 1 hour period (met timestep hourly) +met_drivers$Prec_mm <- met_drivers$RA_mm_d / 24 # converting to mm per 1 hour period (met timestep hourly) # get additional variables -if (pressure){ - met_drivers$Press_Pa <- PEcAn.utils::ud_convert(met_drivers$BP_hPa, "mmHg", "Pa") # need to match this with source, when availible +if (pressure) { + met_drivers$Press_Pa <- PEcAn.utils::ud_convert(met_drivers$BP_hPa, "mmHg", "Pa") # need to match this with source, when availible } else { - met_drivers$Press_Pa <- rep(101325,length(met_drivers$Time)) # just use standard atmospheric pressure at sea level + met_drivers$Press_Pa <- rep(101325, length(met_drivers$Time)) # just use standard atmospheric pressure at sea level } if (wind) { @@ -62,8 +62,8 @@ if (wind) { } # subset -if (met_yr_subset[2]-met_yr_subset[1] != 0 ) { - met_driver_subset <- subset(met_drivers, met_years %in% seq(met_yr_subset[1], met_yr_subset[2],1)) +if (met_yr_subset[2] - met_yr_subset[1] != 0) { + met_driver_subset <- subset(met_drivers, met_years %in% seq(met_yr_subset[1], met_yr_subset[2], 1)) } else { met_driver_subset <- subset(met_drivers, met_years == met_yr_subset[1]) } @@ -72,38 +72,43 @@ met_years <- lubridate::year(met_driver_subset$Time) # finalize if (wind) { - output_met_driver <- cbind.data.frame(Time = met_driver_subset$Time, - Year = met_years, - DOY = lubridate::yday(met_driver_subset$Time), - Hour = strftime(met_driver_subset$Time,"%H:%M:%S", tz="UTC"), - Tair_degC = met_driver_subset$Tair_degC, - Prec_mm = met_driver_subset$Prec_mm, - Atm_press_Pa = met_driver_subset$Press_Pa, - RH_perc = met_driver_subset$RH_perc, - VPD_kPa = met_driver_subset$VPD_kPa, - PAR_umols_m2_s = met_driver_subset$PAR_umols_m2_s, - Windspeed_m_s = met_driver_subset$Windspeed_m_s + output_met_driver <- cbind.data.frame( + Time = met_driver_subset$Time, + Year = met_years, + DOY = lubridate::yday(met_driver_subset$Time), + Hour = strftime(met_driver_subset$Time, "%H:%M:%S", tz = "UTC"), + Tair_degC = met_driver_subset$Tair_degC, + Prec_mm = met_driver_subset$Prec_mm, + Atm_press_Pa = met_driver_subset$Press_Pa, + RH_perc = met_driver_subset$RH_perc, + VPD_kPa = met_driver_subset$VPD_kPa, + PAR_umols_m2_s = met_driver_subset$PAR_umols_m2_s, + Windspeed_m_s = met_driver_subset$Windspeed_m_s ) - - leaf_user_met_list <- list(leaf = list(env = list(time = "'Time'", temp = "'Tair_degC'", par = "'PAR_umols_m2_s'",vpd="'VPD_kPa'", - atm_press="'Atm_press_Pa'",wind="'Windspeed_m_s'"))) - + + leaf_user_met_list <- list(leaf = list(env = list( + time = "'Time'", temp = "'Tair_degC'", par = "'PAR_umols_m2_s'", vpd = "'VPD_kPa'", + atm_press = "'Atm_press_Pa'", wind = "'Windspeed_m_s'" + ))) } else { - output_met_driver <- cbind.data.frame(Time = met_driver_subset$Time, - Year = met_years, - DOY = lubridate::yday(met_driver_subset$Time), - Hour = strftime(met_driver_subset$Time,"%H:%M:%S", tz="UTC"), - Tair_degC = met_driver_subset$Tair_degC, - Prec_mm = met_driver_subset$Prec_mm, - Atm_press_Pa = met_driver_subset$Press_Pa, - RH_perc = met_driver_subset$RH_perc, - VPD_kPa = met_driver_subset$VPD_kPa, - PAR_umols_m2_s = met_driver_subset$PAR_umols_m2_s + output_met_driver <- cbind.data.frame( + Time = met_driver_subset$Time, + Year = met_years, + DOY = lubridate::yday(met_driver_subset$Time), + Hour = strftime(met_driver_subset$Time, "%H:%M:%S", tz = "UTC"), + Tair_degC = 
met_driver_subset$Tair_degC, + Prec_mm = met_driver_subset$Prec_mm, + Atm_press_Pa = met_driver_subset$Press_Pa, + RH_perc = met_driver_subset$RH_perc, + VPD_kPa = met_driver_subset$VPD_kPa, + PAR_umols_m2_s = met_driver_subset$PAR_umols_m2_s ) - - leaf_user_met_list <- list(leaf = list(env = list(time = "'Time'", temp = "'Tair_degC'", - par = "'PAR_umols_m2_s'",vpd="'VPD_kPa'", - atm_press="'Atm_press_Pa'"))) + + leaf_user_met_list <- list(leaf = list(env = list( + time = "'Time'", temp = "'Tair_degC'", + par = "'PAR_umols_m2_s'", vpd = "'VPD_kPa'", + atm_press = "'Atm_press_Pa'" + ))) } leaf_user_met_xml <- PEcAn.settings::listToXml(leaf_user_met_list, "met_data_translator") @@ -113,14 +118,18 @@ leaf_user_met_xml <- PEcAn.settings::listToXml(leaf_user_met_list, "met_data_tra #--------------------------------------------------------------------------------------------------# ### Create output met for MAAT -write.csv(output_met_driver, - file = file.path(met_output_path,paste0(site_name,"_NGEETropics_",met_yr_subset[1],"_", - met_yr_subset[2],"_UTC.csv")),row.names = F) +write.csv(output_met_driver, + file = file.path(met_output_path, paste0( + site_name, "_NGEETropics_", met_yr_subset[1], "_", + met_yr_subset[2], "_UTC.csv" + )), row.names = F +) ### output XML file PREFIX_XML <- "\n" XML::saveXML(leaf_user_met_xml, - file = file.path(met_output_path, "leaf_user_met.xml"), - indent = TRUE, - prefix = PREFIX_XML) + file = file.path(met_output_path, "leaf_user_met.xml"), + indent = TRUE, + prefix = PREFIX_XML +) #--------------------------------------------------------------------------------------------------# -### EOF \ No newline at end of file +### EOF diff --git a/models/maat/tests/testthat.R b/models/maat/tests/testthat.R index 2da241e0ec7..d237edf386e 100644 --- a/models/maat/tests/testthat.R +++ b/models/maat/tests/testthat.R @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html diff --git a/models/maat/tests/testthat/test.met2model.R b/models/maat/tests/testthat/test.met2model.R index 3b167ab1d3c..2a86a71af26 100644 --- a/models/maat/tests/testthat/test.met2model.R +++ b/models/maat/tests/testthat/test.met2model.R @@ -6,7 +6,8 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("Met conversion runs without error", { nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" diff --git a/models/maespa/R/met2model.MAESPA.R b/models/maespa/R/met2model.MAESPA.R index e24aa9d1c51..b381e81abbd 100755 --- a/models/maespa/R/met2model.MAESPA.R +++ b/models/maespa/R/met2model.MAESPA.R @@ -1,4 +1,3 @@ - # R Code to convert NetCDF CF met files into MAESPA met files ## If files already exist in 'Outfolder', the default function is NOT to overwrite them and only @@ -19,28 +18,31 @@ ##' @param ... 
further arguments, currently ignored ##' ##' @author Tony Gardella -met2model.MAESPA <- function(in.path, in.prefix, outfolder, start_date, end_date, +met2model.MAESPA <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, ...) { print("START met2model.MAESPA") start.date <- as.POSIXlt(start_date, tz = "GMT") end.date <- as.POSIXlt(end_date, tz = "GMT") out.file <- paste(in.prefix, - strptime(start.date, "%Y-%m-%d"), - strptime(end.date, "%Y-%m-%d"), - "dat", - sep = ".") + strptime(start.date, "%Y-%m-%d"), + strptime(end.date, "%Y-%m-%d"), + "dat", + sep = "." + ) out.file.full <- file.path(outfolder, out.file) - results <- data.frame(file = out.file.full, - host = PEcAn.remote::fqdn(), - mimetype = "text/plain", - formatname = "maespa.met", - startdate = start_date, - enddate = end_date, - dbfile.name = out.file, - stringsAsFactors = FALSE) + results <- data.frame( + file = out.file.full, + host = PEcAn.remote::fqdn(), + mimetype = "text/plain", + formatname = "maespa.met", + startdate = start_date, + enddate = end_date, + dbfile.name = out.file, + stringsAsFactors = FALSE + ) print("internal results") print(results) @@ -81,19 +83,19 @@ met2model.MAESPA <- function(in.path, in.prefix, outfolder, start_date, end_date # Check which variables are available and which are not ## extract variables - lat <- ncdf4::ncvar_get(nc, "latitude") - lon <- ncdf4::ncvar_get(nc, "longitude") - RAD <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") #W m-2 - PAR <- try(ncdf4::ncvar_get(nc, "surface_downwelling_photosynthetic_photon_flux_in_air")) #mol m-2 s-1 - TAIR <- ncdf4::ncvar_get(nc, "air_temperature") # K - QAIR <- ncdf4::ncvar_get(nc, "specific_humidity") # 1 - PPT <- ncdf4::ncvar_get(nc, "precipitation_flux") #kg m-2 s-1 - CA <- try(ncdf4::ncvar_get(nc, "mole_fraction_of_carbon_dioxide_in_air")) #mol/mol - PRESS <- ncdf4::ncvar_get(nc, "air_pressure") # Pa - + lat <- ncdf4::ncvar_get(nc, "latitude") + lon <- ncdf4::ncvar_get(nc, "longitude") + RAD <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") # W m-2 + PAR <- try(ncdf4::ncvar_get(nc, "surface_downwelling_photosynthetic_photon_flux_in_air")) # mol m-2 s-1 + TAIR <- ncdf4::ncvar_get(nc, "air_temperature") # K + QAIR <- ncdf4::ncvar_get(nc, "specific_humidity") # 1 + PPT <- ncdf4::ncvar_get(nc, "precipitation_flux") # kg m-2 s-1 + CA <- try(ncdf4::ncvar_get(nc, "mole_fraction_of_carbon_dioxide_in_air")) # mol/mol + PRESS <- ncdf4::ncvar_get(nc, "air_pressure") # Pa + ## Convert specific humidity to fractional relative humidity RH <- PEcAn.data.atmosphere::qair2rh(QAIR, TAIR, PRESS) - + ## Process PAR if (!is.numeric(PAR)) { # Function from data.atmosphere will convert SW to par in W/m2 @@ -133,8 +135,7 @@ met2model.MAESPA <- function(in.path, in.prefix, outfolder, start_date, end_date } else { out <- rbind(out, tmp) } - - } ### end loop over years + } ### end loop over years ### Check for NA if (anyNA(out)) { @@ -170,7 +171,7 @@ met2model.MAESPA <- function(in.path, in.prefix, outfolder, start_date, end_date ## Write output met.dat file metfile <- system.file("met.dat", package = "PEcAn.MAESPA") met.dat <- Maeswrap::replacemetdata(out, oldmetfile = metfile, newmetfile = out.file.full) - + Maeswrap::replacePAR(out.file.full, "difsky", "environ", newval = difsky, noquotes = TRUE) Maeswrap::replacePAR(out.file.full, "ca", "environ", newval = defaultCO2, noquotes = TRUE) Maeswrap::replacePAR(out.file.full, "lat", "latlong", newval = lat, noquotes = TRUE) @@ -180,6 
+181,6 @@ met2model.MAESPA <- function(in.path, in.prefix, outfolder, start_date, end_date Maeswrap::replacePAR(out.file.full, "startdate", "metformat", newval = startdate, noquotes = TRUE) Maeswrap::replacePAR(out.file.full, "enddate", "metformat", newval = enddate, noquotes = TRUE) Maeswrap::replacePAR(out.file.full, "columns", "metformat", newval = columnnames, noquotes = TRUE) - + return(invisible(results)) } # met2model.MAESPA diff --git a/models/maespa/R/model2netcdf.MAESPA.R b/models/maespa/R/model2netcdf.MAESPA.R index 65a29b71243..4b44997e30d 100755 --- a/models/maespa/R/model2netcdf.MAESPA.R +++ b/models/maespa/R/model2netcdf.MAESPA.R @@ -1,5 +1,5 @@ ##' Convert MAESPA output into the NACP Intercomparison format (ALMA using netCDF) -##' +##' ##' @name model2netcdf.MAESPA ##' @title Code to convert MAESPA's output into netCDF format ##' @@ -13,61 +13,62 @@ ##' ##' @author Tony Gardella model2netcdf.MAESPA <- function(outdir, sitelat, sitelon, start_date, end_date, stem_density) { - - ### Read in model output using Maeswrap. Dayflx.dat, watbalday.dat - dayflx.dataframe <- Maeswrap::readdayflux(filename = "Dayflx.dat") + dayflx.dataframe <- Maeswrap::readdayflux(filename = "Dayflx.dat") watbalday.dataframe <- Maeswrap::readwatbal(filename = "watbalday.dat") - + # moles of Carbon to kilograms mole2kg_C <- 0.0120107 # Seconds in a day secINday <- 60 * 60 * 24 - + ### Determine number of years and output timestep start_date <- as.POSIXlt(start_date, tz = "GMT") end_date <- as.POSIXlt(end_date, tz = "GMT") start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) years <- start_year:end_year - + for (y in years) { if (file.exists(file.path(outdir, paste(y, "nc", sep = ".")))) { next } - print(paste("---- Processing year: ", y)) # turn on for debugging - + print(paste("---- Processing year: ", y)) # turn on for debugging + ## Set up outputs for netCDF file in appropriate units output <- list() - output[[1]] <- (dayflx.dataframe$totPs) * mole2kg_C * stem_density # (GPP) gross photosynthesis. mol tree-1 d-1 -> kg C m-2 s-1 - output[[2]] <- (dayflx.dataframe$netPs) * mole2kg_C * stem_density # (NPP) photosynthesis net of foliar resp mol tree-1 d-1 -> kg C m-2 s-1 - output[[3]] <- (watbalday.dataframe$et)/secINday # (Tveg) modeled canopy transpiration mm -> kg m-2 s-1 - output[[4]] <- (watbalday.dataframe$qh) * 1e+06 # (Qh) Sensible heat flux MJ m-2 day-1 -> W m-2 - output[[5]] <- (watbalday.dataframe$qe) * 1e+06 # (Qle)latent Heat flux MJ m-2 day-1 -> W m-2 - + output[[1]] <- (dayflx.dataframe$totPs) * mole2kg_C * stem_density # (GPP) gross photosynthesis. 
mol tree-1 d-1 -> kg C m-2 s-1 + output[[2]] <- (dayflx.dataframe$netPs) * mole2kg_C * stem_density # (NPP) photosynthesis net of foliar resp mol tree-1 d-1 -> kg C m-2 s-1 + output[[3]] <- (watbalday.dataframe$et) / secINday # (Tveg) modeled canopy transpiration mm -> kg m-2 s-1 + output[[4]] <- (watbalday.dataframe$qh) * 1e+06 # (Qh) Sensible heat flux MJ m-2 day-1 -> W m-2 + output[[5]] <- (watbalday.dataframe$qe) * 1e+06 # (Qle)latent Heat flux MJ m-2 day-1 -> W m-2 + # ******************** Declare netCDF variables ********************# - t <- ncdf4::ncdim_def(name = "time", - units = paste0("days since ", y, "-01-01 00:00:00"), - vals = (dayflx.dataframe$DOY), - calendar = "standard", - unlim = TRUE) + t <- ncdf4::ncdim_def( + name = "time", + units = paste0("days since ", y, "-01-01 00:00:00"), + vals = (dayflx.dataframe$DOY), + calendar = "standard", + unlim = TRUE + ) lat <- ncdf4::ncdim_def("lat", "degrees_north", vals = as.numeric(sitelat), longname = "station_latitude") lon <- ncdf4::ncdim_def("lon", "degrees_east", vals = as.numeric(sitelon), longname = "station_longitude") - + for (i in seq_along(output)) { - if (length(output[[i]]) == 0) + if (length(output[[i]]) == 0) { output[[i]] <- rep(-999, length(t$vals)) + } } - + dims <- list(lon = lon, lat = lat, time = t) - - nc_var <- list() + + nc_var <- list() nc_var[[1]] <- PEcAn.utils::to_ncvar("GPP", dims) - nc_var[[2]] <- PEcAn.utils::to_ncvar("NPP",dims) + nc_var[[2]] <- PEcAn.utils::to_ncvar("NPP", dims) nc_var[[3]] <- PEcAn.utils::to_ncvar("TVeg", dims) nc_var[[4]] <- PEcAn.utils::to_ncvar("Qh", dims) nc_var[[5]] <- PEcAn.utils::to_ncvar("Qle", dims) - + ### Output netCDF data nc <- ncdf4::nc_create(file.path(outdir, paste(y, "nc", sep = ".")), nc_var) varfile <- file(file.path(outdir, paste(y, "nc", "var", sep = ".")), "w") @@ -78,6 +79,5 @@ model2netcdf.MAESPA <- function(outdir, sitelat, sitelon, start_date, end_date, } close(varfile) ncdf4::nc_close(nc) - } ### End of year loop - -} ### End of function + } ### End of year loop +} ### End of function diff --git a/models/maespa/R/write.config.MAESPA.R b/models/maespa/R/write.config.MAESPA.R index 6adfd849eea..cc357f34c44 100755 --- a/models/maespa/R/write.config.MAESPA.R +++ b/models/maespa/R/write.config.MAESPA.R @@ -12,15 +12,14 @@ ##' @return configuration file for MAESPA for given run ##' @export ##' @author Tony Gardella -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# write.config.MAESPA <- function(defaults, trait.values, settings, run.id) { - - if(!requireNamespace("Maeswrap", quietly = TRUE)){ + if (!requireNamespace("Maeswrap", quietly = TRUE)) { PEcAn.logger::logger.severe("The Maeswrap package is not installed. 
Please consult PEcAn documentation for install notes: https://pecanproject.github.io/pecan-documentation/latest/pecan-models.html#maespa") } - + # find out where to write run/ouput rundir <- file.path(settings$host$rundir, as.character(run.id)) outdir <- file.path(settings$host$outdir, as.character(run.id)) @@ -34,9 +33,8 @@ write.config.MAESPA <- function(defaults, trait.values, settings, run.id) { jobsh <- readLines(con = settings$model$jobtemplate, n = -1) } else { jobsh <- readLines(con = system.file("template.job", package = "PEcAn.MAESPA"), n = -1) - } - + # create host specific setttings hostsetup <- "" if (!is.null(settings$model$prerun)) { @@ -45,7 +43,7 @@ write.config.MAESPA <- function(defaults, trait.values, settings, run.id) { if (!is.null(settings$host$prerun)) { hostsetup <- paste(hostsetup, sep = "\n", paste(settings$host$prerun, collapse = "\n")) } - + hostteardown <- "" if (!is.null(settings$model$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$model$postrun, collapse = "\n")) @@ -53,97 +51,97 @@ write.config.MAESPA <- function(defaults, trait.values, settings, run.id) { if (!is.null(settings$host$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$host$postrun, collapse = "\n")) } - + # ------------------------------------------------------------------------------------ Begin # writing input Maespa files - + ## Change Start/end Dates to dy/mo/yr start_date <- format(strptime(settings$run$start.date, format = "%Y/%m/%d"), "%d/%m/%y") end_date <- format(strptime(settings$run$end.date, format = "%Y/%m/%d"), "%d/%m/%y") - + ### Load trait values param.file.name <- paste(rundir, "/", "Maespa_params.", run.id, ".Rdata", sep = "") maespa.params <- save(trait.values, file = param.file.name) load(param.file.name) - + params <- data.frame(trait.values) colnames <- c(names(trait.values[[1]])) colnames(params) <- colnames - + vcmax <- as.numeric(params["Vcmax"]) jmax <- as.numeric(params["Jmax"]) - + ## Set default plot as 30x30 meter plot with 100 trees xmax <- 30 ymax <- 30 notrees <- 100 - stem_density <- (xmax * ymax)/notrees - + stem_density <- (xmax * ymax) / notrees + ### Confile.dat - confile.path <- system.file("confile.dat", package = "PEcAn.MAESPA") - confile <- readLines(confile.path) + confile.path <- system.file("confile.dat", package = "PEcAn.MAESPA") + confile <- readLines(confile.path) confile.run.path <- file.path(settings$rundir, run.id, "confile.dat") writeLines(confile, con = confile.run.path) - + Maeswrap::replacePAR(confile.run.path, "itermax", "model", newval = 100, noquotes = TRUE) Maeswrap::replacePAR(confile.run.path, "itargets", "treescon", newval = 153, noquotes = TRUE) Maeswrap::replacePAR(confile.run.path, "startdate", "dates", newval = start_date) Maeswrap::replacePAR(confile.run.path, "enddate", "dates", newval = end_date) - + ### str.dat USING DEFAULT EXAMPLE VERSION RIGHT NOW AS IS - strfile.path <- system.file("str.dat", package = "PEcAn.MAESPA") - strfile <- readLines(strfile.path) + strfile.path <- system.file("str.dat", package = "PEcAn.MAESPA") + strfile <- readLines(strfile.path) strfile.run.path <- file.path(settings$rundir, run.id, "str.dat") writeLines(strfile, con = strfile.run.path) - + ### phy.dat - phyfile.path <- system.file("phy.dat", package = "PEcAn.MAESPA") - phyfile <- readLines(phyfile.path) + phyfile.path <- system.file("phy.dat", package = "PEcAn.MAESPA") + phyfile <- readLines(phyfile.path) phyfile.run.path <- file.path(settings$rundir, run.id, "phy.dat") writeLines(phyfile, 
con = phyfile.run.path) - + Maeswrap::replacePAR(phyfile.run.path, "values", "vcmax", newval = vcmax) Maeswrap::replacePAR(phyfile.run.path, "dates", "vcmax", newval = start_date) Maeswrap::replacePAR(phyfile.run.path, "values", "jmax", newval = jmax) Maeswrap::replacePAR(phyfile.run.path, "dates", "jmax", newval = start_date) - + ### trees.dat treesfile.path <- system.file("trees.dat", package = "PEcAn.MAESPA") treesfile <- readLines(treesfile.path) treesfile.run.path <- file.path(settings$rundir, run.id, "trees.dat") writeLines(treesfile, con = treesfile.run.path) - + Maeswrap::replacePAR(treesfile.run.path, "xmax", "plot", newval = xmax, noquotes = TRUE) Maeswrap::replacePAR(treesfile.run.path, "ymax", "plot", newval = ymax, noquotes = TRUE) Maeswrap::replacePAR(treesfile.run.path, "notrees", "plot", newval = notrees, noquotes = TRUE) - + ## watpar.dat watparsfile.path <- system.file("watpars.dat", package = "PEcAn.MAESPA") watparsfile <- readLines(watparsfile.path) watparsfile.run.path <- file.path(settings$rundir, run.id, "watpars.dat") writeLines(watparsfile, con = watparsfile.run.path) - + # MET FILE metdat <- settings$run$input$met$path - + #--------------------------------------------------------------------------------------------- # create job.sh jobsh <- gsub("@HOST_SETUP@", hostsetup, jobsh) jobsh <- gsub("@HOST_TEARDOWN@", hostteardown, jobsh) - + jobsh <- gsub("@SITE_LAT@", settings$run$site$lat, jobsh) jobsh <- gsub("@SITE_LON@", settings$run$site$lon, jobsh) jobsh <- gsub("@SITE_MET@", metdat, jobsh) jobsh <- gsub("@STEM_DENS@", stem_density, jobsh) - + jobsh <- gsub("@START_DATE@", settings$run$start.date, jobsh) jobsh <- gsub("@END_DATE@", settings$run$end.date, jobsh) - + jobsh <- gsub("@OUTDIR@", outdir, jobsh) jobsh <- gsub("@RUNDIR@", rundir, jobsh) - + jobsh <- gsub("@BINARY@", settings$model$binary, jobsh) - + writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) } # write.config.MAESPA diff --git a/models/maespa/tests/testthat.R b/models/maespa/tests/testthat.R index 78aacd1ba29..a14e1033b14 100755 --- a/models/maespa/tests/testthat.R +++ b/models/maespa/tests/testthat.R @@ -2,4 +2,4 @@ library(testthat) library(PEcAn.utils) PEcAn.logger::logger.setQuitOnSevere(FALSE) -# test_check('PEcAn.MAESPA') +# test_check('PEcAn.MAESPA') diff --git a/models/maespa/tests/testthat/test.met2model.R b/models/maespa/tests/testthat/test.met2model.R index c89cb6ade66..a2c330983c4 100644 --- a/models/maespa/tests/testthat/test.met2model.R +++ b/models/maespa/tests/testthat/test.met2model.R @@ -6,7 +6,8 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("Met conversion runs without error", { nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" diff --git a/models/preles/R/runPRELES.jobsh.R b/models/preles/R/runPRELES.jobsh.R index db3dbc8f46d..2a2e3b55af9 100644 --- a/models/preles/R/runPRELES.jobsh.R +++ b/models/preles/R/runPRELES.jobsh.R @@ -12,171 +12,175 @@ #' @export #' @author Tony Gardella, Michael Dietze runPRELES.jobsh <- function(met.file, outdir, parameters, sitelat, sitelon, start.date, end.date) { - if (!requireNamespace("Rpreles", quietly = TRUE)) { PEcAn.logger::logger.severe( "The Rpreles package is not installed. 
- Please execute- devtools::install_github('MikkoPeltoniemi/Rpreles')") + Please execute- devtools::install_github('MikkoPeltoniemi/Rpreles')" + ) } - + # Process start and end dates start_date <- as.POSIXlt(start.date, tz = "UTC") end_date <- as.POSIXlt(end.date, tz = "UTC") - + start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - - timestep.s <- PEcAn.utils::ud_convert(1, "day", "seconds") # Number of seconds in a day - + + timestep.s <- PEcAn.utils::ud_convert(1, "day", "seconds") # Number of seconds in a day + ## Build met met <- NULL for (year in start_year:end_year) { - met.file.y <- paste(met.file, year, "nc", sep = ".") - + if (file.exists(met.file.y)) { - ## Open netcdf file nc <- ncdf4::nc_open(met.file.y) - + ## convert time to seconds sec <- nc$dim$time$vals sec <- PEcAn.utils::ud_convert(sec, unlist(strsplit(nc$dim$time$units, " "))[1], "seconds") - + ## build day and year - + dt <- PEcAn.utils::seconds_in_year(year) / length(sec) - tstep <- round(timestep.s / dt) #time steps per day - + tstep <- round(timestep.s / dt) # time steps per day + diy <- PEcAn.utils::days_in_year(year) doy <- rep(seq_len(diy), each = tstep)[seq_along(sec)] - + ## Get variables from netcdf file - SW <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") # SW in W/m2 - Tair <- ncdf4::ncvar_get(nc, "air_temperature") # air temperature in K - Precip <- ncdf4::ncvar_get(nc, "precipitation_flux") # precipitation in kg/m2s1 - CO2 <- try(ncdf4::ncvar_get(nc, "mole_fraction_of_carbon_dioxide_in_air")) # mol/mol - SH <- ncdf4::ncvar_get(nc, "specific_humidity") - lat <- ncdf4::ncvar_get(nc, "latitude") - lon <- ncdf4::ncvar_get(nc, "longitude") - + SW <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") # SW in W/m2 + Tair <- ncdf4::ncvar_get(nc, "air_temperature") # air temperature in K + Precip <- ncdf4::ncvar_get(nc, "precipitation_flux") # precipitation in kg/m2s1 + CO2 <- try(ncdf4::ncvar_get(nc, "mole_fraction_of_carbon_dioxide_in_air")) # mol/mol + SH <- ncdf4::ncvar_get(nc, "specific_humidity") + lat <- ncdf4::ncvar_get(nc, "latitude") + lon <- ncdf4::ncvar_get(nc, "longitude") + ncdf4::nc_close(nc) - + ## Check for CO2 and PAR if (!is.numeric(CO2)) { - PEcAn.logger::logger.warn("CO2 not found. Setting to default: 4.0e+8 mol/mol") # using rough estimate of atmospheric CO2 levels + PEcAn.logger::logger.warn("CO2 not found. 
Setting to default: 4.0e+8 mol/mol") # using rough estimate of atmospheric CO2 levels CO2 <- rep(4e+08, length(Precip)) } - + ## GET VPD from Saturated humidity and Air Temperature RH <- PEcAn.data.atmosphere::qair2rh(SH, Tair) VPD <- PEcAn.data.atmosphere::get.vpd(RH, Tair) - - VPD <- VPD * 0.01 # convert to Pascal - + + VPD <- VPD * 0.01 # convert to Pascal + ## Get PPFD from SW - PPFD <- PEcAn.data.atmosphere::sw2ppfd(SW) # PPFD in umol/m2/s + PPFD <- PEcAn.data.atmosphere::sw2ppfd(SW) # PPFD in umol/m2/s PPFD <- PEcAn.utils::ud_convert(PPFD, "umol m-2 s-1", "mol m-2 s-1") - + ## Format/convert inputs - ppfd <- tapply(PPFD, doy, mean, na.rm = TRUE) # Find the mean for the day - tair <- PEcAn.utils::ud_convert(tapply(Tair, doy, mean, na.rm = TRUE), "kelvin", "celsius") # Convert Kelvin to Celcius - vpd <- PEcAn.utils::ud_convert(tapply(VPD, doy, mean, na.rm = TRUE), "Pa", "kPa") # pascal to kila pascal - precip <- tapply(Precip, doy, sum, na.rm = TRUE) # Sum to daily precipitation - co2 <- tapply(CO2, doy, mean) # need daily average, so sum up day - co2 <- co2 * 1e+06 # convert to ppm - doy <- tapply(doy, doy, mean) # day of year - fapar <- rep(0.6, length = length(doy)) # For now set to 0.6. Needs to be between 0-1 - + ppfd <- tapply(PPFD, doy, mean, na.rm = TRUE) # Find the mean for the day + tair <- PEcAn.utils::ud_convert(tapply(Tair, doy, mean, na.rm = TRUE), "kelvin", "celsius") # Convert Kelvin to Celcius + vpd <- PEcAn.utils::ud_convert(tapply(VPD, doy, mean, na.rm = TRUE), "Pa", "kPa") # pascal to kila pascal + precip <- tapply(Precip, doy, sum, na.rm = TRUE) # Sum to daily precipitation + co2 <- tapply(CO2, doy, mean) # need daily average, so sum up day + co2 <- co2 * 1e+06 # convert to ppm + doy <- tapply(doy, doy, mean) # day of year + fapar <- rep(0.6, length = length(doy)) # For now set to 0.6. 
Needs to be between 0-1 + ## Bind inputs tmp <- cbind(ppfd, tair, vpd, precip, co2, fapar) tmp[is.na(tmp)] <- 0 met <- rbind(met, tmp) - } ## end file exists - } ## end met process - + } ## end file exists + } ## end met process + param.def <- rep(NA, 30) - - #PARAMETER DEFAULT LIST - ##GPP_MODEL_PARAMETERS - #1.soildepth 413.0 |2.ThetaFC 0.450 | 3.ThetaPWP 0.118 |4.tauDrainage 3 - #5.betaGPP 0.748018 |6.tauGPP 13.23383 |7.S0GPP -3.9657867 |8.SmaxGPP 18.76696 - #9.kappaGPP -0.130473 |10.gammaGPP 0.034459 |11.soilthresGPP 0.450828 |12.cmCO2 2000 - #13.ckappaCO2 0.4 - ##EVAPOTRANSPIRATION_PARAMETERS - #14.betaET 0.324463 |15.kappaET 0.874151 |16.chiET 0.075601 |17.soilthresE 0.541605 - #18.nu ET 0.273584 - ##SNOW_RAIN_PARAMETERS - #19.Meltcoef 1.2 |20.I_0 0.33 |21.CWmax 4.970496 |22.SnowThreshold 0 - #23.T_0 0 - ##START INITIALISATION PARAMETERS - #24.SWinit 200 |25.CWinit 0 |26.SOGinit 0 |27.Sinit 20 - #28.t0 fPheno_start_date_Tsum_accumulation; conif -999, for birch 57 - #29.tcrit -999 fPheno_start_date_Tsum_Tthreshold, 1.5 birch - #30.tsumcrit -999 fPheno_budburst_Tsum, 134 birch - + + # PARAMETER DEFAULT LIST + ## GPP_MODEL_PARAMETERS + # 1.soildepth 413.0 |2.ThetaFC 0.450 | 3.ThetaPWP 0.118 |4.tauDrainage 3 + # 5.betaGPP 0.748018 |6.tauGPP 13.23383 |7.S0GPP -3.9657867 |8.SmaxGPP 18.76696 + # 9.kappaGPP -0.130473 |10.gammaGPP 0.034459 |11.soilthresGPP 0.450828 |12.cmCO2 2000 + # 13.ckappaCO2 0.4 + ## EVAPOTRANSPIRATION_PARAMETERS + # 14.betaET 0.324463 |15.kappaET 0.874151 |16.chiET 0.075601 |17.soilthresE 0.541605 + # 18.nu ET 0.273584 + ## SNOW_RAIN_PARAMETERS + # 19.Meltcoef 1.2 |20.I_0 0.33 |21.CWmax 4.970496 |22.SnowThreshold 0 + # 23.T_0 0 + ## START INITIALISATION PARAMETERS + # 24.SWinit 200 |25.CWinit 0 |26.SOGinit 0 |27.Sinit 20 + # 28.t0 fPheno_start_date_Tsum_accumulation; conif -999, for birch 57 + # 29.tcrit -999 fPheno_start_date_Tsum_Tthreshold, 1.5 birch + # 30.tsumcrit -999 fPheno_budburst_Tsum, 134 birch + ## Replace default with sampled parameters param_objs <- new.env() load(parameters, envir = param_objs) params <- data.frame(param_objs$trait.values) colnames(params) <- names(param_objs$trait.values[[1]]) - + param.def[5] <- as.numeric(params["bGPP"]) param.def[9] <- as.numeric(params["kGPP"]) - + ## Run PRELES - PRELES.output <- as.data.frame(Rpreles::PRELES(PAR = tmp[, "ppfd"], - TAir = tmp[, "tair"], - VPD = tmp[, "vpd"], - Precip = tmp[, "precip"], - CO2 = tmp[, "co2"], - fAPAR = tmp[, "fapar"], - p = param.def)) + PRELES.output <- as.data.frame(Rpreles::PRELES( + PAR = tmp[, "ppfd"], + TAir = tmp[, "tair"], + VPD = tmp[, "vpd"], + Precip = tmp[, "precip"], + CO2 = tmp[, "co2"], + fAPAR = tmp[, "fapar"], + p = param.def + )) PRELES.output.dims <- dim(PRELES.output) - + days <- as.Date(start_date):as.Date(end_date) year <- strftime(as.Date(days, origin = "1970-01-01"), "%Y") years <- unique(year) num.years <- length(years) - + for (y in years) { - if (file.exists(file.path(outdir, paste(y)))) + if (file.exists(file.path(outdir, paste(y)))) { next + } print(paste("----Processing year: ", y)) - + sub.PRELES.output <- subset(PRELES.output, years == y) sub.PRELES.output.dims <- dim(sub.PRELES.output) - + output <- list() - output[[1]] <- PEcAn.utils::ud_convert(sub.PRELES.output[, 1], 'g m-2 day-1', 'kg m-2 sec-1') #GPP - gC/m2day to kgC/m2s1 - output[[2]] <- (sub.PRELES.output[, 2])/timestep.s #Evapotranspiration - mm =kg/m2 - output[[3]] <- (sub.PRELES.output[, 3])/timestep.s #Soilmoisture - mm = kg/m2 - output[[6]] <- (sub.PRELES.output[, 6])/timestep.s 
#Evaporation - mm = kg/m2 - output[[7]] <- (sub.PRELES.output[, 7])/timestep.s #transpiration - mm = kg/m2 - - t <- ncdf4::ncdim_def(name = "time", - units = paste0("days since", y, "-01-01 00:00:00"), - vals = 1:nrow(sub.PRELES.output), - calendar = "standard", - unlim = TRUE) - + output[[1]] <- PEcAn.utils::ud_convert(sub.PRELES.output[, 1], "g m-2 day-1", "kg m-2 sec-1") # GPP - gC/m2day to kgC/m2s1 + output[[2]] <- (sub.PRELES.output[, 2]) / timestep.s # Evapotranspiration - mm =kg/m2 + output[[3]] <- (sub.PRELES.output[, 3]) / timestep.s # Soilmoisture - mm = kg/m2 + output[[6]] <- (sub.PRELES.output[, 6]) / timestep.s # Evaporation - mm = kg/m2 + output[[7]] <- (sub.PRELES.output[, 7]) / timestep.s # transpiration - mm = kg/m2 + + t <- ncdf4::ncdim_def( + name = "time", + units = paste0("days since", y, "-01-01 00:00:00"), + vals = 1:nrow(sub.PRELES.output), + calendar = "standard", + unlim = TRUE + ) + lat <- ncdf4::ncdim_def("lat", "degrees_east", vals = as.numeric(sitelat), longname = "station_longitude") lon <- ncdf4::ncdim_def("lat", "degrees_north", vals = as.numeric(sitelon), longname = "station_longitude") - + for (i in seq_along(output)) { - if (length(output[[i]]) == 0) + if (length(output[[i]]) == 0) { output[[i]] <- rep(-999, length(t$vals)) + } } - + dims <- list(lon = lon, lat = lat, time = t) - - nc_var <- list() - nc_var[[1]] <- PEcAn.utils::to_ncvar("GPP",dims) + + nc_var <- list() + nc_var[[1]] <- PEcAn.utils::to_ncvar("GPP", dims) nc_var[[2]] <- ncdf4::ncvar_def("Evapotranspiration", "kg/m2s1", list(lon, lat, t), -999) nc_var[[3]] <- PEcAn.utils::to_ncvar("SoilMoist", dims) nc_var[[4]] <- PEcAn.utils::to_ncvar("Evap", dims) nc_var[[5]] <- PEcAn.utils::to_ncvar("TVeg", dims) - + nc <- ncdf4::nc_create(file.path(outdir, paste(y, "nc", sep = ".")), nc_var) varfile <- file(file.path(outdir, paste(y, "nc", "var", sep = ".")), "w") for (i in seq_along(nc_var)) { diff --git a/models/preles/R/write.config.PRELES.R b/models/preles/R/write.config.PRELES.R index 42e8433c241..b8dc65b9cfd 100644 --- a/models/preles/R/write.config.PRELES.R +++ b/models/preles/R/write.config.PRELES.R @@ -10,29 +10,29 @@ ##' @export ##' @author Tony Gardella, Micheal Dietze write.config.PRELES <- function(defaults, trait.values, settings, run.id) { - # find out where to write run/ouput rundir <- file.path(settings$host$rundir, run.id) outdir <- file.path(settings$host$outdir, run.id) - + ### Define PARAMETERS filename <- paste(rundir, "/", "PRELES_params.", run.id, ".Rdata", sep = "") preles.params <- save(trait.values, file = filename) - + #----------------------------------------------------------------------- - + ### WRITE JOB.SH - jobsh <- paste0("#!/bin/bash\n", - 'echo "', - ' library(PEcAn.PRELES); runPRELES.jobsh(', - "'",settings$run$inputs$met$path,"',", - "'",outdir,"',", - "'",filename,"',", - "'",settings$run$site$lat,"',", - "'",settings$run$site$lon,"',", - "'",settings$run$start.date,"',", - "'",settings$run$end.date,"') ", - '" | R --vanilla' + jobsh <- paste0( + "#!/bin/bash\n", + 'echo "', + " library(PEcAn.PRELES); runPRELES.jobsh(", + "'", settings$run$inputs$met$path, "',", + "'", outdir, "',", + "'", filename, "',", + "'", settings$run$site$lat, "',", + "'", settings$run$site$lon, "',", + "'", settings$run$start.date, "',", + "'", settings$run$end.date, "') ", + '" | R --vanilla' ) writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) diff --git a/models/preles/tests/testthat/test.met2model.R 
b/models/preles/tests/testthat/test.met2model.R index 96e06426d95..f0035be7fa1 100644 --- a/models/preles/tests/testthat/test.met2model.R +++ b/models/preles/tests/testthat/test.met2model.R @@ -7,7 +7,8 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("Met conversion runs without error", { skip("Not implemented") nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" diff --git a/models/sibcasa/R/datasets.R b/models/sibcasa/R/datasets.R index 525189048d6..5dc2bab175f 100644 --- a/models/sibcasa/R/datasets.R +++ b/models/sibcasa/R/datasets.R @@ -1,9 +1,9 @@ #' Output variables for SIBCASA #' #' TODO: Provide a complete description here of the dataset, -#' including its origin and purpose. +#' including its origin and purpose. #' The column descriptions below were written by someone looking at the file -#' with no insight into its usage +#' with no insight into its usage #' #' @docType data #' @keywords datasets diff --git a/models/sibcasa/R/met2model.SIBCASA.R b/models/sibcasa/R/met2model.SIBCASA.R index 5dc81323f70..15d3d0bb939 100644 --- a/models/sibcasa/R/met2model.SIBCASA.R +++ b/models/sibcasa/R/met2model.SIBCASA.R @@ -1,4 +1,3 @@ - #' Write SIBCASA met files #' #' Converts a met CF file to a sibcasa specific met file. The input diff --git a/models/sibcasa/R/model2netcdf.SIBCASA.R b/models/sibcasa/R/model2netcdf.SIBCASA.R index 236b705599c..3896bf455d8 100644 --- a/models/sibcasa/R/model2netcdf.SIBCASA.R +++ b/models/sibcasa/R/model2netcdf.SIBCASA.R @@ -1,4 +1,3 @@ - #-------------------------------------------------------------------------------------------------# #' Convert SIBCASA output into the NACP Intercomparison format (ALMA using netCDF) #' @@ -11,7 +10,6 @@ #' #' @author Tony Gardella model2netcdf.SIBCASA <- function(outdir, sitelat, sitelon, start_date, end_date) { - ## Get Files file_list <- dir(outdir, "hsib", all.files = TRUE) years <- sub(".qp2.nc", "", sub("hsib.", "", file_list)) diff --git a/models/sibcasa/R/write.config.SIBCASA.R b/models/sibcasa/R/write.config.SIBCASA.R index 1b390c4dee7..d3ddf46836a 100644 --- a/models/sibcasa/R/write.config.SIBCASA.R +++ b/models/sibcasa/R/write.config.SIBCASA.R @@ -1,4 +1,3 @@ - #-------------------------------------------------------------------------------------------------# #' Writes a SIBCASA config file. 
#' @@ -13,8 +12,6 @@ #' @export #' @author Anthony Gardella, Rob Kooper write.config.SIBCASA <- function(defaults, trait.values, settings, run.id) { - - # find out where to write run/ouput rundir <- file.path(settings$host$rundir, run.id) outdir <- file.path(settings$host$outdir, run.id) diff --git a/models/sibcasa/data/sibcasa_output_vars.R b/models/sibcasa/data/sibcasa_output_vars.R index 28e05b792a8..7515dd7349e 100644 --- a/models/sibcasa/data/sibcasa_output_vars.R +++ b/models/sibcasa/data/sibcasa_output_vars.R @@ -1,2 +1 @@ - sibcasa_output_vars <- utils::read.csv("sibcasa_output_vars.csv") diff --git a/models/sibcasa/tests/testthat.R b/models/sibcasa/tests/testthat.R index b82fe6345ad..bad835edb43 100644 --- a/models/sibcasa/tests/testthat.R +++ b/models/sibcasa/tests/testthat.R @@ -1,4 +1,4 @@ library(testthat) PEcAn.logger::logger.setQuitOnSevere(FALSE) -#test_check("PEcAn.ModelName") +# test_check("PEcAn.ModelName") diff --git a/models/sibcasa/tests/testthat/test-configs.R b/models/sibcasa/tests/testthat/test-configs.R index c1d68bdb62b..6d1f9d4829a 100644 --- a/models/sibcasa/tests/testthat/test-configs.R +++ b/models/sibcasa/tests/testthat/test-configs.R @@ -1,6 +1,4 @@ - test_that("write.configs produces a job.sh", { - run_id <- "12345" path <- withr::local_file(file.path(tempdir(), "sibcasa_test")) @@ -11,12 +9,15 @@ test_that("write.configs produces a job.sh", { site = list(lat = 45, lon = 90, met = "foo"), start.date = as.Date("2010-01-01"), end.date = as.Date("2015-01-01"), - inputs = list(met = list(path = file.path(path, "met")))), + inputs = list(met = list(path = file.path(path, "met"))) + ), host = list( outdir = file.path(path, "out"), - rundir = file.path(path, "run")), + rundir = file.path(path, "run") + ), model = list(binary = "bar"), - rundir = file.path(path, "run")) + rundir = file.path(path, "run") + ) expect_silent(write.config.SIBCASA(settings = settings, run.id = run_id)) expect_true(file.exists(file.path(path, "run", run_id, "job.sh"))) diff --git a/models/sipnet/R/met2model.SIPNET.R b/models/sipnet/R/met2model.SIPNET.R index d1c57084318..c699cd444ab 100644 --- a/models/sipnet/R/met2model.SIPNET.R +++ b/models/sipnet/R/met2model.SIPNET.R @@ -20,21 +20,18 @@ ##' @author Luke Dramko, Michael Dietze, Alexey Shiklomanov, Rob Kooper met2model.SIPNET <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, year.fragment = FALSE, ...) { - - - PEcAn.logger::logger.info("START met2model.SIPNET") start_date <- as.POSIXlt(start_date, tz = "UTC") end_date <- as.POSIXlt(end_date, tz = "UTC") if (year.fragment) { # in.prefix is not guaranteed to contain the file extension. escaped <- gsub("(\\W)", "\\\\\\1", in.prefix) # The file name may contain special characters that could mess up the regular expression. - matching_files <- grep(escaped, list.files(in.path), value=TRUE) + matching_files <- grep(escaped, list.files(in.path), value = TRUE) if (length(matching_files) == 0) { PEcAn.logger::logger.severe(paste0("No files found matching ", in.prefix, "; cannot process data.")) } - + # This function is supposed to process netcdf files, so we'll search for files with the extension .nc and use those first. 
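# A minimal sketch of the escaping idiom used just above: every non-word
# character in the prefix gets a leading backslash so grep() treats it
# literally instead of as a regex metacharacter. The prefix value here is a
# hypothetical example, not one taken from this patch.
in.prefix <- "CRUNCEP.site-1"
escaped <- gsub("(\\W)", "\\\\\\1", in.prefix)
escaped                                      # "CRUNCEP\\.site\\-1" as printed by R
grepl(escaped, "CRUNCEP.site-1.2000.nc")     # TRUE: "." and "-" now match only themselves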
- nc_file = grep("\\.nc$", matching_files) + nc_file <- grep("\\.nc$", matching_files) if (length(nc_file) > 0) { if (grepl("\\.nc$", in.prefix)) { out.file <- sub("\\.nc$", ".clim", in.prefix) @@ -49,36 +46,39 @@ met2model.SIPNET <- function(in.path, in.prefix, outfolder, start_date, end_date } } else { # Default behavior out.file <- paste(in.prefix, strptime(start_date, "%Y-%m-%d"), - strptime(end_date, "%Y-%m-%d"), - "clim", - sep = ".") + strptime(end_date, "%Y-%m-%d"), + "clim", + sep = "." + ) } - + out.file.full <- file.path(outfolder, out.file) - - results <- data.frame(file = out.file.full, - host = PEcAn.remote::fqdn(), - mimetype = "text/csv", - formatname = "Sipnet.climna", - startdate = start_date, - enddate = end_date, - dbfile.name = out.file, - stringsAsFactors = FALSE) + + results <- data.frame( + file = out.file.full, + host = PEcAn.remote::fqdn(), + mimetype = "text/csv", + formatname = "Sipnet.climna", + startdate = start_date, + enddate = end_date, + dbfile.name = out.file, + stringsAsFactors = FALSE + ) PEcAn.logger::logger.info("internal results") PEcAn.logger::logger.info(results) - + if (file.exists(out.file.full) && !overwrite) { PEcAn.logger::logger.debug("File '", out.file.full, "' already exists, skipping to next file.") return(invisible(results)) } - + ## check to see if the outfolder is defined, if not create directory for output if (!file.exists(outfolder)) { dir.create(outfolder) } - + out <- NULL - + # get start/end year since inputs are specified on year basis # only if year.fragment = FALSE start_year <- lubridate::year(start_date) @@ -88,67 +88,65 @@ met2model.SIPNET <- function(in.path, in.prefix, outfolder, start_date, end_date } else { end_year <- lubridate::year(end_date) } - + ## loop over files for (year in start_year:end_year) { - skip <- FALSE PEcAn.logger::logger.info(year) - + diy <- PEcAn.utils::days_in_year(year) - + if (!year.fragment) { # default behavior old.file <- file.path(in.path, paste(in.prefix, year, "nc", sep = ".")) } else { # Use the supplied file name old.file <- file.path(in.path, in.prefix) } - + if (file.exists(old.file)) { ## open netcdf - nc <- ncdf4::nc_open(old.file) - + nc <- ncdf4::nc_open(old.file) + ## convert time to seconds sec <- nc$dim$time$vals sec <- PEcAn.utils::ud_convert(sec, unlist(strsplit(nc$dim$time$units, " "))[1], "seconds") - - # Calculate the delta time. If using whole-year data, the appropriate length in seconds is + + # Calculate the delta time. If using whole-year data, the appropriate length in seconds is # fetched; otherwise, it is assumed that the length of time provided in the time dimension of # the input file is correct. 
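# A worked example of the timestep arithmetic below, assuming a hypothetical
# six-hourly driver file covering the leap year 2000 (values chosen for
# illustration only):
sec_len <- 366 * 4                    # 1464 records in the year
dt      <- (366 * 86400) / sec_len    # 21600 s between records
tstep   <- round(86400 / dt)          # 4 timesteps per day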
if (year.fragment) { - dt <- mean(diff(sec), na.rm=TRUE) - + dt <- mean(diff(sec), na.rm = TRUE) } else { dt <- PEcAn.utils::seconds_in_year(year) / length(sec) } tstep <- round(86400 / dt) dt <- 86400 / tstep - + ## extract variables lat <- ncdf4::ncvar_get(nc, "latitude") lon <- ncdf4::ncvar_get(nc, "longitude") - Tair <-ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin + Tair <- ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin Tair_C <- PEcAn.utils::ud_convert(Tair, "K", "degC") - Qair <-ncdf4::ncvar_get(nc, "specific_humidity") #humidity (kg/kg) + Qair <- ncdf4::ncvar_get(nc, "specific_humidity") # humidity (kg/kg) ws <- try(ncdf4::ncvar_get(nc, "wind_speed")) if (!is.numeric(ws)) { U <- ncdf4::ncvar_get(nc, "eastward_wind") V <- ncdf4::ncvar_get(nc, "northward_wind") - ws <- sqrt(U ^ 2 + V ^ 2) + ws <- sqrt(U^2 + V^2) PEcAn.logger::logger.info("wind_speed absent; calculated from eastward_wind and northward_wind") } - + Rain <- ncdf4::ncvar_get(nc, "precipitation_flux") - - press <- ncdf4::ncvar_get(nc,'air_pressure') ## in pascal - SW <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") ## in W/m2 - - PAR <- try(ncdf4::ncvar_get(nc, "surface_downwelling_photosynthetic_photon_flux_in_air")) ## in umol/m2/s + press <- ncdf4::ncvar_get(nc, "air_pressure") ## in pascal + + SW <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") ## in W/m2 + + PAR <- try(ncdf4::ncvar_get(nc, "surface_downwelling_photosynthetic_photon_flux_in_air")) ## in umol/m2/s if (!is.numeric(PAR)) { PAR <- PEcAn.utils::ud_convert(PEcAn.data.atmosphere::sw2ppfd(SW), "umol ", "mol") PEcAn.logger::logger.info("surface_downwelling_photosynthetic_photon_flux_in_air absent; PAR set to SW * 0.45") } - + soilT <- try(ncdf4::ncvar_get(nc, "soil_temperature")) if (!is.numeric(soilT)) { # approximation borrowed from SIPNET CRUNCEP preprocessing's tsoil.py @@ -161,25 +159,24 @@ met2model.SIPNET <- function(in.path, in.prefix, outfolder, start_date, end_date } else { soilT <- PEcAn.utils::ud_convert(soilT, "K", "degC") } - - SVP <- PEcAn.utils::ud_convert(PEcAn.data.atmosphere::get.es(Tair_C), "millibar", "Pa") ## Saturation vapor pressure - VPD <- try(ncdf4::ncvar_get(nc, "water_vapor_saturation_deficit")) ## in Pa - if (!is.numeric(VPD)) { - VPD <- SVP * (1 - PEcAn.data.atmosphere::qair2rh(Qair, Tair_C, press = press/100)) + SVP <- PEcAn.utils::ud_convert(PEcAn.data.atmosphere::get.es(Tair_C), "millibar", "Pa") ## Saturation vapor pressure + VPD <- try(ncdf4::ncvar_get(nc, "water_vapor_saturation_deficit")) ## in Pa + if (!is.numeric(VPD)) { + VPD <- SVP * (1 - PEcAn.data.atmosphere::qair2rh(Qair, Tair_C, press = press / 100)) PEcAn.logger::logger.info("water_vapor_saturation_deficit absent; VPD calculated from Qair, Tair, and SVP (saturation vapor pressure) ") } e_a <- SVP - VPD VPDsoil <- PEcAn.utils::ud_convert(PEcAn.data.atmosphere::get.es(soilT), "millibar", "Pa") * - (1 - PEcAn.data.atmosphere::qair2rh(Qair, soilT, press/100)) - + (1 - PEcAn.data.atmosphere::qair2rh(Qair, soilT, press / 100)) + ncdf4::nc_close(nc) } else { PEcAn.logger::logger.info("Skipping to next year") next } - + ## build time variables (year, month, day of year) nyr <- floor(length(sec) / 86400 / 365 * dt) yr <- NULL @@ -216,98 +213,102 @@ met2model.SIPNET <- function(in.path, in.prefix, outfolder, start_date, end_date } yr[rng] <- rep(y + 1, length(rng)) doy[rng] <- rep(1:366, each = 86400 / dt)[1:length(rng)] - hr[rng] <- rep(seq(0, length = 86400 / dt, by = dt/86400 * 24), 366)[1:length(rng)] + hr[rng] <- 
rep(seq(0, length = 86400 / dt, by = dt / 86400 * 24), 366)[1:length(rng)] } if (skip) { PEcAn.logger::logger.info("Skipping to next year") next } - + ## 0 YEAR DAY HOUR TIMESTEP AirT SoilT PAR PRECIP VPD VPD_Soil AirVP(e_a) WIND SoilM build data ## matrix n <- length(Tair) - tmp <- cbind(rep(0, n), - yr[1:n], - doy[1:n], - hr[1:n], - rep(dt / 86400, n), - Tair_C, - soilT, - PAR * dt, # mol/m2/hr - Rain * dt, # converts from mm/s to mm - VPD, - VPDsoil, - e_a, - ws, # wind - rep(0.6, n)) # put soil water at a constant. Don't use, set SIPNET to MODEL_WATER = 1 - + tmp <- cbind( + rep(0, n), + yr[1:n], + doy[1:n], + hr[1:n], + rep(dt / 86400, n), + Tair_C, + soilT, + PAR * dt, # mol/m2/hr + Rain * dt, # converts from mm/s to mm + VPD, + VPDsoil, + e_a, + ws, # wind + rep(0.6, n) + ) # put soil water at a constant. Don't use, set SIPNET to MODEL_WATER = 1 + ## quick error check, sometimes get a NA in the last hr hr.na <- which(is.na(tmp[, 4])) if (length(hr.na) > 0) { - tmp[hr.na, 4] <- tmp[hr.na - 1, 4] + dt/86400 * 24 + tmp[hr.na, 4] <- tmp[hr.na - 1, 4] + dt / 86400 * 24 } - + ## filter out days not included in start or end date if not a year fragment. (This procedure would be nonsensible for a year ## fragment, as it would filter out all of the days.) - if(year == start_year && !year.fragment){ - extra.days <- length(as.Date(paste0(start_year, "-01-01")):as.Date(start_date)) #extra days length includes the start date - if (extra.days > 1){ + if (year == start_year && !year.fragment) { + extra.days <- length(as.Date(paste0(start_year, "-01-01")):as.Date(start_date)) # extra days length includes the start date + if (extra.days > 1) { PEcAn.logger::logger.info("Subsetting SIPNET met to match start date") - start.row <- ((extra.days - 1) * 86400 / dt) + 1 #subtract to include start.date, add to exclude last half hour of day before - tmp <- tmp[start.row:nrow(tmp),] + start.row <- ((extra.days - 1) * 86400 / dt) + 1 # subtract to include start.date, add to exclude last half hour of day before + tmp <- tmp[start.row:nrow(tmp), ] } } - if (year == end_year && !year.fragment){ - if(year == start_year){ - extra.days <- length(as.Date(start_date):as.Date(end_date)) - if (extra.days > 1){ + if (year == end_year && !year.fragment) { + if (year == start_year) { + extra.days <- length(as.Date(start_date):as.Date(end_date)) + if (extra.days > 1) { PEcAn.logger::logger.info("Subsetting SIPNET met to match end date") - end.row <- extra.days * 86400 / dt #subtract to include end.date - tmp <- tmp[1:end.row,] + end.row <- extra.days * 86400 / dt # subtract to include end.date + tmp <- tmp[1:end.row, ] } - } else{ - extra.days <- length(as.Date(end_date):as.Date(paste0(end_year, "-12-31"))) #extra days length includes the end date - if (extra.days > 1){ + } else { + extra.days <- length(as.Date(end_date):as.Date(paste0(end_year, "-12-31"))) # extra days length includes the end date + if (extra.days > 1) { PEcAn.logger::logger.info("Subsetting SIPNET met to match end date") - end.row <- nrow(tmp) - ((extra.days - 1) * 86400 / dt) #subtract to include end.date - tmp <- tmp[1:end.row,] + end.row <- nrow(tmp) - ((extra.days - 1) * 86400 / dt) # subtract to include end.date + tmp <- tmp[1:end.row, ] } } } - - if(year.fragment){ #gets correct DOY for fragmented years - - doy.start.index <- which(doy == lubridate::yday(start_date)) #which part of full doy set matches the start and end date - doy.end.index <- which(doy == lubridate::yday(end_date)) - #need to use the start and end time to figure out how many time 
steps to include in the doy subset - doy.start <- doy.start.index[ifelse(lubridate::hour(start_date) == 0, 1, lubridate::hour(start_date) / (24 / (86400 / dt)))] - doy.end <- doy.end.index[ifelse(lubridate::hour(end_date) == 0, 1, lubridate::hour(end_date) / (24 / (86400 / dt)))] - #check to see if doy matches with downloaded data dims, if not last time is removed - if(length(doy) != n){d<-doy[doy.start:(doy.end - 1)] }else{d<-(doy[doy.start:(doy.end)]) } - - if(year.fragment){ #gets correct DOY for fragmented years using start date, time since start date and end date - doy.seq <- as.Date(seq(from = start_date + sec[1], to = end_date, length.out = length(sec))) - doy <- as.numeric(strftime(doy.seq, format = "%j")) #starts with 1 on 1-01 - #doy.start <- length(as.Date(paste0(start_year, "-01-01")):as.Date(start_date)) * (86400 / dt) + 1 #subtract to include start.date, add to exclude last half hour of day before - #doy.end <- length(as.Date(paste0(start_year, "-01-01")):as.Date(end_date)) * (86400 / dt) - #doy <- doy[doy.start:doy.end] - year <- as.numeric(strftime(doy.seq, format = "%Y")) - tmp[,3] <- doy - tmp[,2] <- year + + if (year.fragment) { # gets correct DOY for fragmented years + + doy.start.index <- which(doy == lubridate::yday(start_date)) # which part of full doy set matches the start and end date + doy.end.index <- which(doy == lubridate::yday(end_date)) + # need to use the start and end time to figure out how many time steps to include in the doy subset + doy.start <- doy.start.index[ifelse(lubridate::hour(start_date) == 0, 1, lubridate::hour(start_date) / (24 / (86400 / dt)))] + doy.end <- doy.end.index[ifelse(lubridate::hour(end_date) == 0, 1, lubridate::hour(end_date) / (24 / (86400 / dt)))] + # check to see if doy matches with downloaded data dims, if not last time is removed + if (length(doy) != n) { + d <- doy[doy.start:(doy.end - 1)] + } else { + d <- (doy[doy.start:(doy.end)]) + } + + if (year.fragment) { # gets correct DOY for fragmented years using start date, time since start date and end date + doy.seq <- as.Date(seq(from = start_date + sec[1], to = end_date, length.out = length(sec))) + doy <- as.numeric(strftime(doy.seq, format = "%j")) # starts with 1 on 1-01 + # doy.start <- length(as.Date(paste0(start_year, "-01-01")):as.Date(start_date)) * (86400 / dt) + 1 #subtract to include start.date, add to exclude last half hour of day before + # doy.end <- length(as.Date(paste0(start_year, "-01-01")):as.Date(end_date)) * (86400 / dt) + # doy <- doy[doy.start:doy.end] + year <- as.numeric(strftime(doy.seq, format = "%Y")) + tmp[, 3] <- doy + tmp[, 2] <- year + } } - } - + if (is.null(out)) { out <- tmp } else { out <- rbind(out, tmp) } - - } ## end loop over years - + } ## end loop over years + if (!is.null(out)) { - ## write output utils::write.table(out, out.file.full, quote = FALSE, sep = "\t", row.names = FALSE, col.names = FALSE) return(invisible(results)) @@ -315,4 +316,4 @@ met2model.SIPNET <- function(in.path, in.prefix, outfolder, start_date, end_date PEcAn.logger::logger.info("NO MET TO OUTPUT") return(invisible(NULL)) } -} # met2model.SIPNET \ No newline at end of file +} # met2model.SIPNET diff --git a/models/sipnet/R/model2netcdf.SIPNET.R b/models/sipnet/R/model2netcdf.SIPNET.R index 438a5c43de3..47207b42c1a 100644 --- a/models/sipnet/R/model2netcdf.SIPNET.R +++ b/models/sipnet/R/model2netcdf.SIPNET.R @@ -1,60 +1,63 @@ #' Merge multiple NetCDF files into one -#' +#' #' @param files \code{character}. List of filepaths, which should lead to NetCDF files. 
#' @param outfile \code{character}. Output filename of the merged data. #' @return A NetCDF file containing all of the merged data. #' @examples #' \dontrun{ -#' files <- list.files(paste0(system.file(package="processNC"), "/extdata"), -#' pattern="tas.*\\.nc", full.names=TRUE) -#' temp <- tempfile(fileext=".nc") -#' mergeNC(files=files, outfile=temp) -#' terra::rast(temp) +#' files <- list.files(paste0(system.file(package = "processNC"), "/extdata"), +#' pattern = "tas.*\\.nc", full.names = TRUE +#' ) +#' temp <- tempfile(fileext = ".nc") +#' mergeNC(files = files, outfile = temp) +#' terra::rast(temp) #' } #' @export mergeNC #' @name mergeNC #' @source https://github.com/RS-eco/processNC/blob/main/R/mergeNC.R mergeNC <- function( - ##title<< Aggregate data in netCDF files - files ##<< character vector: names of the files to merge - , outfile ##<< character: path to save the results files to. -) - ##description<< - ## This function aggregates time periods in netCDF files. Basically it is just a - ## wrapper around the respective cdo function. + ## title<< Aggregate data in netCDF files + files ## << character vector: names of the files to merge + , outfile ## << character: path to save the results files to. + ) +## description<< +## This function aggregates time periods in netCDF files. Basically it is just a +## wrapper around the respective cdo function. { - ##test input - #if (system("cdo -V")==0) + ## test input + # if (system("cdo -V")==0) # stop('cdo not found. Please install it.') - + ## supply cdo command - cdoCmd <- paste('cdo -cat', paste(files, collapse=" "), outfile, sep=' ') - - ##run command + cdoCmd <- paste("cdo -cat", paste(files, collapse = " "), outfile, sep = " ") + + ## run command system(cdoCmd) - cat(paste('Created file ', outfile, '.\n', sep = '')) - - ## character string: name of the file created. + cat(paste("Created file ", outfile, ".\n", sep = "")) + + ## character string: name of the file created. invisible(outfile) } #--------------------------------------------------------------------------------------------------# ##' ##' Convert SIPNET DOY to datetime -##' +##' ##' @param sipnet_tval vector of SIPNET DOY values ##' @param base_year base year to calculate datetime from DOY -##' @param base_month reference month for converting from DOY to datetime +##' @param base_month reference month for converting from DOY to datetime ##' @param force_cf force output to follow CF convention. 
Default FALSE ##' ##' @export ##' ##' @author Alexey Shiklomanov, Shawn Serbin -##' +##' sipnet2datetime <- function(sipnet_tval, base_year, base_month = 1, force_cf = FALSE) { - base_date <- ISOdatetime(base_year, base_month, 1, - 0, 0, 0, "UTC") + base_date <- ISOdatetime( + base_year, base_month, 1, + 0, 0, 0, "UTC" + ) base_date_str <- strftime(base_date, "%F %T %z", tz = "UTC") if (force_cf) { is_cf <- TRUE @@ -63,13 +66,13 @@ sipnet2datetime <- function(sipnet_tval, base_year, base_month = 1, # Is CF if first time step is zero is_cf <- sipnet_tval[[1]] == 0 } - + if (is_cf) { cfval <- sipnet_tval } else { cfval <- sipnet_tval - 1 } - + PEcAn.utils::cf2datetime(cfval, paste("days since", base_date_str)) } @@ -93,17 +96,16 @@ sipnet2datetime <- function(sipnet_tval, base_year, base_month = 1, ##' @author Shawn Serbin, Michael Dietze model2netcdf.SIPNET <- function(outdir, sitelat, sitelon, start_date, end_date, delete.raw = FALSE, revision, prefix = "sipnet.out", overwrite = FALSE, conflict = FALSE) { - ### Read in model output in SIPNET format sipnet_out_file <- file.path(outdir, prefix) sipnet_output <- utils::read.table(sipnet_out_file, header = T, skip = 1, sep = "") - #sipnet_output_dims <- dim(sipnet_output) + # sipnet_output_dims <- dim(sipnet_output) ### Determine number of years and output timestep - #start.day <- sipnet_output$day[1] + # start.day <- sipnet_output$day[1] num_years <- length(unique(sipnet_output$year)) simulation_years <- unique(sipnet_output$year) - + # get all years that we want data from year_seq <- seq(lubridate::year(start_date), lubridate::year(end_date)) @@ -115,75 +117,76 @@ model2netcdf.SIPNET <- function(outdir, sitelat, sitelon, start_date, end_date, # get number of model timesteps per day # outday is the number of time steps in a day - for example 6 hours would have out_day of 4 - out_day <- sum( - sipnet_output$year == simulation_years[1] & - sipnet_output$day == unique(sipnet_output$day)[1], - na.rm = TRUE - ) # switched to day 2 in case first day is partial - + out_day <- sum( + sipnet_output$year == simulation_years[1] & + sipnet_output$day == unique(sipnet_output$day)[1], + na.rm = TRUE + ) # switched to day 2 in case first day is partial + timestep.s <- 86400 / out_day - - + + ### Loop over years in SIPNET output to create separate netCDF outputs for (y in year_seq) { - #initialize the conflicted as FALSE + # initialize the conflicted as FALSE conflicted <- FALSE - conflict <- TRUE #conflict is set to TRUE to enable the rename of yearly nc file for merging SDA results with sub-annual data - #if we have conflicts on this file. + conflict <- TRUE # conflict is set to TRUE to enable the rename of yearly nc file for merging SDA results with sub-annual data + # if we have conflicts on this file. 
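# A small usage sketch of sipnet2datetime(), defined above and used a few
# lines below to decide whether SIPNET's day column is 0-based (CF) or
# 1-based. SIPNET normally reports day-of-year starting at 1, so a non-zero
# first value triggers the shift to a 0-based index before cf2datetime() is
# called; the expected datetimes are shown as comments.
sipnet2datetime(c(1, 1.5, 2), base_year = 2004)
# expected: 2004-01-01 00:00, 2004-01-01 12:00, 2004-01-02 00:00 (UTC)
sipnet2datetime(c(0, 0.5), base_year = 2004, force_cf = TRUE)
# expected: 2004-01-01 00:00, 2004-01-01 12:00 (UTC)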
if (file.exists(file.path(outdir, paste(y, "nc", sep = "."))) & overwrite == FALSE & conflict == FALSE) { next - }else if(file.exists(file.path(outdir, paste(y, "nc", sep = "."))) & conflict){ + } else if (file.exists(file.path(outdir, paste(y, "nc", sep = "."))) & conflict) { conflicted <- TRUE file.rename(file.path(outdir, paste(y, "nc", sep = ".")), file.path(outdir, "previous.nc")) } - print(paste("---- Processing year: ", y)) # turn on for debugging + print(paste("---- Processing year: ", y)) # turn on for debugging ## Subset data for processing sub.sipnet.output <- subset(sipnet_output, year == y) sub.sipnet.output.dims <- dim(sub.sipnet.output) dayfrac <- 1 / out_day - step <- utils::head(seq(0, 1, by = dayfrac), -1) ## probably dont want to use - ## hard-coded "step" because - ## leap years may not contain - ## all "steps", or - ## if model run doesnt start - ## at 00:00:00 - + step <- utils::head(seq(0, 1, by = dayfrac), -1) ## probably dont want to use + ## hard-coded "step" because + ## leap years may not contain + ## all "steps", or + ## if model run doesnt start + ## at 00:00:00 + # try to determine if DOY is CF compliant (i.e. 0 based index) or not (1 base index) - pecan_start_doy <- PEcAn.utils::datetime2cf(start_date, paste0("days since ",lubridate::year(start_date),"-01-01"), - tz = "UTC") + pecan_start_doy <- PEcAn.utils::datetime2cf(start_date, paste0("days since ", lubridate::year(start_date), "-01-01"), + tz = "UTC" + ) tvals <- sub.sipnet.output[["day"]] + sub.sipnet.output[["time"]] / 24 - if (sub.sipnet.output[["day"]][1]-pecan_start_doy==1) { + if (sub.sipnet.output[["day"]][1] - pecan_start_doy == 1) { sub_dates <- sipnet2datetime(tvals, y, force_cf = FALSE) } else { sub_dates <- sipnet2datetime(tvals, y, force_cf = TRUE) } - sub_dates_cf <- PEcAn.utils::datetime2cf(sub_dates, paste0("days since ",paste0(y,"-01-01"))) + sub_dates_cf <- PEcAn.utils::datetime2cf(sub_dates, paste0("days since ", paste0(y, "-01-01"))) # create netCDF time.bounds variable - bounds <- array(data=NA, dim=c(length(sub_dates_cf),2)) - bounds[,1] <- sub_dates_cf - bounds[,2] <- bounds[,1]+dayfrac + bounds <- array(data = NA, dim = c(length(sub_dates_cf), 2)) + bounds[, 1] <- sub_dates_cf + bounds[, 2] <- bounds[, 1] + dayfrac # create time bounds for each timestep in t, t+1; t+1, t+2... format - bounds <- round(bounds,4) - + bounds <- round(bounds, 4) + ## Setup outputs for netCDF file in appropriate units - output <- list( - "GPP" = (sub.sipnet.output$gpp * 0.001) / timestep.s, # GPP in kgC/m2/s + output <- list( + "GPP" = (sub.sipnet.output$gpp * 0.001) / timestep.s, # GPP in kgC/m2/s "NPP" = (sub.sipnet.output$gpp * 0.001) / timestep.s - ((sub.sipnet.output$rAboveground * - 0.001) / timestep.s + (sub.sipnet.output$rRoot * 0.001) / timestep.s), # NPP in kgC/m2/s. Post SIPNET calculation - "TotalResp" = (sub.sipnet.output$rtot * 0.001) / timestep.s, # Total Respiration in kgC/m2/s + 0.001) / timestep.s + (sub.sipnet.output$rRoot * 0.001) / timestep.s), # NPP in kgC/m2/s. 
Post SIPNET calculation + "TotalResp" = (sub.sipnet.output$rtot * 0.001) / timestep.s, # Total Respiration in kgC/m2/s "AutoResp" = (sub.sipnet.output$rAboveground * 0.001) / timestep.s + (sub.sipnet.output$rRoot * - 0.001) / timestep.s, # Autotrophic Respiration in kgC/m2/s - "HeteroResp" = ((sub.sipnet.output$rSoil - sub.sipnet.output$rRoot) * 0.001) / timestep.s, # Heterotrophic Respiration in kgC/m2/s - "SoilResp" = (sub.sipnet.output$rSoil * 0.001) / timestep.s, # Soil Respiration in kgC/m2/s - "NEE" = (sub.sipnet.output$nee * 0.001) / timestep.s, # NEE in kgC/m2/s - "AbvGrndWood" = (sub.sipnet.output$plantWoodC * 0.001), # Above ground wood kgC/m2 - "leaf_carbon_content" = (sub.sipnet.output$plantLeafC * 0.001), # Leaf C kgC/m2 - "TotLivBiom" = (sub.sipnet.output$plantWoodC * 0.001) + (sub.sipnet.output$plantLeafC * 0.001) + - (sub.sipnet.output$coarseRootC + sub.sipnet.output$fineRootC) * 0.001, # Total living C kgC/m2 - "TotSoilCarb" = (sub.sipnet.output$soil * 0.001) + (sub.sipnet.output$litter * 0.001) # Total soil C kgC/m2 + 0.001) / timestep.s, # Autotrophic Respiration in kgC/m2/s + "HeteroResp" = ((sub.sipnet.output$rSoil - sub.sipnet.output$rRoot) * 0.001) / timestep.s, # Heterotrophic Respiration in kgC/m2/s + "SoilResp" = (sub.sipnet.output$rSoil * 0.001) / timestep.s, # Soil Respiration in kgC/m2/s + "NEE" = (sub.sipnet.output$nee * 0.001) / timestep.s, # NEE in kgC/m2/s + "AbvGrndWood" = (sub.sipnet.output$plantWoodC * 0.001), # Above ground wood kgC/m2 + "leaf_carbon_content" = (sub.sipnet.output$plantLeafC * 0.001), # Leaf C kgC/m2 + "TotLivBiom" = (sub.sipnet.output$plantWoodC * 0.001) + (sub.sipnet.output$plantLeafC * 0.001) + + (sub.sipnet.output$coarseRootC + sub.sipnet.output$fineRootC) * 0.001, # Total living C kgC/m2 + "TotSoilCarb" = (sub.sipnet.output$soil * 0.001) + (sub.sipnet.output$litter * 0.001) # Total soil C kgC/m2 ) if (revision == "unk") { ## *** NOTE : npp in the sipnet output file is actually evapotranspiration, this is due to a bug in sipnet.c : *** @@ -191,51 +194,64 @@ model2netcdf.SIPNET <- function(outdir, sitelat, sitelon, start_date, end_date, ## evapotranspiration in SIPNET is cm^3 water per cm^2 of area, to convert it to latent heat units W/m2 multiply with : ## 0.01 (cm2m) * 1000 (water density, kg m-3) * latent heat of vaporization (J kg-1) ## latent heat of vaporization is not constant and it varies slightly with temperature, get.lv() returns 2.5e6 J kg-1 by default - output[["Qle"]] <- (sub.sipnet.output$npp * 10 * PEcAn.data.atmosphere::get.lv()) / timestep.s # Qle W/m2 + output[["Qle"]] <- (sub.sipnet.output$npp * 10 * PEcAn.data.atmosphere::get.lv()) / timestep.s # Qle W/m2 } else { - output[["Qle"]] <- (sub.sipnet.output$evapotranspiration * 10 * PEcAn.data.atmosphere::get.lv()) / timestep.s # Qle W/m2 + output[["Qle"]] <- (sub.sipnet.output$evapotranspiration * 10 * PEcAn.data.atmosphere::get.lv()) / timestep.s # Qle W/m2 } - output[["Transp"]] <- (sub.sipnet.output$fluxestranspiration * 10) / timestep.s # Transpiration kgW/m2/s - output[["SoilMoist"]] <- (sub.sipnet.output$soilWater * 10) # Soil moisture kgW/m2 - output[["SoilMoistFrac"]] <- (sub.sipnet.output$soilWetnessFrac) # Fractional soil wetness - output[["SWE"]] <- (sub.sipnet.output$snow * 10) # SWE - output[["litter_carbon_content"]] <- sub.sipnet.output$litter * 0.001 ## litter kgC/m2 + output[["Transp"]] <- (sub.sipnet.output$fluxestranspiration * 10) / timestep.s # Transpiration kgW/m2/s + output[["SoilMoist"]] <- (sub.sipnet.output$soilWater * 10) # Soil moisture 
kgW/m2 + output[["SoilMoistFrac"]] <- (sub.sipnet.output$soilWetnessFrac) # Fractional soil wetness + output[["SWE"]] <- (sub.sipnet.output$snow * 10) # SWE + output[["litter_carbon_content"]] <- sub.sipnet.output$litter * 0.001 ## litter kgC/m2 output[["litter_mass_content_of_water"]] <- (sub.sipnet.output$litterWater * 10) # Litter water kgW/m2 - #calculate LAI for standard output - param <- utils::read.table(file.path(gsub(pattern = "/out/", - replacement = "/run/", x = outdir), - "sipnet.param"), stringsAsFactors = FALSE) + # calculate LAI for standard output + param <- utils::read.table(file.path( + gsub( + pattern = "/out/", + replacement = "/run/", x = outdir + ), + "sipnet.param" + ), stringsAsFactors = FALSE) id <- which(param[, 1] == "leafCSpWt") leafC <- 0.48 - SLA <- 1000 / param[id, 2] #SLA, m2/kgC + SLA <- 1000 / param[id, 2] # SLA, m2/kgC output[["LAI"]] <- output[["leaf_carbon_content"]] * SLA # LAI - output[["fine_root_carbon_content"]] <- sub.sipnet.output$fineRootC * 0.001 ## fine_root_carbon_content kgC/m2 - output[["coarse_root_carbon_content"]] <- sub.sipnet.output$coarseRootC * 0.001 ## coarse_root_carbon_content kgC/m2 + output[["fine_root_carbon_content"]] <- sub.sipnet.output$fineRootC * 0.001 ## fine_root_carbon_content kgC/m2 + output[["coarse_root_carbon_content"]] <- sub.sipnet.output$coarseRootC * 0.001 ## coarse_root_carbon_content kgC/m2 output[["GWBI"]] <- (sub.sipnet.output$woodCreation * 0.001) / 86400 ## kgC/m2/s - this is daily in SIPNET output[["AGB"]] <- (sub.sipnet.output$plantWoodC + sub.sipnet.output$plantLeafC) * 0.001 # Total aboveground biomass kgC/m2 - output[["time_bounds"]] <- c(rbind(bounds[,1], bounds[,2])) - + output[["time_bounds"]] <- c(rbind(bounds[, 1], bounds[, 2])) + # ******************** Declare netCDF variables ********************# - t <- ncdf4::ncdim_def(name = "time", - longname = "time", - units = paste0("days since ", y, "-01-01 00:00:00"), - vals = sub_dates_cf, - calendar = "standard", - unlim = TRUE) - lat <- ncdf4::ncdim_def("lat", "degrees_north", vals = as.numeric(sitelat), - longname = "station_latitude") - lon <- ncdf4::ncdim_def("lon", "degrees_east", vals = as.numeric(sitelon), - longname = "station_longitude") + t <- ncdf4::ncdim_def( + name = "time", + longname = "time", + units = paste0("days since ", y, "-01-01 00:00:00"), + vals = sub_dates_cf, + calendar = "standard", + unlim = TRUE + ) + lat <- ncdf4::ncdim_def("lat", "degrees_north", + vals = as.numeric(sitelat), + longname = "station_latitude" + ) + lon <- ncdf4::ncdim_def("lon", "degrees_east", + vals = as.numeric(sitelon), + longname = "station_longitude" + ) dims <- list(lon = lon, lat = lat, time = t) - time_interval <- ncdf4::ncdim_def(name = "hist_interval", - longname="history time interval endpoint dimensions", - vals = 1:2, units="") - + time_interval <- ncdf4::ncdim_def( + name = "hist_interval", + longname = "history time interval endpoint dimensions", + vals = 1:2, units = "" + ) + ## ***** Need to dynamically update the UTC offset here ***** for (i in seq_along(output)) { - if (length(output[[i]]) == 0) + if (length(output[[i]]) == 0) { output[[i]] <- rep(-999, length(t$vals)) + } } # ******************** Declare netCDF variables ********************# @@ -246,8 +262,10 @@ model2netcdf.SIPNET <- function(outdir, sitelat, sitelon, start_date, end_date, "TotalResp" = PEcAn.utils::to_ncvar("TotalResp", dims), "AutoResp" = PEcAn.utils::to_ncvar("AutoResp", dims), "HeteroResp" = PEcAn.utils::to_ncvar("HeteroResp", dims), - "SoilResp" = 
ncdf4::ncvar_def("SoilResp", units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, - longname = "Soil Respiration"), #need to figure out standard variable for this output + "SoilResp" = ncdf4::ncvar_def("SoilResp", + units = "kg C m-2 s-1", dim = list(lon, lat, t), missval = -999, + longname = "Soil Respiration" + ), # need to figure out standard variable for this output "NEE" = PEcAn.utils::to_ncvar("NEE", dims), "AbvGrndWood" = PEcAn.utils::to_ncvar("AbvGrndWood", dims), "leaf_carbon_content" = PEcAn.utils::to_ncvar("leaf_carbon_content", dims), @@ -263,20 +281,26 @@ model2netcdf.SIPNET <- function(outdir, sitelat, sitelon, start_date, end_date, "LAI" = PEcAn.utils::to_ncvar("LAI", dims), "fine_root_carbon_content" = PEcAn.utils::to_ncvar("fine_root_carbon_content", dims), "coarse_root_carbon_content" = PEcAn.utils::to_ncvar("coarse_root_carbon_content", dims), - "GWBI" = ncdf4::ncvar_def("GWBI", units = "kg C m-2", dim = list(lon, lat, t), missval = -999, - longname = "Gross Woody Biomass Increment"), - "AGB" = ncdf4::ncvar_def("AGB", units = "kg C m-2", dim = list(lon, lat, t), missval = -999, - longname = "Total aboveground biomass"), - "time_bounds" = ncdf4::ncvar_def(name="time_bounds", units='', - longname = "history time interval endpoints", dim=list(time_interval,time = t), - prec = "double") + "GWBI" = ncdf4::ncvar_def("GWBI", + units = "kg C m-2", dim = list(lon, lat, t), missval = -999, + longname = "Gross Woody Biomass Increment" + ), + "AGB" = ncdf4::ncvar_def("AGB", + units = "kg C m-2", dim = list(lon, lat, t), missval = -999, + longname = "Total aboveground biomass" + ), + "time_bounds" = ncdf4::ncvar_def( + name = "time_bounds", units = "", + longname = "history time interval endpoints", dim = list(time_interval, time = t), + prec = "double" + ) ) - + # ******************** Create netCDF and output variables ********************# ### Output netCDF data - if(conflicted & conflict){ - nc <- ncdf4::nc_create(file.path(outdir, paste("current", "nc", sep = ".")), nc_var) - ncdf4::ncatt_put(nc, "time", "bounds", "time_bounds", prec=NA) + if (conflicted & conflict) { + nc <- ncdf4::nc_create(file.path(outdir, paste("current", "nc", sep = ".")), nc_var) + ncdf4::ncatt_put(nc, "time", "bounds", "time_bounds", prec = NA) varfile <- file(file.path(outdir, paste(y, "nc", "var", sep = ".")), "w") for (key in names(nc_var)) { ncdf4::ncvar_put(nc, nc_var[[key]], output[[key]]) @@ -284,24 +308,24 @@ model2netcdf.SIPNET <- function(outdir, sitelat, sitelon, start_date, end_date, } close(varfile) ncdf4::nc_close(nc) - - #merge nc files of the same year together to enable the assimilation of sub-annual data - if(file.exists(file.path(outdir, "previous.nc"))){ + + # merge nc files of the same year together to enable the assimilation of sub-annual data + if (file.exists(file.path(outdir, "previous.nc"))) { files <- c(file.path(outdir, "previous.nc"), file.path(outdir, "current.nc")) - }else{ + } else { files <- file.path(outdir, "current.nc") } mergeNC(files = files, outfile = file.path(outdir, paste(y, "nc", sep = "."))) - #The command "cdo" in mergeNC will automatically rename "time_bounds" to "time_bnds". However, "time_bounds" is used - #in read_restart codes later. So we need to read the new NetCDF file and convert the variable name back. 
- nc<- ncdf4::nc_open(file.path(outdir, paste(y, "nc", sep = ".")),write=TRUE) - nc<-ncdf4::ncvar_rename(nc,"time_bnds","time_bounds") - ncdf4::ncatt_put(nc, "time", "bounds","time_bounds", prec=NA) + # The command "cdo" in mergeNC will automatically rename "time_bounds" to "time_bnds". However, "time_bounds" is used + # in read_restart codes later. So we need to read the new NetCDF file and convert the variable name back. + nc <- ncdf4::nc_open(file.path(outdir, paste(y, "nc", sep = ".")), write = TRUE) + nc <- ncdf4::ncvar_rename(nc, "time_bnds", "time_bounds") + ncdf4::ncatt_put(nc, "time", "bounds", "time_bounds", prec = NA) ncdf4::nc_close(nc) unlink(files, recursive = T) - }else{ - nc <- ncdf4::nc_create(file.path(outdir, paste(y, "nc", sep = ".")), nc_var) - ncdf4::ncatt_put(nc, "time", "bounds", "time_bounds", prec=NA) + } else { + nc <- ncdf4::nc_create(file.path(outdir, paste(y, "nc", sep = ".")), nc_var) + ncdf4::ncatt_put(nc, "time", "bounds", "time_bounds", prec = NA) varfile <- file(file.path(outdir, paste(y, "nc", "var", sep = ".")), "w") for (i in seq_along(nc_var)) { ncdf4::ncvar_put(nc, nc_var[[i]], output[[i]]) @@ -310,7 +334,7 @@ model2netcdf.SIPNET <- function(outdir, sitelat, sitelon, start_date, end_date, close(varfile) ncdf4::nc_close(nc) } - } ### End of year loop + } ### End of year loop ## Delete raw output, if requested if (delete.raw) { diff --git a/models/sipnet/R/read_restart.SIPNET.R b/models/sipnet/R/read_restart.SIPNET.R index 2260c71a476..042f2aab1f4 100755 --- a/models/sipnet/R/read_restart.SIPNET.R +++ b/models/sipnet/R/read_restart.SIPNET.R @@ -1,140 +1,143 @@ ##' @title Read restart function for SDA with SIPNET -##' +##' ##' @author Ann Raiho \email{araiho@@nd.edu} -##' +##' ##' @inheritParams PEcAn.ModelName::read_restart.ModelName -##' +##' ##' @description Read Restart for SIPNET -##' +##' ##' @return X.vec vector of forecasts ##' @export read_restart.SIPNET <- function(outdir, runid, stop.time, settings, var.names, params) { - prior.sla <- params[[which(!names(params) %in% c("soil", "soil_SDA", "restart"))[1]]]$SLA - + forecast <- list() - params$restart <-c() #state.vars not in var.names will be added here - #SIPNET inital states refer to models/sipnet/inst/template.param - state.vars <- c("SWE", "SoilMoistFrac", "AbvGrndWood", "TotSoilCarb", "LAI", - "litter_carbon_content", "fine_root_carbon_content", - "coarse_root_carbon_content", "litter_mass_content_of_water") - #when adding new state variables make sure the naming is consistent across read_restart, write_restart and write.configs - #pre-populate parsm$restart with NAs so state names can be added + params$restart <- c() # state.vars not in var.names will be added here + # SIPNET inital states refer to models/sipnet/inst/template.param + state.vars <- c( + "SWE", "SoilMoistFrac", "AbvGrndWood", "TotSoilCarb", "LAI", + "litter_carbon_content", "fine_root_carbon_content", + "coarse_root_carbon_content", "litter_mass_content_of_water" + ) + # when adding new state variables make sure the naming is consistent across read_restart, write_restart and write.configs + # pre-populate parsm$restart with NAs so state names can be added params$restart <- rep(NA, length(setdiff(state.vars, var.names))) - #add states to params$restart NOT in var.names + # add states to params$restart NOT in var.names names(params$restart) <- setdiff(state.vars, var.names) # Read ensemble output - ens <- PEcAn.utils::read.output(runid = runid, - outdir = file.path(outdir, runid), - start.year = lubridate::year(stop.time), - 
end.year = lubridate::year(stop.time), - variables = c(state.vars,"time_bounds")) - #calculate last - start.time <- as.Date(paste0(lubridate::year(stop.time),"-01-01")) - time_var <- ens$time_bounds[1,] - real_time <- as.POSIXct(time_var*3600*24, origin = start.time) + ens <- PEcAn.utils::read.output( + runid = runid, + outdir = file.path(outdir, runid), + start.year = lubridate::year(stop.time), + end.year = lubridate::year(stop.time), + variables = c(state.vars, "time_bounds") + ) + # calculate last + start.time <- as.Date(paste0(lubridate::year(stop.time), "-01-01")) + time_var <- ens$time_bounds[1, ] + real_time <- as.POSIXct(time_var * 3600 * 24, origin = start.time) # last <- which(as.Date(real_time)==as.Date(stop.time))[1] - last <- which(as.Date(real_time)==as.Date(stop.time))[length(which(as.Date(real_time)==as.Date(stop.time)))] - + last <- which(as.Date(real_time) == as.Date(stop.time))[length(which(as.Date(real_time) == as.Date(stop.time)))] + #### PEcAn Standard Outputs if ("AbvGrndWood" %in% var.names) { - forecast[[length(forecast) + 1]] <- PEcAn.utils::ud_convert(ens$AbvGrndWood[last], "kg/m^2", "Mg/ha") + forecast[[length(forecast) + 1]] <- PEcAn.utils::ud_convert(ens$AbvGrndWood[last], "kg/m^2", "Mg/ha") names(forecast[[length(forecast)]]) <- c("AbvGrndWood") - - wood_total_C <- ens$AbvGrndWood[last] + ens$fine_root_carbon_content[last] + ens$coarse_root_carbon_content[last] - if (wood_total_C<=0) wood_total_C <- 0.0001 # Making sure we are not making Nans in case there is no plant living there. - - params$restart["abvGrndWoodFrac"] <- ens$AbvGrndWood[last] / wood_total_C - params$restart["coarseRootFrac"] <- ens$coarse_root_carbon_content[last] / wood_total_C - params$restart["fineRootFrac"] <- ens$fine_root_carbon_content[last] / wood_total_C - }else{ - params$restart["AbvGrndWood"] <- PEcAn.utils::ud_convert(ens$AbvGrndWood[last], "kg/m^2", "g/m^2") + + wood_total_C <- ens$AbvGrndWood[last] + ens$fine_root_carbon_content[last] + ens$coarse_root_carbon_content[last] + if (wood_total_C <= 0) wood_total_C <- 0.0001 # Making sure we are not making Nans in case there is no plant living there. + + params$restart["abvGrndWoodFrac"] <- ens$AbvGrndWood[last] / wood_total_C + params$restart["coarseRootFrac"] <- ens$coarse_root_carbon_content[last] / wood_total_C + params$restart["fineRootFrac"] <- ens$fine_root_carbon_content[last] / wood_total_C + } else { + params$restart["AbvGrndWood"] <- PEcAn.utils::ud_convert(ens$AbvGrndWood[last], "kg/m^2", "g/m^2") # calculate fractions, store in params, will use in write_restart - wood_total_C <- ens$AbvGrndWood[last] + ens$fine_root_carbon_content[last] + ens$coarse_root_carbon_content[last] - if (wood_total_C<=0) wood_total_C <- 0.0001 # Making sure we are not making Nans in case there is no plant living there. - - params$restart["abvGrndWoodFrac"] <- ens$AbvGrndWood[last] / wood_total_C - params$restart["coarseRootFrac"] <- ens$coarse_root_carbon_content[last] / wood_total_C - params$restart["fineRootFrac"] <- ens$fine_root_carbon_content[last] / wood_total_C + wood_total_C <- ens$AbvGrndWood[last] + ens$fine_root_carbon_content[last] + ens$coarse_root_carbon_content[last] + if (wood_total_C <= 0) wood_total_C <- 0.0001 # Making sure we are not making Nans in case there is no plant living there. 
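For orientation, the restart bookkeeping above reconstructs the total wood carbon pool and stores above-ground, coarse-root, and fine-root fractions so that write_restart.SIPNET can later update the pools deterministically. A minimal standalone R sketch of that arithmetic, using hypothetical pool sizes (kg C m-2) rather than values from any actual ensemble member:

abv_ground  <- 8.2    # hypothetical ens$AbvGrndWood[last]
fine_root   <- 0.6    # hypothetical ens$fine_root_carbon_content[last]
coarse_root <- 1.9    # hypothetical ens$coarse_root_carbon_content[last]
wood_total_C <- abv_ground + fine_root + coarse_root
if (wood_total_C <= 0) wood_total_C <- 0.0001   # guard against NaN fractions when no wood is present
fracs <- c(
  abvGrndWoodFrac = abv_ground  / wood_total_C,
  coarseRootFrac  = coarse_root / wood_total_C,
  fineRootFrac    = fine_root   / wood_total_C
)
stopifnot(abs(sum(fracs) - 1) < 1e-10)          # the three fractions partition the wood pool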
+ + params$restart["abvGrndWoodFrac"] <- ens$AbvGrndWood[last] / wood_total_C + params$restart["coarseRootFrac"] <- ens$coarse_root_carbon_content[last] / wood_total_C + params$restart["fineRootFrac"] <- ens$fine_root_carbon_content[last] / wood_total_C } - + if ("GWBI" %in% var.names) { - forecast[[length(forecast) + 1]] <- PEcAn.utils::ud_convert(mean(ens$GWBI), "kg/m^2/s", "Mg/ha/yr") + forecast[[length(forecast) + 1]] <- PEcAn.utils::ud_convert(mean(ens$GWBI), "kg/m^2/s", "Mg/ha/yr") names(forecast[[length(forecast)]]) <- c("GWBI") } - + # Reading in NET Ecosystem Exchange for SDA - unit is kg C m-2 s-1 and the average is estimated if ("NEE" %in% var.names) { - forecast[[length(forecast) + 1]] <- mean(ens$NEE) ## + forecast[[length(forecast) + 1]] <- mean(ens$NEE) ## names(forecast[[length(forecast)]]) <- c("NEE") } - - + + # Reading in Latent heat flux for SDA - unit is MW m-2 if ("Qle" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$Qle[last]*1e-6 ## + forecast[[length(forecast) + 1]] <- ens$Qle[last] * 1e-6 ## names(forecast[[length(forecast)]]) <- c("Qle") } - + if ("leaf_carbon_content" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$leaf_carbon_content[last] ## kgC/m2*m2/kg*2kg/kgC + forecast[[length(forecast) + 1]] <- ens$leaf_carbon_content[last] ## kgC/m2*m2/kg*2kg/kgC names(forecast[[length(forecast)]]) <- c("LeafC") } - + if ("LAI" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$LAI[last] ## m2/m2 + forecast[[length(forecast) + 1]] <- ens$LAI[last] ## m2/m2 names(forecast[[length(forecast)]]) <- c("LAI") - }else{ + } else { params$restart["LAI"] <- ens$LAI[last] } - + if ("litter_carbon_content" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$litter_carbon_content[last] ##kgC/m2 + forecast[[length(forecast) + 1]] <- ens$litter_carbon_content[last] ## kgC/m2 names(forecast[[length(forecast)]]) <- c("litter_carbon_content") - }else{ - params$restart["litter_carbon_content"] <- PEcAn.utils::ud_convert(ens$litter_carbon_content[last], 'kg m-2', 'g m-2') # kgC/m2 -> gC/m2 + } else { + params$restart["litter_carbon_content"] <- PEcAn.utils::ud_convert(ens$litter_carbon_content[last], "kg m-2", "g m-2") # kgC/m2 -> gC/m2 } - + if ("litter_mass_content_of_water" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$litter_mass_content_of_water[last] ##kgC/m2 + forecast[[length(forecast) + 1]] <- ens$litter_mass_content_of_water[last] ## kgC/m2 names(forecast[[length(forecast)]]) <- c("litter_mass_content_of_water") - }else{ + } else { params$restart["litter_mass_content_of_water"] <- ens$litter_mass_content_of_water[last] } - + if ("SoilMoistFrac" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$SoilMoistFrac[last]*100 ## here we multiply it by 100 to convert from proportion to percentage. + forecast[[length(forecast) + 1]] <- ens$SoilMoistFrac[last] * 100 ## here we multiply it by 100 to convert from proportion to percentage. 
names(forecast[[length(forecast)]]) <- c("SoilMoistFrac") - }else{ + } else { params$restart["SoilMoistFrac"] <- ens$SoilMoistFrac[last] } - + # This is snow if ("SWE" %in% var.names) { - forecast[[length(forecast) + 1]] <- ens$SWE[last] ## kgC/m2 + forecast[[length(forecast) + 1]] <- ens$SWE[last] ## kgC/m2 names(forecast[[length(forecast)]]) <- c("SWE") - }else{ - params$restart["SWE"] <- ens$SWE[last]/10 + } else { + params$restart["SWE"] <- ens$SWE[last] / 10 } - + if ("TotLivBiom" %in% var.names) { - forecast[[length(forecast) + 1]] <- PEcAn.utils::ud_convert(ens$TotLivBiom[last], "kg/m^2", "Mg/ha") + forecast[[length(forecast) + 1]] <- PEcAn.utils::ud_convert(ens$TotLivBiom[last], "kg/m^2", "Mg/ha") names(forecast[[length(forecast)]]) <- c("TotLivBiom") } - + if ("TotSoilCarb" %in% var.names) { forecast[[length(forecast) + 1]] <- ens$TotSoilCarb[last] names(forecast[[length(forecast)]]) <- c("TotSoilCarb") - }else{ - params$restart["TotSoilCarb"] <- PEcAn.utils::ud_convert(ens$TotSoilCarb[last], 'kg m-2', 'g m-2') # kgC/m2 -> gC/m2 + } else { + params$restart["TotSoilCarb"] <- PEcAn.utils::ud_convert(ens$TotSoilCarb[last], "kg m-2", "g m-2") # kgC/m2 -> gC/m2 } - - #remove any remaining NAs from params$restart + + # remove any remaining NAs from params$restart params$restart <- stats::na.omit(params$restart) - + print(runid) - + X_tmp <- list(X = unlist(forecast), params = params) - + return(X_tmp) -} # read_restart.SIPNET \ No newline at end of file +} # read_restart.SIPNET diff --git a/models/sipnet/R/sample.IC.SIPNET.R b/models/sipnet/R/sample.IC.SIPNET.R index 540a7bf5006..db8dce6eeca 100644 --- a/models/sipnet/R/sample.IC.SIPNET.R +++ b/models/sipnet/R/sample.IC.SIPNET.R @@ -10,67 +10,77 @@ #' @export #' sample.IC.SIPNET <- function(ne, state, year = 1) { - ## Mg C / ha / yr GWBI ## no conversion needed because SIPNET doesn't take GWBI as IC anyway - GWBI <- ifelse(rep("GWBI" %in% names(state), ne), - state$GWBI[sample.int(length(state$GWBI), ne)], ## unit MgC ha-1 yr-1 - stats::runif(ne, 0, 10)) ## prior - + GWBI <- ifelse(rep("GWBI" %in% names(state), ne), + state$GWBI[sample.int(length(state$GWBI), ne)], ## unit MgC ha-1 yr-1 + stats::runif(ne, 0, 10) + ) ## prior + # g C * m-2 ground area in wood (above-ground + roots) Mgha2gm <- (1000000) / (10000) # these unit conversions are for testing # reminder : when working with kgC m-2 s-1 as NPP units singularity issues pop up in sda.enkf # using MgC ha-1 yr-1 for NPP in SDA and also brought back AbvGrndWood to MgC ha-1 for sanity reasons - AbvGrndWood <- ifelse(rep("AbvGrndWood" %in% names(state), ne), - PEcAn.utils::ud_convert(state$AbvGrndWood[sample.int(length(state$AbvGrndWood), ne)], "Mg/ha", "g/m^2"), - stats::runif(ne, 700, 15000)) ## prior - + AbvGrndWood <- ifelse(rep("AbvGrndWood" %in% names(state), ne), + PEcAn.utils::ud_convert(state$AbvGrndWood[sample.int(length(state$AbvGrndWood), ne)], "Mg/ha", "g/m^2"), + stats::runif(ne, 700, 15000) + ) ## prior + # sipnet accepts a plantWoodC pool that is above-ground + roots # instead of roots having their own state, we'll pass around fractions to update them deterministically fine_root_carbon_content <- stats::runif(ne, 100, 1000) coarse_root_carbon_content <- stats::runif(ne, 200, 2000) wood_total_C <- AbvGrndWood + fine_root_carbon_content + coarse_root_carbon_content - + abvGrndWoodFrac <- AbvGrndWood / wood_total_C - coarseRootFrac <- coarse_root_carbon_content / wood_total_C - fineRootFrac <- fine_root_carbon_content / wood_total_C - + coarseRootFrac <- 
coarse_root_carbon_content / wood_total_C + fineRootFrac <- fine_root_carbon_content / wood_total_C + # initial leaf area, m2 leaves * m-2 ground area (multiply by leafCSpWt to ## get initial plant leaf C) lai <- ifelse(rep("LAI" %in% names(state), ne), - state$LAI[1, sample.int(ncol(state$LAI), ne), year], - stats::runif(ne, 0, 7)) ## prior + state$LAI[1, sample.int(ncol(state$LAI), ne), year], + stats::runif(ne, 0, 7) + ) ## prior ## g C * m-2 ground area litter <- ifelse(rep("litter" %in% names(state), ne), - state$litter[1, sample.int(ncol(state$litter), ne), year], - stats::runif(ne, 130, 1200)) ## prior + state$litter[1, sample.int(ncol(state$litter), ne), year], + stats::runif(ne, 130, 1200) + ) ## prior ## g C * m-2 ground area soil <- ifelse(rep("soil" %in% names(state), ne), - state$soil[1, sample.int(ncol(state$soil), ne), year], - stats::runif(ne, 1200, 2000)) ## prior + state$soil[1, sample.int(ncol(state$soil), ne), year], + stats::runif(ne, 1200, 2000) + ) ## prior ## unitless: fraction of litterWHC litterWFrac <- ifelse(rep("litterW" %in% names(state), ne), - state$litterW[1, sample.int(ncol(state$litterW), ne), year], - stats::runif(ne)) ## prior + state$litterW[1, sample.int(ncol(state$litterW), ne), year], + stats::runif(ne) + ) ## prior ## unitless: fraction of soilWHC soilWFrac <- ifelse(rep("soilW" %in% names(state), ne), - state$soilW[1, sample.int(ncol(state$soilW), ne), year], - stats::runif(ne)) ## prior + state$soilW[1, sample.int(ncol(state$soilW), ne), year], + stats::runif(ne) + ) ## prior ## cm water equiv snow <- ifelse(rep("snow" %in% names(state), ne), - state$snow[1, sample.int(ncol(state$snow), ne), year], - stats::runif(ne, 0, 2000)) ## prior + state$snow[1, sample.int(ncol(state$snow), ne), year], + stats::runif(ne, 0, 2000) + ) ## prior microbe <- ifelse(rep("microbe" %in% names(state), ne), - state$microbe[1, sample.int(ncol(state$microbe), ne), year], - stats::runif(ne, 0.02, 1)) ## prior + state$microbe[1, sample.int(ncol(state$microbe), ne), year], + stats::runif(ne, 0.02, 1) + ) ## prior - return(data.frame(GWBI, AbvGrndWood, abvGrndWoodFrac, coarseRootFrac, fineRootFrac, lai, litter, - soil, litterWFrac, soilWFrac, snow, microbe)) + return(data.frame( + GWBI, AbvGrndWood, abvGrndWoodFrac, coarseRootFrac, fineRootFrac, lai, litter, + soil, litterWFrac, soilWFrac, snow, microbe + )) } # sample.IC.SIPNET diff --git a/models/sipnet/R/split_inputs.SIPNET.R b/models/sipnet/R/split_inputs.SIPNET.R index 4bc6de7dfa6..e82b46727d7 100644 --- a/models/sipnet/R/split_inputs.SIPNET.R +++ b/models/sipnet/R/split_inputs.SIPNET.R @@ -2,7 +2,7 @@ ##' @title split_inputs.SIPNET ##' @name split_inputs.SIPNET ##' @author Mike Dietze and Ann Raiho -##' +##' ##' @param settings PEcAn settings object ##' @param start.time start date and time for each SDA ensemble ##' @param stop.time stop date and time for each SDA ensemble @@ -10,7 +10,7 @@ ##' @param overwrite Default FALSE ##' @param outpath if specified, write output to a new directory. 
Default NULL writes back to the directory being read ##' @description Splits climate met for SIPNET -##' +##' ##' @return file split up climate file ##' ##' @importFrom dplyr %>% @@ -20,40 +20,44 @@ split_inputs.SIPNET <- function(settings, start.time, stop.time, inputs, overwri met <- inputs path <- dirname(met) prefix <- sub(".clim", "", basename(met), fixed = TRUE) - if(is.null(outpath)){ + if (is.null(outpath)) { outpath <- path } - if(!dir.exists(outpath)) dir.create(outpath) - + if (!dir.exists(outpath)) dir.create(outpath) + file <- NA names(file) <- paste(start.time, "-", stop.time) - - #Changing the name of the files, so it would contain the name of the hour as well. - file <- paste0(outpath, "/", prefix, ".", - paste0(start.time%>% as.character() %>% gsub(' ',"_",.), - "-", - stop.time%>% as.character() %>% gsub(' ',"_",.)), ".clim") - - if(file.exists(file) & !overwrite){ + + # Changing the name of the files, so it would contain the name of the hour as well. + file <- paste0( + outpath, "/", prefix, ".", + paste0( + start.time %>% as.character() %>% gsub(" ", "_", .), + "-", + stop.time %>% as.character() %>% gsub(" ", "_", .) + ), ".clim" + ) + + if (file.exists(file) & !overwrite) { return(file) } input.dat <- utils::read.table(met, header = FALSE) - #@Hamze, I added the Date variable by using year, doy, and hour and filtered the clim based that and then removed it afterwards. - dat<-input.dat %>% - dplyr::mutate(Date = strptime(paste(V2, V3), format = "%Y %j", tz = "UTC")%>% as.POSIXct()) %>% - dplyr::mutate(Date = as.POSIXct(paste0(Date, ceiling(V4), ":00"), format = "%Y-%m-%d %H:%M", tz = "UTC")) %>% - dplyr::filter(Date >= start.time, Date < stop.time) %>% + # @Hamze, I added the Date variable by using year, doy, and hour and filtered the clim based that and then removed it afterwards. 
+ dat <- input.dat %>% + dplyr::mutate(Date = strptime(paste(V2, V3), format = "%Y %j", tz = "UTC") %>% as.POSIXct()) %>% + dplyr::mutate(Date = as.POSIXct(paste0(Date, ceiling(V4), ":00"), format = "%Y-%m-%d %H:%M", tz = "UTC")) %>% + dplyr::filter(Date >= start.time, Date < stop.time) %>% dplyr::select(-Date) - - + + ###### Write Met to file utils::write.table(dat, file, row.names = FALSE, col.names = FALSE) ###### Output input path to inputs - #settings$run$inputs$met$path <- file + # settings$run$inputs$met$path <- file return(file) } # split_inputs.SIPNET diff --git a/models/sipnet/R/veg2model.SIPNET.R b/models/sipnet/R/veg2model.SIPNET.R index 741e37a6748..508e3463064 100644 --- a/models/sipnet/R/veg2model.SIPNET.R +++ b/models/sipnet/R/veg2model.SIPNET.R @@ -1,18 +1,17 @@ #' veg2model.SIPNET -#' +#' #' @param outfolder location to store ncdf files #' @param poolinfo object passed from write_ic contains output from cohort2pool function #' @param siteid object passed from write_ic contains site id #' @param ens number of ensemble members -#' +#' #' @return result object with filepaths to ncdf files #' @export #' @author Alexis Helgeson -#' -veg2model.SIPNET <- function(outfolder, poolinfo, siteid, ens){ - +#' +veg2model.SIPNET <- function(outfolder, poolinfo, siteid, ens) { # Execute pool_ic function -result <- PEcAn.data.land::pool_ic_list2netcdf(input = poolinfo, outdir = outfolder, siteid = siteid, ens = ens) + result <- PEcAn.data.land::pool_ic_list2netcdf(input = poolinfo, outdir = outfolder, siteid = siteid, ens = ens) -return(result) -} \ No newline at end of file + return(result) +} diff --git a/models/sipnet/R/write.configs.SIPNET.R b/models/sipnet/R/write.configs.SIPNET.R index c294808b7b5..fee1c735a50 100755 --- a/models/sipnet/R/write.configs.SIPNET.R +++ b/models/sipnet/R/write.configs.SIPNET.R @@ -17,10 +17,10 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs template.in <- system.file("sipnet.in", package = "PEcAn.SIPNET") config.text <- readLines(con = template.in, n = -1) writeLines(config.text, con = file.path(settings$rundir, run.id, "sipnet.in")) - + ### WRITE *.clim - template.clim <- settings$run$inputs$met$path ## read from settings - + template.clim <- settings$run$inputs$met$path ## read from settings + if (!is.null(inputs)) { ## override if specified in inputs if ("met" %in% names(inputs)) { @@ -28,7 +28,7 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs } } PEcAn.logger::logger.info(paste0("Writing SIPNET configs with input ", template.clim)) - + # find out where to write run/ouput rundir <- file.path(settings$host$rundir, as.character(run.id)) outdir <- file.path(settings$host$outdir, as.character(run.id)) @@ -36,14 +36,14 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs rundir <- file.path(settings$rundir, as.character(run.id)) outdir <- file.path(settings$modeloutdir, as.character(run.id)) } - + # create launch script (which will create symlink) if (!is.null(settings$model$jobtemplate) && file.exists(settings$model$jobtemplate)) { jobsh <- readLines(con = settings$model$jobtemplate, n = -1) } else { jobsh <- readLines(con = system.file("template.job", package = "PEcAn.SIPNET"), n = -1) } - + # create host specific setttings hostsetup <- "" if (!is.null(settings$model$prerun)) { @@ -52,13 +52,13 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs if (!is.null(settings$host$prerun)) { hostsetup <- paste(hostsetup, sep = 
"\n", paste(settings$host$prerun, collapse = "\n")) } - + # create cdo specific settings cdosetup <- "" if (!is.null(settings$host$cdosetup)) { cdosetup <- paste(cdosetup, sep = "\n", paste(settings$host$cdosetup, collapse = "\n")) } - + hostteardown <- "" if (!is.null(settings$model$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$model$postrun, collapse = "\n")) @@ -66,92 +66,92 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs if (!is.null(settings$host$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$host$postrun, collapse = "\n")) } - + # create rabbitmq specific setup. cpruncmd <- cpoutcmd <- rmoutdircmd <- rmrundircmd <- "" if (!is.null(settings$host$rabbitmq)) { - #rsync cmd from remote to local host. + # rsync cmd from remote to local host. settings$host$rabbitmq$cpfcmd <- ifelse(is.null(settings$host$rabbitmq$cpfcmd), "", settings$host$rabbitmq$cpfcmd) cpruncmd <- gsub("@OUTDIR@", settings$host$rundir, settings$host$rabbitmq$cpfcmd) cpruncmd <- gsub("@OUTFOLDER@", rundir, cpruncmd) - + cpoutcmd <- gsub("@OUTDIR@", settings$host$outdir, settings$host$rabbitmq$cpfcmd) cpoutcmd <- gsub("@OUTFOLDER@", outdir, cpoutcmd) - - #delete files within rundir and outdir. + + # delete files within rundir and outdir. rmoutdircmd <- paste("rm", file.path(outdir, "*")) rmrundircmd <- paste("rm", file.path(rundir, "*")) } - + # create job.sh jobsh <- gsub("@HOST_SETUP@", hostsetup, jobsh) jobsh <- gsub("@CDO_SETUP@", cdosetup, jobsh) jobsh <- gsub("@HOST_TEARDOWN@", hostteardown, jobsh) - + jobsh <- gsub("@SITE_LAT@", settings$run$site$lat, jobsh) jobsh <- gsub("@SITE_LON@", settings$run$site$lon, jobsh) jobsh <- gsub("@SITE_MET@", template.clim, jobsh) - + jobsh <- gsub("@OUTDIR@", outdir, jobsh) jobsh <- gsub("@RUNDIR@", rundir, jobsh) - + jobsh <- gsub("@START_DATE@", settings$run$start.date, jobsh) - jobsh <- gsub("@END_DATE@",settings$run$end.date , jobsh) - + jobsh <- gsub("@END_DATE@", settings$run$end.date, jobsh) + jobsh <- gsub("@BINARY@", settings$model$binary, jobsh) jobsh <- gsub("@REVISION@", settings$model$revision, jobsh) - + jobsh <- gsub("@CPRUNCMD@", cpruncmd, jobsh) jobsh <- gsub("@CPOUTCMD@", cpoutcmd, jobsh) jobsh <- gsub("@RMOUTDIRCMD@", rmoutdircmd, jobsh) jobsh <- gsub("@RMRUNDIRCMD@", rmrundircmd, jobsh) - - if(is.null(settings$state.data.assimilation$NC.Prefix)){ + + if (is.null(settings$state.data.assimilation$NC.Prefix)) { settings$state.data.assimilation$NC.Prefix <- "sipnet.out" } jobsh <- gsub("@PREFIX@", settings$state.data.assimilation$NC.Prefix, jobsh) - - #overwrite argument - if(is.null(settings$state.data.assimilation$NC.Overwrite)){ + + # overwrite argument + if (is.null(settings$state.data.assimilation$NC.Overwrite)) { settings$state.data.assimilation$NC.Overwrite <- FALSE } jobsh <- gsub("@OVERWRITE@", settings$state.data.assimilation$NC.Overwrite, jobsh) - - #allow conflict? meaning allow full year nc export. - if(is.null(settings$state.data.assimilation$FullYearNC)){ + + # allow conflict? meaning allow full year nc export. 
+ if (is.null(settings$state.data.assimilation$FullYearNC)) { settings$state.data.assimilation$FullYearNC <- FALSE } jobsh <- gsub("@CONFLICT@", settings$state.data.assimilation$FullYearNC, jobsh) - + if (is.null(settings$model$delete.raw)) { settings$model$delete.raw <- FALSE } jobsh <- gsub("@DELETE.RAW@", settings$model$delete.raw, jobsh) - + writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) - + ### WRITE *.param-spatial template.paramSpatial <- system.file("template.param-spatial", package = "PEcAn.SIPNET") file.copy(template.paramSpatial, file.path(settings$rundir, run.id, "sipnet.param-spatial")) - + ### WRITE *.param template.param <- system.file("template.param", package = "PEcAn.SIPNET") if ("default.param" %in% names(settings$model)) { template.param <- settings$model$default.param } - + param <- utils::read.table(template.param) - + #### write run-specific PFT parameters here #### Get parameters being handled by PEcAn for (pft in seq_along(trait.values)) { pft.traits <- unlist(trait.values[[pft]]) pft.names <- names(pft.traits) - + ## Append/replace params specified as constants constant.traits <- unlist(defaults[[1]]$constants) constant.names <- names(constant.traits) - + # Replace matches for (i in seq_along(constant.traits)) { ind <- match(constant.names[i], pft.names) @@ -164,21 +164,21 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs pft.traits[ind] <- constant.traits[i] } } - + # Remove NAs. Constants may be specified as NA to request template defaults. Note that it is 'NA' # (character) not actual NA due to being read in as XML pft.names <- pft.names[pft.traits != "NA" & !is.na(pft.traits)] pft.traits <- pft.traits[pft.traits != "NA" & !is.na(pft.traits)] pft.traits <- as.numeric(pft.traits) - + # Leaf carbon concentration - leafC <- 0.48 #0.5 + leafC <- 0.48 # 0.5 if ("leafC" %in% pft.names) { leafC <- pft.traits[which(pft.names == "leafC")] id <- which(param[, 1] == "cFracLeaf") - param[id, 2] <- leafC * 0.01 # convert to percentage from 0 to 1 + param[id, 2] <- leafC * 0.01 # convert to percentage from 0 to 1 } - + # Specific leaf area converted to SLW SLA <- NA id <- which(param[, 1] == "leafCSpWt") @@ -188,7 +188,7 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs } else { SLA <- 1000 * leafC / param[id, 2] } - + # Maximum photosynthesis Amax <- NA id <- which(param[, 1] == "aMax") @@ -202,82 +202,82 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs if ("AmaxFrac" %in% pft.names) { param[which(param[, 1] == "aMaxFrac"), 2] <- pft.traits[which(pft.names == "AmaxFrac")] } - + ### Canopy extinction coefficiet (k) if ("extinction_coefficient" %in% pft.names) { param[which(param[, 1] == "attenuation"), 2] <- pft.traits[which(pft.names == "extinction_coefficient")] } - + # Leaf respiration rate converted to baseFolRespFrac if ("leaf_respiration_rate_m2" %in% pft.names) { Rd <- pft.traits[which(pft.names == "leaf_respiration_rate_m2")] id <- which(param[, 1] == "baseFolRespFrac") - param[id, 2] <- max(min(Rd/Amax, 1), 0) + param[id, 2] <- max(min(Rd / Amax, 1), 0) } - + # Low temp threshold for photosynethsis if ("Vm_low_temp" %in% pft.names) { param[which(param[, 1] == "psnTMin"), 2] <- pft.traits[which(pft.names == "Vm_low_temp")] } - + # Opt. 
temp for photosynthesis if ("psnTOpt" %in% pft.names) { param[which(param[, 1] == "psnTOpt"), 2] <- pft.traits[which(pft.names == "psnTOpt")] } - + # Growth respiration factor (fraction of GPP) if ("growth_resp_factor" %in% pft.names) { param[which(param[, 1] == "growthRespFrac"), 2] <- pft.traits[which(pft.names == "growth_resp_factor")] } ### !!! NOT YET USED - #Jmax = NA - #if("Jmax" %in% pft.names){ + # Jmax = NA + # if("Jmax" %in% pft.names){ # Jmax = pft.traits[which(pft.names == 'Jmax')] ### Using Jmax scaled to 25 degC. Maybe not be the best approach - #} - - #alpha = NA - #if("quantum_efficiency" %in% pft.names){ + # } + + # alpha = NA + # if("quantum_efficiency" %in% pft.names){ # alpha = pft.traits[which(pft.names == 'quantum_efficiency')] - #} - + # } + # Half saturation of PAR. PAR at which photosynthesis occurs at 1/2 theoretical maximum (Einsteins * m^-2 ground area * day^-1). - #if(!is.na(Jmax) & !is.na(alpha)){ + # if(!is.na(Jmax) & !is.na(alpha)){ # param[which(param[,1] == "halfSatPar"),2] = Jmax/(2*alpha) ### WARNING: this is a very coarse linear approximation and needs improvement ***** ### Yes, we also need to work on doing a paired query where we have both data together. ### Once halfSatPar is calculated, need to remove Jmax and quantum_efficiency from param list so they are not included in SA - #} + # } ### !!! - + # Half saturation of PAR. PAR at which photosynthesis occurs at 1/2 theoretical maximum (Einsteins * m^-2 ground area * day^-1). # Temporary implementation until above is working. if ("half_saturation_PAR" %in% pft.names) { param[which(param[, 1] == "halfSatPar"), 2] <- pft.traits[which(pft.names == "half_saturation_PAR")] } - + # Ball-berry slomatal slope parameter m if ("stomatal_slope.BB" %in% pft.names) { id <- which(param[, 1] == "m_ballBerry") param[id, 2] <- pft.traits[which(pft.names == "stomatal_slope.BB")] } - + # Slope of VPD–photosynthesis relationship. dVpd = 1 - dVpdSlope * vpd^dVpdExp if ("dVPDSlope" %in% pft.names) { param[which(param[, 1] == "dVpdSlope"), 2] <- pft.traits[which(pft.names == "dVPDSlope")] } - + # VPD–water use efficiency relationship. dVpd = 1 - dVpdSlope * vpd^dVpdExp if ("dVpdExp" %in% pft.names) { param[which(param[, 1] == "dVpdExp"), 2] <- pft.traits[which(pft.names == "dVpdExp")] } - + # Leaf turnover rate average turnover rate of leaves, in fraction per day NOTE: read in as # per-year rate! if ("leaf_turnover_rate" %in% pft.names) { param[which(param[, 1] == "leafTurnoverRate"), 2] <- pft.traits[which(pft.names == "leaf_turnover_rate")] } - + if ("wueConst" %in% pft.names) { param[which(param[, 1] == "wueConst"), 2] <- pft.traits[which(pft.names == "wueConst")] } @@ -285,7 +285,7 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs if ("veg_respiration_Q10" %in% pft.names) { param[which(param[, 1] == "vegRespQ10"), 2] <- pft.traits[which(pft.names == "veg_respiration_Q10")] } - + # Base vegetation respiration. vegetation maintenance respiration at 0 degrees C (g C respired * g^-1 plant C * day^-1) # NOTE: only counts plant wood C - leaves handled elsewhere (both above and below-ground: assumed for now to have same resp. rate) # NOTE: read in as per-year rate! 
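For orientation, the hunk above maps PEcAn trait values onto SIPNET parameters by simple unit algebra, e.g. specific leaf area into leafCSpWt and leaf respiration into baseFolRespFrac. A minimal standalone R sketch of those two conversions, using hypothetical trait values (the real ones come from trait.values and the priors):

leafC <- 0.48                                 # leaf carbon fraction, g C per g dry biomass
SLA   <- 12                                   # specific leaf area, m2 per kg dry biomass (hypothetical)
leafCSpWt <- 1000 * leafC / SLA               # = 40 g C per m2 leaf area, the SIPNET parameter
Amax  <- 10                                   # maximum photosynthesis, umol CO2 m-2 s-1 (hypothetical)
Rd    <- 1.2                                  # leaf respiration rate, same units (hypothetical)
baseFolRespFrac <- max(min(Rd / Amax, 1), 0)  # = 0.12, clamped to [0, 1] as in the hunk above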
@@ -294,35 +294,35 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs id <- which(param[, 1] == "baseVegResp") ## Convert from umols CO2 kg s-1 to gC g day-1 stem_resp_g <- (((pft.traits[which(pft.names == "stem_respiration_rate")]) * - (44.0096 / 1e+06) * (12.01 / 44.0096)) / 1000) * 86400 + (44.0096 / 1e+06) * (12.01 / 44.0096)) / 1000) * 86400 ## use Q10 to convert stem resp from reference of 25C to 0C param[id,2] = ## pft.traits[which(pft.names=='stem_respiration_rate')]*vegRespQ10^(-25/10) - param[id, 2] <- stem_resp_g * vegRespQ10^(-25/10) + param[id, 2] <- stem_resp_g * vegRespQ10^(-25 / 10) } - + # turnover of fine roots (per year rate) if ("root_turnover_rate" %in% pft.names) { id <- which(param[, 1] == "fineRootTurnoverRate") param[id, 2] <- pft.traits[which(pft.names == "root_turnover_rate")] } - + # fine root respiration Q10 if ("fine_root_respiration_Q10" %in% pft.names) { param[which(param[, 1] == "fineRootQ10"), 2] <- pft.traits[which(pft.names == "fine_root_respiration_Q10")] } - + # base respiration rate of fine roots (per year rate) if ("root_respiration_rate" %in% pft.names) { fineRootQ10 <- param[which(param[, 1] == "fineRootQ10"), 2] id <- which(param[, 1] == "baseFineRootResp") ## Convert from umols CO2 kg s-1 to gC g day-1 root_resp_rate_g <- (((pft.traits[which(pft.names == "root_respiration_rate")]) * - (44.0096/1e+06) * (12.01 / 44.0096)) / 1000) * 86400 + (44.0096 / 1e+06) * (12.01 / 44.0096)) / 1000) * 86400 ## use Q10 to convert stem resp from reference of 25C to 0C param[id,2] = ## pft.traits[which(pft.names=='root_respiration_rate')]*fineRootQ10^(-25/10) - param[id, 2] <- root_resp_rate_g * fineRootQ10 ^ (-25 / 10) + param[id, 2] <- root_resp_rate_g * fineRootQ10^(-25 / 10) } - + # coarse root respiration Q10 if ("coarse_root_respiration_Q10" %in% pft.names) { param[which(param[, 1] == "coarseRootQ10"), 2] <- pft.traits[which(pft.names == "coarse_root_respiration_Q10")] @@ -336,34 +336,36 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs sum_alloc <- pft.traits[which(pft.names == "root_allocation_fraction")] + pft.traits[which(pft.names == "wood_allocation_fraction")] + pft.traits[which(pft.names == "leaf_allocation_fraction")] - if(sum_alloc > 1){ + if (sum_alloc > 1) { # I want this to be a severe for now, lateer can be changed back to warning - PEcAn.logger::logger.warn("Sum of allocation parameters exceeds 1 for runid = ", run.id, - "- This won't break anything since SIPNET has internal check, but notice that such combinations might not take effect in the outputs.") + PEcAn.logger::logger.warn( + "Sum of allocation parameters exceeds 1 for runid = ", run.id, + "- This won't break anything since SIPNET has internal check, but notice that such combinations might not take effect in the outputs." 
+ ) } } - - + + # fineRootAllocation if ("root_allocation_fraction" %in% pft.names) { param[which(param[, 1] == "fineRootAllocation"), 2] <- pft.traits[which(pft.names == "root_allocation_fraction")] } - + # woodAllocation if ("wood_allocation_fraction" %in% pft.names) { param[which(param[, 1] == "woodAllocation"), 2] <- pft.traits[which(pft.names == "wood_allocation_fraction")] } - + # leafAllocation if ("leaf_allocation_fraction" %in% pft.names) { param[which(param[, 1] == "leafAllocation"), 2] <- pft.traits[which(pft.names == "leaf_allocation_fraction")] } - + # wood_turnover_rate if ("wood_turnover_rate" %in% pft.names) { param[which(param[, 1] == "woodTurnoverRate"), 2] <- pft.traits[which(pft.names == "wood_turnover_rate")] } - + ### ----- Soil parameters soil respiration Q10. if ("soil_respiration_Q10" %in% pft.names) { param[which(param[, 1] == "soilRespQ10"), 2] <- pft.traits[which(pft.names == "soil_respiration_Q10")] @@ -372,7 +374,7 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs if ("som_respiration_rate" %in% pft.names) { param[which(param[, 1] == "baseSoilResp"), 2] <- pft.traits[which(pft.names == "som_respiration_rate")] } - + # litterBreakdownRate if ("turn_over_time" %in% pft.names) { id <- which(param[, 1] == "litterBreakdownRate") @@ -382,12 +384,12 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs if ("frozenSoilEff" %in% pft.names) { param[which(param[, 1] == "frozenSoilEff"), 2] <- pft.traits[which(pft.names == "frozenSoilEff")] } - + # frozenSoilFolREff if ("frozenSoilFolREff" %in% pft.names) { param[which(param[, 1] == "frozenSoilFolREff"), 2] <- pft.traits[which(pft.names == "frozenSoilFolREff")] } - + # soilWHC if ("soilWHC" %in% pft.names) { param[which(param[, 1] == "soilWHC"), 2] <- pft.traits[which(pft.names == "soilWHC")] @@ -395,30 +397,30 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs # 10/31/2017 IF: these were the two assumptions used in the emulator paper in order to reduce dimensionality # These results in improved winter soil respiration values # they don't affect anything when the seasonal soil respiration functionality in SIPNET is turned-off - if(TRUE){ + if (TRUE) { # assume soil resp Q10 cold == soil resp Q10 param[which(param[, 1] == "soilRespQ10Cold"), 2] <- param[which(param[, 1] == "soilRespQ10"), 2] # default SIPNET prior of baseSoilRespCold was 1/4th of baseSoilResp # assuming they will scale accordingly param[which(param[, 1] == "baseSoilRespCold"), 2] <- param[which(param[, 1] == "baseSoilResp"), 2] * 0.25 } - + if ("immedEvapFrac" %in% pft.names) { param[which(param[, 1] == "immedEvapFrac"), 2] <- pft.traits[which(pft.names == "immedEvapFrac")] } - + if ("leafWHC" %in% pft.names) { param[which(param[, 1] == "leafPoolDepth"), 2] <- pft.traits[which(pft.names == "leafWHC")] } - + if ("waterRemoveFrac" %in% pft.names) { param[which(param[, 1] == "waterRemoveFrac"), 2] <- pft.traits[which(pft.names == "waterRemoveFrac")] } - + if ("fastFlowFrac" %in% pft.names) { param[which(param[, 1] == "fastFlowFrac"), 2] <- pft.traits[which(pft.names == "fastFlowFrac")] } - + if ("rdConst" %in% pft.names) { param[which(param[, 1] == "rdConst"), 2] <- pft.traits[which(pft.names == "rdConst")] } @@ -426,56 +428,56 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs if ("GDD" %in% pft.names) { param[which(param[, 1] == "gddLeafOn"), 2] <- pft.traits[which(pft.names == "GDD")] } - + # Fraction of leaf fall per year 
(should be 1 for decid) if ("fracLeafFall" %in% pft.names) { param[which(param[, 1] == "fracLeafFall"), 2] <- pft.traits[which(pft.names == "fracLeafFall")] } - + # Leaf growth. Amount of C added to the leaf during the greenup period if ("leafGrowth" %in% pft.names) { param[which(param[, 1] == "leafGrowth"), 2] <- pft.traits[which(pft.names == "leafGrowth")] } - #update LeafOnday and LeafOffDay - if (!is.null(settings$run$inputs$leaf_phenology)){ - obs_year_start <- lubridate::year(settings$run$start.date) - obs_year_end <- lubridate::year(settings$run$end.date) - if (obs_year_start != obs_year_end) { - PEcAn.logger::logger.info("Start.date and end.date are not in the same year. Currently start.date is used for refering phenological data") - } - leaf_pheno_path <- settings$run$inputs$leaf_phenology$path ## read from settings - if (!is.null(leaf_pheno_path)){ - ##read data - leafphdata <- utils::read.csv(leaf_pheno_path) - leafOnDay <- leafphdata$leafonday[leafphdata$year == obs_year_start & leafphdata$site_id==settings$run$site$id] - leafOffDay<- leafphdata$leafoffday[leafphdata$year== obs_year_start & leafphdata$site_id==settings$run$site$id] - if (!is.na(leafOnDay)){ - param[which(param[, 1] == "leafOnDay"), 2] <- leafOnDay - } - if (!is.na(leafOffDay)){ - param[which(param[, 1] == "leafOffDay"), 2] <- leafOffDay - } + # update LeafOnday and LeafOffDay + if (!is.null(settings$run$inputs$leaf_phenology)) { + obs_year_start <- lubridate::year(settings$run$start.date) + obs_year_end <- lubridate::year(settings$run$end.date) + if (obs_year_start != obs_year_end) { + PEcAn.logger::logger.info("Start.date and end.date are not in the same year. Currently start.date is used for refering phenological data") + } + leaf_pheno_path <- settings$run$inputs$leaf_phenology$path ## read from settings + if (!is.null(leaf_pheno_path)) { + ## read data + leafphdata <- utils::read.csv(leaf_pheno_path) + leafOnDay <- leafphdata$leafonday[leafphdata$year == obs_year_start & leafphdata$site_id == settings$run$site$id] + leafOffDay <- leafphdata$leafoffday[leafphdata$year == obs_year_start & leafphdata$site_id == settings$run$site$id] + if (!is.na(leafOnDay)) { + param[which(param[, 1] == "leafOnDay"), 2] <- leafOnDay + } + if (!is.na(leafOffDay)) { + param[which(param[, 1] == "leafOffDay"), 2] <- leafOffDay + } } else { - PEcAn.logger::logger.info("No phenology data were found. Please consider running `PEcAn.data.remote::extract_phenology_MODIS` to get the parameter file.") + PEcAn.logger::logger.info("No phenology data were found. 
Please consider running `PEcAn.data.remote::extract_phenology_MODIS` to get the parameter file.") } } } ## end loop over PFTS ####### end parameter update - #working on reading soil file (only working for 1 soil file) - if(length(settings$run$inputs$soilinitcond$path)==1){ + # working on reading soil file (only working for 1 soil file) + if (length(settings$run$inputs$soilinitcond$path) == 1) { soil_IC_list <- PEcAn.data.land::pool_ic_netcdf2list(settings$run$inputs$soilinitcond$path) - #SoilWHC and LitterWHC - if("volume_fraction_of_water_in_soil_at_saturation"%in%names(soil_IC_list$vals)){ - #SoilWHC - param[which(param[, 1] == "soilWHC"), 2] <- mean(unlist(soil_IC_list$vals["volume_fraction_of_water_in_soil_at_saturation"]))*100 - - #LitterWHC - #param[which(param[, 1] == "litterWHC"), 2] <- unlist(soil_IC_list$vals["volume_fraction_of_water_in_soil_at_saturation"])[1]*100 - } - if("soil_hydraulic_conductivity_at_saturation"%in%names(soil_IC_list$vals)){ - #litwaterDrainrate - param[which(param[, 1] == "litWaterDrainRate"), 2] <- unlist(soil_IC_list$vals["soil_hydraulic_conductivity_at_saturation"])[1]*100/(3600*24) + # SoilWHC and LitterWHC + if ("volume_fraction_of_water_in_soil_at_saturation" %in% names(soil_IC_list$vals)) { + # SoilWHC + param[which(param[, 1] == "soilWHC"), 2] <- mean(unlist(soil_IC_list$vals["volume_fraction_of_water_in_soil_at_saturation"])) * 100 + + # LitterWHC + # param[which(param[, 1] == "litterWHC"), 2] <- unlist(soil_IC_list$vals["volume_fraction_of_water_in_soil_at_saturation"])[1]*100 + } + if ("soil_hydraulic_conductivity_at_saturation" %in% names(soil_IC_list$vals)) { + # litwaterDrainrate + param[which(param[, 1] == "litWaterDrainRate"), 2] <- unlist(soil_IC_list$vals["soil_hydraulic_conductivity_at_saturation"])[1] * 100 / (3600 * 24) } } if (!is.null(IC)) { @@ -484,16 +486,16 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs plant_wood_vars <- c("AbvGrndWood", "abvGrndWoodFrac", "coarseRootFrac", "fineRootFrac") if (all(plant_wood_vars %in% ic.names)) { # reconstruct total wood C - if(IC$abvGrndWoodFrac < 0.05){ + if (IC$abvGrndWoodFrac < 0.05) { wood_total_C <- IC$AbvGrndWood - }else{ + } else { wood_total_C <- IC$AbvGrndWood / IC$abvGrndWoodFrac } - #Sanity check + # Sanity check if (is.infinite(wood_total_C) | is.nan(wood_total_C) | wood_total_C < 0) { wood_total_C <- 0 - if (round(IC$AbvGrndWood) > 0 & round(IC$abvGrndWoodFrac, 3) == 0) + if (round(IC$AbvGrndWood) > 0 & round(IC$abvGrndWoodFrac, 3) == 0) { PEcAn.logger::logger.warn( paste0( "There is a major problem with ", @@ -506,9 +508,10 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs ) ) } - param[which(param[, 1] == "plantWoodInit"), 2] <- wood_total_C + } + param[which(param[, 1] == "plantWoodInit"), 2] <- wood_total_C param[which(param[, 1] == "coarseRootFrac"), 2] <- IC$coarseRootFrac - param[which(param[, 1] == "fineRootFrac"), 2] <- IC$fineRootFrac + param[which(param[, 1] == "fineRootFrac"), 2] <- IC$fineRootFrac } ## laiInit m2/m2 if ("lai" %in% ic.names) { @@ -524,8 +527,8 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs } ## litterWFracInit fraction if ("litter_mass_content_of_water" %in% ic.names) { - #here we use litterWaterContent/litterWHC to calculate the litterWFracInit - param[which(param[, 1] == "litterWFracInit"), 2] <- IC$litter_mass_content_of_water/(param[which(param[, 1] == "litterWHC"), 2]*10) + # here we use litterWaterContent/litterWHC to calculate the 
litterWFracInit + param[which(param[, 1] == "litterWFracInit"), 2] <- IC$litter_mass_content_of_water / (param[which(param[, 1] == "litterWHC"), 2] * 10) } ## soilWFracInit fraction if ("soilWFrac" %in% ic.names) { @@ -539,16 +542,14 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs if ("microbe" %in% ic.names) { param[which(param[, 1] == "microbeInit"), 2] <- IC$microbe } - } - - else if (length(settings$run$inputs$poolinitcond$path)>0) { + } else if (length(settings$run$inputs$poolinitcond$path) > 0) { ICs_num <- length(settings$run$inputs$poolinitcond$path) IC.path <- settings$run$inputs$poolinitcond$path[[sample(1:ICs_num, 1)]] IC.pools <- PEcAn.data.land::prepare_pools(IC.path, constants = list(sla = SLA)) - - if(!is.null(IC.pools)){ - IC.nc <- ncdf4::nc_open(IC.path) #for additional variables specific to SIPNET + + if (!is.null(IC.pools)) { + IC.nc <- ncdf4::nc_open(IC.path) # for additional variables specific to SIPNET ## plantWoodInit gC/m2 if ("wood" %in% names(IC.pools)) { param[which(param[, 1] == "plantWoodInit"), 2] <- PEcAn.utils::ud_convert(IC.pools$wood, "kg m-2", "g m-2") @@ -559,81 +560,82 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs param[which(param[, 1] == "laiInit"), 2] <- lai } - #Initial LAI is set as 0 for deciduous forests and grasslands for non-growing seasons - if (!(lubridate::month(settings$run$start.date) %in% seq(5,9))){ #Growing seasons are coarsely defined as months from May to September for non-conifers in the US - site_pft <- utils::read.csv(settings$run$inputs$pft.site$path) - site.pft.name <- site_pft$pft[site_pft$site == settings$run$site$id] - if (site.pft.name!="boreal.coniferous") { #Currently only excluding boreal conifers. Other evergreen PFTs could be added here later. - param[which(param[, 1] == "laiInit"), 2] <- 0 - } + # Initial LAI is set as 0 for deciduous forests and grasslands for non-growing seasons + if (!(lubridate::month(settings$run$start.date) %in% seq(5, 9))) { # Growing seasons are coarsely defined as months from May to September for non-conifers in the US + site_pft <- utils::read.csv(settings$run$inputs$pft.site$path) + site.pft.name <- site_pft$pft[site_pft$site == settings$run$site$id] + if (site.pft.name != "boreal.coniferous") { # Currently only excluding boreal conifers. Other evergreen PFTs could be added here later. 
+ param[which(param[, 1] == "laiInit"), 2] <- 0 + } } ## neeInit gC/m2 - nee <- try(ncdf4::ncvar_get(IC.nc,"nee"),silent = TRUE) + nee <- try(ncdf4::ncvar_get(IC.nc, "nee"), silent = TRUE) if (!is.na(nee) && is.numeric(nee)) { param[which(param[, 1] == "neeInit"), 2] <- nee } ## litterInit gC/m2 if ("litter" %in% names(IC.pools)) { - param[which(param[, 1] == "litterInit"), 2] <- PEcAn.utils::ud_convert(IC.pools$litter, 'g m-2', 'g m-2') # BETY: kgC m-2 + param[which(param[, 1] == "litterInit"), 2] <- PEcAn.utils::ud_convert(IC.pools$litter, "g m-2", "g m-2") # BETY: kgC m-2 } ## soilInit gC/m2 if ("soil" %in% names(IC.pools)) { - param[which(param[, 1] == "soilInit"), 2] <- PEcAn.utils::ud_convert(sum(IC.pools$soil), 'kg m-2', 'g m-2') # BETY: kgC m-2 + param[which(param[, 1] == "soilInit"), 2] <- PEcAn.utils::ud_convert(sum(IC.pools$soil), "kg m-2", "g m-2") # BETY: kgC m-2 } ## soilWFracInit fraction - soilWFrac <- try(ncdf4::ncvar_get(IC.nc,"SoilMoistFrac"),silent = TRUE) + soilWFrac <- try(ncdf4::ncvar_get(IC.nc, "SoilMoistFrac"), silent = TRUE) if (!"try-error" %in% class(soilWFrac)) { if (!is.na(soilWFrac) && is.numeric(soilWFrac)) { - param[which(param[, 1] == "soilWFracInit"), 2] <- sum(soilWFrac)/100 + param[which(param[, 1] == "soilWFracInit"), 2] <- sum(soilWFrac) / 100 } } ## litterWFracInit fraction litterWFrac <- soilWFrac - + ## snowInit cm water equivalent (cm = g / cm2 because 1 g water = 1 cm3 water) - snow = try(ncdf4::ncvar_get(IC.nc,"SWE"),silent = TRUE) + snow <- try(ncdf4::ncvar_get(IC.nc, "SWE"), silent = TRUE) if (!is.na(snow) && is.numeric(snow)) { - param[which(param[, 1] == "snowInit"), 2] <- PEcAn.utils::ud_convert(snow, "kg m-2", "g cm-2") # BETY: kg m-2 + param[which(param[, 1] == "snowInit"), 2] <- PEcAn.utils::ud_convert(snow, "kg m-2", "g cm-2") # BETY: kg m-2 } ## leafOnDay - leafOnDay <- try(ncdf4::ncvar_get(IC.nc,"date_of_budburst"),silent = TRUE) + leafOnDay <- try(ncdf4::ncvar_get(IC.nc, "date_of_budburst"), silent = TRUE) if (!is.na(leafOnDay) && is.numeric(leafOnDay)) { param[which(param[, 1] == "leafOnDay"), 2] <- leafOnDay } ## leafOffDay - leafOffDay <- try(ncdf4::ncvar_get(IC.nc,"date_of_senescence"),silent = TRUE) + leafOffDay <- try(ncdf4::ncvar_get(IC.nc, "date_of_senescence"), silent = TRUE) if (!is.na(leafOffDay) && is.numeric(leafOffDay)) { param[which(param[, 1] == "leafOffDay"), 2] <- leafOffDay } - microbe <- try(ncdf4::ncvar_get(IC.nc,"Microbial Biomass C"),silent = TRUE) + microbe <- try(ncdf4::ncvar_get(IC.nc, "Microbial Biomass C"), silent = TRUE) if (!is.na(microbe) && is.numeric(microbe)) { - param[which(param[, 1] == "microbeInit"), 2] <- PEcAn.utils::ud_convert(microbe, "mg kg-1", "mg g-1") #BETY: mg microbial C kg-1 soil + param[which(param[, 1] == "microbeInit"), 2] <- PEcAn.utils::ud_convert(microbe, "mg kg-1", "mg g-1") # BETY: mg microbial C kg-1 soil } - + ncdf4::nc_close(IC.nc) - }else{ + } else { PEcAn.logger::logger.error("Bad initial conditions filepath; keeping defaults") } - }else{ - #some stuff about IC file that we can give in lieu of actual ICs + } else { + # some stuff about IC file that we can give in lieu of actual ICs } - - + + if (!is.null(settings$run$inputs$soilmoisture)) { - #read soil moisture netcdf file, grab closet date to start_date, set equal to soilWFrac - if(!is.null(settings$run$inputs$soilmoisture$path)){ + # read soil moisture netcdf file, grab closet date to start_date, set equal to soilWFrac + if (!is.null(settings$run$inputs$soilmoisture$path)) { soil.path <- 
settings$run$inputs$soilmoisture$path soilWFrac <- ncdf4::ncvar_get(ncdf4::nc_open(soil.path), varid = "mass_fraction_of_unfrozen_water_in_soil_moisture") - + param[which(param[, 1] == "soilWFracInit"), 2] <- soilWFrac } - } - if(file.exists(file.path(settings$rundir, run.id, "sipnet.param"))) file.rename(file.path(settings$rundir, run.id, "sipnet.param"),file.path(settings$rundir, run.id, paste0("sipnet_",lubridate::year(settings$run$start.date),"_",lubridate::year(settings$run$end.date),".param"))) - + if (file.exists(file.path(settings$rundir, run.id, "sipnet.param"))) file.rename(file.path(settings$rundir, run.id, "sipnet.param"), file.path(settings$rundir, run.id, paste0("sipnet_", lubridate::year(settings$run$start.date), "_", lubridate::year(settings$run$end.date), ".param"))) - utils::write.table(param, file.path(settings$rundir, run.id, "sipnet.param"), row.names = FALSE, col.names = FALSE, - quote = FALSE) + + utils::write.table(param, file.path(settings$rundir, run.id, "sipnet.param"), + row.names = FALSE, col.names = FALSE, + quote = FALSE + ) } # write.config.SIPNET #--------------------------------------------------------------------------------------------------# ##' @@ -648,20 +650,19 @@ write.config.SIPNET <- function(defaults, trait.values, settings, run.id, inputs ##' ##' @author Shawn Serbin, David LeBauer remove.config.SIPNET <- function(main.outdir, settings) { - ### Remove files on localhost if (settings$host$name == "localhost") { - files <- paste0(settings$outdir, list.files(path = settings$outdir, recursive = FALSE)) # Need to change this to the run folder when implemented - files <- files[-grep("*.xml", files)] # Keep pecan.xml file + files <- paste0(settings$outdir, list.files(path = settings$outdir, recursive = FALSE)) # Need to change this to the run folder when implemented + files <- files[-grep("*.xml", files)] # Keep pecan.xml file pft.dir <- strsplit(settings$pfts$pft$outdir, "/")[[1]] ln <- length(pft.dir) pft.dir <- pft.dir[ln] - files <- files[-grep(pft.dir, files)] # Keep pft folder + files <- files[-grep(pft.dir, files)] # Keep pft folder # file.remove(files,recursive=TRUE) - system(paste("rm -r ", files, sep = "", collapse = " "), ignore.stderr = TRUE) # remove files/dirs - + system(paste("rm -r ", files, sep = "", collapse = " "), ignore.stderr = TRUE) # remove files/dirs + ### On remote host } else { print("*** WARNING: Removal of files on remote host not yet implemented ***") } -} # remove.config.SIPNET +} # remove.config.SIPNET diff --git a/models/sipnet/R/write_restart.SIPNET.R b/models/sipnet/R/write_restart.SIPNET.R index 32d2736312e..ab7193b626b 100755 --- a/models/sipnet/R/write_restart.SIPNET.R +++ b/models/sipnet/R/write_restart.SIPNET.R @@ -12,45 +12,46 @@ ##' @param settings PEcAn settings object ##' @param new.state analysis state vector ##' @param RENAME flag to either rename output file or not -##' @param new.params list of parameters to convert between different states +##' @param new.params list of parameters to convert between different states ##' @param inputs list of model inputs to use in write.configs.SIPNET ##' @param verbose decide if we want to print the outputs. 
-##' +##' ##' @return NONE ##' ##' @importFrom dplyr %>% ##' @export write_restart.SIPNET <- function(outdir, runid, start.time, stop.time, settings, new.state, RENAME = TRUE, new.params = FALSE, inputs, verbose = FALSE) { - rundir <- settings$host$rundir variables <- colnames(new.state) # values that will be used for updating other states deterministically depending on the SDA states if (length(new.params$restart) > 0) { IC_extra <- data.frame(t(new.params$restart)) - } else{ + } else { IC_extra <- data.frame() - } - + } + if (RENAME) { - file.rename(file.path(outdir, runid, "sipnet.out"), - file.path(outdir, runid, paste0("sipnet.", as.Date(start.time), ".out"))) + file.rename( + file.path(outdir, runid, "sipnet.out"), + file.path(outdir, runid, paste0("sipnet.", as.Date(start.time), ".out")) + ) system(paste("rm", file.path(rundir, runid, "sipnet.clim"))) } else { print(paste("Files not renamed -- Need to rerun timestep", start.time, "before next time step")) } - + settings$run$start.date <- start.time settings$run$end.date <- stop.time - + ## Converting to sipnet units prior.sla <- new.params[[which(!names(new.params) %in% c("soil", "soil_SDA", "restart"))[1]]]$SLA - unit.conv <- 2 * (10000 / 1) * (1 / 1000) * (3.154 * 10^7) # kgC/m2/s -> Mg/ha/yr - + unit.conv <- 2 * (10000 / 1) * (1 / 1000) * (3.154 * 10^7) # kgC/m2/s -> Mg/ha/yr + analysis.save <- list() - + if ("NPP" %in% variables) { - analysis.save[[length(analysis.save) + 1]] <- PEcAn.utils::ud_convert(new.state$NPP, "kg/m^2/s", "Mg/ha/yr") #*unit.conv -> Mg/ha/yr + analysis.save[[length(analysis.save) + 1]] <- PEcAn.utils::ud_convert(new.state$NPP, "kg/m^2/s", "Mg/ha/yr") #* unit.conv -> Mg/ha/yr names(analysis.save[[length(analysis.save)]]) <- c("NPP") } @@ -58,60 +59,60 @@ write_restart.SIPNET <- function(outdir, runid, start.time, stop.time, settings, analysis.save[[length(analysis.save) + 1]] <- new.state$NEE names(analysis.save[[length(analysis.save)]]) <- c("NEE") } - + if ("AbvGrndWood" %in% variables) { - AbvGrndWood <- PEcAn.utils::ud_convert(new.state$AbvGrndWood, "Mg/ha", "g/m^2") + AbvGrndWood <- PEcAn.utils::ud_convert(new.state$AbvGrndWood, "Mg/ha", "g/m^2") analysis.save[[length(analysis.save) + 1]] <- AbvGrndWood names(analysis.save[[length(analysis.save)]]) <- c("AbvGrndWood") } - + if ("LeafC" %in% variables) { - analysis.save[[length(analysis.save) + 1]] <- new.state$LeafC * prior.sla * 2 ## kgC/m2*m2/kg*2kg/kgC -> m2/m2 + analysis.save[[length(analysis.save) + 1]] <- new.state$LeafC * prior.sla * 2 ## kgC/m2*m2/kg*2kg/kgC -> m2/m2 if (new.state$LeafC < 0) analysis.save[[length(analysis.save)]] <- 0 names(analysis.save[[length(analysis.save)]]) <- c("lai") } - + if ("litter_carbon_content" %in% variables) { - analysis.save[[length(analysis.save) + 1]] <- PEcAn.utils::ud_convert(new.state$litter_carbon_content, 'kg m-2', 'g m-2') # kgC/m2 -> gC/m2 + analysis.save[[length(analysis.save) + 1]] <- PEcAn.utils::ud_convert(new.state$litter_carbon_content, "kg m-2", "g m-2") # kgC/m2 -> gC/m2 if (new.state$litter_carbon_content < 0) analysis.save[[length(analysis.save)]] <- 0 names(analysis.save[[length(analysis.save)]]) <- c("litter_carbon_content") } - + if ("TotSoilCarb" %in% variables) { - analysis.save[[length(analysis.save) + 1]] <- PEcAn.utils::ud_convert(new.state$TotSoilCarb, 'kg m-2', 'g m-2') # kgC/m2 -> gC/m2 + analysis.save[[length(analysis.save) + 1]] <- PEcAn.utils::ud_convert(new.state$TotSoilCarb, "kg m-2", "g m-2") # kgC/m2 -> gC/m2 if (new.state$TotSoilCarb < 0) 
analysis.save[[length(analysis.save)]] <- 0 names(analysis.save[[length(analysis.save)]]) <- c("soil") } - - if("litter_mass_content_of_water" %in% variables){ - analysis.save[[length(analysis.save) + 1]] <- new.state$litter_mass_content_of_water ## unitless + + if ("litter_mass_content_of_water" %in% variables) { + analysis.save[[length(analysis.save) + 1]] <- new.state$litter_mass_content_of_water ## unitless if (new.state$litter_mass_content_of_water < 0 || new.state$litter_mass_content_of_water > 1) analysis.save[[length(analysis.save)]] <- 0.5 names(analysis.save[[length(analysis.save)]]) <- c("litter_mass_content_of_water") } - + if ("SoilMoistFrac" %in% variables) { - analysis.save[[length(analysis.save) + 1]] <- new.state$SoilMoistFrac/100 ## unitless + analysis.save[[length(analysis.save) + 1]] <- new.state$SoilMoistFrac / 100 ## unitless if (analysis.save[[length(analysis.save)]] < 0 || analysis.save[[length(analysis.save)]] > 1) analysis.save[[length(analysis.save)]] <- 0.5 names(analysis.save[[length(analysis.save)]]) <- c("soilWFrac") } - + if ("SWE" %in% variables) { - analysis.save[[length(analysis.save) + 1]] <- new.state$SWE/10 + analysis.save[[length(analysis.save) + 1]] <- new.state$SWE / 10 if (analysis.save[[length(analysis.save)]] < 0) analysis.save[[length(analysis.save)]] <- 0 names(analysis.save[[length(analysis.save)]]) <- c("SWE") } if ("LAI" %in% variables) { - analysis.save[[length(analysis.save) + 1]] <- new.state$LAI + analysis.save[[length(analysis.save) + 1]] <- new.state$LAI if (new.state$LAI < 0) analysis.save[[length(analysis.save)]] <- 0 names(analysis.save[[length(analysis.save)]]) <- c("lai") } - - if (!is.null(analysis.save) && length(analysis.save)>0){ + + if (!is.null(analysis.save) && length(analysis.save) > 0) { analysis.save.mat <- data.frame(matrix(unlist(analysis.save, use.names = TRUE), nrow = 1)) colnames(analysis.save.mat) <- names(unlist(analysis.save)) - analysis.save.mat <- cbind(analysis.save.mat, IC_extra) #add in all restart values - }else{ + analysis.save.mat <- cbind(analysis.save.mat, IC_extra) # add in all restart values + } else { analysis.save.mat <- NULL } @@ -119,11 +120,13 @@ write_restart.SIPNET <- function(outdir, runid, start.time, stop.time, settings, print(runid %>% as.character()) print(analysis.save.mat) } - do.call(write.config.SIPNET, args = list(defaults = NULL, - trait.values = new.params, - settings = settings, - run.id = runid, - inputs = inputs, - IC = analysis.save.mat)) + do.call(write.config.SIPNET, args = list( + defaults = NULL, + trait.values = new.params, + settings = settings, + run.id = runid, + inputs = inputs, + IC = analysis.save.mat + )) print(runid) -} # write_restart.SIPNET \ No newline at end of file +} # write_restart.SIPNET diff --git a/models/sipnet/inst/SIPNET.lyford.SDA.R b/models/sipnet/inst/SIPNET.lyford.SDA.R index 0bd93acece5..cbff691e95c 100644 --- a/models/sipnet/inst/SIPNET.lyford.SDA.R +++ b/models/sipnet/inst/SIPNET.lyford.SDA.R @@ -6,11 +6,11 @@ options(warn = 1, keep.source = TRUE, error = quote({ })) status.start <- function(name) { - cat(paste(name, format(Sys.time(), "%F %T"), sep="\t"), file=file.path(settings$outdir, "STATUS"), append=TRUE) + cat(paste(name, format(Sys.time(), "%F %T"), sep = "\t"), file = file.path(settings$outdir, "STATUS"), append = TRUE) } -status.end <- function(status="DONE") { - cat(paste("", format(Sys.time(), "%F %T"), status, "\n", sep="\t"), file=file.path(settings$outdir, "STATUS"), append=TRUE) +status.end <- function(status = "DONE") { + 
cat(paste("", format(Sys.time(), "%F %T"), status, "\n", sep = "\t"), file = file.path(settings$outdir, "STATUS"), append = TRUE) } #---------------- Load libraries. -----------------------------------------------------------------# @@ -23,7 +23,7 @@ library(rjags) library(reshape2) #--------------------------------------------------------------------------------------------------# # -# +# # # 35 # FALSE @@ -65,22 +65,24 @@ library(reshape2) #---------------- Load PEcAn settings file. -------------------------------------------------------# # Open and read in settings file for PEcAn run. -settings <- read.settings("pecan.SDA.xml") +settings <- read.settings("pecan.SDA.xml") #--------------------------------------------------------------------------------------------------# #---------------- Load data. -------------------------------------------------------# -load('~/sipnet_lyford_summary.Rdata') -years<-1962:2015 -names(obs.mean) <- paste0(years,'/12/31') +load("~/sipnet_lyford_summary.Rdata") +years <- 1962:2015 +names(obs.mean) <- paste0(years, "/12/31") #---------------- Build Initial Conditions ----------------------------------------------------------------------# status.start("IC") -ne = as.numeric(settings$state.data.assimilation$n.ensemble) -IC = sample.IC.SIPNET(ne,state,year=1) +ne <- as.numeric(settings$state.data.assimilation$n.ensemble) +IC <- sample.IC.SIPNET(ne, state, year = 1) status.end() #--------------- Assimilation -------------------------------------------------------# status.start("EnKF") -sda.enkf(settings=settings, obs.mean = obs.mean, - obs.cov = obs.cov, IC = IC, Q = NULL) -status.end() \ No newline at end of file +sda.enkf( + settings = settings, obs.mean = obs.mean, + obs.cov = obs.cov, IC = IC, Q = NULL +) +status.end() diff --git a/models/sipnet/tests/testthat.R b/models/sipnet/tests/testthat.R index 7a1fc04289f..c750f864487 100644 --- a/models/sipnet/tests/testthat.R +++ b/models/sipnet/tests/testthat.R @@ -2,4 +2,4 @@ library(testthat) library(PEcAn.utils) PEcAn.logger::logger.setQuitOnSevere(FALSE) -#test_check("PEcAn.SIPNET") +# test_check("PEcAn.SIPNET") diff --git a/models/sipnet/tests/testthat/test.met2model.R b/models/sipnet/tests/testthat/test.met2model.R index 9619f05e3ac..7dc5d4f0687 100644 --- a/models/sipnet/tests/testthat/test.met2model.R +++ b/models/sipnet/tests/testthat/test.met2model.R @@ -6,7 +6,8 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("Met conversion runs without error", { nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" diff --git a/models/stics/R/met2model.STICS.R b/models/stics/R/met2model.STICS.R index 1d53bdd13ea..86520c48369 100644 --- a/models/stics/R/met2model.STICS.R +++ b/models/stics/R/met2model.STICS.R @@ -10,181 +10,181 @@ ##' @param end_date end date of the simulation ##' @param overwrite logical: replace output files if they already exist? ##' @param ... other arguments passed to function -##' @return results +##' @return results ##' @export ##' @author Istem Fer -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# met2model.STICS <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, ...) 
{ - PEcAn.logger::logger.info("START met2model.STICS") - - start_date <- as.POSIXlt(start_date, tz = "UTC") - end_date <- as.POSIXlt(end_date, tz = "UTC") - start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - + + start_date <- as.POSIXlt(start_date, tz = "UTC") + end_date <- as.POSIXlt(end_date, tz = "UTC") + start_year <- lubridate::year(start_date) + end_year <- lubridate::year(end_date) + # starting with the easiest case, full years # STICS looks for different input files for each year - out.files <- paste(in.prefix, seq(start_year, end_year), "climate", sep = ".") + out.files <- paste(in.prefix, seq(start_year, end_year), "climate", sep = ".") out.files.full <- file.path(outfolder, out.files) - - results <- data.frame(file = out.files.full, - host = PEcAn.remote::fqdn(), - mimetype = "text/plain", - formatname = "climate", - startdate = start_date, # these need fixing,not same for all climate files - enddate = end_date, # these need fixing - dbfile.name = out.files, - stringsAsFactors = FALSE) + + results <- data.frame( + file = out.files.full, + host = PEcAn.remote::fqdn(), + mimetype = "text/plain", + formatname = "climate", + startdate = start_date, # these need fixing,not same for all climate files + enddate = end_date, # these need fixing + dbfile.name = out.files, + stringsAsFactors = FALSE + ) PEcAn.logger::logger.info("internal results") PEcAn.logger::logger.info(results) - - + + ## check to see if the outfolder is defined, if not create directory for output if (!file.exists(outfolder)) { dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) } - + ctr <- 1 - + ## loop over files/years for (year in seq(start_year, end_year)) { - if (file.exists(out.files.full[ctr]) && !overwrite) { PEcAn.logger::logger.debug("File '", out.files.full[ctr], "' already exists, skipping to next file.") ctr <- ctr + 1 next } - + PEcAn.logger::logger.info(year) - + ## handle dates, also for partial year(s) - if(year == start_year & year != end_year){ + if (year == start_year & year != end_year) { # start year could be full or partial simdays <- seq(lubridate::yday(start_date), PEcAn.utils::days_in_year(year)) - }else if(year == end_year & year != start_year){ + } else if (year == end_year & year != start_year) { # end year could be full or partial simdays <- seq(1, lubridate::yday(end_date)) - }else if(year == end_year & year == start_year){ + } else if (year == end_year & year == start_year) { # we have one full or partial year simdays <- seq(lubridate::yday(start_date), lubridate::yday(end_date)) - }else{ + } else { # a full year in between simdays <- seq(1, PEcAn.utils::days_in_year(year)) } - - - NDAYS <- length(simdays) - NWEATHER <- as.integer(13) - weather_df <- as.data.frame(matrix( -999.9, nrow = NDAYS, ncol = NWEATHER)) - + + + NDAYS <- length(simdays) + NWEATHER <- as.integer(13) + weather_df <- as.data.frame(matrix(-999.9, nrow = NDAYS, ncol = NWEATHER)) + # prepare data frame for STICS format, daily inputs, but doesn't have to be full year - weather_df[ ,1] <- rep(gsub(".*_STICS_site_", "", outfolder), NDAYS) # column 1: name of weather file - weather_df[ ,2] <- rep(year, NDAYS) # column 2: year + weather_df[, 1] <- rep(gsub(".*_STICS_site_", "", outfolder), NDAYS) # column 1: name of weather file + weather_df[, 2] <- rep(year, NDAYS) # column 2: year start_month <- ifelse(year == start_year, paste0(start_date), paste0(year, "/01/01")) - end_month <- ifelse(year == end_year, paste0(end_date), paste0(year, "/12/31")) - - weather_df[ ,3] <- 
lubridate::month(seq(lubridate::as_date(start_month), - lubridate::as_date(end_month), by = "day")) # column 3: month - - weather_df[ ,4] <- lubridate::mday(seq(lubridate::as_date(start_month), - lubridate::as_date(end_month), by = "day")) # column 4: day in month - weather_df[ ,5] <- simdays # column 5: Julian day - + end_month <- ifelse(year == end_year, paste0(end_date), paste0(year, "/12/31")) + + weather_df[, 3] <- lubridate::month(seq(lubridate::as_date(start_month), + lubridate::as_date(end_month), + by = "day" + )) # column 3: month + + weather_df[, 4] <- lubridate::mday(seq(lubridate::as_date(start_month), + lubridate::as_date(end_month), + by = "day" + )) # column 4: day in month + weather_df[, 5] <- simdays # column 5: Julian day + ## handle variables old.file <- file.path(in.path, paste(in.prefix, year, "nc", sep = ".")) if (file.exists(old.file)) { ## open netcdf - nc <- ncdf4::nc_open(old.file) + nc <- ncdf4::nc_open(old.file) on.exit(ncdf4::nc_close(nc), add = TRUE) - + ## convert time to seconds sec <- nc$dim$time$vals sec <- PEcAn.utils::ud_convert(sec, unlist(strsplit(nc$dim$time$units, " "))[1], "seconds") - + dt <- diff(sec)[1] tstep <- round(86400 / dt) dt <- 86400 / tstep - + ind <- rep(simdays, each = tstep) - - if(unlist(strsplit(nc$dim$time$units, " "))[1] %in% c("days", "day")){ - #this should always be the case, but just in case - origin_dt <- as.POSIXct(unlist(strsplit(nc$dim$time$units, " "))[3], "%Y-%m-%d", tz="UTC") + + if (unlist(strsplit(nc$dim$time$units, " "))[1] %in% c("days", "day")) { + # this should always be the case, but just in case + origin_dt <- as.POSIXct(unlist(strsplit(nc$dim$time$units, " "))[3], "%Y-%m-%d", tz = "UTC") ydays <- lubridate::yday(origin_dt + sec) - - }else{ + } else { PEcAn.logger::logger.error("Check units of time in the weather data.") } - + # column 6: minimum temperature (°C) - Tair <- ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin - Tair <- Tair[ydays %in% simdays] + Tair <- ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin + Tair <- Tair[ydays %in% simdays] Tair_C <- PEcAn.utils::ud_convert(Tair, "K", "degC") - t_dmin <- round(tapply(Tair_C, ind, min, na.rm = TRUE), digits = 2) # maybe round these numbers - weather_df[ ,6] <- t_dmin - + t_dmin <- round(tapply(Tair_C, ind, min, na.rm = TRUE), digits = 2) # maybe round these numbers + weather_df[, 6] <- t_dmin + # column 7: maximum temperature (°C) - t_dmax <- round(tapply(Tair_C, ind, max, na.rm = TRUE), digits = 2) # maybe round these numbers - weather_df[ ,7] <- t_dmax - + t_dmax <- round(tapply(Tair_C, ind, max, na.rm = TRUE), digits = 2) # maybe round these numbers + weather_df[, 7] <- t_dmax + # column 8: global radiation (MJ m-2. 
j-1) rad <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") - gr <- rad * 0.0864 # W m-2 to MJ m-2 d-1 - gr <- gr[ydays %in% simdays] - weather_df[ ,8] <- round(tapply(gr, ind, mean, na.rm = TRUE), digits = 2) # irradiation (MJ m-2 d-1) - + gr <- rad * 0.0864 # W m-2 to MJ m-2 d-1 + gr <- gr[ydays %in% simdays] + weather_df[, 8] <- round(tapply(gr, ind, mean, na.rm = TRUE), digits = 2) # irradiation (MJ m-2 d-1) + # column 9: Penman PET (mm.j-1) OPTIONAL, leave it as -999.9 for now - + # column 10: rainfall (mm.j-1) - Rain <- ncdf4::ncvar_get(nc, "precipitation_flux") # kg m-2 s-1 - Rain <- Rain[ydays %in% simdays] - raini <- tapply(Rain * 86400, ind, mean, na.rm = TRUE) - weather_df[ ,10] <- round(raini, digits = 2) # precipitation (mm d-1) - - # column 11: wind (m.s-1) + Rain <- ncdf4::ncvar_get(nc, "precipitation_flux") # kg m-2 s-1 + Rain <- Rain[ydays %in% simdays] + raini <- tapply(Rain * 86400, ind, mean, na.rm = TRUE) + weather_df[, 10] <- round(raini, digits = 2) # precipitation (mm d-1) + + # column 11: wind (m.s-1) # OPTIONAL if you're not using the “Shuttleworth and Wallace” method or the “Penman calculate” method to calculate PET in the station file U <- try(ncdf4::ncvar_get(nc, "eastward_wind")) V <- try(ncdf4::ncvar_get(nc, "northward_wind")) - if(is.numeric(U) & is.numeric(V) & !all(is.nan(U)) & !all(is.nan(V))){ - U <- U[ydays %in% simdays] - V <- V[ydays %in% simdays] - ws <- sqrt(U ^ 2 + V ^ 2) - }else{ + if (is.numeric(U) & is.numeric(V) & !all(is.nan(U)) & !all(is.nan(V))) { + U <- U[ydays %in% simdays] + V <- V[ydays %in% simdays] + ws <- sqrt(U^2 + V^2) + } else { ws <- try(ncdf4::ncvar_get(nc, "wind_speed")) ws <- ws[ydays %in% simdays] if (is.numeric(ws)) { PEcAn.logger::logger.info("eastward_wind and northward_wind absent; using wind_speed") - }else{ + } else { PEcAn.logger::logger.severe("No variable found to calculate wind_speed") } } - weather_df[ ,11] <- round(tapply(ws, ind, mean, na.rm = TRUE), digits = 2) # mean wind speed (m s-1) - + weather_df[, 11] <- round(tapply(ws, ind, mean, na.rm = TRUE), digits = 2) # mean wind speed (m s-1) + # column 12: vapour pressure (mbars), leave it as -999.9 for now # OPTIONAL if you're not using the “Shuttleworth and Wallace” method or the “Penman calculate” method to calculate PET in the station file - - # column 13: CO2 content(ppm). + + # column 13: CO2 content(ppm). 
co2 <- try(ncdf4::ncvar_get(nc, "mole_fraction_of_carbon_dioxide_in_air")) co2 <- co2[ydays %in% simdays] - if(is.numeric(co2)){ - weather_df[ ,13] <- round(tapply(co2 * 1e6, ind, mean, na.rm = TRUE), digits = 1) - }else{ + if (is.numeric(co2)) { + weather_df[, 13] <- round(tapply(co2 * 1e6, ind, mean, na.rm = TRUE), digits = 1) + } else { # default : 330 ppm - weather_df[ ,13] <- 330 + weather_df[, 13] <- 330 PEcAn.logger::logger.info("mole_fraction_of_carbon_dioxide_in_air absent; using default 330 ppm") } - - }else{ + } else { PEcAn.logger::logger.severe(old.file, " does not exist.") } utils::write.table(weather_df, file = out.files.full[ctr], col.names = FALSE, row.names = FALSE) ctr <- ctr + 1 - } ## end-loop over files return(invisible(results)) - } # met2model.STICS diff --git a/models/stics/R/model2netcdf.STICS.R b/models/stics/R/model2netcdf.STICS.R index 924c67b3637..fb5a431fb4c 100644 --- a/models/stics/R/model2netcdf.STICS.R +++ b/models/stics/R/model2netcdf.STICS.R @@ -1,5 +1,5 @@ ##' Convert STICS output into the NACP Intercomparison format (ALMA using netCDF) -##' +##' ##' @name model2netcdf.STICS ##' @title Code to convert STICS' output into netCDF format ##' @@ -8,99 +8,99 @@ ##' @param sitelon Longitude of the site ##' @param start_date Start time of the simulation ##' @param end_date End time of the simulation -##' @param overwrite Whether or not to overwrite existing output files +##' @param overwrite Whether or not to overwrite existing output files ##' @export ##' ##' @author Istem Fer -##' +##' model2netcdf.STICS <- function(outdir, sitelat, sitelon, start_date, end_date, overwrite = FALSE) { - ### Read in model output in STICS format out_files <- list.files(outdir) - + stics_out_file <- file.path(outdir, out_files[grepl("mod_s.*", out_files)]) - stics_output <- lapply(stics_out_file, utils::read.table, header = TRUE, sep = ";") - stics_output <- do.call("rbind", stics_output) + stics_output <- lapply(stics_out_file, utils::read.table, header = TRUE, sep = ";") + stics_output <- do.call("rbind", stics_output) # probably already ordered, but order by year and DoY - stics_output <- stics_output[order(stics_output[,1], stics_output[,4]), ] + stics_output <- stics_output[order(stics_output[, 1], stics_output[, 4]), ] + - simulation_years <- unique(stics_output$ian) - + # get all years that we want data from year_seq <- seq(lubridate::year(start_date), lubridate::year(end_date)) - + # check that specified years and output years match if (!all(year_seq %in% simulation_years)) { - # if not fail altogether, so that it won't break ensemble analysis + # if not fail altogether, so that it won't break ensemble analysis PEcAn.logger::logger.error("Years selected for model run and STICS output years do not match.") } - + # determine time step? 
- + for (y in simulation_years) { - if (file.exists(file.path(outdir, paste(y, "nc", sep = "."))) & overwrite == FALSE) { next } - - thisyear <- stics_output[ , "ian"] == y - + + thisyear <- stics_output[, "ian"] == y + outlist <- list() - outlist[[length(outlist)+1]] <- stics_output[thisyear, "lai.n."] # LAI in (m2 m-2) - + outlist[[length(outlist) + 1]] <- stics_output[thisyear, "lai.n."] # LAI in (m2 m-2) + # daily amount of CO2-C emitted due to soil mineralisation (humus and organic residues) (kg ha-1 d-1) - HeteroResp <- PEcAn.utils::ud_convert(stics_output[thisyear, "CO2sol"], "ha-1 day-1", "m-2 s-1") - - outlist[[length(outlist)+1]] <- HeteroResp - - - # dltams(n): daily growth rate of the plant (t.ha-1.d-1) - dltams <- PEcAn.utils::ud_convert(stics_output[thisyear, "dltams.n."], "ton", "kg") * 0.48 # ton to kgC + HeteroResp <- PEcAn.utils::ud_convert(stics_output[thisyear, "CO2sol"], "ha-1 day-1", "m-2 s-1") + + outlist[[length(outlist) + 1]] <- HeteroResp + + + # dltams(n): daily growth rate of the plant (t.ha-1.d-1) + dltams <- PEcAn.utils::ud_convert(stics_output[thisyear, "dltams.n."], "ton", "kg") * 0.48 # ton to kgC # dltaremobil: daily amount of perennial reserves remobilised (t.ha-1.d-1) dltaremobil <- PEcAn.utils::ud_convert(stics_output[thisyear, "dltaremobil"], "ton", "kg") * 0.48 # ton to kgC NPP <- dltams - dltaremobil # kgC ha-1 d-1 - NPP[NPP<0] <- 0 - + NPP[NPP < 0] <- 0 + # double checking that this is all NPP (above and below) ## this: - #stics_output[thisyear, "dltams.n."] # t.ha-1.d-1 + # stics_output[thisyear, "dltams.n."] # t.ha-1.d-1 ## should be roughly equal to this: - #diff(stics_output[thisyear, "masec.n."])+ diff(stics_output[thisyear, "msrac.n."]) # t.ha-1 - - NPP <- PEcAn.utils::ud_convert(NPP, "ha-1 day-1", "m-2 s-1") # kg C m-2 s-1 - outlist[[length(outlist)+1]] <- NPP - - NEE <- -1*(NPP-HeteroResp) - outlist[[length(outlist)+1]] <- NEE - + # diff(stics_output[thisyear, "masec.n."])+ diff(stics_output[thisyear, "msrac.n."]) # t.ha-1 + + NPP <- PEcAn.utils::ud_convert(NPP, "ha-1 day-1", "m-2 s-1") # kg C m-2 s-1 + outlist[[length(outlist) + 1]] <- NPP + + NEE <- -1 * (NPP - HeteroResp) + outlist[[length(outlist) + 1]] <- NEE + # other vars # Cr: amount of C in organic residues mixed with soil (kg.ha-1) # Crac: amount of C in roots at harvest (kg.ha-1) # Chumt: amount of C in humified organic matter (active + inert fractions) (kg.ha-1) - + # ******************** Declare netCDF dimensions and variables ********************# - t <- ncdf4::ncdim_def(name = "time", - units = paste0("days since ", y, "-01-01 00:00:00"), - stics_output[stics_output[,1] == y, 4], # allow partial years, this info is already in matrix_weather - calendar = "standard", - unlim = TRUE) - - + t <- ncdf4::ncdim_def( + name = "time", + units = paste0("days since ", y, "-01-01 00:00:00"), + stics_output[stics_output[, 1] == y, 4], # allow partial years, this info is already in matrix_weather + calendar = "standard", + unlim = TRUE + ) + + lat <- ncdf4::ncdim_def("lat", "degrees_north", vals = as.numeric(sitelat), longname = "station_latitude") - lon <- ncdf4::ncdim_def("lon", "degrees_east", vals = as.numeric(sitelon), longname = "station_longitude") - + lon <- ncdf4::ncdim_def("lon", "degrees_east", vals = as.numeric(sitelon), longname = "station_longitude") + dims <- list(lon = lon, lat = lat, time = t) - + nc_var <- list() - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("LAI", dims) - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("HeteroResp", dims) - 
nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("NPP", dims) - nc_var[[length(nc_var)+1]] <- PEcAn.utils::to_ncvar("NEE", dims) - + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("LAI", dims) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("HeteroResp", dims) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("NPP", dims) + nc_var[[length(nc_var) + 1]] <- PEcAn.utils::to_ncvar("NEE", dims) + # ******************** Declare netCDF variables ********************# - + ### Output netCDF data nc <- ncdf4::nc_create(file.path(outdir, paste(y, "nc", sep = ".")), nc_var) varfile <- file(file.path(outdir, paste(y, "nc", "var", sep = ".")), "w") @@ -111,8 +111,5 @@ model2netcdf.STICS <- function(outdir, sitelat, sitelon, start_date, end_date, o } close(varfile) ncdf4::nc_close(nc) - } ### End of year loop - - } # model2netcdf.STICS diff --git a/models/stics/R/write.config.STICS.R b/models/stics/R/write.config.STICS.R index 39802f5520a..4a4179fb105 100644 --- a/models/stics/R/write.config.STICS.R +++ b/models/stics/R/write.config.STICS.R @@ -13,624 +13,618 @@ ##' @return configuration file for STICS for given run ##' @export ##' @author Istem Fer -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# write.config.STICS <- function(defaults, trait.values, settings, run.id) { - ## the rest of the code assumes only plant PFTs ## little modification here as not to have a bigger re-write for now - if(any(grepl("soil", names(trait.values)))){ + if (any(grepl("soil", names(trait.values)))) { soil_params <- trait.values[[grep("soil", names(trait.values))]] settings$pfts[[grep("soil", names(trait.values))]] <- NULL trait.values[[grep("soil", names(trait.values))]] <- NULL - }else{ + } else { soil_params <- NULL } - + ## simulation days, used later dseq <- seq(lubridate::as_date(settings$run$start.date), lubridate::as_date(settings$run$end.date), by = "day") - + # find out where to write run/ouput - rundir <- file.path(settings$host$rundir, run.id) - cfgdir <- file.path(settings$host$rundir, run.id, "config") - bindir <- file.path(settings$host$rundir, run.id, "bin") - outdir <- file.path(settings$host$outdir, run.id) - - + rundir <- file.path(settings$host$rundir, run.id) + cfgdir <- file.path(settings$host$rundir, run.id, "config") + bindir <- file.path(settings$host$rundir, run.id, "bin") + outdir <- file.path(settings$host$outdir, run.id) + + ########## Determining number of USMs (could be made its own function) - + # In STICS, it is 1 USM per crop cycle, where each cycle can be 2-years max # If we have a consecutive monoculture for > 2 years, we still need to divide it into 2-year USMs # If there are multiple pfts, this is a strong clue that there are multiple crop cycles # but it can also be the case that there is one cycle with intercropping - + years_requested <- unique(lubridate::year(dseq)) # we always pass two climate files to STICS, repeat the same year twice if the last crop cycle has 1 year only - if(length(years_requested) %%2 == 1) years_requested <- c(years_requested, years_requested[length(years_requested)]) - + if (length(years_requested) %% 2 == 1) years_requested <- c(years_requested, years_requested[length(years_requested)]) + # Could the events file hierarchy be organized by crop cycle? 
Need to check how ACE-json does - if(!is.null(settings$run$inputs$fielddata)){ + if (!is.null(settings$run$inputs$fielddata)) { events_file <- jsonlite::read_json(settings$run$inputs$fielddata$path, simplifyVector = TRUE)[[1]] - + # testing new approach - if(!is.null(events_file$rotation)){ + if (!is.null(events_file$rotation)) { usmdirs <- rep(NA, nrow(events_file$rotation)) - for(uic in seq_along(usmdirs)){ + for (uic in seq_along(usmdirs)) { p1 <- tolower(events_file$rotation$planted_crop1[uic]) p2 <- ifelse(events_file$rotation$planted_crop2[uic] != "-99.0", tolower(events_file$rotation$planted_crop2[uic]), "") - uname <- paste0(p1,p2) - usmdirs[uic] <- paste0(file.path(settings$host$rundir, run.id, uname), "_", - lubridate::year(events_file$rotation$rotation_begin[uic]), "-", - lubridate::year(events_file$rotation$rotation_end[uic])) + uname <- paste0(p1, p2) + usmdirs[uic] <- paste0( + file.path(settings$host$rundir, run.id, uname), "_", + lubridate::year(events_file$rotation$rotation_begin[uic]), "-", + lubridate::year(events_file$rotation$rotation_end[uic]) + ) } - }else{ - + } else { # events file can have info from other years, subset - sub_events <- events_file$events[(lubridate::year(events_file$events$date) %in% years_requested),] - - + sub_events <- events_file$events[(lubridate::year(events_file$events$date) %in% years_requested), ] + + crops <- c(sub_events$planted_crop, sub_events$harvest_crop) - if(!is.null(crops)){ + if (!is.null(crops)) { crops <- crops[!is.na(crops)] # filter NAs caused by flattening the json # for now taking a simplistic assumption that if there are more than 1 harvested + planted crops, there are multiple crop cycles - if(length(unique(crops)) > 1){ + if (length(unique(crops)) > 1) { # we probably have multiple pfts passed via settings, usmdir_root will be an array - usmdir_root <- paste0(file.path(settings$host$rundir, run.id, sapply(settings$pfts, `[[`, "name")), "_") + usmdir_root <- paste0(file.path(settings$host$rundir, run.id, sapply(settings$pfts, `[[`, "name")), "_") # !!! IMPORTANT: document also elsewhere # I'm making STICS PFT names to match fieldactivity names, or more broadly whatever is in the events json file!!! # e.g. barley is not barley but bar # alternatively I can start a LUT to match bety-pft names to match events species codes # we need to pass right parameters under right USM! 
- - if(length(years_requested) <= 2){ + + if (length(years_requested) <= 2) { # multiple usms due to crop rotation only # associate spp and year - usmdirs <- sapply(crops, function(x){ + usmdirs <- sapply(crops, function(x) { crop_yr <- lubridate::year(sub_events$date[(sub_events$planted_crop %in% x) | (sub_events$harvest_crop %in% x)]) crop_usm <- paste0(usmdir_root[grep(tolower(x), usmdir_root)], crop_yr) return(crop_usm) }) - - # make sure the usmdir order is the same as the rotation order - # this may need to get more sophisticated in the future + + # make sure the usmdir order is the same as the rotation order + # this may need to get more sophisticated in the future # but keeping the usmdirs in chronological order will come handy in the rest of this function usmdirs <- usmdirs[order(sapply(strsplit(sub(".*_", "", basename(usmdirs)), "-"), function(x) min(as.numeric(x))))] - - }else{ + } else { # multiple usms due to crop rotation and multiple cropping seasons per rotation # not implemented yet PEcAn.logger::logger.severe("write.config.STICS is under development for this case.") } - - }else{ + } else { # single crop, single usmdir_root - usmdir_root <- paste0(file.path(settings$host$rundir, run.id, settings$pfts$pft$name), "_") - if(length(years_requested) > 2){ + usmdir_root <- paste0(file.path(settings$host$rundir, run.id, settings$pfts$pft$name), "_") + if (length(years_requested) > 2) { # multiple usms because more than 2 years of simulation - years_indices <- rep(seq(1, length(years_requested), by=2), each=2) - usmdirs <- tapply(years_requested, years_indices, function(x) paste0(usmdir_root, paste(x, collapse = '-'))) - }else{ + years_indices <- rep(seq(1, length(years_requested), by = 2), each = 2) + usmdirs <- tapply(years_requested, years_indices, function(x) paste0(usmdir_root, paste(x, collapse = "-"))) + } else { # single usm because less than 2 years of simulation - usmdirs <- paste0(usmdir_root, paste(years_requested, collapse = '-')) - } + usmdirs <- paste0(usmdir_root, paste(years_requested, collapse = "-")) + } } - - }else{ - # somehow events have no crop identifiers, e.g. only fertilization and tilling events are passed + } else { + # somehow events have no crop identifiers, e.g. 
only fertilization and tilling events are passed # most likely a partial year & crop cycle - usmdir_root <- paste0(file.path(settings$host$rundir, run.id, settings$pfts$pft$name), "_") + usmdir_root <- paste0(file.path(settings$host$rundir, run.id, settings$pfts$pft$name), "_") # single usm - usmdirs <- paste0(usmdir_root, paste(years_requested, collapse = '-')) + usmdirs <- paste0(usmdir_root, paste(years_requested, collapse = "-")) } - } - } - + # TODO: have a better way to determine USMs - + ########################## finish usmdirs - + ## make sure rundir and outdir exist dir.create(rundir, showWarnings = FALSE, recursive = TRUE) dir.create(outdir, showWarnings = FALSE, recursive = TRUE) - + ## create usm, config and bin dirs - dir.create(cfgdir, showWarnings = FALSE, recursive = TRUE) - dir.create(bindir, showWarnings = FALSE, recursive = TRUE) + dir.create(cfgdir, showWarnings = FALSE, recursive = TRUE) + dir.create(bindir, showWarnings = FALSE, recursive = TRUE) sapply(usmdirs, dir.create, showWarnings = FALSE, recursive = TRUE) - + # write preferences - prf_xml <- XML::xmlParse(system.file("preferences.xml", package = "PEcAn.STICS")) + prf_xml <- XML::xmlParse(system.file("preferences.xml", package = "PEcAn.STICS")) prf_list <- XML::xmlToList(prf_xml) prf_list$entry$text <- rundir - - XML::saveXML(PEcAn.settings::listToXml(prf_list, "properties"), - file = file.path(cfgdir, "preferences.xml"), - prefix = '\n\n') - - + + XML::saveXML(PEcAn.settings::listToXml(prf_list, "properties"), + file = file.path(cfgdir, "preferences.xml"), + prefix = '\n\n' + ) + + # stics and javastics path stics_path <- settings$model$binary - + # Per STICS development team, there are two types of STICS inputs # Global input: _plt.xml, param_gen.xml, param_newform.xml # Local input: _ini.xml (initialization), sols.xml (soils), _tec.xml (crop management), (climate files) _sta.xml, *.year - + # NOTE: however, it's the text files, not the xml files that are read by the STICS executable. - + ################################# Prepare Plant File ####################################### - + ## this is where we overwrite model parameters - + # read in template plt file, has all the formalisms - plt_xml <- XML::xmlParse(system.file("crop_plt.xml", package = "PEcAn.STICS")) - #plt_list <- XML::xmlToList(plt_xml) + plt_xml <- XML::xmlParse(system.file("crop_plt.xml", package = "PEcAn.STICS")) + # plt_list <- XML::xmlToList(plt_xml) plt_files <- list() for (pft in seq_along(trait.values)) { - pft.traits <- unlist(trait.values[[pft]]) - pft.names <- names(pft.traits) - + pft.names <- names(pft.traits) + plant_file <- file.path(rundir, paste0(names(trait.values)[pft], "_plt.xml")) - - - if(names(trait.values)[pft] != "env"){ + + + if (names(trait.values)[pft] != "env") { # save the template, will be overwritten below XML::saveXML(plt_xml, file = plant_file) - }else{ + } else { next } - + plt_files[[pft]] <- plant_file - + # to learn the parameters in a plant file # SticsRFiles::get_param_info(file_path = plant_file) - + # go over each formalism and replace params following the order in crop_plt # TODO: vary more params - + # plant name and group # effect of atmospheric CO2 concentration - + # phasic development # to see parameters per formalism # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "phasic development") # unlist(values) - + # name code of plant in 3 letters # a handful of plants have to have specific codes, e.g. 
forages need to be 'fou' and vine needs to be 'vig' # but others can be anything? if not, either consider a LUT or passing via settings - if(names(trait.values)[pft] %in% c("frg", "wcl", "alf")){ - codeplante <- 'fou' + if (names(trait.values)[pft] %in% c("frg", "wcl", "alf")) { + codeplante <- "fou" codeperenne <- 2 - }else{ - codeplante <- base::substr(names(trait.values)[pft],1,3) + } else { + codeplante <- base::substr(names(trait.values)[pft], 1, 3) codeperenne <- 1 } codebfroid <- 2 # vernalization requirement, hardcoding for now, 2==yes SticsRFiles::set_param_xml(plant_file, "codeplante", codeplante, overwrite = TRUE) SticsRFiles::set_param_xml(plant_file, "codeperenne", codeperenne, overwrite = TRUE) SticsRFiles::set_param_xml(plant_file, "codebfroid", codebfroid, overwrite = TRUE) - + # minimum temperature below which development stops (degree C) if ("tdmin" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "tdmin", pft.traits[which(pft.names == "tdmin")], overwrite = TRUE) } - + # maximum temperature above which development stops (degree C) if ("tdmax" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "tdmax", pft.traits[which(pft.names == "tdmax")], overwrite = TRUE) } - + # basal photoperiod if ("phobase" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "phobase", pft.traits[which(pft.names == "phobase")], overwrite = TRUE) } - + # saturating photoperiod if ("phosat" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "phosat", pft.traits[which(pft.names == "phosat")], overwrite = TRUE) } - - + + # maximum phasic delay allowed due to stresses if ("phasic_delay_max" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "stressdev", pft.traits[which(pft.names == "phasic_delay_max")], overwrite = TRUE) } - + # minimum number of vernalising days (d) [0,7] if ("vernalization_days_min" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "jvcmini", round(pft.traits[which(pft.names == "vernalization_days_min")]), overwrite = TRUE) } - + # day of initiation of vernalisation in perennial crops (julian d) [1,731] # this only takes effect for perennial crops if ("vernalization_init" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "julvernal", round(pft.traits[which(pft.names == "vernalization_init")]), overwrite = TRUE) } - + # optimal temperature for vernalisation (degreeC) if ("vernalization_TOpt" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "tfroid", pft.traits[which(pft.names == "vernalization_TOpt")], overwrite = TRUE) } - + # semi thermal amplitude for vernalising effect (degreeC) - if ("vernalization_TAmp" %in% pft.names) { + if ("vernalization_TAmp" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "ampfroid", pft.traits[which(pft.names == "vernalization_TAmp")], overwrite = TRUE) } - - if ("coeflevamf" %in% pft.names) { + + if ("coeflevamf" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "coeflevamf", pft.traits[which(pft.names == "coeflevamf")], overwrite = TRUE) } - - if ("coefamflax" %in% pft.names) { + + if ("coefamflax" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "coefamflax", pft.traits[which(pft.names == "coefamflax")], overwrite = TRUE) } - - if ("coeflaxsen" %in% pft.names) { + + if ("coeflaxsen" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "coeflaxsen", pft.traits[which(pft.names == "coeflaxsen")], overwrite = TRUE) } - - if ("coefsenlan" %in% pft.names) { + + if ("coefsenlan" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "coefsenlan", pft.traits[which(pft.names == 
"coefsenlan")], overwrite = TRUE) } - - if ("coeflevdrp" %in% pft.names) { + + if ("coeflevdrp" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "coeflevdrp", pft.traits[which(pft.names == "coeflevdrp")], overwrite = TRUE) } - - if ("coefdrpmat" %in% pft.names) { + + if ("coefdrpmat" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "coefdrpmat", pft.traits[which(pft.names == "coefdrpmat")], overwrite = TRUE) } - - if ("coefflodrp" %in% pft.names) { + + if ("coefflodrp" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "coefflodrp", pft.traits[which(pft.names == "coefflodrp")], overwrite = TRUE) } - - + + # emergence and starting # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "emergence and starting") # unlist(values) - + # minimum temperature below which emergence is stopped (degreeC) - if ("emergence_Tmin" %in% pft.names) { + if ("emergence_Tmin" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "tgmin", pft.traits[which(pft.names == "emergence_Tmin")], overwrite = TRUE) } - + # nbfeuilplant, leaf number per plant when planting, default 0, skipping for now - - + + # this is a switch, for now hardcoding to have delay at the beginning of the crop (1) # if starting the simulation from a later stage (e.g. lev) this has no effect # codegermin, option of simulation of a germination phase or a delay at the beginning of the crop (1) or direct starting (2) SticsRFiles::set_param_xml(plant_file, "codegermin", 1, overwrite = TRUE) - + # cumulative thermal time allowing germination (degree-d) - if ("cum_thermal_germin" %in% pft.names) { + if ("cum_thermal_germin" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "stpltger", pft.traits[which(pft.names == "cum_thermal_germin")], overwrite = TRUE) } - + # skipping the other parameters related to this switch, they don't seem influential, at least on NPP and LAI # potgermi: soil water potential under which seed imbibition is impeded # nbjgerlim: maximum number of days after grain imbibition allowing full germination # propjgermin: minimal proportion of the duration nbjgerlim when the temperature is higher than the temperature threshold Tdmax - - + + # parameter of the curve of coleoptile elongation - if ("belong" %in% pft.names) { + if ("belong" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "belong", pft.traits[which(pft.names == "belong")], overwrite = TRUE) } - + # parameter of the plantlet elongation curve - if ("celong" %in% pft.names) { + if ("celong" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "celong", pft.traits[which(pft.names == "celong")], overwrite = TRUE) } - + # maximum elongation of the coleoptile in darkness condition - if ("coleoptile_elong_dark_max" %in% pft.names) { + if ("coleoptile_elong_dark_max" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "elmax", pft.traits[which(pft.names == "coleoptile_elong_dark_max")], overwrite = TRUE) } - + # number of days after germination after which plant emergence is reduced - if ("days_reduced_emergence_postgerm" %in% pft.names) { + if ("days_reduced_emergence_postgerm" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "nlevlim1", round(pft.traits[which(pft.names == "days2reduced_emergence_postgerm")]), overwrite = TRUE) } - + # number of days after germination after which plant emergence is impossible - if ("days2stopped_emergence_postgerm" %in% pft.names) { + if ("days2stopped_emergence_postgerm" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "nlevlim2", 
round(pft.traits[which(pft.names == "days2stopped_emergence_postgerm")]), overwrite = TRUE) } - + # plant vigor index allowing to emerge through a soil crust, vigueurbat == 1 inactivates some soil crust related parameters, skipping for now - + # there are also "planting" related parameters - + # leaves # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "leaves") # unlist(values) - - + + # phyllotherme, thermal duration between the apparition of two successive leaves on the main stem (degree day) # assuming this is the same as phyllochron if ("phyllochron" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "phyllotherme", pft.traits[which(pft.names == "phyllochron")], overwrite = TRUE) } - + # minimal density above which interplant competition starts (m-2) if ("dens_comp" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "bdens", pft.traits[which(pft.names == "dens_comp")], overwrite = TRUE) } - + # LAI above which competition between plants starts (m2 m-2) if ("lai_comp" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "laicomp", pft.traits[which(pft.names == "lai_comp")], overwrite = TRUE) } - + # basal height of crop (m) if ("height" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "hautbase", pft.traits[which(pft.names == "height")], overwrite = TRUE) } - + # maximum height of crop if ("HTMAX" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "hautmax", pft.traits[which(pft.names == "HTMAX")], overwrite = TRUE) } - + # minimum temperature at which growth ceases if ("tcmin_growth" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "tcmin", pft.traits[which(pft.names == "tcmin_growth")], overwrite = TRUE) } - + # maximum temperature at which growth ceases if ("tcmax_growth" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "tcmax", pft.traits[which(pft.names == "tcmax_growth")], overwrite = TRUE) } - + # temperature beyond which foliar growth stops if ("tcmax_foliar_growth" %in% pft.names) { # tcxstop must be > tdmax, priors should be set that way, and we can let the simulation fail afterwards, but putting a warning here - tdmax <- SticsRFiles::get_param_xml(plant_file, param="tdmax", select = "formalisme", select_value = "phasic development")[[1]][[1]] + tdmax <- SticsRFiles::get_param_xml(plant_file, param = "tdmax", select = "formalisme", select_value = "phasic development")[[1]][[1]] tcxstop <- pft.traits[which(pft.names == "tcmax_foliar_growth")] - if(tcxstop < tdmax){ + if (tcxstop < tdmax) { PEcAn.logger::logger.warn("tcmax_foliar_growth value (", tcxstop, ") should be greater than tdmax (", tdmax, ").") } SticsRFiles::set_param_xml(plant_file, "tcxstop", tcxstop, overwrite = TRUE) - } - + # ulai at the inflexion point of the function DELTAI=f(ULAI) if ("vlaimax" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "vlaimax", pft.traits[which(pft.names == "vlaimax")], overwrite = TRUE) } - + # parameter of the logistic curve of LAI growth if ("pentlaimax" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "pentlaimax", pft.traits[which(pft.names == "pentlaimax")], overwrite = TRUE) } - + # ulai from which the rate of leaf growth decreases if ("udlaimax" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "udlaimax", pft.traits[which(pft.names == "udlaimax")], overwrite = TRUE) } - + # life span of early leaves expressed as a fraction of the life span of the last leaves emitted DURVIEF if ("early2last_leaflife" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "ratiodurvieI", 
pft.traits[which(pft.names == "early2last_leaflife")], overwrite = TRUE) } - + # fraction of senescent biomass (relative to total biomass) if ("senes2total_biomass" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "ratiosen", pft.traits[which(pft.names == "senes2total_biomass")], overwrite = TRUE) } - + # fraction of senescent leaves falling to the soil # not sure if this is supposed to be a fraction or a percentage in STICS, values look like a fraction but min-max is given as 0-100 # treating it like a fraction for now if ("fracLeafFall" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "abscission", pft.traits[which(pft.names == "fracLeafFall")], overwrite = TRUE) } - + # parameter relating the C/N of dead leaves and the INN if ("parazofmorte" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "parazofmorte", pft.traits[which(pft.names == "parazofmorte")], overwrite = TRUE) } - + # parameter of the N stress function active on leaf expansion (INNLAI), bilinear function vs INN passing through the point (INNmin, INNturgmin) if ("innturgmin" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "innturgmin", pft.traits[which(pft.names == "innturgmin")], overwrite = TRUE) } - + # accelerating parameter for the lai growth rate if ("lai_growth_rate_accelerating" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "dlaimin", pft.traits[which(pft.names == "lai_growth_rate_accelerating")], overwrite = TRUE) } - + # maximum rate of the setting up of LAI if ("lai_max_rate" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "dlaimaxbrut", pft.traits[which(pft.names == "lai_max_rate")], overwrite = TRUE) - } - + } + # relative additional lifespan due to N excess in plant (INN > 1) if ("relative_addlifespan_DT_excessN" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "durviesupmax", pft.traits[which(pft.names == "relative_addlifespan_DT_excessN")], overwrite = TRUE) - } - + } + # parameter of the N stress function active on senescence (INNsenes), bilinear function vs INN passing through the point (INNmin, INNsen) if ("innsen" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "innsen", pft.traits[which(pft.names == "innsen")], overwrite = TRUE) - } - + } + # threshold soil water content active to simulate water senescence stress as a proportion of the turgor stress if ("rapsenturg" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "rapsenturg", pft.traits[which(pft.names == "rapsenturg")], overwrite = TRUE) - } - - + } + + # radiation interception # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "radiation interception") - + # extinction coefficient of photosynthetic active radiation in the canopy if ("extinction_coefficient_diffuse" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "extin", pft.traits[which(pft.names == "extinction_coefficient_diffuse")], overwrite = TRUE) - } - + } + # shoot biomass growth # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "shoot biomass growth") - + # minimum temperature for development if ("temin" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "temin", pft.traits[which(pft.names == "temin")], overwrite = TRUE) } - + # maximal temperature above which plant growth stops if ("temax" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "temax", pft.traits[which(pft.names == "temax")], overwrite = TRUE) } - + # optimal temperature (1/2) for plant growth if ("teopt" %in% pft.names) { 
SticsRFiles::set_param_xml(plant_file, "teopt", pft.traits[which(pft.names == "teopt")], overwrite = TRUE) } - + # optimal temperature (2/2) for plant growth if ("teoptbis" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "teoptbis", pft.traits[which(pft.names == "teoptbis")], overwrite = TRUE) } - + # maximum radiation use efficiency during the juvenile phase if ("RUE_juv" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "efcroijuv", pft.traits[which(pft.names == "RUE_juv")], overwrite = TRUE) } - + # maximum radiation use efficiency during the vegetative stage if ("RUE_veg" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "efcroiveg", pft.traits[which(pft.names == "RUE_veg")], overwrite = TRUE) } - + # maximum radiation use efficiency during the grain filling phase if ("RUE_rep" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "efcroirepro", pft.traits[which(pft.names == "RUE_rep")], overwrite = TRUE) } - + # fraction of daily remobilisable C reserves if ("remobres" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "remobres", pft.traits[which(pft.names == "remobres")], overwrite = TRUE) } - + # ratio biomass / useful height cut of crops (t.ha-1.m-1) if ("biomass2usefulheight" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "coefmshaut", pft.traits[which(pft.names == "biomass2usefulheight")], overwrite = TRUE) } - - + + # partitioning of biomass in organs # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "partitioning of biomass in organs") - + # maximum SLA (specific leaf area) of green leaves (cm2 g-1) if ("SLAMAX" %in% pft.names) { slamax <- pft.traits[which(pft.names == "SLAMAX")] slamax <- PEcAn.utils::ud_convert(PEcAn.utils::ud_convert(slamax, "m2", "cm2"), "kg-1", "g-1") # m2 kg-1 to cm2 g-1 SticsRFiles::set_param_xml(plant_file, "slamax", slamax, overwrite = TRUE) } - + # minimum SLA (specific leaf area) of green leaves (cm2 g-1) if ("SLAMIN" %in% pft.names) { slamin <- pft.traits[which(pft.names == "SLAMIN")] slamin <- PEcAn.utils::ud_convert(PEcAn.utils::ud_convert(slamin, "m2", "cm2"), "kg-1", "g-1") # m2 kg-1 to cm2 g-1 SticsRFiles::set_param_xml(plant_file, "slamin", slamin, overwrite = TRUE) } - - + + # ratio stem (structural part)/leaf if ("stem2leaf" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "tigefeuil", pft.traits[which(pft.names == "stem2leaf")], overwrite = TRUE) } - + # skipping: envfruit, fraction of envelop in grainmaxi (w:w) # skipping: sea, specific area of fruit envelops - + # yield formation, will get back - + # roots # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "roots") - - + + # sensanox, index of anoxia sensitivity (0 = insensitive), 0 for now # stoprac, stage when root growth stops (LAX= maximum leaf area index, end of leaf growth or SEN=beginning of leaf senescence) - + # sensrsec, index of root sensitivity to drought (1=insensitive) if ("rootsens2drought" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "sensrsec", pft.traits[which(pft.names == "rootsens2drought")], overwrite = TRUE) } - + # contrdamax, maximal reduction in root growth rate due to soil strengthness (high bulk density) if ("db_reduc_rgr_max" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "contrdamax", pft.traits[which(pft.names == "db_reduc_rgr_max")], overwrite = TRUE) - } - + } + # draclong, maximum rate of root length production per plant (cm plant-1 degreeD-1) if ("rootlength_prod_max" %in% pft.names) { 
SticsRFiles::set_param_xml(plant_file, "draclong", pft.traits[which(pft.names == "rootlength_prod_max")], overwrite = TRUE) - } - + } + # debsenrac, sum of degrees-days defining the beginning of root senescence (root life time) (degreeD) if ("root_sen_dday" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "debsenrac", round(pft.traits[which(pft.names == "root_sen_dday")]), overwrite = TRUE) - } - - #lvfront, root density at the root apex (cm cm-3) + } + + # lvfront, root density at the root apex (cm cm-3) if ("rootdens_at_apex" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "lvfront", pft.traits[which(pft.names == "rootdens_at_apex")], overwrite = TRUE) - } - + } + # longsperac - specific root length (cm g-1) if ("SRL" %in% pft.names) { - srl_val <- PEcAn.utils::ud_convert(pft.traits[which(pft.names == "SRL")], "m", "cm") + srl_val <- PEcAn.utils::ud_convert(pft.traits[which(pft.names == "SRL")], "m", "cm") SticsRFiles::set_param_xml(plant_file, "longsperac", srl_val, overwrite = TRUE) } - + # option to activate the N influence on root partitioning within the soil profile (1 = yes, 2 = no) SticsRFiles::set_param_xml(plant_file, "codazorac", 1, overwrite = TRUE) - + # reduction factor on root growth when soil mineral N is limiting (< minazorac) if ("minefnra" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "minefnra", pft.traits[which(pft.names == "minefnra")], overwrite = TRUE) - } - + } + # mineral N concentration in soil below which root growth is reduced (kg.ha-1.cm-1) if ("minazorac" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "minazorac", pft.traits[which(pft.names == "minazorac")], overwrite = TRUE) - } - + } + # mineral N concentration in soil above which root growth is maximum (kg.ha-1.cm-1) if ("maxazorac" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "maxazorac", pft.traits[which(pft.names == "maxazorac")], overwrite = TRUE) - } - + } + # frost - + # formalism - water - + # psisto, potential of stomatal closing (absolute value) (bars) - # note: units in betyDB are m, but my prior is for testing + # note: units in betyDB are m, but my prior is for testing if ("psi_stomata_closure" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "psisto", pft.traits[which(pft.names == "psi_stomata_closure")], overwrite = TRUE) - } - + } + # psiturg, potential of the beginning of decrease of the cellular extension (absolute value) (bars) # may or may not be leaf_psi_tlp in betyDB if ("leaf_psi_tlp" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "psiturg", pft.traits[which(pft.names == "leaf_psi_tlp")], overwrite = TRUE) - } - + } + # h2ofeuilverte, water content of green leaves (relative to fresh matter) (g g-1) # may or may not be water_content_TLP_leaf in betyDB if ("water_content_TLP_leaf" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "h2ofeuilverte", pft.traits[which(pft.names == "water_content_TLP_leaf")], overwrite = TRUE) - } - + } + # skipping: # h2ofeuiljaune # h2otigestruc @@ -638,153 +632,151 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { # h2ofrvert # deshydbase # tempdeshyd - + # kmax, maximum crop coefficient for water requirements (=MET/PET) if ("crop_water_max" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "kmax", pft.traits[which(pft.names == "crop_water_max")], overwrite = TRUE) - } - + } + # nitrogen # masecNmax if ("masecNmax" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "masecNmax", pft.traits[which(pft.names == "masecNmax")], overwrite = TRUE) - 
} - + } + # Nreserve if ("Nreserve" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "Nreserve", pft.traits[which(pft.names == "Nreserve")], overwrite = TRUE) - } - - + } + + # Kmabs1 if ("Kmabs1" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "Kmabs1", pft.traits[which(pft.names == "Kmabs1")], overwrite = TRUE) - } - + } + # adil if ("adil" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "adil", pft.traits[which(pft.names == "adil")], overwrite = TRUE) - } - + } + # bdil if ("bdil" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "bdil", pft.traits[which(pft.names == "bdil")], overwrite = TRUE) - } - + } + # INNmin if ("INNmin" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "INNmin", pft.traits[which(pft.names == "INNmin")], overwrite = TRUE) - } - + } + # Nmeta if ("Nmeta" %in% pft.names) { - SticsRFiles::set_param_xml(plant_file, "Nmeta", pft.traits[which(pft.names == "Nmeta")]*100, overwrite = TRUE) - } - + SticsRFiles::set_param_xml(plant_file, "Nmeta", pft.traits[which(pft.names == "Nmeta")] * 100, overwrite = TRUE) + } + # correspondance code BBCH - + # cultivar parameters # values = SticsRFiles::get_param_xml(plant_file, select = "formalisme", select_value = "cultivar parameters") - + # there are multiple cultivars (varietes) in plt file - # for now I assume we will always use only #1 in simulations + # for now I assume we will always use only #1 in simulations # hence, _tec file will always say variete==1, if you change the logic don't forget to update handling of the _tec file accordingly - + # maximal lifespan of an adult leaf expressed in summation of Q10=2 (2**(T-Tbase)) if ("leaf_lifespan_max" %in% pft.names) { # this will modifies all varietes' durvieFs by default SticsRFiles::set_param_xml(plant_file, "durvieF", pft.traits[which(pft.names == "leaf_lifespan_max")], overwrite = TRUE) # see example for setting a particular (the Grindstad) cultivar param - # SticsRFiles::set_param_xml(plant_file, "durvieF", pft.traits[which(pft.names == "leaf_lifespan_max")], select = "Grindstad", overwrite = TRUE) + # SticsRFiles::set_param_xml(plant_file, "durvieF", pft.traits[which(pft.names == "leaf_lifespan_max")], select = "Grindstad", overwrite = TRUE) } - - # cumulative thermal time between the stages LEV (emergence) and AMF (maximum acceleration of leaf growth, end of juvenile phase) + + # cumulative thermal time between the stages LEV (emergence) and AMF (maximum acceleration of leaf growth, end of juvenile phase) if ("cum_thermal_juvenile" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "stlevamf", pft.traits[which(pft.names == "cum_thermal_juvenile")], overwrite = TRUE) } - + # cumulative thermal time between the stages AMF (maximum acceleration of leaf growth, end of juvenile phase) and LAX (maximum leaf area index, end of leaf growth) if ("cum_thermal_growth" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "stamflax", pft.traits[which(pft.names == "cum_thermal_growth")], overwrite = TRUE) } - + # cumulative thermal time between the stages LEV (emergence) and DRP (starting date of filling of harvested organs) if ("cum_thermal_filling" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "stlevdrp", pft.traits[which(pft.names == "cum_thermal_filling")], overwrite = TRUE) } - + if ("adens" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "adens", pft.traits[which(pft.names == "adens")], overwrite = TRUE) } - + if ("croirac" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "croirac", 
pft.traits[which(pft.names == "croirac")], overwrite = TRUE) } - + # extinction coefficient connecting LAI to crop height if ("LAI2height" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "khaut", pft.traits[which(pft.names == "LAI2height")], overwrite = TRUE) } - + # average root radius if ("rayon" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "rayon", pft.traits[which(pft.names == "rayon")], overwrite = TRUE) } - + # minimal value for drought stress index if ("swfacmin" %in% pft.names) { SticsRFiles::set_param_xml(plant_file, "swfacmin", pft.traits[which(pft.names == "swfacmin")], overwrite = TRUE) } - + # convert xml2txt - if(names(trait.values)[pft] != "env"){ + if (names(trait.values)[pft] != "env") { SticsRFiles::convert_xml2txt(file = plant_file) # do I also need to move the file out of the plant folder to main rundir? } - + this_usm <- grep(names(trait.values)[pft], usmdirs) - sapply(this_usm, function(x){ + sapply(this_usm, function(x) { file.copy(file.path(rundir, "ficplt1.txt"), file.path(usmdirs[x], "ficplt1.txt"), overwrite = TRUE) }) - } # pft-loop ends - - - + + + ############################## Param gen / newform #################################### - - ## these also have plant parameters as well as soil + + ## these also have plant parameters as well as soil ## at the moment everything is treated as params, but some could be IC or come from the events file - + # these parameters won't change as crop changes in a continous rotation soil.names <- names(soil_params) - + for (pft in seq_along(trait.values)) { - - if(names(trait.values)[pft] == "env"){ + if (names(trait.values)[pft] == "env") { next } - - gen_xml <- XML::xmlParse(system.file("param_gen.xml", package = "PEcAn.STICS")) + + gen_xml <- XML::xmlParse(system.file("param_gen.xml", package = "PEcAn.STICS")) gen_file <- file.path(rundir, "param_gen.xml") XML::saveXML(gen_xml, file = gen_file) - codeinitprec <- ifelse(length(usmdirs>1), 1, 2) + codeinitprec <- ifelse(length(usmdirs > 1), 1, 2) SticsRFiles::set_param_xml(gen_file, "codeinitprec", codeinitprec, overwrite = TRUE) - - newf_xml <- XML::xmlParse(system.file("param_newform.xml", package = "PEcAn.STICS")) + + newf_xml <- XML::xmlParse(system.file("param_newform.xml", package = "PEcAn.STICS")) newf_file <- file.path(rundir, "param_newform.xml") - XML::saveXML(newf_xml, file = newf_file) + XML::saveXML(newf_xml, file = newf_file) + - pft.traits <- unlist(trait.values[[pft]]) - pft.names <- names(pft.traits) - + pft.names <- names(pft.traits) + ### Shoot growth # parameter defining radiation effect on conversion efficiency if ("rad_on_conversion_eff" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "coefb", pft.traits[which(pft.names == "rad_on_conversion_eff")], overwrite = TRUE) } - + # ratio of root mass to aerial mass at harvest if ("root2aerial_harvest" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "proprac", pft.traits[which(pft.names == "root2aerial_harvest")], overwrite = TRUE) @@ -794,31 +786,31 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { if ("rootmin_harvest" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "y0msrac", pft.traits[which(pft.names == "rootmin_harvest")], overwrite = TRUE) } - + ### Root growth - + # bulk density of soil below which root growth is reduced due to a lack of soil cohesion (g.cm-3) if ("bd_rootgrowth_reduced" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "dacohes", pft.traits[which(pft.names == "bd_rootgrowth_reduced")], overwrite = TRUE) } - + # 
bulk density of soil above which root growth is maximal (g.cm-3) if ("bd_rootgrowth_maximal" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "daseuilbas", pft.traits[which(pft.names == "bd_rootgrowth_maximal")], overwrite = TRUE) } - + # bulk density of soil above which root growth becomes impossible (g.cm-3) if ("bd_rootgrowth_impossible" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "daseuilhaut", pft.traits[which(pft.names == "bd_rootgrowth_impossible")], overwrite = TRUE) } - + ### Water absorption and nitrogen content of the plant - + # parameter of increase of maximal transpiration when a water stress occurs if ("maxTPincrease_waterstress" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "beta", pft.traits[which(pft.names == "maxTPincrease_waterstress")], overwrite = TRUE) } - + # root length density (RLD) above which water and N uptake are maximum and independent of RLD if ("lvopt" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "lvopt", pft.traits[which(pft.names == "lvopt")], overwrite = TRUE) @@ -828,90 +820,90 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { if ("difN_FC" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "difN", soil_params[which(soil.names == "difN_FC")], overwrite = TRUE) } - + # skipping # concrr: inorganic N concentration (NH4+NO3-N) in the rain - + # minimal amount of rain required to start an automatic fertilisation (N mm.d-1) if ("plNmin" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "plNmin", soil_params[which(soil.names == "plNmin")], overwrite = TRUE) } # skipping, irrlev: - # amount of irrigation applied automatically on the sowing day to allow germination when the model calculates automaticaly + # amount of irrigation applied automatically on the sowing day to allow germination when the model calculates automaticaly # the amount of irrigations or when the irrigation dates are calculated by sum of temperature - + # minimal amount of N in the plant required to compute INN (kg.ha-1) if ("QNpltminINN" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "QNpltminINN", pft.traits[which(pft.names == "QNpltminINN")], overwrite = TRUE) } - + ### Soil C and N processes and fertiliser losses - + # minimal temperature for decomposition of humified organic matter (degreeC) if ("tmin_mineralisation" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "tmin_mineralisation", soil_params[which(soil.names == "tmin_mineralisation")], overwrite = TRUE) } - + # parameter (1/2) of the temperature function on humus decomposition rate if ("T_p1_Hdecomp_rate" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "FTEMh", soil_params[which(soil.names == "T_p1_Hdecomp_rate")], overwrite = TRUE) } - + # parameter (2/2) of the temperature function on humus decomposition rate if ("T_p2_Hdecomp_rate" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "FTEMha", soil_params[which(soil.names == "T_p2_Hdecomp_rate")], overwrite = TRUE) } - + # reference temperature for decomposition of humified organic matter if ("T_r_HOMdecomp" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "TREFh", soil_params[which(soil.names == "T_r_HOMdecomp")], overwrite = TRUE) } - + # parameter (1/2) of the temperature function on decomposition rate of organic residues if ("FTEMr" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "FTEMr", soil_params[which(soil.names == "FTEMr")], overwrite = TRUE) } - + # parameter (2/2) of the temperature function on decomposition rate of organic residues if ("FTEMra" %in% 
soil.names) { SticsRFiles::set_param_xml(gen_file, "FTEMra", soil_params[which(soil.names == "FTEMra")], overwrite = TRUE) } - + # reference temperature for decomposition of organic residues if ("T_r_ORdecomp" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "TREFr", soil_params[which(soil.names == "T_r_ORdecomp")], overwrite = TRUE) } - + # TODO: come back to these # # not used anymore, or at least not with this name!!! # # relative potential mineralization rate: K2 = fmin1 * exp(- fmin2*argi) / (1+fmin3*calc) # if ("FMIN1" %in% soil.names) { # SticsRFiles::set_param_xml(gen_file, "FMIN1", soil_params[which(soil.names == "FMIN1")], overwrite = TRUE) # } - # + # # # not used anymore, or at least not with this name!!! # # parameter defining the effect of clay on the potential mineralization rate: K2 = fmin1 * exp(-fmin2*argi) / (1+fmin3*calc) # if ("FMIN2" %in% soil.names) { # SticsRFiles::set_param_xml(gen_file, "FMIN2", soil_params[which(soil.names == "FMIN2")], overwrite = TRUE) # } - # + # # # not used anymore, or at least not with this name!!! # # parameter defining the effect of CaCO3 on the potential mineralization rate: K2 = fmin1 * exp(-fmin2*argi) / (1+fmin3*calc) # if ("FMIN3" %in% soil.names) { # SticsRFiles::set_param_xml(gen_file, "FMIN3", soil_params[which(soil.names == "FMIN3")], overwrite = TRUE) # } - + # N/C ratio of soil humus if ("Wh" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "Wh", soil_params[which(soil.names == "Wh")], overwrite = TRUE) } - + # soil pH below which NH3 volatilisation derived from fertiliser is nil if ("pHminvol" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "pHminvol", soil_params[which(soil.names == "pHminvol")], overwrite = TRUE) } - + # soil pH above which NH3 volatilisation derived from fertiliser is maximum if ("pHmaxvol" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "pHmaxvol", soil_params[which(soil.names == "pHmaxvol")], overwrite = TRUE) @@ -926,7 +918,7 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { if ("maxNimm_mineralfert" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "Xorgmax", soil_params[which(soil.names == "maxNimm_mineralfert")], overwrite = TRUE) } - + # relative water content (fraction of field capacity) below which mineralisation rate is nil if ("hminm" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "hminm", soil_params[which(soil.names == "hminm")], overwrite = TRUE) @@ -939,23 +931,23 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { # skipping, alphaph: # maximal soil pH variation per unit of inorganic N added with slurry - + # skipping, dphvolmax: # maximal pH increase following the application of slurry - + # skipping, phvols: # parameter used to calculate the variation of soil pH after the addition of slurry - + # relative soil mineralisation rate at water saturation if ("fhminsat" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "fhminsat", soil_params[which(soil.names == "fhminsat")], overwrite = TRUE) } - + # reduction factor of decomposition rate of organic residues when mineral N is limiting if ("Nlim_reductionOMdecomp" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "fredkN", soil_params[which(soil.names == "Nlim_reductionOMdecomp")], overwrite = TRUE) } - + # reduction factor of decomposition rate of microbial biomass when mineral N is limiting if ("Nlim_reductionMBdecomp" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "fredlN", soil_params[which(soil.names == 
"Nlim_reductionMBdecomp")], overwrite = TRUE) @@ -965,32 +957,32 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { if ("fNCbiomin" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "fNCbiomin", soil_params[which(soil.names == "fNCbiomin")], overwrite = TRUE) } - + # additional reduction factor of residues decomposition rate when mineral N is very limited in soil if ("fredNsup" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "fredNsup", soil_params[which(soil.names == "fredNsup")], overwrite = TRUE) } - + # maximum priming ratio (relative to SOM decomposition SD rate) if ("Primingmax" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "Primingmax", soil_params[which(soil.names == "Primingmax")], overwrite = TRUE) } - + ### Nitrification, denitrification and associated N2O emissions ### TODO: modify these params - + ### Soil hydrology and compaction - + # minimal amount of rain required to produce runoff (mm.d-1) if ("precmin4runoff" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "pminruis", soil_params[which(soil.names == "precmin4runoff")], overwrite = TRUE) } - + # soil thermal diffusivity (cm2.s-1) if ("soil_thermal_diffusivity" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "diftherm", soil_params[which(soil.names == "soil_thermal_diffusivity")], overwrite = TRUE) } - + # skipping, bformnappe: # coefficient for the water table shape (artificially drained soil) @@ -998,22 +990,22 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { if ("rdrain" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "rdrain", soil_params[which(soil.names == "rdrain")], overwrite = TRUE) } - + # soil water potential corresponding to wilting point (Mpa) if ("SWP_WP" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "psihumin", soil_params[which(soil.names == "SWP_WP")], overwrite = TRUE) } - + # soil water potential corresponding to field capacity (Mpa) if ("SWP_FC" %in% soil.names) { SticsRFiles::set_param_xml(gen_file, "psihucc", soil_params[which(soil.names == "SWP_FC")], overwrite = TRUE) } - + # soil moisture content (fraction of field capacity) above which compaction may occur and delay sowing if ("SMC_compaction_delay_sow" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "prophumtasssem", pft.traits[which(pft.names == "SMC_compaction_delay_sow")], overwrite = TRUE) } - + # soil moisture content (fraction of field capacity) above which compaction may occur and delay harvest if ("SMC_compaction_delay_harvest" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "prophumtassrec", pft.traits[which(pft.names == "SMC_compaction_delay_harvest")], overwrite = TRUE) @@ -1021,10 +1013,10 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { ### skipping ### Soil tillage if soil compaction activated - + ### Typology of pebbles fertilisers and residues ### should some of these parameters come from event files? 
- + ### codetypeng: Types of mineral fertilisers - 1 atm # 1: Ammonium.nitrate # 2: Urea.Ammonium.Nitrate.solution @@ -1034,13 +1026,13 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { # 6: Ammonium.phosphate # 7: Calcium.nitrate # 8: Fixed.efficiency - + # each option has 4 params # engamm: fraction of ammonium in the N fertilizer # orgeng: maximal amount of fertilizer N that can be immobilized in the soil (fraction for type 8) # deneng: maximal fraction of the mineral fertilizer that can be denitrified (used if codedenit is not activated) # voleng: maximal fraction of mineral fertilizer that can be volatilized - + ### codetypres: Type of residues for decomposition parameters - 21 atm # 1: Main crop on surface # 2: Intermediate crop on surface @@ -1063,9 +1055,9 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { # 19: Others.1 ploughed in # 20: Others.2 ploughed in # 21: Dead roots in soil - + # each option has 17 params - + # fraction of organic residue which is decomposable if ("fOR_decomp" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "CroCo", pft.traits[which(pft.names == "fOR_decomp")], overwrite = TRUE) @@ -1075,17 +1067,17 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { if ("ORdecomp_par" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "akres", pft.traits[which(pft.names == "ORdecomp_par")], overwrite = TRUE) } - + # potential rate of decomposition of organic residues: kres=akres+bkres/CsurNres if ("ORdecomp_rate" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "bkres", pft.traits[which(pft.names == "ORdecomp_rate")], overwrite = TRUE) } - + # parameter determining C/N ratio of biomass during organic residues decomposition: CsurNbio=awb+bwb/CsurNres if ("awb" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "awb", pft.traits[which(pft.names == "awb")], overwrite = TRUE) } - + # parameter determining C/N ratio of biomass during organic residues decomposition: CsurNbio=awb+bwb/CsurNres if ("bwb" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "bwb", pft.traits[which(pft.names == "bwb")], overwrite = TRUE) @@ -1095,35 +1087,35 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { if ("minC2N_microbialbiomass" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "cwb", pft.traits[which(pft.names == "minC2N_microbialbiomass")], overwrite = TRUE) } - + # parameter of organic residues humification: hres = 1 - ahres*CsurNres/(bhres+CsurNres) if ("ahres" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "ahres", pft.traits[which(pft.names == "ahres")], overwrite = TRUE) } - + # parameter of organic residues humification: hres = 1 - ahres*CsurNres/(bhres+CsurNres) if ("bhres" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "bhres", pft.traits[which(pft.names == "bhres")], overwrite = TRUE) } - - + + # TODO: we need a soil PFT - + # potential decay rate of microbial biomass decomposing organic residues if ("microbialbiomass_decay" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "kbio", pft.traits[which(pft.names == "microbialbiomass_decay")], overwrite = TRUE) } - + # Carbon assimilation yield by the microbial biomass during crop residues decomposition if ("microbialbiomass_C_yield" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "yres", pft.traits[which(pft.names == "microbialbiomass_C_yield")], overwrite = TRUE) } - + # minimum value of C/N ratio of organic residue (g.g-1) if ("CNresmin" %in% pft.names) { 
SticsRFiles::set_param_xml(gen_file, "CNresmin", pft.traits[which(pft.names == "CNresmin")], overwrite = TRUE) } - + # maximum value of C/N ratio of organic residue (g.g-1) if ("CNresmax" %in% pft.names) { SticsRFiles::set_param_xml(gen_file, "CNresmax", pft.traits[which(pft.names == "CNresmax")], overwrite = TRUE) @@ -1131,246 +1123,244 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { # skipping, qmulchruis0: # amount of mulch above which runoff is suppressed - + # skipping, mouillabilmulch: # maximum wettability of crop mulch - + # skipping, kcouvmlch: # extinction coefficient connecting the soil cover to the amount of plant mulch - + # skipping, albedomulchresidus: # albedo of crop mulch - + # skipping, Qmulchdec: # maximal amount of decomposable mulch - + SticsRFiles::convert_xml2txt(file = gen_file) - + this_usm <- grep(names(trait.values)[pft], usmdirs) - sapply(this_usm, function(x){ + sapply(this_usm, function(x) { file.copy(file.path(rundir, "tempopar.sti"), file.path(usmdirs[x], "tempopar.sti"), overwrite = TRUE) }) - - ### new formulations + + ### new formulations # DO NOTHING ELSE FOR NOW - + SticsRFiles::convert_xml2txt(file = newf_file) - sapply(this_usm, function(x){ + sapply(this_usm, function(x) { file.copy(file.path(rundir, "tempoparv6.sti"), file.path(usmdirs[x], "tempoparv6.sti"), overwrite = TRUE) }) } - - - + + + ############################ Prepare Initialization File ################################## - + ## this is where we overwrite model initial conditions - + # read in template ini file - ini_xml <- XML::xmlParse(system.file("pecan_ini.xml", package = "PEcAn.STICS")) - for(i in seq_along(usmdirs)){ - + ini_xml <- XML::xmlParse(system.file("pecan_ini.xml", package = "PEcAn.STICS")) + for (i in seq_along(usmdirs)) { # doesn't really matter what these are called, they will all be eventually 'ficini.txt' ini_file <- file.path(rundir, paste0(basename(usmdirs[i]), "_ini.xml")) - - # write the ini file + + # write the ini file XML::saveXML(ini_xml, file = ini_file) - + # DO NOTHING FOR NOW # but when you do note that this also has multiple options, e.g. 
- # SticsRFiles::set_param_xml(file = ini_file, param = "lai0", values = 1, select = "plante", select_value = "1", overwrite = TRUE) - if(i > 1){ + # SticsRFiles::set_param_xml(file = ini_file, param = "lai0", values = 1, select = "plante", select_value = "1", overwrite = TRUE) + if (i > 1) { # these may or may not be modified depending on how crop cycles work in STICS # 'snu' is bare soil # fine for annual crops but need to change for perennials - SticsRFiles::set_param_xml(file = ini_file, param = "stade0", values = "snu", select = "plante", select_value = "1", overwrite = TRUE) + SticsRFiles::set_param_xml(file = ini_file, param = "stade0", values = "snu", select = "plante", select_value = "1", overwrite = TRUE) # when snu others are set to 0 by STICS - - }else if(!is.null(settings$run$inputs$poolinitcond)){ + } else if (!is.null(settings$run$inputs$poolinitcond)) { ic_path <- settings$run$inputs$poolinitcond$path - ic_nc <- ncdf4::nc_open(ic_path) - + ic_nc <- ncdf4::nc_open(ic_path) + # initial leaf area index (m2 m-2) - lai0 <- ncdf4::ncvar_get(ic_nc, "LAI") - SticsRFiles::set_param_xml(file = ini_file, param = "lai0", values = lai0, select = "plante", select_value = "1", overwrite = TRUE) - + lai0 <- ncdf4::ncvar_get(ic_nc, "LAI") + SticsRFiles::set_param_xml(file = ini_file, param = "lai0", values = lai0, select = "plante", select_value = "1", overwrite = TRUE) + # initial aerial biomass (kg m-2 --> t ha-1) - masec0 <- ncdf4::ncvar_get(ic_nc, "AGB") + masec0 <- ncdf4::ncvar_get(ic_nc, "AGB") SticsRFiles::set_param_xml(file = ini_file, param = "masec0", values = PEcAn.utils::ud_convert(masec0, "kg m-2", "t ha-1"), select = "plante", select_value = "1", overwrite = TRUE) - + # initial depth of root apex of the crop (m --> cm) - zrac0 <- ncdf4::ncvar_get(ic_nc, "rooting_depth") - if(zrac0 < 0.2) zrac0 <- 0.2 - SticsRFiles::set_param_xml(file = ini_file, param = "zrac0", values = PEcAn.utils::ud_convert(zrac0, "m", "cm"), select = "plante", select_value = "1", overwrite = TRUE) - + zrac0 <- ncdf4::ncvar_get(ic_nc, "rooting_depth") + if (zrac0 < 0.2) zrac0 <- 0.2 + SticsRFiles::set_param_xml(file = ini_file, param = "zrac0", values = PEcAn.utils::ud_convert(zrac0, "m", "cm"), select = "plante", select_value = "1", overwrite = TRUE) + # initial grain dry weight - haven't started any simulations from this stage yet - # SticsRFiles::set_param_xml(file = ini_file, param = "magrain0", values = 0, select = "plante", select_value = "1", overwrite = TRUE) - + # SticsRFiles::set_param_xml(file = ini_file, param = "magrain0", values = 0, select = "plante", select_value = "1", overwrite = TRUE) + # initial N amount in the plant (kg m-2 --> kg ha-1) - QNplante0 <- ncdf4::ncvar_get(ic_nc, "plant_nitrogen_content") - SticsRFiles::set_param_xml(file = ini_file, param = "QNplante0", values = PEcAn.utils::ud_convert(QNplante0, "kg m-2", "kg ha-1"), select = "plante", select_value = "1", overwrite = TRUE) - + QNplante0 <- ncdf4::ncvar_get(ic_nc, "plant_nitrogen_content") + SticsRFiles::set_param_xml(file = ini_file, param = "QNplante0", values = PEcAn.utils::ud_convert(QNplante0, "kg m-2", "kg ha-1"), select = "plante", select_value = "1", overwrite = TRUE) + # Not anymore # initial reserve of biomass (kg m-2 --> t ha-1) - #resperenne0 <- ncdf4::ncvar_get(ic_nc, "reserve_biomass") - #SticsRFiles::set_param_xml(file = ini_file, param = "resperenne0", values = PEcAn.utils::ud_convert(resperenne0, "kg m-2", "t ha-1"), select = "plante", select_value = "1", overwrite = TRUE) - + # resperenne0 <- 
ncdf4::ncvar_get(ic_nc, "reserve_biomass") + # SticsRFiles::set_param_xml(file = ini_file, param = "resperenne0", values = PEcAn.utils::ud_convert(resperenne0, "kg m-2", "t ha-1"), select = "plante", select_value = "1", overwrite = TRUE) + # initial root density in each of the five soil layers - densinitial <- ncdf4::ncvar_get(ic_nc, "root_density") - if(all(densinitial==0)) densinitial[1] <- 0.5 # for lev - if(zrac0 == 0.2){ - densinitial[2:5] <-0 - }else if(zrac0 < 0.4){ - densinitial[3:5] <-0 - }else if(zrac0 < 0.6){ - densinitial[4:5] <-0 - }else if(zrac0 < 0.8){ - densinitial[5] <-0 #densinitial layers should not be filled if zrac0 is not there + densinitial <- ncdf4::ncvar_get(ic_nc, "root_density") + if (all(densinitial == 0)) densinitial[1] <- 0.5 # for lev + if (zrac0 == 0.2) { + densinitial[2:5] <- 0 + } else if (zrac0 < 0.4) { + densinitial[3:5] <- 0 + } else if (zrac0 < 0.6) { + densinitial[4:5] <- 0 + } else if (zrac0 < 0.8) { + densinitial[5] <- 0 # densinitial layers should not be filled if zrac0 is not there } - SticsRFiles::set_param_xml(file = ini_file, param = "densinitial", values = densinitial, select = "plante", select_value = "1", overwrite = TRUE) - + SticsRFiles::set_param_xml(file = ini_file, param = "densinitial", values = densinitial, select = "plante", select_value = "1", overwrite = TRUE) + # default 'lev' - # SticsRFiles::set_param_xml(file = ini_file, param = "stade0", values = "plt", select = "plante", select_value = "1", overwrite = TRUE) - + # SticsRFiles::set_param_xml(file = ini_file, param = "stade0", values = "plt", select = "plante", select_value = "1", overwrite = TRUE) + ncdf4::nc_close(ic_nc) } - + SticsRFiles::convert_xml2txt(file = ini_file) file.rename(file.path(rundir, "ficini.txt"), file.path(usmdirs[i], "ficini.txt")) } - + ############################ Prepare Soils ################################## - + ## this is where we modify soil characteristics - + #### THERE IS SOME BUG IN SticsRFiles::convert_xml2txt FOR SOLS.XML #### I NOW PUT TXT VERSION TO THE MODEL PACKAGE: param.sol #### TODO: revise others to have txt templates directly in the package - + # # changed from FINERT to finert and moved to the sols.xml # # initial fraction of soil organic N inactive for mineralisation (= stable SON/ total SON) # if ("FINERT" %in% soil.names) { # SticsRFiles::set_param_xml(gen_file, "finert", soil_params[which(soil.names == "FINERT")], overwrite = TRUE) # } - + sols_file <- file.path(rundir, "param.sol") - + # cp template sols file (txt) file.copy(system.file("param.sol", package = "PEcAn.STICS"), sols_file) - + # check param names # sols_vals <- SticsRFiles::get_soil_txt(sols_file) - + str_ns <- paste0(as.numeric(settings$run$site$id) %/% 1e+09, "-", as.numeric(settings$run$site$id) %% 1e+09) - + # I guess not important what this is called as long as it's consistent in usms - SticsRFiles::set_soil_txt(file = sols_file, param="typsol", value=paste0("sol", str_ns)) - - if(!is.null(settings$run$inputs$poolinitcond)){ + SticsRFiles::set_soil_txt(file = sols_file, param = "typsol", value = paste0("sol", str_ns)) + + if (!is.null(settings$run$inputs$poolinitcond)) { ic_path <- settings$run$inputs$poolinitcond$path - ic_nc <- ncdf4::nc_open(ic_path) - + ic_nc <- ncdf4::nc_open(ic_path) + # pH - pH <- ncdf4::ncvar_get(ic_nc, "pH") - pH <- round(pH[1], digits = 1) # STICS uses 1 pH value - SticsRFiles::set_soil_txt(file = sols_file, param="pH", value=pH) - - sapply(1:5, function(x) SticsRFiles::set_soil_txt(file = sols_file, param="epc", value=20, layer = 
x)) - + pH <- ncdf4::ncvar_get(ic_nc, "pH") + pH <- round(pH[1], digits = 1) # STICS uses 1 pH value + SticsRFiles::set_soil_txt(file = sols_file, param = "pH", value = pH) + + sapply(1:5, function(x) SticsRFiles::set_soil_txt(file = sols_file, param = "epc", value = 20, layer = x)) + # volume_fraction_of_water_in_soil_at_field_capacity - hccf <- ncdf4::ncvar_get(ic_nc, "volume_fraction_of_water_in_soil_at_field_capacity") - hccf <- round(hccf*100, digits = 2) - sapply(seq_along(hccf), function(x) SticsRFiles::set_soil_txt(file = sols_file, param="hccf", value=hccf[x], layer = x)) - + hccf <- ncdf4::ncvar_get(ic_nc, "volume_fraction_of_water_in_soil_at_field_capacity") + hccf <- round(hccf * 100, digits = 2) + sapply(seq_along(hccf), function(x) SticsRFiles::set_soil_txt(file = sols_file, param = "hccf", value = hccf[x], layer = x)) + # volume_fraction_of_condensed_water_in_soil_at_wilting_point - hminf <- ncdf4::ncvar_get(ic_nc, "volume_fraction_of_condensed_water_in_soil_at_wilting_point") - hminf <- round(hminf*100, digits = 2) - sapply(seq_along(hminf), function(x) SticsRFiles::set_soil_txt(file = sols_file, param="hminf", value=hminf[x], layer = x)) - + hminf <- ncdf4::ncvar_get(ic_nc, "volume_fraction_of_condensed_water_in_soil_at_wilting_point") + hminf <- round(hminf * 100, digits = 2) + sapply(seq_along(hminf), function(x) SticsRFiles::set_soil_txt(file = sols_file, param = "hminf", value = hminf[x], layer = x)) + # soil_organic_nitrogen_content - Norg <- ncdf4::ncvar_get(ic_nc, "soil_organic_nitrogen_content") - Norg <- round(Norg[1]*100, digits = 2) # STICS uses 1 Norg value - SticsRFiles::set_soil_txt(file = sols_file, param="Norg", value=Norg) + Norg <- ncdf4::ncvar_get(ic_nc, "soil_organic_nitrogen_content") + Norg <- round(Norg[1] * 100, digits = 2) # STICS uses 1 Norg value + SticsRFiles::set_soil_txt(file = sols_file, param = "Norg", value = Norg) # mass_fraction_of_clay_in_soil - argi <- ncdf4::ncvar_get(ic_nc, "mass_fraction_of_clay_in_soil") - argi <- round(argi[1]*100, digits = 0) # STICS uses 1 argi value - SticsRFiles::set_soil_txt(file = sols_file, param="argi", value=argi) - + argi <- ncdf4::ncvar_get(ic_nc, "mass_fraction_of_clay_in_soil") + argi <- round(argi[1] * 100, digits = 0) # STICS uses 1 argi value + SticsRFiles::set_soil_txt(file = sols_file, param = "argi", value = argi) + # soil_density (kg m-3 --> g cm-3) - DAF <- ncdf4::ncvar_get(ic_nc, "soil_density") - DAF <- round(PEcAn.utils::ud_convert(DAF, "kg m-3", "g cm-3"), digits = 1) - sapply(seq_along(DAF), function(x) SticsRFiles::set_soil_txt(file = sols_file, param="DAF", value=DAF[x], layer = x)) - + DAF <- ncdf4::ncvar_get(ic_nc, "soil_density") + DAF <- round(PEcAn.utils::ud_convert(DAF, "kg m-3", "g cm-3"), digits = 1) + sapply(seq_along(DAF), function(x) SticsRFiles::set_soil_txt(file = sols_file, param = "DAF", value = DAF[x], layer = x)) + # c2n_humus - #CsurNsol0 <- ncdf4::ncvar_get(ic_nc, "c2n_humus") - #SticsRFiles::set_soil_txt(file = sols_file, param="CsurNsol", value=CsurNsol0) - - # epd + # CsurNsol0 <- ncdf4::ncvar_get(ic_nc, "c2n_humus") + # SticsRFiles::set_soil_txt(file = sols_file, param="CsurNsol", value=CsurNsol0) + + # epd epd <- rep(10, 5) - sapply(seq_along(epd), function(x) SticsRFiles::set_soil_txt(file = sols_file, param="epd", value=epd[x], layer = x)) - + sapply(seq_along(epd), function(x) SticsRFiles::set_soil_txt(file = sols_file, param = "epd", value = epd[x], layer = x)) + ncdf4::nc_close(ic_nc) } - + file.copy(sols_file, file.path(usmdirs, "param.sol")) - + # DO 
NOTHING ELSE FOR NOW # this has some bug for sols.xml # SticsRFiles::convert_xml2txt(file = sols_file, javastics = javastics_path) - + ######################### Prepare Weather Station File ############################### - + ## this is where we modify more initial conditions and site characteristics - + # read in template sta file - sta_xml <- XML::xmlParse(system.file("pecan_sta.xml", package = "PEcAn.STICS")) - + sta_xml <- XML::xmlParse(system.file("pecan_sta.xml", package = "PEcAn.STICS")) + # not important what it's called, will be 'station.txt' in the end sta_file <- file.path(rundir, paste0(str_ns, "_sta.xml")) - + XML::saveXML(sta_xml, file = sta_file) - + # change latitude SticsRFiles::set_param_xml(sta_file, "latitude", settings$run$site$lat, overwrite = TRUE) - + SticsRFiles::convert_xml2txt(file = sta_file) file.copy(file.path(rundir, "station.txt"), file.path(usmdirs, "station.txt")) - + # another way to change latitute # sta_txt <- file.path(rundir, "station.txt") # SticsRFiles::set_station_txt(sta_txt, param = "latitude", value = settings$run$site$lat) - + # DO NOTHING ELSE FOR NOW # Should these be prepared by met2model.STICS? - + ############################## Prepare LAI forcing #################################### ## skipping for now - - - + + + ############################ Prepare Technical File ################################## - + ## this is where we modify management practices ## TODO: use ICASA compatible json file - + ## instead of using a template, this could be easier if we prepare a dataframe and use SticsRFiles::gen_tec_xml tec_df <- data.frame(Tec_name = "tmp_tec.xml") - + # these shouldn't be empty even if we don't use them (values from timothy example in STICS) tec_df$iplt0 <- 999 # date of sowing tec_df$profsem <- 2 # depth of sowing tec_df$densitesem <- 100 # plant sowing density tec_df$variete <- 1 # cultivar number corresponding to the cultivar name in the plant file (could be passed via a field activity file) - tec_df$irecbutoir <- 999 #latest date of harvest (imposed if the crop cycle is not finished at this date) + tec_df$irecbutoir <- 999 # latest date of harvest (imposed if the crop cycle is not finished at this date) tec_df$profmes <- 120 # depth of measurement of the soil water reserve (cm) - #tec_df$engrais <- 1 # fertilizer type + # tec_df$engrais <- 1 # fertilizer type tec_df$concirr <- 0.11 # concentration of mineral N in irrigation water (kg ha-1 mm-1) - tec_df$ressuite <- 'straw+roots' # type of crop residue + tec_df$ressuite <- "straw+roots" # type of crop residue tec_df$h2ograinmax <- 0.32 # maximal water content of fruits at harvest # the following formalisms exist in the tec file: @@ -1383,80 +1373,77 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { ## harvest ## special techniques ## soil modification by techniques (compaction-fragmentation) - + # if a field activity file is given, most (all?) 
of our harvest cases are actually fall under special techniques - cut crop - if(!is.null(settings$run$inputs$fielddata)){ - + if (!is.null(settings$run$inputs$fielddata)) { events_file <- jsonlite::read_json(settings$run$inputs$fielddata$path, simplifyVector = TRUE)[[1]] # loop for each USM - for(usmi in seq_along(usmdirs)){ - + for (usmi in seq_along(usmdirs)) { usm_years <- c(sapply(strsplit(sub(".*_", "", basename(usmdirs[usmi])), "-"), function(x) (as.numeric(x)))) # note that usm years can overlap, may need more sophisticated checks dseq_sub <- dseq[lubridate::year(dseq) %in% usm_years] - + events_sub <- events_file$events[lubridate::year(events_file$events$date) %in% usm_years, ] - - if("planting" %in% events_sub$mgmt_operations_event){ - + + if ("planting" %in% events_sub$mgmt_operations_event) { pl_date <- events_sub$date[events_sub$mgmt_operations_event == "planting"] tec_df$iplt0 <- lubridate::yday(as.Date(pl_date)) - + profsem <- events_sub$planting_depth[events_sub$mgmt_operations_event == "planting"] - if(!is.null(profsem)){ + if (!is.null(profsem)) { tec_df$profsem <- as.numeric(profsem) # depth of sowing } - + densitesem <- events_sub$planting_sowing_density[events_sub$mgmt_operations_event == "planting"] - if(!is.null(densitesem)){ + if (!is.null(densitesem)) { tec_df$densitesem <- as.numeric(densitesem) # plant sowing density } - + # any other? } - - if("harvest" %in% events_sub$mgmt_operations_event){ + + if ("harvest" %in% events_sub$mgmt_operations_event) { # param names - h_param_names <- c("julfauche" , # date of each cut for forage crops, julian.d - "hautcoupe" , # cut height for forage crops, m - "lairesiduel", # residual LAI after each cut of forage crop, m2 m-2 - "msresiduel" , # residual aerial biomass after a cut of a forage crop, t.ha-1 - "anitcoupe", - "engraiscoupe", - "tauxexportfauche", - "restit", - "mscoupemini") # amount of mineral N added by fertiliser application at each cut of a forage crop, kg.ha-1 - - - harvest_sub <- events_sub[events_sub$mgmt_operations_event == "harvest",] - + h_param_names <- c( + "julfauche", # date of each cut for forage crops, julian.d + "hautcoupe", # cut height for forage crops, m + "lairesiduel", # residual LAI after each cut of forage crop, m2 m-2 + "msresiduel", # residual aerial biomass after a cut of a forage crop, t.ha-1 + "anitcoupe", + "engraiscoupe", + "tauxexportfauche", + "restit", + "mscoupemini" + ) # amount of mineral N added by fertiliser application at each cut of a forage crop, kg.ha-1 + + + harvest_sub <- events_sub[events_sub$mgmt_operations_event == "harvest", ] + harvest_list <- list() - for(hrow in seq_len(nrow(harvest_sub))){ - + for (hrow in seq_len(nrow(harvest_sub))) { # empty - harvest_df <- data.frame(julfauche = NA, hautcoupe = NA, lairesiduel = NA, msresiduel = NA, anitcoupe = NA) - - + harvest_df <- data.frame(julfauche = NA, hautcoupe = NA, lairesiduel = NA, msresiduel = NA, anitcoupe = NA) + + # If given harvest date is within simulation days # probably need to break down >2 years into multiple usms - if(as.Date(harvest_sub$date[hrow]) %in% dseq_sub){ - - # STICS needs cutting days in cumulative julian days + if (as.Date(harvest_sub$date[hrow]) %in% dseq_sub) { + # STICS needs cutting days in cumulative julian days # e.g. 
first cutting day of the first simulation year can be 163 (2018-06-13) # in following years it should be cumulative, meaning a cutting day on 2019-06-12 is 527, not 162 # the following code should give that - harvest_df$julfauche <- which(dseq_sub == as.Date(harvest_sub$date[hrow])) + lubridate::yday(dseq_sub[1]) - 1 - if("frg" %in% tolower(harvest_sub$harvest_crop) | - "wcl" %in% tolower(harvest_sub$harvest_crop)){ + harvest_df$julfauche <- which(dseq_sub == as.Date(harvest_sub$date[hrow])) + lubridate::yday(dseq_sub[1]) - 1 + if ("frg" %in% tolower(harvest_sub$harvest_crop) | + "wcl" %in% tolower(harvest_sub$harvest_crop)) { tec_df$irecbutoir <- 999 - if(!is.null(events_file$rotation)){ - tind <- which(dseq_sub == as.Date(events_file$rotation$rotation_end[usmi])) + lubridate::yday(dseq_sub[1]) - 1 - tec_df$irecbutoir <- ifelse(length(tind) == 0, 999, tind) + if (!is.null(events_file$rotation)) { + tind <- which(dseq_sub == as.Date(events_file$rotation$rotation_end[usmi])) + lubridate::yday(dseq_sub[1]) - 1 + tec_df$irecbutoir <- ifelse(length(tind) == 0, 999, tind) } - }else{ + } else { tec_df$irecbutoir <- harvest_df$julfauche } - harvest_df$hautcoupe <- as.numeric(harvest_sub$harvest_cut_height[harvest_sub$date==harvest_sub$date[hrow]]) # # cut height for forage crops + harvest_df$hautcoupe <- as.numeric(harvest_sub$harvest_cut_height[harvest_sub$date == harvest_sub$date[hrow]]) # # cut height for forage crops harvest_df$hautcoupe <- ifelse(harvest_df$hautcoupe == -99, 0.05, harvest_df$hautcoupe) harvest_df$lairesiduel <- ifelse(harvest_df$hautcoupe < 0.08, 0.2, 0.8) # hardcode for now harvest_df$msresiduel <- ifelse(harvest_df$hautcoupe < 0.08, 0.05, 0.3) # residual aerial biomass after a cut of a forage crop (t ha-1) @@ -1466,229 +1453,223 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { harvest_df$restit <- 0 harvest_df$mscoupemini <- 0 } - + colnames(harvest_df) <- paste0(h_param_names, "_", hrow) harvest_list[[hrow]] <- harvest_df } - harvest_tec <- do.call("cbind", harvest_list) - + harvest_tec <- do.call("cbind", harvest_list) + # need to get these from field data # cut crop - 1:yes, 2:no - if("frg" %in% tolower(harvest_sub$harvest_crop) | "wcl" %in% tolower(harvest_sub$harvest_crop)){ + if ("frg" %in% tolower(harvest_sub$harvest_crop) | "wcl" %in% tolower(harvest_sub$harvest_crop)) { harvest_tec$codefauche <- 1 - }else{ - harvest_tec$codefauche <- 2 + } else { + harvest_tec$codefauche <- 2 } - #harvest_tec$mscoupemini <- 0 # min val of aerial biomass to make a cut + # harvest_tec$mscoupemini <- 0 # min val of aerial biomass to make a cut harvest_tec$codemodfauche <- 2 # use calendar days harvest_tec$hautcoupedefaut <- 0.05 # cut height for forage crops (calendar calculated) harvest_tec$stadecoupedf <- "rec" - - - } #harvest-if end - - if("organic_material" %in% events_sub$mgmt_operations_event | - "fertilizer" %in% events_sub$mgmt_operations_event){ + } # harvest-if end + + if ("organic_material" %in% events_sub$mgmt_operations_event | + "fertilizer" %in% events_sub$mgmt_operations_event) { # param names - f_param_names <- c("julapN", # date of fertilization, julian.d - "absolute_value/%") # cut height for forage crops, m - - - fert_sub <- events_sub[events_sub$mgmt_operations_event %in% c("organic_material", "fertilizer"),] - + f_param_names <- c( + "julapN", # date of fertilization, julian.d + "absolute_value/%" + ) # cut height for forage crops, m + + + fert_sub <- events_sub[events_sub$mgmt_operations_event %in% c("organic_material", 
"fertilizer"), ] + fert_list <- list() - for(frow in seq_len(nrow(fert_sub))){ - + for (frow in seq_len(nrow(fert_sub))) { # empty - fert_df <- data.frame(jul = NA, val = NA) + fert_df <- data.frame(jul = NA, val = NA) # If given fertilization date is within simulation days - if(as.Date(fert_sub$date[frow]) %in% dseq_sub){ - - fert_df$jul <- which(dseq_sub == as.Date(fert_sub$date[frow])) + lubridate::yday(dseq_sub[1]) - 1 - - if(fert_sub$mgmt_operations_event[frow] == "organic_material"){ + if (as.Date(fert_sub$date[frow]) %in% dseq_sub) { + fert_df$jul <- which(dseq_sub == as.Date(fert_sub$date[frow])) + lubridate::yday(dseq_sub[1]) - 1 + + if (fert_sub$mgmt_operations_event[frow] == "organic_material") { Nprcnt <- ifelse(as.numeric(fert_sub$organic_material_N_conc[frow]) < 0, 5, as.numeric(fert_sub$organic_material_N_conc[frow])) - fert_df$val <- as.numeric(fert_sub$org_material_applic_amnt[frow]) * (Nprcnt/100) - }else{ + fert_df$val <- as.numeric(fert_sub$org_material_applic_amnt[frow]) * (Nprcnt / 100) + } else { fert_df$val <- as.numeric(fert_sub$N_in_applied_fertilizer[frow]) } - } - + colnames(fert_df) <- paste0(f_param_names, "_", frow) fert_list[[frow]] <- fert_df } - fert_tec <- do.call("cbind", fert_list) - } #fertilizer-if end - - - # DO NOTHING ELSE FOR NOW - # TODO: ADD OTHER MANAGEMENT - - # same usm -> continue columns - usm_tec_df <- cbind(tec_df, harvest_tec, fert_tec) - - usm_tec_df$ratiol <- 0 - - SticsRFiles::gen_tec_xml(param_df = usm_tec_df, - file=system.file("pecan_tec.xml", package = "PEcAn.STICS"), - out_dir = usmdirs[usmi]) - - # TODO: more than 1 USM, rbind - - SticsRFiles::convert_xml2txt(file = file.path(usmdirs[usmi], "tmp_tec.xml")) - - - } # end-loop over usms - } # TODO: if no events file is given modify other harvest parameters, e.g. harvest decision - + fert_tec <- do.call("cbind", fert_list) + } # fertilizer-if end + + + # DO NOTHING ELSE FOR NOW + # TODO: ADD OTHER MANAGEMENT + + # same usm -> continue columns + usm_tec_df <- cbind(tec_df, harvest_tec, fert_tec) + + usm_tec_df$ratiol <- 0 + + SticsRFiles::gen_tec_xml( + param_df = usm_tec_df, + file = system.file("pecan_tec.xml", package = "PEcAn.STICS"), + out_dir = usmdirs[usmi] + ) + + # TODO: more than 1 USM, rbind + + SticsRFiles::convert_xml2txt(file = file.path(usmdirs[usmi], "tmp_tec.xml")) + } # end-loop over usms + } # TODO: if no events file is given modify other harvest parameters, e.g. harvest decision + ################################ Prepare USM file ###################################### # loop for each USM - #ncodesuite <- ifelse(length(usmdirs) > 1, 1,0) - - for(usmi in seq_along(usmdirs)){ - - #usm_years <- years_requested[(usmi*2-1):(usmi*2)] + # ncodesuite <- ifelse(length(usmdirs) > 1, 1,0) + + for (usmi in seq_along(usmdirs)) { + # usm_years <- years_requested[(usmi*2-1):(usmi*2)] usm_years <- c(sapply(strsplit(sub(".*_", "", basename(usmdirs[usmi])), "-"), function(x) (as.numeric(x)))) dseq_sub <- dseq[lubridate::year(dseq) %in% usm_years] - + # read in template USM (Unit of SiMulation) file, has the master settings, file names etc. 
usm_file <- file.path(usmdirs[usmi], "new_travail.usm") - - # cp template usm file + + # cp template usm file file.copy(system.file("template.usm", package = "PEcAn.STICS"), usm_file) - - # Type of LAI simulation + + # Type of LAI simulation # 0 = culture (LAI calculated by the model), 1 = feuille (LAI forced) SticsRFiles::set_usm_txt(usm_file, "codesimul", "culture", append = FALSE) # hardcode for now - + # use optimization # 0 = no; 1 = yes main plant; 2 = yes associated plant - SticsRFiles::set_usm_txt(usm_file, "codeoptim", 0, append = FALSE) - + SticsRFiles::set_usm_txt(usm_file, "codeoptim", 0, append = FALSE) + # option to simulate several # successive USM (0 = no, 1 = yes) - if(usmi == 1){ + if (usmi == 1) { SticsRFiles::set_usm_txt(usm_file, "codesuite", 0, append = FALSE) - }else{ + } else { SticsRFiles::set_usm_txt(usm_file, "codesuite", 1, append = FALSE) } - + # number of simulated plants (sole crop=1; intercropping=2) SticsRFiles::set_usm_txt(usm_file, "nbplantes", 1, append = FALSE) # hardcode for now - + # pft name SticsRFiles::set_usm_txt(usm_file, "nom", basename(usmdirs[usmi]), append = FALSE) - - + + ## handle dates, also for partial year(s) ## needs developing with longer runs - if(usmi == 1){ + if (usmi == 1) { # beginning day of the simulation (julian.d) # end day of the simulation (julian.d) (at the end of consecutive years, i.e. can be greater than 366) SticsRFiles::set_usm_txt(usm_file, "datedebut", lubridate::yday(settings$run$start.date), append = FALSE) SticsRFiles::set_usm_txt(usm_file, "datefin", (lubridate::yday(settings$run$start.date) + length(dseq_sub) - 1), append = FALSE) - }else{ + } else { SticsRFiles::set_usm_txt(usm_file, "datedebut", 1, append = FALSE) # for now! SticsRFiles::set_usm_txt(usm_file, "datefin", length(dseq_sub), append = FALSE) } - + # name of the initialization file SticsRFiles::set_usm_txt(usm_file, "finit", paste0(basename(usmdirs[usmi]), "_ini.xml"), append = FALSE) - + # soil number SticsRFiles::set_usm_txt(usm_file, "numsol", 1, append = FALSE) - + # name of the soil in the sols.xml file SticsRFiles::set_usm_txt(usm_file, "nomsol", paste0("sol", str_ns), append = FALSE) - + # name of the weather station file SticsRFiles::set_usm_txt(usm_file, "fstation", paste0(str_ns, "_sta.xml"), append = FALSE) - + # name of the first climate file SticsRFiles::set_usm_txt(usm_file, "fclim1", paste0(str_ns, ".", usm_years[1]), append = FALSE) - + # name of the last climate file - if(length(usm_years) == 2){ + if (length(usm_years) == 2) { SticsRFiles::set_usm_txt(usm_file, "fclim2", paste0(str_ns, ".", usm_years[2]), append = FALSE) - }else{ + } else { # repeat same year SticsRFiles::set_usm_txt(usm_file, "fclim2", paste0(str_ns, ".", usm_years[1]), append = FALSE) } - - + + # number of simulation years SticsRFiles::set_usm_txt(usm_file, "nbans", length(unique(usm_years)), append = FALSE) # hardcode for now - + # number of calendar years involved in the crop cycle # 1 = 1 year e.g. for spring crops, 0 = two years, e.g. 
for winter crops - culturean <- ifelse( length(unique(usm_years)) == 2, 0, 1) - SticsRFiles::set_usm_txt(usm_file, "culturean", culturean, append = FALSE) #hardcoding this for now, if passed as a trait from priors it breaks sensitivity analysis + culturean <- ifelse(length(unique(usm_years)) == 2, 0, 1) + SticsRFiles::set_usm_txt(usm_file, "culturean", culturean, append = FALSE) # hardcoding this for now, if passed as a trait from priors it breaks sensitivity analysis # probably best to pass this via the json file - - # name of the plant file for main plant - if(length(plt_files) < usmi){ + + # name of the plant file for main plant + if (length(plt_files) < usmi) { # multiple usms, 1 plt file = same spp, consecutive rotations, but hacky - SticsRFiles::set_usm_txt(usm_file, "fplt1", basename(plt_files[[1]]), append = FALSE) - }else{ - SticsRFiles::set_usm_txt(usm_file, "fplt1", basename(plt_files[[usmi]]), append = FALSE) + SticsRFiles::set_usm_txt(usm_file, "fplt1", basename(plt_files[[1]]), append = FALSE) + } else { + SticsRFiles::set_usm_txt(usm_file, "fplt1", basename(plt_files[[usmi]]), append = FALSE) } - - + + # name of the technical file for main plant # does this even matter? - SticsRFiles::set_usm_txt(usm_file, "ftec1", "tmp_tec.xml", append = FALSE) - + SticsRFiles::set_usm_txt(usm_file, "ftec1", "tmp_tec.xml", append = FALSE) + # name of the LAI forcing file for main plant (null if none) SticsRFiles::set_usm_txt(usm_file, "flai1", "default.lai", append = FALSE) # hardcode for now, doesn't matter when codesimul==0 - - # TODO: more than 1 PFTs + + # TODO: more than 1 PFTs # STICS can run 2 PFTs max: main crop + intercrop } - - + + ################################ Prepare Run ###################################### - + # symlink climate files met_path <- settings$run$inputs$met$path - - for(usmi in seq_along(usmdirs)){ - + + for (usmi in seq_along(usmdirs)) { usm_years <- c(sapply(strsplit(sub(".*_", "", basename(usmdirs)[usmi]), "-"), function(x) (as.numeric(x)))) dseq_sub <- dseq[lubridate::year(dseq) %in% usm_years] - + clim_list <- list() # temporary solution - for(clim in seq_along(usm_years)){ + for (clim in seq_along(usm_years)) { # currently assuming only first year file has been passed to the settings, modify met2model if changing the logic - met_file <- gsub(paste0(lubridate::year(settings$run$start.date), ".climate"), paste0(usm_years[clim], ".climate"), met_path) + met_file <- gsub(paste0(lubridate::year(settings$run$start.date), ".climate"), paste0(usm_years[clim], ".climate"), met_path) clim_list[[clim]] <- utils::read.table(met_file) } clim_run <- do.call("rbind", clim_list) utils::write.table(clim_run, file.path(usmdirs[usmi], "climat.txt"), col.names = FALSE, row.names = FALSE) - } - + # symlink to binary file.symlink(stics_path, bindir) stics_exe <- file.path(bindir, basename(stics_path)) - + # symlink *.mod files - file.symlink(system.file("var.mod", package = "PEcAn.STICS"), file.path(usmdirs, "var.mod")) - file.symlink(system.file("rap.mod", package = "PEcAn.STICS"), file.path(usmdirs, "rap.mod")) + file.symlink(system.file("var.mod", package = "PEcAn.STICS"), file.path(usmdirs, "var.mod")) + file.symlink(system.file("rap.mod", package = "PEcAn.STICS"), file.path(usmdirs, "rap.mod")) file.symlink(system.file("prof.mod", package = "PEcAn.STICS"), file.path(usmdirs, "prof.mod")) - - #cmd_run <- paste("java -jar", jexe,"--run", rundir, usm_name) - + + # cmd_run <- paste("java -jar", jexe,"--run", rundir, usm_name) + # using SticsOnR wrapper in job.sh now - 
SticsOnR::stics_wrapper(model_options = wrapper_options) # used to be: # cmd_generate <- paste("java -jar", jexe,"--generate-txt", rundir, usm_name) # cmd_run <- paste("java -jar", jexe,"--run", rundir, usm_name) - + #----------------------------------------------------------------------- # create launch script (which will create symlink) @@ -1697,7 +1678,7 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { } else { jobsh <- readLines(con = system.file("template.job", package = "PEcAn.STICS"), n = -1) } - + # create host specific setttings hostsetup <- "" if (!is.null(settings$model$prerun)) { @@ -1706,7 +1687,7 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { if (!is.null(settings$host$prerun)) { hostsetup <- paste(hostsetup, sep = "\n", paste(settings$host$prerun, collapse = "\n")) } - + hostteardown <- "" if (!is.null(settings$model$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$model$postrun, collapse = "\n")) @@ -1714,33 +1695,31 @@ write.config.STICS <- function(defaults, trait.values, settings, run.id) { if (!is.null(settings$host$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$host$postrun, collapse = "\n")) } - + # create job.sh jobsh <- gsub("@HOST_SETUP@", hostsetup, jobsh) jobsh <- gsub("@HOST_TEARDOWN@", hostteardown, jobsh) - + jobsh <- gsub("@SITE_LAT@", settings$run$site$lat, jobsh) jobsh <- gsub("@SITE_LON@", settings$run$site$lon, jobsh) - + jobsh <- gsub("@START_DATE@", settings$run$start.date, jobsh) jobsh <- gsub("@END_DATE@", settings$run$end.date, jobsh) - + jobsh <- gsub("@OUTDIR@", outdir, jobsh) jobsh <- gsub("@RUNDIR@", rundir, jobsh) - - if(length(usmdirs)>1){ - jobsh <- gsub("@SUCCESSIVE_USMS@", paste0("list(c('", paste(basename(usmdirs), collapse="','"), "'))"), jobsh) - }else{ - jobsh <- gsub("@SUCCESSIVE_USMS@", 'NULL', jobsh) + + if (length(usmdirs) > 1) { + jobsh <- gsub("@SUCCESSIVE_USMS@", paste0("list(c('", paste(basename(usmdirs), collapse = "','"), "'))"), jobsh) + } else { + jobsh <- gsub("@SUCCESSIVE_USMS@", "NULL", jobsh) } - + jobsh <- gsub("@USMDIR@", usmdirs[1], jobsh) # for now - + jobsh <- gsub("@MODFILE@", paste0("mod_s", basename(usmdirs[1]), ".sti"), jobsh) jobsh <- gsub("@STICSEXE@", stics_exe, jobsh) - + writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) - - } # write.config.STICS diff --git a/models/stics/tests/testthat/test.met2model.R b/models/stics/tests/testthat/test.met2model.R index e77aefcb74c..a67755f9596 100644 --- a/models/stics/tests/testthat/test.met2model.R +++ b/models/stics/tests/testthat/test.met2model.R @@ -6,7 +6,8 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("Met conversion runs without error", { nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" diff --git a/models/template/R/met2model.MODEL.R b/models/template/R/met2model.MODEL.R index 2d3f7012e33..1bec855b658 100644 --- a/models/template/R/met2model.MODEL.R +++ b/models/template/R/met2model.MODEL.R @@ -10,18 +10,17 @@ ##' @return OK if everything was succesful. 
 ##' @export
 ##' @author Rob Kooper
-##-------------------------------------------------------------------------------------------------#
+## -------------------------------------------------------------------------------------------------#
 met2model.MODEL <- function(in.path, in.prefix, outfolder, overwrite = FALSE) {
   PEcAn.logger::logger.severe("NOT IMPLEMENTED")
   # Please follow the PEcAn style guide:
   # https://pecanproject.github.io/pecan-documentation/latest/coding-style.html
-  
+
   # Note that `library()` calls should _never_ appear here; instead, put
   # packages dependencies in the DESCRIPTION file, under "Imports:".
   # Calls to dependent packages should use a double colon, e.g.
   # `packageName::functionName()`.
   # Also, `require()` should be used only when a package dependency is truly
-  # optional. In this case, put the package name under "Suggests:" in DESCRIPTION.
-  
+  # optional. In this case, put the package name under "Suggests:" in DESCRIPTION.
 } # met2model.MODEL
diff --git a/models/template/R/model2netcdf.MODEL.R b/models/template/R/model2netcdf.MODEL.R
index 2fce61d8925..f5a93c318ff 100644
--- a/models/template/R/model2netcdf.MODEL.R
+++ b/models/template/R/model2netcdf.MODEL.R
@@ -1,5 +1,5 @@
 ##' Convert MODEL output into the NACP Intercomparison format (ALMA using netCDF)
-##' 
+##'
 ##' @name model2netcdf.MODEL
 ##' @title Code to convert MODELS's output into netCDF format
 ##'
@@ -16,12 +16,11 @@ model2netcdf.MODEL <- function(outdir, sitelat, sitelon, start_date, end_date) {
   # Please follow the PEcAn style guide:
   # https://pecanproject.github.io/pecan-documentation/develop/coding-style.html
-  
+
   # Note that `library()` calls should _never_ appear here; instead, put
   # packages dependencies in the DESCRIPTION file, under "Imports:".
   # Calls to dependent packages should use a double colon, e.g.
   # `packageName::functionName()`.
   # Also, `require()` should be used only when a package dependency is truly
-  # optional. In this case, put the package name under "Suggests:" in DESCRIPTION.
-  
+  # optional. In this case, put the package name under "Suggests:" in DESCRIPTION.
 } # model2netcdf.MODEL
diff --git a/models/template/R/read_restart.ModelName.R b/models/template/R/read_restart.ModelName.R
index 5ca75e77e34..f8bd824e06c 100644
--- a/models/template/R/read_restart.ModelName.R
+++ b/models/template/R/read_restart.ModelName.R
@@ -1,22 +1,21 @@
 #' @title Read restart template for SDA
-#' 
+#'
 #' @author Alexey Shiklomanov
-#' 
+#'
 #' @param outdir Output directory
 #' @param runid Run ID
 #' @param stop.time Year that is being read
 #' @param settings PEcAn settings object
 #' @param var.names Variable names to be extracted
 #' @param params Any parameters required for state calculations
-#' 
+#'
 #' @description Read restart files from model.
-#' +#' #' @return Forecast numeric matrix #' @export read_restart.ModelName <- function(outdir, - runid, + runid, stop.time, settings, var.names, params) {} - diff --git a/models/template/R/write.config.MODEL.R b/models/template/R/write.config.MODEL.R index 6f2dbc70ed0..b1196cd2ab4 100644 --- a/models/template/R/write.config.MODEL.R +++ b/models/template/R/write.config.MODEL.R @@ -12,7 +12,7 @@ ##' @return configuration file for MODEL for given run ##' @export ##' @author Rob Kooper -##-------------------------------------------------------------------------------------------------# +## -------------------------------------------------------------------------------------------------# write.config.MODEL <- function(defaults, trait.values, settings, run.id) { PEcAn.logger::logger.severe("NOT IMPLEMENTED") # Please follow the PEcAn style guide: @@ -22,12 +22,12 @@ write.config.MODEL <- function(defaults, trait.values, settings, run.id) { # Calls to dependent packages should use a double colon, e.g. # `packageName::functionName()`. # Also, `require()` should be used only when a package dependency is truly - # optional. In this case, put the package name under "Suggests:" in DESCRIPTION. - + # optional. In this case, put the package name under "Suggests:" in DESCRIPTION. + # find out where to write run/ouput rundir <- file.path(settings$host$rundir, run.id) outdir <- file.path(settings$host$outdir, run.id) - + #----------------------------------------------------------------------- # create launch script (which will create symlink) if (!is.null(settings$model$jobtemplate) && file.exists(settings$model$jobtemplate)) { @@ -35,7 +35,7 @@ write.config.MODEL <- function(defaults, trait.values, settings, run.id) { } else { jobsh <- readLines(con = system.file("template.job", package = "PEcAn.MODEL"), n = -1) } - + # create host specific setttings hostsetup <- "" if (!is.null(settings$model$prerun)) { @@ -44,7 +44,7 @@ write.config.MODEL <- function(defaults, trait.values, settings, run.id) { if (!is.null(settings$host$prerun)) { hostsetup <- paste(hostsetup, sep = "\n", paste(settings$host$prerun, collapse = "\n")) } - + hostteardown <- "" if (!is.null(settings$model$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$model$postrun, collapse = "\n")) @@ -52,26 +52,26 @@ write.config.MODEL <- function(defaults, trait.values, settings, run.id) { if (!is.null(settings$host$postrun)) { hostteardown <- paste(hostteardown, sep = "\n", paste(settings$host$postrun, collapse = "\n")) } - + # create job.sh jobsh <- gsub("@HOST_SETUP@", hostsetup, jobsh) jobsh <- gsub("@HOST_TEARDOWN@", hostteardown, jobsh) - + jobsh <- gsub("@SITE_LAT@", settings$run$site$lat, jobsh) jobsh <- gsub("@SITE_LON@", settings$run$site$lon, jobsh) jobsh <- gsub("@SITE_MET@", settings$run$site$met, jobsh) - + jobsh <- gsub("@START_DATE@", settings$run$start.date, jobsh) jobsh <- gsub("@END_DATE@", settings$run$end.date, jobsh) - + jobsh <- gsub("@OUTDIR@", outdir, jobsh) jobsh <- gsub("@RUNDIR@", rundir, jobsh) - + jobsh <- gsub("@BINARY@", settings$model$binary, jobsh) - + writeLines(jobsh, con = file.path(settings$rundir, run.id, "job.sh")) Sys.chmod(file.path(settings$rundir, run.id, "job.sh")) - + #----------------------------------------------------------------------- ### Edit a templated config file for runs if (!is.null(settings$model$config) && file.exists(settings$model$config)) { @@ -92,7 +92,7 @@ write.config.MODEL <- function(defaults, trait.values, settings, run.id) { 
PEcAn.logger::logger.info("Using", filename, "as template") config.text <- readLines(con = filename, n = -1) } - + config.text <- gsub("@SITE_LAT@", settings$run$site$lat, config.text) config.text <- gsub("@SITE_LON@", settings$run$site$lon, config.text) config.text <- gsub("@SITE_MET@", settings$run$inputs$met$path, config.text) @@ -107,7 +107,7 @@ write.config.MODEL <- function(defaults, trait.values, settings, run.id) { config.text <- gsub("@OUTDIR@", settings$host$outdir, config.text) config.text <- gsub("@ENSNAME@", run.id, config.text) config.text <- gsub("@OUTFILE@", paste0("out", run.id), config.text) - + #----------------------------------------------------------------------- config.file.name <- paste0("CONFIG.", run.id, ".txt") writeLines(config.text, con = paste(outdir, config.file.name, sep = "")) diff --git a/models/template/R/write_restart.ModelName.R b/models/template/R/write_restart.ModelName.R index eceed0450a1..816173f119e 100644 --- a/models/template/R/write_restart.ModelName.R +++ b/models/template/R/write_restart.ModelName.R @@ -1,19 +1,19 @@ #' @title Write restart template for SDA -#' +#' #' @author Alexey Shiklomanov -#' +#' #' @param outdir outout directory -#' @param runid run id -#' @param start.time Time of current assimilation step +#' @param runid run id +#' @param start.time Time of current assimilation step #' @param stop.time Time of next assimilation step #' @param settings pecan settings list #' @param new.state Analysis state matrix returned by \code{sda.enkf} #' @param RENAME flag to either rename output file or not -#' @param new.params optional, additionals params to pass write.configs that are deterministically related to the parameters updated by the analysis +#' @param new.params optional, additionals params to pass write.configs that are deterministically related to the parameters updated by the analysis #' @param inputs new input paths updated by the SDA workflow, will be passed to write.configs -#' +#' #' @description Write restart files for model -#' +#' #' @export write_restart.ModelName <- function(outdir, runid, diff --git a/models/template/tests/testthat.R b/models/template/tests/testthat.R index f44dabc6ffb..8052bcf138b 100644 --- a/models/template/tests/testthat.R +++ b/models/template/tests/testthat.R @@ -2,4 +2,4 @@ library(testthat) library(PEcAn.utils) PEcAn.logger::logger.setQuitOnSevere(FALSE) -#test_check("PEcAn.ModelName") +# test_check("PEcAn.ModelName") diff --git a/models/template/tests/testthat/test.met2model.R b/models/template/tests/testthat/test.met2model.R index 566b76d2cb4..43f2bca2581 100644 --- a/models/template/tests/testthat/test.met2model.R +++ b/models/template/tests/testthat/test.met2model.R @@ -7,7 +7,8 @@ teardown(unlink(outfolder, recursive = TRUE)) test_that("Met conversion runs without error", { skip("This is a template test that will not run. To run it, remove this `skip` call.") nc_path <- system.file("test-data", "CRUNCEP.2000.nc", - package = "PEcAn.utils") + package = "PEcAn.utils" + ) in.path <- dirname(nc_path) in.prefix <- "CRUNCEP" start_date <- "2000-01-01" diff --git a/modules/DART/DART/Kodiak/models/ED2/utils/adjValue.R b/modules/DART/DART/Kodiak/models/ED2/utils/adjValue.R index eff7d139b55..d720c736c55 100644 --- a/modules/DART/DART/Kodiak/models/ED2/utils/adjValue.R +++ b/modules/DART/DART/Kodiak/models/ED2/utils/adjValue.R @@ -1,26 +1,25 @@ # This script writes the state vector values to the relevant history file. 
adjValue <- function() { + # The HDF5 library needed to handle the ED2 HDF5 files. + library(rhdf5) -# The HDF5 library needed to handle the ED2 HDF5 files. - library(rhdf5) + # Reading the name of the history file. The Fortran program F2R produces this name. + fname <- readLines(con = "file_name.txt", n = 1) -# Reading the name of the history file. The Fortran program F2R produces this name. - fname <- readLines(con="file_name.txt",n=1) + # Reading the existing green leaf factor and the PFT files. These are just to make certain that the green leaf factor vector will be of the correct length. -# Reading the existing green leaf factor and the PFT files. These are just to make certain that the green leaf factor vector will be of the correct length. + GLF <- h5read(fname, "GREEN_LEAF_FACTOR") - GLF <- h5read(fname,"GREEN_LEAF_FACTOR") + # Reading the state vecotr values as written down by the program F2R. + NV <- read.table("4Rvalues.dat") -# Reading the state vecotr values as written down by the program F2R. - NV <- read.table("4Rvalues.dat") + LAI <- NV[, 1] + GLF[9:11] <- NV[, 2] + B <- NV[, 3] - LAI <- NV[,1] - GLF[9:11] <- NV[,2] - B <- NV[,3] - - h5write(LAI,fname,"LAI") - h5write(GLF,fname,"GREEN_LEAF_FACTOR") - h5write(B,fname,"PHEN_PAR_A") + h5write(LAI, fname, "LAI") + h5write(GLF, fname, "GREEN_LEAF_FACTOR") + h5write(B, fname, "PHEN_PAR_A") } -adjValue() \ No newline at end of file +adjValue() diff --git a/modules/DART/DART/Kodiak/models/ED2/utils/createInput.R b/modules/DART/DART/Kodiak/models/ED2/utils/createInput.R index cd010d82537..a38d947d216 100644 --- a/modules/DART/DART/Kodiak/models/ED2/utils/createInput.R +++ b/modules/DART/DART/Kodiak/models/ED2/utils/createInput.R @@ -5,57 +5,55 @@ # The script here converts those days in to a date used by the ED2IN. 
createInput <- function() { - days <- read.table("4Rdate.dat") - year <- as.numeric(readLines('sim_year')) -# year <- 2002 - - if(days[2,2] == 365){ - ayears <- year + days[2,2]%/%365 - 1 - adays <- 365 - } - else { - ayears <- year + days[2,2]%/%365 - adays <- days[2,2]%%365 - } - - if(days[1,2] == 365){ - zyears <- year + days[1,2]%/%365 -1 - zdays <- 365 - } - else { - zyears <- year + days[1,2]%/%365 - zdays <- days[1,2]%%365 - } - - begin <- days2month(adays) - final <- days2month(zdays) - histo <- readLines(con='histo.dat') - - ed2in.text <- readLines(con='T_ED2IN', n=-1) - ed2in.text <- gsub('@HISTO@',histo,ed2in.text) - ed2in.text <- gsub('@START_DAY@',begin[1],ed2in.text) - ed2in.text <- gsub('@START_MONTH@',begin[2],ed2in.text) - ed2in.text <- gsub('@START_YEAR@',ayears,ed2in.text) - ed2in.text <- gsub('@END_DAY@',final[1],ed2in.text) - ed2in.text <- gsub('@END_MONTH@',final[2],ed2in.text) - ed2in.text <- gsub('@END_YEAR@',zyears,ed2in.text) - - writeLines(ed2in.text, con='ED2IN') + days <- read.table("4Rdate.dat") + year <- as.numeric(readLines("sim_year")) + # year <- 2002 + + if (days[2, 2] == 365) { + ayears <- year + days[2, 2] %/% 365 - 1 + adays <- 365 + } else { + ayears <- year + days[2, 2] %/% 365 + adays <- days[2, 2] %% 365 + } + + if (days[1, 2] == 365) { + zyears <- year + days[1, 2] %/% 365 - 1 + zdays <- 365 + } else { + zyears <- year + days[1, 2] %/% 365 + zdays <- days[1, 2] %% 365 + } + + begin <- days2month(adays) + final <- days2month(zdays) + histo <- readLines(con = "histo.dat") + + ed2in.text <- readLines(con = "T_ED2IN", n = -1) + ed2in.text <- gsub("@HISTO@", histo, ed2in.text) + ed2in.text <- gsub("@START_DAY@", begin[1], ed2in.text) + ed2in.text <- gsub("@START_MONTH@", begin[2], ed2in.text) + ed2in.text <- gsub("@START_YEAR@", ayears, ed2in.text) + ed2in.text <- gsub("@END_DAY@", final[1], ed2in.text) + ed2in.text <- gsub("@END_MONTH@", final[2], ed2in.text) + ed2in.text <- gsub("@END_YEAR@", zyears, ed2in.text) + + writeLines(ed2in.text, con = "ED2IN") } -days2month <- function(days){ - ec <- c(0,31,59,90,120,151,181,212,243,273,304,334,365) +days2month <- function(days) { + ec <- c(0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) - for(i in 1:12){ - if(days > ec[i]){ - if(days <= ec[i+1]){ - month <- i - } + for (i in 1:12) { + if (days > ec[i]) { + if (days <= ec[i + 1]) { + month <- i } } + } - day <- days - ec[month] - c(day,month) + day <- days - ec[month] + c(day, month) } createInput() diff --git a/modules/DART/DART/Kodiak/models/ED2/utils/createTransit.R b/modules/DART/DART/Kodiak/models/ED2/utils/createTransit.R index 303d97e8a46..e27faedb1ff 100644 --- a/modules/DART/DART/Kodiak/models/ED2/utils/createTransit.R +++ b/modules/DART/DART/Kodiak/models/ED2/utils/createTransit.R @@ -5,42 +5,42 @@ # The script here converts those days in to a date used by the ED2IN. 
createTransit <- function() { - year <- as.numeric(readLines('sim_year')) + year <- as.numeric(readLines("sim_year")) - ayears <- year -# adays <- 365 - adays <- as.numeric(readLines('end_date')) + ayears <- year + # adays <- 365 + adays <- as.numeric(readLines("end_date")) - zyears <- year + 1 - zdays <- 105 + zyears <- year + 1 + zdays <- 105 - start <- days2month(adays) - end <- days2month(zdays) + start <- days2month(adays) + end <- days2month(zdays) - ed2in.text <- readLines(con='R_ED2IN', n=-1) - ed2in.text <- gsub('@START_DAY@',start[1],ed2in.text) - ed2in.text <- gsub('@START_MONTH@',start[2],ed2in.text) - ed2in.text <- gsub('@START_YEAR@',ayears,ed2in.text) - ed2in.text <- gsub('@END_DAY@',end[1],ed2in.text) - ed2in.text <- gsub('@END_MONTH@',end[2],ed2in.text) - ed2in.text <- gsub('@END_YEAR@',zyears,ed2in.text) + ed2in.text <- readLines(con = "R_ED2IN", n = -1) + ed2in.text <- gsub("@START_DAY@", start[1], ed2in.text) + ed2in.text <- gsub("@START_MONTH@", start[2], ed2in.text) + ed2in.text <- gsub("@START_YEAR@", ayears, ed2in.text) + ed2in.text <- gsub("@END_DAY@", end[1], ed2in.text) + ed2in.text <- gsub("@END_MONTH@", end[2], ed2in.text) + ed2in.text <- gsub("@END_YEAR@", zyears, ed2in.text) - writeLines(ed2in.text, con='ED2IN') + writeLines(ed2in.text, con = "ED2IN") } -days2month <- function(days){ - ec <- c(0,31,59,90,120,151,181,212,243,273,304,334,365) +days2month <- function(days) { + ec <- c(0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) - for(i in 1:12){ - if(days > ec[i]){ - if(days <= ec[i+1]){ - month <- i - } + for (i in 1:12) { + if (days > ec[i]) { + if (days <= ec[i + 1]) { + month <- i } } + } - day <- days - ec[month] - c(day,month) + day <- days - ec[month] + c(day, month) } createTransit() diff --git a/modules/DART/DART/Kodiak/models/ED2/utils/readValue.R b/modules/DART/DART/Kodiak/models/ED2/utils/readValue.R index e1acc9d2e42..9a397113e4b 100644 --- a/modules/DART/DART/Kodiak/models/ED2/utils/readValue.R +++ b/modules/DART/DART/Kodiak/models/ED2/utils/readValue.R @@ -1,27 +1,26 @@ # This R script reads the predicted state vector values from the history files. readValue <- function() { - library(rhdf5) + library(rhdf5) -# Reads the name of the end time history file. The program F2R names this file. + # Reads the name of the end time history file. The program F2R names this file. - fname <- readLines(con="end_file.txt",n=1) + fname <- readLines(con = "end_file.txt", n = 1) - V <- h5read(fname,"GREEN_LEAF_FACTOR") - PFT <- h5read(fname,"PFT") - LAI <- h5read(fname,"LAI") - B <- h5read(fname,"PHEN_PAR_A") + V <- h5read(fname, "GREEN_LEAF_FACTOR") + PFT <- h5read(fname, "PFT") + LAI <- h5read(fname, "LAI") + B <- h5read(fname, "PHEN_PAR_A") -# The green leaf factor is a vector, but for all the decidous species the green leaf factor will be the same. Thus here the state vector value is set to be one of the decidious components of that vector. - - NV <- V[9] + # The green leaf factor is a vector, but for all the decidous species the green leaf factor will be the same. Thus here the state vector value is set to be one of the decidious components of that vector. 
-# Here the state vector for DART is written down and saved for program R2F to convert to binary form for DART + NV <- V[9] - CV <- c(LAI,NV,B) + # Here the state vector for DART is written down and saved for program R2F to convert to binary form for DART - write(CV, "Routput.dat") + CV <- c(LAI, NV, B) + write(CV, "Routput.dat") } -readValue() \ No newline at end of file +readValue() diff --git a/modules/DART/DART/Kodiak/models/ED2/work/adjValue.R b/modules/DART/DART/Kodiak/models/ED2/work/adjValue.R index c76cce0bbdf..baecc7e9e7a 100644 --- a/modules/DART/DART/Kodiak/models/ED2/work/adjValue.R +++ b/modules/DART/DART/Kodiak/models/ED2/work/adjValue.R @@ -1,28 +1,27 @@ # This script writes the state vector values to the relevant history file. adjValue <- function() { + # The HDF5 library needed to handle the ED2 HDF5 files. + library(rhdf5) -# The HDF5 library needed to handle the ED2 HDF5 files. - library(rhdf5) + # Reading the name of the history file. The Fortran program F2R produces this name. + fname <- readLines(con = "file_name.txt", n = 1) -# Reading the name of the history file. The Fortran program F2R produces this name. - fname <- readLines(con="file_name.txt",n=1) + # Reading the existing green leaf factor and the PFT files. These are just to make certain that the green leaf factor vector will be of the correct length. -# Reading the existing green leaf factor and the PFT files. These are just to make certain that the green leaf factor vector will be of the correct length. + GLF <- h5read(fname, "GREEN_LEAF_FACTOR") - GLF <- h5read(fname,"GREEN_LEAF_FACTOR") + # Reading the state vector values as written down by the program F2R. + NV <- read.table("4Rvalues.dat") -# Reading the state vector values as written down by the program F2R. - NV <- read.table("4Rvalues.dat") + LAI <- NV[, 1] + GLF[9:11] <- NV[, 2] + B <- NV[, 3] + F <- NV[, 4] - LAI <- NV[,1] - GLF[9:11] <- NV[,2] - B <- NV[,3] - F <- NV[,4] - - h5write(LAI,fname,"LAI") - h5write(GLF,fname,"GREEN_LEAF_FACTOR") - h5write(B,fname,"PHEN_PAR_A") - h5write(F,fname,"PHEN_PAR_FALL") + h5write(LAI, fname, "LAI") + h5write(GLF, fname, "GREEN_LEAF_FACTOR") + h5write(B, fname, "PHEN_PAR_A") + h5write(F, fname, "PHEN_PAR_FALL") } -adjValue() \ No newline at end of file +adjValue() diff --git a/modules/DART/DART/Kodiak/models/ED2/work/createInput.R b/modules/DART/DART/Kodiak/models/ED2/work/createInput.R index cd010d82537..a38d947d216 100644 --- a/modules/DART/DART/Kodiak/models/ED2/work/createInput.R +++ b/modules/DART/DART/Kodiak/models/ED2/work/createInput.R @@ -5,57 +5,55 @@ # The script here converts those days in to a date used by the ED2IN. 
createInput <- function() { - days <- read.table("4Rdate.dat") - year <- as.numeric(readLines('sim_year')) -# year <- 2002 - - if(days[2,2] == 365){ - ayears <- year + days[2,2]%/%365 - 1 - adays <- 365 - } - else { - ayears <- year + days[2,2]%/%365 - adays <- days[2,2]%%365 - } - - if(days[1,2] == 365){ - zyears <- year + days[1,2]%/%365 -1 - zdays <- 365 - } - else { - zyears <- year + days[1,2]%/%365 - zdays <- days[1,2]%%365 - } - - begin <- days2month(adays) - final <- days2month(zdays) - histo <- readLines(con='histo.dat') - - ed2in.text <- readLines(con='T_ED2IN', n=-1) - ed2in.text <- gsub('@HISTO@',histo,ed2in.text) - ed2in.text <- gsub('@START_DAY@',begin[1],ed2in.text) - ed2in.text <- gsub('@START_MONTH@',begin[2],ed2in.text) - ed2in.text <- gsub('@START_YEAR@',ayears,ed2in.text) - ed2in.text <- gsub('@END_DAY@',final[1],ed2in.text) - ed2in.text <- gsub('@END_MONTH@',final[2],ed2in.text) - ed2in.text <- gsub('@END_YEAR@',zyears,ed2in.text) - - writeLines(ed2in.text, con='ED2IN') + days <- read.table("4Rdate.dat") + year <- as.numeric(readLines("sim_year")) + # year <- 2002 + + if (days[2, 2] == 365) { + ayears <- year + days[2, 2] %/% 365 - 1 + adays <- 365 + } else { + ayears <- year + days[2, 2] %/% 365 + adays <- days[2, 2] %% 365 + } + + if (days[1, 2] == 365) { + zyears <- year + days[1, 2] %/% 365 - 1 + zdays <- 365 + } else { + zyears <- year + days[1, 2] %/% 365 + zdays <- days[1, 2] %% 365 + } + + begin <- days2month(adays) + final <- days2month(zdays) + histo <- readLines(con = "histo.dat") + + ed2in.text <- readLines(con = "T_ED2IN", n = -1) + ed2in.text <- gsub("@HISTO@", histo, ed2in.text) + ed2in.text <- gsub("@START_DAY@", begin[1], ed2in.text) + ed2in.text <- gsub("@START_MONTH@", begin[2], ed2in.text) + ed2in.text <- gsub("@START_YEAR@", ayears, ed2in.text) + ed2in.text <- gsub("@END_DAY@", final[1], ed2in.text) + ed2in.text <- gsub("@END_MONTH@", final[2], ed2in.text) + ed2in.text <- gsub("@END_YEAR@", zyears, ed2in.text) + + writeLines(ed2in.text, con = "ED2IN") } -days2month <- function(days){ - ec <- c(0,31,59,90,120,151,181,212,243,273,304,334,365) +days2month <- function(days) { + ec <- c(0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) - for(i in 1:12){ - if(days > ec[i]){ - if(days <= ec[i+1]){ - month <- i - } + for (i in 1:12) { + if (days > ec[i]) { + if (days <= ec[i + 1]) { + month <- i } } + } - day <- days - ec[month] - c(day,month) + day <- days - ec[month] + c(day, month) } createInput() diff --git a/modules/DART/DART/Kodiak/models/ED2/work/createTransit.R b/modules/DART/DART/Kodiak/models/ED2/work/createTransit.R index 303d97e8a46..e27faedb1ff 100644 --- a/modules/DART/DART/Kodiak/models/ED2/work/createTransit.R +++ b/modules/DART/DART/Kodiak/models/ED2/work/createTransit.R @@ -5,42 +5,42 @@ # The script here converts those days in to a date used by the ED2IN. 
createTransit <- function() { - year <- as.numeric(readLines('sim_year')) + year <- as.numeric(readLines("sim_year")) - ayears <- year -# adays <- 365 - adays <- as.numeric(readLines('end_date')) + ayears <- year + # adays <- 365 + adays <- as.numeric(readLines("end_date")) - zyears <- year + 1 - zdays <- 105 + zyears <- year + 1 + zdays <- 105 - start <- days2month(adays) - end <- days2month(zdays) + start <- days2month(adays) + end <- days2month(zdays) - ed2in.text <- readLines(con='R_ED2IN', n=-1) - ed2in.text <- gsub('@START_DAY@',start[1],ed2in.text) - ed2in.text <- gsub('@START_MONTH@',start[2],ed2in.text) - ed2in.text <- gsub('@START_YEAR@',ayears,ed2in.text) - ed2in.text <- gsub('@END_DAY@',end[1],ed2in.text) - ed2in.text <- gsub('@END_MONTH@',end[2],ed2in.text) - ed2in.text <- gsub('@END_YEAR@',zyears,ed2in.text) + ed2in.text <- readLines(con = "R_ED2IN", n = -1) + ed2in.text <- gsub("@START_DAY@", start[1], ed2in.text) + ed2in.text <- gsub("@START_MONTH@", start[2], ed2in.text) + ed2in.text <- gsub("@START_YEAR@", ayears, ed2in.text) + ed2in.text <- gsub("@END_DAY@", end[1], ed2in.text) + ed2in.text <- gsub("@END_MONTH@", end[2], ed2in.text) + ed2in.text <- gsub("@END_YEAR@", zyears, ed2in.text) - writeLines(ed2in.text, con='ED2IN') + writeLines(ed2in.text, con = "ED2IN") } -days2month <- function(days){ - ec <- c(0,31,59,90,120,151,181,212,243,273,304,334,365) +days2month <- function(days) { + ec <- c(0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) - for(i in 1:12){ - if(days > ec[i]){ - if(days <= ec[i+1]){ - month <- i - } + for (i in 1:12) { + if (days > ec[i]) { + if (days <= ec[i + 1]) { + month <- i } } + } - day <- days - ec[month] - c(day,month) + day <- days - ec[month] + c(day, month) } createTransit() diff --git a/modules/DART/DART/Kodiak/models/ED2/work/readValue.R b/modules/DART/DART/Kodiak/models/ED2/work/readValue.R index 91a48868a40..62f4994d137 100644 --- a/modules/DART/DART/Kodiak/models/ED2/work/readValue.R +++ b/modules/DART/DART/Kodiak/models/ED2/work/readValue.R @@ -1,28 +1,27 @@ # This R script reads the predicted state vector values from the history files. readValue <- function() { - library(rhdf5) + library(rhdf5) -# Reads the name of the end time history file. The program F2R names this file. + # Reads the name of the end time history file. The program F2R names this file. - fname <- readLines(con="end_file.txt",n=1) + fname <- readLines(con = "end_file.txt", n = 1) - V <- h5read(fname,"GREEN_LEAF_FACTOR") - PFT <- h5read(fname,"PFT") - LAI <- h5read(fname,"LAI") - B <- h5read(fname,"PHEN_PAR_A") - F <- h5read(fname,"PHEN_PAR_FALL") + V <- h5read(fname, "GREEN_LEAF_FACTOR") + PFT <- h5read(fname, "PFT") + LAI <- h5read(fname, "LAI") + B <- h5read(fname, "PHEN_PAR_A") + F <- h5read(fname, "PHEN_PAR_FALL") -# The green leaf factor is a vector, but for all the decidous species the green leaf factor will be the same. Thus here the state vector value is set to be one of the decidious components of that vector. - - NV <- V[9] + # The green leaf factor is a vector, but for all the decidous species the green leaf factor will be the same. Thus here the state vector value is set to be one of the decidious components of that vector. 
-# Here the state vector for DART is written down and saved for program R2F to convert to binary form for DART + NV <- V[9] - CV <- c(LAI,NV,B,F) + # Here the state vector for DART is written down and saved for program R2F to convert to binary form for DART - write(CV, "Routput.dat") + CV <- c(LAI, NV, B, F) + write(CV, "Routput.dat") } -readValue() \ No newline at end of file +readValue() diff --git a/modules/DART/R/ObsSeq.R b/modules/DART/R/ObsSeq.R index e532f695980..065ca63c281 100644 --- a/modules/DART/R/ObsSeq.R +++ b/modules/DART/R/ObsSeq.R @@ -6,32 +6,32 @@ ObsSeq <- function(m_file, f_file, cut_day) { # t_m <- seq(5,365,8) t_m <- as.numeric(substr(as.character(d_m), 5, 7)) + 4 e_m <- rep(0.66, length(t_m)) - - flux <- read.csv(f_file) - P2 <- flux$par_2 - P30 <- flux$par_30 - x2 <- P2[1:17280] - x30 <- P30[1:17280] - x2[x2 < 0.1] <- NA + + flux <- read.csv(f_file) + P2 <- flux$par_2 + P30 <- flux$par_30 + x2 <- P2[1:17280] + x30 <- P30[1:17280] + x2[x2 < 0.1] <- NA x30[x30 < 200] <- NA - fPAR <- x2/x30 - efPAR <- sqrt(0.02) * fPAR - lefPAR <- efPAR/fPAR - + fPAR <- x2 / x30 + efPAR <- sqrt(0.02) * fPAR + lefPAR <- efPAR / fPAR + period <- rep(1:45, each = 48 * 8) # period <- rep(1:90, each=48*4) x_f <- tapply(-log(fPAR) / 0.52 - 1.9, period, mean, na.rm = TRUE) e_f <- rep(0.95, 45) t_f <- seq(5, 361, 8) # e_f <- rep(0.95,90) t_f <- seq(5,361,4) - + x <- c(x_m, x_f) e <- c(e_m, e_f) t <- c(t_m, t_f) - + k <- rep(-1, length(x)) l <- rep(0, length(x)) - + obs <- data.frame(Obs = x, Time = t, Kind = k, Error = e, Loc = l) WriteObsSeq(obs, cut_day) write.csv(obs, "obs.dat") @@ -41,44 +41,47 @@ WriteObsSeq <- function(obs.file, day) { obs.file$Obs[obs.file$Time < day] <- NA obs.file$Obs[obs.file$Obs < 0] <- NA obs.file <- obs.file[complete.cases(obs.file), ] - - o <- order(obs.file$Time) - x <- obs.file$Obs[o] - loc <- obs.file$Loc[o] + + o <- order(obs.file$Time) + x <- obs.file$Obs[o] + loc <- obs.file$Loc[o] kind <- obs.file$Kind[o] - t <- obs.file$Time[o] - e <- obs.file$Error[o] - - num_c <- 1 + t <- obs.file$Time[o] + e <- obs.file$Error[o] + + num_c <- 1 num_qc <- 0 - + write("obs_sequence", file = "obs_seq.out") write("obs_kind_definitions", file = "obs_seq.out", sep = "\n", append = TRUE) write("\t\t1", file = "obs_seq.out", sep = "\n", append = TRUE) write("\t\t1 LAI", file = "obs_seq.out", sep = "\n", append = TRUE) - write(paste("num_copies:\t\t", num_c, "num_qc:\t", num_qc), file = "obs_seq.out", sep = "\n", - append = TRUE) - write(paste("num_obs:\t\t", length(x), "max_num_obs:\t", length(x)), file = "obs_seq.out", sep = "\n", - append = TRUE) + write(paste("num_copies:\t\t", num_c, "num_qc:\t", num_qc), + file = "obs_seq.out", sep = "\n", + append = TRUE + ) + write(paste("num_obs:\t\t", length(x), "max_num_obs:\t", length(x)), + file = "obs_seq.out", sep = "\n", + append = TRUE + ) write("observations", file = "obs_seq.out", sep = "\n", append = TRUE) write(paste(" first:\t1\tlast:\t", length(x)), file = "obs_seq.out", sep = "\n", append = TRUE) for (j in seq_along(x)) { write(paste(" OBS\t", j), file = "obs_seq.out", sep = "\n", append = TRUE) write(x[j], file = "obs_seq.out", sep = "\n", append = TRUE) - - pl <- c(j - 1, j + 1, -1) - pl[pl < 1] <- -1 + + pl <- c(j - 1, j + 1, -1) + pl[pl < 1] <- -1 pl[pl > length(x)] <- -1 - + write(pl, file = "obs_seq.out", sep = "\t\t", append = TRUE) write("obdef", file = "obs_seq.out", sep = "\n", append = TRUE) write("loc1d", file = "obs_seq.out", sep = "\n", append = TRUE) write(loc[j], file = "obs_seq.out", sep = "\n", append = 
TRUE) write("kind", file = "obs_seq.out", sep = "\n", append = TRUE) write(kind[j], file = "obs_seq.out", sep = "\n", append = TRUE) - + write(paste(" 0\t\t", t[j]), file = "obs_seq.out", sep = "\n", append = TRUE) write(e[j], file = "obs_seq.out", sep = "\n", append = TRUE) } } # WriteObsSeq - diff --git a/modules/DART/R/adjValue.R b/modules/DART/R/adjValue.R index 5691110c852..6adc3762981 100644 --- a/modules/DART/R/adjValue.R +++ b/modules/DART/R/adjValue.R @@ -1,9 +1,9 @@ adjValue <- function() { library(rhdf5) - + fname <- readLines(con = "file_name.txt", n = 1) NV <- read.table("4Rvalues.dat") - + h5write(NV[1, 1], fname, "LAI") } # adjValue diff --git a/modules/DART/R/createInput.R b/modules/DART/R/createInput.R index 4f76492b6bd..40b7b342d1b 100644 --- a/modules/DART/R/createInput.R +++ b/modules/DART/R/createInput.R @@ -1,14 +1,14 @@ createInput <- function() { - days <- read.table("4Rdate.dat") - + days <- read.table("4Rdate.dat") + start <- days2month(days[2, 2]) - end <- days2month(days[1, 2]) - + end <- days2month(days[1, 2]) + ed2in.text <- readLines(con = "ED2IN", n = -1) ed2in.text <- gsub("@START_DAY@", start[1], ed2in.text) ed2in.text <- gsub("@START_MONTH@", start[2], ed2in.text) ed2in.text <- gsub("@END_DAY@", end[1], ed2in.text) ed2in.text <- gsub("@END_MONTH@", end[2], ed2in.text) - + writeLines(ed2in.text, con = "R_ED2IN") } # createInput diff --git a/modules/DART/R/date2month.R b/modules/DART/R/date2month.R index ba67c36fc75..759c8bdd0d9 100644 --- a/modules/DART/R/date2month.R +++ b/modules/DART/R/date2month.R @@ -1,6 +1,6 @@ days2month <- function(days) { ec <- c(0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 302, 334, 365) - + for (i in 1:12) { if (days > ec[i]) { if (days < ec[i + 1]) { @@ -8,7 +8,7 @@ days2month <- function(days) { } } } - + day <- days - ec[month] return(c(day, month)) } # days2month diff --git a/modules/DART/R/filter_ics.R b/modules/DART/R/filter_ics.R index 77417a5674d..e639d3ffe63 100644 --- a/modules/DART/R/filter_ics.R +++ b/modules/DART/R/filter_ics.R @@ -3,10 +3,10 @@ create_filter_ics <- function(n, t, x, y, ms, ss, mf, sf) { py <- y + runif(n, -0, 0) pz <- rnorm(n, ms, ss) pf <- rnorm(n, mf, sf) - + bt <- c(0, t) g <- cbind(px, py, pz, pf) - + for (j in seq_len(n)) { write(bt, file = "filter_ics", append = j > 1) write(g[j, ], file = "filter_ics", sep = "\n", append = TRUE) diff --git a/modules/allometry/R/AllomAve.R b/modules/allometry/R/AllomAve.R index 357da3edb19..fca4e3f76e4 100644 --- a/modules/allometry/R/AllomAve.R +++ b/modules/allometry/R/AllomAve.R @@ -1,12 +1,11 @@ - #' AllomAve #' #' Allometry wrapper function that handles loading and subsetting the data, #' fitting the Bayesian models, and generating diagnostic figures. Set up to loop over -#' multiple PFTs and components. +#' multiple PFTs and components. #' Writes raw MCMC and PDF of diagnositcs to file and returns table of summary stats. -#' -#' There are two usages of this function. +#' +#' There are two usages of this function. #' When running 'online' (connected to the PEcAn database), pass the database connection, #' con, and the pfts subsection of the PEcAn settings. 
#' When running 'stand alone' pass the pft list mapping species to species codes @@ -30,30 +29,30 @@ #' @param dmax maximum dbh of interest #' @return nested list of parameter summary statistics #' @export -#' -#' @examples -#' -#' if(FALSE){ -#' pfts = list(FAGR = data.frame(spcd=531,acronym='FAGR')) -#' allom.stats = AllomAve(pfts,ngibbs=500) -#' +#' +#' @examples +#' +#' if (FALSE) { +#' pfts <- list(FAGR = data.frame(spcd = 531, acronym = "FAGR")) +#' allom.stats <- AllomAve(pfts, ngibbs = 500) +#' #' ## example of a PFT with multiple species (late hardwood) #' ## note that if you're just using Jenkins the acronym column is optional -#' pfts = list(LH = data.frame(spcd = c(531,318),acronym=c('FAGR','ACSA3'))) +#' pfts <- list(LH = data.frame(spcd = c(531, 318), acronym = c("FAGR", "ACSA3"))) #' } -#' +#' #' @author Michael Dietze -#' +#' #' @export -#' -AllomAve <- function(pfts, components = 6, outdir = NULL, con = NULL, field = NULL, - parm = system.file("data/Table3_GTR-NE-319.v2.csv", package = "PEcAn.allometry"), +#' +AllomAve <- function(pfts, components = 6, outdir = NULL, con = NULL, field = NULL, + parm = system.file("data/Table3_GTR-NE-319.v2.csv", package = "PEcAn.allometry"), ngibbs = 5000, nchain = 3, dmin = 0.1, dmax = 500) { ## common components: ## 6=stem (Bs), 18 = leaf (Bl) - ## 40 = height (Ht) - ## 41 = rooting depth (Rd) - ## 42 = Rooting volume (Vol) + ## 40 = height (Ht) + ## 41 = rooting depth (Rd) + ## 42 = Rooting volume (Vol) ## 43 = Canopy Area nested.range <- function(obs) { @@ -65,25 +64,25 @@ AllomAve <- function(pfts, components = 6, outdir = NULL, con = NULL, field = NU } return(t(w)) } # nested.range - + if (is.null(outdir)) { outdir <- getwd() } print(c("writing output to", outdir)) - + sel <- floor(seq(ngibbs * 0.25, ngibbs, length = min(ngibbs * 0.75, 5000))) - + allom.stats <- list() - - ########## BAYESIAN VERSION ############### + + ########## BAYESIAN VERSION ############### for (ipft in seq_along(pfts)) { # loop over PFTs pft <- pfts[[ipft]] pft.name <- names(pfts)[ipft] allom.stats[[pft.name]] <- list() - + for (component in components) { print(c(pft, component)) - + ## load data if (!is.null(con)) { ### If running within PEcAn, grab the data from the database @@ -92,12 +91,12 @@ AllomAve <- function(pfts, components = 6, outdir = NULL, con = NULL, field = NU } else { allom <- read.allom.data(pft, component, field, parm) } - + if (is.null(allom) | (is.null(allom$parm) & is.null(allom$field))) { next } - - mc <- list() + + mc <- list() obs <- list() for (i in seq_len(nchain)) { if (component == 40) { @@ -109,21 +108,21 @@ AllomAve <- function(pfts, components = 6, outdir = NULL, con = NULL, field = NU obs[[i]] <- allom.out[["obs"]] } mc <- coda::as.mcmc.list(mc) - + ## Model Selection - D <- as.array(mc)[, "D", ] - pD <- mean(D) - min(D) - DIC <- mean(D) + pD - - Dg <- as.array(mc)[, "Dg", ] - pDg <- mean(Dg) - min(Dg) + D <- as.array(mc)[, "D", ] + pD <- mean(D) - min(D) + DIC <- mean(D) + pD + + Dg <- as.array(mc)[, "Dg", ] + pDg <- mean(Dg) - min(Dg) DICg <- mean(Dg) + pDg - + ## Save MCMC objects (Pass to MCMC diagnostics module) outfile <- file.path(outdir, paste("Allom", pft.name, component, "Rdata", sep = ".")) print(c("saving MCMC output to", outfile)) save(mc, DIC, DICg, pD, pDg, obs, allom, file = outfile) - + allom.stats[[pft.name]][[component]] <- summary(mc) allom.stats[[pft.name]][[component]]$cov <- stats::cov(as.matrix(mc)) @@ -131,29 +130,32 @@ AllomAve <- function(pfts, components = 6, outdir = NULL, con = NULL, field = NU 
pdffile <- file.path(outdir, paste("Allom", pft.name, component, "MCMC", "pdf", sep = ".")) print(c("saving diagnostic graphs to", pdffile)) grDevices::pdf(pdffile) - - ## specifying which rows were used in the fit & should be graphed; + + ## specifying which rows were used in the fit & should be graphed; ## note, this requires removing equations whose n is 0 n <- nu(allom[["parm"]]$n) rng.raw <- cbind(nu(allom$parm$Xmin), nu(allom$parm$Xmax)) n.mod <- n - + for (i in seq_along(n)) { tmp.seq <- seq(rng.raw[i, 1], rng.raw[i, 2], length.out = 100) - n.mod[i] <- round(n[i] * length(tmp.seq[tmp.seq > dmin & tmp.seq < dmax]) / length(tmp.seq), - digits = 0) + n.mod[i] <- round(n[i] * length(tmp.seq[tmp.seq > dmin & tmp.seq < dmax]) / length(tmp.seq), + digits = 0 + ) } - + ntally <- which(nu(allom[["parm"]][, "Xmax"]) >= dmin & - nu(allom[["parm"]][, "Xmin"]) <= dmax) + nu(allom[["parm"]][, "Xmin"]) <= dmax) if (is.null(ntally)) { ntally <- 0 } - - ### scatter plot + + ### scatter plot rng <- nested.range(obs) - plot(1, 1, type = "n", log = "xy", xlim = rng[, "x"], # xlim=c(0.1,1000),ylim=c(0.0001,100000), - ylim = rng[, "y"], xlab = "DBH (cm)", ylab = "Biomass (kg)") + plot(1, 1, + type = "n", log = "xy", xlim = rng[, "x"], # xlim=c(0.1,1000),ylim=c(0.0001,100000), + ylim = rng[, "y"], xlab = "DBH (cm)", ylab = "Biomass (kg)" + ) # pseudodata for (i in seq_len(nchain)) { for (j in seq_along(obs[[i]])) { @@ -161,48 +163,47 @@ AllomAve <- function(pfts, components = 6, outdir = NULL, con = NULL, field = NU } } # naive prediction - dseq <- seq(dmin, dmax, length = 100) ## diameter sequence + dseq <- seq(dmin, dmax, length = 100) ## diameter sequence beta <- allom.stats[[pft.name]][[component]]$statistics[, "Mean"] - y.0 <- exp(beta["mu0"] + beta["mu1"] * log(dseq)) - y.g <- exp(beta["Bg0"] + beta["Bg1"] * log(dseq)) - y.o <- predict_allom_orig(dseq, allom$parm[ntally, ]) + y.0 <- exp(beta["mu0"] + beta["mu1"] * log(dseq)) + y.g <- exp(beta["Bg0"] + beta["Bg1"] * log(dseq)) + y.o <- predict_allom_orig(dseq, allom$parm[ntally, ]) graphics::lines(dseq, y.0, lwd = 2, col = 1) graphics::lines(dseq, y.g, lwd = 2, col = 2) for (i in seq_len(nrow(y.o))) { graphics::lines(dseq, y.o[i, ], col = i + 2) } - graphics::legend("topleft", legend = c("Hier", "global", paste("allom", ntally)), - lwd = c(2, 2, rep(1, nrow(y.o))), col = 1:(2 + nrow(y.o))) - + graphics::legend("topleft", + legend = c("Hier", "global", paste("allom", ntally)), + lwd = c(2, 2, rep(1, nrow(y.o))), col = 1:(2 + nrow(y.o)) + ) + ### MCMC diagnostics plot(mc) grDevices::dev.off() - + ## DIC statistics print(c("DIC", DIC, "pD", pD)) print(c("DICg", DICg, "pDg", pDg)) - - } ## end component loop - } ## end PFT loop + } ## end component loop + } ## end PFT loop return(allom.stats) } # AllomAve predict_allom_orig <- function(x, parm) { - out <- matrix(NA, nrow(parm), length(x)) - - eqn <- nu(parm$eqn) - a <- nu(parm$a) - b <- nu(parm$b) - c <- nu(parm$c) - d <- nu(parm$d) - e <- nu(parm$e) - Xcor <- nu(parm$Xcor) - Ycor <- nu(parm$Ycor) + + eqn <- nu(parm$eqn) + a <- nu(parm$a) + b <- nu(parm$b) + c <- nu(parm$c) + d <- nu(parm$d) + e <- nu(parm$e) + Xcor <- nu(parm$Xcor) + Ycor <- nu(parm$Ycor) Xtype <- as.character(parm$Xtype) - + for (i in seq_len(nrow(parm))) { - # X-unit conversion if (!is.na(Xcor[i])) { x <- Xcor[i] * x @@ -211,10 +212,10 @@ predict_allom_orig <- function(x, parm) { ## convert to sq inches x <- x * x / (2.54 * 2.54) } else { - x <- x * x * pi / 4 ## convert to cm Basal Area + x <- x * x * pi / 4 ## convert to cm Basal 
Area } } - + if (eqn[i] == 1) { if (b[i] == 0 & c[i] > 0) { b[i] <- 1 @@ -222,7 +223,7 @@ predict_allom_orig <- function(x, parm) { if (c[i] == 0 & b[i] > 0) { c[i] <- 1 } - y <- 10 ^ (a[i] + b[i] * c[i] * log10(x)) + y <- 10^(a[i] + b[i] * c[i] * log10(x)) } else if (eqn[i] == 2) { if (is.na(d[i]) | d[i] == 0) { d[i] <- 1 @@ -236,25 +237,25 @@ predict_allom_orig <- function(x, parm) { } y <- a[i] + b[i] * x + c[i] * x^d[i] } else if (eqn[i] == 5) { - y <- a[i] + b[i] * x + c[i] * x^2 + d[i] * x ^ 3 + y <- a[i] + b[i] * x + c[i] * x^2 + d[i] * x^3 } else if (eqn[i] == 6) { y <- a[i] * (exp(b[i] + (c[i] * log(x)) + d[i] * x)) } else if (eqn[i] == 7) { - y <- a[i] + ((b[i] * (x ^ c[i]))/((x ^ c[i]) + d[i])) + y <- a[i] + ((b[i] * (x^c[i])) / ((x^c[i]) + d[i])) } else if (eqn[i] == 8) { - y <- 10 ^ (a[i] + b[i] * log10(x)) + y <- 10^(a[i] + b[i] * log10(x)) } else if (eqn[i] == 9) { y <- exp(log(a[i]) + b[i] * log(x)) } else if (eqn[i] == 10) { y <- exp(a[i] + b[i] * log(x)) } else if (eqn[i] == 11) { if (is.na(b[i])) { - b[i] <- 0 + b[i] <- 0 } - y <- a[i] * x ^ (b[i]) + y <- a[i] * x^(b[i]) } out[i, ] <- y * Ycor[i] } - + return(out) } # predict_allom_orig diff --git a/modules/allometry/R/allom.BayesFit.R b/modules/allometry/R/allom.BayesFit.R index 91e92022435..95950aec0cf 100644 --- a/modules/allometry/R/allom.BayesFit.R +++ b/modules/allometry/R/allom.BayesFit.R @@ -1,4 +1,3 @@ - #' allom.BayesFit #' #' Module to fit a common power-law allometric model @@ -45,7 +44,6 @@ #' @author Michael Dietze #' allom.BayesFit <- function(allom, nrep = 10000, form = "power", dmin = 0.1, dmax = 500) { - ## check for valid inputs if (!(form %in% ("power"))) { print(c("allom.BayesFit: Requested functional form", form, "not currently supported")) @@ -63,103 +61,103 @@ allom.BayesFit <- function(allom, nrep = 10000, form = "power", dmin = 0.1, dmax print(c("allom.BayesFit: invalid nrep", nrep)) return(NULL) } - - + + ## grab required variables from allom$parm - n <- nu(allom[["parm"]]$n) - a <- nu(allom[["parm"]]$a) - b <- nu(allom[["parm"]]$b) - c <- nu(allom[["parm"]]$c) - d <- nu(allom[["parm"]]$d) - e <- nu(allom[["parm"]]$e) - se <- nu(allom[["parm"]]$se) - Xcor <- nu(allom[["parm"]]$Xcor) - Ycor <- nu(allom[["parm"]]$Ycor) + n <- nu(allom[["parm"]]$n) + a <- nu(allom[["parm"]]$a) + b <- nu(allom[["parm"]]$b) + c <- nu(allom[["parm"]]$c) + d <- nu(allom[["parm"]]$d) + e <- nu(allom[["parm"]]$e) + se <- nu(allom[["parm"]]$se) + Xcor <- nu(allom[["parm"]]$Xcor) + Ycor <- nu(allom[["parm"]]$Ycor) Xtype <- as.character(allom[["parm"]]$Xtype) - eqn <- nu(allom[["parm"]]$eqn) - rng <- cbind(nu(allom$parm$Xmin), nu(allom$parm$Xmax)) - spp <- nu(allom[["parm"]]$spp) - + eqn <- nu(allom[["parm"]]$eqn) + rng <- cbind(nu(allom$parm$Xmin), nu(allom$parm$Xmax)) + spp <- nu(allom[["parm"]]$spp) + ## declare constants - + ## Drop equations outside of DBH range of interest & modifying the pseduodata as necessary We need ## the max of the allometry EQ to be >= min of interest and the min to be <= ntally = ## nrow(allom[['parm']]); if(is.null(ntally)) ntally = 0; - rng.mod <- cbind(ifelse(nu(allom$parm$Xmin) > dmin, nu(allom$parm$Xmin), dmin), - ifelse(nu(allom$parm$Xmax) < dmax, nu(allom$parm$Xmax), dmax)) - + rng.mod <- cbind( + ifelse(nu(allom$parm$Xmin) > dmin, nu(allom$parm$Xmin), dmin), + ifelse(nu(allom$parm$Xmax) < dmax, nu(allom$parm$Xmax), dmax) + ) + n.mod <- n for (i in seq_along(n)) { tmp.seq <- seq(rng[i, 1], rng[i, 2], length.out = 100) n.mod[i] <- round(n[i] * length(tmp.seq[tmp.seq > dmin & tmp.seq < 
dmax]) / - length(tmp.seq), digits = 0) + length(tmp.seq), digits = 0) } - + ntally <- which(nu(allom[["parm"]][, "Xmax"]) >= dmin & - nu(allom[["parm"]][, "Xmin"]) <= dmax & - n.mod > 0) + nu(allom[["parm"]][, "Xmin"]) <= dmax & + n.mod > 0) if (is.null(ntally)) { ntally <- 0 } print(c("Dropping allom rows: ", which(!(1:nrow(allom[["parm"]]) %in% ntally)))) - + nfield <- length(allom[["field"]]) nsite <- length(ntally[ntally > 0]) + nfield my.spp <- unique(spp) nspp <- length(my.spp) - + if (nsite == 0) { print(c("allomBayesFit no data")) return(NULL) } - + ## define priors - s1 <- s2 <- 0.1 # IG prior on the within-study variance - mu0 <- c(0.2, 8 / 3) # normal prior mean on global mean - V0 <- matrix(c(100, 0, 0, 100), 2, 2) # normal prior variance on global mean - V0I <- solve(V0) - m0V0 <- t(mu0) %*% V0I %*% mu0 + s1 <- s2 <- 0.1 # IG prior on the within-study variance + mu0 <- c(0.2, 8 / 3) # normal prior mean on global mean + V0 <- matrix(c(100, 0, 0, 100), 2, 2) # normal prior variance on global mean + V0I <- solve(V0) + m0V0 <- t(mu0) %*% V0I %*% mu0 V0Imu0 <- V0I %*% mu0 - v <- 0.1 ## wishart prior on across-study variance - S <- diag(0.1, 2) - + v <- 0.1 ## wishart prior on across-study variance + S <- diag(0.1, 2) + ## declare storage - b0GIBBS <- matrix(0, nrep, nsite) - b1GIBBS <- matrix(0, nrep, nsite) - muGIBBS <- matrix(0, nrep, 2) + b0GIBBS <- matrix(0, nrep, nsite) + b1GIBBS <- matrix(0, nrep, nsite) + muGIBBS <- matrix(0, nrep, 2) sigGIBBS <- rep(NA, nrep) tauGIBBS <- matrix(0, nrep, 3) - DGIBBS <- rep(NA, nrep) - BgGIBBS <- matrix(0, nrep, 2) - SgGIBBS <- rep(NA, nrep) - DgGIBBS <- rep(NA, nrep) - + DGIBBS <- rep(NA, nrep) + BgGIBBS <- matrix(0, nrep, 2) + SgGIBBS <- rep(NA, nrep) + DgGIBBS <- rep(NA, nrep) + ## initialization - mu <- mu0 - b0 <- rep(mu[1], nsite) - b1 <- rep(mu[2], nsite) - tau <- diag(c(1, 1)) - tauI <- solve(tau) + mu <- mu0 + b0 <- rep(mu[1], nsite) + b1 <- rep(mu[2], nsite) + tau <- diag(c(1, 1)) + tauI <- solve(tau) sigma <- 0.3 - sinv <- 1 / sigma - data <- allom[["field"]] + sinv <- 1 / sigma + data <- allom[["field"]] for (i in seq_along(ntally)) { data[[i + nfield]] <- list(x = rep(0, n.mod[ntally[i]]), y = rep(0, n.mod[ntally[i]])) } - - x <- y <- NULL - Sg <- 1 - Bg <- mu0 + + x <- y <- NULL + Sg <- 1 + Bg <- mu0 SgI <- 1 / Sg - D <- Dg <- 0 - + D <- Dg <- 0 + ## MCMC LOOP pb <- utils::txtProgressBar(min = 0, max = nrep, style = 3) for (g in seq_len(nrep)) { - ## For tabulated equations, impute X,Y data -------------------------------------- - if (ntally[1] > 0) - { + if (ntally[1] > 0) { for (j in ntally) { x0 <- stats::runif(n.mod[j], rng.mod[j, 1], rng.mod[j, 2]) if (!is.na(Xcor[j])) { @@ -169,7 +167,7 @@ allom.BayesFit <- function(allom, nrep = 10000, form = "power", dmin = 0.1, dmax ## convert to sq inches x <- x0 * x0 / (2.54 * 2.54) } else { - x <- x0 * x0 * pi / 4 ## convert to cm Basal Area + x <- x0 * x0 * pi / 4 ## convert to cm Basal Area } } y <- NA @@ -192,7 +190,7 @@ allom.BayesFit <- function(allom, nrep = 10000, form = "power", dmin = 0.1, dmax y <- a[j] * (exp(b[j] + (c[j] * log(x)) + d[j] * x)) y <- stats::rnorm(n.mod[j], y, se[j]) } else if (eqn[j] == 7) { - y <- a[j] + ((b[j] * (x^c[j]))/((x^c[j]) + d[j])) + y <- a[j] + ((b[j] * (x^c[j])) / ((x^c[j]) + d[j])) y <- stats::rnorm(n.mod[j], y, se[j]) } else if (eqn[j] == 8) { y <- a[j] + b[j] * log10(x) @@ -208,13 +206,13 @@ allom.BayesFit <- function(allom, nrep = 10000, form = "power", dmin = 0.1, dmax y <- stats::rnorm(n.mod[j], y, se[j]) } y[y <= 0] <- NA - y <- y * 
Ycor[j] - s2 <- which(!is.na(y)) - data[[nfield + which(ntally == j)]]$x <- x0[s2] ## store the std units, not the transformed - data[[nfield + which(ntally == j)]]$y <- y[s2] ## store y transformed to std units - } ## end loop over tally entries - } ## end check for ntally > 0 - + y <- y * Ycor[j] + s2 <- which(!is.na(y)) + data[[nfield + which(ntally == j)]]$x <- x0[s2] ## store the std units, not the transformed + data[[nfield + which(ntally == j)]]$y <- y[s2] ## store y transformed to std units + } ## end loop over tally entries + } ## end check for ntally > 0 + if (FALSE) { # diagnostics grDevices::pdf("DvBscatter.pdf") @@ -229,35 +227,33 @@ allom.BayesFit <- function(allom, nrep = 10000, form = "power", dmin = 0.1, dmax plot(BETA) grDevices::dev.off() } - - if (nsite > 1) - { + + if (nsite > 1) { # Hierarchical Bayes Fit Model - + tauImu <- tauI %*% mu - u1 <- s1 - u2 <- s2 + u1 <- s1 + u2 <- s2 for (j in seq_len(nsite)) { - ## Update study-level regression parameters - X <- cbind(rep(1, length(data[[j]]$x)), log(data[[j]]$x)) - Y <- log(data[[j]]$y) - bigV <- solve(sinv * t(X) %*% X + tauI) + X <- cbind(rep(1, length(data[[j]]$x)), log(data[[j]]$x)) + Y <- log(data[[j]]$y) + bigV <- solve(sinv * t(X) %*% X + tauI) littlev <- sinv * t(X) %*% Y + tauImu - beta <- t(mvtnorm::rmvnorm(1, bigV %*% littlev, bigV)) - b0[j] <- beta[1] - b1[j] <- beta[2] - + beta <- t(mvtnorm::rmvnorm(1, bigV %*% littlev, bigV)) + b0[j] <- beta[1] + b1[j] <- beta[2] + ## Update study-level error u1 <- u1 + nrow(X) / 2 u2 <- u2 + as.numeric(0.5 * crossprod(Y - X %*% beta)) - + ## Calculate Deviance D[j] <- -2 * sum(stats::dnorm(Y, X %*% beta, sqrt(sigma), log = TRUE)) } - sinv <- stats::rgamma(1, u1, u2) ## precision - sigma <- 1 / sinv ## variance - + sinv <- stats::rgamma(1, u1, u2) ## precision + sigma <- 1 / sinv ## variance + ## Update across-study means B <- cbind(b0, b1) bigV <- solve(nrow(B) * tauI + V0I) @@ -266,70 +262,71 @@ allom.BayesFit <- function(allom, nrep = 10000, form = "power", dmin = 0.1, dmax littlev <- littlev + tauI %*% B[i, ] } mu <- t(mvtnorm::rmvnorm(1, bigV %*% littlev, bigV)) - + ## Update across-study variance - u1 <- v + nrow(B) - u2 <- S + crossprod(B - t(matrix(mu, nrow = 2, ncol = nrow(B)))) - tau <- MCMCpack::riwish(u1, u2) + u1 <- v + nrow(B) + u2 <- S + crossprod(B - t(matrix(mu, nrow = 2, ncol = nrow(B)))) + tau <- MCMCpack::riwish(u1, u2) tauI <- solve(tau) - + ## Store Parameter estimates - b0GIBBS[g, ] <- b0 - b1GIBBS[g, ] <- b1 - muGIBBS[g, ] <- mu - sigGIBBS[g] <- sigma + b0GIBBS[g, ] <- b0 + b1GIBBS[g, ] <- b1 + muGIBBS[g, ] <- mu + sigGIBBS[g] <- sigma tauGIBBS[g, ] <- MCMCpack::vech(tau) - DGIBBS[g] <- sum(D) - } ## end (if nsite > 1) - + DGIBBS[g] <- sum(D) + } ## end (if nsite > 1) + ## Fit 'random species' hierarchical model ------------------------------------- if (nspp > 1) { - + } - + ## Fit alternative non-heirarchical model X <- Y <- NULL for (i in seq_len(nsite)) { X <- c(X, data[[i]]$x) Y <- c(Y, data[[i]]$y) } - Y <- log(Y) - X <- cbind(rep(1, length(X)), log(X)) - bigV <- solve(SgI * t(X) %*% X + V0I) + Y <- log(Y) + X <- cbind(rep(1, length(X)), log(X)) + bigV <- solve(SgI * t(X) %*% X + V0I) littlev <- SgI * t(X) %*% Y + V0Imu0 - Bg <- t(mvtnorm::rmvnorm(1, bigV %*% littlev, bigV)) - u1 <- s1 + nrow(X) / 2 - u2 <- s2 + as.numeric(0.5 * crossprod(Y - X %*% Bg)) - SgI <- stats::rgamma(1, u1, u2) ## precision - Sg <- 1 / SgI ## variance - Dg <- -2 * sum(stats::dnorm(Y, X %*% Bg, sqrt(Sg), log = TRUE)) - + Bg <- t(mvtnorm::rmvnorm(1, bigV %*% littlev, 
bigV)) + u1 <- s1 + nrow(X) / 2 + u2 <- s2 + as.numeric(0.5 * crossprod(Y - X %*% Bg)) + SgI <- stats::rgamma(1, u1, u2) ## precision + Sg <- 1 / SgI ## variance + Dg <- -2 * sum(stats::dnorm(Y, X %*% Bg, sqrt(Sg), log = TRUE)) + BgGIBBS[g, ] <- Bg - SgGIBBS[g] <- Sg - DgGIBBS[g] <- Dg - - if(interactive()) { + SgGIBBS[g] <- Sg + DgGIBBS[g] <- Dg + + if (interactive()) { utils::setTxtProgressBar(pb, g) } - } ## END MCMC LOOP + } ## END MCMC LOOP close(pb) - + if (nsite <= 1) { - b0GIBBS[1:nrep, ] <- 0 - b1GIBBS[1:nrep, ] <- 0 - muGIBBS[1:nrep, ] <- 0 - sigGIBBS[1:nrep] <- 0 + b0GIBBS[1:nrep, ] <- 0 + b1GIBBS[1:nrep, ] <- 0 + muGIBBS[1:nrep, ] <- 0 + sigGIBBS[1:nrep] <- 0 tauGIBBS[1:nrep, ] <- 0 - DGIBBS[1:nrep] <- 0 + DGIBBS[1:nrep] <- 0 } - + out <- cbind(b0GIBBS, b1GIBBS, muGIBBS, sigGIBBS, tauGIBBS, DGIBBS, BgGIBBS, SgGIBBS, DgGIBBS) - colnames(out) <- c(paste("b0", 1:nsite, sep = "."), - paste("b1", 1:nsite, sep = "."), - "mu0", "mu1", - "sigma", - "tau11", "tau12", "tau22", - "D", "Bg0", "Bg1", "Sg", "Dg") + colnames(out) <- c( + paste("b0", 1:nsite, sep = "."), + paste("b1", 1:nsite, sep = "."), + "mu0", "mu1", + "sigma", + "tau11", "tau12", "tau22", + "D", "Bg0", "Bg1", "Sg", "Dg" + ) return(list(mc = coda::as.mcmc(out), obs = data)) - -} # allom.BayesFit +} # allom.BayesFit diff --git a/modules/allometry/R/allom.predict.R b/modules/allometry/R/allom.predict.R index ea94f83526f..0c41d34d3a3 100644 --- a/modules/allometry/R/allom.predict.R +++ b/modules/allometry/R/allom.predict.R @@ -1,11 +1,10 @@ - #' allom.predict #' #' Function for making tree-level Monte Carlo predictions #' from allometric equations estimated from the PEcAn allometry module #' #' @param object Allometry model object. Option includes -#'\describe{ +#' \describe{ #' \item{'list of mcmc'}{ - mcmc outputs in a list by PFT then component} #' \item{'vector of file paths'}{ - path(s) to AllomAve RData files} #' \item{'directory where files are located}{ - } @@ -24,12 +23,10 @@ #' #' #' @examples -#' #' \dontrun{ -#' object = '~/Dropbox//HF C Synthesis/Allometry Papers & Analysis/' -#' dbh = seq(10,50,by=5) -#' mass = allom.predict(object,dbh,n=100) -#' +#' object <- "~/Dropbox//HF C Synthesis/Allometry Papers & Analysis/" +#' dbh <- seq(10, 50, by = 5) +#' mass <- allom.predict(object, dbh, n = 100) #' } #' #' @author Michael Dietze, Christy Rollinson @@ -40,11 +37,10 @@ # 'prediction',single.tree=TRUE) allom.predict <- function(object, dbh, pft = NULL, component = NULL, n = NULL, use = "Bg", interval = "prediction", single.tree = FALSE) { - if (is.character(object)) { object <- load.allom(object) } - + ## error checking npft <- length(object) if (npft == 0) { @@ -74,8 +70,8 @@ allom.predict <- function(object, dbh, pft = NULL, component = NULL, n = NULL, u print("ERROR: number of PFT records does not match number of DBH records") return(NA) } - - + + ## build PFT x Component table and convert mcmclist objects to mcmc pftByComp <- matrix(NA, npft, ncomp) for (i in seq_len(npft)) { @@ -116,7 +112,7 @@ allom.predict <- function(object, dbh, pft = NULL, component = NULL, n = NULL, u if (n < 1 | is.na(n)) { print(paste("invalid n", n)) } - + ## Extract relevant parameter vectors stick in a list by PFT params <- list() for (i in seq_len(npft)) { @@ -140,9 +136,8 @@ allom.predict <- function(object, dbh, pft = NULL, component = NULL, n = NULL, u params[[i]] <- object[[i]][[component]][sel, c("Bg0", "Bg1")] } else if (use[i] == "mu") { params[[i]] <- object[[i]][[component]][sel, c("mu0", "mu1")] - + #### *** should this 
case include random effects too ???? - } else { print(paste("use =", use[i], "not currently supported")) return(NA) @@ -171,7 +166,7 @@ allom.predict <- function(object, dbh, pft = NULL, component = NULL, n = NULL, u } } names(params) <- names(object) - + ### perform actual allometric calculation if (methods::is(dbh, "list")) { out <- list(length(dbh)) @@ -180,19 +175,19 @@ allom.predict <- function(object, dbh, pft = NULL, component = NULL, n = NULL, u } for (p in unique(pft)) { sel <- which(pft == p) - a <- params[[p]][,1] - b <- params[[p]][,2] + a <- params[[p]][, 1] + b <- params[[p]][, 2] if (ncol(params[[p]]) > 2) { - s <- sqrt(params[[p]][,3]) ## sigma was originally calculated as a variance, so convert to std dev + s <- sqrt(params[[p]][, 3]) ## sigma was originally calculated as a variance, so convert to std dev } else { s <- 0 } - + if (methods::is(dbh, "list")) { for (j in 1:length(sel)) { if ((methods::is(dbh[[sel[j]]], "numeric")) & (all(is.na(dbh[[sel[j]]])))) { - out[[sel[j]]] <- array(NA, c(n,1,length(dbh[[sel[j]]]))) - out[[sel[j]]][,,] <- NA + out[[sel[j]]] <- array(NA, c(n, 1, length(dbh[[sel[j]]]))) + out[[sel[j]]][, , ] <- NA next } else if (methods::is(dbh[[sel[j]]], "numeric")) { ntrees <- 1 @@ -201,36 +196,47 @@ allom.predict <- function(object, dbh, pft = NULL, component = NULL, n = NULL, u ntrees <- nrow(dbh[[sel[j]]]) nyears <- ncol(dbh[[sel[j]]]) } - - out[[sel[j]]] <- array(NA, c(n,ntrees,nyears)) - + + out[[sel[j]]] <- array(NA, c(n, ntrees, nyears)) + for (k in 1:ntrees) { epsilon <- stats::rnorm(n, 0, s) # don't fix this for a single tree; fix for a single iteration for a single site across all trees if (methods::is(dbh[[sel[j]]], "numeric")) { dbh_sel_k <- dbh[[sel[j]]] } else { - dbh_sel_k <- dbh[[sel[j]]][k,] + dbh_sel_k <- dbh[[sel[j]]][k, ] } - - log_x <- sapply(dbh_sel_k, function(x) if(is.na(x)|(x<=0)){return(NA)}else{log(x)}) - out[[sel[j]]][,k,] <- sapply(log_x, function(x) if(is.na(x)){rep(NA, n)}else{exp(a+b*x + epsilon)}) + + log_x <- sapply(dbh_sel_k, function(x) { + if (is.na(x) | (x <= 0)) { + return(NA) + } else { + log(x) + } + }) + out[[sel[j]]][, k, ] <- sapply(log_x, function(x) { + if (is.na(x)) { + rep(NA, n) + } else { + exp(a + b * x + epsilon) + } + }) } } } else if (single.tree == TRUE) { # for a dbh time-series for a single tree, fix error for each draw - epsilon = stats::rnorm(n, 0, s) + epsilon <- stats::rnorm(n, 0, s) for (i in 1:n) { - out[i,] <- exp(a[i]+b[i]*log(dbh) + epsilon[i]) + out[i, ] <- exp(a[i] + b[i] * log(dbh) + epsilon[i]) } } else { # for a dbh time-series for different trees, error not fixed across draws for (i in sel) { - out[,i] <- exp(stats::rnorm(n, a+b*log(dbh[i]),s)) + out[, i] <- exp(stats::rnorm(n, a + b * log(dbh[i]), s)) } } - } - + return(out) } # allom.predict @@ -239,7 +245,7 @@ allom.predict <- function(object, dbh, pft = NULL, component = NULL, n = NULL, u #' loads allom files #' #' @param object Allometry model object. 
Option includes -#'\describe{ +#' \describe{ #' \item{'vector of file paths'}{ - path(s) to AllomAve RData files} #' \item{'directory where files are located}{ - } #' } @@ -247,11 +253,9 @@ allom.predict <- function(object, dbh, pft = NULL, component = NULL, n = NULL, u #' @return mcmc outputs in a list by PFT then component #' #' @examples -#' #' \dontrun{ -#' object = '~/Dropbox//HF C Synthesis/Allometry Papers & Analysis/' -#' allom.mcmc = load.allom(object) -#' +#' object <- "~/Dropbox//HF C Synthesis/Allometry Papers & Analysis/" +#' allom.mcmc <- load.allom(object) #' } #' #' @author Michael Dietze @@ -262,7 +266,6 @@ load.allom <- function(object) { ## assuming object is file path, load up tmp <- list() for (i in seq_along(object)) { - if (tolower(tools::file_ext(object[i])) == "rdata") { my.files <- object[i] } else { @@ -271,14 +274,14 @@ load.allom <- function(object) { ## Need to add a 3rd option if the files are remotely on Dropbox ## download_file(object,'foo.Rdata',method='curl') works for a single file not sure how to get the ## file listing - + for (j in seq_along(my.files)) { ## parse file name my.name <- basename(my.files[j]) my.name.parts <- strsplit(my.name, split = ".", fixed = TRUE)[[1]] my.pft <- my.name.parts[length(my.name.parts) - 2] my.comp <- as.numeric(my.name.parts[length(my.name.parts) - 1]) - + ## load file itself if (my.pft %in% names(tmp)) { k <- which(names(tmp) == my.pft) @@ -292,7 +295,7 @@ load.allom <- function(object) { tmp[[k]][[my.comp]] <- tmp_env$mc } } - + ## convert mcmclist objects to mcmc for (i in seq_along(tmp)) { for (j in which(!sapply(tmp[[i]], is.null))) { @@ -301,6 +304,6 @@ load.allom <- function(object) { } } } - + return(tmp) } # load.allom diff --git a/modules/allometry/R/query.allom.data.R b/modules/allometry/R/query.allom.data.R index 26eb0ed8aac..f41b07569e3 100644 --- a/modules/allometry/R/query.allom.data.R +++ b/modules/allometry/R/query.allom.data.R @@ -1,4 +1,3 @@ - #' query.allom.data #' #' Module to grab allometric information from the raw data table @@ -17,7 +16,6 @@ #' @param nsim number of pseudo-data simulations for estimating SE #' query.allom.data <- function(pft_name, variable, con, nsim = 10000) { - ## check validity of inputs if (is.null(pft_name) | is.na(pft_name)) { print(c("invalid PFT_NAME in QUERY.ALLOM.DATA", pft_name)) @@ -35,28 +33,30 @@ query.allom.data <- function(pft_name, variable, con, nsim = 10000) { print("Connection not open in query.allom.data") return(NULL) } - + ## define storage allomParms <- NULL - + ## PFTs from trait database ################################################################## ## used to match species data to functional type - query <- paste0("SELECT s.spcd,p.id as pft,s.commonname as common,s.scientificname as scientific,", - "s.\"Symbol\"", " as acronym,s.genus,", "s.\"Family\"", ",p.name from pfts as p join pfts_species on p.id = pfts_species.pft_id join species as s on pfts_species.specie_id = s.id where p.name like '%", - pft_name, "%'") + query <- paste0( + "SELECT s.spcd,p.id as pft,s.commonname as common,s.scientificname as scientific,", + "s.\"Symbol\"", " as acronym,s.genus,", "s.\"Family\"", ",p.name from pfts as p join pfts_species on p.id = pfts_species.pft_id join species as s on pfts_species.specie_id = s.id where p.name like '%", + pft_name, "%'" + ) pft.data <- PEcAn.DB::db.query(query, con) if (length(pft.data) < 1) { print(c("QUERY.ALLOM.DATA: No species found for PFT - ", pft_name)) return(NULL) } - + ## Field data from 'Inputs' data table 
#################################################################### allomField <- NULL query <- "select * from Inputs as r join formats as f on f.id = r.format_id where f.name like 'crownAllom'" allomField.files <- PEcAn.DB::db.query(query, con) - + ## Tally data from 'Input' data table ##################################################################### ## Species = FIA code (table 4, also includes sp gravity) @@ -65,10 +65,11 @@ query.allom.data <- function(pft_name, variable, con, nsim = 10000) { ## Component.ID = table 5. priorities: Foliar=18,stem=6,16, maybe 4, fine root=28, query <- "select * from Inputs as r join formats as f on f.id = r.format_id where f.name like 'allomTally'" allomTally.files <- PEcAn.DB::db.query(query, con) - + allom <- read.allom.data(pft.data, variable, allomField.files$filepath, allomTally.files$filepath, - nsim = nsim) - + nsim = nsim + ) + return(allom) } # query.allom.data @@ -89,20 +90,20 @@ nu <- function(x) { #' @param x units: mm, cm, cm2, m, in, g, kg, lb, Mg #' @param tp diameter type, leave NULL if DBH. Options: 'd.b.h.^2','cbh','crc' AllomUnitCoef <- function(x, tp = NULL) { - y <- rep(1, length(x)) - + for (i in seq_along(x)) { y[i] <- switch(x[i], - mm = 10, - cm = 1, - cm2 = NA, - m = 0.01, - `in` = 1 / 2.54, - g = 0.001, - kg = 1, - lb = 0.4545, - Mg = 1000) + mm = 10, + cm = 1, + cm2 = NA, + m = 0.01, + `in` = 1 / 2.54, + g = 0.001, + kg = 1, + lb = 0.4545, + Mg = 1000 + ) ## variable type corrections if (!is.null(tp)) { if (tp[i] == "d.b.h.^2") { diff --git a/modules/allometry/R/read.allom.data.R b/modules/allometry/R/read.allom.data.R index 8b48962a9dc..eb22288b9d9 100644 --- a/modules/allometry/R/read.allom.data.R +++ b/modules/allometry/R/read.allom.data.R @@ -1,9 +1,8 @@ - #' read.allom.data -#' +#' #' Extracts PFT- and component-specific data and allometeric equations from the specified files. -#' -#' This code also estimates the standard error from R-squared, +#' +#' This code also estimates the standard error from R-squared, #' which is required to simulate pseudodata from the allometric eqns. 
#' #' @param pft.data PFT dataframe @@ -18,80 +17,75 @@ #' @return \item{field}{PFT-filtered field Data} #' \item{parm}{Component- and PFT-filtered Allometric Equations} read.allom.data <- function(pft.data, component, field, parm, nsim = 10000) { - allom <- list(parm = NULL, field = NULL) - + ## make sure some data was found if (is.null(field) & is.null(parm)) { print("READ.ALLOM.DATA: Neither field data nor tally data was found") return(allom) } - + ## FIELD DATA ------------------------------------------------ - - if (!is.null(field)) - { + + if (!is.null(field)) { allom$field <- list() for (i in seq_along(field)) { - ## load data dat <- utils::read.csv(field[i]) - + ## grab the response component - y <- switch(as.character(component), - `40` = dat$Ht, - `43` = dat$Ca) - + y <- switch(as.character(component), + `40` = dat$Ht, + `43` = dat$Ca + ) + ## if it exists, grab the other columns - if (!is.null(y)) - { + if (!is.null(y)) { spp <- dat$spp x <- dat$Dia entry <- data.frame(x, y, spp) - + ## match spp to PFT spp2pft <- match(dat$spp, pft.data$acronym) entry <- entry[!is.na(spp2pft), ] - + ## insert each species separately for (s in unique(entry$spp)) { sel <- which(entry$spp == s) allom$field[[length(allom$field) + 1]] <- entry[sel, ] } - } ## end Y data exists - - } ## end loop over FIELD files - } ## end field files exist - + } ## end Y data exists + } ## end loop over FIELD files + } ## end field files exist + ## EQUATION TALLY -------------------------------------------------------- - - if (!is.null(parm)) - { + + if (!is.null(parm)) { allom$parm <- utils::read.csv(parm, skip = 2, as.is = TRUE) - + ## debugging hack allom$parm <- ## utils::read.csv('/home/mdietze/stats/AllomAve/Table3_GTR-NE-319.csv',skip=2) - + ## Match TALLY data to PFT allompft <- rep(NA, nrow(allom$parm)) for (i in seq_len(nrow(allom$parm))) { - sel <- which(pft.data$spcd == allom$parm$Species[i]) ## match on USFS spp code + sel <- which(pft.data$spcd == allom$parm$Species[i]) ## match on USFS spp code if (length(sel) > 0) { allompft[i] <- pft.data$spcd[sel[1]] } } - + ## Select the entries that match the component we want sel <- which(nu(allom$parm$Component.ID) == component) ## Select just the PFT we want sel <- sel[!is.na(allompft[sel])] - + ## eliminate entries that lack an error estimate - R2 <- apply(rbind(nu(allom$parm$R2[sel]), nu(allom$parm$r[sel])^2), 2, max) + R2 <- apply(rbind(nu(allom$parm$R2[sel]), nu(allom$parm$r[sel])^2), 2, max) R2[R2 == 0] <- NA - sel <- sel[!is.na(R2)] - spp <- allompft[sel] - + sel <- sel[!is.na(R2)] + spp <- allompft[sel] + ## check and make sure we have data and can continue if (sum(!is.na(allompft)) == 0 | length(sel) == 0) { print(c("READ.ALLOM.DATA: ** Warning no match of PFT to tally data **", pft.data)) @@ -100,50 +94,52 @@ read.allom.data <- function(pft.data, component, field, parm, nsim = 10000) { return(NULL) } } - + ## extract parameter set - + ## coefficients a <- nu(allom$parm$a[sel]) b <- nu(allom$parm$b[sel]) c <- nu(allom$parm$c[sel]) d <- nu(allom$parm$d[sel]) e <- nu(allom$parm$e[sel]) - + ## equation number eqn <- nu(allom$parm$Equation.Form.ID[sel]) - + ## data range for each study - rng <- rbind(nu(allom$parm$MinDiameter[sel]), - nu(allom$parm$MaxDiameter[sel])) ## units = cm + rng <- rbind( + nu(allom$parm$MinDiameter[sel]), + nu(allom$parm$MaxDiameter[sel]) + ) ## units = cm rng[1, is.na(rng[1, ])] <- ceiling(mean(rng[1, ], na.rm = TRUE)) rng[2, is.na(rng[2, ])] <- floor(mean(rng[2, ], na.rm = TRUE)) - n <- nu(allom$parm$Sample.size[sel]) ## sample 
size for each study + n <- nu(allom$parm$Sample.size[sel]) ## sample size for each study n[is.na(n)] <- min(n, na.rm = TRUE) - nt <- sum(n) ## total sample size - + nt <- sum(n) ## total sample size + ## data error - R2 <- apply(rbind(nu(allom$parm$R2[sel]), nu(allom$parm$r[sel]) ^ 2), 2, max) + R2 <- apply(rbind(nu(allom$parm$R2[sel]), nu(allom$parm$r[sel])^2), 2, max) R2[R2 == 0] <- NA - + ## bias correction factor (multiplicative) - cf <- nu(allom$parm$Bias.correction..CF.[sel]) - cf[is.na(cf)] <- 1 ## non-log don't get a correction factor - cf[cf == 0] <- 1 ## non-log don't get a correction factor - + cf <- nu(allom$parm$Bias.correction..CF.[sel]) + cf[is.na(cf)] <- 1 ## non-log don't get a correction factor + cf[cf == 0] <- 1 ## non-log don't get a correction factor + ## units corrections Xunits <- as.character(allom$parm$Units.diameter[sel]) - Xtype <- as.character(allom$parm$Diameter[sel]) + Xtype <- as.character(allom$parm$Diameter[sel]) Xunits[Xunits == "cm" & Xtype %in% c("BA", "BArc", "d150 (BA)")] <- "cm2" - Xcor <- AllomUnitCoef(Xunits, Xtype) - + Xcor <- AllomUnitCoef(Xunits, Xtype) + Yunits <- as.character(allom$parm$Units.biomass[sel]) - Ycor <- AllomUnitCoef(Yunits) - + Ycor <- AllomUnitCoef(Yunits) + ## citations cite <- allom$parm$Source[sel] - - ##estimate standard error from the R2 + + ## estimate standard error from the R2 ##################################### ## ## code assumes a uniform distribution on the X @@ -153,19 +149,18 @@ read.allom.data <- function(pft.data, component, field, parm, nsim = 10000) { ## for definitions of equations and equation codes ## See Jenkins 2004 USFS GTR-NE-319 Table 6 ## - se <- 1 / 12 * (rng[2, ] - rng[1, ]) ^ 2 ## start by calculating var(x) for later + se <- 1 / 12 * (rng[2, ] - rng[1, ])^2 ## start by calculating var(x) for later Rratio <- (1 - R2) / R2 for (i in seq_along(sel)) { - x <- stats::runif(nsim, rng[1, i], rng[2, i]) if (!is.na(Xcor[i])) { x <- Xcor[i] * x } else { if (Xtype[i] == "d.b.h.^2") { ## convert to sq inches - x <- x * x/(2.54 * 2.54) + x <- x * x / (2.54 * 2.54) } else { - x <- x * x * pi/4 ## convert to cm Basal Area + x <- x * x * pi / 4 ## convert to cm Basal Area } } if (eqn[i] == 1) { @@ -193,7 +188,7 @@ read.allom.data <- function(pft.data, component, field, parm, nsim = 10000) { } else if (eqn[i] == 6) { y <- a[i] * (exp(b[i] + (c[i] * log(x)) + d[i] * x)) } else if (eqn[i] == 7) { - y <- a[i] + ((b[i] * (x^c[i]))/((x^c[i]) + d[i])) + y <- a[i] + ((b[i] * (x^c[i])) / ((x^c[i]) + d[i])) } else if (eqn[i] == 8) { y <- a[i] + b[i] * log10(x) } else if (eqn[i] == 9) { @@ -206,40 +201,41 @@ read.allom.data <- function(pft.data, component, field, parm, nsim = 10000) { } y <- a[i] * x^(b[i]) } - + se[i] <- sqrt(Rratio[i] * stats::var(y)) ## note: y is not units corrected because SE needs to be in original units, same as the other parms } Xmin <- rng[1, ] Xmax <- rng[2, ] - - allomParms <- as.data.frame(cbind(a, b, c, d, e, se, R2, Rratio, cf, eqn, n, - Xmin, Xmax, Xcor, Ycor, Xtype, cite, spp)) - + + allomParms <- as.data.frame(cbind( + a, b, c, d, e, se, R2, Rratio, cf, eqn, n, + Xmin, Xmax, Xcor, Ycor, Xtype, cite, spp + )) + ## screen TALLY data for nonsensible allometric coefficients drop <- NULL - drop <- c(drop, which(eqn == 1 & b * c <= 0)) # eqn 1, negative slope - drop <- c(drop, which(eqn == 2 & b == 0 & c * d <= 0)) # eqn 2, negative slope - drop <- c(drop, which(eqn == 4 & b == 0 & c * d < 0)) # eqn 4, negative slope - drop <- c(drop, which(eqn == 8 & b <= 0)) # eqn 8, negative slope - drop <- 
c(drop, which(eqn == 9 & b <= 0)) # eqn 9, negative slope - drop <- c(drop, which(eqn == 11 & a * b < 0)) # eqn 11, negative slope - + drop <- c(drop, which(eqn == 1 & b * c <= 0)) # eqn 1, negative slope + drop <- c(drop, which(eqn == 2 & b == 0 & c * d <= 0)) # eqn 2, negative slope + drop <- c(drop, which(eqn == 4 & b == 0 & c * d < 0)) # eqn 4, negative slope + drop <- c(drop, which(eqn == 8 & b <= 0)) # eqn 8, negative slope + drop <- c(drop, which(eqn == 9 & b <= 0)) # eqn 9, negative slope + drop <- c(drop, which(eqn == 11 & a * b < 0)) # eqn 11, negative slope + ## HACK: drop papers known to be suspect until units and coef can be confirmed drop <- c(drop, which(cite %in% c(12, 103, 121))) drop <- c(drop, which(cite == 91 & eqn == 4)) - + ## can't confirm equation for #8, predictions way off, looks like actually #2 drop <- c(drop, which(eqn == 8)) - + if (!is.null(drop) & length(drop) > 0) { print("** WARNING: DROPPING EQUATIONS WITH ILL-DEFINED PARAMETERS") print(c("Entry = ", sel[drop])) print(allomParms[drop, ]) allomParms <- allomParms[-drop, ] } - - } ## end HAVE ALLOM TALLY DATA - + } ## end HAVE ALLOM TALLY DATA + return(list(parm = allomParms, field = allom$field)) } # read.allom.data diff --git a/modules/allometry/inst/AllomDriver.R b/modules/allometry/inst/AllomDriver.R index eac746f3889..08c1642e7e7 100644 --- a/modules/allometry/inst/AllomDriver.R +++ b/modules/allometry/inst/AllomDriver.R @@ -1,16 +1,18 @@ ## code to run Allom stand alone ## ## load pecan settings -if(interactive()){ - settings.file = "/home/mdietze/pecan/tests/settings.bartlett.xml" +if (interactive()) { + settings.file <- "/home/mdietze/pecan/tests/settings.bartlett.xml" } else { settings.file <- Sys.getenv("PECANSETTINGS") } library(XML) settings.xml <- XML::xmlParse(settings.file) settings <- XML::xmlToList(settings.xml) -if(!is.null(settings$Rlib)){ .libPaths(settings$Rlib)} -#library(PECAn) +if (!is.null(settings$Rlib)) { + .libPaths(settings$Rlib) +} +# library(PECAn) ## libraries & database connection library(mvtnorm) @@ -20,14 +22,13 @@ library(PostgreSQL) dvr <- dbDriver("PostgreSQL") con <- db.open(settings$database$bety) ## mcmc settings -ngibbs = nu(settings$meta.analysis$iter) - +ngibbs <- nu(settings$meta.analysis$iter) -pfts = list(FAGR = data.frame(spcd=531,name="FAgr")) -outdir = "~/Downloads/" -components = 6 -allom.stats = AllomAve(pfts,components,outdir,parm="~/git/pecan/modules/allometry/data/Table3_GTR-NE-319.v2.csv",ngibbs=500,nchain=3) -pfts = list(FAGR = data.frame(spcd=531,name="FAgr")) -allom.stats = AllomAve(pfts,ngibbs=500) +pfts <- list(FAGR = data.frame(spcd = 531, name = "FAgr")) +outdir <- "~/Downloads/" +components <- 6 +allom.stats <- AllomAve(pfts, components, outdir, parm = "~/git/pecan/modules/allometry/data/Table3_GTR-NE-319.v2.csv", ngibbs = 500, nchain = 3) +pfts <- list(FAGR = data.frame(spcd = 531, name = "FAgr")) +allom.stats <- AllomAve(pfts, ngibbs = 500) diff --git a/modules/allometry/inst/JenkinsPFTs.R b/modules/allometry/inst/JenkinsPFTs.R index ec18f91d465..e55c62d1cfd 100644 --- a/modules/allometry/inst/JenkinsPFTs.R +++ b/modules/allometry/inst/JenkinsPFTs.R @@ -1,91 +1,122 @@ -#Jenkins PFT groupings (tables 1 and 3) -#actual query is performed based on the spcd number -#querying whole aboveground (2), whole aboveground (above stump) (3) and whole stem (6) biomass eqns. 
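## --- Editor's aside (not part of the patch): a minimal sketch of the standard-error
## --- reconstruction performed in read.allom.data() above. It assumes X is uniform on the
## --- study's reported diameter range and uses R^2 = var(yhat) / (var(yhat) + sigma^2),
## --- i.e. sigma = sqrt((1 - R2) / R2 * var(yhat)), which is exactly the Rratio * var(y)
## --- step in the patch. The function name, the power-law form, and the example values are
## --- illustrative only, not part of the PEcAn code.
se_from_r2 <- function(a, b, R2, dmin, dmax, nsim = 10000) {
  x <- stats::runif(nsim, dmin, dmax)    # uniform draw over the study's diameter range
  yhat <- a * x^b                        # predicted biomass from a simple power-law form
  sqrt((1 - R2) / R2 * stats::var(yhat)) # residual SE, left in the study's original units
}
## e.g. se_from_r2(a = 0.1, b = 2.4, R2 = 0.95, dmin = 5, dmax = 50)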
+# Jenkins PFT groupings (tables 1 and 3) +# actual query is performed based on the spcd number +# querying whole aboveground (2), whole aboveground (above stump) (3) and whole stem (6) biomass eqns. ######### Table 1 -aspen.alder.willow <- list(AAW = data.frame(spcd = c(350,351,353,740,741,742,743,746), - acronym=c("ALNUS","ALRU2","ALVIS","POPUL","POBA2","PODE3","POGR4","POTR5"))) - -allom.stats.aspen = AllomAve(aspen.alder.willow,components=c(2,3,6),ngibbs=500) - -###ACMA3 isn't being queried -###there isn't a component 2,3 or 6 for bigleaf maple (ACMA3) -maple.birch <- list(MB = data.frame(spcd = c(312,315,316,319,371,372,375,379), - acronym=c("ACMA3","ACPE","ACRU","ACSP2","BEAL2","BELE","BEPA","BEPO"))) - -allom.stats.maple.birch = AllomAve(maple.birch,components=c(2,3,6),ngibbs=500) - -#aesculus octandra synonym: A. flava -#Castanopsis chrysophylla synonym: Chrysolepis chrysophylla : golden chinkapin -###CACH6 isn't being queried - no 2,3, or 6 component eqns. in table 3 -###TIAMH 952 is white basswood Tilia heterophylla -mixed.hardwood <- list(MH = data.frame(spcd = c(332,431,491,541,543,544,611,621,691, - 693,711,731,761,762,763,931,951,952, - 972,970), - acronym=c("AEOC2","CACH6","COFL2","FRAM2", - "FRNI","FRPE","LIST2","LITU","NYAQ2", - "NYSY","OXAR","PLOC","PRPE2","PRSE2", - "PRVI","SAAL5","TIAM","TIAMH","ULAM","ULMUS"))) - -allom.stats.mixed.hardwood = AllomAve(mixed.hardwood,components=c(2,3,6),ngibbs=500) - -#QUPR2 is chestnut oak (quercus prinus - now quercus montana) - code also QUMO4 -#will cause issues with own data - code is QUMO4 -maple.oak.hickory.beech <- list(MOHB = data.frame(spcd = c(318,400,531,802,806,809, - 812,820,827,832,833,835, - 837), - acronym=c("ACSA3","CARYA","FAGR", - "QUAL","QUCO2","QUEL", - "QUFA","QULA3","QUNI", - "QUPR2","QURU","QUST", - "QUVE"))) - -allom.stats.mohb = AllomAve(maple.oak.hickory.beech,components=c(2,3,6),ngibbs=500) +aspen.alder.willow <- list(AAW = data.frame( + spcd = c(350, 351, 353, 740, 741, 742, 743, 746), + acronym = c("ALNUS", "ALRU2", "ALVIS", "POPUL", "POBA2", "PODE3", "POGR4", "POTR5") +)) + +allom.stats.aspen <- AllomAve(aspen.alder.willow, components = c(2, 3, 6), ngibbs = 500) + +### ACMA3 isn't being queried +### there isn't a component 2,3 or 6 for bigleaf maple (ACMA3) +maple.birch <- list(MB = data.frame( + spcd = c(312, 315, 316, 319, 371, 372, 375, 379), + acronym = c("ACMA3", "ACPE", "ACRU", "ACSP2", "BEAL2", "BELE", "BEPA", "BEPO") +)) + +allom.stats.maple.birch <- AllomAve(maple.birch, components = c(2, 3, 6), ngibbs = 500) + +# aesculus octandra synonym: A. flava +# Castanopsis chrysophylla synonym: Chrysolepis chrysophylla : golden chinkapin +### CACH6 isn't being queried - no 2,3, or 6 component eqns. 
in table 3 +### TIAMH 952 is white basswood Tilia heterophylla +mixed.hardwood <- list(MH = data.frame( + spcd = c( + 332, 431, 491, 541, 543, 544, 611, 621, 691, + 693, 711, 731, 761, 762, 763, 931, 951, 952, + 972, 970 + ), + acronym = c( + "AEOC2", "CACH6", "COFL2", "FRAM2", + "FRNI", "FRPE", "LIST2", "LITU", "NYAQ2", + "NYSY", "OXAR", "PLOC", "PRPE2", "PRSE2", + "PRVI", "SAAL5", "TIAM", "TIAMH", "ULAM", "ULMUS" + ) +)) + +allom.stats.mixed.hardwood <- AllomAve(mixed.hardwood, components = c(2, 3, 6), ngibbs = 500) + +# QUPR2 is chestnut oak (quercus prinus - now quercus montana) - code also QUMO4 +# will cause issues with own data - code is QUMO4 +maple.oak.hickory.beech <- list(MOHB = data.frame( + spcd = c( + 318, 400, 531, 802, 806, 809, + 812, 820, 827, 832, 833, 835, + 837 + ), + acronym = c( + "ACSA3", "CARYA", "FAGR", + "QUAL", "QUCO2", "QUEL", + "QUFA", "QULA3", "QUNI", + "QUPR2", "QURU", "QUST", + "QUVE" + ) +)) + +allom.stats.mohb <- AllomAve(maple.oak.hickory.beech, components = c(2, 3, 6), ngibbs = 500) ####### Table 3 -#Chamaecyparis/Thuja spp. haven't found yet - should be 3rd entry -#entry 41 and 43 don't exist in table - these are only other Chamaecyparis spp. not -#accounted for. Both Thuja spp are accounted for -cedar.larch <- list(CL = data.frame(spcd = c(81,42,68,71,73,70,212,241,242), - acronym = c("CADE27","CANO9","JUVI","LALA","LAOC", - "LARIX","SEGI2","THOC2","THPL"))) - -allom.stats.cedar.larch = AllomAve(cedar.larch,components=c(2,3,6),ngibbs=500) - -#fir/hemlock -#ABIES - fir spp. -#equation for pacific silver fir dropped (entry 14), citation 91, eqn. form: biomass = a + b*ln(X) -#entry 1085 also dropped, but it's mountain hemlock and original paper doesn't appear to have an eqn for this spp. -#citation number for 1085 is incorrect -fir.hemlock <- list(FH = data.frame(spcd = c(202,11,12,15,17,19,20,22,10,261,263,264), - acronym = c("PSME","ABAM","ABBA","ABCO","ABGR", - "ABLA","ABMA","ABPR","ABIES","TSCA", - "TSHE","TSME"))) - -allom.stats.fir.hemlock = AllomAve(fir.hemlock,components=c(2,3,6),ngibbs=500) - -#Pine -#pinyon pine also called Twoneedle pinyon (106) -pine <- list(P = data.frame(spcd = c(101,105,108,136,106,116,117,133,119,122,125,126, - 129,131), - acronym = c("PIAL","PIBA2","PICO","PIDI3","PIED","PIJE", - "PILA","PIMO","PIMO3","PIPO","PIRE","PIRI", - "PIST","PITA"))) - -allom.stats.pine = AllomAve(pine,components=c(2,3,6),ngibbs=500) - -#Spruce -#eqn 337 for Black Spruce (95) dropped by default code. Source: citation 82 -spruce <- list(S = data.frame(spcd = c(91,93,94,95,97,98,90), - acronym = c("PIAB","PIEN","PIGL","PIMA","PIRU","PISI","PICEA"))) - -allom.stats.spruce = AllomAve(spruce,components=c(2,3,6),ngibbs=500) - -#Woodland Spp. -#spp ID 300 = Acacia Spp. - not present in table 3 but prickly, earleaf & willow acacia are -woodland <- list(W = data.frame(spcd = c(475,69,65,986,814,843), - acronym = c("CELE3","JUMO","JUOS","PROSO","QUGA","QUHY"))) - -allom.stats.woodland = AllomAve(woodland,components=c(2,3,6),ngibbs=500) - - - +# Chamaecyparis/Thuja spp. haven't found yet - should be 3rd entry +# entry 41 and 43 don't exist in table - these are only other Chamaecyparis spp. not +# accounted for. 
Both Thuja spp are accounted for +cedar.larch <- list(CL = data.frame( + spcd = c(81, 42, 68, 71, 73, 70, 212, 241, 242), + acronym = c( + "CADE27", "CANO9", "JUVI", "LALA", "LAOC", + "LARIX", "SEGI2", "THOC2", "THPL" + ) +)) + +allom.stats.cedar.larch <- AllomAve(cedar.larch, components = c(2, 3, 6), ngibbs = 500) + +# fir/hemlock +# ABIES - fir spp. +# equation for pacific silver fir dropped (entry 14), citation 91, eqn. form: biomass = a + b*ln(X) +# entry 1085 also dropped, but it's mountain hemlock and original paper doesn't appear to have an eqn for this spp. +# citation number for 1085 is incorrect +fir.hemlock <- list(FH = data.frame( + spcd = c(202, 11, 12, 15, 17, 19, 20, 22, 10, 261, 263, 264), + acronym = c( + "PSME", "ABAM", "ABBA", "ABCO", "ABGR", + "ABLA", "ABMA", "ABPR", "ABIES", "TSCA", + "TSHE", "TSME" + ) +)) + +allom.stats.fir.hemlock <- AllomAve(fir.hemlock, components = c(2, 3, 6), ngibbs = 500) + +# Pine +# pinyon pine also called Twoneedle pinyon (106) +pine <- list(P = data.frame( + spcd = c( + 101, 105, 108, 136, 106, 116, 117, 133, 119, 122, 125, 126, + 129, 131 + ), + acronym = c( + "PIAL", "PIBA2", "PICO", "PIDI3", "PIED", "PIJE", + "PILA", "PIMO", "PIMO3", "PIPO", "PIRE", "PIRI", + "PIST", "PITA" + ) +)) + +allom.stats.pine <- AllomAve(pine, components = c(2, 3, 6), ngibbs = 500) + +# Spruce +# eqn 337 for Black Spruce (95) dropped by default code. Source: citation 82 +spruce <- list(S = data.frame( + spcd = c(91, 93, 94, 95, 97, 98, 90), + acronym = c("PIAB", "PIEN", "PIGL", "PIMA", "PIRU", "PISI", "PICEA") +)) + +allom.stats.spruce <- AllomAve(spruce, components = c(2, 3, 6), ngibbs = 500) + +# Woodland Spp. +# spp ID 300 = Acacia Spp. - not present in table 3 but prickly, earleaf & willow acacia are +woodland <- list(W = data.frame( + spcd = c(475, 69, 65, 986, 814, 843), + acronym = c("CELE3", "JUMO", "JUOS", "PROSO", "QUGA", "QUHY") +)) + +allom.stats.woodland <- AllomAve(woodland, components = c(2, 3, 6), ngibbs = 500) diff --git a/modules/allometry/inst/workflow.allometry.R b/modules/allometry/inst/workflow.allometry.R index 003f76ec982..d99c0c4a1e3 100644 --- a/modules/allometry/inst/workflow.allometry.R +++ b/modules/allometry/inst/workflow.allometry.R @@ -1,13 +1,13 @@ ############################################## -# Example allometry Workflow Script -# Christy Rollinson, crollinson@gmail.com +# Example allometry Workflow Script +# Christy Rollinson, crollinson@gmail.com # 25 March 2015 ############################################## ########################################################################## # Set Directories, load libraries ########################################################################## -#setwd("~/Desktop/pecan/modules/allometry/R") +# setwd("~/Desktop/pecan/modules/allometry/R") library(ggplot2) @@ -15,7 +15,7 @@ library(ggplot2) # Script Querying allometries setwd("~/Desktop/pecan/modules/allometry/R") -#outdir <- "~/Dropbox/PalEON CR/Tree Rings/Tree-Rings-and-Biomass/Uncertainty_analysis/AllomFiles/Size_tests" # CR Office +# outdir <- "~/Dropbox/PalEON CR/Tree Rings/Tree-Rings-and-Biomass/Uncertainty_analysis/AllomFiles/Size_tests" # CR Office outdir <- "~/Desktop/PalEON CR/Tree Rings/Tree-Rings-and-Biomass/Uncertainty_analysis/Pecan_Size_Testing/PecanAllom" # CR Home source("AllomAve.R") @@ -33,35 +33,39 @@ source("read.allom.data.R") # White Oak (802, QUAL) # Northern Red Oak (833, QURU) ########################################################################## -spp.list = list(ABBA = 
data.frame(spcd=12,acronym="ABBA"), - PSME = data.frame(spcd=202,acronym="PSME"), - ACRU = data.frame(spcd=316,acronym="ACRU"), - ACSA = data.frame(spcd=318,acronym="ACSA"), - QUAL = data.frame(spcd=802,acronym="QUAL"), - QURU = data.frame(spcd=833,acronym="QURU")) +spp.list <- list( + ABBA = data.frame(spcd = 12, acronym = "ABBA"), + PSME = data.frame(spcd = 202, acronym = "PSME"), + ACRU = data.frame(spcd = 316, acronym = "ACRU"), + ACSA = data.frame(spcd = 318, acronym = "ACSA"), + QUAL = data.frame(spcd = 802, acronym = "QUAL"), + QURU = data.frame(spcd = 833, acronym = "QURU") +) # Querying full range -AllomAve(spp.list,2,outdir=file.path(outdir, "0.1-500"),parm="../data/Table3_GTR-NE-319.v2.csv",ngibbs=5000, dmin=0.1, dmax=500) +AllomAve(spp.list, 2, outdir = file.path(outdir, "0.1-500"), parm = "../data/Table3_GTR-NE-319.v2.csv", ngibbs = 5000, dmin = 0.1, dmax = 500) # Querying double-truncated range -AllomAve(spp.list,2,outdir=file.path(outdir, "10-50"),parm="../data/Table3_GTR-NE-319.v2.csv",ngibbs=5000, dmin=10, dmax=50) +AllomAve(spp.list, 2, outdir = file.path(outdir, "10-50"), parm = "../data/Table3_GTR-NE-319.v2.csv", ngibbs = 5000, dmin = 10, dmax = 50) # Querying left-truncated range -AllomAve(spp.list,2,outdir=file.path(outdir, "10-500"),parm="../data/Table3_GTR-NE-319.v2.csv",ngibbs=5000, dmin=10, dmax=500) +AllomAve(spp.list, 2, outdir = file.path(outdir, "10-500"), parm = "../data/Table3_GTR-NE-319.v2.csv", ngibbs = 5000, dmin = 10, dmax = 500) # Querying right-truncated range -AllomAve(spp.list,2,outdir=file.path(outdir, "0.1-50"),parm="../data/Table3_GTR-NE-319.v2.csv",ngibbs=5000, dmin=0.1, dmax=50) +AllomAve(spp.list, 2, outdir = file.path(outdir, "0.1-50"), parm = "../data/Table3_GTR-NE-319.v2.csv", ngibbs = 5000, dmin = 0.1, dmax = 50) ########################################################################## # Plotting & running some diagnostics on the allometry with different size cutoffs ########################################################################## -#setwd("~/Desktop/PalEON CR/Tree Rings/Tree-Rings-and-Biomass/Uncertainty_analysis/Pecan_Size_Testing") +# setwd("~/Desktop/PalEON CR/Tree Rings/Tree-Rings-and-Biomass/Uncertainty_analysis/Pecan_Size_Testing") setwd("~/Dropbox/PalEON CR/Tree Rings/Tree-Rings-and-Biomass/Uncertainty_analysis/Pecan_Size_Testing") -#allom.dir <- "~/Desktop/PalEON CR/Tree Rings/Tree-Rings-and-Biomass/Uncertainty_analysis/Pecan_Size_Testing/PecanAllom/" +# allom.dir <- "~/Desktop/PalEON CR/Tree Rings/Tree-Rings-and-Biomass/Uncertainty_analysis/Pecan_Size_Testing/PecanAllom/" allom.dir <- "~/Dropbox/PalEON CR/Tree Rings/Tree-Rings-and-Biomass/Uncertainty_analysis/Pecan_Size_Testing/PecanAllom/" -allom.eq <- function(mu0, mu1, DBH) { exp(mu0 + mu1 * log(DBH) )} +allom.eq <- function(mu0, mu1, DBH) { + exp(mu0 + mu1 * log(DBH)) +} @@ -70,43 +74,43 @@ n.samp <- 500 # This number of samples will be pulled from each mc chain # Sampling the different distributions # ---------------------------------------------------- allom.full <- list() -for(s in names(spp.list)){ - load(paste0(allom.dir, "0.1-500/Allom.", s, ".2.Rdata")) - samp.temp <- array() - for(i in 1:length(mc)){ - samp.temp <- rbind(samp.temp, mc[[i]][sample(1:nrow(mc[[i]]), size=n.samp, replace=T),]) - } - allom.full[[s]] <- samp.temp +for (s in names(spp.list)) { + load(paste0(allom.dir, "0.1-500/Allom.", s, ".2.Rdata")) + samp.temp <- array() + for (i in 1:length(mc)) { + samp.temp <- rbind(samp.temp, mc[[i]][sample(1:nrow(mc[[i]]), size = n.samp, replace = T), ]) + } 
+ allom.full[[s]] <- samp.temp } allom.left <- list() -for(s in names(spp.list)){ - load(paste0(allom.dir, "10-500/Allom.", s, ".2.Rdata")) - samp.temp <- array() - for(i in 1:length(mc)){ - samp.temp <- rbind(samp.temp, mc[[i]][sample(1:nrow(mc[[i]]), size=n.samp, replace=T),]) - } - allom.left[[s]] <- samp.temp +for (s in names(spp.list)) { + load(paste0(allom.dir, "10-500/Allom.", s, ".2.Rdata")) + samp.temp <- array() + for (i in 1:length(mc)) { + samp.temp <- rbind(samp.temp, mc[[i]][sample(1:nrow(mc[[i]]), size = n.samp, replace = T), ]) + } + allom.left[[s]] <- samp.temp } allom.right <- list() -for(s in names(spp.list)){ - load(paste0(allom.dir, "0.1-50/Allom.", s, ".2.Rdata")) - samp.temp <- array() - for(i in 1:length(mc)){ - samp.temp <- rbind(samp.temp, mc[[i]][sample(1:nrow(mc[[i]]), size=n.samp, replace=T),]) - } - allom.right[[s]] <- samp.temp +for (s in names(spp.list)) { + load(paste0(allom.dir, "0.1-50/Allom.", s, ".2.Rdata")) + samp.temp <- array() + for (i in 1:length(mc)) { + samp.temp <- rbind(samp.temp, mc[[i]][sample(1:nrow(mc[[i]]), size = n.samp, replace = T), ]) + } + allom.right[[s]] <- samp.temp } allom.both <- list() -for(s in names(spp.list)){ - load(paste0(allom.dir, "10-50/Allom.", s, ".2.Rdata")) - samp.temp <- array() - for(i in 1:length(mc)){ - samp.temp <- rbind(samp.temp, mc[[i]][sample(1:nrow(mc[[i]]), size=n.samp, replace=T),]) - } - allom.both[[s]] <- samp.temp +for (s in names(spp.list)) { + load(paste0(allom.dir, "10-50/Allom.", s, ".2.Rdata")) + samp.temp <- array() + for (i in 1:length(mc)) { + samp.temp <- rbind(samp.temp, mc[[i]][sample(1:nrow(mc[[i]]), size = n.samp, replace = T), ]) + } + allom.both[[s]] <- samp.temp } # ---------------------------------------------------- @@ -117,57 +121,57 @@ temp.full <- temp.left <- temp.right <- temp.both <- list() # ------------------------------------------------ # Creating a distribution of Biomass estimations for 1 example tree # ------------------------------------------------ -for(s in names(spp.list)){ - temp.full[[s]] <- array(NA, dim=c(length(1:length(dbh.range)), nrow(allom.full[[1]]))) - temp.left[[s]] <- array(NA, dim=c(length(1:length(dbh.range)), nrow(allom.left[[1]]))) - temp.right[[s]] <- array(NA, dim=c(length(1:length(dbh.range)), nrow(allom.right[[1]]))) - temp.both[[s]] <- array(NA, dim=c(length(1:length(dbh.range)), nrow(allom.both[[1]]))) - - # ------------------------------------ - # Getting MCMC iteration estimations - # Note: the ifelse statement was originally designed to pull the hierarchical - # Coefficients by default. Because there is no hierarchical model when there's - # only 1 equation being used, those scenarios must use the global coeffcients. 
- # - # As of 25 March 2015, there's some weirdness going on with the hierarchical - # coefficients and it's safer to just pull the global coefficients instead - # ------------------------------------ - for(i in 1:nrow(allom.full[[1]])){ - # mu0 = ifelse(!(allom.full[[s]][i,"mu0"]==0 & allom.full[[s]][i,"mu1"]==0), allom.full[[s]][i,"mu0"], allom.full[[s]][i,"Bg0"]) - # mu1 = ifelse(!(allom.full[[s]][i,"mu0"]==0 & allom.full[[s]][i,"mu1"]==0), allom.full[[s]][i,"mu1"], allom.full[[s]][i,"Bg1"]) - mu0 = allom.full[[s]][i,"Bg0"] - mu1 = allom.full[[s]][i,"Bg1"] - temp.full[[s]][,i] <- allom.eq(mu0=mu0, mu1=mu1, DBH=dbh.range) - } - - for(i in 1:nrow(allom.left[[1]])){ - # mu0 = ifelse(!(allom.left[[s]][i,"mu0"]==0 & allom.left[[s]][i,"mu1"]==0), allom.left[[s]][i,"mu0"], allom.left[[s]][i,"Bg0"]) - # mu1 = ifelse(!(allom.left[[s]][i,"mu0"]==0 & allom.left[[s]][i,"mu1"]==0), allom.left[[s]][i,"mu1"], allom.left[[s]][i,"Bg1"]) - mu0 = allom.left[[s]][i,"Bg0"] - mu1 = allom.left[[s]][i,"Bg1"] - temp.left[[s]][,i] <- allom.eq(mu0=mu0, mu1=mu1, DBH=dbh.range) - } - - for(i in 1:nrow(allom.right[[1]])){ - # mu0 = ifelse(!(allom.right[[s]][i,"mu0"]==0 & allom.right[[s]][i,"mu1"]==0), allom.right[[s]][i,"mu0"], allom.right[[s]][i,"Bg0"]) - # mu1 = ifelse(!(allom.right[[s]][i,"mu0"]==0 & allom.right[[s]][i,"mu1"]==0), allom.right[[s]][i,"mu1"], allom.right[[s]][i,"Bg1"]) - mu0 = allom.right[[s]][i,"Bg0"] - mu1 = allom.right[[s]][i,"Bg1"] - temp.right[[s]][,i] <- allom.eq(mu0=mu0, mu1=mu1, DBH=dbh.range) - } - - for(i in 1:nrow(allom.both[[1]])){ - # mu0 = ifelse(!(allom.both[[s]][i,"mu0"]==0 & allom.both[[s]][i,"mu1"]==0), allom.both[[s]][i,"mu0"], allom.both[[s]][i,"Bg0"]) - # mu1 = ifelse(!(allom.both[[s]][i,"mu0"]==0 & allom.both[[s]][i,"mu1"]==0), allom.both[[s]][i,"mu1"], allom.both[[s]][i,"Bg1"]) - mu0 = allom.both[[s]][i,"Bg0"] - mu1 = allom.both[[s]][i,"Bg1"] - temp.both[[s]][,i] <- allom.eq(mu0=mu0, mu1=mu1, DBH=dbh.range) - } - # End iterations loops - # ------------------------------------ +for (s in names(spp.list)) { + temp.full[[s]] <- array(NA, dim = c(length(1:length(dbh.range)), nrow(allom.full[[1]]))) + temp.left[[s]] <- array(NA, dim = c(length(1:length(dbh.range)), nrow(allom.left[[1]]))) + temp.right[[s]] <- array(NA, dim = c(length(1:length(dbh.range)), nrow(allom.right[[1]]))) + temp.both[[s]] <- array(NA, dim = c(length(1:length(dbh.range)), nrow(allom.both[[1]]))) + + # ------------------------------------ + # Getting MCMC iteration estimations + # Note: the ifelse statement was originally designed to pull the hierarchical + # Coefficients by default. Because there is no hierarchical model when there's + # only 1 equation being used, those scenarios must use the global coeffcients. 
+ # + # As of 25 March 2015, there's some weirdness going on with the hierarchical + # coefficients and it's safer to just pull the global coefficients instead + # ------------------------------------ + for (i in 1:nrow(allom.full[[1]])) { + # mu0 = ifelse(!(allom.full[[s]][i,"mu0"]==0 & allom.full[[s]][i,"mu1"]==0), allom.full[[s]][i,"mu0"], allom.full[[s]][i,"Bg0"]) + # mu1 = ifelse(!(allom.full[[s]][i,"mu0"]==0 & allom.full[[s]][i,"mu1"]==0), allom.full[[s]][i,"mu1"], allom.full[[s]][i,"Bg1"]) + mu0 <- allom.full[[s]][i, "Bg0"] + mu1 <- allom.full[[s]][i, "Bg1"] + temp.full[[s]][, i] <- allom.eq(mu0 = mu0, mu1 = mu1, DBH = dbh.range) + } + + for (i in 1:nrow(allom.left[[1]])) { + # mu0 = ifelse(!(allom.left[[s]][i,"mu0"]==0 & allom.left[[s]][i,"mu1"]==0), allom.left[[s]][i,"mu0"], allom.left[[s]][i,"Bg0"]) + # mu1 = ifelse(!(allom.left[[s]][i,"mu0"]==0 & allom.left[[s]][i,"mu1"]==0), allom.left[[s]][i,"mu1"], allom.left[[s]][i,"Bg1"]) + mu0 <- allom.left[[s]][i, "Bg0"] + mu1 <- allom.left[[s]][i, "Bg1"] + temp.left[[s]][, i] <- allom.eq(mu0 = mu0, mu1 = mu1, DBH = dbh.range) + } + + for (i in 1:nrow(allom.right[[1]])) { + # mu0 = ifelse(!(allom.right[[s]][i,"mu0"]==0 & allom.right[[s]][i,"mu1"]==0), allom.right[[s]][i,"mu0"], allom.right[[s]][i,"Bg0"]) + # mu1 = ifelse(!(allom.right[[s]][i,"mu0"]==0 & allom.right[[s]][i,"mu1"]==0), allom.right[[s]][i,"mu1"], allom.right[[s]][i,"Bg1"]) + mu0 <- allom.right[[s]][i, "Bg0"] + mu1 <- allom.right[[s]][i, "Bg1"] + temp.right[[s]][, i] <- allom.eq(mu0 = mu0, mu1 = mu1, DBH = dbh.range) + } + + for (i in 1:nrow(allom.both[[1]])) { + # mu0 = ifelse(!(allom.both[[s]][i,"mu0"]==0 & allom.both[[s]][i,"mu1"]==0), allom.both[[s]][i,"mu0"], allom.both[[s]][i,"Bg0"]) + # mu1 = ifelse(!(allom.both[[s]][i,"mu0"]==0 & allom.both[[s]][i,"mu1"]==0), allom.both[[s]][i,"mu1"], allom.both[[s]][i,"Bg1"]) + mu0 <- allom.both[[s]][i, "Bg0"] + mu1 <- allom.both[[s]][i, "Bg1"] + temp.both[[s]][, i] <- allom.eq(mu0 = mu0, mu1 = mu1, DBH = dbh.range) + } + # End iterations loops + # ------------------------------------ } # end species loop # ------------------------------------------------ -plot(temp.both[[1]][,2]) +plot(temp.both[[1]][, 2]) # ------------------------------------------------ # Condensing the Distributions into ranges & 95% CI @@ -176,37 +180,37 @@ full.final <- left.final <- right.final <- both.final <- allom.final.list <- lis allom.final <- data.frame() -for(s in names(spp.list)){ - full.mean <- apply(temp.full[[s]], 1, mean, na.rm=T) - full.ci <- apply(temp.full[[s]], 1, quantile, c(0.025, 0.975), na.rm=T) - full.final[[s]] <- data.frame(DBH=dbh.range, Mean=full.mean, LB=full.ci[1,], UB=full.ci[2,], Allom="Full Distribution", Species=s) +for (s in names(spp.list)) { + full.mean <- apply(temp.full[[s]], 1, mean, na.rm = T) + full.ci <- apply(temp.full[[s]], 1, quantile, c(0.025, 0.975), na.rm = T) + full.final[[s]] <- data.frame(DBH = dbh.range, Mean = full.mean, LB = full.ci[1, ], UB = full.ci[2, ], Allom = "Full Distribution", Species = s) + + left.mean <- apply(temp.left[[s]], 1, mean, na.rm = T) + left.ci <- apply(temp.left[[s]], 1, quantile, c(0.025, 0.975), na.rm = T) + left.final[[s]] <- data.frame(DBH = dbh.range, Mean = left.mean, LB = left.ci[1, ], UB = left.ci[2, ], Allom = "Left-Truncated", Species = s) - left.mean <- apply(temp.left[[s]], 1, mean, na.rm=T) - left.ci <- apply(temp.left[[s]], 1, quantile, c(0.025, 0.975), na.rm=T) - left.final[[s]] <- data.frame(DBH=dbh.range, Mean=left.mean, LB=left.ci[1,], UB=left.ci[2,], 
Allom="Left-Truncated", Species=s) + right.mean <- apply(temp.right[[s]], 1, mean, na.rm = T) + right.ci <- apply(temp.right[[s]], 1, quantile, c(0.025, 0.975), na.rm = T) + right.final[[s]] <- data.frame(DBH = dbh.range, Mean = right.mean, LB = right.ci[1, ], UB = right.ci[2, ], Allom = "Right-Truncated", Species = s) - right.mean <- apply(temp.right[[s]], 1, mean, na.rm=T) - right.ci <- apply(temp.right[[s]], 1, quantile, c(0.025, 0.975), na.rm=T) - right.final[[s]] <- data.frame(DBH=dbh.range, Mean=right.mean, LB=right.ci[1,], UB=right.ci[2,], Allom="Right-Truncated", Species=s) + both.mean <- apply(temp.both[[s]], 1, mean, na.rm = T) + both.ci <- apply(temp.both[[s]], 1, quantile, c(0.025, 0.975), na.rm = T) + both.final[[s]] <- data.frame(DBH = dbh.range, Mean = both.mean, LB = both.ci[1, ], UB = both.ci[2, ], Allom = "Double-Truncated", Species = s) - both.mean <- apply(temp.both[[s]], 1, mean, na.rm=T) - both.ci <- apply(temp.both[[s]], 1, quantile, c(0.025, 0.975), na.rm=T) - both.final[[s]] <- data.frame(DBH=dbh.range, Mean=both.mean, LB=both.ci[1,], UB=both.ci[2,], Allom="Double-Truncated", Species=s) - - allom.final <- rbind(allom.final, full.final[[s]], left.final[[s]], right.final[[s]], both.final[[s]]) + allom.final <- rbind(allom.final, full.final[[s]], left.final[[s]], right.final[[s]], both.final[[s]]) } # ------------------------------------------------ summary(allom.final) -q.blank <- theme(axis.line=element_line(color="black", size=0.5), panel.grid.major=element_blank(), panel.grid.minor= element_blank(), panel.border= element_blank(), panel.background= element_blank(), axis.text.x=element_text(angle=0, color="black", size=12), axis.text.y=element_text(color="black", size=12), axis.title.x=element_text(face="bold", size=14), axis.title.y=element_text(face="bold", size=14)) - - -ggplot(data=allom.final) + facet_wrap(~Species) + - geom_ribbon(aes(x=DBH, ymin=LB, ymax=UB, fill=Allom), alpha=0.25) + - geom_line(aes(x=DBH, y=Mean, color=Allom, linetype=Allom), size=2) + - scale_y_continuous(name="Biomass (Mg/tree)") + - scale_x_continuous(name="DBH (cm)") + - q.blank + - theme(legend.position=c(0.15, 0.8)) - +q.blank <- theme(axis.line = element_line(color = "black", size = 0.5), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), panel.background = element_blank(), axis.text.x = element_text(angle = 0, color = "black", size = 12), axis.text.y = element_text(color = "black", size = 12), axis.title.x = element_text(face = "bold", size = 14), axis.title.y = element_text(face = "bold", size = 14)) + + +ggplot(data = allom.final) + + facet_wrap(~Species) + + geom_ribbon(aes(x = DBH, ymin = LB, ymax = UB, fill = Allom), alpha = 0.25) + + geom_line(aes(x = DBH, y = Mean, color = Allom, linetype = Allom), size = 2) + + scale_y_continuous(name = "Biomass (Mg/tree)") + + scale_x_continuous(name = "DBH (cm)") + + q.blank + + theme(legend.position = c(0.15, 0.8)) diff --git a/modules/allometry/tests/testthat/test_AllomAve.R b/modules/allometry/tests/testthat/test_AllomAve.R index 0bda132225b..6738e11f7de 100644 --- a/modules/allometry/tests/testthat/test_AllomAve.R +++ b/modules/allometry/tests/testthat/test_AllomAve.R @@ -1,35 +1,35 @@ - test_that("AllomAve writes raw outputs to specified path", { - outdir <- tempfile("allomAve_test") - withr::local_file(outdir) # deletes outdir when test ends - dir.create(outdir, recursive = TRUE) - - pfts <- list(FAGR = data.frame(spcd = 531, acronym = "FAGR")) - allom_stats <- AllomAve( - pfts, - 
components = 6, - outdir = outdir, - ngibbs = 5, - nchain = 2) + outdir <- tempfile("allomAve_test") + withr::local_file(outdir) # deletes outdir when test ends + dir.create(outdir, recursive = TRUE) - expect_true(file.exists(file.path(outdir, "Allom.FAGR.6.Rdata"))) - expect_true(file.exists(file.path(outdir, "Allom.FAGR.6.MCMC.pdf"))) + pfts <- list(FAGR = data.frame(spcd = 531, acronym = "FAGR")) + allom_stats <- AllomAve( + pfts, + components = 6, + outdir = outdir, + ngibbs = 5, + nchain = 2 + ) + expect_true(file.exists(file.path(outdir, "Allom.FAGR.6.Rdata"))) + expect_true(file.exists(file.path(outdir, "Allom.FAGR.6.MCMC.pdf"))) }) test_that("AllomAve writes to cwd by default", { - outdir <- tempfile("allomAve_test_cwd") - withr::local_file(outdir) # deletes outdir when test ends - dir.create(outdir, recursive = TRUE) - withr::local_dir(outdir) # sets working dir until test ends + outdir <- tempfile("allomAve_test_cwd") + withr::local_file(outdir) # deletes outdir when test ends + dir.create(outdir, recursive = TRUE) + withr::local_dir(outdir) # sets working dir until test ends - pfts <- list(FAGR = data.frame(spcd = 531, acronym = "FAGR")) - allom_stats <- AllomAve( - pfts, - components = 18, - ngibbs = 5, - nchain = 2) + pfts <- list(FAGR = data.frame(spcd = 531, acronym = "FAGR")) + allom_stats <- AllomAve( + pfts, + components = 18, + ngibbs = 5, + nchain = 2 + ) - expect_true(file.exists(file.path(outdir, "Allom.FAGR.18.Rdata"))) - expect_true(file.exists(file.path(outdir, "Allom.FAGR.18.MCMC.pdf"))) + expect_true(file.exists(file.path(outdir, "Allom.FAGR.18.Rdata"))) + expect_true(file.exists(file.path(outdir, "Allom.FAGR.18.MCMC.pdf"))) }) diff --git a/modules/allometry/tests/testthat/test_coefs.R b/modules/allometry/tests/testthat/test_coefs.R index 8791c2e4f05..eae321d5d71 100644 --- a/modules/allometry/tests/testthat/test_coefs.R +++ b/modules/allometry/tests/testthat/test_coefs.R @@ -1,22 +1,26 @@ test_that("unit conversion", { - expect_equal( - AllomUnitCoef(c("mm", "Mg", "in")), - c(10, 1000, 1 / 2.54)) + expect_equal( + AllomUnitCoef(c("mm", "Mg", "in")), + c(10, 1000, 1 / 2.54) + ) - # unknown value of x -> error - expect_error(AllomUnitCoef("invalid")) + # unknown value of x -> error + expect_error(AllomUnitCoef("invalid")) - expect_equal( - AllomUnitCoef(x = c("cm", "cm", "m"), tp = c("d.b.h.^2", "crc", "cbh")), - c(NA, pi, 0.01 * pi)) + expect_equal( + AllomUnitCoef(x = c("cm", "cm", "m"), tp = c("d.b.h.^2", "crc", "cbh")), + c(NA, pi, 0.01 * pi) + ) - # unknown value of tp -> ignored - expect_equal( - AllomUnitCoef(x = "cm", tp = "invalid"), - 1) + # unknown value of tp -> ignored + expect_equal( + AllomUnitCoef(x = "cm", tp = "invalid"), + 1 + ) - # length(tp) must equal length(x) - expect_error( - AllomUnitCoef(x = c("kg", "cm"), tp = "crc"), - "missing value") + # length(tp) must equal length(x) + expect_error( + AllomUnitCoef(x = c("kg", "cm"), tp = "crc"), + "missing value" + ) }) diff --git a/modules/assim.batch/R/autoburnin.R b/modules/assim.batch/R/autoburnin.R index c369c6bb761..1a63d9015e5 100644 --- a/modules/assim.batch/R/autoburnin.R +++ b/modules/assim.batch/R/autoburnin.R @@ -1,28 +1,28 @@ #' @title Calculate burnin value -#' +#' #' @description Automatically detect burnin based on one of several methods. #' @param jags_out List of MCMC sample matrices or `mcmc.list` object #' @param threshold Maximum value of Gelman diagnostic -#' @param method Character string indicating method. Options are +#' @param method Character string indicating method. 
Options are #' "moving.window" (default) or "gelman.plot". -#' @param use.confidence Logical. If TRUE (default), use 95% confidence +#' @param use.confidence Logical. If TRUE (default), use 95% confidence #' interval for Gelman Diagnostic. If FALSE, use the point estimate. #' @param plotfile path #' @param ... Other parameters to methods -#' -#' @details +#' +#' @details #' See "gelman_diag_mw" and "gelman_diag_gelmanPlot" -#' +#' #' @examples -#' z1 <- coda::mcmc(c(rnorm(2500, 5), rnorm(2500, 0))) -#' z2 <- coda::mcmc(c(rnorm(2500, -5), rnorm(2500, 0))) -#' z <- coda::mcmc.list(z1, z2) -#' burnin <- getBurnin(z, threshold = 1.05) +#' z1 <- coda::mcmc(c(rnorm(2500, 5), rnorm(2500, 0))) +#' z2 <- coda::mcmc(c(rnorm(2500, -5), rnorm(2500, 0))) +#' z <- coda::mcmc.list(z1, z2) +#' burnin <- getBurnin(z, threshold = 1.05) #' @author Alexey Shiklomanov, Michael Dietze #' @export getBurnin <- function(jags_out, - threshold = 1.1, + threshold = 1.1, use.confidence = TRUE, method = "moving.window", plotfile = "/dev/null", @@ -43,11 +43,13 @@ getBurnin <- function(jags_out, gbr_exceed <- gbr_values > threshold if (all(!gbr_exceed)) { # Chains converged instantly -- no burnin required - burnin <- 2 # This isn't 1 to allow testing for convergence with `burnin == 1` + burnin <- 2 # This isn't 1 to allow testing for convergence with `burnin == 1` } else { index <- utils::tail(which(rowSums(gbr_exceed) > 0), 1) + 1 - stopifnot(length(index) == 1, - inherits(index, c("numeric", "integer"))) + stopifnot( + length(index) == 1, + inherits(index, c("numeric", "integer")) + ) if (index > dim(GBR)[1]) { burnin <- NA } else { @@ -70,15 +72,15 @@ getBurnin <- function(jags_out, #' #' @author Michael Dietze, Alexey Shiklomanov #' @param jags_out JAGS output -#' @param return.burnin Logical. If `TRUE`, return burnin value in addition to +#' @param return.burnin Logical. If `TRUE`, return burnin value in addition to #' samples (as list). Default = FALSE. -#' @param ... Additional arguments for \code{getBurnin}, \code{gelman_diag_mw}, +#' @param ... Additional arguments for \code{getBurnin}, \code{gelman_diag_mw}, #' and \code{gelman.diag}. #' @examples -#' z1 <- coda::mcmc(c(rnorm(2500, 5), rnorm(2500, 0))) -#' z2 <- coda::mcmc(c(rnorm(2500, -5), rnorm(2500, 0))) -#' z <- coda::mcmc.list(z1, z2) -#' z_burned <- autoburnin(z) +#' z1 <- coda::mcmc(c(rnorm(2500, 5), rnorm(2500, 0))) +#' z2 <- coda::mcmc(c(rnorm(2500, -5), rnorm(2500, 0))) +#' z <- coda::mcmc.list(z1, z2) +#' z_burned <- autoburnin(z) #' @export autoburnin <- function(jags_out, return.burnin = FALSE, ...) { burnin <- getBurnin(jags_out, ...) @@ -87,8 +89,10 @@ autoburnin <- function(jags_out, return.burnin = FALSE, ...) { } else if (burnin > 1) { samples <- stats::window(jags_out, start = burnin) } else { - stop("Bad return value for burnin: \n", - burnin) + stop( + "Bad return value for burnin: \n", + burnin + ) } if (return.burnin) { out <- list(samples = samples, burnin = burnin) diff --git a/modules/assim.batch/R/gelman_diag.R b/modules/assim.batch/R/gelman_diag.R index ea67b1bdc43..6610b1897f1 100644 --- a/modules/assim.batch/R/gelman_diag.R +++ b/modules/assim.batch/R/gelman_diag.R @@ -11,11 +11,10 @@ #' @export gelman_diag_mw <- function(x, width_fraction = 0.1, - width = ceiling(coda::niter(x)*width_fraction), + width = ceiling(coda::niter(x) * width_fraction), njump = 50, include.mpsrf = TRUE, ...) 
{ - stopifnot(inherits(x, c("mcmc", "mcmc.list"))) stopifnot(width %% 1 == 0) stopifnot(njump %% 1 == 0) @@ -30,9 +29,11 @@ gelman_diag_mw <- function(x, stop("End index vector has length 0") } if (length(a) != length(b)) { - stop("Start and end index vector length mismatch.\n", - "Start length = ", length(a), "\n", - "End length = ", length(b)) + stop( + "Start and end index vector length mismatch.\n", + "Start length = ", length(a), "\n", + "End length = ", length(b) + ) } n_row <- length(a) n_col <- coda::nvar(x) + 2 @@ -47,28 +48,29 @@ gelman_diag_mw <- function(x, } gdmat <- array(numeric(), c(n_row, n_col, 2)) dimnames(gdmat)[[2]] <- col_names - gdmat[,1,] <- a - gdmat[,2,] <- b + gdmat[, 1, ] <- a + gdmat[, 2, ] <- b for (i in seq_len(n_row)) { - xsub <- stats::window(x, start=a[i], end=b[i]) - gd_raw <- coda::gelman.diag(xsub, - autoburnin=FALSE, - multivariate = include.mpsrf) + xsub <- stats::window(x, start = a[i], end = b[i]) + gd_raw <- coda::gelman.diag(xsub, + autoburnin = FALSE, + multivariate = include.mpsrf + ) gd <- gd_raw$psrf if (include.mpsrf) { gd <- rbind(gd, "mpsrf" = rep(gd_raw$mpsrf, 2)) } gdmat[i, -(1:2), ] <- gd } - return (gdmat) + return(gdmat) } # gelman_diag_mw #' @title Calculate Gelman Diagnostic using coda::gelman.plot -#' +#' #' @author Alexey Shiklomanov #' @param x MCMC samples -#' @param ... additional arguments -#' @description Calculates Gelman diagnostic cumulatively. This is a much +#' @param ... additional arguments +#' @description Calculates Gelman diagnostic cumulatively. This is a much #' more conservative approach than the moving-window method. #' @export gelman_diag_gelmanPlot <- function(x, ...) { @@ -77,9 +79,8 @@ gelman_diag_gelmanPlot <- function(x, ...) { grDevices::dev.off() GBR <- array(numeric(), dim(GBR_raw$shrink) + c(0, 2, 0)) dimnames(GBR)[[2]] <- c("Start", "End", dimnames(GBR_raw$shrink)[[2]]) - GBR[,-(1:2),] <- GBR_raw$shrink + GBR[, -(1:2), ] <- GBR_raw$shrink GBR[, 2, ] <- GBR_raw$last.iter GBR[, 1, 1] <- GBR[, 1, 2] <- c(1, GBR[-nrow(GBR), 2, 1] + 1) return(GBR) } - diff --git a/modules/assim.batch/R/get.da.data.R b/modules/assim.batch/R/get.da.data.R index 16997b7b4ca..72e897f620a 100644 --- a/modules/assim.batch/R/get.da.data.R +++ b/modules/assim.batch/R/get.da.data.R @@ -2,10 +2,10 @@ ## ported by M. Dietze 08/30/12 ## some of this is redundant with other parts of PEcAn and needs to be cleaned up -#library(hdf5) -#source('./code/R/edview.base.R') -#source('./code/R/utils.R') -#source('./code/R/model.specific.R') +# library(hdf5) +# source('./code/R/edview.base.R') +# source('./code/R/utils.R') +# source('./code/R/model.specific.R') dlaplace <- function(x, mean, shape, ...) { stats::dexp(abs(mean - x), shape, ...) @@ -14,8 +14,10 @@ dlaplace <- function(x, mean, shape, ...) { # LIKELIHOOD calculate.nee.L <- function(yeardoytime, model.i.nee, observed.flux, be, bu) { - model.flux <- data.frame(yeardoytime = yeardoytime[seq(model.i.nee)], - model.i.nee = model.i.nee) + model.flux <- data.frame( + yeardoytime = yeardoytime[seq(model.i.nee)], + model.i.nee = model.i.nee + ) all.fluxes <- merge(observed.flux, model.flux, by = "yeardoytime") sigma <- with(all.fluxes, stats::coef(stats::lm(abs(model.i.nee - FC) ~ abs(model.i.nee)))) @@ -26,10 +28,14 @@ calculate.nee.L <- function(yeardoytime, model.i.nee, observed.flux, be, bu) { uptake <- which(all.fluxes$model.i.nee > 0) ## are these calculations correct, with respect to slope and intercepts? 
- logL[emissions] <- with(all.fluxes[emissions, ], - dlaplace(FC, model.i.nee, 1 / (be[1] + be[2] * abs(model.i.nee)), log = TRUE)) - logL[uptake] <- with(all.fluxes[uptake, ], - dlaplace(FC, model.i.nee, 1/(bu[1] + bu[2] * abs(model.i.nee)), log = TRUE)) + logL[emissions] <- with( + all.fluxes[emissions, ], + dlaplace(FC, model.i.nee, 1 / (be[1] + be[2] * abs(model.i.nee)), log = TRUE) + ) + logL[uptake] <- with( + all.fluxes[uptake, ], + dlaplace(FC, model.i.nee, 1 / (bu[1] + bu[2] * abs(model.i.nee)), log = TRUE) + ) # NEE.acf <- stats::acf(all.fluxes$model.i.nee, 100, plot=FALSE) ar.coef <- stats::ar(model.i.nee, FALSE, 1)$ar @@ -41,8 +47,8 @@ calculate.nee.L <- function(yeardoytime, model.i.nee, observed.flux, be, bu) { get.da.data <- function(out.dir, ameriflux.dir, years, be, bu, ensemble.size = 199) { - samples.file <- paste(out.dir, "samples.Rdata" , sep = "") - if(file.exists(samples.file)) { + samples.file <- paste(out.dir, "samples.Rdata", sep = "") + if (file.exists(samples.file)) { samples <- new.env() load(samples.file, envir = samples) ensemble.samples <- samples$ensemble.samples @@ -86,7 +92,6 @@ get.da.data <- function(out.dir, ameriflux.dir, years, be, bu, ensemble.size = 1 sa.x[[run.id]] <- do.call(cbind, trait.samples) ## loop over pfts for (i in seq(names(sa.samples))) { - traits <- colnames(sa.samples[[i]]) quantiles.str <- rownames(sa.samples[[i]]) @@ -98,8 +103,9 @@ get.da.data <- function(out.dir, ameriflux.dir, years, be, bu, ensemble.size = 1 trait.samples <- median.samples trait.samples[[i]][trait] <- sa.samples[[i]][quantile.str, trait] run.id <- PEcAn.utils::get.run.id("SA", round(quantile, 3), - trait = trait, - pft.name = names(trait.samples)[i]) + trait = trait, + pft.name = names(trait.samples)[i] + ) sa.x[[run.id]] <- do.call(cbind, trait.samples) } } @@ -108,26 +114,29 @@ get.da.data <- function(out.dir, ameriflux.dir, years, be, bu, ensemble.size = 1 } sa.x <- do.call(rbind, sa.x) sa.run.ids <- rownames(sa.x) - run.ids <- ensemble.run.ids # c(ensemble.run.ids, sa.run.ids) - x <- ensemble.x # rbind(ensemble.x, sa.x) + run.ids <- ensemble.run.ids # c(ensemble.run.ids, sa.run.ids) + x <- ensemble.x # rbind(ensemble.x, sa.x) points.per.day <- 48 - dtime <- do.call(c, lapply(years, - function(year) { - nodays <- PEcAn.utils::days_in_year(year) - year + seq(1, nodays, by = 1 / points.per.day)[-1] / nodays - })) + dtime <- do.call(c, lapply( + years, + function(year) { + nodays <- PEcAn.utils::days_in_year(year) + year + seq(1, nodays, by = 1 / points.per.day)[-1] / nodays + } + )) # run.ids<-ensemble.run.ids # x <- ensemble.x y <- t(as.data.frame(lapply(run.ids, function(run.id) { - outname <- paste0(run.id, "-T-(", paste(paste("(", years, ")", sep = ""), collapse = "|"), ")") data <- read.output.type(out.dir, outname = outname, pattern = "-T-") data <- data$AVG_GPP - data$AVG_PLANT_RESP - data$AVG_HTROPH_RESP - calculate.nee.L(dtime, data, - observed[c("yeardoytime", "FC")], - be, bu) + calculate.nee.L( + dtime, data, + observed[c("yeardoytime", "FC")], + be, bu + ) }))) save(x, y, file = paste(out.dir, "L.nee.Rdata", sep = "")) @@ -136,7 +145,7 @@ get.da.data <- function(out.dir, ameriflux.dir, years, be, bu, ensemble.size = 1 } # get.da.data -#get.da.data('./pecan/BarrowDA5param/', 'barrow/validation/usakbarr', years=1998:2006, +# get.da.data('./pecan/BarrowDA5param/', 'barrow/validation/usakbarr', years=1998:2006, # be=c(0.20, 0.04), bu=c(0.31, -0.05)) -#get.da.data('./pecan/AtqasukDA5param/', 'atqasuk/validation/usatqasu', years=2000:2006, +# 
get.da.data('./pecan/AtqasukDA5param/', 'atqasuk/validation/usatqasu', years=2000:2006, # be=c(0.75, 0.23), bu=c(1.08, -0.21)) diff --git a/modules/assim.batch/R/get.da.data.growth.R b/modules/assim.batch/R/get.da.data.growth.R index 37737b37bd7..3854ac3be15 100644 --- a/modules/assim.batch/R/get.da.data.growth.R +++ b/modules/assim.batch/R/get.da.data.growth.R @@ -1,32 +1,31 @@ ## Carl Davidson code for dealing with height growth data for emulator-based DA ## ported by M. Dietze 08/30/12 -## some of this is redundant with other parts of PEcAn and needs to be cleaned up +## some of this is redundant with other parts of PEcAn and needs to be cleaned up -#library(hdf5) -#source('./code/R/edview.base.R') -#source('./code/R/utils.R') -#source('./code/R/model.specific.R') +# library(hdf5) +# source('./code/R/edview.base.R') +# source('./code/R/utils.R') +# source('./code/R/model.specific.R') get.height <- function(growth, monthly, j, year.i0) { - monthly$HITE[year.i0][growth$plot[j] == monthly$PATCH_NAME[year.i0] & - growth$individual[j] == monthly$COHORT_NAME[year.i0] & - monthly$HITE[year.i0] > 0.01] + monthly$HITE[year.i0][growth$plot[j] == monthly$PATCH_NAME[year.i0] & + growth$individual[j] == monthly$COHORT_NAME[year.i0] & + monthly$HITE[year.i0] > 0.01] # ignore new cohorts by matching against hgt_min } # get.height # LIKELIHOOD calculate.growth.L <- function(yearly, growth, error, years) { - year.names <- as.character(years) year.ranges <- c(0, cumsum(yearly$NCOHORTS_GLOBAL)) years.i <- lapply(seq(years), function(i) { year.ranges[i]:year.ranges[i + 1] }) names(years.i) <- year.names - + cumLogL <- 0 squares <- c() - + for (i in seq(years)[-1]) { year <- year.names[i] year.i1 <- years.i[[i]] @@ -41,7 +40,7 @@ calculate.growth.L <- function(yearly, growth, error, years) { warnings("0 growth") } modeled.growth <- height1 - height0 - + if (length(modeled.growth) > 0 && !is.na(modeled.growth)) { # print(growth$pft[j]) print(paste('diff ', (modeled.growth - observed.growth))) if (any(modeled.growth < 0)) { @@ -50,7 +49,7 @@ calculate.growth.L <- function(yearly, growth, error, years) { # squares <- c(squares, (modeled.growth - observed.growth)^2) logL <- stats::dnorm((observed.growth), (modeled.growth), error, log = TRUE) if (any(is.infinite(logL))) { - stop("Infinite likelihood") #AKA really large value + stop("Infinite likelihood") # AKA really large value } # else print(paste(growth$pft[j], 'logL', logL)) cumLogL <- cumLogL - sum(logL, na.rm = TRUE) @@ -67,22 +66,21 @@ calculate.growth.L <- function(yearly, growth, error, years) { } # calculate.growth.L get.da.data.growth <- function() { - out.dir <- "./pecan/Toolik/growth/" - + buds <- utils::read.csv("./toolik/validation/Survey/ToolikVegSurvey.csv", sep = "\t") buds <- buds[!is.na(buds$length), ] buds <- buds[buds$pft != "graminoid", ] heights <- buds[, c("length", paste0("X", 2010:2003))] heights <- as.matrix(heights[!is.na(heights$length), ]) / 1000 - growth <- do.call(rbind, lapply(1:nrow(heights), function(i) - (diff(as.numeric(heights[i, ]))))) + growth <- do.call(rbind, lapply(1:nrow(heights), function(i) -(diff(as.numeric(heights[i, ]))))) colnames(growth) <- 2011:2004 growth <- cbind(buds[, c("plot", "individual", "pft")], growth) - + ensemble.size <- 500 - samples.file <- paste(out.dir, "samples.Rdata" , sep = "") - - if(file.exists(samples.file)) { + samples.file <- paste(out.dir, "samples.Rdata", sep = "") + + if (file.exists(samples.file)) { samples <- new.env() load(samples.file, envir = samples) ensemble.samples <- 
samples$ensemble.samples @@ -90,20 +88,20 @@ get.da.data.growth <- function() { } else { PEcAn.logger::logger.error(samples.file, "not found, this file is required by the get.da.data function") } - + pfts <- names(ensemble.samples) pfts <- pfts[pfts != "env"] - + # ENSEMBLE omitted <- c(87) ensemble.run.ids <- PEcAn.utils::get.run.id("ENS", PEcAn.utils::left.pad.zeros((1:ensemble.size)[-omitted])) ensemble.x <- do.call(cbind, ensemble.samples[pfts])[(1:ensemble.size)[-omitted], ] - + # SENSITIVITY ANALYSIS sa.x <- list() for (pft in pfts) { MEDIAN <- "50" - + median.samples <- list() for (i in seq_along(sa.samples)) { median.samples[[i]] <- sa.samples[[i]][MEDIAN, ] @@ -113,10 +111,9 @@ get.da.data.growth <- function() { sa.x[[run.id]] <- do.call(cbind, trait.samples) ## loop over pfts for (i in seq(names(sa.samples))) { - traits <- colnames(sa.samples[[i]]) quantiles.str <- rownames(sa.samples[[i]]) - + ## loop over variables for (trait in traits) { for (quantile.str in quantiles.str) { @@ -125,8 +122,9 @@ get.da.data.growth <- function() { trait.samples <- median.samples trait.samples[[i]][trait] <- sa.samples[[i]][quantile.str, trait] run.id <- PEcAn.utils::get.run.id("SA", round(quantile, 3), - trait = trait, - pft.name = names(trait.samples)[i]) + trait = trait, + pft.name = names(trait.samples)[i] + ) sa.x[[run.id]] <- do.call(cbind, trait.samples) } } @@ -135,16 +133,18 @@ get.da.data.growth <- function() { } sa.x <- do.call(rbind, sa.x) sa.run.ids <- rownames(sa.x) - run.ids <- ensemble.run.ids # c(ensemble.run.ids, sa.run.ids) - x <- ensemble.x # rbind(ensemble.x, sa.x) - - # run.ids<-ensemble.run.ids + run.ids <- ensemble.run.ids # c(ensemble.run.ids, sa.run.ids) + x <- ensemble.x # rbind(ensemble.x, sa.x) + + # run.ids<-ensemble.run.ids # x <- ensemble.x y <- t(as.data.frame(lapply(run.ids, function(run.id) { print(run.id) - yearly <- paste0(run.id, "-E-(((", - paste(paste0("(", 2003:2010, ")"), collapse = "|"), - ")-01)|(2011-07))") + yearly <- paste0( + run.id, "-E-(((", + paste(paste0("(", 2003:2010, ")"), collapse = "|"), + ")-01)|(2011-07))" + ) yearly <- read.output.type(out.dir, outname = yearly, pattern = "-E-") if (length(yearly) <= 0) { return(NA) @@ -152,11 +152,11 @@ get.da.data.growth <- function() { calculate.growth.L(yearly, growth, 0.023, years = 2003:2011) # error param was calculated from stddev, see below }))) - "squares<-lapply(run.ids, - function(run.id){ + "squares<-lapply(run.ids, + function(run.id){ print(run.id) - yearly <-paste(run.id, '-E-(((', - paste(paste('(', 2003:2010, ')', sep=''), collapse='|'), ')-01)|(2011-07))', + yearly <-paste(run.id, '-E-(((', + paste(paste('(', 2003:2010, ')', sep=''), collapse='|'), ')-01)|(2011-07))', sep='') yearly <- read.output.type(out.dir, outname=yearly, pattern='-E-') @@ -164,11 +164,11 @@ get.da.data.growth <- function() { return(NA) } squares <- calculate.growth.L(yearly, growth, 0.0278, years=2003:2011) - #observation process + #observation process return(squares) })" # stddev <- sqrt(mean(unlist(squares))) print(stddev) - + # filter out runs that have not completed log likelihoods default to 0 x <- x[y != 0, ] y <- y[y != 0] diff --git a/modules/assim.batch/R/helpers.R b/modules/assim.batch/R/helpers.R index b68e0731919..521013356fd 100644 --- a/modules/assim.batch/R/helpers.R +++ b/modules/assim.batch/R/helpers.R @@ -27,4 +27,3 @@ testForPackage <- function(pkg) { stop("Package", pkg, "required but not installed") } } - diff --git a/modules/assim.batch/R/hier.mcmc.R b/modules/assim.batch/R/hier.mcmc.R index 
cca363ba76e..183d7585319 100644 --- a/modules/assim.batch/R/hier.mcmc.R +++ b/modules/assim.batch/R/hier.mcmc.R @@ -17,20 +17,19 @@ ##' ##' @author Istem Fer ##' @export -##' +##' ########### hierarchical MCMC function with Gibbs ############## hier.mcmc <- function(settings, gp.stack, nstack = NULL, nmcmc, rng_orig, - jmp0, mu_site_init, nparam, nsites, llik.fn, prior.fn.all, prior.ind.all){ - + jmp0, mu_site_init, nparam, nsites, llik.fn, prior.fn.all, prior.ind.all) { pos.check <- sapply(settings$assim.batch$inputs, `[[`, "ss.positive") - - if(length(unlist(pos.check)) == 0){ + + if (length(unlist(pos.check)) == 0) { # if not passed from settings assume none pos.check <- rep(FALSE, length(settings$assim.batch$inputs)) - }else if(length(unlist(pos.check)) != length(settings$assim.batch$inputs)){ + } else if (length(unlist(pos.check)) != length(settings$assim.batch$inputs)) { # maybe one provided, but others are forgotten # check which ones are provided in settings from.settings <- sapply(seq_along(pos.check), function(x) !is.null(pos.check[[x]])) @@ -38,10 +37,10 @@ hier.mcmc <- function(settings, gp.stack, nstack = NULL, nmcmc, rng_orig, # replace those with the values provided in the settings tmp.check[from.settings] <- as.logical(unlist(pos.check)) pos.check <- tmp.check - }else{ + } else { pos.check <- as.logical(pos.check) } - + ################################################################ # # mu_site : site level parameters (nsite x nparam) @@ -50,9 +49,9 @@ hier.mcmc <- function(settings, gp.stack, nstack = NULL, nmcmc, rng_orig, # tau_global : global precision matrix (nparam x nparam) # ################################################################ - - - + + + ###### (hierarchical) global mu priors # # mu_global_mean : prior mean vector @@ -60,24 +59,24 @@ hier.mcmc <- function(settings, gp.stack, nstack = NULL, nmcmc, rng_orig, # mu_global_tau : prior precision matrix # # mu_global ~ MVN (mu_global_mean, mu_global_tau) - + # approximate a normal dist mu_init_samp <- matrix(NA, ncol = nparam, nrow = 1000) - for(ps in seq_along(prior.ind.all)){ + for (ps in seq_along(prior.ind.all)) { prior.quantiles <- eval(prior.fn.all$rprior[[prior.ind.all[ps]]], list(n = 1000)) - mu_init_samp[,ps] <- prior.quantiles + mu_init_samp[, ps] <- prior.quantiles } - + # mean hyperprior - mu_global_mean <- apply(mu_init_samp, 2, mean) + mu_global_mean <- apply(mu_init_samp, 2, mean) # sigma/tau hyperprior mu_global_sigma <- stats::cov(mu_init_samp) - mu_global_tau <- solve(mu_global_sigma) - + mu_global_tau <- solve(mu_global_sigma) + ## initialize mu_global (nparam) mu_global <- mvtnorm::rmvnorm(1, mu_global_mean, mu_global_sigma) - - + + ###### (hierarchical) global tau priors # # tau_global_df : Wishart degrees of freedom @@ -86,65 +85,67 @@ hier.mcmc <- function(settings, gp.stack, nstack = NULL, nmcmc, rng_orig, # tau_global ~ W (tau_global_df, tau_global_sigma) # sigma_global <- solve(tau_global) # - + # sigma_global hyperpriors - sigma_global_df <- nparam + 1 # test results with nparam since it is the least informative - sigma_global_scale <- mu_global_sigma/sigma_global_df - + sigma_global_df <- nparam + 1 # test results with nparam since it is the least informative + sigma_global_scale <- mu_global_sigma / sigma_global_df + # initialize sigma_global (nparam x nparam) - sigma_global <- MCMCpack::riwish(sigma_global_df, sigma_global_scale) + sigma_global <- MCMCpack::riwish(sigma_global_df, sigma_global_scale) # initialize jcov.arr (jump variances per site) - jcov.arr <- array(NA_real_, 
c(nparam, nparam, nsites)) - for(j in seq_len(nsites)) jcov.arr[,,j] <- jmp0 - + jcov.arr <- array(NA_real_, c(nparam, nparam, nsites)) + for (j in seq_len(nsites)) jcov.arr[, , j] <- jmp0 + # prepare mu_site (nsite x nparam) - mu_site_new <- matrix(NA_real_, nrow = nsites, ncol= nparam) - + mu_site_new <- matrix(NA_real_, nrow = nsites, ncol = nparam) + # start - mu_site_curr <- matrix(rep(mu_site_init, nsites), ncol=nparam, byrow = TRUE) - + mu_site_curr <- matrix(rep(mu_site_init, nsites), ncol = nparam, byrow = TRUE) + # values for each site will be accepted/rejected in themselves - currSS <- sapply(seq_len(nsites), function(v) get_ss(gp.stack[[v]], mu_site_curr[v,], pos.check)) + currSS <- sapply(seq_len(nsites), function(v) get_ss(gp.stack[[v]], mu_site_curr[v, ], pos.check)) # force it to be nvar x nsites matrix - currSS <- matrix(currSS, nrow = length(settings$assim.batch$inputs), ncol = nsites) - currllp <- lapply(seq_len(nsites), function(v) PEcAn.assim.batch::pda.calc.llik.par(settings, nstack[[v]], currSS[,v])) - + currSS <- matrix(currSS, nrow = length(settings$assim.batch$inputs), ncol = nsites) + currllp <- lapply(seq_len(nsites), function(v) PEcAn.assim.batch::pda.calc.llik.par(settings, nstack[[v]], currSS[, v])) + # storage - mu_site_samp <- array(NA_real_, c(nmcmc, nparam, nsites)) - mu_global_samp <- matrix(NA_real_, nrow = nmcmc, ncol= nparam) - sigma_global_samp <- array(NA_real_, c(nmcmc, nparam, nparam)) - - musite.accept.count <- rep(0, nsites) - + mu_site_samp <- array(NA_real_, c(nmcmc, nparam, nsites)) + mu_global_samp <- matrix(NA_real_, nrow = nmcmc, ncol = nparam) + sigma_global_samp <- array(NA_real_, c(nmcmc, nparam, nparam)) + + musite.accept.count <- rep(0, nsites) + adapt_orig <- settings$assim.batch$jump$adapt settings$assim.batch$jump$adapt <- adapt_orig * nsites - + ########################## Start MCMC ######################## - - for(g in 1:nmcmc){ - + + for (g in 1:nmcmc) { # jump adaptation step if ((g > 2) && ((g - 1) %% settings$assim.batch$jump$adapt == 0)) { - # update site level jvars params.recent <- mu_site_samp[(g - settings$assim.batch$jump$adapt):(g - 1), , ] - #colnames(params.recent) <- names(x0) + # colnames(params.recent) <- names(x0) settings$assim.batch$jump$adapt <- adapt_orig - jcov.list <- lapply(seq_len(nsites), function(v) pda.adjust.jumps.bs(settings, jcov.arr[,,v], musite.accept.count[v], - params.recent[seq(v, adapt_orig * nsites, by=12), , v])) - jcov.list <- lapply(jcov.list, lqmm::make.positive.definite, tol=1e-12) - jcov.arr <- abind::abind(jcov.list, along=3) - musite.accept.count <- rep(0, nsites) # Reset counter + jcov.list <- lapply(seq_len(nsites), function(v) { + pda.adjust.jumps.bs( + settings, jcov.arr[, , v], musite.accept.count[v], + params.recent[seq(v, adapt_orig * nsites, by = 12), , v] + ) + }) + jcov.list <- lapply(jcov.list, lqmm::make.positive.definite, tol = 1e-12) + jcov.arr <- abind::abind(jcov.list, along = 3) + musite.accept.count <- rep(0, nsites) # Reset counter settings$assim.batch$jump$adapt <- adapt_orig * nsites } - - + + ######################################## # gibbs update tau_global | mu_global, mu_site # # W(tau_global | mu_global, mu_site) ~ MVN( mu_site | mu_global, tau_global) * W(tau_global | tau_df, tau_V) - # + # # # using MVN-Wishart conjugacy # prior hyperparameters: tau_global_df, tau_global_sigma @@ -152,21 +153,21 @@ hier.mcmc <- function(settings, gp.stack, nstack = NULL, nmcmc, rng_orig, # # update: # tau_global ~ W(tau_global_df_gibbs, tau_global_sigma_gibbs) - - + + 
sigma_global_df_gibbs <- sigma_global_df + nsites - + pairwise_deviation <- apply(mu_site_curr, 1, function(r) r - t(mu_global)) sum_term <- pairwise_deviation %*% t(pairwise_deviation) - + sigma_global_scale_gibbs <- sigma_global_scale + sum_term - + # update sigma - sigma_global <- MCMCpack::riwish(sigma_global_df_gibbs, sigma_global_scale_gibbs) # across-site covariance - - - + sigma_global <- MCMCpack::riwish(sigma_global_df_gibbs, sigma_global_scale_gibbs) # across-site covariance + + + ######################################## # update mu_global | mu_site, tau_global # @@ -176,94 +177,99 @@ hier.mcmc <- function(settings, gp.stack, nstack = NULL, nmcmc, rng_orig, # # mu_global : global parameters # global_mu : precision weighted average between the data (mu_site) and prior mean (mu_f) - # global_Sigma : sum of mu_site and mu_f precision + # global_Sigma : sum of mu_site and mu_f precision # # Dietze, 2017, Eqn 13.6 - # mu_global ~ MVN(solve((nsites * sigma_global) + P_f_inv)) * ((nsites * sigma_global) + P_f_inv * mu_f), + # mu_global ~ MVN(solve((nsites * sigma_global) + P_f_inv)) * ((nsites * sigma_global) + P_f_inv * mu_f), # solve((nsites * sigma_global) + P_f_inv)) - + # prior hyperparameters : mu_global_mean, mu_global_sigma # posterior hyperparameters : mu_global_mean_gibbs, mu_global_sigma_gibbs # # update: # mu_global ~ MVN(mu_global_mean_gibbs, mu_global_sigma_gibbs) - + # calculate mu_global_sigma_gibbs from prior hyperparameters and tau_global mu_global_sigma_gibbs <- solve(mu_global_tau + nsites * solve(sigma_global)) - - - mu_site_bar <- apply(mu_site_curr, 2, mean) - + + + mu_site_bar <- apply(mu_site_curr, 2, mean) + # calculate mu_global_mean_gibbs from prior hyperparameters, mu_site_means and tau_global - mu_global_mean_gibbs <- mu_global_sigma_gibbs %*% - (mu_global_tau %*% mu_global_mean + ((nsites*solve(sigma_global)) %*% mu_site_bar)) - + mu_global_mean_gibbs <- mu_global_sigma_gibbs %*% + (mu_global_tau %*% mu_global_mean + ((nsites * solve(sigma_global)) %*% mu_site_bar)) + # update mu_global mu_global <- mvtnorm::rmvnorm(1, mu_global_mean_gibbs, mu_global_sigma_gibbs) # new prior mu to be used below for prior prob. calc. 
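# A minimal standalone sketch of the two conjugate Gibbs draws above, using toy
# dimensions; every value below is illustrative, and only MCMCpack::riwish and
# mvtnorm::rmvnorm (already used in this file) are assumed.
nparam <- 3; nsites <- 5
mu_global_mean  <- rep(0, nparam)                 # prior mean for mu_global
mu_global_sigma <- diag(nparam)                   # prior covariance
mu_global_tau   <- solve(mu_global_sigma)         # prior precision
sigma_global_df    <- nparam + 1                  # inverse-Wishart df hyperprior
sigma_global_scale <- mu_global_sigma / sigma_global_df
mu_global    <- mvtnorm::rmvnorm(1, mu_global_mean, mu_global_sigma)
mu_site_curr <- mvtnorm::rmvnorm(nsites, c(mu_global), diag(nparam))  # stand-in site draws
# Gibbs draw 1: sigma_global | mu_site, mu_global ~ inverse-Wishart
dev <- sweep(mu_site_curr, 2, c(mu_global))       # site deviations from the global mean
sigma_global <- MCMCpack::riwish(sigma_global_df + nsites,
                                 sigma_global_scale + t(dev) %*% dev)
# Gibbs draw 2: mu_global | mu_site, sigma_global ~ MVN (precision-weighted average,
# cf. Dietze 2017, Eqn 13.6)
post_sigma <- solve(mu_global_tau + nsites * solve(sigma_global))
post_mean  <- post_sigma %*% (mu_global_tau %*% mu_global_mean +
                                nsites * solve(sigma_global) %*% colMeans(mu_site_curr))
mu_global <- mvtnorm::rmvnorm(1, c(post_mean), post_sigma)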
- - + + # site level M-H ######################################## - + # propose new site parameter vectors thissite <- g %% nsites - if(thissite == 0) thissite <- nsites - proposed <- TruncatedNormal::rtmvnorm(1, - mu = mu_site_curr[thissite,], - sigma = jcov.arr[,,thissite], - lb = rng_orig[,1], - ub = rng_orig[,2]) - - mu_site_new <- matrix(rep(proposed, nsites),ncol=nparam, byrow = TRUE) - + if (thissite == 0) thissite <- nsites + proposed <- TruncatedNormal::rtmvnorm(1, + mu = mu_site_curr[thissite, ], + sigma = jcov.arr[, , thissite], + lb = rng_orig[, 1], + ub = rng_orig[, 2] + ) + + mu_site_new <- matrix(rep(proposed, nsites), ncol = nparam, byrow = TRUE) + # re-predict current SS - currSS <- sapply(seq_len(nsites), function(v) get_ss(gp.stack[[v]], mu_site_curr[v,], pos.check)) + currSS <- sapply(seq_len(nsites), function(v) get_ss(gp.stack[[v]], mu_site_curr[v, ], pos.check)) currSS <- matrix(currSS, nrow = length(settings$assim.batch$inputs), ncol = nsites) - + # calculate posterior - currLL <- sapply(seq_len(nsites), function(v) PEcAn.assim.batch::pda.calc.llik(currSS[,v], llik.fn, currllp[[v]])) + currLL <- sapply(seq_len(nsites), function(v) PEcAn.assim.batch::pda.calc.llik(currSS[, v], llik.fn, currllp[[v]])) # use new priors for calculating prior probability currPrior <- mvtnorm::dmvnorm(mu_site_curr, mu_global, sigma_global, log = TRUE) - currPost <- currLL + currPrior - + currPost <- currLL + currPrior + # calculate jump probabilities currHR <- sapply(seq_len(nsites), function(v) { - TruncatedNormal::dtmvnorm(mu_site_curr[v,], mu_site_new[v,], jcov.arr[,,v], - lb = rng_orig[,1], - ub = rng_orig[,2], log = TRUE, B = 1e2) + TruncatedNormal::dtmvnorm(mu_site_curr[v, ], mu_site_new[v, ], jcov.arr[, , v], + lb = rng_orig[, 1], + ub = rng_orig[, 2], log = TRUE, B = 1e2 + ) }) - + # predict new SS - newSS <- sapply(seq_len(nsites), function(v) get_ss(gp.stack[[v]], mu_site_new[v,], pos.check)) + newSS <- sapply(seq_len(nsites), function(v) get_ss(gp.stack[[v]], mu_site_new[v, ], pos.check)) newSS <- matrix(newSS, nrow = length(settings$assim.batch$inputs), ncol = nsites) - + # calculate posterior - newllp <- lapply(seq_len(nsites), function(v) PEcAn.assim.batch::pda.calc.llik.par(settings, nstack[[v]], newSS[,v])) - newLL <- sapply(seq_len(nsites), function(v) PEcAn.assim.batch::pda.calc.llik(newSS[,v], llik.fn, newllp[[v]])) + newllp <- lapply(seq_len(nsites), function(v) PEcAn.assim.batch::pda.calc.llik.par(settings, nstack[[v]], newSS[, v])) + newLL <- sapply(seq_len(nsites), function(v) PEcAn.assim.batch::pda.calc.llik(newSS[, v], llik.fn, newllp[[v]])) # use new priors for calculating prior probability newPrior <- mvtnorm::dmvnorm(mu_site_new, mu_global, sigma_global, log = TRUE) - newPost <- newLL + newPrior - + newPost <- newLL + newPrior + # calculate jump probabilities newHR <- sapply(seq_len(nsites), function(v) { - TruncatedNormal::dtmvnorm(mu_site_new[v,], mu_site_curr[v,], jcov.arr[,,v], - lb = rng_orig[,1], - ub = rng_orig[,2], log = TRUE, B = 1e2) + TruncatedNormal::dtmvnorm(mu_site_new[v, ], mu_site_curr[v, ], jcov.arr[, , v], + lb = rng_orig[, 1], + ub = rng_orig[, 2], log = TRUE, B = 1e2 + ) }) - + # Accept/reject with MH rule ar <- is.accepted(currPost + currHR, newPost + newHR) mu_site_curr[ar, ] <- mu_site_new[ar, ] musite.accept.count[thissite] <- musite.accept.count[thissite] + ar[thissite] - - - mu_site_samp[g, , seq_len(nsites)] <- t(mu_site_curr)[,seq_len(nsites)] - mu_global_samp[g,] <- mu_global # 100% acceptance for gibbs + + + mu_site_samp[g, , 
seq_len(nsites)] <- t(mu_site_curr)[, seq_len(nsites)] + mu_global_samp[g, ] <- mu_global # 100% acceptance for gibbs sigma_global_samp[g, , ] <- sigma_global # 100% acceptance for gibbs - - if(g %% 500 == 0) PEcAn.logger::logger.info(g, "of", nmcmc, "iterations") + + if (g %% 500 == 0) PEcAn.logger::logger.info(g, "of", nmcmc, "iterations") } - - return(list(mu_site_samp = mu_site_samp, mu_global_samp = mu_global_samp, sigma_global_samp = sigma_global_samp, - musite.accept.count = musite.accept.count)) -} # hier.mcmc \ No newline at end of file + + return(list( + mu_site_samp = mu_site_samp, mu_global_samp = mu_global_samp, sigma_global_samp = sigma_global_samp, + musite.accept.count = musite.accept.count + )) +} # hier.mcmc diff --git a/modules/assim.batch/R/load.L2Ameriflux.cf.R b/modules/assim.batch/R/load.L2Ameriflux.cf.R index 78754b6453e..e532e1df292 100644 --- a/modules/assim.batch/R/load.L2Ameriflux.cf.R +++ b/modules/assim.batch/R/load.L2Ameriflux.cf.R @@ -9,14 +9,13 @@ ##' @author Ryan Kelly ##' @export load.L2Ameriflux.cf <- function(file.in) { - nc <- ncdf4::nc_open(file.in) - + vars <- list() for (varname in names(nc$var)) { vars[[varname]] <- ncdf4::ncvar_get(nc, varname) } ncdf4::nc_close(nc) - + return(as.data.frame(do.call(cbind, vars))) } # load.L2Ameriflux.cf diff --git a/modules/assim.batch/R/minimize.GP.R b/modules/assim.batch/R/minimize.GP.R index 1c54b3e3aa6..d1af1f16073 100644 --- a/modules/assim.batch/R/minimize.GP.R +++ b/modules/assim.batch/R/minimize.GP.R @@ -6,28 +6,27 @@ ##' @param rng range ##' @param x0 initial values ##' @param splinefuns spline functions -##' +##' ##' @author Michael Dietze minimize.GP <- function(gp, rng, x0, splinefuns = NULL) { - isotropic <- gp$isotropic - x.id <- gp$x.id - ey <- 0 - + x.id <- gp$x.id + ey <- 0 + if (gp$method == "bayes") { samp <- gp$samp tauw <- coda::mcmc(gp$tauw[samp, ]) - psi <- coda::mcmc(gp$psi[samp, ]) - mu <- coda::mcmc(gp$mu) + psi <- coda::mcmc(gp$psi[samp, ]) + mu <- coda::mcmc(gp$mu) tauv <- W <- NULL } else { ## MLE - psi <- gp$psi - mu <- gp$mu + psi <- gp$psi + mu <- gp$mu tauw <- gp$tauw tauv <- gp$tauv } - + psibar <- NULL if (isotropic) { psibar <- stats::median(psi) @@ -46,13 +45,15 @@ minimize.GP <- function(gp, rng, x0, splinefuns = NULL) { if (gp$zeroMean) { ey <- 0 } else { - ey <- max(mu) #mean(y) + ey <- max(mu) # mean(y) } ybar <- tapply(gp$y, gp$x.id, mean) - k <- S22inv %*% (ybar - ey) - - stats::nlm(gpeval, x0, k = k, mu = ey, tau = tauwbar, psi = psibar, - x = gp$x.compact, rng = rng, splinefuns = splinefuns) + k <- S22inv %*% (ybar - ey) + + stats::nlm(gpeval, x0, + k = k, mu = ey, tau = tauwbar, psi = psibar, + x = gp$x.compact, rng = rng, splinefuns = splinefuns + ) } # minimize.GP @@ -70,16 +71,15 @@ minimize.GP <- function(gp, rng, x0, splinefuns = NULL) { ##' @param x Name of variable to plot on X axis ##' @param rng range ##' @param splinefuns spline functions -##' -##' @author Michael Dietze +##' +##' @author Michael Dietze gpeval <- function(xnew, k, mu, tau, psi, x, rng, splinefuns) { - ## second calc value S12 <- sapply(seq_along(k), function(i) { - tau * exp(-sum(psi * (xnew - x[i, ]) ^ 2)) + tau * exp(-sum(psi * (xnew - x[i, ])^2)) }) yprime <- mu + sum(S12 * k) - + if (!is.null(splinefuns)) { ## add trend surface back on y0 <- splinefuns[[length(xnew) + 1]] @@ -89,7 +89,7 @@ gpeval <- function(xnew, k, mu, tau, psi, x, rng, splinefuns) { y.trend <- y0 + sum(f - y0) yprime <- yprime + y.trend } - + return(yprime) } # gpeval @@ -97,7 +97,7 @@ gpeval <- function(xnew, k, mu, 
tau, psi, x, rng, splinefuns) { ##' @name ddist ##' @title ddist ##' @export -##' +##' ##' @param x vector of values (e.g. observations) to be evaluated by the specified probability density function ##' @param prior data.frame specifying a prior probability distribution in terms of the distribution name (distn) and first and second parameters (parama, paramb) ddist <- function(x, prior) { @@ -112,7 +112,7 @@ ddist <- function(x, prior) { ##' @name calculate.prior ##' @title calculate.prior ##' @export -##' +##' ##' @param samples Matrix of MCMC samples ##' @param priors prior list calculate.prior <- function(samples, priors) { @@ -122,24 +122,23 @@ calculate.prior <- function(samples, priors) { ##' @name get_ss ##' @title get_ss ##' @export -##' +##' ##' @param gp Gaussian Process ##' @param xnew new x coordinate ##' @param pos.check check if value needs to be positive (if TRUE, returns -Inf when GP is negative) get_ss <- function(gp, xnew, pos.check) { - SS <- numeric(length(gp)) - + X <- matrix(unlist(xnew), nrow = 1, byrow = TRUE) - - for(igp in seq_along(gp)){ - Y <- mlegp::predict.gp(gp[[igp]], newData = X[, 1:ncol(gp[[igp]]$X), drop=FALSE], se.fit = TRUE) - - j <- (igp %% length(pos.check)) - if(j == 0) j <- length(pos.check) - - if(pos.check[j]){ - if(Y$fit < 0){ + + for (igp in seq_along(gp)) { + Y <- mlegp::predict.gp(gp[[igp]], newData = X[, 1:ncol(gp[[igp]]$X), drop = FALSE], se.fit = TRUE) + + j <- (igp %% length(pos.check)) + if (j == 0) j <- length(pos.check) + + if (pos.check[j]) { + if (Y$fit < 0) { return(-Inf) } repeat { @@ -148,39 +147,36 @@ get_ss <- function(gp, xnew, pos.check) { break } } - }else{ + } else { SS[igp] <- stats::rnorm(1, Y$fit, Y$se.fit) } } return(SS) - } # get_ss ##' @name get_y ##' @title get_y ##' @export -##' +##' ##' @param SSnew new summary statistic ##' @param xnew new x coordinate ##' @param llik.fn list that contains likelihood functions ##' @param priors prior list ##' @param llik.par parameters to be passed llik functions get_y <- function(SSnew, xnew, llik.fn, priors, llik.par) { - likelihood <- pda.calc.llik(SSnew, llik.fn, llik.par) - + prior.prob <- calculate.prior(xnew, priors) posterior.prob <- likelihood + prior.prob - + return(posterior.prob) - } # get_y ##' @name is.accepted ##' @title is.accepted ##' @export -##' +##' ##' @param ycurr current value on y axis ##' @param ynew new y coordinate @@ -203,7 +199,7 @@ is.accepted <- function(ycurr, ynew, format = "lin") { ##' @param nmcmc number of iterations ##' @param rng range of knots ##' @param format lin = lnlike fcn, log = log(lnlike) -##' @param mix each = jump each dim. independently, joint = jump all at once +##' @param mix each = jump each dim. 
independently, joint = jump all at once ##' @param splinefuns spline functions, not used ##' @param jmp0 initial jump variances ##' @param ar.target acceptance rate target @@ -214,18 +210,17 @@ is.accepted <- function(ycurr, ynew, format = "lin") { ##' @param llik.fn list that contains likelihood functions ##' @param hyper.pars hyper parameters ##' @param resume.list list of needed info if we are running the chain longer -##' +##' ##' @author Michael Dietze mcmc.GP <- function(gp, x0, nmcmc, rng, format = "lin", mix = "joint", splinefuns = NULL, - jmp0 = 0.35 * (rng[, 2] - rng[, 1]), ar.target = 0.5, priors = NA, settings, + jmp0 = 0.35 * (rng[, 2] - rng[, 1]), ar.target = 0.5, priors = NA, settings, run.block = TRUE, n.of.obs, llik.fn, hyper.pars, resume.list = NULL) { - pos.check <- sapply(settings$assim.batch$inputs, `[[`, "ss.positive") - - if(length(unlist(pos.check)) == 0){ + + if (length(unlist(pos.check)) == 0) { # if not passed from settings assume none pos.check <- rep(FALSE, length(settings$assim.batch$inputs)) - }else if(length(unlist(pos.check)) != length(settings$assim.batch$inputs)){ + } else if (length(unlist(pos.check)) != length(settings$assim.batch$inputs)) { # maybe one provided, but others are forgotten # check which ones are provided in settings from.settings <- sapply(seq_along(pos.check), function(x) !is.null(pos.check[[x]])) @@ -233,23 +228,23 @@ mcmc.GP <- function(gp, x0, nmcmc, rng, format = "lin", mix = "joint", splinefun # replace those with the values provided in the settings tmp.check[from.settings] <- as.logical(unlist(pos.check)) pos.check <- tmp.check - }else{ + } else { pos.check <- as.logical(pos.check) } - + # get SS currSS <- get_ss(gp, x0, pos.check) - - + + currllp <- pda.calc.llik.par(settings, n.of.obs, currSS, hyper.pars) - pcurr <- unlist(sapply(currllp, `[[` , "par")) - + pcurr <- unlist(sapply(currllp, `[[`, "par")) + xcurr <- unlist(x0) - dim <- length(x0) - samp <- matrix(NA, nmcmc, dim) - par <- matrix(NA, nmcmc, length(pcurr), dimnames = list(NULL, names(pcurr))) # note: length(pcurr) can be 0 - - + dim <- length(x0) + samp <- matrix(NA, nmcmc, dim) + par <- matrix(NA, nmcmc, length(pcurr), dimnames = list(NULL, names(pcurr))) # note: length(pcurr) can be 0 + + if (run.block) { jcov <- diag((jmp0)^2) accept.count <- 0 @@ -258,69 +253,68 @@ mcmc.GP <- function(gp, x0, nmcmc, rng, format = "lin", mix = "joint", splinefun } else { jcov <- jmp0 accept.count <- resume.list$ac - prev.samp <- resume.list$prev.samp - prev.par <- resume.list$par + prev.samp <- resume.list$prev.samp + prev.par <- resume.list$par colnames(prev.samp) <- names(x0) - samp <- rbind(prev.samp, samp) - par <- rbind(prev.par, par) + samp <- rbind(prev.samp, samp) + par <- rbind(prev.par, par) start <- dim(prev.samp)[1] + 1 nmcmc <- dim(samp)[1] # jmp <- mvjump(ic=diag(jmp0),rate=ar.target, nc=dim) } - + # make sure it is positive definite, see note below - jcov <- lqmm::make.positive.definite(jcov, tol=1e-12) - + jcov <- lqmm::make.positive.definite(jcov, tol = 1e-12) + for (g in start:nmcmc) { - if (mix == "joint") { - # adapt if ((g > 2) && ((g - 1) %% settings$assim.batch$jump$adapt == 0)) { params.recent <- samp[(g - settings$assim.batch$jump$adapt):(g - 1), ] colnames(params.recent) <- names(x0) # accept.count <- round(jmp@arate[(g-1)/settings$assim.batch$jump$adapt]*100) jcov <- pda.adjust.jumps.bs(settings, jcov, accept.count, params.recent) - accept.count <- 0 # Reset counter - + accept.count <- 0 # Reset counter + # make sure precision is not going to be an issue # 
NOTE: for very small values this is going to be an issue # maybe include a scaling somewhere while building the emulator - jcov <- lqmm::make.positive.definite(jcov, tol=1e-12) + jcov <- lqmm::make.positive.definite(jcov, tol = 1e-12) } - + ## propose new parameters - xnew <- TruncatedNormal::rtmvnorm(1, mu = c(xcurr), sigma = jcov, lb = rng[,1], ub = rng[,2]) + xnew <- TruncatedNormal::rtmvnorm(1, mu = c(xcurr), sigma = jcov, lb = rng[, 1], ub = rng[, 2]) # if(bounded(xnew,rng)){ - + # re-predict SS currSS <- get_ss(gp, xcurr, pos.check) - - - + + + # don't update the currllp ( = llik.par, e.g. tau) yet # calculate posterior with xcurr | currllp - ycurr <- get_y(currSS, xcurr, llik.fn, priors, currllp) + ycurr <- get_y(currSS, xcurr, llik.fn, priors, currllp) HRcurr <- TruncatedNormal::dtmvnorm(c(xnew), c(xcurr), jcov, - lb = rng[,1], ub = rng[,2], log = TRUE, B = 1e2) - - newSS <- get_ss(gp, xnew, pos.check) - if(all(newSS != -Inf)){ - + lb = rng[, 1], ub = rng[, 2], log = TRUE, B = 1e2 + ) + + newSS <- get_ss(gp, xnew, pos.check) + if (all(newSS != -Inf)) { newllp <- pda.calc.llik.par(settings, n.of.obs, newSS, hyper.pars) - ynew <- get_y(newSS, xnew, llik.fn, priors, newllp) + ynew <- get_y(newSS, xnew, llik.fn, priors, newllp) HRnew <- TruncatedNormal::dtmvnorm(c(xcurr), c(xnew), jcov, - lb = rng[,1], ub = rng[,2], log = TRUE, B = 1e2) - - if (is.accepted(ycurr+HRcurr, ynew+HRnew)) { - xcurr <- xnew + lb = rng[, 1], ub = rng[, 2], log = TRUE, B = 1e2 + ) + + if (is.accepted(ycurr + HRcurr, ynew + HRnew)) { + xcurr <- xnew currSS <- newSS accept.count <- accept.count + 1 } - + # now update currllp | xcurr currllp <- pda.calc.llik.par(settings, n.of.obs, currSS, hyper.pars) - pcurr <- unlist(sapply(currllp, `[[` , "par")) + pcurr <- unlist(sapply(currllp, `[[`, "par")) } # } mix = each } else { @@ -335,39 +329,39 @@ mcmc.GP <- function(gp, x0, nmcmc, rng, format = "lin", mix = "joint", splinefun } # if(bounded(xnew,rng)){ currSS <- get_ss(gp, xcurr, pos.check) - - ycurr <- get_y(currSS, xcurr, llik.fn, priors, currllp) - - - newSS <- get_ss(gp, xnew, pos.check) - - + + ycurr <- get_y(currSS, xcurr, llik.fn, priors, currllp) + + + newSS <- get_ss(gp, xnew, pos.check) + + newllp <- pda.calc.llik.par(settings, n.of.obs, newSS, hyper.pars) - ynew <- get_y(newSS, xnew, llik.fn, priors, newllp) + ynew <- get_y(newSS, xnew, llik.fn, priors, newllp) if (is.accepted(ycurr, ynew)) { - xcurr <- xnew + xcurr <- xnew currSS <- newSS } - + currllp <- pda.calc.llik.par(settings, n.of.obs, currSS, hyper.pars) - pcurr <- unlist(sapply(currllp, `[[` , "par")) - + pcurr <- unlist(sapply(currllp, `[[`, "par")) + # } } } samp[g, ] <- unlist(xcurr) - par[g, ] <- pcurr - - if(g %% 1000 == 0) PEcAn.logger::logger.info(g, "of", nmcmc, "iterations") + par[g, ] <- pcurr + + if (g %% 1000 == 0) PEcAn.logger::logger.info(g, "of", nmcmc, "iterations") # print(p(jmp)) jmp <- update(jmp,samp) } - - + + chain.res <- list(jump = jcov, ac = accept.count, prev.samp = samp, par = par, n.of.obs = n.of.obs) - + return(list(mcmc.samp = samp, mcmc.par = par, chain.res = chain.res)) ## xnew <- gpeval,x0,k=k,mu=ey,tau=tauwbar,psi=psibar,x=gp$x.compact,rng=rng) - + ################### IN PROGRESS ############## } # mcmc.GP @@ -375,7 +369,7 @@ mcmc.GP <- function(gp, x0, nmcmc, rng, format = "lin", mix = "joint", splinefun ##' @name bounded ##' @title bounded ##' @export -##' +##' ##' @param xnew new x coordinate ##' @param rng range bounded <- function(xnew, rng) { diff --git a/modules/assim.batch/R/pda.bayesian.tools.R 
b/modules/assim.batch/R/pda.bayesian.tools.R index 49f91ad797b..ff2016abea2 100644 --- a/modules/assim.batch/R/pda.bayesian.tools.R +++ b/modules/assim.batch/R/pda.bayesian.tools.R @@ -23,13 +23,11 @@ ##' ##' @author Istem Fer ##' @export -pda.bayesian.tools <- function(settings, external.data = NULL, external.priors = NULL, - external.formats = NULL, ensemble.id = NULL, - params.id = NULL, param.names = NULL, prior.id = NULL, - chain = NULL, iter = NULL, adapt = NULL, adj.min = NULL, +pda.bayesian.tools <- function(settings, external.data = NULL, external.priors = NULL, + external.formats = NULL, ensemble.id = NULL, + params.id = NULL, param.names = NULL, prior.id = NULL, + chain = NULL, iter = NULL, adapt = NULL, adj.min = NULL, ar.target = NULL, jvar = NULL, remote = FALSE, ...) { - - sampler <- settings$assim.batch$bt.settings$sampler ## this bit of code is useful for defining the variables @@ -40,10 +38,10 @@ pda.bayesian.tools <- function(settings, external.data = NULL, external.priors = adapt <- adj.min <- ar.target <- jvar <- NULL remote <- FALSE } - + # is this an extension run - extension.check <- is.null(settings$assim.batch$extension) - + extension.check <- is.null(settings$assim.batch$extension) + if (extension.check) { # not an extension run run.normal <- TRUE @@ -53,23 +51,24 @@ pda.bayesian.tools <- function(settings, external.data = NULL, external.priors = run.normal <- FALSE run.longer <- TRUE } - + # load inputs with neff if this is another round - if(!run.normal){ + if (!run.normal) { external_data_path <- file.path(settings$outdir, paste0("external.", settings$assim.batch$ensemble.id, ".Rdata")) - if(file.exists(external_data_path)){ + if (file.exists(external_data_path)) { load(external_data_path) # and delete the file afterwards because it will be re-written with a new ensemble id in the end file.remove(external_data_path) } } - + ## -------------------------------------- Setup ------------------------------------- ## Handle settings settings <- pda.settings( settings = settings, params.id = params.id, param.names = param.names, prior.id = prior.id, chain = chain, iter = iter, adapt = adapt, - adj.min = adj.min, ar.target = ar.target, jvar = jvar) + adj.min = adj.min, ar.target = ar.target, jvar = jvar + ) ## will be used to check if multiplicative Gaussian is requested any.mgauss <- sapply(settings$assim.batch$inputs, `[[`, "likelihood") @@ -87,24 +86,24 @@ pda.bayesian.tools <- function(settings, external.data = NULL, external.priors = } ## Load priors - if(is.null(external.priors)){ - temp <- pda.load.priors(settings, con, run.normal) - prior.list <- temp$prior - settings <- temp$settings - }else{ - prior.list <- external.priors + if (is.null(external.priors)) { + temp <- pda.load.priors(settings, con, run.normal) + prior.list <- temp$prior + settings <- temp$settings + } else { + prior.list <- external.priors } - pname <- lapply(prior.list, rownames) + pname <- lapply(prior.list, rownames) n.param.all <- sapply(prior.list, nrow) ## Load data to assimilate against - if(is.null(external.data)){ + if (is.null(external.data)) { inputs <- load.pda.data(settings, con, external.formats) - }else{ + } else { inputs <- external.data } n.input <- length(inputs) - + # get hyper parameters if any hyper.pars <- return_hyperpars(settings$assim.batch, inputs) @@ -117,15 +116,19 @@ pda.bayesian.tools <- function(settings, external.data = NULL, external.priors = do.call("require", list(paste0("PEcAn.", settings$model$type))) my.write.config <- paste("write.config.", 
settings$model$type, sep = "") if (!exists(my.write.config)) { - PEcAn.logger::logger.severe(paste(my.write.config, "does not exist. Please make sure that the PEcAn interface is loaded for", - settings$model$type)) + PEcAn.logger::logger.severe(paste( + my.write.config, "does not exist. Please make sure that the PEcAn interface is loaded for", + settings$model$type + )) } ## Select parameters to constrain - prior.ind <- lapply(seq_along(settings$pfts), - function(x) which(pname[[x]] %in% settings$assim.batch$param.names[[x]])) - n.param <- sapply(prior.ind, length) - + prior.ind <- lapply( + seq_along(settings$pfts), + function(x) which(pname[[x]] %in% settings$assim.batch$param.names[[x]]) + ) + n.param <- sapply(prior.ind, length) + ## NOTE: The listed samplers here require more than 1 parameter for now because of the way their ## cov is calculated @@ -141,38 +144,42 @@ pda.bayesian.tools <- function(settings, external.data = NULL, external.priors = } ## Create an ensemble id - if(is.null(ensemble.id)){ + if (is.null(ensemble.id)) { settings$assim.batch$ensemble.id <- pda.create.ensemble(settings, con, workflow.id) - }else{ + } else { settings$assim.batch$ensemble.id <- ensemble.id } - if(!remote){ - settings_outdir <- settings$outdir - }else{ - settings_outdir <- dirname(settings$host$rundir) - settings_outdir <- gsub(settings$assim.batch$ensemble.id, "", settings_outdir) + if (!remote) { + settings_outdir <- settings$outdir + } else { + settings_outdir <- dirname(settings$host$rundir) + settings_outdir <- gsub(settings$assim.batch$ensemble.id, "", settings_outdir) } - + ## Set up likelihood functions llik.fn <- pda.define.llik.fn(settings) - prior.all <- do.call("rbind", prior.list) + prior.all <- do.call("rbind", prior.list) ## Set prior distribution functions (d___, q___, r___, and multivariate versions) - prior.fn.all <- pda.define.prior.fn(prior.all) + prior.fn.all <- pda.define.prior.fn(prior.all) prior.ind.all <- which(unlist(pname) %in% unlist(settings$assim.batch$param.names)) - pname.all <- unlist(pname) + pname.all <- unlist(pname) ## Set initial conditions - parm <- sapply(prior.fn.all$qprior, eval, list(p = 0.5)) - names(parm) <- pname.all - - - PEcAn.logger::logger.info(paste0("Extracting upper and lower boundaries from priors.")) # M/AM/DR/DRAM can't work with -Inf, Inf values - rng <- matrix(c(sapply(prior.fn.all$qprior[prior.ind.all], eval, list(p = 1e-05)), - sapply(prior.fn.all$qprior[prior.ind.all], eval, list(p = 0.99999))), - nrow = sum(n.param)) + parm <- sapply(prior.fn.all$qprior, eval, list(p = 0.5)) + names(parm) <- pname.all + + + PEcAn.logger::logger.info(paste0("Extracting upper and lower boundaries from priors.")) # M/AM/DR/DRAM can't work with -Inf, Inf values + rng <- matrix( + c( + sapply(prior.fn.all$qprior[prior.ind.all], eval, list(p = 1e-05)), + sapply(prior.fn.all$qprior[prior.ind.all], eval, list(p = 0.99999)) + ), + nrow = sum(n.param) + ) # if it's a uniform distribution, use given boundaries for (i in 1:sum(n.param)) { if (prior.all[prior.ind.all, ][i, 1] == "unif") { @@ -181,22 +188,24 @@ pda.bayesian.tools <- function(settings, external.data = NULL, external.priors = } } prior.sel <- prior.all[prior.ind.all, ] - prior.sel$lower <- rng[,1] - prior.sel$upper <- rng[,2] - prior.sel$best <- parm[prior.ind.all] - + prior.sel$lower <- rng[, 1] + prior.sel$upper <- rng[, 2] + prior.sel$best <- parm[prior.ind.all] + ## Create prior class object for BayesianTools - bt.prior <- pda.create.btprior(prior.sel) + bt.prior <- pda.create.btprior(prior.sel) 
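# A tiny illustration of the boundary extraction above, assuming a single
# hypothetical normal prior; the qnorm call stands in for one element of
# prior.fn.all$qprior.
qprior_example <- quote(qnorm(p, 0, 1))
lower <- eval(qprior_example, list(p = 1e-05))    # approx. -4.26
upper <- eval(qprior_example, list(p = 0.99999))  # approx. +4.26
# a "unif" prior row keeps its parama/paramb as exact bounds instead (see loop above)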
## Let's not write every bruteforce run to DB for now, also DB con might be an issue in parallelizing - if(is.null(external.formats)){ + if (is.null(external.formats)) { external.formats <- list() - for(it in seq_len(n.input)){ - external.formats[[it]] <- PEcAn.DB::query.format.vars(bety = con, - input.id = settings$assim.batch$inputs[[it]]$input.id) + for (it in seq_len(n.input)) { + external.formats[[it]] <- PEcAn.DB::query.format.vars( + bety = con, + input.id = settings$assim.batch$inputs[[it]]$input.id + ) } } - + ## Create log-likelihood function for createbayesianSetup{BayesianTools} ## you test with bt.likelihood(bt.prior$sampler()) bt.likelihood <- function(x) { @@ -213,24 +222,26 @@ pda.bayesian.tools <- function(settings, external.data = NULL, external.priors = now <- format(Sys.time(), "%Y%m%d%H%M%OS5") run.id <- pda.init.run(settings, NULL, my.write.config, workflow.id, run.params, n = 1, run.names = paste("run", - now, sep = ".")) + now, + sep = "." + )) ## Start model run PEcAn.remote::start.model.runs(settings, FALSE) - + ## Read model outputs align.return <- pda.get.model.output(settings, run.id, NULL, inputs, external.formats) model.out <- align.return$model.out - if(all(!is.na(model.out))){ + if (all(!is.na(model.out))) { inputs <- align.return$inputs } # retrieve n - n.of.obs <- sapply(inputs,`[[`, "n") - names(n.of.obs) <- sapply(model.out,names) + n.of.obs <- sapply(inputs, `[[`, "n") + names(n.of.obs) <- sapply(model.out, names) # handle bias parameters if multiplicative Gaussian is listed in the likelihoods - if(any(unlist(any.mgauss) == "multipGauss")) { + if (any(unlist(any.mgauss) == "multipGauss")) { isbias <- which(unlist(any.mgauss) == "multipGauss") # testing now nbias <- 1 @@ -240,8 +251,8 @@ pda.bayesian.tools <- function(settings, external.data = NULL, external.priors = bias.terms <- NULL } - if(!is.null(bias.terms)){ - all.bias <- lapply(bias.terms, function(n) n[1,]) + if (!is.null(bias.terms)) { + all.bias <- lapply(bias.terms, function(n) n[1, ]) all.bias <- do.call("rbind", all.bias) } else { all.bias <- NULL @@ -249,9 +260,11 @@ pda.bayesian.tools <- function(settings, external.data = NULL, external.priors = ## calculate error statistics pda.errors <- pda.calc.error(settings, NULL, model_out = model.out, run.id, inputs, all.bias) - llik.par <- pda.calc.llik.par(settings, n = n.of.obs, - error.stats = unlist(pda.errors), - hyper.pars) + llik.par <- pda.calc.llik.par(settings, + n = n.of.obs, + error.stats = unlist(pda.errors), + hyper.pars + ) ## Calculate likelihood LL.new <- pda.calc.llik(pda.errors = unlist(pda.errors), llik.fn, llik.par) @@ -265,61 +278,66 @@ pda.bayesian.tools <- function(settings, external.data = NULL, external.priors = bayesianSetup <- BayesianTools::createBayesianSetup(bt.likelihood, bt.prior, parallel = FALSE) PEcAn.logger::logger.info("MCMC starting. 
Please wait.") - - nChains <- bt.settings$nrChains - bt.settings$nrChains <- 1 - + + nChains <- bt.settings$nrChains + bt.settings$nrChains <- 1 + # prepare for parallelization dcores <- parallel::detectCores() - 1 ncores <- min(max(dcores, 1), nChains) - + if (!is.null(settings$assim.batch$extension)) { - load(settings$assim.batch$out.path) # loads previous out list + load(settings$assim.batch$out.path) # loads previous out list - cl <- parallel::makeCluster(ncores, type="FORK") + cl <- parallel::makeCluster(ncores, type = "FORK") parallel::clusterEvalQ(cl, library(BayesianTools)) - + ## Parallel over chains - out <- parallel::parLapply(cl, out, function(x){ + out <- parallel::parLapply(cl, out, function(x) { out <- BayesianTools::runMCMC(bayesianSetup = x, sampler = sampler, settings = bt.settings) return(out) - }) - + }) } else { - - cl <- parallel::makeCluster(ncores, type="FORK") + cl <- parallel::makeCluster(ncores, type = "FORK") parallel::clusterEvalQ(cl, library(BayesianTools)) - + ## Parallel over chains - out <- parallel::parLapply(cl, seq_len(ncores), function(x){ + out <- parallel::parLapply(cl, seq_len(ncores), function(x) { out <- BayesianTools::runMCMC(bayesianSetup = bayesianSetup, sampler = sampler, settings = bt.settings) return(out) - }) - + }) } - + parallel::stopCluster(cl) - + ## Combine the chains out <- BayesianTools::createMcmcSamplerList(out) # save the out object for restart functionality and further inspection - settings$assim.batch$out.path <- file.path(settings$outdir, - paste0("out.pda", - settings$assim.batch$ensemble.id, - ".Rdata")) + settings$assim.batch$out.path <- file.path( + settings$outdir, + paste0( + "out.pda", + settings$assim.batch$ensemble.id, + ".Rdata" + ) + ) save(out, file = settings$assim.batch$out.path) - + # save inputs list, this object has been processed for autocorrelation correction # this can take a long time depending on the data, re-load and skip in next iteration external.data <- inputs - save(external.data, file = file.path(settings_outdir, - paste0("external.", - settings$assim.batch$ensemble.id, - ".Rdata"))) + save(external.data, file = file.path( + settings_outdir, + paste0( + "external.", + settings$assim.batch$ensemble.id, + ".Rdata" + ) + )) # prepare for post-process - samples <- lapply(out, BayesianTools::getSample, parametersOnly = TRUE) # BayesianTools::getSample + samples <- lapply(out, BayesianTools::getSample, parametersOnly = TRUE) # BayesianTools::getSample mcmc.list <- lapply(samples, `colnames<-`, pname.all[prior.ind.all]) # Separate each PFT's parameter samples to their own list @@ -341,5 +359,4 @@ pda.bayesian.tools <- function(settings, external.data = NULL, external.priors = ## Output an updated settings list return(settings) - } # pda.bayesian.tools diff --git a/modules/assim.batch/R/pda.bayestools.helpers.R b/modules/assim.batch/R/pda.bayestools.helpers.R index da0d766f18c..a589c1ba03c 100644 --- a/modules/assim.batch/R/pda.bayestools.helpers.R +++ b/modules/assim.batch/R/pda.bayestools.helpers.R @@ -1,5 +1,5 @@ ##' Create priors for BayesianTools -##' +##' ##' Helper function for creating log-priors compatible with BayesianTools package ##' ##' @param prior.sel `data.frame` containing prior distributions of the selected parameters @@ -17,22 +17,21 @@ ##' @author Istem Fer, Alexey Shiklomanov ##' @export pda.create.btprior <- function(prior.sel) { - # TODO: test exponential -- it only has one argument, so this won't work - # Returns a function that calculates the density of the specified + # Returns a 
function that calculates the density of the specified # distribution given the parameters ddist_generator <- function(distn, a, b) { - fun_string <- paste0('d', distn) + fun_string <- paste0("d", distn) f <- match.fun(fun_string) out <- function(x) f(x, a, b, log = TRUE) return(out) } - # Returns a function that draws from the specified distribution with the + # Returns a function that draws from the specified distribution with the # specified parameters rdist_generator <- function(distn, a, b) { - fun_string <- paste0('r', distn) + fun_string <- paste0("r", distn) f <- match.fun(fun_string) out <- function(n = 1) f(n, a, b) return(out) @@ -41,13 +40,13 @@ pda.create.btprior <- function(prior.sel) { # Create a list of density and random draw functions ddist_funs <- with(prior.sel, mapply(ddist_generator, distn, parama, paramb)) rdist_funs <- with(prior.sel, mapply(rdist_generator, distn, parama, paramb)) - if ('param_name' %in% names(prior.sel)) { - names(ddist_funs) <- names(rdist_funs) <- prior.sel[['param_name']] + if ("param_name" %in% names(prior.sel)) { + names(ddist_funs) <- names(rdist_funs) <- prior.sel[["param_name"]] } # `mapply` statement returns density <- function(params) { - dens_vec <- mapply(function(f, x) f(x), ddist_funs, params) # Returns vector of log densities + dens_vec <- mapply(function(f, x) f(x), ddist_funs, params) # Returns vector of log densities out <- sum(dens_vec) return(out) } @@ -60,22 +59,24 @@ pda.create.btprior <- function(prior.sel) { # BayesianTools lower and upper bounds and best guess, if specified in data.frame lower <- NULL - if ('lower' %in% names(prior.sel)) { - lower <- prior.sel[['lower']] + if ("lower" %in% names(prior.sel)) { + lower <- prior.sel[["lower"]] } upper <- NULL - if ('upper' %in% names(prior.sel)) { - upper <- prior.sel[['upper']] + if ("upper" %in% names(prior.sel)) { + upper <- prior.sel[["upper"]] } best <- NULL - if ('best' %in% names(prior.sel)) { - best <- prior.sel[['best']] + if ("best" %in% names(prior.sel)) { + best <- prior.sel[["best"]] } - + # Use createPrior{BayesianTools} function to create prior class object compatible # with rest of the functions - out <- BayesianTools::createPrior(density = density, sampler = sampler, - lower = lower, upper = upper, best = best) + out <- BayesianTools::createPrior( + density = density, sampler = sampler, + lower = lower, upper = upper, best = best + ) return(out) } # pda.create.btprior @@ -89,39 +90,45 @@ pda.create.btprior <- function(prior.sel) { ##' ##' @author Istem Fer ##' @export -##' +##' pda.settings.bt <- function(settings) { - sampler <- settings$assim.batch$bt.settings$sampler - - iterations <- ifelse(!is.null(settings$assim.batch$bt.settings$iterations), - as.numeric(settings$assim.batch$bt.settings$iterations), - 1000) - - chain <- ifelse(!is.null(settings$assim.batch$bt.settings$chain), - as.numeric(settings$assim.batch$bt.settings$chain), - 2) - - optimize <- ifelse(!is.null(settings$assim.batch$bt.settings$optimize), - settings$assim.batch$bt.settings$optimize, - TRUE) - - adapt <- ifelse(!is.null(settings$assim.batch$bt.settings$adapt), - settings$assim.batch$bt.settings$adapt, - TRUE) - - adaptationInverval = ifelse(!is.null(settings$assim.batch$bt.settings$adaptationInverval), - as.numeric(settings$assim.batch$bt.settings$adaptationInverval), - max(round(iterations/100*5),100)) - - adaptationNotBefore <- ifelse(!is.null(settings$assim.batch$bt.settings$adaptationNotBefore), - as.numeric(settings$assim.batch$bt.settings$adaptationNotBefore), - 
adaptationInverval) - + + iterations <- ifelse(!is.null(settings$assim.batch$bt.settings$iterations), + as.numeric(settings$assim.batch$bt.settings$iterations), + 1000 + ) + + chain <- ifelse(!is.null(settings$assim.batch$bt.settings$chain), + as.numeric(settings$assim.batch$bt.settings$chain), + 2 + ) + + optimize <- ifelse(!is.null(settings$assim.batch$bt.settings$optimize), + settings$assim.batch$bt.settings$optimize, + TRUE + ) + + adapt <- ifelse(!is.null(settings$assim.batch$bt.settings$adapt), + settings$assim.batch$bt.settings$adapt, + TRUE + ) + + adaptationInverval <- ifelse(!is.null(settings$assim.batch$bt.settings$adaptationInverval), + as.numeric(settings$assim.batch$bt.settings$adaptationInverval), + max(round(iterations / 100 * 5), 100) + ) + + adaptationNotBefore <- ifelse(!is.null(settings$assim.batch$bt.settings$adaptationNotBefore), + as.numeric(settings$assim.batch$bt.settings$adaptationNotBefore), + adaptationInverval + ) + DRlevels <- ifelse(!is.null(settings$assim.batch$bt.settings$DRlevels), - as.numeric(settings$assim.batch$bt.settings$DRlevels), - 1) - + as.numeric(settings$assim.batch$bt.settings$DRlevels), + 1 + ) + if (!is.null(settings$assim.batch$bt.settings$gibbsProbabilities)) { gibbsProbabilities <- as.numeric(unlist(settings$assim.batch$bt.settings$gibbsProbabilities)) } else { @@ -130,14 +137,16 @@ pda.settings.bt <- function(settings) { # parallel always FALSE because currently we parallelize over whole chains using parLapply if (sampler == "Metropolis") { - bt.settings <- list(iterations = iterations, - nrChains = chain, - optimize = optimize, - DRlevels = DRlevels, - adapt = adapt, - adaptationNotBefore = adaptationNotBefore, - gibbsProbabilities = gibbsProbabilities, - parallel = FALSE) + bt.settings <- list( + iterations = iterations, + nrChains = chain, + optimize = optimize, + DRlevels = DRlevels, + adapt = adapt, + adaptationNotBefore = adaptationNotBefore, + gibbsProbabilities = gibbsProbabilities, + parallel = FALSE + ) } else if (sampler %in% c("AM", "M", "DRAM", "DR")) { bt.settings <- list(iterations = iterations, startValue = "prior", parallel = FALSE) } else if (sampler %in% c("DE", "DEzs", "DREAM", "DREAMzs", "Twalk")) { @@ -147,31 +156,30 @@ pda.settings.bt <- function(settings) { } else { PEcAn.logger::logger.error(paste0(sampler, " sampler not found!")) } - + return(bt.settings) } # pda.settings.bt #' Flexible function to create correlation density plots -#' +#' #' numeric matrix or data.frame #' @author Florian Hartig #' @param mat matrix or data frame of variables #' @param density type of plot to do -#' @param thin thinning of the matrix to make things faster. Default is to thin to 5000 +#' @param thin thinning of the matrix to make things faster. Default is to thin to 5000 #' @param method method for calculating correlations #' @param whichParameters all params or some #' @references The code for the correlation density plot originates from Hartig, F.; Dislich, C.; Wiegand, T. & Huth, A. (2014) Technical Note: Approximate Bayesian parameterization of a process-based tropical forest model. Biogeosciences, 11, 1261-1272. #' @export -#' +#' correlationPlot <- function(mat, density = "smooth", thin = "auto", method = "pearson", whichParameters = NULL) { - if (inherits(mat, "bayesianOutput")) { mat <- BayesianTools::getSample(mat, thin = thin, whichParameters = whichParameters) } - + numPars <- ncol(mat) names <- colnames(mat) - + panel.hist.dens <- function(x, ...) 
{ usr <- graphics::par("usr") on.exit(graphics::par(usr), add = TRUE) @@ -180,10 +188,10 @@ correlationPlot <- function(mat, density = "smooth", thin = "auto", method = "pe breaks <- h$breaks nB <- length(breaks) y <- h$counts - y <- y/max(y) + y <- y / max(y) graphics::rect(breaks[-nB], 0, breaks[-1], y, col = "blue4", ...) } # panel.hist.dens - + # replaced by spearman panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...) { usr <- graphics::par("usr") @@ -197,7 +205,7 @@ correlationPlot <- function(mat, density = "smooth", thin = "auto", method = "pe } graphics::text(0.5, 0.5, txt, cex = cex.cor * abs(r)) } # panel.cor - + plotEllipse <- function(x, y) { usr <- graphics::par("usr") on.exit(graphics::par(usr), add = TRUE) @@ -206,9 +214,8 @@ correlationPlot <- function(mat, density = "smooth", thin = "auto", method = "pe el <- ellipse::ellipse(cor) graphics::polygon(el[, 1] + mean(x), el[, 2] + mean(y), col = "red") } # plotEllipse - - correlationEllipse <- function(x) { + correlationEllipse <- function(x) { cor <- stats::cor(x) ToRGB <- function(x) { grDevices::rgb(x[1] / 255, x[2] / 255, x[3] / 255) @@ -228,7 +235,7 @@ correlationPlot <- function(mat, density = "smooth", thin = "auto", method = "pe colors <- unlist(CustomPalette(100)) ellipse::plotcorr(xc, col = colors[xc * 50 + 50]) } # correlationEllipse - + if (density == "smooth") { ellipse::pairs(mat, lower.panel = function(...) { graphics::par(new = TRUE) @@ -240,7 +247,9 @@ correlationPlot <- function(mat, density = "smooth", thin = "auto", method = "pe correlationEllipse(mat) } else if (density == F) { ellipse::pairs(mat, lower.panel = panel.cor, diag.panel = panel.hist.dens, upper.panel = panel.cor) - } else stop("wrong sensity argument") - + } else { + stop("wrong sensity argument") + } + # The if block above is generating return values } # correlationPlot diff --git a/modules/assim.batch/R/pda.define.llik.R b/modules/assim.batch/R/pda.define.llik.R index 576d953ed48..5a84db04e69 100644 --- a/modules/assim.batch/R/pda.define.llik.R +++ b/modules/assim.batch/R/pda.define.llik.R @@ -8,33 +8,26 @@ ##' @author Ryan Kelly, Istem Fer ##' @export pda.define.llik.fn <- function(settings) { - llik.fn <- list() - + for (i in seq_along(settings$assim.batch$inputs)) { - # heteroskedastic Laplace likelihood, error stats is the likelihood if (settings$assim.batch$inputs[[i]]$likelihood == "Laplace") { - llik.fn[[i]] <- function(pda.errors, llik.par) { LL <- pda.errors return(LL) } - - } else { # Gaussian or multiplicative Gaussian - + llik.fn[[i]] <- function(pda.errors, llik.par) { # lnL = (n/2) * log(tau) - (tau/2) * SS - - LL <- (llik.par$n/2) * log(llik.par$par) - (llik.par$par/2) * pda.errors + + LL <- (llik.par$n / 2) * log(llik.par$par) - (llik.par$par / 2) * pda.errors return(LL) - } - } # if-block } # for-loop - + return(llik.fn) } # pda.define.llik.fn @@ -50,107 +43,99 @@ pda.define.llik.fn <- function(settings) { ##' @param inputs list ##' @param bias.terms matrix ##' -##' @return pda.errors +##' @return pda.errors ##' ##' @author Istem Fer ##' @export -pda.calc.error <-function(settings, con, model_out, run.id, inputs, bias.terms){ - - if(anyNA(model_out, recursive = TRUE)) { # Probably indicates model failed entirely +pda.calc.error <- function(settings, con, model_out, run.id, inputs, bias.terms) { + if (anyNA(model_out, recursive = TRUE)) { # Probably indicates model failed entirely NA.list <- as.list(rep(NA, length(inputs))) return(NA.list) } - + n.input <- length(inputs) pda.errors <- list() SSdb <- list() # 
multiplicative Gaussian counter bc <- 1 - - + + for (k in seq_len(n.input)) { - if (settings$assim.batch$inputs[[k]]$likelihood == "Laplace") { # heteroskedastic laplacian - + resid <- abs(model_out[[k]] - inputs[[k]]$obs) pos <- (model_out[[k]] >= 0) # SS <- c(stats::dexp(resid[pos], - # 1 / (inputs[[k]]$par[1] + (inputs[[k]]$par[2] * - # sqrt(inputs[[k]]$n_eff/inputs[[k]]$n) * + # 1 / (inputs[[k]]$par[1] + (inputs[[k]]$par[2] * + # sqrt(inputs[[k]]$n_eff/inputs[[k]]$n) * # model_out[[k]][pos])), log = TRUE), # stats::dexp(resid[!pos], - # 1 / (inputs[[k]]$par[1] + (inputs[[k]]$par[3] * - # sqrt(inputs[[k]]$n_eff/inputs[[k]]$n) * + # 1 / (inputs[[k]]$par[1] + (inputs[[k]]$par[3] * + # sqrt(inputs[[k]]$n_eff/inputs[[k]]$n) * # model_out[[k]][!pos])), log = TRUE)) - # - # pda.errors[[k]] <- sum(SS, na.rm = TRUE) - # SSdb[[k]] <- sum(SS, na.rm = TRUE) - - # heteroskedastic slopes, slope varies with magnitude of the flux + # + # pda.errors[[k]] <- sum(SS, na.rm = TRUE) + # SSdb[[k]] <- sum(SS, na.rm = TRUE) + + # heteroskedastic slopes, slope varies with magnitude of the flux # inflated by sqrt(n/neff) because var is 2b^2 for laplacian likelihood - beta_p <- (inputs[[k]]$par[1] + inputs[[k]]$par[2] * model_out[[k]][pos]) * sqrt(inputs[[k]]$n/inputs[[k]]$n_eff) - beta_n <- (inputs[[k]]$par[1] + inputs[[k]]$par[3] * model_out[[k]][!pos])* sqrt(inputs[[k]]$n/inputs[[k]]$n_eff) - + beta_p <- (inputs[[k]]$par[1] + inputs[[k]]$par[2] * model_out[[k]][pos]) * sqrt(inputs[[k]]$n / inputs[[k]]$n_eff) + beta_n <- (inputs[[k]]$par[1] + inputs[[k]]$par[3] * model_out[[k]][!pos]) * sqrt(inputs[[k]]$n / inputs[[k]]$n_eff) + # there might not be a negative slope if non-negative variable, assign zero, move on - suppressWarnings(if(length(beta_n) == 0) beta_n <- 0) - - SS_p <- - log(2*beta_p) - resid[[1]][pos]/beta_p - SS_n <- - log(2*beta_n) - resid[[1]][!pos]/beta_n - suppressWarnings(if(length(SS_n) == 0) SS_n <- 0) + suppressWarnings(if (length(beta_n) == 0) beta_n <- 0) + + SS_p <- -log(2 * beta_p) - resid[[1]][pos] / beta_p + SS_n <- -log(2 * beta_n) - resid[[1]][!pos] / beta_n + suppressWarnings(if (length(SS_n) == 0) SS_n <- 0) pda.errors[[k]] <- sum(SS_p, SS_n, na.rm = TRUE) SSdb[[k]] <- pda.errors[[k]] - - } else if (settings$assim.batch$inputs[[k]]$likelihood == "multipGauss") { + } else if (settings$assim.batch$inputs[[k]]$likelihood == "multipGauss") { # multiplicative Gaussian - + SS <- rep(NA, length(bias.terms)) - for(b in seq_along(SS)){ - SS[b] <- sum((bias.terms[bc,][b] * model_out[[k]] - inputs[[k]]$obs)^2, na.rm = TRUE) + for (b in seq_along(SS)) { + SS[b] <- sum((bias.terms[bc, ][b] * model_out[[k]] - inputs[[k]]$obs)^2, na.rm = TRUE) } - + bc <- bc + 1 - pda.errors[[k]] <- SS - SSdb[[k]] <- log(SS) - + pda.errors[[k]] <- SS + SSdb[[k]] <- log(SS) } else { # Gaussian - + SS <- sum((model_out[[k]] - inputs[[k]]$obs)^2, na.rm = TRUE) - - pda.errors[[k]] <- SS - SSdb[[k]] <- log(SS) - + + pda.errors[[k]] <- SS + SSdb[[k]] <- log(SS) } - - } # for-loop - + ## insert sufficient statistics in database if (!is.null(con)) { - # BETY requires sufficient statistics to be associated with inputs, so only proceed # for inputs with valid input ID (i.e., not the -1 dummy id). # Note that analyses requiring sufficient statistics to be stored therefore require # inputs to be registered in BETY first. 
- + # TODO : insert multiple SS per unique run, input when it is allowed on DB db.input.ind <- which(sapply(inputs, function(x) x$input.id) != -1) for (k in db.input.ind) { - PEcAn.DB::db.query( - paste0("INSERT INTO likelihoods ", - "(run_id, variable_id, input_id, ", - " loglikelihood, n_eff)", - "values ('", - run.id, "', '", inputs[[k]]$variable.id, "', '", inputs[[k]]$input.id, "', '", - SSdb[[k]], "', '", inputs[[k]]$n_eff, "')" + paste0( + "INSERT INTO likelihoods ", + "(run_id, variable_id, input_id, ", + " loglikelihood, n_eff)", + "values ('", + run.id, "', '", inputs[[k]]$variable.id, "', '", inputs[[k]]$input.id, "', '", + SSdb[[k]], "', '", inputs[[k]]$n_eff, "')" ), - con) + con + ) } } - + return(pda.errors) - } # pda.calc.error @@ -166,21 +151,19 @@ pda.calc.error <-function(settings, con, model_out, run.id, inputs, bias.terms){ ##' @author Ryan Kelly, Istem Fer ##' @export pda.calc.llik <- function(pda.errors, llik.fn, llik.par) { - n.var <- length(pda.errors) - + LL.vec <- numeric(n.var) - + for (k in seq_len(n.var)) { - j <- k %% length(llik.fn) - if(j==0) j <- length(llik.fn) - + if (j == 0) j <- length(llik.fn) + LL.vec[k] <- llik.fn[[j]](pda.errors[k], llik.par[[k]]) } - + LL.total <- sum(LL.vec) - + return(LL.total) } # pda.calc.llik @@ -188,42 +171,35 @@ pda.calc.llik <- function(pda.errors, llik.fn, llik.par) { ##' Calculate likelihood parameters ##' ##' @title pda.calc.llik.par -##' +##' ##' @param settings list ##' @param n named vector, sample sizes of inputs -##' @param error.stats list, Sufficient Statistics +##' @param error.stats list, Sufficient Statistics ##' @param hyper.pars list, hyperparameters -##' +##' ##' @author Istem Fer ##' @export -pda.calc.llik.par <-function(settings, n, error.stats, hyper.pars){ - +pda.calc.llik.par <- function(settings, n, error.stats, hyper.pars) { llik.par <- list() - - for(k in seq_along(error.stats)){ - + + for (k in seq_along(error.stats)) { j <- k %% length(settings$assim.batch$inputs) - if(j==0) j <- length(settings$assim.batch$inputs) - + if (j == 0) j <- length(settings$assim.batch$inputs) + llik.par[[k]] <- list() - + if (settings$assim.batch$inputs[[j]]$likelihood == "Gaussian" | - settings$assim.batch$inputs[[j]]$likelihood == "multipGauss") { - - - llik.par[[k]]$par <- stats::rgamma(1, hyper.pars[[k]]$parama + n[k]/2, - hyper.pars[[k]]$paramb + error.stats[k]/2) - + settings$assim.batch$inputs[[j]]$likelihood == "multipGauss") { + llik.par[[k]]$par <- stats::rgamma( + 1, hyper.pars[[k]]$parama + n[k] / 2, + hyper.pars[[k]]$paramb + error.stats[k] / 2 + ) + names(llik.par[[k]]$par) <- paste0("tau.", names(n)[k]) - } - llik.par[[k]]$n <- n[k] - + llik.par[[k]]$n <- n[k] } - + return(llik.par) - } # pda.calc.llik.par - - diff --git a/modules/assim.batch/R/pda.emulator.R b/modules/assim.batch/R/pda.emulator.R index 1af46f2221a..1b39c90779e 100644 --- a/modules/assim.batch/R/pda.emulator.R +++ b/modules/assim.batch/R/pda.emulator.R @@ -32,7 +32,6 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, chain = NULL, iter = NULL, adapt = NULL, adj.min = NULL, ar.target = NULL, jvar = NULL, n.knot = NULL, individual = TRUE, remote = FALSE) { - ## this bit of code is useful for defining the variables passed to this function if you are ## debugging if (FALSE) { @@ -67,14 +66,15 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, ## -------------------------------------- Setup ------------------------------------- ## Handle settings settings <- pda.settings( - 
settings=settings, params.id=params.id, param.names=param.names, - prior.id=prior.id, chain=chain, iter=iter, adapt=adapt, - adj.min=adj.min, ar.target=ar.target, jvar=jvar, n.knot=n.knot, run.round) + settings = settings, params.id = params.id, param.names = param.names, + prior.id = prior.id, chain = chain, iter = iter, adapt = adapt, + adj.min = adj.min, ar.target = ar.target, jvar = jvar, n.knot = n.knot, run.round + ) # load inputs with neff if this is another round - if(!run.normal){ + if (!run.normal) { external_data_path <- file.path(settings$outdir, paste0("external.", settings$assim.batch$ensemble.id, ".Rdata")) - if(file.exists(external_data_path)){ + if (file.exists(external_data_path)) { load(external_data_path) # and delete the file afterwards because it will be re-written with a new ensemble id in the end file.remove(external_data_path) @@ -103,47 +103,49 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, } else { con <- NULL } - + ## Load priors - if(is.null(external.priors)){ - temp <- pda.load.priors(settings, con, run.normal) - prior.list <- temp$prior - settings <- temp$settings - }else{ - prior.list <- external.priors + if (is.null(external.priors)) { + temp <- pda.load.priors(settings, con, run.normal) + prior.list <- temp$prior + settings <- temp$settings + } else { + prior.list <- external.priors } - pname <- lapply(prior.list, rownames) + pname <- lapply(prior.list, rownames) n.param.all <- sapply(prior.list, nrow) - if(is.null(external.data)){ + if (is.null(external.data)) { inputs <- load.pda.data(settings, con, external.formats) - }else{ + } else { inputs <- external.data } - n.input <- length(inputs) + n.input <- length(inputs) ## Set model-specific functions do.call("library", list(paste0("PEcAn.", settings$model$type))) my.write.config <- paste("write.config.", settings$model$type, sep = "") if (!exists(my.write.config)) { - PEcAn.logger::logger.severe(paste(my.write.config, - "does not exist. Please make sure that the PEcAn interface is loaded for", - settings$model$type)) + PEcAn.logger::logger.severe(paste( + my.write.config, + "does not exist. 
Please make sure that the PEcAn interface is loaded for", + settings$model$type + )) } ## Select parameters to constrain all_pft_names <- sapply(settings$pfts, `[[`, "name") prior.ind <- prior.ind.orig <- vector("list", length(settings$pfts)) names(prior.ind) <- names(prior.ind.orig) <- all_pft_names - for(i in seq_along(settings$pfts)){ + for (i in seq_along(settings$pfts)) { pft.name <- settings$pfts[[i]]$name - if(pft.name %in% names(settings$assim.batch$param.names)){ - prior.ind[[i]] <- which(pname[[i]] %in% settings$assim.batch$param.names[[pft.name]]) + if (pft.name %in% names(settings$assim.batch$param.names)) { + prior.ind[[i]] <- which(pname[[i]] %in% settings$assim.batch$param.names[[pft.name]]) prior.ind.orig[[i]] <- which(pname[[i]] %in% settings$assim.batch$param.names[[pft.name]] | - pname[[i]] %in% any.scaling[[pft.name]]) + pname[[i]] %in% any.scaling[[pft.name]]) } } @@ -158,24 +160,27 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, } ## Create an ensemble id - if(is.null(ensemble.id)){ + if (is.null(ensemble.id)) { settings$assim.batch$ensemble.id <- pda.create.ensemble(settings, con, workflow.id) - }else{ + } else { settings$assim.batch$ensemble.id <- ensemble.id } ## history restart - if(!remote){ - settings_outdir <- settings$outdir - pda.restart.file <- file.path(settings_outdir, paste0("history.pda", - settings$assim.batch$ensemble.id, ".Rdata")) - - }else{ - settings_outdir <- dirname(settings$host$rundir) - settings_outdir <- gsub(settings$assim.batch$ensemble.id, "", settings_outdir) - pda.restart.file <- paste0(settings_outdir, "history.pda", - settings$assim.batch$ensemble.id, ".Rdata") + if (!remote) { + settings_outdir <- settings$outdir + pda.restart.file <- file.path(settings_outdir, paste0( + "history.pda", + settings$assim.batch$ensemble.id, ".Rdata" + )) + } else { + settings_outdir <- dirname(settings$host$rundir) + settings_outdir <- gsub(settings$assim.batch$ensemble.id, "", settings_outdir) + pda.restart.file <- paste0( + settings_outdir, "history.pda", + settings$assim.batch$ensemble.id, ".Rdata" + ) } current.step <- "START" @@ -187,12 +192,12 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, # if we are going to throw scaling factor(s) instead of parameters # 1. append scaling factor priors to prior.list # 2. 
use the same probs for all pft params to be scaled - if(!is.null(sf)){ + if (!is.null(sf)) { sf.ind <- length(prior.list) + 1 sf.list <- pda.generate.sf(settings$assim.batch$n.knot, sf, prior.list) probs.sf <- sf.list$probs prior.list <- sf.list$priors - }else { + } else { probs.sf <- NULL } @@ -201,13 +206,19 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, ## Propose parameter knots (X) for emulator design - knots.list <- lapply(seq_along(settings$pfts), - function(x) pda.generate.knots(settings$assim.batch$n.knot, sf, probs.sf, - n.param.all[x], - prior.ind.orig[[x]], - prior.fn[[x]], - pname[[x]])) - names(knots.list) <- sapply(settings$pfts,"[[",'name') + knots.list <- lapply( + seq_along(settings$pfts), + function(x) { + pda.generate.knots( + settings$assim.batch$n.knot, sf, probs.sf, + n.param.all[x], + prior.ind.orig[[x]], + prior.fn[[x]], + pname[[x]] + ) + } + ) + names(knots.list) <- sapply(settings$pfts, "[[", "name") knots.params <- lapply(knots.list, `[[`, "params") @@ -215,70 +226,76 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, # knots.probs <- lapply(knots.list, `[[`, "probs") # if knots were passed externally overwrite them - if(!is.null(external.knots)){ + if (!is.null(external.knots)) { PEcAn.logger::logger.info("Overwriting the knots list.") knots.params <- external.knots } current.step <- "GENERATE KNOTS" - save(list = ls(all.names = TRUE),envir=environment(),file=pda.restart.file) + save(list = ls(all.names = TRUE), envir = environment(), file = pda.restart.file) ## Run this block if this is a "round" extension if (run.round & is.null(external.knots)) { - ## Propose a percentage (if not specified 90%) of the new parameter knots from the posterior of the previous run - knot.par <- ifelse(!is.null(settings$assim.batch$knot.par), - as.numeric(settings$assim.batch$knot.par), - 0.9) + knot.par <- ifelse(!is.null(settings$assim.batch$knot.par), + as.numeric(settings$assim.batch$knot.par), + 0.9 + ) - n.post.knots <- floor(knot.par * settings$assim.batch$n.knot) + n.post.knots <- floor(knot.par * settings$assim.batch$n.knot) # trim down, as a placeholder knots.params.temp <- lapply(knots.params, function(x) x[1:n.post.knots, ]) - if(!is.null(sf)){ + if (!is.null(sf)) { load(settings$assim.batch$sf.samp) - }else{ + } else { sf.samp <- NULL } - sampled_knots <- sample_MCMC(settings$assim.batch$mcmc.path, n.param.orig, prior.ind.orig, - n.post.knots, knots.params.temp, - prior.list, prior.fn, sf, sf.samp) + sampled_knots <- sample_MCMC( + settings$assim.batch$mcmc.path, n.param.orig, prior.ind.orig, + n.post.knots, knots.params.temp, + prior.list, prior.fn, sf, sf.samp + ) knots.params.temp <- sampled_knots$knots.params.temp - probs.round.sf <- sampled_knots$sf_knots - pass2bias <- sampled_knots$pass2bias + probs.round.sf <- sampled_knots$sf_knots + pass2bias <- sampled_knots$pass2bias # mixture of knots mix.knots <- sample(settings$assim.batch$n.knot, (settings$assim.batch$n.knot - n.post.knots)) for (i in seq_along(settings$pfts)) { - knots.list[[i]]$params <- rbind(knots.params[[i]][mix.knots, ], - knots.params.temp[[i]]) - names(knots.list)[i] <- settings$pfts[[i]]['name'] + knots.list[[i]]$params <- rbind( + knots.params[[i]][mix.knots, ], + knots.params.temp[[i]] + ) + names(knots.list)[i] <- settings$pfts[[i]]["name"] } - if(!is.null(sf)){ + if (!is.null(sf)) { probs.sf <- rbind(probs.sf[mix.knots, ], probs.round.sf) } knots.params <- lapply(knots.list, `[[`, "params") current.step <- "Generate 
Knots: round-if block" - save(list = ls(all.names = TRUE),envir=environment(),file=pda.restart.file) + save(list = ls(all.names = TRUE), envir = environment(), file = pda.restart.file) } # end round-if block ## Run this block if this is normal run or a "round" extension - if(run.normal | run.round){ - + if (run.normal | run.round) { ## Set up runs and write run configs for all proposed knots run.ids <- pda.init.run(settings, con, my.write.config, workflow.id, knots.params, - n = settings$assim.batch$n.knot, - run.names = paste0(settings$assim.batch$ensemble.id, ".knot.", - 1:settings$assim.batch$n.knot)) + n = settings$assim.batch$n.knot, + run.names = paste0( + settings$assim.batch$ensemble.id, ".knot.", + 1:settings$assim.batch$n.knot + ) + ) current.step <- "pda.init.run" - save(list = ls(all.names = TRUE),envir=environment(),file=pda.restart.file) + save(list = ls(all.names = TRUE), envir = environment(), file = pda.restart.file) ## start model runs PEcAn.workflow::start_model_runs(settings, (as.logical(settings$database$bety$write) & !remote)) @@ -292,33 +309,33 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, for (i in seq_len(settings$assim.batch$n.knot)) { align.return <- pda.get.model.output(settings, run.ids[i], con, inputs, external.formats) model.out[[i]] <- align.return$model.out - if(all(!is.na(model.out[[i]]))){ + if (all(!is.na(model.out[[i]]))) { inputs <- align.return$inputs } } current.step <- "pda.get.model.output" - save(list = ls(all.names = TRUE),envir=environment(),file=pda.restart.file) + save(list = ls(all.names = TRUE), envir = environment(), file = pda.restart.file) # efficient sample size calculation inputs <- pda.neff.calc(inputs) # handle bias parameters if multiplicative Gaussian is listed in the likelihoods - if(any(unlist(any.mgauss) == "multipGauss")) { - bias.list <- return.bias(settings, isbias, model.out, inputs, prior.list, run.round, pass2bias) + if (any(unlist(any.mgauss) == "multipGauss")) { + bias.list <- return.bias(settings, isbias, model.out, inputs, prior.list, run.round, pass2bias) bias.terms <- bias.list$bias.params prior.list <- bias.list$prior.list.bias - nbias <- bias.list$nbias - prior.fn <- lapply(prior.list, pda.define.prior.fn) + nbias <- bias.list$nbias + prior.fn <- lapply(prior.list, pda.define.prior.fn) } else { bias.terms <- NULL } for (i in seq_len(settings$assim.batch$n.knot)) { - if(!is.null(bias.terms)){ - all.bias <- lapply(bias.terms, function(n) n[i,]) + if (!is.null(bias.terms)) { + all.bias <- lapply(bias.terms, function(n) n[i, ]) all.bias <- do.call("rbind", all.bias) } else { all.bias <- NULL @@ -326,10 +343,9 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, ## calculate error statistics and save in the DB pda.errors[[i]] <- pda.calc.error(settings, con, model_out = model.out[[i]], run.id = run.ids[i], inputs, bias.terms = all.bias) } - } # end if-block current.step <- "pda.calc.error" - save(list = ls(all.names = TRUE),envir=environment(),file=pda.restart.file) + save(list = ls(all.names = TRUE), envir = environment(), file = pda.restart.file) init.list <- list() jmp.list <- list() @@ -338,30 +354,29 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, length.pars <- 0 prior.ind.list <- prior.ind.list.ns <- list() # now I need to go through all parameters for each pft, but leave out the ones that scaling factor is requested - for(p in seq_along(settings$assim.batch$param.names)){ + for (p in 
seq_along(settings$assim.batch$param.names)) { param.names <- settings$assim.batch$param.names[[p]] prior.ind.list[[p]] <- length.pars + which(pname[[p]] %in% unlist(param.names) & - !(pname[[p]] %in% sf)) + !(pname[[p]] %in% sf)) prior.ind.list.ns[[p]] <- length.pars + which(pname[[p]] %in% unlist(param.names)) length.pars <- length.pars + length(pname[[p]]) } - prior.ind.all <- unlist(prior.ind.list) + prior.ind.all <- unlist(prior.ind.list) prior.ind.all.ns <- unlist(prior.ind.list.ns) # if no scaling is requested prior.ind.all == prior.ind.all.ns # keep this ind.all w/o bias until extracting prob values below if (run.normal | run.round) { - # retrieve n - n.of.obs <- sapply(inputs,`[[`, "n") - names(n.of.obs) <- sapply(model.out[[1]],names) + n.of.obs <- sapply(inputs, `[[`, "n") + names(n.of.obs) <- sapply(model.out[[1]], names) # UPDATE: Use mlegp package, I can now draw from parameter space knots.params.all <- do.call("cbind", knots.params) X <- knots.params.all[, prior.ind.all, drop = FALSE] - if(!is.null(sf)){ + if (!is.null(sf)) { X <- cbind(X, probs.sf) } @@ -371,7 +386,7 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, bc <- 1 # what percentage of runs is allowed to fail? - if(!is.null(settings$assim.batch$allow.fail)){ + if (!is.null(settings$assim.batch$allow.fail)) { allow.fail <- as.numeric(settings$assim.batch$allow.fail) } else { allow.fail <- 0.5 @@ -379,11 +394,10 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, # what is it in number of runs? no.of.allowed <- floor(settings$assim.batch$n.knot * allow.fail) - for(inputi in seq_len(n.input)){ - error.statistics[[inputi]] <- sapply(pda.errors,`[[`, inputi) - - if(unlist(any.mgauss)[inputi] == "multipGauss") { + for (inputi in seq_len(n.input)) { + error.statistics[[inputi]] <- sapply(pda.errors, `[[`, inputi) + if (unlist(any.mgauss)[inputi] == "multipGauss") { # if yes, then we need to include bias term in the emulator bias.params <- bias.terms biases <- c(t(bias.params[[bc]])) @@ -391,11 +405,10 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, # replicate model parameter set per bias parameter rep.rows <- rep(1:nrow(X), each = nbias) - X.rep <- X[rep.rows,] + X.rep <- X[rep.rows, ] Xnew <- cbind(X.rep, biases) colnames(Xnew) <- c(colnames(X.rep), paste0("bias.", names(n.of.obs)[inputi])) SS.list[[inputi]] <- cbind(Xnew, c(error.statistics[[inputi]])) - } else { SS.list[[inputi]] <- cbind(X, error.statistics[[inputi]]) } # if-block @@ -405,15 +418,14 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, no.of.failed <- sum(is.na(SS.list[[inputi]][, ncol(SS.list[[inputi]])])) # check if you're left with enough sets - if(no.of.failed < no.of.allowed & (settings$assim.batch$n.knot - no.of.failed) > 1){ + if (no.of.failed < no.of.allowed & (settings$assim.batch$n.knot - no.of.failed) > 1) { SS.list[[inputi]] <- SS.list[[inputi]][!rowSums(is.na(SS.list[[inputi]])), ] - if( no.of.failed > 0){ + if (no.of.failed > 0) { PEcAn.logger::logger.info(paste0(no.of.failed, " runs failed. 
Emulator for ", names(n.of.obs)[inputi], " will be built with ", settings$assim.batch$n.knot - no.of.failed, " knots.")) } - } else{ + } else { PEcAn.logger::logger.error(paste0("Too many runs failed, not enough parameter set to build emulator for ", names(n.of.obs)[inputi], ".")) } - } # for-loop if (run.round) { @@ -423,7 +435,6 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, load(settings$assim.batch$ss.path) # add on SS <- lapply(seq_along(SS), function(iss) rbind(SS.list[[iss]], SS[[iss]])) - } else { SS <- SS.list } @@ -439,7 +450,7 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, dcores <- parallel::detectCores() - 1 ncores <- min(max(dcores, 1), length(SS)) - cl <- parallel::makeCluster(ncores, type="FORK") + cl <- parallel::makeCluster(ncores, type = "FORK") ## Parallel fit for GPs GPmodel <- parallel::parLapply(cl, SS, function(x) mlegp::mlegp(X = x[, -ncol(x), drop = FALSE], Z = x[, ncol(x), drop = FALSE], nugget = 0, nugget.known = 1, verbose = 0)) @@ -454,16 +465,15 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, gp <- GPmodel - } else { # is this a "longer" type of extension run - load(settings$assim.batch$emulator.path) # load previously built emulator(s) to run a longer mcmc + load(settings$assim.batch$emulator.path) # load previously built emulator(s) to run a longer mcmc load(settings$assim.batch$ss.path) load(settings$assim.batch$resume.path) n.of.obs <- resume.list[[1]]$n.of.obs - if(any(unlist(any.mgauss) == "multipGauss")){ + if (any(unlist(any.mgauss) == "multipGauss")) { load(settings$assim.batch$bias.path) # load prior.list with bias term from previous run prior.all <- do.call("rbind", prior.list) } @@ -476,22 +486,28 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, } # add indice and increase n.param for scaling factor - if(!is.null(sf)){ - prior.ind.all <- c(prior.ind.all, - ((length.pars + 1): (length.pars + length(sf)))) - n.param <- c(n.param, length(sf)) - length.pars <- length.pars + length(sf) + if (!is.null(sf)) { + prior.ind.all <- c( + prior.ind.all, + ((length.pars + 1):(length.pars + length(sf))) + ) + n.param <- c(n.param, length(sf)) + length.pars <- length.pars + length(sf) } # add indice and increase n.param for bias - if(any(unlist(any.mgauss) == "multipGauss")){ - prior.ind.all <- c(prior.ind.all, - ((length.pars + 1) : (length.pars + length(isbias)))) - prior.ind.all.ns <- c(prior.ind.all.ns, - ((length.pars + 1) : (length.pars + length(isbias)))) + if (any(unlist(any.mgauss) == "multipGauss")) { + prior.ind.all <- c( + prior.ind.all, + ((length.pars + 1):(length.pars + length(isbias))) + ) + prior.ind.all.ns <- c( + prior.ind.all.ns, + ((length.pars + 1):(length.pars + length(isbias))) + ) n.param <- c(n.param, length(isbias)) n.param.orig <- c(n.param.orig, length(isbias)) - length.pars <- length.pars + length(isbias) + length.pars <- length.pars + length(isbias) } @@ -504,23 +520,24 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, # but then, there are other things that needs to change in the emulator workflow # such as the way proposed parameters are used in estimation in get_ss function # so punting this development until it is needed - if(any(unlist(any.mgauss) == "multipGauss")){ + if (any(unlist(any.mgauss) == "multipGauss")) { colsel <- isbias - }else{ # first is as good as any + } else { # first is as good as any colsel <- 1 } - rng <- 
t(apply(SS[[colsel]][,-ncol(SS[[colsel]])], 2, range)) + rng <- t(apply(SS[[colsel]][, -ncol(SS[[colsel]])], 2, range)) if (run.normal | run.round) { - resume.list <- list() # start from knots indx <- sample(seq_len(settings$assim.batch$n.knot), settings$assim.batch$chain) for (c in seq_len(settings$assim.batch$chain)) { - jmp.list[[c]] <- sapply(prior.fn.all$qprior, - function(x) 0.1 * diff(eval(x, list(p = c(0.05, 0.95)))))[prior.ind.all] + jmp.list[[c]] <- sapply( + prior.fn.all$qprior, + function(x) 0.1 * diff(eval(x, list(p = c(0.05, 0.95)))) + )[prior.ind.all] jmp.list[[c]] <- sqrt(jmp.list[[c]]) init.list[[c]] <- as.list(SS[[colsel]][indx[c], -ncol(SS[[colsel]])]) @@ -543,7 +560,7 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, PEcAn.logger::logger.info(paste0("Starting emulator MCMC. Please wait.")) current.step <- "pre-MCMC" - save(list = ls(all.names = TRUE),envir=environment(),file=pda.restart.file) + save(list = ls(all.names = TRUE), envir = environment(), file = pda.restart.file) # start the clock ptm.start <- proc.time() @@ -557,25 +574,26 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, PEcAn.logger::logger.setOutputFile(logfile_path) - cl <- parallel::makeCluster(ncores, type="FORK", outfile = logfile_path) + cl <- parallel::makeCluster(ncores, type = "FORK", outfile = logfile_path) ## Sample posterior from emulator mcmc.out <- parallel::parLapply(cl, 1:settings$assim.batch$chain, function(chain) { - mcmc.GP(gp = gp, ## Emulator(s) - x0 = init.list[[chain]], ## Initial conditions - nmcmc = settings$assim.batch$iter, ## Number of reps - rng = rng, ## range - format = "lin", ## "lin"ear vs "log" of LogLikelihood - mix = mix, ## Jump "each" dimension independently or update them "joint"ly - jmp0 = jmp.list[[chain]], ## Initial jump size - ar.target = settings$assim.batch$jump$ar.target, ## Target acceptance rate - priors = prior.fn.all$dprior[prior.ind.all], ## priors - settings = settings, - run.block = (run.normal | run.round), - n.of.obs = n.of.obs, - llik.fn = llik.fn, - hyper.pars = hyper.pars, - resume.list = resume.list[[chain]] + mcmc.GP( + gp = gp, ## Emulator(s) + x0 = init.list[[chain]], ## Initial conditions + nmcmc = settings$assim.batch$iter, ## Number of reps + rng = rng, ## range + format = "lin", ## "lin"ear vs "log" of LogLikelihood + mix = mix, ## Jump "each" dimension independently or update them "joint"ly + jmp0 = jmp.list[[chain]], ## Initial jump size + ar.target = settings$assim.batch$jump$ar.target, ## Target acceptance rate + priors = prior.fn.all$dprior[prior.ind.all], ## priors + settings = settings, + run.block = (run.normal | run.round), + n.of.obs = n.of.obs, + llik.fn = llik.fn, + hyper.pars = hyper.pars, + resume.list = resume.list[[chain]] ) }) @@ -586,16 +604,15 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, PEcAn.logger::logger.info(paste0("Emulator MCMC took ", paste0(round(ptm.finish[3])), " seconds for ", paste0(settings$assim.batch$iter), " iterations.")) current.step <- "post-MCMC" - save(list = ls(all.names = TRUE),envir=environment(),file=pda.restart.file) + save(list = ls(all.names = TRUE), envir = environment(), file = pda.restart.file) mcmc.samp.list <- sf.samp.list <- list() for (c in seq_len(settings$assim.batch$chain)) { + m <- matrix(NA, nrow = nrow(mcmc.out[[c]]$mcmc.samp), ncol = length(prior.ind.all.ns)) - m <- matrix(NA, nrow = nrow(mcmc.out[[c]]$mcmc.samp), ncol = length(prior.ind.all.ns)) - - if(!is.null(sf)){ - sfm 
<- matrix(NA, nrow = nrow(mcmc.out[[c]]$mcmc.samp), ncol = length(sf)) + if (!is.null(sf)) { + sfm <- matrix(NA, nrow = nrow(mcmc.out[[c]]$mcmc.samp), ncol = length(sf)) # give colnames but the order can change, we'll overwrite anyway colnames(sfm) <- paste0(sf, "_SF") } @@ -611,17 +628,17 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, for (i in seq_along(prior.ind.all.ns)) { sf.check <- prior.all.rownames[prior.ind.all.ns][i] idx <- grep(sf.check, rownames(prior.all)[prior.ind.all]) - if(any(grepl(sf.check, sf))){ - - m[, i] <- eval(prior.fn.all$qprior[prior.ind.all.ns][[i]], - list(p = mcmc.out[[c]]$mcmc.samp[, idx])) - if(sc <= length(sf)){ + if (any(grepl(sf.check, sf))) { + m[, i] <- eval( + prior.fn.all$qprior[prior.ind.all.ns][[i]], + list(p = mcmc.out[[c]]$mcmc.samp[, idx]) + ) + if (sc <= length(sf)) { sfm[, sc] <- mcmc.out[[c]]$mcmc.samp[, idx] colnames(sfm)[sc] <- paste0(sf.check, "_SF") sc <- sc + 1 } - - }else{ + } else { m[, i] <- mcmc.out[[c]]$mcmc.samp[, idx] } } @@ -629,7 +646,7 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, colnames(m) <- prior.all.rownames[prior.ind.all.ns] mcmc.samp.list[[c]] <- m - if(!is.null(sf)){ + if (!is.null(sf)) { sf.samp.list[[c]] <- sfm } @@ -639,75 +656,103 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, if (FALSE) { - gp = gp - x0 = init.list[[chain]] - nmcmc = settings$assim.batch$iter - rng = rng - format = "lin" - mix = mix - jmp0 = jmp.list[[chain]] - ar.target = settings$assim.batch$jump$ar.target - priors = prior.fn.all$dprior[prior.ind.all] - settings = settings - run.block = (run.normal | run.round) - n.of.obs = n.of.obs - llik.fn = llik.fn - hyper.pars = hyper.pars - resume.list = resume.list[[chain]] + gp <- gp + x0 <- init.list[[chain]] + nmcmc <- settings$assim.batch$iter + rng <- rng + format <- "lin" + mix <- mix + jmp0 <- jmp.list[[chain]] + ar.target <- settings$assim.batch$jump$ar.target + priors <- prior.fn.all$dprior[prior.ind.all] + settings <- settings + run.block <- (run.normal | run.round) + n.of.obs <- n.of.obs + llik.fn <- llik.fn + hyper.pars <- hyper.pars + resume.list <- resume.list[[chain]] } ## ------------------------------------ Clean up ------------------------------------ current.step <- "clean up" - save(list = ls(all.names = TRUE),envir=environment(),file=pda.restart.file) + save(list = ls(all.names = TRUE), envir = environment(), file = pda.restart.file) ## Save emulator, outputs files - settings$assim.batch$emulator.path <- file.path(settings_outdir, - paste0("emulator.pda", - settings$assim.batch$ensemble.id, - ".Rdata")) + settings$assim.batch$emulator.path <- file.path( + settings_outdir, + paste0( + "emulator.pda", + settings$assim.batch$ensemble.id, + ".Rdata" + ) + ) save(gp, file = settings$assim.batch$emulator.path) - settings$assim.batch$ss.path <- file.path(settings_outdir, - paste0("ss.pda", - settings$assim.batch$ensemble.id, - ".Rdata")) + settings$assim.batch$ss.path <- file.path( + settings_outdir, + paste0( + "ss.pda", + settings$assim.batch$ensemble.id, + ".Rdata" + ) + ) save(SS, file = settings$assim.batch$ss.path) - settings$assim.batch$mcmc.path <- file.path(settings_outdir, - paste0("mcmc.list.pda", - settings$assim.batch$ensemble.id, - ".Rdata")) + settings$assim.batch$mcmc.path <- file.path( + settings_outdir, + paste0( + "mcmc.list.pda", + settings$assim.batch$ensemble.id, + ".Rdata" + ) + ) save(mcmc.samp.list, file = settings$assim.batch$mcmc.path) - 
settings$assim.batch$resume.path <- file.path(settings_outdir, - paste0("resume.pda", - settings$assim.batch$ensemble.id, - ".Rdata")) + settings$assim.batch$resume.path <- file.path( + settings_outdir, + paste0( + "resume.pda", + settings$assim.batch$ensemble.id, + ".Rdata" + ) + ) save(resume.list, file = settings$assim.batch$resume.path) # save inputs list, this object has been processed for autocorrelation correction # this can take a long time depending on the data, re-load and skip in next iteration external.data <- inputs - save(external.data, file = file.path(settings_outdir, - paste0("external.", - settings$assim.batch$ensemble.id, - ".Rdata"))) + save(external.data, file = file.path( + settings_outdir, + paste0( + "external.", + settings$assim.batch$ensemble.id, + ".Rdata" + ) + )) # save prior.list with bias term - if(any(unlist(any.mgauss) == "multipGauss")){ - settings$assim.batch$bias.path <- file.path(settings_outdir, - paste0("bias.pda", - settings$assim.batch$ensemble.id, - ".Rdata")) + if (any(unlist(any.mgauss) == "multipGauss")) { + settings$assim.batch$bias.path <- file.path( + settings_outdir, + paste0( + "bias.pda", + settings$assim.batch$ensemble.id, + ".Rdata" + ) + ) save(prior.list, file = settings$assim.batch$bias.path) } # save sf posterior - if(!is.null(sf)){ - sf.post.filename <- file.path(settings_outdir, - paste0("post.distns.pda.sf", "_", settings$assim.batch$ensemble.id, ".Rdata")) - sf.samp.filename <- file.path(settings_outdir, - paste0("samples.pda.sf", "_", settings$assim.batch$ensemble.id, ".Rdata")) + if (!is.null(sf)) { + sf.post.filename <- file.path( + settings_outdir, + paste0("post.distns.pda.sf", "_", settings$assim.batch$ensemble.id, ".Rdata") + ) + sf.samp.filename <- file.path( + settings_outdir, + paste0("samples.pda.sf", "_", settings$assim.batch$ensemble.id, ".Rdata") + ) sf.prior <- prior.list[[sf.ind]] sf.post.distns <- write_sf_posterior(sf.samp.list, sf.prior, sf.samp.filename) save(sf.post.distns, file = sf.post.filename) @@ -724,29 +769,30 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, } # Collect non-model parameters in their own list - if(length(mcmc.param.list) > length(settings$pfts)) { + if (length(mcmc.param.list) > length(settings$pfts)) { # means bias parameter was at least one bias param in the emulator # it will be the last list in mcmc.param.list # there will always be at least one tau for bias - for(c in seq_len(settings$assim.batch$chain)){ - mcmc.param.list[[length(mcmc.param.list)]][[c]] <- cbind( mcmc.param.list[[length(mcmc.param.list)]][[c]], - mcmc.out[[c]]$mcmc.par) + for (c in seq_len(settings$assim.batch$chain)) { + mcmc.param.list[[length(mcmc.param.list)]][[c]] <- cbind( + mcmc.param.list[[length(mcmc.param.list)]][[c]], + mcmc.out[[c]]$mcmc.par + ) } - - } else if (ncol(mcmc.out[[1]]$mcmc.par) != 0){ + } else if (ncol(mcmc.out[[1]]$mcmc.par) != 0) { # means no bias param but there are still other params, e.g. 
Gaussian - mcmc.param.list[[length(mcmc.param.list)+1]] <- list() - for(c in seq_len(settings$assim.batch$chain)){ + mcmc.param.list[[length(mcmc.param.list) + 1]] <- list() + for (c in seq_len(settings$assim.batch$chain)) { mcmc.param.list[[length(mcmc.param.list)]][[c]] <- mcmc.out[[c]]$mcmc.par } } # I can use a counter to run pre-defined number of emulator rounds - if(is.null(settings$assim.batch$round_counter)){ + if (is.null(settings$assim.batch$round_counter)) { settings$assim.batch$round_counter <- 1 - settings$assim.batch$extension <- "round" - }else{ - settings$assim.batch$round_counter <- 1 + as.numeric(settings$assim.batch$round_counter) + settings$assim.batch$extension <- "round" + } else { + settings$assim.batch$round_counter <- 1 + as.numeric(settings$assim.batch$round_counter) } @@ -760,9 +806,7 @@ pda.emulator <- function(settings, external.data = NULL, external.priors = NULL, ## Output an updated settings list current.step <- "pda.finish" - save(list = ls(all.names = TRUE),envir=environment(),file=pda.restart.file) + save(list = ls(all.names = TRUE), envir = environment(), file = pda.restart.file) return(settings) - - -} ## end pda.emulator +} ## end pda.emulator diff --git a/modules/assim.batch/R/pda.emulator.ms.R b/modules/assim.batch/R/pda.emulator.ms.R index e4c4da114fc..7898fac45f7 100644 --- a/modules/assim.batch/R/pda.emulator.ms.R +++ b/modules/assim.batch/R/pda.emulator.ms.R @@ -8,144 +8,147 @@ ##' @author Istem Fer ##' @export pda.emulator.ms <- function(multi.settings) { - - ## -------------------------------------- Initialization --------------------------------------------------- - - # check mode - pda.mode <- unique(sapply(multi.settings$assim.batch,`[[`, "mode")) - - if(pda.mode == "individual"){ + ## -------------------------------------- Initialization --------------------------------------------------- + + # check mode + pda.mode <- unique(sapply(multi.settings$assim.batch, `[[`, "mode")) + + if (pda.mode == "individual") { individual <- TRUE joint <- hierarchical <- FALSE - }else if(pda.mode == "joint"){ + } else if (pda.mode == "joint") { joint <- TRUE individual <- hierarchical <- FALSE - }else if(pda.mode == "hierarchical"){ + } else if (pda.mode == "hierarchical") { hieararchical <- TRUE individual <- joint <- FALSE - }else{ + } else { individual <- joint <- hierarchical <- TRUE } - + # how many sites nsites <- length(multi.settings) - + # lists to collect emulators and run MCMC per site later - gp.stack <- vector("list", nsites) - SS.stack <- vector("list", nsites) - #nstack <- vector("list", nsites) - + gp.stack <- vector("list", nsites) + SS.stack <- vector("list", nsites) + # nstack <- vector("list", nsites) + ## -------------------------------------- Individual runs and calibration ------------------------------------------ - - if(individual){ - # NOTE: individual flag -mode functionality in general- is not currently in use, + + if (individual) { + # NOTE: individual flag -mode functionality in general- is not currently in use, # preparation for future use, the idea is to skip site-level fitting if we've already done it - # if this flag is FALSE, pda.emulator will not fit GP and run MCMC, - # but this requires some re-arrangement in pda.emulator function + # if this flag is FALSE, pda.emulator will not fit GP and run MCMC, + # but this requires some re-arrangement in pda.emulator function # for now we will always run site-level calibration - + # Open the tunnel (might not need) PEcAn.remote::open_tunnel(multi.settings[[1]]$host$name, - user = 
multi.settings[[1]]$host$user, - tunnel_dir = dirname(multi.settings[[1]]$host$tunnel)) - + user = multi.settings[[1]]$host$user, + tunnel_dir = dirname(multi.settings[[1]]$host$tunnel) + ) + # Until a check function is implemented, run a predefined number of emulator rounds n_rounds <- ifelse(is.null(multi.settings[[1]]$assim.batch$n_rounds), 5, as.numeric(multi.settings[[1]]$assim.batch$n_rounds)) PEcAn.logger::logger.info(n_rounds, " individual PDA rounds will be run per site. Please wait.") repeat{ - # number of sites will probably get big real quick, so some multi-site PDA runs should be run on the cluster # unfortunately pda.emulator function was not fully designed for remote runs, so first we need to prepare a few things it needs # (1) all sites should be running the same knots # (2) all sites will use the same prior.list # (3) the format information for the assimilated data (pda.emulator needs DB connection to query it if it's not externally provided) # (4) ensemble ids (they provide unique trackers for emulator functions) - multi_site_objects <- return_multi_site_objects(multi.settings) - + multi_site_objects <- return_multi_site_objects(multi.settings) + emulator_jobs <- rep(NA, length(multi.settings)) - for(ms in seq_along(multi.settings)){ - + for (ms in seq_along(multi.settings)) { # Sync to remote subfile <- prepare_pda_remote(multi.settings[[ms]], site = ms, multi_site_objects) - + # Submit emulator scripts tmp <- PEcAn.remote::remote.execute.cmd(multi.settings[[ms]]$host, paste0("qsub ", subfile)) - emulator_jobs[ms] <- as.numeric( sub("\\D*(\\d+).*", "\\1", tmp)) + emulator_jobs[ms] <- as.numeric(sub("\\D*(\\d+).*", "\\1", tmp)) } - + # listen repeat{ PEcAn.logger::logger.info("Multi-site calibration running. Please wait.") Sys.sleep(300) - check_all_sites <- sapply(emulator_jobs, PEcAn.remote::qsub_run_finished, multi.settings[[1]]$host, multi.settings[[1]]$host$qstat) - if(all(check_all_sites)) break + check_all_sites <- sapply(emulator_jobs, PEcAn.remote::qsub_run_finished, multi.settings[[1]]$host, multi.settings[[1]]$host$qstat) + if (all(check_all_sites)) break } - + # Sync from remote multi.settings <- sync_pda_remote(multi.settings, multi_site_objects$ensembleidlist) - - + + # continue or stop r_counter <- as.numeric(multi.settings[[1]]$assim.batch$round_counter) # write multi.settings with individual-pda info - PEcAn.settings::write.settings(multi.settings, outputfile = paste0('pecan.PDA_MS', r_counter, '.xml')) + PEcAn.settings::write.settings(multi.settings, outputfile = paste0("pecan.PDA_MS", r_counter, ".xml")) PEcAn.logger::logger.info("Round", r_counter, "finished.") - if(r_counter == n_rounds) break + if (r_counter == n_rounds) break } # Close the tunnel PEcAn.remote::kill.tunnel(settings) - } - - - ## -------------------------------- Prepare for Joint and Hierarchical ----------------------------------------- - - + + + ## -------------------------------- Prepare for Joint and Hierarchical ----------------------------------------- + + # we need some objects that are common to all calibrations - obj_names <- c("init.list", "rng", "jmp.list", "prior.fn.all", "prior.ind.all", "llik.fn", + obj_names <- c( + "init.list", "rng", "jmp.list", "prior.fn.all", "prior.ind.all", "llik.fn", "settings", "prior.ind.all.ns", "sf", "prior.list", "n.param.orig", "pname", "prior.ind.orig", - "hyper.pars", "resume.list") - - need_obj <- load_pda_history(workdir = multi.settings$outdir, - ensemble.id = multi.settings[[1]]$assim.batch$ensemble.id, - objects = obj_names) - - 
init.list <- need_obj$init.list - rng_orig <- need_obj$rng - jmp.list <- need_obj$jmp.list - prior.list <- need_obj$prior.list - prior.fn.all <- need_obj$prior.fn.all - prior.ind.all <- need_obj$prior.ind.all + "hyper.pars", "resume.list" + ) + + need_obj <- load_pda_history( + workdir = multi.settings$outdir, + ensemble.id = multi.settings[[1]]$assim.batch$ensemble.id, + objects = obj_names + ) + + init.list <- need_obj$init.list + rng_orig <- need_obj$rng + jmp.list <- need_obj$jmp.list + prior.list <- need_obj$prior.list + prior.fn.all <- need_obj$prior.fn.all + prior.ind.all <- need_obj$prior.ind.all prior.ind.all.ns <- need_obj$prior.ind.all.ns - llik.fn <- need_obj$llik.fn - tmp.settings <- need_obj$settings - sf <- need_obj$sf - n.param.orig <- need_obj$n.param.orig - prior.ind.orig <- need_obj$prior.ind.orig - pname <- need_obj$pname - hyper.pars <- need_obj$hyper.pars - nparam <- length(prior.ind.all) - prior.all <- do.call("rbind", prior.list) - + llik.fn <- need_obj$llik.fn + tmp.settings <- need_obj$settings + sf <- need_obj$sf + n.param.orig <- need_obj$n.param.orig + prior.ind.orig <- need_obj$prior.ind.orig + pname <- need_obj$pname + hyper.pars <- need_obj$hyper.pars + nparam <- length(prior.ind.all) + prior.all <- do.call("rbind", prior.list) + resume.list <- vector("list", multi.settings[[1]]$assim.batch$chain) - + SS <- NULL # will be loaded from history objects next loop, but R CMD check can't see that - + # collect GPs and SSs - for(s in seq_along(multi.settings)){ - - load(file.path(multi.settings[[s]]$outdir, - basename(multi.settings[[s]]$assim.batch$emulator.path))) - load(file.path(multi.settings[[s]]$outdir, - basename(multi.settings[[s]]$assim.batch$ss.path))) + for (s in seq_along(multi.settings)) { + load(file.path( + multi.settings[[s]]$outdir, + basename(multi.settings[[s]]$assim.batch$emulator.path) + )) + load(file.path( + multi.settings[[s]]$outdir, + basename(multi.settings[[s]]$assim.batch$ss.path) + )) gp.stack[[s]] <- gp SS.stack[[s]] <- SS remove(gp, SS) - } ## Open database connection @@ -159,126 +162,128 @@ pda.emulator.ms <- function(multi.settings) { } else { con <- NULL } - + ## Get the workflow id if ("workflow" %in% names(tmp.settings)) { workflow.id <- tmp.settings$workflow$id } else { workflow.id <- -1 } - + ## remote hack for now ## currently site-level PDA runs on remote but joint and hierarchical runs locally ## this will change soon (?!) 
## but I'm still developing the code so for now let's change the paths back to local - for(i in seq_along(tmp.settings$pfts)){ + for (i in seq_along(tmp.settings$pfts)) { tmp.settings$pfts[[i]]$outdir <- file.path(tmp.settings$outdir, "pft", basename(tmp.settings$pfts[[i]]$outdir)) } tmp.settings$modeloutdir <- file.path(tmp.settings$outdir, basename(tmp.settings$modeloutdir)) - ## -------------------------------------- Joint calibration -------------------------------------------------- - if(joint){ # joint - if begin - + ## -------------------------------------- Joint calibration -------------------------------------------------- + if (joint) { # joint - if begin + ## Get an ensemble id for global calibration tmp.settings$assim.batch$ensemble.id <- pda.create.ensemble(tmp.settings, con, workflow.id) - + ## history restart - hbc.restart.file <- file.path(tmp.settings$outdir,paste0("history.joint", - tmp.settings$assim.batch$ensemble.id, ".Rdata")) - + hbc.restart.file <- file.path(tmp.settings$outdir, paste0( + "history.joint", + tmp.settings$assim.batch$ensemble.id, ".Rdata" + )) + current.step <- "BEG OF JOINT MCMC" - save(list = ls(all.names = TRUE),envir=environment(),file=hbc.restart.file) - + save(list = ls(all.names = TRUE), envir = environment(), file = hbc.restart.file) + gp <- unlist(gp.stack, recursive = FALSE) # start the clock ptm.start <- proc.time() - + # prepare for parallelization (over chains) dcores <- parallel::detectCores() - 1 ncores <- min(max(dcores, 1), multi.settings[[1]]$assim.batch$chain) - + PEcAn.logger::logger.setOutputFile(file.path(multi.settings$outdir, "pda.log")) - - cl <- parallel::makeCluster(ncores, type="FORK", outfile = file.path(multi.settings$outdir, "pda.log")) - + + cl <- parallel::makeCluster(ncores, type = "FORK", outfile = file.path(multi.settings$outdir, "pda.log")) + ## Sample posterior from emulator mcmc.out <- parallel::parLapply(cl, 1:multi.settings[[1]]$assim.batch$chain, function(chain) { - mcmc.GP(gp = gp, ## Emulator(s) - x0 = init.list[[chain]], ## Initial conditions - nmcmc = as.numeric(multi.settings[[1]]$assim.batch$iter), ## Number of iters - rng = rng_orig, ## range - format = "lin", ## "lin"ear vs "log" of LogLikelihood - mix = "joint", ## Jump "each" dimension independently or update them "joint"ly - jmp0 = jmp.list[[chain]], ## Initial jump size - ar.target = multi.settings[[1]]$assim.batch$jump$ar.target, ## Target acceptance rate - priors = prior.fn.all$dprior[prior.ind.all], ## priors - settings = tmp.settings, # this is just for checking llik functions downstream - run.block = TRUE, - n.of.obs = NULL, # need this for Gaussian likelihoods, keep it NULL for now - llik.fn = llik.fn, - hyper.pars = hyper.pars, - resume.list = resume.list[[chain]] + mcmc.GP( + gp = gp, ## Emulator(s) + x0 = init.list[[chain]], ## Initial conditions + nmcmc = as.numeric(multi.settings[[1]]$assim.batch$iter), ## Number of iters + rng = rng_orig, ## range + format = "lin", ## "lin"ear vs "log" of LogLikelihood + mix = "joint", ## Jump "each" dimension independently or update them "joint"ly + jmp0 = jmp.list[[chain]], ## Initial jump size + ar.target = multi.settings[[1]]$assim.batch$jump$ar.target, ## Target acceptance rate + priors = prior.fn.all$dprior[prior.ind.all], ## priors + settings = tmp.settings, # this is just for checking llik functions downstream + run.block = TRUE, + n.of.obs = NULL, # need this for Gaussian likelihoods, keep it NULL for now + llik.fn = llik.fn, + hyper.pars = hyper.pars, + resume.list = resume.list[[chain]] ) }) 
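# Illustrative sketch (not part of the patch) of the parallel-chain pattern used
# in the hunk above: one FORK worker per MCMC chain, dispatched with
# parallel::parLapply() and torn down with stopCluster(). A toy sampler stands in
# for mcmc.GP(); FORK clusters require a POSIX system, and the chain count here
# is an assumption for the example.
n_chain <- 3
ncores  <- min(max(parallel::detectCores() - 1, 1), n_chain)
cl      <- parallel::makeCluster(ncores, type = "FORK")
toy.out <- parallel::parLapply(cl, seq_len(n_chain), function(chain) {
  # placeholder for mcmc.GP(gp, x0 = init.list[[chain]], jmp0 = jmp.list[[chain]], ...)
  matrix(stats::rnorm(100, mean = chain), ncol = 2)
})
parallel::stopCluster(cl)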
- + parallel::stopCluster(cl) - + # Stop the clock ptm.finish <- proc.time() - ptm.start PEcAn.logger::logger.info(paste0("Emulator MCMC took ", paste0(round(ptm.finish[3])), " seconds for ", paste0(tmp.settings$assim.batch$iter), " iterations.")) - + current.step <- "END OF JOINT MCMC" - save(list = ls(all.names = TRUE),envir=environment(),file=hbc.restart.file) - + save(list = ls(all.names = TRUE), envir = environment(), file = hbc.restart.file) + mcmc.samp.list <- sf.samp.list <- list() - + for (c in seq_len(tmp.settings$assim.batch$chain)) { - - m <- matrix(NA, nrow = nrow(mcmc.out[[c]]$mcmc.samp), ncol = length(prior.ind.all.ns)) - - if(!is.null(sf)){ - sfm <- matrix(NA, nrow = nrow(mcmc.out[[c]]$mcmc.samp), ncol = length(sf)) + m <- matrix(NA, nrow = nrow(mcmc.out[[c]]$mcmc.samp), ncol = length(prior.ind.all.ns)) + + if (!is.null(sf)) { + sfm <- matrix(NA, nrow = nrow(mcmc.out[[c]]$mcmc.samp), ncol = length(sf)) # give colnames but the order can change, we'll overwrite anyway colnames(sfm) <- paste0(sf, "_SF") } ## Set the prior functions back to work with actual parameter range - + prior.all <- do.call("rbind", prior.list) prior.fn.all <- pda.define.prior.fn(prior.all) - + # retrieve rownames separately to get rid of var_name* structures prior.all.rownames <- unlist(sapply(prior.list, rownames)) - + sc <- 1 for (i in seq_along(prior.ind.all.ns)) { sf.check <- prior.all.rownames[prior.ind.all.ns][i] idx <- grep(sf.check, rownames(prior.all)[prior.ind.all]) - if(any(grepl(sf.check, sf))){ - - m[, i] <- eval(prior.fn.all$qprior[prior.ind.all.ns][[i]], - list(p = mcmc.out[[c]]$mcmc.samp[, idx])) - if(sc <= length(sf)){ + if (any(grepl(sf.check, sf))) { + m[, i] <- eval( + prior.fn.all$qprior[prior.ind.all.ns][[i]], + list(p = mcmc.out[[c]]$mcmc.samp[, idx]) + ) + if (sc <= length(sf)) { sfm[, sc] <- mcmc.out[[c]]$mcmc.samp[, idx] colnames(sfm)[sc] <- paste0(sf.check, "_SF") sc <- sc + 1 } - - }else{ + } else { m[, i] <- mcmc.out[[c]]$mcmc.samp[, idx] } } - + colnames(m) <- prior.all.rownames[prior.ind.all.ns] mcmc.samp.list[[c]] <- m - - if(!is.null(sf)){ + + if (!is.null(sf)) { sf.samp.list[[c]] <- sfm } - + resume.list[[c]] <- mcmc.out[[c]]$chain.res } - + # Separate each PFT's parameter samples (and bias term) to their own list mcmc.param.list <- list() ind <- 0 @@ -286,117 +291,118 @@ pda.emulator.ms <- function(multi.settings) { mcmc.param.list[[i]] <- lapply(mcmc.samp.list, function(x) x[, (ind + 1):(ind + n.param.orig[i]), drop = FALSE]) ind <- ind + n.param.orig[i] } - + # Collect non-model parameters in their own list - if(length(mcmc.param.list) > length(tmp.settings$pfts)) { + if (length(mcmc.param.list) > length(tmp.settings$pfts)) { # means bias parameter was at least one bias param in the emulator # it will be the last list in mcmc.param.list # there will always be at least one tau for bias - for(c in seq_len(tmp.settings$assim.batch$chain)){ - mcmc.param.list[[length(mcmc.param.list)]][[c]] <- cbind( mcmc.param.list[[length(mcmc.param.list)]][[c]], - mcmc.out[[c]]$mcmc.par) + for (c in seq_len(tmp.settings$assim.batch$chain)) { + mcmc.param.list[[length(mcmc.param.list)]][[c]] <- cbind( + mcmc.param.list[[length(mcmc.param.list)]][[c]], + mcmc.out[[c]]$mcmc.par + ) } - - } else if (ncol(mcmc.out[[1]]$mcmc.par) != 0){ + } else if (ncol(mcmc.out[[1]]$mcmc.par) != 0) { # means no bias param but there are still other params, e.g. 
Gaussian - mcmc.param.list[[length(mcmc.param.list)+1]] <- list() - for(c in seq_len(tmp.settings$assim.batch$chain)){ + mcmc.param.list[[length(mcmc.param.list) + 1]] <- list() + for (c in seq_len(tmp.settings$assim.batch$chain)) { mcmc.param.list[[length(mcmc.param.list)]][[c]] <- mcmc.out[[c]]$mcmc.par } } - + tmp.settings <- pda.postprocess(tmp.settings, con, mcmc.param.list, pname, prior.list, prior.ind.orig, sffx = "_joint") - + current.step <- "JOINT - END" - save(list = ls(all.names = TRUE),envir=environment(),file=hbc.restart.file) - + save(list = ls(all.names = TRUE), envir = environment(), file = hbc.restart.file) } # joint - if end - - ## -------------------------------------- Hierarchical MCMC ------------------------------------------ - if(hierarchical){ # hierarchical - if begin - + + ## -------------------------------------- Hierarchical MCMC ------------------------------------------ + if (hierarchical) { # hierarchical - if begin + ## Get an ensemble id for hierarchical calibration tmp.settings$assim.batch$ensemble.id <- pda.create.ensemble(tmp.settings, con, workflow.id) - + ## history restart - hbc.restart.file <- file.path(tmp.settings$outdir,paste0("history.hier", - tmp.settings$assim.batch$ensemble.id, ".Rdata")) + hbc.restart.file <- file.path(tmp.settings$outdir, paste0( + "history.hier", + tmp.settings$assim.batch$ensemble.id, ".Rdata" + )) + + - - ## proposing starting points from knots mu_site_init <- list() - jump_init <- list() + jump_init <- list() # sample without replacement sampind <- sample(seq_len(nrow(SS.stack[[1]][[1]])), tmp.settings$assim.batch$chain) - - for(i in seq_len(tmp.settings$assim.batch$chain)){ + + for (i in seq_len(tmp.settings$assim.batch$chain)) { mu_site_init[[i]] <- SS.stack[[1]][[1]][sampind[i], 1:nparam] - jump_init[[i]] <- need_obj$resume.list[[i]]$jump + jump_init[[i]] <- need_obj$resume.list[[i]]$jump } current.step <- "HIERARCHICAL MCMC PREP" - save(list = ls(all.names = TRUE),envir=environment(),file=hbc.restart.file) - - + save(list = ls(all.names = TRUE), envir = environment(), file = hbc.restart.file) + + # start the clock ptm.start <- proc.time() - + # prepare for parallelization dcores <- parallel::detectCores() - 1 ncores <- min(max(dcores, 1), tmp.settings$assim.batch$chain) - + PEcAn.logger::logger.setOutputFile(file.path(tmp.settings$outdir, "pda.log")) - - cl <- parallel::makeCluster(ncores, type="FORK", outfile = file.path(tmp.settings$outdir, "pda.log")) - - + + cl <- parallel::makeCluster(ncores, type = "FORK", outfile = file.path(tmp.settings$outdir, "pda.log")) + + ## Sample posterior from emulator mcmc.out <- parallel::parLapply(cl, seq_len(tmp.settings$assim.batch$chain), function(chain) { - hier.mcmc(settings = tmp.settings, - gp.stack = gp.stack, - nmcmc = tmp.settings$assim.batch$iter * 3, # need to run chains longer than indv - rng_orig = rng_orig, - jmp0 = jump_init[[chain]], - mu_site_init = mu_site_init[[chain]], - nparam = length(prior.ind.all), - nsites = nsites, - llik.fn = llik.fn, - prior.fn.all = prior.fn.all, - prior.ind.all = prior.ind.all) + hier.mcmc( + settings = tmp.settings, + gp.stack = gp.stack, + nmcmc = tmp.settings$assim.batch$iter * 3, # need to run chains longer than indv + rng_orig = rng_orig, + jmp0 = jump_init[[chain]], + mu_site_init = mu_site_init[[chain]], + nparam = length(prior.ind.all), + nsites = nsites, + llik.fn = llik.fn, + prior.fn.all = prior.fn.all, + prior.ind.all = prior.ind.all + ) }) - + parallel::stopCluster(cl) - + # Stop the clock ptm.finish <- proc.time() - 
ptm.start PEcAn.logger::logger.info(paste0("Emulator MCMC took ", paste0(round(ptm.finish[3])), " seconds for ", paste0(tmp.settings$assim.batch$iter), " iterations.")) - + current.step <- "HIERARCHICAL MCMC END" - save(list = ls(all.names = TRUE),envir=environment(),file=hbc.restart.file) - + save(list = ls(all.names = TRUE), envir = environment(), file = hbc.restart.file) + # generate hierarhical posteriors - mcmc.out <- generate_hierpost(mcmc.out, prior.fn.all, prior.ind.all) - + mcmc.out <- generate_hierpost(mcmc.out, prior.fn.all, prior.ind.all) + # Collect global params in their own list and postprocess mcmc.param.list <- pda.sort.params(mcmc.out, sub.sample = "mu_global_samp", ns = NULL, prior.all, prior.ind.all.ns, sf, n.param.orig, prior.list, prior.fn.all) # processing these just for further analysis later, but con=NULL because these samples shouldn't be used for new runs later tmp.settings <- pda.postprocess(tmp.settings, con = NULL, mcmc.param.list, pname, prior.list, prior.ind.orig, sffx = "_hierarchical_mean") - + mcmc.param.list <- pda.sort.params(mcmc.out, sub.sample = "hierarchical_samp", ns = NULL, prior.all, prior.ind.all.ns, sf, n.param.orig, prior.list, prior.fn.all) tmp.settings <- pda.postprocess(tmp.settings, con, mcmc.param.list, pname, prior.list, prior.ind.orig, sffx = "_hierarchical") - + # Collect site-level params in their own list and postprocess - for(ns in seq_len(nsites)){ + for (ns in seq_len(nsites)) { mcmc.param.list <- pda.sort.params(mcmc.out, sub.sample = "mu_site_samp", ns = ns, prior.all, prior.ind.all.ns, sf, n.param.orig, prior.list, prior.fn.all) - settings <- pda.postprocess(tmp.settings, con, mcmc.param.list, pname, prior.list, prior.ind.orig, sffx = paste0("_hierarchical_SL",ns)) + settings <- pda.postprocess(tmp.settings, con, mcmc.param.list, pname, prior.list, prior.ind.orig, sffx = paste0("_hierarchical_SL", ns)) } - + current.step <- "HIERARCHICAL - END" - save(list = ls(all.names = TRUE),envir=environment(),file=hbc.restart.file) - + save(list = ls(all.names = TRUE), envir = environment(), file = hbc.restart.file) } # hierarchical - if end - } - diff --git a/modules/assim.batch/R/pda.generate.externals.R b/modules/assim.batch/R/pda.generate.externals.R index b20633eedf1..345b2ab55bd 100644 --- a/modules/assim.batch/R/pda.generate.externals.R +++ b/modules/assim.batch/R/pda.generate.externals.R @@ -6,7 +6,7 @@ ##' IMPORTANT: your obs must be ready to compare with model outputs in general, e.g. if you're passing flux data it should already be ustar filtered ##' e.g. ##' obs\[\[1\]\] -##' NEE posix +##' NEE posix ##' 4.590273e-09 2017-01-01 00:00:00 ##' NA 2017-01-01 00:30:00 ##' NA 2017-01-01 01:00:00 @@ -47,146 +47,152 @@ ##' prior.list <- list(data.frame(distn = c("norm", "beta"), parama = c(4, 1), paramb = c(7,2), n = rep(NA, 2), row.names = c("growth_resp_factor", "leaf_turnover_rate")), ##' data.frame(distn = c("unif", "unif"), parama = c(10, 4), paramb = c(40,27), n = rep(NA, 2), row.names = c("psnTOpt", "half_saturation_PAR"))) ##' @param external.knots boolean, if TRUE pass prior.list, ind.list, nknots OR knots.list arguments too -##' @param knots.list a list of dataframes (one per pft) where each row is a parameter vector, i.e. training points for the emulator. +##' @param knots.list a list of dataframes (one per pft) where each row is a parameter vector, i.e. training points for the emulator. ##' If not NULL these are used, otherwise knots will be generated using prior.list, ind.list and nknots. 
-##' @param ind.list a named list of vectors (one per pft), where each vector indicates the indices of the parameters on the prior.list targeted in the PDA +##' @param ind.list a named list of vectors (one per pft), where each vector indicates the indices of the parameters on the prior.list targeted in the PDA ##' e.g. ind.list <- list(temperate.deciduous = c(2), temperate.conifer = c(1,2)) ##' @param nknots number of knots you want to train the emulator on ##' @export ##' @examples ##' \dontrun{ -##' pda.externals <- pda.generate.externals(external.data = TRUE, obs = obs, +##' pda.externals <- pda.generate.externals(external.data = TRUE, obs = obs, ##' varn = "NEE", varid = 297, n_eff = 106.9386, -##' external.formats = TRUE, model_data_diag = TRUE, +##' external.formats = TRUE, model_data_diag = TRUE, ##' model.out = "/tmp/out/outdir", ##' start_date = "2017-01-01", end_date = "2018-12-31") ##' } -pda.generate.externals <- function(external.data = FALSE, obs = NULL, varn = NULL, varid = NULL, n_eff = NULL, align_method = "match_timestep", par = NULL, - model_data_diag = FALSE, model.out = NULL, start_date = NULL, end_date = NULL, - external.formats = FALSE, - external.priors = FALSE, prior.list = NULL, - external.knots = FALSE, knots.list = NULL, ind.list = NULL, nknots = NULL){ - +pda.generate.externals <- function(external.data = FALSE, obs = NULL, varn = NULL, varid = NULL, n_eff = NULL, align_method = "match_timestep", par = NULL, + model_data_diag = FALSE, model.out = NULL, start_date = NULL, end_date = NULL, + external.formats = FALSE, + external.priors = FALSE, prior.list = NULL, + external.knots = FALSE, knots.list = NULL, ind.list = NULL, nknots = NULL) { pda.externals <- list() ##################### external.data ##################### - if(external.data){ - if(is.null(obs) & is.null(varn) & is.null(varid)){ + if (external.data) { + if (is.null(obs) & is.null(varn) & is.null(varid)) { stop("If you want to generate external.data, the following args cannot be NULL: obs, varn, varid") } - external.data <- vector("list", length(varn)) - for(i in seq_along(external.data)){ - + external.data <- vector("list", length(varn)) + for (i in seq_along(external.data)) { # fill in external.data sublists : variable.name - variable.name <- list(variable.name = list(variable.drv = varn[i], variable.eqn = list(variables = varn[i], expression = varn[i]))) - external.data[[i]]$variable.name <- variable.name - + variable.name <- list(variable.name = list(variable.drv = varn[i], variable.eqn = list(variables = varn[i], expression = varn[i]))) + external.data[[i]]$variable.name <- variable.name + # fill in external.data sublists : variable.id - external.data[[i]]$variable.id <- varid[i] - + external.data[[i]]$variable.id <- varid[i] + # fill in external.data sublists : input.id - external.data[[i]]$input.id <- NA - + external.data[[i]]$input.id <- NA + # fill in external.data sublists : align.method - external.data[[i]]$align.method <- align_method - + external.data[[i]]$align.method <- align_method + # fill in external.data sublists : data - external.data[[i]]$data <- obs[[i]] - + external.data[[i]]$data <- obs[[i]] + # fill in external.data sublists : obs - external.data[[i]]$obs <- obs[[i]][[varn[i]]] - + external.data[[i]]$obs <- obs[[i]][[varn[i]]] + # fill in external.data sublists : par - if(!is.null(par)){ + if (!is.null(par)) { external.data[[i]]$par <- par[[i]] - }else{ - AMF.params <- PEcAn.uncertainty::flux.uncertainty(measurement = external.data[[i]]$obs, - QC = rep(0, 
length(external.data[[i]]$obs)), - flags = TRUE, bin.num = 20) + } else { + AMF.params <- PEcAn.uncertainty::flux.uncertainty( + measurement = external.data[[i]]$obs, + QC = rep(0, length(external.data[[i]]$obs)), + flags = TRUE, bin.num = 20 + ) external.data[[i]]$par <- c(AMF.params$intercept, AMF.params$slopeP, AMF.params$slopeN) } - + # fill in external.data sublists : n external.data[[i]]$n <- sum(!is.na(external.data[[i]]$obs)) - + # fill in external.data sublists : n_eff - if(!is.null(n_eff)){ + if (!is.null(n_eff)) { external.data[[i]]$n_eff <- n_eff[i] } } } pda.externals$external.data <- external.data - + ##################### external.formats ##################### - if(external.formats){ - if(is.null(varn)){ + if (external.formats) { + if (is.null(varn)) { stop("If you want to generate external.formats, varn cannot be NULL.") } external.formats <- list() - for(i in seq_along(varn)){ - external.formats[[i]] <- list(vars = list(bety_name = varn[i], pecan_name = varn[i])) + for (i in seq_along(varn)) { + external.formats[[i]] <- list(vars = list(bety_name = varn[i], pecan_name = varn[i])) } } pda.externals$external.formats <- external.formats - + ##################### external.priors ##################### - if(external.priors){ - if(is.null(prior.list)){ + if (external.priors) { + if (is.null(prior.list)) { stop("If you want to generate external.priors, prior.list cannot be NULL.") } external.priors <- prior.list } pda.externals$external.priors <- external.priors - + ##################### external.knots ##################### - if(external.knots){ - #todo: generate external knots from prior.list - if(is.null(knots.list) | (is.null(prior.list) & is.null(ind.list) & is.null(nknots))){ + if (external.knots) { + # todo: generate external knots from prior.list + if (is.null(knots.list) | (is.null(prior.list) & is.null(ind.list) & is.null(nknots))) { stop("If you want to generate external.knots, please pass either prior.list, ind.list and nknots OR knots.list cannot args.") - }else if(!is.null(knots.list)){ + } else if (!is.null(knots.list)) { external.knots <- knots.list - }else if(!is.null(prior.list) & !is.null(ind.list) & !is.null(nknots)){ + } else if (!is.null(prior.list) & !is.null(ind.list) & !is.null(nknots)) { prior.fcnx <- lapply(prior.list, pda.define.prior.fn) - knots.list <- lapply(seq_along(prior.list), - function(x) pda.generate.knots(nknots, sf = NULL, - n.param.all = nrow(prior.list[[x]]), - prior.ind = ind.list[[x]], - prior.fn = prior.fcnx[[x]], - pname = row.names(prior.list[[x]]))) + knots.list <- lapply( + seq_along(prior.list), + function(x) { + pda.generate.knots(nknots, + sf = NULL, + n.param.all = nrow(prior.list[[x]]), + prior.ind = ind.list[[x]], + prior.fn = prior.fcnx[[x]], + pname = row.names(prior.list[[x]]) + ) + } + ) names(knots.list) <- names(ind.list) external.knots <- lapply(knots.list, `[[`, "params") } - } pda.externals$external.knots <- external.knots - - + + ##################### model & data alignment diagnostics ##################### - if(model_data_diag){ - if(is.null(obs) & is.null(model.out) & is.null(varn) & is.null(start_date) & is.null(end_date)){ + if (model_data_diag) { + if (is.null(obs) & is.null(model.out) & is.null(varn) & is.null(start_date) & is.null(end_date)) { stop("If you want to check model data alignment diagnostics, the following args cannot be NULL: obs, model.out, varn, start_date, end_date") } - model_data_diag <- list() - start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) + model_data_diag 
<- list() + start_year <- lubridate::year(start_date) + end_year <- lubridate::year(end_date) vars <- c("time", varn) - model.raw <- as.data.frame(PEcAn.utils::read.output(basename(model.out), - outdir = model.out, - start_year, end_year, variables = vars)) - model.secs <- PEcAn.utils::ud_convert(model.raw$time, "days" ,"seconds") - model.raw$posix <- seq.POSIXt(from = as.POSIXlt(start_date, tz="GMT"), by = round(diff(model.secs)[1]), length.out = length(model.raw$time)) - - for(i in seq_along(varn)){ - dat <- PEcAn.benchmark::align_data(model.calc = model.raw, obvs.calc = obs[[i]], var = varn[i], align_method = align_method) - obvz <- dat[,colnames(dat) == paste0(varn[i],".o"), drop = FALSE][[1]] - modz <- dat[,colnames(dat) == paste0(varn[i],".m"), drop = FALSE][[1]] + model.raw <- as.data.frame(PEcAn.utils::read.output(basename(model.out), + outdir = model.out, + start_year, end_year, variables = vars + )) + model.secs <- PEcAn.utils::ud_convert(model.raw$time, "days", "seconds") + model.raw$posix <- seq.POSIXt(from = as.POSIXlt(start_date, tz = "GMT"), by = round(diff(model.secs)[1]), length.out = length(model.raw$time)) + + for (i in seq_along(varn)) { + dat <- PEcAn.benchmark::align_data(model.calc = model.raw, obvs.calc = obs[[i]], var = varn[i], align_method = align_method) + obvz <- dat[, colnames(dat) == paste0(varn[i], ".o"), drop = FALSE][[1]] + modz <- dat[, colnames(dat) == paste0(varn[i], ".m"), drop = FALSE][[1]] plot(obvz, main = "Model & data", ylab = varn[i], ylim = range(c(obvz, modz), na.rm = TRUE)) graphics::points(modz, col = "red", pch = "+") - graphics::legend("topleft", legend=c("model", "data"), pch=c("+", "o"), col=c("red", "black")) - model_data_diag[[i]] <- dat + graphics::legend("topleft", legend = c("model", "data"), pch = c("+", "o"), col = c("red", "black")) + model_data_diag[[i]] <- dat } } pda.externals$model_data_diag <- model_data_diag - + return(pda.externals) -} \ No newline at end of file +} diff --git a/modules/assim.batch/R/pda.get.model.output.R b/modules/assim.batch/R/pda.get.model.output.R index 3eac5f602fc..14046501fbe 100644 --- a/modules/assim.batch/R/pda.get.model.output.R +++ b/modules/assim.batch/R/pda.get.model.output.R @@ -8,21 +8,20 @@ ##' @param external.formats format list ##' ##' @return A list containing model outputs extracted to correspond to each observational -##' dataset being used for PDA. +##' dataset being used for PDA. ##' ##' @author Ryan Kelly, Istem Fer ##' @export pda.get.model.output <- function(settings, run.id, bety, inputs, external.formats = NULL) { - input.info <- settings$assim.batch$inputs - - start.year <- strftime(settings$run$start.date,"%Y") - end.year <- strftime(settings$run$end.date,"%Y") - + + start.year <- strftime(settings$run$start.date, "%Y") + end.year <- strftime(settings$run$end.date, "%Y") + model.out <- list() n.input <- length(inputs) - - for(k in 1:n.input){ + + for (k in 1:n.input) { # if there is a deriation is requested this line takes care of it variable <- lapply(input.info[[k]]$variable.name, PEcAn.utils::convert.expr) # convert.expr returns variable names in the data and in the model @@ -33,58 +32,60 @@ pda.get.model.output <- function(settings, run.id, bety, inputs, external.format # get the derivation expression, e.g. 
'TotalSoil-Litter' # if no derivation is requested expr will be the same as variable name expr <- lapply(variable.name, `[[`, "expression") - - if(!is.null(external.formats)){ + + if (!is.null(external.formats)) { format <- external.formats[[k]] - }else{ - format <- PEcAn.DB::query.format.vars(bety = bety, - input.id = settings$assim.batch$inputs[[k]]$input.id) + } else { + format <- PEcAn.DB::query.format.vars( + bety = bety, + input.id = settings$assim.batch$inputs[[k]]$input.id + ) } - for(l in seq_along(model.var)){ - - if(length(model.var[[l]][model.var[[l]] %in% format$vars$bety_name]) != 0){ - + for (l in seq_along(model.var)) { + if (length(model.var[[l]][model.var[[l]] %in% format$vars$bety_name]) != 0) { # convert names for model outputs - expr[[l]] <- gsub(model.var[[l]][model.var[[l]] %in% format$vars$bety_name], - format$vars$pecan_name[format$vars$bety_name %in% model.var[[l]][model.var[[l]] %in% format$vars$bety_name]], - expr[[l]]) - - model.var[[l]][model.var[[l]] %in% format$vars$bety_name] <- + expr[[l]] <- gsub( + model.var[[l]][model.var[[l]] %in% format$vars$bety_name], + format$vars$pecan_name[format$vars$bety_name %in% model.var[[l]][model.var[[l]] %in% format$vars$bety_name]], + expr[[l]] + ) + + model.var[[l]][model.var[[l]] %in% format$vars$bety_name] <- format$vars$pecan_name[format$vars$bety_name %in% model.var[[l]][model.var[[l]] %in% format$vars$bety_name]] - - } - # this is only for FC-NEE as we are using them interchangably when NEE isn't present, e.g. Ameriflux data + # this is only for FC-NEE as we are using them interchangably when NEE isn't present, e.g. Ameriflux data # FC - NEE specific hack - if(any(model.var[[l]] %in% c("FC"))){ - model.var[[l]][model.var[[l]] %in% c("FC")] <- "NEE" - expr[[l]] <- gsub("FC", "NEE", expr[[l]]) - } + if (any(model.var[[l]] %in% c("FC"))) { + model.var[[l]][model.var[[l]] %in% c("FC")] <- "NEE" + expr[[l]] <- gsub("FC", "NEE", expr[[l]]) + } } - + # prepare model output variable names vars.used <- unlist(model.var) # UST is never in the model outputs vars.used <- vars.used[!vars.used %in% c("UST")] - + # We also want 'time' from model outputs for aligning step - vars <- c("time", vars.used) - - - + vars <- c("time", vars.used) + + + # read model output - model.raw <- as.data.frame(PEcAn.utils::read.output(run.id, outdir = file.path(settings$modeloutdir, run.id), - start.year, end.year, variables = vars)) - - if(length(model.raw) == 0 | all(is.na(model.raw))) { # Probably indicates model failed entirely + model.raw <- as.data.frame(PEcAn.utils::read.output(run.id, + outdir = file.path(settings$modeloutdir, run.id), + start.year, end.year, variables = vars + )) + + if (length(model.raw) == 0 | all(is.na(model.raw))) { # Probably indicates model failed entirely out <- list() out$model.out <- NA return(out) } - - + + # normally there is only one output variable (derived or not) corresponding to data # as we split different inputs in the inputs list in the beginning (see that here we loop over the n.input) # for Ameriflux we have UST passed through within the same tag which is only data-related @@ -94,41 +95,41 @@ pda.get.model.output <- function(settings, run.id, bety, inputs, external.format vars.used.ind <- which(sapply(model.var, function(v) all(v %in% vars)) == TRUE) sapply(model.var[[vars.used.ind]], function(x) assign(x, model.raw[x], envir = .GlobalEnv)) out <- eval(parse(text = expr[[vars.used.ind]])) - - + + # prepare for the variables that is going to be used in align_data # change data variable names (e.g. 
"LE") to model output variable names (e.g. "Qle") data.var <- sapply(inputs[[k]]$variable.name, `[[`, "variable.drv") - - data.var[data.var %in% format$vars$input_name] <- + + data.var[data.var %in% format$vars$input_name] <- format$vars$pecan_name[format$vars$input_name %in% data.var[data.var %in% format$vars$input_name]] - + # UST is never in the model outputs data.var <- data.var[!data.var %in% c("UST")] - + colnames(out) <- data.var model <- data.frame(time = model.raw$time, out) - + ## Handle model time # the model output time is in days since the beginning of the year - model.secs <- PEcAn.utils::ud_convert(model$time, "days" ,"seconds") - + model.secs <- PEcAn.utils::ud_convert(model$time, "days", "seconds") + # seq.POSIXt returns class "POSIXct" # the model output is since the beginning of the year but 'settings$run$start.date' may not be the first day of the year, using lubridate::floor_date - if(diff(model.secs)[1] != 0){ - model$posix <- seq.POSIXt(from = as.POSIXlt(settings$run$start.date, tz="GMT"), by = round(diff(model.secs)[1]), length.out = length(model$time)) - }else{ + if (diff(model.secs)[1] != 0) { + model$posix <- seq.POSIXt(from = as.POSIXlt(settings$run$start.date, tz = "GMT"), by = round(diff(model.secs)[1]), length.out = length(model$time)) + } else { # yearly output - model$posix <- seq.POSIXt(from = as.POSIXlt(settings$run$start.date, tz="GMT"), by = "year", length.out = length(model$time)) + model$posix <- seq.POSIXt(from = as.POSIXlt(settings$run$start.date, tz = "GMT"), by = "year", length.out = length(model$time)) } - + dat <- PEcAn.benchmark::align_data(model.calc = model, obvs.calc = inputs[[k]]$data, var = data.var, align_method = inputs[[k]]$align.method) - model.out[[k]] <- dat[,colnames(dat) %in% paste0(data.var,".m"), drop = FALSE] - inputs[[k]]$obs <- dat[,colnames(dat) %in% paste0(data.var,".o"), drop = FALSE][[1]] - inputs[[k]]$n <- sum(!is.na(inputs[[k]]$obs)) + model.out[[k]] <- dat[, colnames(dat) %in% paste0(data.var, ".m"), drop = FALSE] + inputs[[k]]$obs <- dat[, colnames(dat) %in% paste0(data.var, ".o"), drop = FALSE][[1]] + inputs[[k]]$n <- sum(!is.na(inputs[[k]]$obs)) colnames(model.out[[k]]) <- data.var } - + return(list(model.out = model.out, inputs = inputs)) } diff --git a/modules/assim.batch/R/pda.load.data.R b/modules/assim.batch/R/pda.load.data.R index 8f59cd31f18..ab33257ab03 100644 --- a/modules/assim.batch/R/pda.load.data.R +++ b/modules/assim.batch/R/pda.load.data.R @@ -1,9 +1,9 @@ ##' Load Dataset for Paramater Data Assimilation ##' ##' @title Load Dataset for Paramater Data Assimilation -##' ##' This function is used to load and preprocess data for PDA. It is expected to be moved / merged +##' ##' This function is used to load and preprocess data for PDA. It is expected to be moved / merged ##' with a more general PEcAn 'load_data' function eventually. 
-##' +##' ##' @param settings = PEcAn settings list ##' @param bety database connection object ##' @param external.formats formats list @@ -13,78 +13,78 @@ ##' @author Ryan Kelly, Istem Fer ##' @export load.pda.data <- function(settings, bety, external.formats = NULL) { - # Outlining setup for multiple datasets - inputs <- list() + inputs <- list() input.settings <- settings$assim.batch$inputs - n.input <- length(input.settings) - - for(i in seq_len(n.input)) { - inputs[[i]] <- list() - + n.input <- length(input.settings) + + for (i in seq_len(n.input)) { + inputs[[i]] <- list() + inputs[[i]]$variable.name <- lapply(input.settings[[i]]$variable.name, PEcAn.utils::convert.expr) - data.var <- sapply(inputs[[i]]$variable.name, `[[`, "variable.drv") + data.var <- sapply(inputs[[i]]$variable.name, `[[`, "variable.drv") - data.path <- input.settings[[i]]$path + data.path <- input.settings[[i]]$path + + inputs[[i]]$variable.id <- input.settings[[i]]$variable.id + inputs[[i]]$input.id <- input.settings[[i]]$input.id + inputs[[i]]$align.method <- ifelse(!is.null(input.settings[[i]]$align.method), input.settings[[i]]$align.method, "match_timestep") - inputs[[i]]$variable.id <- input.settings[[i]]$variable.id - inputs[[i]]$input.id <- input.settings[[i]]$input.id - inputs[[i]]$align.method <- ifelse(!is.null(input.settings[[i]]$align.method), input.settings[[i]]$align.method, "match_timestep") - # I require that the user defines data.path in the settings as well, instead of using query.file.path - # because 'data.path <- query.file.path(obvs.id, con)' might return an incomplete path + # because 'data.path <- query.file.path(obvs.id, con)' might return an incomplete path # which results in reading all the files in that particular directory in the load_x_netcdf step if (is.null(inputs[[i]]$input.id) | is.null(data.path)) { PEcAn.logger::logger.error("Must provide both ID and PATH for all data assimilation inputs.") } - - if(is.null(bety)){ + + if (is.null(bety)) { format <- external.formats[[i]] - }else{ + } else { format <- PEcAn.DB::query.format.vars(bety = bety, input.id = inputs[[i]]$input.id) } - + vars.used.index <- which(format$vars$bety_name %in% data.var) - - inputs[[i]]$data <- PEcAn.benchmark::load_data(data.path = data.path, - format = format, - start_year = lubridate::year(settings$run$start.date), - end_year = lubridate::year(settings$run$end.date), - site = settings$run$site, - vars.used.index = vars.used.index, - time.row = format$time.row) - + + inputs[[i]]$data <- PEcAn.benchmark::load_data( + data.path = data.path, + format = format, + start_year = lubridate::year(settings$run$start.date), + end_year = lubridate::year(settings$run$end.date), + site = settings$run$site, + vars.used.index = vars.used.index, + time.row = format$time.row + ) + ## Preprocess data # TODO: Generalize # TODO: Soil Respiration uncertainty calculation - if(all(data.var %in% c("NEE", "FC", "LE", "UST"))) { - - ustar.thresh <- 0.4 # TODO: soft code this - - var.obs <- colnames(inputs[[i]]$data)[!colnames(inputs[[i]]$data) %in% c("UST", "posix", "year", format$vars[format$time.row,]$bety_name)] - - AMFo <- inputs[[i]]$data[[var.obs]] - UST <- inputs[[i]]$data$UST - AMFo[AMFo == -9999] <- NA - AMFo[UST < ustar.thresh] <- NA + if (all(data.var %in% c("NEE", "FC", "LE", "UST"))) { + ustar.thresh <- 0.4 # TODO: soft code this + + var.obs <- colnames(inputs[[i]]$data)[!colnames(inputs[[i]]$data) %in% c("UST", "posix", "year", format$vars[format$time.row, ]$bety_name)] + + AMFo <- inputs[[i]]$data[[var.obs]] + UST <- 
inputs[[i]]$data$UST + AMFo[AMFo == -9999] <- NA + AMFo[UST < ustar.thresh] <- NA inputs[[i]]$data[[var.obs]] <- AMFo # write filtered data - + # Have to just pretend like these quality control variables exist... - AMFq <- rep(0, length(AMFo)) + AMFq <- rep(0, length(AMFo)) flags <- TRUE - + AMF.params <- PEcAn.uncertainty::flux.uncertainty(AMFo, AMFq, flags, bin.num = 20) - + inputs[[i]]$obs <- AMFo inputs[[i]]$par <- c(AMF.params$intercept, AMF.params$slopeP, AMF.params$slopeN) - }else{ + } else { inputs[[i]]$obs <- inputs[[i]]$data[colnames(inputs[[i]]$data) %in% data.var] inputs[[i]]$par <- stats::sd(unlist(inputs[[i]]$obs), na.rm = TRUE) # testing } inputs[[i]]$n <- sum(!is.na(inputs[[i]]$obs)) - } # end loop over files - + } # end loop over files + return(inputs) } # load.pda.data diff --git a/modules/assim.batch/R/pda.mcmc.R b/modules/assim.batch/R/pda.mcmc.R index 690be603814..f96f7d22627 100644 --- a/modules/assim.batch/R/pda.mcmc.R +++ b/modules/assim.batch/R/pda.mcmc.R @@ -25,7 +25,6 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = NULL, chain = NULL, iter = NULL, adapt = NULL, adj.min = NULL, ar.target = NULL, jvar = NULL, n.knot = NULL) { - ## this bit of code is useful for defining the variables passed to this function if you are ## debugging if (FALSE) { @@ -35,9 +34,11 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = ## -------------------------------------- Setup ------------------------------------- ## Handle ## settings - settings <- pda.settings(settings = settings, params.id = params.id, param.names = param.names, - prior.id = prior.id, chain = chain, iter = iter, adapt = adapt, adj.min = adj.min, ar.target = ar.target, - jvar = jvar, n.knot = n.knot) + settings <- pda.settings( + settings = settings, params.id = params.id, param.names = param.names, + prior.id = prior.id, chain = chain, iter = iter, adapt = adapt, adj.min = adj.min, ar.target = ar.target, + jvar = jvar, n.knot = n.knot + ) ## will be used to check if multiplicative Gaussian is requested @@ -58,14 +59,14 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = bety <- PEcAn.DB::db.open(settings$database$bety) ## Load priors - temp <- pda.load.priors(settings, bety) - prior.list <- temp$prior - settings <- temp$settings - pname <- lapply(prior.list, rownames) + temp <- pda.load.priors(settings, bety) + prior.list <- temp$prior + settings <- temp$settings + pname <- lapply(prior.list, rownames) n.param.all <- sapply(prior.list, nrow) ## Load data to assimilate against - inputs <- load.pda.data(settings, bety) + inputs <- load.pda.data(settings, bety) n.input <- length(inputs) # get hyper parameters if any @@ -75,14 +76,18 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = do.call("require", list(paste0("PEcAn.", settings$model$type))) my.write.config <- paste0("write.config.", settings$model$type) if (!exists(my.write.config)) { - PEcAn.logger::logger.severe(paste(my.write.config, - "does not exist. Please make sure that the PEcAn interface is loaded for", - settings$model$type)) + PEcAn.logger::logger.severe(paste( + my.write.config, + "does not exist. 
Please make sure that the PEcAn interface is loaded for", + settings$model$type + )) } ## Select parameters to constrain - prior.ind <- lapply(seq_along(settings$pfts), - function(x) which(pname[[x]] %in% settings$assim.batch$param.names[[x]])) + prior.ind <- lapply( + seq_along(settings$pfts), + function(x) which(pname[[x]] %in% settings$assim.batch$param.names[[x]]) + ) n.param <- sapply(prior.ind, length) ## Get the workflow id @@ -99,10 +104,10 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = llik.fn <- pda.define.llik.fn(settings) ## Set prior distribution functions (d___, q___, r___, and multivariate versions) - prior.all <- do.call("rbind", prior.list) - prior.fn.all <- pda.define.prior.fn(prior.all) + prior.all <- do.call("rbind", prior.list) + prior.fn.all <- pda.define.prior.fn(prior.all) prior.ind.all <- which(unlist(pname) %in% unlist(settings$assim.batch$param.names)) - pname.all <- unlist(pname) + pname.all <- unlist(pname) ## ----------------------------------- MCMC Setup ----------------------------------- ## @@ -113,23 +118,25 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = ## Initialize empty params matrix (concatenated to params from a previous PDA, if provided) params <- params.list$params - start <- params.list$start + start <- params.list$start finish <- params.list$finish iter.flag <- 1 - if(!is.null(settings$assim.batch$extension) & !is.null(params.list$llpars)){ - llpars <- params.list$llpars + if (!is.null(settings$assim.batch$extension) & !is.null(params.list$llpars)) { + llpars <- params.list$llpars llparnames <- sapply(strsplit(colnames(llpars), "\\."), `[[`, 1) - bias <- llpars[ ,llparnames == "bias"] - nobias <- llpars[ ,llparnames != "bias"] - all.bias <- bias[length(bias)] - parl <- nobias[length(nobias)] - LLpar <- matrix(NA, ncol= ncol(llpars), nrow = (finish-start)+1, - dimnames = list(NULL, colnames(llpars))) + bias <- llpars[, llparnames == "bias"] + nobias <- llpars[, llparnames != "bias"] + all.bias <- bias[length(bias)] + parl <- nobias[length(nobias)] + LLpar <- matrix(NA, + ncol = ncol(llpars), nrow = (finish - start) + 1, + dimnames = list(NULL, colnames(llpars)) + ) LLpar <- rbind(llpars, LLpar) - par.flag <- TRUE - iter.flag <- 0 + par.flag <- TRUE + iter.flag <- 0 } ## Set initial conditions @@ -142,22 +149,28 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = LL.old <- prior.old <- -Inf ## Jump distribution setup - accept.rate <- numeric(sum(n.param)) ## Create acceptance rate vector of 0's (one zero per parameter) + accept.rate <- numeric(sum(n.param)) ## Create acceptance rate vector of 0's (one zero per parameter) # Default jump variances. 
default to 0.1 * 90% prior CI if (!is.null(settings$assim.batch$extension)) { - load(settings$assim.batch$jvar.path) # loads params + load(settings$assim.batch$jvar.path) # loads params jmp.vars <- jvar.list[[chain]] } else { - jmp.vars <- sapply(prior.fn.all$qprior, - function(x) 0.1 * diff(eval(x, list(p = c(0.05, 0.95)))))[prior.ind.all] + jmp.vars <- sapply( + prior.fn.all$qprior, + function(x) 0.1 * diff(eval(x, list(p = c(0.05, 0.95)))) + )[prior.ind.all] } ## Create dir for diagnostic output if (!is.null(settings$assim.batch$diag.plot.iter)) { - dir.create(file.path(settings$outdir, - paste0("diag.pda", settings$assim.batch$ensemble.id)), - showWarnings = FALSE, recursive = TRUE) + dir.create( + file.path( + settings$outdir, + paste0("diag.pda", settings$assim.batch$ensemble.id) + ), + showWarnings = FALSE, recursive = TRUE + ) } ## save updated settings XML. Will be overwritten at end, but useful in case of crash @@ -165,18 +178,21 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = PEcAn.settings::listToXml(settings, "pecan"), file = file.path( settings$outdir, - paste0("pecan.pda", settings$assim.batch$ensemble.id, ".xml"))) + paste0("pecan.pda", settings$assim.batch$ensemble.id, ".xml") + ) + ) ## --------------------------------- Main MCMC loop --------------------------------- ## for (i in start:finish) { PEcAn.logger::logger.info(paste("Data assimilation MCMC iteration", i, "of", finish)) ## Adjust Jump distribution - if (i%%settings$assim.batch$jump$adapt < 1) { + if (i %% settings$assim.batch$jump$adapt < 1) { jmp.vars <- pda.adjust.jumps(settings, - jmp.vars, - accept.rate, - pnames = pname.all[prior.ind.all]) + jmp.vars, + accept.rate, + pnames = pname.all[prior.ind.all] + ) accept.rate <- numeric(sum(n.param)) # # Save updated settings XML. 
Will be overwritten at end, but useful in case of crash @@ -207,12 +223,13 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = if (is.finite(prior.star)) { ## Set up run and write run configs run.id <- pda.init.run(settings, - con, - my.write.config, - workflow.id, - run.params, - n = 1, - run.names = paste0("MCMC_chain.", chain, "_iteration.", i, "_variable.", j)) + con, + my.write.config, + workflow.id, + run.params, + n = 1, + run.names = paste0("MCMC_chain.", chain, "_iteration.", i, "_variable.", j) + ) ## Start model run PEcAn.workflow::start_model_runs(settings, settings$database$bety$write) @@ -220,21 +237,21 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = ## Read model outputs align.return <- pda.get.model.output(settings, run.id, bety, inputs) model.out <- align.return$model.out - if(!is.na(model.out)){ + if (!is.na(model.out)) { inputs <- align.return$inputs } # do this once - if(i==1){ + if (i == 1) { inputs <- pda.neff.calc(inputs) } # retrieve n - n.of.obs <- sapply(inputs,`[[`, "n") - names(n.of.obs) <- sapply(model.out,names) + n.of.obs <- sapply(inputs, `[[`, "n") + names(n.of.obs) <- sapply(model.out, names) # handle bias parameters if multiplicative Gaussian is listed in the likelihoods - if(any(unlist(any.mgauss) == "multipGauss")) { + if (any(unlist(any.mgauss) == "multipGauss")) { isbias <- which(unlist(any.mgauss) == "multipGauss") # testing now nbias <- 1 @@ -244,8 +261,8 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = bias.terms <- NULL } - if(!is.null(bias.terms)){ - all.bias <- lapply(bias.terms, function(n) n[1,]) + if (!is.null(bias.terms)) { + all.bias <- lapply(bias.terms, function(n) n[1, ]) all.bias <- do.call("rbind", all.bias) } else { all.bias <- NULL @@ -253,21 +270,25 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = ## calculate error statistics pda.errors <- pda.calc.error(settings, con, model_out = model.out, run.id, inputs, all.bias) - llik.par <- pda.calc.llik.par(settings, n = n.of.obs, - error.stats = unlist(pda.errors), - hyper.pars) + llik.par <- pda.calc.llik.par(settings, + n = n.of.obs, + error.stats = unlist(pda.errors), + hyper.pars + ) # store llik-par - parl <- unlist(sapply(llik.par, `[[` , "par")) - if(!is.null(parl) & iter.flag == 1 & is.null(all.bias)) { - LLpar <- matrix(NA, ncol= length(parl), nrow = finish, dimnames = list(NULL, names(parl))) + parl <- unlist(sapply(llik.par, `[[`, "par")) + if (!is.null(parl) & iter.flag == 1 & is.null(all.bias)) { + LLpar <- matrix(NA, ncol = length(parl), nrow = finish, dimnames = list(NULL, names(parl))) par.flag <- TRUE iter.flag <- 0 - } else if(!is.null(parl) & iter.flag == 1 & !is.null(all.bias)) { - LLpar <- matrix(NA, ncol= length(parl) + nrow(all.bias), nrow = finish, - dimnames = list(NULL, c(rownames(all.bias), names(parl)))) + } else if (!is.null(parl) & iter.flag == 1 & !is.null(all.bias)) { + LLpar <- matrix(NA, + ncol = length(parl) + nrow(all.bias), nrow = finish, + dimnames = list(NULL, c(rownames(all.bias), names(parl))) + ) par.flag <- TRUE iter.flag <- 0 - } else if(iter.flag == 1){ + } else if (iter.flag == 1) { par.flag <- FALSE iter.flag <- 0 } @@ -278,7 +299,7 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = ## Accept or reject step a <- LL.new - LL.old + prior.star - prior.old if (is.na(a)) { - a <- -Inf # Can occur if LL.new == -Inf (due to model crash) and LL.old == -Inf (first run) + a <- -Inf # Can 
occur if LL.new == -Inf (due to model crash) and LL.old == -Inf (first run) } if (a > log(stats::runif(1))) { @@ -287,40 +308,45 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = parm <- pstar accept.rate[j] <- accept.rate[j] + 1 } - } ## end if(is.finite(prior.star)) - } ## end loop over variables + } ## end if(is.finite(prior.star)) + } ## end loop over variables ## Diagnostic figure if (!is.null(settings$assim.batch$diag.plot.iter) && is.finite(prior.star) && - (i == start | - i == finish | - (i%%settings$assim.batch$diag.plot.iter == 0))) { + (i == start | + i == finish | + (i %% settings$assim.batch$diag.plot.iter == 0))) { grDevices::pdf( file.path( settings$outdir, paste0("diag.pda", settings$assim.batch$ensemble.id), - paste0("data.vs.model_", gsub(" ", "0", sprintf("%5.0f", i)), ".pdf"))) + paste0("data.vs.model_", gsub(" ", "0", sprintf("%5.0f", i)), ".pdf") + ) + ) - NEEo <- inputs[[1]]$obs - NEEm <- model.out[[1]] + NEEo <- inputs[[1]]$obs + NEEm <- model.out[[1]] NEE.resid <- NEEm - NEEo graphics::par(mfrow = c(1, 2)) graphics::plot(NEEo) graphics::points(NEEm, col = 2, cex = 0.5) - graphics::legend("topleft", col = c(1, 2), pch = 1, - legend = c("data", "model")) + graphics::legend("topleft", + col = c(1, 2), pch = 1, + legend = c("data", "model") + ) graphics::hist(NEE.resid, 100, - main = paste0("LLik: ", round(LL.new, 1))) + main = paste0("LLik: ", round(LL.new, 1)) + ) grDevices::dev.off() } ## Store output params[i, ] <- parm - if(!is.null(parl) & is.null(all.bias)){ - LLpar[i, ] <- parl - } else if (!is.null(parl) & !is.null(all.bias)){ - LLpar[i, ] <- c(all.bias, parl) + if (!is.null(parl) & is.null(all.bias)) { + LLpar[i, ] <- parl + } else if (!is.null(parl) & !is.null(all.bias)) { + LLpar[i, ] <- c(all.bias, parl) } ## Add to temp file (overwrite when i=1, append thereafter) @@ -329,15 +355,19 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = mcmc.list[[chain]] <- params jvar.list[[chain]] <- jmp.vars - if(par.flag){ + if (par.flag) { llpar.list[[chain]] <- LLpar } - } # end of chain-loop - - settings$assim.batch$mcmc.path <- file.path(settings$outdir, - paste0("mcmc.list.pda", - settings$assim.batch$ensemble.id, - ".Rdata")) + } # end of chain-loop + + settings$assim.batch$mcmc.path <- file.path( + settings$outdir, + paste0( + "mcmc.list.pda", + settings$assim.batch$ensemble.id, + ".Rdata" + ) + ) save(mcmc.list, file = settings$assim.batch$mcmc.path) # subset to params of interst only @@ -347,29 +377,37 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = mcmc.param.list <- list() ind <- 0 for (i in seq_along(settings$pfts)) { - mcmc.param.list[[i]] <- lapply(mcmc.list, - function(x) x[, (ind + 1):(ind + n.param[i]), drop = FALSE]) + mcmc.param.list[[i]] <- lapply( + mcmc.list, + function(x) x[, (ind + 1):(ind + n.param[i]), drop = FALSE] + ) ind <- ind + n.param[i] } - if (par.flag){ - mcmc.param.list[[length(mcmc.param.list)+1]] <- list() - prior.list[[length(prior.list)+1]] <- list() - for(c in seq_len(settings$assim.batch$chain)){ + if (par.flag) { + mcmc.param.list[[length(mcmc.param.list) + 1]] <- list() + prior.list[[length(prior.list) + 1]] <- list() + for (c in seq_len(settings$assim.batch$chain)) { mcmc.param.list[[length(mcmc.param.list)]][[c]] <- llpar.list[[c]] } - settings$assim.batch$llpar.path <- file.path(settings$outdir, - paste0("llpar.pda", - settings$assim.batch$ensemble.id, - ".Rdata")) + settings$assim.batch$llpar.path <- file.path( + 
settings$outdir, + paste0( + "llpar.pda", + settings$assim.batch$ensemble.id, + ".Rdata" + ) + ) save(llpar.list, file = settings$assim.batch$llpar.path) } ## ------------------------------------ Clean up ------------------------------------ ## Save outputs to plots, files, and db - settings$assim.batch$jvar.path <- file.path(settings$outdir, - paste0("jvar.pda", settings$assim.batch$ensemble.id, ".Rdata")) + settings$assim.batch$jvar.path <- file.path( + settings$outdir, + paste0("jvar.pda", settings$assim.batch$ensemble.id, ".Rdata") + ) save(jvar.list, file = settings$assim.batch$jvar.path) settings <- pda.postprocess(settings, con, mcmc.param.list, pname, prior.list, prior.ind) @@ -381,5 +419,4 @@ pda.mcmc <- function(settings, params.id = NULL, param.names = NULL, prior.id = ## Output an updated settings list return(settings) - } # pda.mcmc diff --git a/modules/assim.batch/R/pda.mcmc.bs.R b/modules/assim.batch/R/pda.mcmc.bs.R index 342ba43c1bb..a294d247271 100644 --- a/modules/assim.batch/R/pda.mcmc.bs.R +++ b/modules/assim.batch/R/pda.mcmc.bs.R @@ -24,7 +24,6 @@ pda.mcmc.bs <- function(settings, params.id = NULL, param.names = NULL, prior.id = NULL, chain = NULL, iter = NULL, adapt = NULL, adj.min = NULL, ar.target = NULL, jvar = NULL, n.knot = NULL) { - ## this bit of code is useful for defining the variables passed to this function if you are ## debugging if (FALSE) { @@ -34,17 +33,19 @@ pda.mcmc.bs <- function(settings, params.id = NULL, param.names = NULL, prior.id ## -------------------------------------- Setup ------------------------------------- ## Handle ## settings - settings <- pda.settings(settings = settings, - params.id = params.id, - param.names = param.names, - prior.id = prior.id, - chain = chain, - iter = iter, - adapt = adapt, - adj.min = adj.min, - ar.target = ar.target, - jvar = jvar, - n.knot = n.knot) + settings <- pda.settings( + settings = settings, + params.id = params.id, + param.names = param.names, + prior.id = prior.id, + chain = chain, + iter = iter, + adapt = adapt, + adj.min = adj.min, + ar.target = ar.target, + jvar = jvar, + n.knot = n.knot + ) ## will be used to check if multiplicative Gaussian is requested any.mgauss <- sapply(settings$assim.batch$inputs, `[[`, "likelihood") @@ -64,14 +65,14 @@ pda.mcmc.bs <- function(settings, params.id = NULL, param.names = NULL, prior.id bety <- PEcAn.DB::db.open(settings$database$bety) on.exit(PEcAn.DB::db.close(con), add = TRUE) ## Load priors - temp <- pda.load.priors(settings, bety) - prior.list <- temp$prior - settings <- temp$settings - pname <- lapply(prior.list, rownames) + temp <- pda.load.priors(settings, bety) + prior.list <- temp$prior + settings <- temp$settings + pname <- lapply(prior.list, rownames) n.param.all <- sapply(prior.list, nrow) ## Load data to assimilate against - inputs <- load.pda.data(settings, bety) + inputs <- load.pda.data(settings, bety) n.input <- length(inputs) # get hyper parameters if any @@ -81,14 +82,18 @@ pda.mcmc.bs <- function(settings, params.id = NULL, param.names = NULL, prior.id do.call("require", list(paste0("PEcAn.", settings$model$type))) my.write.config <- paste("write.config.", settings$model$type, sep = "") if (!exists(my.write.config)) { - PEcAn.logger::logger.severe(paste(my.write.config, - "does not exist. Please make sure that the PEcAn interface is loaded for", - settings$model$type)) + PEcAn.logger::logger.severe(paste( + my.write.config, + "does not exist. 
Please make sure that the PEcAn interface is loaded for", + settings$model$type + )) } ## Select parameters to constrain - prior.ind <- lapply(seq_along(settings$pfts), - function(x) which(pname[[x]] %in% settings$assim.batch$param.names[[x]])) + prior.ind <- lapply( + seq_along(settings$pfts), + function(x) which(pname[[x]] %in% settings$assim.batch$param.names[[x]]) + ) n.param <- sapply(prior.ind, length) ## Get the workflow id @@ -104,11 +109,11 @@ pda.mcmc.bs <- function(settings, params.id = NULL, param.names = NULL, prior.id ## Set up likelihood functions llik.fn <- pda.define.llik.fn(settings) - prior.all <- do.call("rbind", prior.list) + prior.all <- do.call("rbind", prior.list) ## Set prior distribution functions (d___, q___, r___, and multivariate versions) - prior.fn.all <- pda.define.prior.fn(prior.all) + prior.fn.all <- pda.define.prior.fn(prior.all) prior.ind.all <- which(unlist(pname) %in% unlist(settings$assim.batch$param.names)) - pname.all <- unlist(pname) + pname.all <- unlist(pname) ## ----------------------------------- MCMC Setup ----------------------------------- ## @@ -117,24 +122,26 @@ pda.mcmc.bs <- function(settings, params.id = NULL, param.names = NULL, prior.id for (chain in seq_len(settings$assim.batch$chain)) { ## Initialize empty params matrix (concatenated to params from a previous PDA, if provided) params.list <- pda.init.params(settings, chain, pname.all, sum(n.param.all)) - start <- params.list$start - finish <- params.list$finish - params <- params.list$params + start <- params.list$start + finish <- params.list$finish + params <- params.list$params iter.flag <- 1 - if(!is.null(settings$assim.batch$extension) & !is.null(params.list$llpars)){ - llpars <- params.list$llpars + if (!is.null(settings$assim.batch$extension) & !is.null(params.list$llpars)) { + llpars <- params.list$llpars llparnames <- sapply(strsplit(colnames(llpars), "\\."), `[[`, 1) - bias <- llpars[ ,llparnames == "bias"] - nobias <- llpars[ ,llparnames != "bias"] - all.bias <- bias[length(bias)] - parl <- nobias[length(nobias)] - LLpar <- matrix(NA, ncol= ncol(llpars), nrow = (finish-start)+1, - dimnames = list(NULL, colnames(llpars))) + bias <- llpars[, llparnames == "bias"] + nobias <- llpars[, llparnames != "bias"] + all.bias <- bias[length(bias)] + parl <- nobias[length(nobias)] + LLpar <- matrix(NA, + ncol = ncol(llpars), nrow = (finish - start) + 1, + dimnames = list(NULL, colnames(llpars)) + ) LLpar <- rbind(llpars, LLpar) - par.flag <- TRUE - iter.flag <- 0 + par.flag <- TRUE + iter.flag <- 0 } ## Set initial conditions @@ -151,19 +158,25 @@ pda.mcmc.bs <- function(settings, params.id = NULL, param.names = NULL, prior.id # Default jump variances. 
default to 0.1 * 90% prior CI if (!is.null(settings$assim.batch$extension)) { - load(settings$assim.batch$jcov.path) # load jcov + load(settings$assim.batch$jcov.path) # load jcov jcov <- jcov.list[[chain]] } else { - jmp.vars <- sapply(prior.fn.all$qprior, - function(x) 0.1 * diff(eval(x, list(p = c(0.05, 0.95)))))[prior.ind.all] + jmp.vars <- sapply( + prior.fn.all$qprior, + function(x) 0.1 * diff(eval(x, list(p = c(0.05, 0.95)))) + )[prior.ind.all] jcov <- diag(jmp.vars) } ## Create dir for diagnostic output if (!is.null(settings$assim.batch$diag.plot.iter)) { - dir.create(file.path(settings$outdir, - paste0("diag.pda", settings$assim.batch$ensemble.id)), - showWarnings = F, recursive = T) + dir.create( + file.path( + settings$outdir, + paste0("diag.pda", settings$assim.batch$ensemble.id) + ), + showWarnings = F, recursive = T + ) } ## --------------------------------- Main MCMC loop --------------------------------- ## @@ -171,19 +184,24 @@ pda.mcmc.bs <- function(settings, params.id = NULL, param.names = NULL, prior.id PEcAn.logger::logger.info(paste("Data assimilation MCMC iteration", i, "of", finish)) ## Adjust Jump distribution - if ((i > (start + 1)) && ((i - start)%%settings$assim.batch$jump$adapt == 0)) { + if ((i > (start + 1)) && ((i - start) %% settings$assim.batch$jump$adapt == 0)) { jcov <- pda.adjust.jumps.bs(settings, jcov, accept.count, params[(i - settings$assim.batch$jump$adapt):(i - - 1), prior.ind.all]) - accept.count <- 0 # Reset counter + 1), prior.ind.all]) + accept.count <- 0 # Reset counter # Save updated settings XML. Will be overwritten at end, but useful in case of crash settings$assim.batch$jump$jvar <- as.list(diag(jcov)) names(settings$assim.batch$jump$jvar) <- rep("jvar", n.param) XML::saveXML( PEcAn.settings::listToXml(settings, "pecan"), - file = file.path(settings$outdir, - paste0("pecan.pda", - settings$assim.batch$ensemble.id, ".xml"))) + file = file.path( + settings$outdir, + paste0( + "pecan.pda", + settings$assim.batch$ensemble.id, ".xml" + ) + ) + ) } pstar <- parm @@ -198,8 +216,8 @@ pda.mcmc.bs <- function(settings, params.id = NULL, param.names = NULL, prior.id # Convert pstar to a list of 1-row data frame if (is.null(dim(pstar))) { - pnames <- names(pstar) - run.params <- as.data.frame(matrix(pstar, nrow = 1)) + pnames <- names(pstar) + run.params <- as.data.frame(matrix(pstar, nrow = 1)) names(run.params) <- pnames } run.params <- list(run.params) @@ -207,12 +225,13 @@ pda.mcmc.bs <- function(settings, params.id = NULL, param.names = NULL, prior.id if (is.finite(prior.star)) { ## Set up run and write run configs run.id <- pda.init.run(settings, - con, - my.write.config, - workflow.id, - run.params, - n = 1, - run.names = paste0("MCMC_chain.", chain, "_iteration.", i)) + con, + my.write.config, + workflow.id, + run.params, + n = 1, + run.names = paste0("MCMC_chain.", chain, "_iteration.", i) + ) ## Start model run PEcAn.workflow::start_model_runs(settings, settings$database$bety$write) @@ -220,21 +239,21 @@ pda.mcmc.bs <- function(settings, params.id = NULL, param.names = NULL, prior.id ## Read model outputs align.return <- pda.get.model.output(settings, run.id, bety, inputs) model.out <- align.return$model.out - if(!is.na(model.out)){ + if (!is.na(model.out)) { inputs <- align.return$inputs } # do this once - if(i==1){ + if (i == 1) { inputs <- pda.neff.calc(inputs) } # retrieve n - n.of.obs <- sapply(inputs,`[[`, "n") - names(n.of.obs) <- sapply(model.out,names) + n.of.obs <- sapply(inputs, `[[`, "n") + names(n.of.obs) <- sapply(model.out, 
names) # handle bias parameters if multiplicative Gaussian is listed in the likelihoods - if(any(unlist(any.mgauss) == "multipGauss")) { + if (any(unlist(any.mgauss) == "multipGauss")) { isbias <- which(unlist(any.mgauss) == "multipGauss") # testing now nbias <- 1 @@ -244,8 +263,8 @@ pda.mcmc.bs <- function(settings, params.id = NULL, param.names = NULL, prior.id bias.terms <- NULL } - if(!is.null(bias.terms)){ - all.bias <- lapply(bias.terms, function(n) n[1,]) + if (!is.null(bias.terms)) { + all.bias <- lapply(bias.terms, function(n) n[1, ]) all.bias <- do.call("rbind", all.bias) } else { all.bias <- NULL @@ -253,21 +272,25 @@ pda.mcmc.bs <- function(settings, params.id = NULL, param.names = NULL, prior.id ## calculate error statistics pda.errors <- pda.calc.error(settings, con, model_out = model.out, run.id, inputs, all.bias) - llik.par <- pda.calc.llik.par(settings, n = n.of.obs, - error.stats = unlist(pda.errors), - hyper.pars) + llik.par <- pda.calc.llik.par(settings, + n = n.of.obs, + error.stats = unlist(pda.errors), + hyper.pars + ) # store llik-par - parl <- unlist(sapply(llik.par, `[[` , "par")) - if(!is.null(parl) & iter.flag == 1 & is.null(all.bias)) { - LLpar <- matrix(NA, ncol= length(parl), nrow = finish, dimnames = list(NULL, names(parl))) + parl <- unlist(sapply(llik.par, `[[`, "par")) + if (!is.null(parl) & iter.flag == 1 & is.null(all.bias)) { + LLpar <- matrix(NA, ncol = length(parl), nrow = finish, dimnames = list(NULL, names(parl))) par.flag <- TRUE iter.flag <- 0 - } else if(!is.null(parl) & iter.flag == 1 & !is.null(all.bias)) { - LLpar <- matrix(NA, ncol= length(parl) + nrow(all.bias), nrow = finish, - dimnames = list(NULL, c(rownames(all.bias), names(parl)))) + } else if (!is.null(parl) & iter.flag == 1 & !is.null(all.bias)) { + LLpar <- matrix(NA, + ncol = length(parl) + nrow(all.bias), nrow = finish, + dimnames = list(NULL, c(rownames(all.bias), names(parl))) + ) par.flag <- TRUE iter.flag <- 0 - } else if(iter.flag == 1){ + } else if (iter.flag == 1) { par.flag <- FALSE iter.flag <- 0 } @@ -278,60 +301,68 @@ pda.mcmc.bs <- function(settings, params.id = NULL, param.names = NULL, prior.id ## Accept or reject step a <- LL.new - LL.old + prior.star - prior.old if (is.na(a)) { - a <- -Inf # Can occur if LL.new == -Inf (due to model crash) and LL.old == -Inf (first run) + a <- -Inf # Can occur if LL.new == -Inf (due to model crash) and LL.old == -Inf (first run) } if (a > log(stats::runif(1))) { - LL.old <- LL.new - prior.old <- prior.star - parm <- pstar + LL.old <- LL.new + prior.old <- prior.star + parm <- pstar accept.count <- accept.count + 1 } - } ## end if(is.finite(prior.star)) + } ## end if(is.finite(prior.star)) ## Diagnostic figure if (!is.null(settings$assim.batch$diag.plot.iter) && - is.finite(prior.star) && (i == start | i == finish | (i%%settings$assim.batch$diag.plot.iter == 0))) { - grDevices::pdf(file.path(settings$outdir, - paste0("diag.pda", settings$assim.batch$ensemble.id), - paste0("data.vs.model_", gsub(" ", "0", sprintf("%5.0f", i)), ".pdf"))) - NEEo <- inputs[[1]]$obs - NEEm <- model.out[[1]] + is.finite(prior.star) && (i == start | i == finish | (i %% settings$assim.batch$diag.plot.iter == 0))) { + grDevices::pdf(file.path( + settings$outdir, + paste0("diag.pda", settings$assim.batch$ensemble.id), + paste0("data.vs.model_", gsub(" ", "0", sprintf("%5.0f", i)), ".pdf") + )) + NEEo <- inputs[[1]]$obs + NEEm <- model.out[[1]] NEE.resid <- NEEm - NEEo graphics::par(mfrow = c(1, 2)) graphics::plot(NEEo) graphics::points(NEEm, col = 2, cex = 
0.5) - graphics::legend("topleft", col = c(1, 2), pch = 1, - legend = c("data", "model")) - graphics::hist(NEE.resid, 100, main = paste0("LLik: ", - round(LL.new, 1))) + graphics::legend("topleft", + col = c(1, 2), pch = 1, + legend = c("data", "model") + ) + graphics::hist(NEE.resid, 100, main = paste0( + "LLik: ", + round(LL.new, 1) + )) grDevices::dev.off() } ## Store output params[i, ] <- parm - if(!is.null(parl) & is.null(all.bias)){ - LLpar[i, ] <- parl - } else if (!is.null(parl) & !is.null(all.bias)){ - LLpar[i, ] <- c(all.bias, parl) + if (!is.null(parl) & is.null(all.bias)) { + LLpar[i, ] <- parl + } else if (!is.null(parl) & !is.null(all.bias)) { + LLpar[i, ] <- c(all.bias, parl) } - - } #end of mcmc-loop + } # end of mcmc-loop mcmc.list[[chain]] <- params jcov.list[[chain]] <- jcov - if(par.flag){ + if (par.flag) { llpar.list[[chain]] <- LLpar } - - } #end of chain-loop + } # end of chain-loop # save as it is for extension - settings$assim.batch$mcmc.path <- file.path(settings$outdir, - paste0("mcmc.list.pda", - settings$assim.batch$ensemble.id, - ".Rdata")) + settings$assim.batch$mcmc.path <- file.path( + settings$outdir, + paste0( + "mcmc.list.pda", + settings$assim.batch$ensemble.id, + ".Rdata" + ) + ) save(mcmc.list, file = settings$assim.batch$mcmc.path) # subset to params of interst only @@ -341,36 +372,45 @@ pda.mcmc.bs <- function(settings, params.id = NULL, param.names = NULL, prior.id mcmc.param.list <- list() ind <- 0 for (i in seq_along(settings$pfts)) { - mcmc.param.list[[i]] <- lapply(mcmc.list, - function(x) x[, (ind + 1):(ind + n.param[i]), drop = FALSE]) + mcmc.param.list[[i]] <- lapply( + mcmc.list, + function(x) x[, (ind + 1):(ind + n.param[i]), drop = FALSE] + ) ind <- ind + n.param[i] } - if (par.flag){ - mcmc.param.list[[length(mcmc.param.list)+1]] <- list() - prior.list[[length(prior.list)+1]] <- list() - for(c in seq_len(settings$assim.batch$chain)){ + if (par.flag) { + mcmc.param.list[[length(mcmc.param.list) + 1]] <- list() + prior.list[[length(prior.list) + 1]] <- list() + for (c in seq_len(settings$assim.batch$chain)) { mcmc.param.list[[length(mcmc.param.list)]][[c]] <- llpar.list[[c]] } - settings$assim.batch$llpar.path <- file.path(settings$outdir, - paste0("llpar.pda", - settings$assim.batch$ensemble.id, - ".Rdata")) + settings$assim.batch$llpar.path <- file.path( + settings$outdir, + paste0( + "llpar.pda", + settings$assim.batch$ensemble.id, + ".Rdata" + ) + ) save(llpar.list, file = settings$assim.batch$llpar.path) } ## ------------------------------------ Clean up ------------------------------------ ## Save outputs to plots, files, and db - settings$assim.batch$jcov.path <- file.path(settings$outdir, - paste0("jcov.pda", - settings$assim.batch$ensemble.id, - ".Rdata")) + settings$assim.batch$jcov.path <- file.path( + settings$outdir, + paste0( + "jcov.pda", + settings$assim.batch$ensemble.id, + ".Rdata" + ) + ) save(jcov.list, file = settings$assim.batch$jcov.path) settings <- pda.postprocess(settings, con, mcmc.param.list, pname, prior.list, prior.ind) ## Output an updated settings list return(settings) - -} ## end pda.mcmc +} ## end pda.mcmc diff --git a/modules/assim.batch/R/pda.mcmc.recover.R b/modules/assim.batch/R/pda.mcmc.recover.R index 61463482179..c62c2dee28f 100644 --- a/modules/assim.batch/R/pda.mcmc.recover.R +++ b/modules/assim.batch/R/pda.mcmc.recover.R @@ -24,72 +24,72 @@ # settings$assim.batch <- pda.mcmc.recover(settings) # wrap up unfinished run # settings$assim.batch <- pda.mcmc(settings) # start new pda pda.mcmc.recover <- 
function(settings, params.id = NULL, param.names = NULL, prior.id = NULL, - chain = NULL, iter = NULL, adapt = NULL, adj.min = NULL, + chain = NULL, iter = NULL, adapt = NULL, adj.min = NULL, ar.target = NULL, jvar = NULL, n.knot = NULL, burnin = NULL) { - - if (FALSE) { - params.id <- param.names <- prior.id <- chain <- iter <- NULL - n.knot <- adapt <- adj.min <- ar.target <- jvar <- NULL - } - - ## Handle settings - settings <- pda.settings(settings = settings, params.id = params.id, param.names = param.names, - prior.id = prior.id, chain = chain, iter = iter, adapt = adapt, adj.min = adj.min, - ar.target = ar.target, jvar = jvar, n.knot = n.knot) - - ## Open database connection - if (settings$database$bety$write) { - con <- try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) - if (inherits(con, "try-error")) { - con <- NULL - } else { - on.exit(PEcAn.DB::db.close(con), add = TRUE) - } - } else { - con <- NULL - } - - ## Load priors - prior <- pda.load.priors(settings, con)$prior - pname <- rownames(prior) - n.param.all <- nrow(prior) - - # Get start and finish - params.dummy <- pda.init.params(settings, con, pname, n.param.all) - start <- params.dummy$start - finish <- params.dummy$finish - - ## Select parameters to constrain - prior.ind <- which(rownames(prior) %in% settings$assim.batch$param.names) - n.param <- length(prior.ind) - - ## Get the workflow id - if ("workflow" %in% names(settings)) { - workflow.id <- settings$workflow$id + if (FALSE) { + params.id <- param.names <- prior.id <- chain <- iter <- NULL + n.knot <- adapt <- adj.min <- ar.target <- jvar <- NULL + } + + ## Handle settings + settings <- pda.settings( + settings = settings, params.id = params.id, param.names = param.names, + prior.id = prior.id, chain = chain, iter = iter, adapt = adapt, adj.min = adj.min, + ar.target = ar.target, jvar = jvar, n.knot = n.knot + ) + + ## Open database connection + if (settings$database$bety$write) { + con <- try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) + if (inherits(con, "try-error")) { + con <- NULL } else { - workflow.id <- -1 - } - - ## Get ensemble id from diagnostic plot dir - ens.ids <- as.numeric(sub("diag.pda", "", dir(settings$outdir, "diag.pda"))) - settings$assim.batch$ensemble.id <- as.character(max(ens.ids)) - - ## Load up temp file to recreate params - params <- as.matrix(utils::read.table(file.path(settings$outdir, "pda.mcmc.txt"))) - colnames(params) <- pname - - ## Update iters - settings$assim.batch$iter <- finish - nrow(params) - - ## Save outputs to plots, files, and db - settings <- pda.postprocess(settings, con, params, pname, prior, prior.ind, burnin) - - ## close database connection - if (!is.null(con)) { - PEcAn.DB::db.close(con) + on.exit(PEcAn.DB::db.close(con), add = TRUE) } - - ## Output an updated settings list - return(settings$assim.batch) - + } else { + con <- NULL + } + + ## Load priors + prior <- pda.load.priors(settings, con)$prior + pname <- rownames(prior) + n.param.all <- nrow(prior) + + # Get start and finish + params.dummy <- pda.init.params(settings, con, pname, n.param.all) + start <- params.dummy$start + finish <- params.dummy$finish + + ## Select parameters to constrain + prior.ind <- which(rownames(prior) %in% settings$assim.batch$param.names) + n.param <- length(prior.ind) + + ## Get the workflow id + if ("workflow" %in% names(settings)) { + workflow.id <- settings$workflow$id + } else { + workflow.id <- -1 + } + + ## Get ensemble id from diagnostic plot dir + ens.ids <- as.numeric(sub("diag.pda", "", 
dir(settings$outdir, "diag.pda"))) + settings$assim.batch$ensemble.id <- as.character(max(ens.ids)) + + ## Load up temp file to recreate params + params <- as.matrix(utils::read.table(file.path(settings$outdir, "pda.mcmc.txt"))) + colnames(params) <- pname + + ## Update iters + settings$assim.batch$iter <- finish - nrow(params) + + ## Save outputs to plots, files, and db + settings <- pda.postprocess(settings, con, params, pname, prior, prior.ind, burnin) + + ## close database connection + if (!is.null(con)) { + PEcAn.DB::db.close(con) + } + + ## Output an updated settings list + return(settings$assim.batch) } # pda.mcmc diff --git a/modules/assim.batch/R/pda.neff.R b/modules/assim.batch/R/pda.neff.R index 9b358795986..15505fc0bfc 100644 --- a/modules/assim.batch/R/pda.neff.R +++ b/modules/assim.batch/R/pda.neff.R @@ -1,9 +1,9 @@ ##' Autocorelation correction and efficient sample size calculation on latent process ##' -##' What we're trying to do is to calculate the autocorrelation of the latent state, after attempting to "remove" the observation error. +##' What we're trying to do is to calculate the autocorrelation of the latent state, after attempting to "remove" the observation error. ##' The first step is thus to estimate the latent state using a simple 'process free' state-space model (e.g. random walk). ##' -##' @title Calculate N_eff +##' @title Calculate N_eff ##' @param inputs list ##' @param recalculate repeat neff calculation or not ##' @@ -11,109 +11,101 @@ ##' ##' @author Istem Fer ##' @export -pda.neff.calc <- function(inputs, recalculate = FALSE){ - - - for(i in seq_along(inputs)){ - - n <- inputs[[i]]$n - - if(!is.null(inputs[[i]]$n_eff) && !recalculate){ +pda.neff.calc <- function(inputs, recalculate = FALSE) { + for (i in seq_along(inputs)) { + n <- inputs[[i]]$n + + if (!is.null(inputs[[i]]$n_eff) && !recalculate) { next } # for now we're doing autocorrelation correction on flux data only # NEE, LE, FC flux.vars <- c(297, 298, 1000000042) - if(inputs[[i]]$variable.id %in% flux.vars){ - - rho <- pda.autocorr.calc(inputs[[i]], "heteroskedastic.laplacian") - n_eff <- n*(1-rho)/(1+rho) + if (inputs[[i]]$variable.id %in% flux.vars) { + rho <- pda.autocorr.calc(inputs[[i]], "heteroskedastic.laplacian") + n_eff <- n * (1 - rho) / (1 + rho) inputs[[i]]$n_eff <- n_eff - - }else{ + } else { # assume n_eff is the as n inputs[[i]]$n_eff <- n } } - + return(inputs) - } # pda.neff.calc ##' @title autocorrelation correction ##' @param input list that contains time-series data vector and parameters for heteroskedastic.laplacian -##' @param model data model type, for flux data heteroskedastic laplacian, normal is an example +##' @param model data model type, for flux data heteroskedastic laplacian, normal is an example ##' ##' @return rho AR(1) ##' ##' @author Istem Fer ##' @export -pda.autocorr.calc <- function(input, model = "heteroskedastic.laplacian"){ - - - if(model == "heteroskedastic.laplacian"){ - - HLModel = " +pda.autocorr.calc <- function(input, model = "heteroskedastic.laplacian") { + if (model == "heteroskedastic.laplacian") { + HLModel <- " model{ - + #### Data Model for(i in 1:n){ alphas[i] <- ifelse(x[i]<0, alpha_n, alpha_p) y[i] ~ ddexp(x[i],alpha_0 + alphas[i]*x[i]) } - + #### Process Model for(i in 2:n){ x[i] ~ dnorm(x[i-1], tau_add) } - + #### Priors x[1] ~ dnorm(x_ic,tau_ic) tau_add ~ dgamma(a_add,r_add) } " - - obs <- c(input$obs) - alpha_0 <- input$par[[1]] - alpha_p <- input$par[[2]] + + obs <- c(input$obs) + alpha_0 <- input$par[[1]] + alpha_p <- input$par[[2]] 
alpha_n <- input$par[[3]] - - # converting back C fluxes from kg C m-2 s-1 to umol C m-2 s-1 + + # converting back C fluxes from kg C m-2 s-1 to umol C m-2 s-1 # reduces the code and makes model fitting easier - if(input$variable.id %in% c(297, 1000000042)){ + if (input$variable.id %in% c(297, 1000000042)) { obs <- PEcAn.utils::misc.convert(obs, "kg C m-2 s-1", "umol C m-2 s-1") - AMFq <- rep(0, length(obs)) + AMFq <- rep(0, length(obs)) flags <- TRUE AMF.params <- PEcAn.uncertainty::flux.uncertainty(obs, AMFq, flags, bin.num = 20) alpha_0 <- AMF.params$intercept[[1]] alpha_p <- AMF.params$slopeP[[1]] alpha_n <- AMF.params$slopeN[[1]] } - - data <- list(y = obs, n = length(obs), - x_ic = 1, tau_ic = 1e-2, a_add = 1, r_add = 1, - alpha_0 = alpha_0, - alpha_p = alpha_p, - alpha_n = alpha_n) - - nchain = 3 + + data <- list( + y = obs, n = length(obs), + x_ic = 1, tau_ic = 1e-2, a_add = 1, r_add = 1, + alpha_0 = alpha_0, + alpha_p = alpha_p, + alpha_n = alpha_n + ) + + nchain <- 3 init <- list() - for(i in 1:nchain){ - y.samp <- sample(obs, length(obs), replace=TRUE) - init[[i]] <- list(tau_add=1/stats::var(diff(y.samp), na.rm=TRUE)) + for (i in 1:nchain) { + y.samp <- sample(obs, length(obs), replace = TRUE) + init[[i]] <- list(tau_add = 1 / stats::var(diff(y.samp), na.rm = TRUE)) } - - j.model <- rjags::jags.model(file = textConnection(HLModel), - data = data, - inits = init, - n.chains = 3) - - - - }else if(model == "gaussian"){ + + j.model <- rjags::jags.model( + file = textConnection(HLModel), + data = data, + inits = init, + n.chains = 3 + ) + } else if (model == "gaussian") { # Gaussian - - GaussianModel = " + + GaussianModel <- " model{ #### Data Model @@ -132,44 +124,48 @@ pda.autocorr.calc <- function(input, model = "heteroskedastic.laplacian"){ tau_add ~ dgamma(a_add,r_add) } " - - obs <- unlist(input$obs) - - data <- list(y = obs, n = length(obs), - x_ic = 1, tau_ic = 1e-2, - a_add = 1, r_add = 1, - a_obs = 1, r_obs = 1) - - nchain = 3 + + obs <- unlist(input$obs) + + data <- list( + y = obs, n = length(obs), + x_ic = 1, tau_ic = 1e-2, + a_add = 1, r_add = 1, + a_obs = 1, r_obs = 1 + ) + + nchain <- 3 init <- list() - for(i in 1:nchain){ - y.samp <- sample(obs,length(obs),replace=TRUE) - init[[i]] <- list(tau_add=1/stats::var(diff(y.samp), na.rm=TRUE), - tau_obs=1/stats::var(diff(y.samp), na.rm=TRUE)) + for (i in 1:nchain) { + y.samp <- sample(obs, length(obs), replace = TRUE) + init[[i]] <- list( + tau_add = 1 / stats::var(diff(y.samp), na.rm = TRUE), + tau_obs = 1 / stats::var(diff(y.samp), na.rm = TRUE) + ) } - - j.model <- rjags::jags.model(file = textConnection(GaussianModel), - data = data, - inits = init, - n.chains = 3) - - }else{ + + j.model <- rjags::jags.model( + file = textConnection(GaussianModel), + data = data, + inits = init, + n.chains = 3 + ) + } else { PEcAn.logger::logger.error(model, "is not data available as data model.") } - + jags.out <- rjags::coda.samples( model = j.model, variable.names = c("x"), n.iter = 5000, - thin = 100) - - + thin = 100 + ) + + out <- as.matrix(jags.out) median.out <- apply(out, 2, stats::median) - - ar <- stats::acf(median.out) + + ar <- stats::acf(median.out) rho <- as.numeric(ar$acf[2]) return(rho) - } # pda.autocorr.calc - diff --git a/modules/assim.batch/R/pda.postprocess.R b/modules/assim.batch/R/pda.postprocess.R index c97628ac4cd..598dee3fea6 100644 --- a/modules/assim.batch/R/pda.postprocess.R +++ b/modules/assim.batch/R/pda.postprocess.R @@ -14,81 +14,97 @@ ##' @author Ryan Kelly, Istem Fer ##' @export pda.postprocess <- 
function(settings, con, mcmc.param.list, pname, prior, prior.ind, sffx = NULL) { - # prepare for non-model params - if(length(mcmc.param.list) > length(settings$pfts)){ + if (length(mcmc.param.list) > length(settings$pfts)) { # create a subfolder under pft folder for non-model parameters par.file.name <- file.path(settings$outdir, paste0("pft/parameters")) dir.create(par.file.name, showWarnings = FALSE, recursive = TRUE) - + # parameters are in the last list, increase length(prior.ind) accordingly # only bias params will be thrown into emulator though # TODO: save posteriors for likelihood parameters in the database? par.names <- colnames(mcmc.param.list[[length(mcmc.param.list)]][[1]]) extr.bias <- sapply(strsplit(par.names, "\\."), `[[`, 1) bias.ind <- which(extr.bias == "bias") - - prior.ind[[length(prior.ind)+1]] <- bias.ind - pname[[length(pname)+1]] <- par.names[bias.ind] - } else{ + + prior.ind[[length(prior.ind) + 1]] <- bias.ind + pname[[length(pname) + 1]] <- par.names[bias.ind] + } else { par.file.name <- NULL } params.subset <- pda.plot.params(settings, mcmc.param.list, prior.ind, par.file.name, sffx) - + for (i in seq_along(settings$pfts)) { - ## Save params - filename.mcmc <- file.path(settings$pfts[[i]]$outdir, - paste0("mcmc.pda.", - settings$pfts[[i]]$name, - "_", - settings$assim.batch$ensemble.id, - sffx, ".Rdata")) + filename.mcmc <- file.path( + settings$pfts[[i]]$outdir, + paste0( + "mcmc.pda.", + settings$pfts[[i]]$name, + "_", + settings$assim.batch$ensemble.id, + sffx, ".Rdata" + ) + ) params.pft <- params.subset[[i]] save(params.pft, file = filename.mcmc) - - if(!is.null(con)){ + + if (!is.null(con)) { ## create a new Posteriors DB entry - pft.id <- PEcAn.DB::db.query(paste0("SELECT pfts.id FROM pfts, modeltypes WHERE pfts.name='", - settings$pfts[[i]]$name, - "' and pfts.modeltype_id=modeltypes.id and modeltypes.name='", - settings$model$type, "'"), - con)[["id"]] - - - posteriorid <- PEcAn.DB::db.query(paste0("INSERT INTO posteriors (pft_id) VALUES (", - pft.id, ") RETURNING id"), con) - - + pft.id <- PEcAn.DB::db.query( + paste0( + "SELECT pfts.id FROM pfts, modeltypes WHERE pfts.name='", + settings$pfts[[i]]$name, + "' and pfts.modeltype_id=modeltypes.id and modeltypes.name='", + settings$model$type, "'" + ), + con + )[["id"]] + + + posteriorid <- PEcAn.DB::db.query(paste0( + "INSERT INTO posteriors (pft_id) VALUES (", + pft.id, ") RETURNING id" + ), con) + + PEcAn.logger::logger.info(paste0("--- Posteriorid for ", settings$pfts[[i]]$name, " is ", posteriorid, " ---")) settings$pfts[[i]]$posteriorid <- posteriorid } - + ## save named distributions ## *** TODO: Generalize for multiple PFTS - post.distns <- PEcAn.MA::approx.posterior(trait.mcmc = params.subset[[i]], - priors = prior[[i]], - outdir = settings$pfts[[i]]$outdir, - filename.flag = paste0(".pda.", settings$pfts[[i]]$name, "_", - settings$assim.batch$ensemble.id, sffx)) - filename <- file.path(settings$pfts[[i]]$outdir, - paste0("post.distns.pda.", settings$pfts[[i]]$name, "_", - settings$assim.batch$ensemble.id, sffx, ".Rdata")) + post.distns <- PEcAn.MA::approx.posterior( + trait.mcmc = params.subset[[i]], + priors = prior[[i]], + outdir = settings$pfts[[i]]$outdir, + filename.flag = paste0( + ".pda.", settings$pfts[[i]]$name, "_", + settings$assim.batch$ensemble.id, sffx + ) + ) + filename <- file.path( + settings$pfts[[i]]$outdir, + paste0( + "post.distns.pda.", settings$pfts[[i]]$name, "_", + settings$assim.batch$ensemble.id, sffx, ".Rdata" + ) + ) save(post.distns, file = filename) - - 
if(!is.null(con)){ + + if (!is.null(con)) { PEcAn.DB::dbfile.insert(dirname(filename), basename(filename), "Posterior", posteriorid, con) } - + # Symlink to post.distns.Rdata (no ensemble.id identifier) if (file.exists(file.path(dirname(filename), "post.distns.Rdata"))) { file.remove(file.path(dirname(filename), "post.distns.Rdata")) } file.symlink(filename, file.path(dirname(filename), "post.distns.Rdata")) - + ## coerce parameter output into the same format as trait.mcmc pname <- rownames(post.distns) trait.mcmc <- list() @@ -103,28 +119,33 @@ pda.postprocess <- function(settings, con, mcmc.param.list, pname, prior, prior. names(trait.mcmc)[k] <- pname[prior.ind[[i]]][v] } } - + ## save updated parameter distributions as trait.mcmc so that they can be read by the ensemble code ## *** TODO: Generalize for multiple PFTS - filename <- file.path(settings$pfts[[i]]$outdir, - paste0("trait.mcmc.pda.", - settings$pfts[[i]]$name, - "_", settings$assim.batch$ensemble.id, - sffx, ".Rdata")) + filename <- file.path( + settings$pfts[[i]]$outdir, + paste0( + "trait.mcmc.pda.", + settings$pfts[[i]]$name, + "_", settings$assim.batch$ensemble.id, + sffx, ".Rdata" + ) + ) save(trait.mcmc, file = filename) - - if(!is.null(con)){ + + if (!is.null(con)) { PEcAn.DB::dbfile.insert(dirname(filename), basename(filename), "Posterior", posteriorid, con) } - - } #end of loop over PFTs - + } # end of loop over PFTs + ## save updated settings XML XML::saveXML( PEcAn.settings::listToXml(settings, "pecan"), file = file.path( dirname(settings$modeloutdir), - paste0("pecan.pda", settings$assim.batch$ensemble.id, ".xml"))) + paste0("pecan.pda", settings$assim.batch$ensemble.id, ".xml") + ) + ) return(settings) } # pda.postprocess @@ -144,42 +165,49 @@ pda.postprocess <- function(settings, con, mcmc.param.list, pname, prior, prior. 
##' @author Ryan Kelly, Istem Fer ##' @export pda.plot.params <- function(settings, mcmc.param.list, prior.ind, par.file.name = NULL, sffx) { - params.subset <- list() - + # flag for gelman.plot enough.iter <- TRUE - + for (i in seq_along(prior.ind)) { params.subset[[i]] <- coda::as.mcmc.list(lapply(mcmc.param.list[[i]], coda::mcmc)) - + burnin <- getBurnin(params.subset[[i]], method = "gelman.plot") - + # rare, but this can happen; better to throw an error than continue, as it might lead to # mis-interpretation of posteriors otherwise if (burnin == nrow(params.subset[[i]][[1]])) { PEcAn.logger::logger.severe(paste0("*** Burn-in is the same as the length of the chain, please run a longer chain ***")) } - + params.subset[[i]] <- stats::window(params.subset[[i]], start = max(burnin, na.rm = TRUE)) - + # check number of iterations left after throwing the burnin, gelman.plot requires > 50 if (nrow(params.subset[[i]][[1]]) < 50) { PEcAn.logger::logger.info(paste0("*** Not enough iterations in the chain after removing burn-in, skipping gelman.plot ***")) enough.iter <- FALSE } - - if(i <= length(settings$pfts)){ - grDevices::pdf(file.path(settings$pfts[[i]]$outdir, - paste0("mcmc.diagnostics.pda.", - settings$pfts[[i]]$name, - "_", settings$assim.batch$ensemble.id, - sffx, ".pdf"))) + + if (i <= length(settings$pfts)) { + grDevices::pdf(file.path( + settings$pfts[[i]]$outdir, + paste0( + "mcmc.diagnostics.pda.", + settings$pfts[[i]]$name, + "_", settings$assim.batch$ensemble.id, + sffx, ".pdf" + ) + )) } else { - grDevices::pdf(file.path(par.file.name, - paste0("mcmc.diagnostics.pda.par_", - settings$assim.batch$ensemble.id, - sffx, ".pdf"))) + grDevices::pdf(file.path( + par.file.name, + paste0( + "mcmc.diagnostics.pda.par_", + settings$assim.batch$ensemble.id, + sffx, ".pdf" + ) + )) } graphics::layout(matrix(c(1, 2, 3, 4, 5, 6), ncol = 2, byrow = TRUE)) @@ -187,96 +215,104 @@ pda.plot.params <- function(settings, mcmc.param.list, prior.ind, par.file.name graphics::plot(params.subset[[i]], auto.layout = FALSE) dm <- do.call("rbind", params.subset[[i]]) - + if (length(prior.ind[[i]]) > 1) { correlationPlot(dm) } - + if (length(params.subset[[i]]) > 1 & enough.iter) { coda::gelman.plot(params.subset[[i]], auto.layout = FALSE, autoburnin = FALSE) } - + graphics::layout(1) grDevices::dev.off() - + # Write out convergence diagnostics to a txt file - if(i <= length(settings$pfts)){ - filename.mcmc.temp <- file.path(settings$pfts[[i]]$outdir, - paste0("mcmc.diagnostics.pda.", - settings$pfts[[i]]$name, "_", - settings$assim.batch$ensemble.id, - sffx, ".txt")) + if (i <= length(settings$pfts)) { + filename.mcmc.temp <- file.path( + settings$pfts[[i]]$outdir, + paste0( + "mcmc.diagnostics.pda.", + settings$pfts[[i]]$name, "_", + settings$assim.batch$ensemble.id, + sffx, ".txt" + ) + ) } else { - filename.mcmc.temp <- file.path(par.file.name, - paste0("mcmc.diagnostics.pda.par_", - settings$assim.batch$ensemble.id, sffx, ".txt")) + filename.mcmc.temp <- file.path( + par.file.name, + paste0( + "mcmc.diagnostics.pda.par_", + settings$assim.batch$ensemble.id, sffx, ".txt" + ) + ) } - + cat("Summary statistics\n", file = filename.mcmc.temp) utils::capture.output(summary(params.subset[[i]]), file = filename.mcmc.temp, append = TRUE) cat("\n\n\n", file = filename.mcmc.temp, append = TRUE) - + if (length(prior.ind[[i]]) > 1) { cat("Covariance matrix :\n", file = filename.mcmc.temp, append = TRUE) utils::capture.output(stats::cov(dm), file = filename.mcmc.temp, append = TRUE) cat("\n\n\n", file = 
filename.mcmc.temp, append = TRUE) } - + if (length(prior.ind[[i]]) > 1) { cat("Correlation matrix :\n", file = filename.mcmc.temp, append = TRUE) utils::capture.output(stats::cor(dm), file = filename.mcmc.temp, append = TRUE) cat("\n\n\n", file = filename.mcmc.temp, append = TRUE) } - + if (length(params.subset[[i]]) > 1) { cat("Gelman and Rubin convergence diagnostics\n", file = filename.mcmc.temp, append = TRUE) - utils::capture.output(coda::gelman.diag(params.subset[[i]], autoburnin = FALSE), file = filename.mcmc.temp, - append = TRUE) + utils::capture.output(coda::gelman.diag(params.subset[[i]], autoburnin = FALSE), + file = filename.mcmc.temp, + append = TRUE + ) } - - } # end of for-loop over prior.ind - + } # end of for-loop over prior.ind + # convert mcmc.list to list of matrices params.subset.list <- list() for (i in seq_along(params.subset)) { params.subset.list[[i]] <- do.call("rbind", params.subset[[i]]) - } + } # reformat each sublist such that params have their own list and return return(lapply(seq_along(params.subset.list), function(x) as.list(data.frame(params.subset.list[[x]])))) - } # pda.plot.params ##' Function to write posterior distributions of the scaling factors ##' @param sf.samp.list scaling factor MCMC samples -##' @param sf.prior scaling factor prior +##' @param sf.prior scaling factor prior ##' @param sf.samp.filename scaling factor posterior output file name ##' @export -write_sf_posterior <- function(sf.samp.list, sf.prior, sf.samp.filename){ - +write_sf_posterior <- function(sf.samp.list, sf.prior, sf.samp.filename) { sf.samp <- coda::as.mcmc.list(lapply(sf.samp.list, coda::mcmc)) - + # saving this before discarding burnin, because in resampling we want to keep the samples together save(sf.samp, file = sf.samp.filename) - + burnin <- getBurnin(sf.samp, method = "gelman.plot") - + sf.samp <- stats::window(sf.samp, start = max(burnin, na.rm = TRUE)) - + # convert mcmc.list to list of matrices sf.subset.list <- list() sf.subset.list[[1]] <- as.data.frame(do.call("rbind", sf.samp)) filename.flag <- paste0("_", basename(sf.samp.filename)) - - sf.post.distns <- PEcAn.MA::approx.posterior(trait.mcmc = sf.subset.list[[1]], priors = sf.prior, - outdir = dirname(sf.samp.filename), - filename.flag = filename.flag) - - + + sf.post.distns <- PEcAn.MA::approx.posterior( + trait.mcmc = sf.subset.list[[1]], priors = sf.prior, + outdir = dirname(sf.samp.filename), + filename.flag = filename.flag + ) + + return(sf.post.distns) - } # write_sf_posterior @@ -291,61 +327,55 @@ write_sf_posterior <- function(sf.samp.list, sf.prior, sf.samp.filename){ ##' @param prior.list list of prior dataframes ##' @param prior.fn.all prior functions ##' @export -pda.sort.params <- function(mcmc.out, sub.sample = "mu_global_samp", ns = NULL, prior.all, prior.ind.all.ns, - sf = NULL, n.param.orig, prior.list, prior.fn.all){ - +pda.sort.params <- function(mcmc.out, sub.sample = "mu_global_samp", ns = NULL, prior.all, prior.ind.all.ns, + sf = NULL, n.param.orig, prior.list, prior.fn.all) { mcmc.samp.list <- list() - + for (c in seq_along(mcmc.out)) { - - m <- matrix(NA, nrow = nrow(mcmc.out[[c]][[sub.sample]]), ncol = length(prior.ind.all.ns)) - + m <- matrix(NA, nrow = nrow(mcmc.out[[c]][[sub.sample]]), ncol = length(prior.ind.all.ns)) + # TODO: make this sf compatible for multi site - if(!is.null(sf)){ - sfm <- matrix(NA, nrow = nrow(mcmc.out[[c]][[sub.sample]]), ncol = length(sf)) + if (!is.null(sf)) { + sfm <- matrix(NA, nrow = nrow(mcmc.out[[c]][[sub.sample]]), ncol = length(sf)) } - + # 
TODO: get back to this when scaling factor is used # # retrieve rownames separately to get rid of var_name* structures prior.all.rownames <- unlist(sapply(prior.list, rownames)) - + sc <- 1 for (i in seq_along(prior.ind.all.ns)) { sf.check <- prior.all.rownames[prior.ind.all.ns][i] idx <- grep(sf.check, rownames(prior.all)[prior.ind.all.ns]) ## it used to be prior.ind.all, check if this was a typo - if(any(grepl(sf.check, sf))){ - - m[, i] <- eval(prior.fn.all$qprior[prior.ind.all.ns][[i]], - list(p = mcmc.out[[c]][[sub.sample]][, idx])) - - - if(sc <= length(sf)){ + if (any(grepl(sf.check, sf))) { + m[, i] <- eval( + prior.fn.all$qprior[prior.ind.all.ns][[i]], + list(p = mcmc.out[[c]][[sub.sample]][, idx]) + ) + + + if (sc <= length(sf)) { sfm[, sc] <- mcmc.out[[c]][[sub.sample]][, idx] sc <- sc + 1 } - - }else{ - - if(is.null(ns)){ + } else { + if (is.null(ns)) { m[, i] <- mcmc.out[[c]][[sub.sample]][, idx] - }else{ + } else { m[, i] <- mcmc.out[[c]][[sub.sample]][, idx, ns] } - - } } - + colnames(m) <- prior.all.rownames[prior.ind.all.ns] mcmc.samp.list[[c]] <- m - - if(!is.null(sf)){ + + if (!is.null(sf)) { colnames(sfm) <- paste0(sf, "_SF") sf.samp.list[[c]] <- sfm } - } - + # Separate each PFT's parameter samples (and bias term) to their own list mcmc.param.list <- list() ind <- 0 @@ -353,6 +383,6 @@ pda.sort.params <- function(mcmc.out, sub.sample = "mu_global_samp", ns = NULL, mcmc.param.list[[i]] <- lapply(mcmc.samp.list, function(x) x[, (ind + 1):(ind + n.param.orig[i]), drop = FALSE]) ind <- ind + n.param.orig[i] } - + return(mcmc.param.list) } # pda.sort.params diff --git a/modules/assim.batch/R/pda.utils.R b/modules/assim.batch/R/pda.utils.R index bf619061aee..2e3030c3e4d 100644 --- a/modules/assim.batch/R/pda.utils.R +++ b/modules/assim.batch/R/pda.utils.R @@ -36,10 +36,10 @@ assim.batch <- function(settings) { ##' @export runModule.assim.batch <- function(settings) { if (PEcAn.settings::is.MultiSettings(settings)) { - pda.method <- unique(sapply(settings$assim.batch,`[[`, "method")) - if(pda.method == "emulator.ms"){ + pda.method <- unique(sapply(settings$assim.batch, `[[`, "method")) + if (pda.method == "emulator.ms") { return(pda.emulator.ms(settings)) - }else{ + } else { return(PEcAn.settings::papply(settings, runModule.assim.batch)) } } else if (PEcAn.settings::is.Settings(settings)) { @@ -78,10 +78,10 @@ pda.settings <- function(settings, params.id = NULL, param.names = NULL, prior.i # If neither an argument or a setting is provided, set a default value in settings. 
# When there are more than 1 PFT, make sure they are in the same order in PDA tags to avoid index problems - if(length(settings$assim.batch$param.names) > 1){ + if (length(settings$assim.batch$param.names) > 1) { # here I assume if a PFT is listed under the PFT tag, we want to constrain at least one of its parameters - non_match <- which(names(settings$assim.batch$param.names) != sapply(settings$pfts,`[[`, "name")) - if(length(non_match) > 0){ + non_match <- which(names(settings$assim.batch$param.names) != sapply(settings$pfts, `[[`, "name")) + if (length(non_match) > 0) { PEcAn.logger::logger.severe("Please make sure the ORDER of the PFT name tags match under and sections in your pecan.xml and try again.") } } @@ -111,9 +111,11 @@ pda.settings <- function(settings, params.id = NULL, param.names = NULL, prior.i constant.names <- unlist(sapply(settings$pfts, function(x) names(x$constants))) params.in.constants <- which(unlist(settings$assim.batch$param.names) %in% constant.names) if (length(params.in.constants) > 0) { - PEcAn.logger::logger.severe(paste0("PDA requested for parameter(s) [", - paste(unlist(settings$assim.batch$param.names)[params.in.constants], collapse = ", "), - "] but these parameters are specified as constants in pecan.xml!")) + PEcAn.logger::logger.severe(paste0( + "PDA requested for parameter(s) [", + paste(unlist(settings$assim.batch$param.names)[params.in.constants], collapse = ", "), + "] but these parameters are specified as constants in pecan.xml!" + )) } # # if settings$assim.batch$prior$prev.prior.id is not null, it means an extension run was already done @@ -171,22 +173,21 @@ pda.settings <- function(settings, params.id = NULL, param.names = NULL, prior.i # n.knot: Number of emulator knots if (!is.null(n.knot)) { settings$assim.batch$n.knot <- as.numeric(n.knot) - }else if(settings$assim.batch$method == "emulator"){ + } else if (settings$assim.batch$method == "emulator") { if (is.null(settings$assim.batch$n.knot)) { settings$assim.batch$n.knot <- 100 # Default } settings$assim.batch$n.knot <- as.numeric(settings$assim.batch$n.knot) } - # ----- Jump distribution / tuning parameters + # ----- Jump distribution / tuning parameters # adapt: How often to adapt the MCMC. 
Defaults to iter/10 if (!is.null(adapt)) { settings$assim.batch$jump$adapt <- adapt } if (is.null(settings$assim.batch$jump$adapt)) { - - settings$assim.batch$jump$adapt <- floor(settings$assim.batch$iter/10) # Default + settings$assim.batch$jump$adapt <- floor(settings$assim.batch$iter / 10) # Default } settings$assim.batch$jump$adapt <- as.numeric(settings$assim.batch$jump$adapt) @@ -239,27 +240,30 @@ pda.settings <- function(settings, params.id = NULL, param.names = NULL, prior.i ##' @author Ryan Kelly, Istem Fer ##' @export pda.load.priors <- function(settings, con, extension.check = FALSE) { - # settings$assim.batch$prior$prior.id is not NULL if you've done a PDA or meta.analysis and went through write.configs # then you can proceed loading objects by querying their paths according to their ids # if it's NULL get the most recent id from DB as default if (is.null(settings$assim.batch$prior$prior.id)) { - PEcAn.logger::logger.info(paste0("Defaulting to most recent posterior/prior as PDA prior.")) ## by default, use the most recent posterior/prior as the prior priorids <- list() for (i in seq_along(settings$pfts)) { - - pft.id <- PEcAn.DB::db.query(paste0("SELECT pfts.id FROM pfts, modeltypes WHERE pfts.name='", - settings$pfts[[i]]$name, - "' and pfts.modeltype_id=modeltypes.id and modeltypes.name='", - settings$model$type, "'"), - con)[["id"]] + pft.id <- PEcAn.DB::db.query( + paste0( + "SELECT pfts.id FROM pfts, modeltypes WHERE pfts.name='", + settings$pfts[[i]]$name, + "' and pfts.modeltype_id=modeltypes.id and modeltypes.name='", + settings$model$type, "'" + ), + con + )[["id"]] priors <- PEcAn.DB::db.query(paste0("SELECT * from posteriors where pft_id = ", pft.id), con) - prior.db <- PEcAn.DB::db.query(paste0("SELECT * from dbfiles where container_type = 'Posterior' and container_id IN (", - paste(priors$id, collapse = ","), ")"), con) + prior.db <- PEcAn.DB::db.query(paste0( + "SELECT * from dbfiles where container_type = 'Posterior' and container_id IN (", + paste(priors$id, collapse = ","), ")" + ), con) prior.db.grep <- prior.db[grep("^post\\.distns\\..*Rdata$", prior.db$file_name), ] if (nrow(prior.db.grep) == 0) { @@ -273,9 +277,9 @@ pda.load.priors <- function(settings, con, extension.check = FALSE) { # extension.check == FALSE not an extension run # extension.check == TRUE a "round" extension run - if(extension.check){ + if (extension.check) { priorids <- sapply(settings$pfts, `[[`, "posteriorid") - } else{ + } else { priorids <- settings$assim.batch$prior$prior.id } @@ -290,14 +294,12 @@ pda.load.priors <- function(settings, con, extension.check = FALSE) { # now that you filled priorids load the PDA prior objects # if files becomes NULL try loading objects from workflow pft folders for (i in seq_along(settings$pfts)) { + files <- PEcAn.DB::dbfile.check("Posterior", priorids[[i]], con, tmp_hostname, return.all = TRUE) - files <- PEcAn.DB::dbfile.check("Posterior", priorids[[i]], con, tmp_hostname, return.all = TRUE) - - pid <- grep("post.distns.*Rdata", files$file_name) ## is there a posterior file? + pid <- grep("post.distns.*Rdata", files$file_name) ## is there a posterior file? if (length(pid) == 0) { - pid <- grep("prior.distns.Rdata", files$file_name) ## is there a prior file? - + pid <- grep("prior.distns.Rdata", files$file_name) ## is there a prior file? 
} if (length(pid) > 0) { @@ -313,7 +315,7 @@ pda.load.priors <- function(settings, con, extension.check = FALSE) { fname <- file.path(pft$outdir, "prior.distns.Rdata") if (file.exists(fname)) { prior.paths[[i]] <- fname - }else{ + } else { ## if no posterior or prior can be found, skip to the next PFT next } @@ -327,25 +329,27 @@ pda.load.priors <- function(settings, con, extension.check = FALSE) { load(prior.paths[[i]], envir = "distns") prior.distns <- distns$prior.distns post.distns <- distns$post.distns - + if (!exists("post.distns")) { prior.out[[i]] <- prior.distns } else { prior.out[[i]] <- post.distns rm(post.distns) } - } # Finally, check that PDA parameters requested are in the prior; can't assimilate them if not. # Could proceed with any valid params. But probably better to just bonk out now to avoid wasting # a lot of time in case the mis-specified parameter(s) is really important to the analysis. - params.no.priors <- which(is.na(match(unlist(settings$assim.batch$param.names), - unlist(lapply(prior.out, rownames))))) + params.no.priors <- which(is.na(match( + unlist(settings$assim.batch$param.names), + unlist(lapply(prior.out, rownames)) + ))) if (length(params.no.priors) > 0) { PEcAn.logger::logger.severe(paste0("PDA requested for parameter(s) [", paste(unlist(settings$assim.batch$param.names)[params.no.priors], - collapse = ", "), "] but no prior found!")) + collapse = ", " + ), "] but no prior found!")) } return(list(prior = prior.out, settings = settings)) @@ -367,17 +371,18 @@ pda.create.ensemble <- function(settings, con, workflow.id) { if (!is.null(con)) { # Identifiers for ensemble 'runtype' if (settings$assim.batch$method == "bruteforce" | - settings$assim.batch$method == "bruteforce.bs" | - settings$assim.batch$method == "bayesian.tools") { + settings$assim.batch$method == "bruteforce.bs" | + settings$assim.batch$method == "bayesian.tools") { ensemble.type <- "pda.MCMC" - } else if (settings$assim.batch$method == "emulator"| - settings$assim.batch$method == "emulator.ms") { + } else if (settings$assim.batch$method == "emulator" | + settings$assim.batch$method == "emulator.ms") { ensemble.type <- "pda.emulator" } ensemble.id <- PEcAn.DB::db.query(paste("INSERT INTO ensembles (runtype, workflow_id) values ('", - ensemble.type, "', ", workflow.id, ") RETURNING id", sep = ""), con) - + ensemble.type, "', ", workflow.id, ") RETURNING id", + sep = "" + ), con) } else { ensemble.id <- NA } @@ -431,8 +436,10 @@ pda.define.prior.fn <- function(prior) { p } - return(list(dprior = dprior, rprior = rprior, qprior = qprior, - pprior = pprior, dmvprior = dmvprior, rmvprior = rmvprior)) + return(list( + dprior = dprior, rprior = rprior, qprior = qprior, + pprior = pprior, dmvprior = dmvprior, rmvprior = rmvprior + )) } # pda.define.prior.fn @@ -454,12 +461,12 @@ pda.init.params <- function(settings, chain, pname, n.param.all) { ## Load params from previous run, if provided. 
if (!is.null(settings$assim.batch$extension)) { mcmc.list <- NULL # will be loaded in the next line - load(settings$assim.batch$mcmc.path) # loads params + load(settings$assim.batch$mcmc.path) # loads params params <- mcmc.list[[chain]] - start <- nrow(params) + 1 + start <- nrow(params) + 1 finish <- nrow(params) + as.numeric(settings$assim.batch$iter) params <- rbind(params, matrix(NA, finish - start + 1, n.param.all)) - if(!is.null(settings$assim.batch$llpar.path)){ # load llik params + if (!is.null(settings$assim.batch$llpar.path)) { # load llik params llpar.list <- NULL # will be loaded in the next line load(settings$assim.batch$llpar.path) llpars <- llpar.list[[chain]] @@ -468,7 +475,7 @@ pda.init.params <- function(settings, chain, pname, n.param.all) { } } else { # No input given, starting fresh - start <- 1 + start <- 1 finish <- as.numeric(settings$assim.batch$iter) params <- matrix(NA, finish, n.param.all) llpars <- NULL @@ -497,7 +504,6 @@ pda.init.params <- function(settings, chain, pname, n.param.all) { pda.init.run <- function(settings, con, my.write.config, workflow.id, params, n = ifelse(is.null(dim(params)), 1, nrow(params)), run.names = paste("run", 1:n, sep = ".")) { - run.ids <- rep(NA, n) for (i in seq_len(n)) { ## set RUN.ID @@ -510,11 +516,12 @@ pda.init.run <- function(settings, con, my.write.config, workflow.id, params, "ensemble_id, parameter_list) ", "values ('", settings$model$id, "','", settings$run$site$id, "','", settings$run$start.date, "','", - settings$run$end.date, "','", settings$modeloutdir , "','", + settings$run$end.date, "','", settings$modeloutdir, "','", settings$assim.batch$ensemble.id, "','", paramlist, - "') RETURNING id"), - con) - + "') RETURNING id" + ), + con + ) } else { run.ids[i] <- run.names[i] } @@ -527,17 +534,22 @@ pda.init.run <- function(settings, con, my.write.config, workflow.id, params, # names(trait.values)[which(!(names(trait.values) %in% 'env'))] <- newnames ## write config - do.call(my.write.config, args = list(defaults = settings$pfts, - trait.values = lapply(params, - function(x, n) { as.data.frame(x[n, , drop=FALSE]) }, - n = i), - settings = settings, - run.id = run.ids[i])) + do.call(my.write.config, args = list( + defaults = settings$pfts, + trait.values = lapply(params, + function(x, n) { + as.data.frame(x[n, , drop = FALSE]) + }, + n = i + ), + settings = settings, + run.id = run.ids[i] + )) # Identifiers for ensemble 'runtype' if (settings$assim.batch$method == "bruteforce" | - settings$assim.batch$method == "bruteforce.bs" | - settings$assim.batch$method == "bayesian.tools") { + settings$assim.batch$method == "bruteforce.bs" | + settings$assim.batch$method == "bayesian.tools") { ensemble.type <- "pda.MCMC" } else if (settings$assim.batch$method == "emulator") { ensemble.type <- "pda.emulator" @@ -545,30 +557,32 @@ pda.init.run <- function(settings, con, my.write.config, workflow.id, params, ## write a README for the run cat("runtype : ", - paste("pda", settings$assim.batch$method, sep = "."), "\n", "workflow id : ", - as.character(workflow.id), "\n", - "ensemble id : ", as.character(settings$assim.batch$ensemble.id), "\n", - "chain : ", settings$assim.batch$chain, "\n", - "run : ", run.names[i], "\n", - "run id : ", as.character(run.ids[i]), "\n", - "pft names : ", as.character(lapply(settings$pfts, function(x) x[['name']])), "\n", - "site : ", settings$run$site$name, "\n", - "site id : ", settings$run$site$id, "\n", - "met data : ", settings$run$site$met, "\n", - "start date : ", settings$run$start.date, "\n", - 
"end date : ", settings$run$end.date, "\n", - "hostname : ", settings$host$name, "\n", - "rundir : ", file.path(settings$host$rundir, run.ids[i]), "\n", - "outdir : ", file.path(settings$host$outdir, run.ids[i]), "\n", - file = file.path(settings$rundir, run.ids[i], "README.txt")) + paste("pda", settings$assim.batch$method, sep = "."), "\n", "workflow id : ", + as.character(workflow.id), "\n", + "ensemble id : ", as.character(settings$assim.batch$ensemble.id), "\n", + "chain : ", settings$assim.batch$chain, "\n", + "run : ", run.names[i], "\n", + "run id : ", as.character(run.ids[i]), "\n", + "pft names : ", as.character(lapply(settings$pfts, function(x) x[["name"]])), "\n", + "site : ", settings$run$site$name, "\n", + "site id : ", settings$run$site$id, "\n", + "met data : ", settings$run$site$met, "\n", + "start date : ", settings$run$start.date, "\n", + "end date : ", settings$run$end.date, "\n", + "hostname : ", settings$host$name, "\n", + "rundir : ", file.path(settings$host$rundir, run.ids[i]), "\n", + "outdir : ", file.path(settings$host$outdir, run.ids[i]), "\n", + file = file.path(settings$rundir, run.ids[i], "README.txt") + ) ## add the job to the list of runs append <- ifelse(i == 1, FALSE, TRUE) cat(as.character(run.ids[i]), - file = file.path(settings$rundir, "runs.txt"), - sep = "\n", - append = append) - } # end for + file = file.path(settings$rundir, "runs.txt"), + sep = "\n", + append = append + ) + } # end for return(unlist(run.ids)) } # pda.init.run @@ -587,8 +601,10 @@ pda.init.run <- function(settings, con, my.write.config, workflow.id, params, ##' @author Ryan Kelly ##' @export pda.adjust.jumps <- function(settings, jmp.list, accept.rate, pnames = NULL) { - PEcAn.logger::logger.info(paste0("Acceptance rates were (", paste(pnames, collapse = ", "), ") = (", - paste(round(accept.rate/settings$assim.batch$jump$adapt, 3), collapse = ", "), ")")) + PEcAn.logger::logger.info(paste0( + "Acceptance rates were (", paste(pnames, collapse = ", "), ") = (", + paste(round(accept.rate / settings$assim.batch$jump$adapt, 3), collapse = ", "), ")" + )) # PEcAn.logger::logger.info(paste0('Using jump variances (', # paste(round(unlist(settings$assim.batch$jump$jvar),3), collapse=', '), ')')) @@ -622,19 +638,23 @@ pda.adjust.jumps.bs <- function(settings, jcov, accept.count, params.recent) { params.recent <- params[(i - settings$assim.batch$jump$adapt):(i - 1), prior.ind] } pnames <- colnames(params.recent) - PEcAn.logger::logger.info(paste0("Acceptance rate was ", - round(accept.count / settings$assim.batch$jump$adapt, 3))) - PEcAn.logger::logger.info(paste0("Using jump variance diagonals (", - paste(pnames, collapse = ", "), ") = (", - paste(round(diag(jcov), 3), collapse = ", "), ")")) + PEcAn.logger::logger.info(paste0( + "Acceptance rate was ", + round(accept.count / settings$assim.batch$jump$adapt, 3) + )) + PEcAn.logger::logger.info(paste0( + "Using jump variance diagonals (", + paste(pnames, collapse = ", "), ") = (", + paste(round(diag(jcov), 3), collapse = ", "), ")" + )) r <- ncol(params.recent) if (accept.count == 0) { rescale <- diag(rep(settings$assim.batch$jump$adj.min, r)) - jcov <- rescale %*% jcov %*% rescale + jcov <- rescale %*% jcov %*% rescale } else { - stdev <- apply(params.recent, 2, stats::sd) - corr <- stats::cor(params.recent) + stdev <- apply(params.recent, 2, stats::sd) + corr <- stats::cor(params.recent) if (any(is.na(corr))) { corr <- diag(rep(1, r)) } @@ -645,8 +665,10 @@ pda.adjust.jumps.bs <- function(settings, jcov, accept.count, params.recent) { jcov 
<- rescale %*% corr %*% rescale } - PEcAn.logger::logger.info(paste0("New jump variance diagonals are (", - paste(round(diag(jcov), 3), collapse = ", "), ")")) + PEcAn.logger::logger.info(paste0( + "New jump variance diagonals are (", + paste(round(diag(jcov), 3), collapse = ", "), ")" + )) return(jcov) } # pda.adjust.jumps.bs @@ -675,14 +697,14 @@ pda.generate.knots <- function(n.knot, sf, probs.sf, n.param.all, prior.ind, pri # if the params are going to be scaled leave them out, if sf is NULL nothing happens inds <- prior.ind[!prior.ind %in% which(pname %in% sf)] - if(length(inds) !=0){ + if (length(inds) != 0) { # Fill in parameters to be sampled with probabilities sampled in a LHC design probs[, inds] <- PEcAn.emulator::lhc(t(matrix(0:1, ncol = length(inds), nrow = 2)), n.knot) } inds <- prior.ind[prior.ind %in% which(pname %in% sf)] - if(!is.null(sf) & length(inds) !=0){ + if (!is.null(sf) & length(inds) != 0) { match.ind <- sapply(pname[inds], function(x) which(sf == x)) probs[, inds] <- probs.sf[, match.ind] } @@ -706,22 +728,22 @@ pda.generate.knots <- function(n.knot, sf, probs.sf, n.param.all, prior.ind, pri ##' ##' @author Istem Fer ##' @export -pda.generate.sf <- function(n.knot, sf, prior.list){ - +pda.generate.sf <- function(n.knot, sf, prior.list) { n.sf <- length(sf) # prior for scaling factor - prior.sf <- data.frame(distn = rep("beta", n.sf), - parama = rep(1, n.sf), - paramb = rep(1, n.sf), - n = rep(NA, n.sf)) - rownames(prior.sf) <- paste0(sf,"_SF") - prior.list[[length(prior.list)+1]] <- prior.sf + prior.sf <- data.frame( + distn = rep("beta", n.sf), + parama = rep(1, n.sf), + paramb = rep(1, n.sf), + n = rep(NA, n.sf) + ) + rownames(prior.sf) <- paste0(sf, "_SF") + prior.list[[length(prior.list) + 1]] <- prior.sf probs.sf <- PEcAn.emulator::lhc(t(matrix(0:1, ncol = n.sf, nrow = 2)), n.knot) - colnames(probs.sf) <- paste0(sf,"_SF") + colnames(probs.sf) <- paste0(sf, "_SF") return(list(probs = probs.sf, priors = prior.list)) - } @@ -736,28 +758,30 @@ pda.generate.sf <- function(n.knot, sf, prior.list){ ##' ##' @author Istem Fer ##' @export -return.bias <- function(settings, isbias, model.out, inputs, prior.list.bias, run.round = FALSE, pass2bias = NULL){ - +return.bias <- function(settings, isbias, model.out, inputs, prior.list.bias, run.round = FALSE, pass2bias = NULL) { # how many bias parameters per dataset requested nbias <- ifelse(is.null(settings$assim.batch$inputs[[isbias]]$nbias), 1, - as.numeric(settings$assim.batch$inputs[[isbias]]$nbias)) + as.numeric(settings$assim.batch$inputs[[isbias]]$nbias) + ) prev.bias <- settings$assim.batch$bias.path # to store priors - bprior <- data.frame(distn = rep(NA,length(isbias)), - parama = rep(NA,length(isbias)), - paramb = rep(NA,length(isbias)), - n = rep(NA,length(isbias))) - for(b in seq_along(isbias)){ + bprior <- data.frame( + distn = rep(NA, length(isbias)), + parama = rep(NA, length(isbias)), + paramb = rep(NA, length(isbias)), + n = rep(NA, length(isbias)) + ) + for (b in seq_along(isbias)) { # any prior passed via settings? 
- if(!is.null(settings$assim.batch$inputs[[isbias]]$bprior)){ - bprior$distn[b] <- settings$assim.batch$inputs[[isbias[b]]]$bprior$distn + if (!is.null(settings$assim.batch$inputs[[isbias]]$bprior)) { + bprior$distn[b] <- settings$assim.batch$inputs[[isbias[b]]]$bprior$distn bprior$parama[b] <- settings$assim.batch$inputs[[isbias[b]]]$bprior$parama bprior$paramb[b] <- settings$assim.batch$inputs[[isbias[b]]]$bprior$paramb - }else{ # assume log-normal(0,1) + } else { # assume log-normal(0,1) PEcAn.logger::logger.info(paste0("No prior is defined for the bias parameter, assuming standard log-normal")) - bprior$distn[b] <- "lnorm" + bprior$distn[b] <- "lnorm" bprior$parama[b] <- 0 bprior$paramb[b] <- 1 } @@ -772,45 +796,45 @@ return.bias <- function(settings, isbias, model.out, inputs, prior.list.bias, ru prior.names <- rep(NA, ibias) - for(i in seq_along(isbias)){ + for (i in seq_along(isbias)) { bias.params[[i]] <- matrix(NA, nrow = length(model.out), ncol = nbias) - for(iknot in seq_along(model.out)){ - if(anyNA(model.out[[iknot]], recursive = TRUE)){ + for (iknot in seq_along(model.out)) { + if (anyNA(model.out[[iknot]], recursive = TRUE)) { bias.params[[i]][iknot, ] <- NA - }else { + } else { # calculate optimum bias parameter for the model output that has bias regdf <- data.frame(inputs[[isbias[i]]]$obs, model.out[[iknot]][[isbias[i]]]) - colnames(regdf) <- c("data","model") - fit <- stats::lm( regdf$data ~ (regdf$model - 1)) - bias.params[[i]][iknot,1] <- fit$coefficients[[1]] - if(ncol(bias.params[[i]]) > 1){ - bias.params[[i]][iknot, 2:ncol(bias.params[[i]])] <- stats::rnorm( - ncol(bias.params[[i]]) - 1, - bias.params[[i]][iknot, 1], - bias.params[[i]][iknot, 1] * 0.1) + colnames(regdf) <- c("data", "model") + fit <- stats::lm(regdf$data ~ (regdf$model - 1)) + bias.params[[i]][iknot, 1] <- fit$coefficients[[1]] + if (ncol(bias.params[[i]]) > 1) { + bias.params[[i]][iknot, 2:ncol(bias.params[[i]])] <- stats::rnorm( + ncol(bias.params[[i]]) - 1, + bias.params[[i]][iknot, 1], + bias.params[[i]][iknot, 1] * 0.1 + ) } } } - prior.names[i] <- paste0("bias.", sapply(model.out[[1]],names)[isbias[i]]) - names(bias.params)[i] <- paste0("bias.", sapply(model.out[[1]],names)[isbias[i]]) + prior.names[i] <- paste0("bias.", sapply(model.out[[1]], names)[isbias[i]]) + names(bias.params)[i] <- paste0("bias.", sapply(model.out[[1]], names)[isbias[i]]) } rownames(bias.prior) <- prior.names - prior.list.bias[[(length(prior.list.bias)+1)]] <- bias.prior + prior.list.bias[[(length(prior.list.bias) + 1)]] <- bias.prior # if this is another round, use the first priors - if(run.round){ + if (run.round) { prior.list <- NULL # will be loaded in the next line load(prev.bias) prior.list.bias <- prior.list # TODO: implementation for multiple bias params, this requires multiple changes in the PDA workflow - bias.params[[1]][(nrow(bias.params[[1]])-length(pass2bias)+1):nrow(bias.params[[1]]), 1] <- pass2bias + bias.params[[1]][(nrow(bias.params[[1]]) - length(pass2bias) + 1):nrow(bias.params[[1]]), 1] <- pass2bias } return(list(bias.params = bias.params, prior.list.bias = prior.list.bias, nbias = nbias)) - } # return.bias @@ -821,38 +845,32 @@ return.bias <- function(settings, isbias, model.out, inputs, prior.list.bias, ru ##' ##' @author Istem Fer ##' @export -return_hyperpars <- function(assim.settings, inputs){ - +return_hyperpars <- function(assim.settings, inputs) { check.hypers <- sapply(assim.settings$inputs, `[[`, "hyper.pars") hyper.pars <- list() - if(length(unlist(check.hypers)) == 0){ + if 
(length(unlist(check.hypers)) == 0) { # no hyper parameters passed via settings # default to scaled hyper params - for(k in seq_along(assim.settings$inputs)){ + for (k in seq_along(assim.settings$inputs)) { hyper.pars[[k]] <- list() hyper.pars[[k]]$parama <- 0.001 - hyper.pars[[k]]$paramb <- 0.001 * stats::var(inputs[[k]]$data[,1], na.rm = TRUE) + hyper.pars[[k]]$paramb <- 0.001 * stats::var(inputs[[k]]$data[, 1], na.rm = TRUE) } - - }else{ - + } else { # hyperparameters at least for one likelihood was passed - for(k in seq_along(assim.settings$inputs)){ - - if(is.null(check.hypers[[k]])){ + for (k in seq_along(assim.settings$inputs)) { + if (is.null(check.hypers[[k]])) { hyper.pars[[k]] <- list() hyper.pars[[k]]$parama <- 0.001 - hyper.pars[[k]]$paramb <- 0.001 * stats::var(inputs[[k]]$data[,1], na.rm = TRUE) - }else{ + hyper.pars[[k]]$paramb <- 0.001 * stats::var(inputs[[k]]$data[, 1], na.rm = TRUE) + } else { hyper.pars[[k]] <- list() hyper.pars[[k]]$parama <- as.numeric(assim.settings$inputs[[k]]$hyper.pars$parama) hyper.pars[[k]]$paramb <- as.numeric(assim.settings$inputs[[k]]$hyper.pars$paramb) } - } - } return(hyper.pars) @@ -869,8 +887,8 @@ return_hyperpars <- function(assim.settings, inputs){ ##' ##' @author Istem Fer ##' @export -load_pda_history <- function(workdir, ensemble.id, objects){ - load(paste0(workdir, "/history.pda", ensemble.id,".Rdata")) +load_pda_history <- function(workdir, ensemble.id, objects) { + load(paste0(workdir, "/history.pda", ensemble.id, ".Rdata")) alist <- lapply(objects, function(x) assign(x, get(x))) names(alist) <- objects return(alist) @@ -887,32 +905,31 @@ load_pda_history <- function(workdir, ensemble.id, objects){ ##' ##' @author Istem Fer ##' @export -generate_hierpost <- function(mcmc.out, prior.fn.all, prior.ind.all){ - - lower_lim <- sapply(seq_along(prior.ind.all), function(z) eval(prior.fn.all$qprior[[prior.ind.all[z]]], list(p=0.000001))) - upper_lim <- sapply(seq_along(prior.ind.all), function(z) eval(prior.fn.all$qprior[[prior.ind.all[z]]], list(p=0.999999))) +generate_hierpost <- function(mcmc.out, prior.fn.all, prior.ind.all) { + lower_lim <- sapply(seq_along(prior.ind.all), function(z) eval(prior.fn.all$qprior[[prior.ind.all[z]]], list(p = 0.000001))) + upper_lim <- sapply(seq_along(prior.ind.all), function(z) eval(prior.fn.all$qprior[[prior.ind.all[z]]], list(p = 0.999999))) - for(i in seq_along(mcmc.out)){ - mu_global_samp <- mcmc.out[[i]]$mu_global_samp + for (i in seq_along(mcmc.out)) { + mu_global_samp <- mcmc.out[[i]]$mu_global_samp sigma_global_samp <- mcmc.out[[i]]$sigma_global_samp iter_size <- dim(sigma_global_samp)[1] # calculate hierarchical posteriors from mu_global_samp and tau_global_samp hierarchical_samp <- mu_global_samp - for(si in seq_len(iter_size)){ - hierarchical_samp[si,] <- TruncatedNormal::rtmvnorm(1, - mu = mu_global_samp[si,], - sigma = sigma_global_samp[si,,], - lb = lower_lim, - ub = upper_lim) + for (si in seq_len(iter_size)) { + hierarchical_samp[si, ] <- TruncatedNormal::rtmvnorm(1, + mu = mu_global_samp[si, ], + sigma = sigma_global_samp[si, , ], + lb = lower_lim, + ub = upper_lim + ) } - mcmc.out[[i]]$hierarchical_samp <- hierarchical_samp + mcmc.out[[i]]$hierarchical_samp <- hierarchical_samp } return(mcmc.out) - } @@ -931,8 +948,7 @@ generate_hierpost <- function(mcmc.out, prior.fn.all, prior.ind.all){ ##' @author Istem Fer ##' @export sample_MCMC <- function(mcmc_path, n.param.orig, prior.ind.orig, n.post.knots, knots.params.temp, - prior.list, prior.fn, sf, sf.samp){ - + prior.list, prior.fn, sf, 
sf.samp) { PEcAn.logger::logger.info("Sampling from previous round's MCMC") mcmc.samp.list <- NULL # will be loaded in the next line @@ -949,13 +965,13 @@ sample_MCMC <- function(mcmc_path, n.param.orig, prior.ind.orig, n.post.knots, k for (i in seq_along(mcmc.param.list)) { params.subset[[i]] <- coda::as.mcmc.list(lapply(mcmc.param.list[[i]], coda::mcmc)) - burnin <- getBurnin(params.subset[[i]], method = "gelman.plot") + burnin <- getBurnin(params.subset[[i]], method = "gelman.plot") burnins[i] <- max(burnin, na.rm = TRUE) } maxburn <- max(burnins) - if(maxburn ==1){ # if no convergence, just discard the first bit - maxburn <- 0.2*nrow(params.subset[[1]][[1]]) + if (maxburn == 1) { # if no convergence, just discard the first bit + maxburn <- 0.2 * nrow(params.subset[[1]][[1]]) } collect_samples <- list() @@ -966,40 +982,38 @@ sample_MCMC <- function(mcmc_path, n.param.orig, prior.ind.orig, n.post.knots, k mcmc_samples <- do.call(rbind, collect_samples) get_samples <- sample(1:nrow(mcmc_samples), n.post.knots) - new_knots <- mcmc_samples[get_samples,] + new_knots <- mcmc_samples[get_samples, ] pass2bias <- new_knots[, ncol(new_knots)] # if there is bias param, it will be the last col # if there is no bias param this won't be used anyway # the rest of the code is not ready for bias params for multiple variables # when using sf, need to sample from sf mcmc samples and calculate back actual parameter values - if(!is.null(sf.samp)){ - + if (!is.null(sf.samp)) { sf_samples <- list() for (i in seq_along(sf.samp)) { sf_samples[[i]] <- stats::window(sf.samp[[i]], start = maxburn) } sf_samples <- do.call(rbind, sf_samples) - sf_knots <- sf_samples[get_samples,] + sf_knots <- sf_samples[get_samples, ] ind <- 0 - for(i in seq_along(n.param.orig)){ - if(all(sf %in% rownames(prior.list[[i]]))){ - temp.knots <- sapply(seq_along(prior.ind.orig[[i]]), function(z){ - eval(prior.fn[[i]]$qprior[prior.ind.orig[[i]]][[z]], list(p = sf_knots[,z])) + for (i in seq_along(n.param.orig)) { + if (all(sf %in% rownames(prior.list[[i]]))) { + temp.knots <- sapply(seq_along(prior.ind.orig[[i]]), function(z) { + eval(prior.fn[[i]]$qprior[prior.ind.orig[[i]]][[z]], list(p = sf_knots[, z])) }) new_knots[, (ind + 1):(ind + n.param.orig[i])] <- temp.knots } ind <- ind + n.param.orig[i] } - - }else{ + } else { sf_knots <- NULL } # now replace with new knots ind <- 0 - for(i in seq_along(n.param.orig)){ + for (i in seq_along(n.param.orig)) { sub_knots <- new_knots[, (ind + 1):(ind + n.param.orig[i]), drop = FALSE] knots.params.temp[[i]][, prior.ind.orig[[i]]] <- sub_knots @@ -1007,7 +1021,6 @@ sample_MCMC <- function(mcmc_path, n.param.orig, prior.ind.orig, n.post.knots, k } return(list(knots.params.temp = knots.params.temp, sf_knots = sf_knots, pass2bias = pass2bias)) - } @@ -1017,8 +1030,7 @@ sample_MCMC <- function(mcmc_path, n.param.orig, prior.ind.orig, n.post.knots, k ##' This is a helper function partly uses pda.emulator code ##' @param multi.settings PEcAn multi settings object ##' @export -return_multi_site_objects <- function(multi.settings){ - +return_multi_site_objects <- function(multi.settings) { settings <- multi.settings[[1]] # first one is as good as any ## check if scaling factors are gonna be used @@ -1029,23 +1041,23 @@ return_multi_site_objects <- function(multi.settings){ on.exit(PEcAn.DB::db.close(con), add = TRUE) # get prior.list - temp <- pda.load.priors(settings, con, TRUE) - prior_list <- temp$prior + temp <- pda.load.priors(settings, con, TRUE) + prior_list <- temp$prior # extract other indices to 
generate knots - pname <- lapply(prior_list, rownames) + pname <- lapply(prior_list, rownames) n.param.all <- sapply(prior_list, nrow) ## Select parameters to constrain all_pft_names <- sapply(settings$pfts, `[[`, "name") prior.ind <- prior.ind.orig <- vector("list", length(settings$pfts)) names(prior.ind) <- names(prior.ind.orig) <- all_pft_names - for(i in seq_along(settings$pfts)){ + for (i in seq_along(settings$pfts)) { pft.name <- settings$pfts[[i]]$name - if(pft.name %in% names(settings$assim.batch$param.names)){ - prior.ind[[i]] <- which(pname[[i]] %in% settings$assim.batch$param.names[[pft.name]]) + if (pft.name %in% names(settings$assim.batch$param.names)) { + prior.ind[[i]] <- which(pname[[i]] %in% settings$assim.batch$param.names[[pft.name]]) prior.ind.orig[[i]] <- which(pname[[i]] %in% settings$assim.batch$param.names[[pft.name]] | - pname[[i]] %in% any.scaling[[pft.name]]) + pname[[i]] %in% any.scaling[[pft.name]]) } } @@ -1056,32 +1068,40 @@ return_multi_site_objects <- function(multi.settings){ prior.fn <- lapply(prior_list, pda.define.prior.fn) # get format.list - input_ids <- sapply(settings$assim.batch$inputs, `[[`, "input.id") + input_ids <- sapply(settings$assim.batch$inputs, `[[`, "input.id") format_list <- lapply(input_ids, PEcAn.DB::query.format.vars, bety = con) # get knots # if this is the initial round we will draw from priors - ## Propose parameter knots (X) for emulator design - knots.list <- lapply(seq_along(settings$pfts), - function(x) pda.generate.knots(as.numeric(settings$assim.batch$n.knot), NULL, NULL, - n.param.all[x], - prior.ind.orig[[x]], - prior.fn[[x]], - pname[[x]])) - names(knots.list) <- sapply(settings$pfts,"[[",'name') - external_knots <- lapply(knots.list, `[[`, "params") + ## Propose parameter knots (X) for emulator design + knots.list <- lapply( + seq_along(settings$pfts), + function(x) { + pda.generate.knots( + as.numeric(settings$assim.batch$n.knot), NULL, NULL, + n.param.all[x], + prior.ind.orig[[x]], + prior.fn[[x]], + pname[[x]] + ) + } + ) + names(knots.list) <- sapply(settings$pfts, "[[", "name") + external_knots <- lapply(knots.list, `[[`, "params") - if(!is.null(settings$assim.batch$round_counter)){ + if (!is.null(settings$assim.batch$round_counter)) { collect_site_knots <- list() - for(i in seq_along(multi.settings)){ + for (i in seq_along(multi.settings)) { settings <- multi.settings[[i]] # if not, we have to bring all the MCMC samples from all sites and draw from them. 
- sampled_knots <- sample_MCMC(file.path(settings$outdir, basename(settings$assim.batch$mcmc.path)), - n.param.orig, prior.ind.orig, - as.numeric(settings$assim.batch$n.knot), external_knots, - prior_list, prior.fn, sf, NULL) + sampled_knots <- sample_MCMC( + file.path(settings$outdir, basename(settings$assim.batch$mcmc.path)), + n.param.orig, prior.ind.orig, + as.numeric(settings$assim.batch$n.knot), external_knots, + prior_list, prior.fn, sf, NULL + ) collect_site_knots[[i]] <- do.call("cbind", sampled_knots$knots.params.temp) } @@ -1089,13 +1109,17 @@ return_multi_site_objects <- function(multi.settings){ collect_site_knots <- do.call("rbind", collect_site_knots) # sample twice as much - collect_site_knots <- collect_site_knots[sample(1:nrow(collect_site_knots), - 2*as.numeric(settings$assim.batch$n.knot)), ] + collect_site_knots <- collect_site_knots[sample( + 1:nrow(collect_site_knots), + 2 * as.numeric(settings$assim.batch$n.knot) + ), ] # bring the previous set in - need_obj <- load_pda_history(workdir = settings$outdir, - ensemble.id = settings$assim.batch$ensemble.id, - objects = c("SS", "prior.ind.all")) + need_obj <- load_pda_history( + workdir = settings$outdir, + ensemble.id = settings$assim.batch$ensemble.id, + objects = c("SS", "prior.ind.all") + ) previous_knots <- need_obj$SS[[1]][, -ncol(need_obj$SS[[1]])] new_site_knots <- rbind(previous_knots, collect_site_knots[, need_obj$prior.ind.all]) @@ -1104,36 +1128,36 @@ return_multi_site_objects <- function(multi.settings){ PEcAn.logger::logger.info("Choosing distant points. Please wait.") repeat{ n <- dim(new_site_knots)[1] - if(n == as.numeric(settings$assim.batch$n.knot) + nrow(previous_knots)) break + if (n == as.numeric(settings$assim.batch$n.knot) + nrow(previous_knots)) break foo <- utils::combn(seq_len(n), 2) dr <- stats::dist(new_site_knots) - if(all(foo[, which.min(dr)] %in% 1:nrow(previous_knots))){ - new_site_knots <- new_site_knots[-foo[, which.min(dr)],] - previous_knots <- previous_knots[-foo[, which.min(dr)],] - }else if(any(foo[, which.min(dr)] %in% 1:nrow(previous_knots))){ - new_site_knots <- new_site_knots[-foo[, which.min(dr)][!(foo[, which.min(dr)] %in% 1:nrow(previous_knots))],] - }else{ - new_site_knots <- new_site_knots[-sample(foo[, which.min(dr)], 1),] + if (all(foo[, which.min(dr)] %in% 1:nrow(previous_knots))) { + new_site_knots <- new_site_knots[-foo[, which.min(dr)], ] + previous_knots <- previous_knots[-foo[, which.min(dr)], ] + } else if (any(foo[, which.min(dr)] %in% 1:nrow(previous_knots))) { + new_site_knots <- new_site_knots[-foo[, which.min(dr)][!(foo[, which.min(dr)] %in% 1:nrow(previous_knots))], ] + } else { + new_site_knots <- new_site_knots[-sample(foo[, which.min(dr)], 1), ] } - } - new_site_knots <- new_site_knots[-(1:nrow(previous_knots)),] - these_knots <- apply(new_site_knots, 1, function(x) prodlim::row.match(x, collect_site_knots[, need_obj$prior.ind.all]) ) - collect_site_knots <- collect_site_knots[these_knots,] + new_site_knots <- new_site_knots[-(1:nrow(previous_knots)), ] + these_knots <- apply(new_site_knots, 1, function(x) prodlim::row.match(x, collect_site_knots[, need_obj$prior.ind.all])) + collect_site_knots <- collect_site_knots[these_knots, ] ind <- 0 - for(p in seq_along(settings$pfts)){ - external_knots[[p]] <- collect_site_knots[, (ind + 1):(ind + ncol(external_knots[[p]]))] + for (p in seq_along(settings$pfts)) { + external_knots[[p]] <- collect_site_knots[, (ind + 1):(ind + ncol(external_knots[[p]]))] ind <- ind + ncol(external_knots[[p]]) } - - } + } 
ensembleid_list <- sapply(multi.settings, function(x) pda.create.ensemble(x, con, x$workflow$id)) - return(list(priorlist = prior_list, - formatlist = format_list, - externalknots = external_knots, - ensembleidlist = ensembleid_list)) + return(list( + priorlist = prior_list, + formatlist = format_list, + externalknots = external_knots, + ensembleidlist = ensembleid_list + )) } @@ -1142,11 +1166,10 @@ return_multi_site_objects <- function(multi.settings){ ##' @param site site number (which site) ##' @param multi_site_objects information needed for remote runs ##' @export -prepare_pda_remote <- function(settings, site = 1, multi_site_objects){ - +prepare_pda_remote <- function(settings, site = 1, multi_site_objects) { # Check the dimensions of the proposed knots and the number of knots requested # mistakes can happen when the user changes the settings$assim.batch$n.knot only for the first site in the xml - if(settings$assim.batch$n.knot != nrow(multi_site_objects$externalknots[[1]])){ + if (settings$assim.batch$n.knot != nrow(multi_site_objects$externalknots[[1]])) { PEcAn.logger::logger.warn("The number of knots requested and proposed number of knots do not match. Changing settings$assim.batch$n.knot from ", settings$assim.batch$n.knot, "to", nrow(multi_site_objects$externalknots[[1]])) settings$assim.batch$n.knot <- nrow(multi_site_objects$externalknots[[1]]) } @@ -1156,81 +1179,86 @@ prepare_pda_remote <- function(settings, site = 1, multi_site_objects){ # instead find this directory from remote rundir so that it's consistent remote_dir <- dirname(settings$host$rundir) - #save - local_object_file <- paste0(settings$outdir, "/multi_site_objects_s",site,".Rdata") - remote_object_file <- paste0(remote_dir, "/multi_site_objects_s",site,".Rdata") + # save + local_object_file <- paste0(settings$outdir, "/multi_site_objects_s", site, ".Rdata") + remote_object_file <- paste0(remote_dir, "/multi_site_objects_s", site, ".Rdata") ######## prepare the sub.sh # this will need generalization over other machines, can parse some of these from settings$host$qsub - local_sub_file <- paste0(settings$outdir, "/sub" , site, ".sh") + local_sub_file <- paste0(settings$outdir, "/sub", site, ".sh") cat("#!/bin/sh\n", file = local_sub_file) cat(paste0("#$ -wd ", remote_dir, "\n"), file = local_sub_file, append = TRUE) cat("#$ -j y\n", file = local_sub_file, append = TRUE) cat("#$ -S /bin/bash\n", file = local_sub_file, append = TRUE) cat("#$ -V\n", file = local_sub_file, append = TRUE) # parse queue from settings$host$qsub - cat(paste0("#$ -q '", gsub( " .*$", "", sub(".*-q ", "", settings$host$qsub)), "'\n"), file = local_sub_file, append = TRUE) - cat(paste0("#$ -l h_rt=", gsub( " .*$", "", sub(".*h_rt=", "", settings$host$qsub)), "\n"), file = local_sub_file, append = TRUE) - cat(paste0("#$ -N emulator_s", site,"\n"), file = local_sub_file, append = TRUE) + cat(paste0("#$ -q '", gsub(" .*$", "", sub(".*-q ", "", settings$host$qsub)), "'\n"), file = local_sub_file, append = TRUE) + cat(paste0("#$ -l h_rt=", gsub(" .*$", "", sub(".*h_rt=", "", settings$host$qsub)), "\n"), file = local_sub_file, append = TRUE) + cat(paste0("#$ -N emulator_s", site, "\n"), file = local_sub_file, append = TRUE) cat(paste0("#$ -pe omp ", length(settings$assim.batch$inputs), "\n"), file = local_sub_file, append = TRUE) cat(paste0("#cd ", remote_dir, "\n"), file = local_sub_file, append = TRUE) cat(paste0("#", settings$host$prerun, "\n"), file = local_sub_file, append = TRUE) - cat(paste0("Rscript remote_emulator_s",site,".R\n"), file 
= local_sub_file, append = TRUE) - cat(paste0("mv ", multi_site_objects$ensembleidlist[site],"/pecan.pda", - multi_site_objects$ensembleidlist[site], ".xml ", remote_dir), file = local_sub_file, append = TRUE) - remote_sub_file <- paste0(remote_dir, "/sub" , site, ".sh") + cat(paste0("Rscript remote_emulator_s", site, ".R\n"), file = local_sub_file, append = TRUE) + cat(paste0( + "mv ", multi_site_objects$ensembleidlist[site], "/pecan.pda", + multi_site_objects$ensembleidlist[site], ".xml ", remote_dir + ), file = local_sub_file, append = TRUE) + remote_sub_file <- paste0(remote_dir, "/sub", site, ".sh") ######## create R script - local_script_file <- paste0(settings$outdir, "/remote_emulator_s",site,".R") - first_lines <- c("rm(list=ls(all=TRUE))\n", - "library(PEcAn.assim.batch)\n", - "library(PEcAn.benchmark)\n", - paste0("load(\"",remote_object_file,"\")\n"), + local_script_file <- paste0(settings$outdir, "/remote_emulator_s", site, ".R") + first_lines <- c( + "rm(list=ls(all=TRUE))\n", + "library(PEcAn.assim.batch)\n", + "library(PEcAn.benchmark)\n", + paste0("load(\"", remote_object_file, "\")\n"), "settings <- multi_site_objects$settings\n", "external_priors <- multi_site_objects$priorlist\n", "external_knots <- multi_site_objects$externalknots\n", "external_formats <- multi_site_objects$formatlist\n", - paste0("ensemble_id <- multi_site_objects$ensembleidlist[", site, "]\n")) + paste0("ensemble_id <- multi_site_objects$ensembleidlist[", site, "]\n") + ) # if this is another round - if(!is.null(settings$assim.batch$round_counter)){ - external_data_line <- paste0("load(\"",file.path(remote_dir, - paste0("external.", - settings$assim.batch$ensemble.id, - ".Rdata")),"\")\n") + if (!is.null(settings$assim.batch$round_counter)) { + external_data_line <- paste0("load(\"", file.path( + remote_dir, + paste0( + "external.", + settings$assim.batch$ensemble.id, + ".Rdata" + ) + ), "\")\n") first_lines <- c(first_lines, external_data_line) settings$assim.batch$extension <- "round" last_lines <- c("pda.emulator(settings, external.priors = external_priors, external.data = external.data, external.knots = external_knots, external.formats = external_formats, ensemble.id = ensemble_id, remote = TRUE)") - }else if(!is.null(settings$assim.batch$data.path)){ - - external_data_line <- paste0("load(\"", settings$assim.batch$data.path ,"\")\n") + } else if (!is.null(settings$assim.batch$data.path)) { + external_data_line <- paste0("load(\"", settings$assim.batch$data.path, "\")\n") first_lines <- c(first_lines, external_data_line) last_lines <- c("pda.emulator(settings, external.priors = external_priors, external.data = external.data, external.knots = external_knots, external.formats = external_formats, ensemble.id = ensemble_id, remote = TRUE)") - - }else{ - + } else { last_lines <- c("pda.emulator(settings, external.priors = external_priors, external.knots = external_knots, external.formats = external_formats, ensemble.id = ensemble_id, remote = TRUE)") } writeLines(c(first_lines, last_lines), local_script_file) - remote_script_file <- paste0(remote_dir, "/remote_emulator_s", site,".R") + remote_script_file <- paste0(remote_dir, "/remote_emulator_s", site, ".R") - #cheating. needs to be done after extracting all paths + # cheating. 
needs to be done after extracting all paths host_info <- settings$host - PEcAn.remote::remote.execute.cmd(host_info, paste0("mkdir -p ", remote_dir,"/pft")) - for(i in seq_along(settings$pfts)){ + PEcAn.remote::remote.execute.cmd(host_info, paste0("mkdir -p ", remote_dir, "/pft")) + for (i in seq_along(settings$pfts)) { settings$pfts[[i]]$outdir <- file.path(remote_dir, "pft", basename(settings$pfts[[i]]$outdir)) PEcAn.remote::remote.execute.cmd(host_info, paste0("mkdir -p ", settings$pfts[[i]]$outdir)) } - settings$host$name <- "localhost" + settings$host$name <- "localhost" newrundir <- paste0(remote_dir, "/", multi_site_objects$ensembleidlist[site], "/run/") newoutdir <- paste0(remote_dir, "/", multi_site_objects$ensembleidlist[site], "/out/") settings$host$rundir <- settings$rundir <- newrundir @@ -1247,7 +1275,6 @@ prepare_pda_remote <- function(settings, site = 1, multi_site_objects){ return(remote_sub_file) - } ##' helper function for syncing remote pda runs @@ -1257,29 +1284,32 @@ prepare_pda_remote <- function(settings, site = 1, multi_site_objects){ ##' @param ensembleidlist ensemble id list for remote runs ##' @param register if register==TRUE, the last files returned will be registered to the DB, TO BE DONE ##' @export -sync_pda_remote <- function(multi.settings, ensembleidlist, register = FALSE){ - +sync_pda_remote <- function(multi.settings, ensembleidlist, register = FALSE) { options <- "--include=pecan.pda*" options <- c(options, "--include=history*") options <- c(options, "--include=pft/***") options <- c(options, "--include=mcmc.list*") options <- c(options, "--include=ss.pda*") options <- c(options, "--include=emulator.pda*") - options <- c(options, "--exclude=*") #exclude everything else - PEcAn.remote::remote.copy.from(host = multi.settings[[1]]$host, - src = paste0(dirname(multi.settings[[1]]$host$outdir),"/"), - dst = multi.settings[[1]]$outdir, - options = options) + options <- c(options, "--exclude=*") # exclude everything else + PEcAn.remote::remote.copy.from( + host = multi.settings[[1]]$host, + src = paste0(dirname(multi.settings[[1]]$host$outdir), "/"), + dst = multi.settings[[1]]$outdir, + options = options + ) # update multi.settings - for(ms in seq_along(multi.settings)){ - tmp_settings <- PEcAn.settings::read.settings(paste0(multi.settings[[ms]]$outdir,"/pecan.pda", - ensembleidlist[[ms]],".xml")) + for (ms in seq_along(multi.settings)) { + tmp_settings <- PEcAn.settings::read.settings(paste0( + multi.settings[[ms]]$outdir, "/pecan.pda", + ensembleidlist[[ms]], ".xml" + )) multi.settings[[ms]]$assim.batch <- tmp_settings$assim.batch multi.settings[[ms]]$pfts <- tmp_settings$pfts } - if(register){ + if (register) { # fcn needs connection to DB } diff --git a/modules/assim.batch/R/plot.da.R b/modules/assim.batch/R/plot.da.R index 3e096f65239..707bfb348c4 100644 --- a/modules/assim.batch/R/plot.da.R +++ b/modules/assim.batch/R/plot.da.R @@ -1,32 +1,31 @@ ## Carl Davidson's code for plotting results from emulator-based DA ## ported by M. 
Dietze 08/30/12 -## some of this is redundant with other parts of PEcAn and needs to be cleaned up +## some of this is redundant with other parts of PEcAn and needs to be cleaned up plot_da <- function(prior.dir, prior.file, in.dir, out.dir, next.run.dir) { - # source('code/R/approx.posterior.R') source('code/R/utils.R') # prior.dir <- './pecan/Toolik/growth/' prior.file<-'/post.distns.Rdata' in.dir <- # './pecan/Toolik/growth/' out.dir <- './pecan/Toolik/growth/' next.run.dir <- # './pecan/Toolik/growth/' - + # prior.dir <- './pecan/Toolik/growth/' prior.file<-'/da.post.distns.Rdata' in.dir <- # './pecan/BarrowDA5param/' out.dir <- './pecan/BarrowDA5param/' next.run.dir <- # './pecan/AnaktuvukControl/' - - prior.dir <- "./pecan/BarrowDA5param/" - prior.file <- "/da.post.distns.Rdata" - in.dir <- "./pecan/AtqasukDA5param/" - out.dir <- "./pecan/AtqasukDA5param/" + + prior.dir <- "./pecan/BarrowDA5param/" + prior.file <- "/da.post.distns.Rdata" + in.dir <- "./pecan/AtqasukDA5param/" + out.dir <- "./pecan/AtqasukDA5param/" next.run.dir <- "./pecan/AnaktuvukControl/" - - num.run.ids <- 5 #commandArgs(trailingOnly = TRUE) + + num.run.ids <- 5 # commandArgs(trailingOnly = TRUE) print(num.run.ids) - + samples.file <- paste(in.dir, "samples.Rdata", sep = "") L.nee.file <- paste(in.dir, "L.nee.Rdata", sep = "") - - if(file.exists(samples.file)) { + + if (file.exists(samples.file)) { samples <- new.env() load(samples.file, envir = "samples") ensemble.samples <- samples$ensemble.samples @@ -34,8 +33,8 @@ plot_da <- function(prior.dir, prior.file, in.dir, out.dir, next.run.dir) { } else { PEcAn.logger::logger.error(samples.file, "not found, this file is required by the plot_da function") } - - if(file.exists(L.nee.file)) { + + if (file.exists(L.nee.file)) { L.nee <- new.env() load(L.nee.file, envir = "L.nee") x <- L.nee$x @@ -46,7 +45,7 @@ plot_da <- function(prior.dir, prior.file, in.dir, out.dir, next.run.dir) { prior.x <- x prior.y <- y - + ddist <- function(x, prior) { if (prior$distn == "exp") { return(stats::dexp(x, prior$parama)) @@ -71,27 +70,27 @@ plot_da <- function(prior.dir, prior.file, in.dir, out.dir, next.run.dir) { return(post.distns[traits, ]) })) traits <- rownames(priors) - + priors0 <- do.call(rbind, lapply(pfts, function(pft) { traits <- names(ensemble.samples[[pft]]) load(paste("./pecan/Toolik/growth/", pft, "/da.post.distns.Rdata", sep = "")) return(post.distns[traits, ]) })) traits <- rownames(priors) - + traits <- rownames(priors) - + # IMMEDIATE PRIORS priors2 <- do.call(rbind, lapply(pfts, function(pft) { traits <- names(ensemble.samples[[pft]]) load(paste(prior.dir, pft, "/", prior.file, sep = "")) return(post.distns[traits, ]) })) - + p.rng <- do.call(rbind, lapply(pfts, function(pft) { t(sa.samples[[pft]][c(1, nrow(sa.samples[[pft]])), ]) })) - + # PLOT LIKELIHOODS graphics::par(mfrow = c(3, 5)) good.runs <- y < stats::quantile(y, 0.95) @@ -99,29 +98,30 @@ plot_da <- function(prior.dir, prior.file, in.dir, out.dir, next.run.dir) { print(length(good.runs)) for (i in seq_along(x)) { graphics::plot(x[good.runs, i], y[good.runs], - main = traits[i]$figid, - xlim = p.rng[i, ], - xlab = traits[i]$units, - ylab = "-log(likelihood)", - pch = 1) + main = traits[i]$figid, + xlim = p.rng[i, ], + xlab = traits[i]$units, + ylab = "-log(likelihood)", + pch = 1 + ) graphics::points(prior.x[, i], prior.y, col = "grey") } samp <- lapply(seq(num.run.ids), function(run.id) { print(paste0(in.dir, "./mcmc", run.id, ".Rdata")) run.id.file <- paste0(in.dir, "./mcmc", run.id, ".Rdata") - - 
if(file.exists(run.id.file)) { + + if (file.exists(run.id.file)) { run.env <- new.env() load(run.id.file, envir = "run.env") m <- run.env$m } else { PEcAn.logger::logger.error(run.id.file, "not found, this file is required by the plot_da function") } - + return(m) }) - + samp <- unlist(samp, recursive = FALSE) nmcmc <- nrow(samp[[1]]) print(nmcmc) @@ -129,73 +129,84 @@ plot_da <- function(prior.dir, prior.file, in.dir, out.dir, next.run.dir) { graphics::par(mfrow = c(2, 3)) for (i in seq_along(samp[[1]])) { all <- do.call(rbind, lapply(samp, function(chain) chain[thin, i])) - + # MCMC chain graphics::plot(c(), ylim = range(all, na.rm = TRUE), xlim = c(1, length(thin)), ylab = "", type = "l") for (chain in seq(samp)) { graphics::lines(samp[[chain]][thin, i], col = chain) } - + # Autocorrelation plots samp.mcmc <- coda::as.mcmc.list(lapply(samp, function(chain) coda::as.mcmc(chain[thin, i]))) coda::gelman.plot(samp.mcmc, auto.layout = FALSE, ylab = "") coda::autocorr.plot(samp.mcmc[[1]], auto.layout = FALSE) } - + graphics::par(mfrow = c(3, 5)) for (i in seq_along(samp[[1]])) { all <- do.call(rbind, lapply(samp, function(chain) chain[thin, i])) - + # Density plots graphics::plot(stats::density(all), - xlim = p.rng[i, ], - main = paste(traits[i]$figid), - type = "l", - ylab = "", - xlab = traits[i]$units) + xlim = p.rng[i, ], + main = paste(traits[i]$figid), + type = "l", + ylab = "", + xlab = traits[i]$units + ) x <- seq(p.rng[i, 1], p.rng[i, 2], length = 1000) graphics::lines(x, ddist(x, priors[traits[i]$id, ]), col = "grey") graphics::lines(x, ddist(x, priors2[traits[i]$id, ]), col = "grey", lty = 2) } - + # Now approximate posteriors to data assimilation and store them with posteriors from meta # analysis da.post.i <- 1 # MOST SENSITIVE, BY HEIGHT dummy parameters with no sensitivity - da.traits.hite <- list(tundra.grass = c("plant_min_temp", "hgt_min", "seed_rain"), - tundra.evergreen = c("b1Bs", "SLA", "b1Bl", "seedling_mortality", "Vcmax"), - tundra.deciduous = c("b1Ht", "b2Ht", "r_fract", "b1Bs", "growth_resp_factor")) + da.traits.hite <- list( + tundra.grass = c("plant_min_temp", "hgt_min", "seed_rain"), + tundra.evergreen = c("b1Bs", "SLA", "b1Bl", "seedling_mortality", "Vcmax"), + tundra.deciduous = c("b1Ht", "b2Ht", "r_fract", "b1Bs", "growth_resp_factor") + ) # MOST SENSITIVE, BY NEE - da.traits <- list(tundra.grass = c("seedling_mortality", "f_labile", "root_turnover_rate", "Vcmax", "leaf_width"), - tundra.evergreen = c("b1Bs", "growth_resp_factor", "r_fract", "Vcmax", "b1Bl"), - tundra.deciduous = c("r_fract", "f_labile", "growth_resp_factor", "b1Bl", "stomatal_slope")) + da.traits <- list( + tundra.grass = c("seedling_mortality", "f_labile", "root_turnover_rate", "Vcmax", "leaf_width"), + tundra.evergreen = c("b1Bs", "growth_resp_factor", "r_fract", "Vcmax", "b1Bl"), + tundra.deciduous = c("r_fract", "f_labile", "growth_resp_factor", "b1Bl", "stomatal_slope") + ) # MOST SENSITIVE, BY NEE - da.traits.shared <- list(tundra.grass = c("root_turnover_rate", "seed_rain", "hgt_min", "seedling_mortality", - "growth_resp_factor", "Vcmax", "SLA", "f_labile"), - tundra.evergreen = c("growth_resp_factor", "b1Bs", "b1Bl", "b2Ht", "b1Ht", "SLA", "Vcmax", "r_fract"), - tundra.deciduous = c("b1Ht", "b1Bs", "b2Ht", "growth_resp_factor", "Vcmax", "stomatal_slope", "r_fract", "root_turnover_rate")) - - cv <- function(foo) stats::sd(foo)/mean(foo) + da.traits.shared <- list( + tundra.grass = c( + "root_turnover_rate", "seed_rain", "hgt_min", "seedling_mortality", + "growth_resp_factor", "Vcmax", 
"SLA", "f_labile" + ), + tundra.evergreen = c("growth_resp_factor", "b1Bs", "b1Bl", "b2Ht", "b1Ht", "SLA", "Vcmax", "r_fract"), + tundra.deciduous = c("b1Ht", "b1Bs", "b2Ht", "growth_resp_factor", "Vcmax", "stomatal_slope", "r_fract", "root_turnover_rate") + ) + + cv <- function(foo) stats::sd(foo) / mean(foo) foo <- matrix(NA, nrow(priors), 8) for (pft in pfts) { print(pft) load(paste(prior.dir, "/", pft, "/", prior.file, sep = "")) for (i in which(rownames(post.distns) %in% da.traits[[pft]])) { samp.i <- list() - samp.i[[rownames(post.distns)[[i]]]] <- + samp.i[[rownames(post.distns)[[i]]]] <- unlist(lapply(samp, function(chain) chain[thin, da.post.i])) - + print(rownames(post.distns)[[i]]) cv.prior1 <- cv(rdist(1e+06, priors2[da.post.i, ])) cv.prior2 <- cv(rdist(1e+06, post.distns[i, ])) cv.post <- cv(samp.i[[rownames(post.distns)[[i]]]]) - foo[da.post.i, ] <- c(pft, rownames(post.distns)[[i]], - cv.prior1, - cv.prior2, - cv.post, - (cv.prior1 - cv.prior2) / cv.prior1, - (cv.prior2 - cv.post)/cv.prior2, - (cv.prior1 - cv.post)/cv.prior1) + foo[da.post.i, ] <- c( + pft, rownames(post.distns)[[i]], + cv.prior1, + cv.prior2, + cv.post, + (cv.prior1 - cv.prior2) / cv.prior1, + (cv.prior2 - cv.post) / cv.prior2, + (cv.prior1 - cv.post) / cv.prior1 + ) post.distns[i, ] <- PEcAn.MA::approx.posterior(samp.i, post.distns[i, ]) da.post.i <- da.post.i + 1 } @@ -206,8 +217,7 @@ plot_da <- function(prior.dir, prior.file, in.dir, out.dir, next.run.dir) { } foo <- as.data.frame(foo) names(foo) <- c("pft", "trait", "cv1", "cv2", "cv3", "reduction1", "reduction2", "reductiontot") - + graphics::par(mfrow = c(1, 1), cex = 0.5) # graphics::plot(foo[,6] ~ as.factor(rownames(priors))) - } # plot_da diff --git a/modules/assim.batch/tests/testthat/test.autoburnin.R b/modules/assim.batch/tests/testthat/test.autoburnin.R index 8e81769f3ca..9e0d9f862b9 100644 --- a/modules/assim.batch/tests/testthat/test.autoburnin.R +++ b/modules/assim.batch/tests/testthat/test.autoburnin.R @@ -7,33 +7,37 @@ context("Autoburnin functions") n1 <- 7200 n2 <- 800 mu_common <- 0 -chain1 <- coda::mcmc(cbind("a" = c(rnorm(n1, 5), rnorm(n2, mu_common)), - "b" = c(rnorm(n1, 5), rnorm(n2, mu_common)))) -chain2 <- coda::mcmc(cbind("a" = c(rnorm(n1, -5), rnorm(n2, mu_common)), - "b" = c(rnorm(n1, -5), rnorm(n2, mu_common)))) +chain1 <- coda::mcmc(cbind( + "a" = c(rnorm(n1, 5), rnorm(n2, mu_common)), + "b" = c(rnorm(n1, 5), rnorm(n2, mu_common)) +)) +chain2 <- coda::mcmc(cbind( + "a" = c(rnorm(n1, -5), rnorm(n2, mu_common)), + "b" = c(rnorm(n1, -5), rnorm(n2, mu_common)) +)) test_mcmc <- coda::mcmc.list(chain1, chain2) burnin <- getBurnin(test_mcmc, threshold = 1.1) burned <- autoburnin(test_mcmc) test_that("Burnin value is a number and within the dimensions of `test_mcmc`", { - expect_is(burnin, "numeric") - expect_is(test_mcmc[burnin,], "list") - expect_is(unlist(test_mcmc[burnin,]), "numeric") + expect_is(burnin, "numeric") + expect_is(test_mcmc[burnin, ], "list") + expect_is(unlist(test_mcmc[burnin, ]), "numeric") }) test_that("Number of chains hasn't changed", { - expect_equal(length(test_mcmc), length(burned)) + expect_equal(length(test_mcmc), length(burned)) }) test_that("Burned-in chains have same dimensions", { - expect_equal(dim(burned[[1]]), dim(burned[[2]])) + expect_equal(dim(burned[[1]]), dim(burned[[2]])) }) test_that("Burned-in chains are shorter than original", { - expect_true(coda::niter(test_mcmc) > coda::niter(burned)) + expect_true(coda::niter(test_mcmc) > coda::niter(burned)) }) test_that("Burnin value is where chains 
actually converge", { - expect_true(burnin > n1) + expect_true(burnin > n1) }) diff --git a/modules/assim.batch/tests/testthat/test.bt_prior.R b/modules/assim.batch/tests/testthat/test.bt_prior.R index c67086fc5b2..bd03c02e073 100644 --- a/modules/assim.batch/tests/testthat/test.bt_prior.R +++ b/modules/assim.batch/tests/testthat/test.bt_prior.R @@ -5,36 +5,42 @@ context("BayesianTools prior functions") set.seed(26353451) -prior_list <- list(list('normal', 'norm', 0.5, 2), - list('lognormal', 'lnorm', 1, 1), - list('gamma', 'gamma', 0.5, 0.5)) +prior_list <- list( + list("normal", "norm", 0.5, 2), + list("lognormal", "lnorm", 1, 1), + list("gamma", "gamma", 0.5, 0.5) +) prior_df <- do.call(rbind.data.frame, prior_list) -colnames(prior_df) <- c('param_name', 'distn', 'parama', 'paramb') +colnames(prior_df) <- c("param_name", "distn", "parama", "paramb") prior <- pda.create.btprior(prior_df) x <- c(2, 3, 4) -correct_dens <- with(prior_df, dnorm(x[1], parama[1], paramb[1], log = TRUE) + - dlnorm(x[2], parama[2], paramb[2], log = TRUE) + - dgamma(x[3], parama[3], paramb[3], log = TRUE)) +correct_dens <- with(prior_df, dnorm(x[1], parama[1], paramb[1], log = TRUE) + + dlnorm(x[2], parama[2], paramb[2], log = TRUE) + + dgamma(x[3], parama[3], paramb[3], log = TRUE)) prior_dens <- prior$density(x) -test_that('Prior returns correct density', expect_equal(correct_dens, prior_dens)) +test_that("Prior returns correct density", expect_equal(correct_dens, prior_dens)) -correct_mean <- with(prior_df, c(parama[1], - exp(parama[2] + paramb[2]^2 / 2), - parama[3] / paramb[3])) -correct_var <- with(prior_df, c(paramb[1]^2, - (exp(paramb[2]^2) - 1) * exp(2 * parama[2] + paramb[2]^2), - parama[3] / paramb[3]^2)) -names(correct_mean) <- names(correct_var) <- prior_df[['param_name']] +correct_mean <- with(prior_df, c( + parama[1], + exp(parama[2] + paramb[2]^2 / 2), + parama[3] / paramb[3] +)) +correct_var <- with(prior_df, c( + paramb[1]^2, + (exp(paramb[2]^2) - 1) * exp(2 * parama[2] + paramb[2]^2), + parama[3] / paramb[3]^2 +)) +names(correct_mean) <- names(correct_var) <- prior_df[["param_name"]] nsamp <- 10000 prior_samples <- vapply(seq_len(nsamp), function(x) prior$sampler(), numeric(3)) prior_sampmean <- rowMeans(prior_samples) prior_sampvar <- apply(prior_samples, 1, var) -test_that('Prior sampler returns reasonable values', { - expect_equal(correct_mean, prior_sampmean, tolerance = 0.1) - expect_equal(correct_var, prior_sampvar, tolerance = 0.25) - }) +test_that("Prior sampler returns reasonable values", { + expect_equal(correct_mean, prior_sampmean, tolerance = 0.1) + expect_equal(correct_var, prior_sampvar, tolerance = 0.25) +}) diff --git a/modules/assim.sequential/R/Adjustment.R b/modules/assim.sequential/R/Adjustment.R index 93c97228c4c..7d07ebf226b 100644 --- a/modules/assim.sequential/R/Adjustment.R +++ b/modules/assim.sequential/R/Adjustment.R @@ -1,58 +1,55 @@ ##' @title adj.ens ##' @name adj.ens ##' @author Michael Dietze \email{dietze@@bu.edu}, Ann Raiho and Hamze Dokoohaki -##' -##' @param Pf A cov matrix of forecast state variables. +##' +##' @param Pf A cov matrix of forecast state variables. ##' @param X Dataframe or matrix of forecast state variables for different ensembles. ##' @param mu.f A vector with forecast mean estimates of state variables. ##' @param mu.a A vector with analysis mean estimates of state variables. ##' @param Pa The state estimate cov matrix of analysis. 
-##’ @details -##’ +## ’ @details +## ’ ##' @description This functions gives weights to different ensemble members based on their likelihood during the analysis step. Then it adjusts the analysis mean estimates of state variables based on the estimated weights. -##' +##' ##' @return Returns a vector of adjusted analysis mean estimates of state variables. ##' @export -adj.ens<-function(Pf, X, mu.f, mu.a, Pa){ +adj.ens <- function(Pf, X, mu.f, mu.a, Pa) { + S_f <- svd(Pf) + L_f <- S_f$d + V_f <- S_f$v - S_f <- svd(Pf) - L_f <- S_f$d - V_f <- S_f$v - ## normalize - Z <- X*0 - - for(i in seq_len(nrow(X))){ - - Z[i,] <- 1/sqrt(L_f) * t(V_f)%*%(X[i,]-mu.f) + Z <- X * 0 + for (i in seq_len(nrow(X))) { + Z[i, ] <- 1 / sqrt(L_f) * t(V_f) %*% (X[i, ] - mu.f) } - Z[is.na(Z)]<-0 + Z[is.na(Z)] <- 0 Z[is.infinite(Z)] <- 0 - + ## analysis - S_a <- svd(Pa) - - L_a <- S_a$d - V_a <- S_a$v - - ## analysis ensemble - X_a <- X*0 - for(i in seq_len(nrow(X))){ - # she decomposed Pa - then it's putting it back together but with a different Z which comes from the likelihood of that ens - X_a[i,] <- V_a %*%diag(sqrt(L_a))%*%Z[i,] + mu.a + S_a <- svd(Pa) + + L_a <- S_a$d + V_a <- S_a$v + + ## analysis ensemble + X_a <- X * 0 + for (i in seq_len(nrow(X))) { + # she decomposed Pa - then it's putting it back together but with a different Z which comes from the likelihood of that ens + X_a[i, ] <- V_a %*% diag(sqrt(L_a)) %*% Z[i, ] + mu.a } - - if (sum(mu.a - colMeans(X_a)) > 1 - || sum(mu.a - colMeans(X_a)) < -1) { - PEcAn.logger::logger.warn('Problem with ensemble adjustment (1)') + + if (sum(mu.a - colMeans(X_a)) > 1 || + sum(mu.a - colMeans(X_a)) < -1) { + PEcAn.logger::logger.warn("Problem with ensemble adjustment (1)") } - if (sum(diag(Pa) - diag(stats::cov(X_a))) > 5 - || sum(diag(Pa) - diag(stats::cov(X_a))) < -5) { - PEcAn.logger::logger.warn('Problem with ensemble adjustment (2)') + if (sum(diag(Pa) - diag(stats::cov(X_a))) > 5 || + sum(diag(Pa) - diag(stats::cov(X_a))) < -5) { + PEcAn.logger::logger.warn("Problem with ensemble adjustment (2)") } analysis <- as.data.frame(X_a) - + return(analysis) -} \ No newline at end of file +} diff --git a/modules/assim.sequential/R/Analysis_sda.R b/modules/assim.sequential/R/Analysis_sda.R index e2af6bbfd0f..11488796640 100644 --- a/modules/assim.sequential/R/Analysis_sda.R +++ b/modules/assim.sequential/R/Analysis_sda.R @@ -1,101 +1,100 @@ ##' @title Analysis.sda ##' @name Analysis.sda ##' @author Michael Dietze \email{dietze@@bu.edu}, Ann Raiho and Hamze Dokoohaki -##' -##' @param settings pecan standard settings list. +##' +##' @param settings pecan standard settings list. ##' @param FUN A Function for performing the analysis step. Two available options are: 1-EnKF and 2-GEF. ##' @param Forecast A list containing the forecasts variables including Q (process variance) and X (a dataframe of forecasts state variables for different ensemble) ##' @param Observed A list containing the observed variables including R (cov of observed state variables) and Y (vector of estimated mean of observed state variables) ##' @param H is a matrix of 1's and 0's specifying which observations go with which variables. ##' @param extraArg This argument is a list containing aqq, bqq and t. The aqq and bqq are shape parameters estimated over time for the proccess covariance and t gives the time in terms of index of obs.list. See Details. ##' @param ... Extra argument sent to the analysis function. 
In case you're using the `GEF` function, this function requires nt, obs.mean, obs.cov, which are the total number of steps, list of observed means and list of observed cov respectively. -##’ @details -##’ -##' -##' @description This functions uses the FUN to perform the analysis. EnKF function is developed inside the PEcAnAssimSequential package which can be sent to this function to perform the Ensemble Kalman Filter. +## ’ @details +## ’ +##' +##' @description This functions uses the FUN to perform the analysis. EnKF function is developed inside the PEcAnAssimSequential package which can be sent to this function to perform the Ensemble Kalman Filter. ##' The other option is GEF function inside the same package allowing to perform Generalized Ensemble kalman Filter. -##' +##' ##' If you're using an arbitrary function you can use the ... to send any other variables to your desired analysis function. -##' +##' ##' @return Returns whatever the FUN is returning. In case of EnKF and GEF, this function returns a list with estimated mean and cov matrix of forecast state variables as well as mean and cov estimated as a result of assimilation/analysis . ##' @export -Analysis.sda<-function(settings, - FUN, - Forecast=list(Pf=NULL,mu.f=NULL,Q=NULL,X=NULL), - Observed=list(R=NULL,Y=NULL), - H, - extraArg, - ... -){ - - if (is.null(FUN)) PEcAn.logger::logger.severe('Analysis function needs to be defined !') - FUN(settings, Forecast, Observed, H, extraArg ,...) - +Analysis.sda <- function(settings, + FUN, + Forecast = list(Pf = NULL, mu.f = NULL, Q = NULL, X = NULL), + Observed = list(R = NULL, Y = NULL), + H, + extraArg, + ...) { + if (is.null(FUN)) PEcAn.logger::logger.severe("Analysis function needs to be defined !") + FUN(settings, Forecast, Observed, H, extraArg, ...) } ##' @title EnKF ##' @name EnKF ##' @author Michael Dietze \email{dietze@@bu.edu}, Ann Raiho and Hamze Dokoohaki -##' -##' @param settings pecan standard settings list. +##' +##' @param settings pecan standard settings list. ##' @param Forecast A list containing the forecasts variables including Q (process variance) and X (a dataframe of forecasts state variables for different ensemble) ##' @param Observed A list containing the observed variables including R (cov of observed state variables) and Y (vector of estimated mean of observed state variables) ##' @param H is a matrix of 1's and 0's specifying which observations go with which variables. ##' @param extraArg This argument is NOT used inside this function but it is a list containing aqq, bqq and t. The aqq and bqq are shape parameters estimated over time for the proccess covariance and t gives the time in terms of index of obs.list. See Details. ##' @param ... Extra argument sent to the analysis function. -##’ @details -##’ -##' -##' @description Given the Forecast and Observed this function performs the Ensemble Kalamn Filter. -##' +## ’ @details +## ’ +##' +##' @description Given the Forecast and Observed this function performs the Ensemble Kalamn Filter. +##' ##' @return It returns a list with estimated mean and cov matrix of forecast state variables as well as mean and cov estimated as a result of assimilation/analysis . ##' @export -EnKF<-function(settings, Forecast, Observed, H, extraArg=NULL, ...){ - +EnKF <- function(settings, Forecast, Observed, H, extraArg = NULL, ...) { #------------------------------Setup #-- reading the dots and exposing them to the inside of the function - dots<-list(...) 
- if (length(dots)>0) lapply(names(dots),function(name){assign(name,dots[[name]])}) - for(i in seq_along(dots)) assign(names(dots)[i],dots[[names(dots)[i]]]) - - #Forecast inputs + dots <- list(...) + if (length(dots) > 0) { + lapply(names(dots), function(name) { + assign(name, dots[[name]]) + }) + } + for (i in seq_along(dots)) assign(names(dots)[i], dots[[names(dots)[i]]]) + + # Forecast inputs Q <- Forecast$Q # process error - X <- Forecast$X # states - #Observed inputs + X <- Forecast$X # states + # Observed inputs R <- Observed$R Y <- Observed$Y # Enkf--------------------------------------------------- mu.f <- as.numeric(apply(X, 2, mean, na.rm = TRUE)) Pf <- stats::cov(X) - - + + diag(Pf)[which(diag(Pf) == 0)] <- 0.1 ## hack for zero variance # for those elements with zero value if (length(Y) > 1) { - PEcAn.logger::logger.info("The zero variances in R and Pf is being replaced by half and one fifth of the minimum variance in those matrices respectively.") - diag(R)[which(diag(R)==0)] <- min(diag(R)[which(diag(R) != 0)])/2 - diag(Pf)[which(diag(Pf)==0)] <- min(diag(Pf)[which(diag(Pf) != 0)])/5 + diag(R)[which(diag(R) == 0)] <- min(diag(R)[which(diag(R) != 0)]) / 2 + diag(Pf)[which(diag(Pf) == 0)] <- min(diag(Pf)[which(diag(Pf) != 0)]) / 5 } - + ## process error if (!is.null(Q)) { Pf <- Pf + Q } - + ## Kalman Gain K <- Pf %*% t(H) %*% solve((R + H %*% Pf %*% t(H))) # Analysis mu.a <- mu.f + K %*% (Y - H %*% mu.f) - Pa <- (diag(ncol(X)) - K %*% H) %*% Pf + Pa <- (diag(ncol(X)) - K %*% H) %*% Pf return(list(mu.f = mu.f, Pf = Pf, mu.a = mu.a, Pa = Pa)) } ##' @title GEF ##' @name GEF ##' @author Michael Dietze \email{dietze@@bu.edu}, Ann Raiho and Hamze Dokoohaki -##' -##' @param settings pecan standard settings list. +##' +##' @param settings pecan standard settings list. ##' @param Forecast A list containing the forecasts variables including Q (process variance) and X (a dataframe of forecast state variables for different ensemble) ##' @param Observed A list containing the observed variables including R (cov of observed state variables) and Y (vector of estimated mean of observed state variables) ##' @param H not used @@ -103,176 +102,203 @@ EnKF<-function(settings, Forecast, Observed, H, extraArg=NULL, ...){ ##' @param nitr Number of iterations to run each MCMC chain. ##' @param nburnin Number of initial, pre-thinning, MCMC iterations to discard. ##' @param ... This function requires nt, obs.mean, obs.cov, which are the total number of steps, list of observed means and list of observed cov respectively. -##’ @details -##’ -##' +## ’ @details +## ’ +##' ##' @description Given the Forecast and Observed this function performs the Generalized Ensemble Kalamn Filter. The generalized ensemble filter follows generally the three steps of sequential state data assimilation. But, in the generalized ensemble filter we add a latent state vector that accounts for added process variance. Furthermore, instead of solving the analysis analytically like the EnKF, we have to estimate the mean analysis vector and covariance matrix with MCMC. -##' +##' ##' @return It returns a list with estimated mean and cov matrix of forecast state variables as well as mean and cov estimated as a result of assimilation/analysis . ##' @export -GEF<-function(settings, Forecast, Observed, H, extraArg, nitr=50000, nburnin=10000, ...){ +GEF <- function(settings, Forecast, Observed, H, extraArg, nitr = 50000, nburnin = 10000, ...) 
{ #------------------------------Setup #-- reading the dots and exposing them to the inside of the function - dots<-list(...) - if (length(dots)>0) lapply(names(dots),function(name){assign(name,dots[[name]], pos=1 )}) - - #General - var.names <- sapply(settings$state.data.assimilation$state.variable, '[[', "variable.name") - input.vars <- sapply(settings$state.data.assimilation$inputs, '[[', "variable.name") - operators <- sapply(settings$state.data.assimilation$inputs, '[[', "operator") - - #Loading nimbles functions - #PEcAnAssimSequential::load_nimble() - - #Forecast inputs + dots <- list(...) + if (length(dots) > 0) { + lapply(names(dots), function(name) { + assign(name, dots[[name]], pos = 1) + }) + } + + # General + var.names <- sapply(settings$state.data.assimilation$state.variable, "[[", "variable.name") + input.vars <- sapply(settings$state.data.assimilation$inputs, "[[", "variable.name") + operators <- sapply(settings$state.data.assimilation$inputs, "[[", "operator") + + # Loading nimbles functions + # PEcAnAssimSequential::load_nimble() + + # Forecast inputs Q <- Forecast$Q # process error - X <- Forecast$X # states - Pf = stats::cov(X) # Cov Forecast - Goes into tobit2space as initial condition but is re-estimated in tobit space - mu.f <- colMeans(X) #mean Forecast - This is used as an initial condition - - diag(Pf)[which(diag(Pf)==0)] <- min(diag(Pf)[which(diag(Pf) != 0)])/5 #fixing det(Pf)==0 - - #Observed inputs - R <- try(solve(Observed$R), silent = F) #putting solve() here so if not invertible error is before compiling tobit2space #sfsmisc::posdefify( + X <- Forecast$X # states + Pf <- stats::cov(X) # Cov Forecast - Goes into tobit2space as initial condition but is re-estimated in tobit space + mu.f <- colMeans(X) # mean Forecast - This is used as an initial condition + + diag(Pf)[which(diag(Pf) == 0)] <- min(diag(Pf)[which(diag(Pf) != 0)]) / 5 # fixing det(Pf)==0 + + # Observed inputs + R <- try(solve(Observed$R), silent = F) # putting solve() here so if not invertible error is before compiling tobit2space #sfsmisc::posdefify( Y <- Observed$Y wish.df <- function(Om, X, i, j, col) { (Om[i, j]^2 + Om[i, i] * Om[j, j]) / stats::var(X[, col]) } #----------------------------------- GEF----------------------------------------------------- - # Taking care of censored data ------------------------------ + # Taking care of censored data ------------------------------ ### create matrix the describes the support for each observed state variable at time t - path.to.models <- file.path(settings$outdir,"SDA","GEF") + path.to.models <- file.path(settings$outdir, "SDA", "GEF") aqq <- extraArg$aqq bqq <- extraArg$bqq - wts <- extraArg$wts/sum(extraArg$wts) - - if(any(is.na(wts))){ - PEcAn.logger::logger.warn(paste('We found an NA in the wts for the ensemble members. Is this what you want? For now, we will change the NA to a zero.')) + wts <- extraArg$wts / sum(extraArg$wts) + + if (any(is.na(wts))) { + PEcAn.logger::logger.warn(paste("We found an NA in the wts for the ensemble members. Is this what you want? 
For now, we will change the NA to a zero.")) wts[is.na(wts)] <- 0 } - if(sum(wts==0)){ - wts <- rep(1,nrow(X))/nrow(X) + if (sum(wts == 0)) { + wts <- rep(1, nrow(X)) / nrow(X) } interval <- NULL t <- extraArg$t intervalX <- matrix(NA, ncol(X), 2) rownames(intervalX) <- colnames(X) - outdir <- settings$modeloutdir - - ###Snow no snow hack - for(ii in 1:ncol(X)){ - try(if( sum(X[,ii],na.rm=T)==0 ) X[sample(x = 1:nrow(X),size = .2*nrow(X)),ii] <- .001) + outdir <- settings$modeloutdir + + ### Snow no snow hack + for (ii in 1:ncol(X)) { + try(if (sum(X[, ii], na.rm = T) == 0) X[sample(x = 1:nrow(X), size = .2 * nrow(X)), ii] <- .001) } - - ####getting ready to calculate y.ind and x.ind - for(i in 1:length(var.names)){ - intervalX[which(startsWith(rownames(intervalX), - var.names[i])), ] <- matrix(c(as.numeric(settings$state.data.assimilation$state.variables[[i]]$min_value), - as.numeric(settings$state.data.assimilation$state.variables[[i]]$max_value)), - length(which(startsWith(rownames(intervalX), - var.names[i]))),2,byrow = TRUE) + + #### getting ready to calculate y.ind and x.ind + for (i in 1:length(var.names)) { + intervalX[which(startsWith( + rownames(intervalX), + var.names[i] + )), ] <- matrix( + c( + as.numeric(settings$state.data.assimilation$state.variables[[i]]$min_value), + as.numeric(settings$state.data.assimilation$state.variables[[i]]$max_value) + ), + length(which(startsWith( + rownames(intervalX), + var.names[i] + ))), 2, + byrow = TRUE + ) + } + + #### These vectors are used to categorize data based on censoring from the interval matrix + x.ind <- x.censored <- matrix(NA, ncol = ncol(X), nrow = nrow(X)) + for (j in seq_along(mu.f)) { + for (n in seq_len(nrow(X))) { + x.ind[n, j] <- as.numeric(X[n, j] > 0) + x.censored[n, j] <- as.numeric(ifelse(X[n, j] > intervalX[j, 2], 0, X[n, j])) # } - - #### These vectors are used to categorize data based on censoring from the interval matrix - x.ind <- x.censored <- matrix(NA, ncol=ncol(X), nrow=nrow(X)) - for(j in seq_along(mu.f)){ - for(n in seq_len(nrow(X))){ - x.ind[n,j] <- as.numeric(X[n,j] > 0) - x.censored[n,j] <- as.numeric(ifelse(X[n,j] > intervalX[j,2], 0, X[n,j])) # + } + + recompileTobit <- extraArg$recompileTobit + recompileGEF <- extraArg$recompileGEF + + ### TO DO: needs to recompile if there are new data sources + if (TRUE) { # any(X==0,na.rm=T) + # + if (t == 1 | recompileTobit) { + # The purpose of this step is to impute data for mu.f + # where there are zero values so that + # mu.f is in 'tobit space' in the full model + constants.tobit2space <- list( + N = nrow(X), + J = length(mu.f) + ) + + data.tobit2space <- list( + y.ind = x.ind, + y.censored = x.censored, + mu_0 = rep(0, length(mu.f)), + lambda_0 = diag(1000, length(mu.f)), # can try solve + nu_0 = ncol(X) + 1, + wts = wts * nrow(X), # sigma x2 max Y + Sigma_0 = solve(diag(1000, length(mu.f))) + ) # some measure of prior obs + + inits.tobit2space <- function() { + list( + muf = rmnorm_chol( + 1, colMeans(X), + chol(diag(ncol(X)) * 100) + ), + pf = rwish_chol(1, + df = ncol(X) + 1, + cholesky = chol(solve(Pf)) + ) + ) } - } - - recompileTobit <- extraArg$recompileTobit - recompileGEF <- extraArg$recompileGEF - - ###TO DO: needs to recompile if there are new data sources - if(TRUE){ #any(X==0,na.rm=T) - # - if(t == 1 | recompileTobit){ - #The purpose of this step is to impute data for mu.f - #where there are zero values so that - #mu.f is in 'tobit space' in the full model - constants.tobit2space <- list(N = nrow(X), - J = length(mu.f)) - - data.tobit2space <- 
list(y.ind = x.ind, - y.censored = x.censored, - mu_0 = rep(0,length(mu.f)), - lambda_0 = diag(1000,length(mu.f)), #can try solve - nu_0 = ncol(X)+1, - wts = wts*nrow(X), #sigma x2 max Y - Sigma_0 = solve(diag(1000,length(mu.f))))#some measure of prior obs - - inits.tobit2space <- function() list(muf = rmnorm_chol(1,colMeans(X), - chol(diag(ncol(X))*100)), - pf = rwish_chol(1,df = ncol(X)+1, - cholesky = chol(solve(Pf)))) - #ptm <- proc.time() - - tobit2space_pred <- nimbleModel(tobit2space.model, data = data.tobit2space, - constants = constants.tobit2space, inits = inits.tobit2space(), - name = 'space') - - try(logprob_y_tobit2space <- tobit2space_pred$calculate('y.censored')) - if(is.na(logprob_y_tobit2space)) PEcAn.logger::logger.warn('We cannot calculate a logprobability for your data in the tobit2space model. Check data.tobit2space variable in the global environment to make sure its what you want.') - if(logprob_y_tobit2space < -1000000) PEcAn.logger::logger.warn(paste('Log probability very low for y in tobit2space model during time',t,'. Check initial conditions.')) - + # ptm <- proc.time() + + tobit2space_pred <- nimbleModel(tobit2space.model, + data = data.tobit2space, + constants = constants.tobit2space, inits = inits.tobit2space(), + name = "space" + ) + + try(logprob_y_tobit2space <- tobit2space_pred$calculate("y.censored")) + if (is.na(logprob_y_tobit2space)) PEcAn.logger::logger.warn("We cannot calculate a logprobability for your data in the tobit2space model. Check data.tobit2space variable in the global environment to make sure its what you want.") + if (logprob_y_tobit2space < -1000000) PEcAn.logger::logger.warn(paste("Log probability very low for y in tobit2space model during time", t, ". Check initial conditions.")) + ## Adding X.mod,q,r as data for building model. 
- conf_tobit2space <- configureMCMC(tobit2space_pred, thin = 10, print=TRUE) - conf_tobit2space$addMonitors(c("pf", "muf","y.censored")) - - conf_tobit2space$removeSampler('pf') - conf_tobit2space$addSampler('pf','conj_wt_wishart_sampler') - + conf_tobit2space <- configureMCMC(tobit2space_pred, thin = 10, print = TRUE) + conf_tobit2space$addMonitors(c("pf", "muf", "y.censored")) + + conf_tobit2space$removeSampler("pf") + conf_tobit2space$addSampler("pf", "conj_wt_wishart_sampler") + samplerNumberOffset_tobit2space <- length(conf_tobit2space$getSamplers()) - - for(j in seq_along(mu.f)){ - for(n in seq_len(nrow(X))){ - node <- paste0('y.censored[',n,',',j,']') - conf_tobit2space$addSampler(node, 'toggle', control=list(type='RW')) + + for (j in seq_along(mu.f)) { + for (n in seq_len(nrow(X))) { + node <- paste0("y.censored[", n, ",", j, "]") + conf_tobit2space$addSampler(node, "toggle", control = list(type = "RW")) ## could instead use slice samplers, or any combination thereof, e.g.: - ##conf$addSampler(node, 'toggle', control=list(type='slice')) + ## conf$addSampler(node, 'toggle', control=list(type='slice')) } } - + Rmcmc_tobit2space <- buildMCMC(conf_tobit2space) - - #restarting at good initial conditions is somewhat important here + + # restarting at good initial conditions is somewhat important here Cmodel_tobit2space <- compileNimble(tobit2space_pred) Cmcmc_tobit2space <- compileNimble(Rmcmc_tobit2space, project = tobit2space_pred) - - for(i in seq_along(X)) { + + for (i in seq_along(X)) { ## ironically, here we have to "toggle" the value of y.ind[i] ## this specifies that when y.ind[i] = 1, ## indicator variable is set to 0, which specifies *not* to sample - valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space+i]], 'toggle', 1-x.ind[i]) + valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space + i]], "toggle", 1 - x.ind[i]) } - - }else{ - - Cmodel_tobit2space$wts <- wts*nrow(X) - + } else { + Cmodel_tobit2space$wts <- wts * nrow(X) + Cmodel_tobit2space$y.ind <- x.ind Cmodel_tobit2space$y.censored <- x.censored - - inits.tobit2space <- function() list(muf = rmnorm_chol(1,colMeans(X),chol(diag(ncol(X))*100)), - pf = rwish_chol(1,df = ncol(X)+1,cholesky = chol(solve(stats::cov(X))))) - + + inits.tobit2space <- function() { + list( + muf = rmnorm_chol(1, colMeans(X), chol(diag(ncol(X)) * 100)), + pf = rwish_chol(1, df = ncol(X) + 1, cholesky = chol(solve(stats::cov(X)))) + ) + } + Cmodel_tobit2space$setInits(inits.tobit2space()) - - for(i in seq_along(X)) { + + for (i in seq_along(X)) { ## ironically, here we have to "toggle" the value of y.ind[i] ## this specifies that when y.ind[i] = 1, ## indicator variable is set to 0, which specifies *not* to sample - valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space+i]], 'toggle', 1-x.ind[i]) + valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space + i]], "toggle", 1 - x.ind[i]) } - } - - if(file.exists(file.path(outdir, paste0('dat.tobit2space',t,'.Rdata')))){ - load(file.path(outdir, paste0('dat.tobit2space',t,'.Rdata'))) - }else{ - + + if (file.exists(file.path(outdir, paste0("dat.tobit2space", t, ".Rdata")))) { + load(file.path(outdir, paste0("dat.tobit2space", t, ".Rdata"))) + } else { dat.tobit2space.nchains <- runMCMC( Cmcmc_tobit2space, @@ -282,142 +308,141 @@ GEF<-function(settings, Forecast, Observed, H, extraArg, nitr=50000, nburnin=100 inits = 
inits.tobit2space(), progressBar = TRUE ) - dat.tobit2space <- do.call(rbind,dat.tobit2space.nchains) - + dat.tobit2space <- do.call(rbind, dat.tobit2space.nchains) + which_f <- grep(c("f"), colnames(dat.tobit2space.nchains[[1]])) gelman.keep.tobit2space <- numeric(length(which_f)) - for(ff in seq_along(which_f)){ + for (ff in seq_along(which_f)) { mcmc.check <- list() - - mcmc.check[[1]] <- coda::mcmc(dat.tobit2space.nchains[[1]][,which_f[ff]]) - mcmc.check[[2]] <- coda::mcmc(dat.tobit2space.nchains[[2]][,which_f[ff]]) - mcmc.check[[3]] <- coda::mcmc(dat.tobit2space.nchains[[3]][,which_f[ff]]) - - - gelman.keep.tobit2space[ff] <- try(coda::gelman.diag(mcmc.check,transform = T)$psrf[1]) + + mcmc.check[[1]] <- coda::mcmc(dat.tobit2space.nchains[[1]][, which_f[ff]]) + mcmc.check[[2]] <- coda::mcmc(dat.tobit2space.nchains[[2]][, which_f[ff]]) + mcmc.check[[3]] <- coda::mcmc(dat.tobit2space.nchains[[3]][, which_f[ff]]) + + + gelman.keep.tobit2space[ff] <- try(coda::gelman.diag(mcmc.check, transform = T)$psrf[1]) } print(gelman.keep.tobit2space) - - if(any(gelman.keep.tobit2space > 1.5)) PEcAn.logger::logger.warn(paste('Gelman value > 1.5 for tobit2space model. Re-assess time point', t)) - save(dat.tobit2space, file = file.path(outdir, paste0('dat.tobit2space',t,'.Rdata'))) + if (any(gelman.keep.tobit2space > 1.5)) PEcAn.logger::logger.warn(paste("Gelman value > 1.5 for tobit2space model. Re-assess time point", t)) + + save(dat.tobit2space, file = file.path(outdir, paste0("dat.tobit2space", t, ".Rdata"))) } - - grDevices::pdf(file.path(outdir,paste0('assessParams',t,'.pdf'))) + + grDevices::pdf(file.path(outdir, paste0("assessParams", t, ".pdf"))) set.seed(t) - try(assessParams(dat = dat.tobit2space[sample(x = 10:nrow(dat.tobit2space),size = 500,replace = F),],wts=wts, Xt = X)) + try(assessParams(dat = dat.tobit2space[sample(x = 10:nrow(dat.tobit2space), size = 500, replace = F), ], wts = wts, Xt = X)) grDevices::dev.off() - + ## TO DO Add MCMC Diagnostics, how do we do it for pecan meta-analysis? 
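# A minimal sketch, assuming the list of per-chain matrices produced by runMCMC()
# above (e.g. `dat.tobit2space.nchains` with nchains = 3): one way to answer the
# TO DO on diagnostics using coda, which the surrounding code already calls for
# gelman.diag(). The helper name `diagnose_chains` is hypothetical.
diagnose_chains <- function(chains) {
  mcl <- coda::as.mcmc.list(lapply(chains, coda::as.mcmc))
  list(
    psrf = coda::gelman.diag(mcl, multivariate = FALSE)$psrf, # Gelman-Rubin statistic per parameter
    n_eff = coda::effectiveSize(mcl) # effective sample size per parameter
  )
}
# e.g. diagnose_chains(dat.tobit2space.nchains) before deciding whether more
# iterations or additional burn-in are needed.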
- + ## update parameters - #dat.tobit2space <- dat.tobit2space[1000:5000, ] - imuf <- grep("muf", colnames(dat.tobit2space)) + # dat.tobit2space <- dat.tobit2space[1000:5000, ] + imuf <- grep("muf", colnames(dat.tobit2space)) mu.f <- colMeans(dat.tobit2space[, imuf]) - iPf <- grep("pf", colnames(dat.tobit2space)) - Pf <- solve(matrix(colMeans(dat.tobit2space[, iPf]),ncol(X),ncol(X))) + iPf <- grep("pf", colnames(dat.tobit2space)) + Pf <- solve(matrix(colMeans(dat.tobit2space[, iPf]), ncol(X), ncol(X))) #--- This is where the localization needs to happen - After imputing Pf - - iycens <- grep("y.censored",colnames(dat.tobit2space)) - X.new <- matrix(colMeans(dat.tobit2space[,iycens]),nrow(X),ncol(X)) - }else{ + + iycens <- grep("y.censored", colnames(dat.tobit2space)) + X.new <- matrix(colMeans(dat.tobit2space[, iycens]), nrow(X), ncol(X)) + } else { ## IDEA not sure if it's a good one - mu.f <- apply(X,2,weighted.mean,wts) - Pf <- stats::cov.wt(X,wts)$cov + mu.f <- apply(X, 2, weighted.mean, wts) + Pf <- stats::cov.wt(X, wts)$cov X.new <- X gelman.keep.tobit2space <- NA } - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### # Generalized Ensemble Filter ###----- - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### #### initial conditions - - if(length(aqq)==0){ - aqq <- list() #array(0, dim = c(nt,ncol(X),ncol(X))) - }else{ - if(ncol(X)!=dim(aqq)[1]|ncol(X)!=dim(aqq)[2]){ - print('error: X has changed dimensions') + + if (length(aqq) == 0) { + aqq <- list() # array(0, dim = c(nt,ncol(X),ncol(X))) + } else { + if (ncol(X) != dim(aqq)[1] | ncol(X) != dim(aqq)[2]) { + print("error: X has changed dimensions") } } - if(t == 1){ - bqq <- length(mu.f) - aqq <- diag(length(mu.f)) * bqq #Q + if (t == 1) { + bqq <- length(mu.f) + aqq <- diag(length(mu.f)) * bqq # Q } - + ### create matrix the describes the support for each observed state variable at time t interval <- matrix(NA, length(obs.mean[[t]]), 2) # Each observe variable needs to have its own file tag under inputs - interval <-settings$state.data.assimilation$inputs %>% - purrr::map_dfr( ~ data.frame( - .x$'min_value' %>% as.numeric(),.x$'max_value' %>% as.numeric() + interval <- settings$state.data.assimilation$inputs %>% + purrr::map_dfr(~ data.frame( + .x$"min_value" %>% as.numeric(), .x$"max_value" %>% as.numeric() )) %>% as.matrix() - + rownames(interval) <- names(input.vars) - - #### These vectors are used to categorize data based on censoring + + #### These vectors are used to categorize data based on censoring #### from the interval matrix - y.ind <- as.numeric(Y > interval[,1]) - y.censored <- as.numeric(ifelse(Y > interval[,1], Y, 0)) - - if(sum(y.censored,na.rm=T)==0){ - PEcAn.logger::logger.warn('NO DATA. Check y.censored in Analysis_sda.R') + y.ind <- as.numeric(Y > interval[, 1]) + y.censored <- as.numeric(ifelse(Y > interval[, 1], Y, 0)) + + if (sum(y.censored, na.rm = T) == 0) { + PEcAn.logger::logger.warn("NO DATA. Check y.censored in Analysis_sda.R") } - - #which type of observation do we have at this time point? - input.order <- lapply(input.vars, grep, x=names(obs.mean[[t]][[1]])) # not going to work if AbvGrnWood is given in two different ways like tree rings and refab + + # which type of observation do we have at this time point? 
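# Illustrative aside, not part of the upstream code: how the matching just below
# behaves for a hypothetical case. With input.vars <- c("AbvGrnWood", "LAI") and a
# site's observations at this time point obs <- list(AbvGrnWood = 120, LAI = 3.2),
# lapply(input.vars, grep, x = names(obs)) gives list(1L, 2L), i.e. the position of
# each configured input variable among the observed names; a variable with no match
# returns integer(0) and simply drops out of `data_available` after unlist().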
+ input.order <- lapply(input.vars, grep, x = names(obs.mean[[t]][[1]])) # not going to work if AbvGrnWood is given in two different ways like tree rings and refab names(input.order) <- operators data_available <- unlist(input.order) - - if(any(grep(names(data_available),pattern = 'direct'))){ - which_direct <- data_available[grep(names(data_available),pattern = 'direct')] + + if (any(grep(names(data_available), pattern = "direct"))) { + which_direct <- data_available[grep(names(data_available), pattern = "direct")] X_direct_start <- which_direct[1] X_direct_end <- which_direct[length(which_direct)] - direct_TRUE = TRUE - }else{ + direct_TRUE <- TRUE + } else { X_direct_start <- 0 X_direct_end <- 0 - direct_TRUE = FALSE + direct_TRUE <- FALSE } - - if(any(grep(names(data_available),pattern = 'ALR'))){ - - which_fcomp <- grep(names(data_available),pattern = 'ALR') + + if (any(grep(names(data_available), pattern = "ALR"))) { + which_fcomp <- grep(names(data_available), pattern = "ALR") X_fcomp_start <- which_fcomp[1] X_fcomp_end <- which_fcomp[length(which_fcomp)] - X_fcomp_model <- grep(colnames(X),pattern = '*.pft.*') - - fcomp_TRUE = TRUE - }else{ + X_fcomp_model <- grep(colnames(X), pattern = "*.pft.*") + + fcomp_TRUE <- TRUE + } else { X_fcomp_start <- 0 X_fcomp_end <- 0 X_fcomp_model <- 0 - fcomp_TRUE = FALSE + fcomp_TRUE <- FALSE } - - if(any(grep(names(data_available),pattern = 'pft2total'))){ - which_pft2total <- grep(names(data_available),pattern = 'pft2total') + + if (any(grep(names(data_available), pattern = "pft2total"))) { + which_pft2total <- grep(names(data_available), pattern = "pft2total") X_pft2total_start <- which_pft2total[1] X_pft2total_end <- which_pft2total[length(which_pft2total)] - X_pft2total_model <- grep(colnames(X),pattern = '*.pft.*') - pft2total_TRUE = TRUE - }else{ + X_pft2total_model <- grep(colnames(X), pattern = "*.pft.*") + pft2total_TRUE <- TRUE + } else { X_pft2total_start <- 0 X_pft2total_end <- 0 X_pft2total_model <- 0 - pft2total_TRUE = FALSE + pft2total_TRUE <- FALSE } - - - if(t > 1 & recompileGEF != TRUE){ - if(X_direct_start != constants.tobit$X_direct_start) recompileGEF = TRUE - if(X_direct_end != constants.tobit$X_direct_end) recompileGEF = TRUE - if(X_fcomp_end != constants.tobit$X_fcomp_end) recompileGEF = TRUE - if(X_fcomp_start != constants.tobit$X_fcomp_start) recompileGEF = TRUE + + + if (t > 1 & recompileGEF != TRUE) { + if (X_direct_start != constants.tobit$X_direct_start) recompileGEF <- TRUE + if (X_direct_end != constants.tobit$X_direct_end) recompileGEF <- TRUE + if (X_fcomp_end != constants.tobit$X_fcomp_end) recompileGEF <- TRUE + if (X_fcomp_start != constants.tobit$X_fcomp_start) recompileGEF <- TRUE } - - if(t == 1 | recompileGEF){ + + if (t == 1 | recompileGEF) { constants.tobit <- list( N = ncol(X), YN = length(y.ind), @@ -434,115 +459,119 @@ GEF<-function(settings, Forecast, Observed, H, extraArg, nitr=50000, nburnin=100 fcomp_TRUE = fcomp_TRUE, pft2total_TRUE = pft2total_TRUE ) - - dimensions.tobit <- list(X = length(mu.f), X.mod = ncol(X), - Q = c(length(mu.f),length(mu.f)), - y_star = (length(y.censored))) - - data.tobit <- list(muf = as.vector(mu.f), - pf = solve(Pf), - aq = aqq, bq = bqq, - y.ind = y.ind, - y.censored = y.censored, - r = R) #precision - - inits.pred <- function(){ + + dimensions.tobit <- list( + X = length(mu.f), X.mod = ncol(X), + Q = c(length(mu.f), length(mu.f)), + y_star = (length(y.censored)) + ) + + data.tobit <- list( + muf = as.vector(mu.f), + pf = solve(Pf), + aq = aqq, bq = bqq, + y.ind = 
y.ind, + y.censored = y.censored, + r = R + ) # precision + + inits.pred <- function() { list( - q = diag(ncol(X)) * stats::runif(1, length(mu.f), length(mu.f) + 1), - X.mod = stats::rnorm(length(mu.f), mu.f, 1), - X = stats::rnorm(length(mu.f), mu.f, .1), - y_star = stats::rnorm(length(y.censored), 0, 1) + q = diag(ncol(X)) * stats::runif(1, length(mu.f), length(mu.f) + 1), + X.mod = stats::rnorm(length(mu.f), mu.f, 1), + X = stats::rnorm(length(mu.f), mu.f, .1), + y_star = stats::rnorm(length(y.censored), 0, 1) ) - } - - # + } + + # # inits.pred <- list(q = diag(length(mu.f))*(length(mu.f)+1), # X.mod = rnorm(length(mu.f),mu.f,1), # X = rnorm(length(mu.f),mu.f,1), # y_star = rnorm(length(y.censored),0,1)) - - model_pred <- nimbleModel(tobit.model, data = data.tobit, dimensions = dimensions.tobit, - constants = constants.tobit, inits = inits.pred(), - name = 'base') - + + model_pred <- nimbleModel(tobit.model, + data = data.tobit, dimensions = dimensions.tobit, + constants = constants.tobit, inits = inits.pred(), + name = "base" + ) + model_pred$initializeInfo() ## Adding X.mod,q,r as data for building model. - conf <- configureMCMC(model_pred, print=TRUE,thin = 10) - conf$addMonitors(c("X","X.mod","q","Q", "y_star","y.censored")) - - if(ncol(X) > length(y.censored)){ - print('Adding a different sampler to X nodes to improve MCMC mixing') - conf$removeSampler('X') - #consider individual slice samplers - #conf$addSampler(paste0('X[1:12]'),'AF_slice') - for(ss in 1:ncol(X)){ - conf$addSampler(paste0('X[',ss,']'),'slice') + conf <- configureMCMC(model_pred, print = TRUE, thin = 10) + conf$addMonitors(c("X", "X.mod", "q", "Q", "y_star", "y.censored")) + + if (ncol(X) > length(y.censored)) { + print("Adding a different sampler to X nodes to improve MCMC mixing") + conf$removeSampler("X") + # consider individual slice samplers + # conf$addSampler(paste0('X[1:12]'),'AF_slice') + for (ss in 1:ncol(X)) { + conf$addSampler(paste0("X[", ss, "]"), "slice") } conf$printSamplers() } - - if(FALSE){ ### Need this for when the state variables are on different scales like NPP and AGB - x.char <- paste0('X[1:',ncol(X),']') + + if (FALSE) { ### Need this for when the state variables are on different scales like NPP and AGB + x.char <- paste0("X[1:", ncol(X), "]") conf$removeSampler(x.char) - propCov.means <- c(rep(1,ncol(X)),1000)#signif(diag(Pf),1)#mean(unlist(lapply(obs.cov,FUN = function(x){diag(x)})))[choose]#c(rep(max(diag(Pf)),ncol(X)))# - if(length(propCov.means)!=ncol(X)) propCov.means <- c(propCov.means,rep(1,ncol(X)-length(Y))) - conf$addSampler(target =c(x.char), - control <- list(propCov = diag(ncol(X))*propCov.means), - type='RW_block') + propCov.means <- c(rep(1, ncol(X)), 1000) # signif(diag(Pf),1)#mean(unlist(lapply(obs.cov,FUN = function(x){diag(x)})))[choose]#c(rep(max(diag(Pf)),ncol(X)))# + if (length(propCov.means) != ncol(X)) propCov.means <- c(propCov.means, rep(1, ncol(X) - length(Y))) + conf$addSampler( + target = c(x.char), + control <- list(propCov = diag(ncol(X)) * propCov.means), + type = "RW_block" + ) } - + ## important! 
## this is needed for correct indexing later samplerNumberOffset <- length(conf$getSamplers()) - - for(i in 1:length(y.ind)) { - node <- paste0('y.censored[',i,']') - conf$addSampler(node, 'toggle', control=list(type='RW')) + + for (i in 1:length(y.ind)) { + node <- paste0("y.censored[", i, "]") + conf$addSampler(node, "toggle", control = list(type = "RW")) ## could instead use slice samplers, or any combination thereof, e.g.: - ##conf$addSampler(node, 'toggle', control=list(type='slice')) + ## conf$addSampler(node, 'toggle', control=list(type='slice')) } - + conf$printSamplers() ## can monitor y.censored, if you wish, to verify correct behaviour - #conf$addMonitors('y.censored') - + # conf$addMonitors('y.censored') + Rmcmc <- buildMCMC(conf) - + Cmodel <- compileNimble(model_pred) Cmcmc <- compileNimble(Rmcmc, project = model_pred) - - for(i in 1:length(y.ind)) { + + for (i in 1:length(y.ind)) { ## ironically, here we have to "toggle" the value of y.ind[i] ## this specifies that when y.ind[i] = 1, ## indicator variable is set to 0, which specifies *not* to sample - valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset+i]], 'toggle', 1-y.ind[i]) + valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset + i]], "toggle", 1 - y.ind[i]) } - - - - }else{ + } else { Cmodel$y.ind <- y.ind Cmodel$y.censored <- y.censored Cmodel$aq <- aqq Cmodel$bq <- bqq Cmodel$muf <- mu.f Cmodel$pf <- solve(Pf) - Cmodel$r <- (R) #precision - + Cmodel$r <- (R) # precision + # inits.pred = list(q = diag(length(mu.f))*(length(mu.f)+1), # X.mod = rnorm(length(mu.f),mu.f,1), # X = rnorm(ncol(X),mu.f,1), # y_star = rnorm(length(y.censored),mu.f,1)) # - # + # # Cmodel$setInits(inits.pred()) - - for(i in 1:length(y.ind)) { + + for (i in 1:length(y.ind)) { ## ironically, here we have to "toggle" the value of y.ind[i] ## this specifies that when y.ind[i] = 1, ## indicator variable is set to 0, which specifies *not* to sample - valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset+i]], 'toggle', 1-y.ind[i]) + valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset + i]], "toggle", 1 - y.ind[i]) } - } dat.nchains <- @@ -552,101 +581,101 @@ GEF<-function(settings, Forecast, Observed, H, extraArg, nitr=50000, nburnin=100 nburnin = nburnin, nchains = 3 ) - - - - which_f <- seq(1,ncol(dat.nchains[[1]]))[-grep(c("y"), colnames(dat.nchains[[1]]))] + + + + which_f <- seq(1, ncol(dat.nchains[[1]]))[-grep(c("y"), colnames(dat.nchains[[1]]))] gelman.keep <- numeric(length(which_f)) - for(ff in which_f){ + for (ff in which_f) { mcmc.check <- list() - - mcmc.check[[1]] <- coda::mcmc(dat.nchains[[1]][,ff]) - mcmc.check[[2]] <- coda::mcmc(dat.nchains[[2]][,ff]) - mcmc.check[[3]] <- coda::mcmc(dat.nchains[[3]][,ff]) - - - gelman.keep[ff] <- try(coda::gelman.diag(mcmc.check,transform = T)$psrf[1]) + + mcmc.check[[1]] <- coda::mcmc(dat.nchains[[1]][, ff]) + mcmc.check[[2]] <- coda::mcmc(dat.nchains[[2]][, ff]) + mcmc.check[[3]] <- coda::mcmc(dat.nchains[[3]][, ff]) + + + gelman.keep[ff] <- try(coda::gelman.diag(mcmc.check, transform = T)$psrf[1]) } print(gelman.keep) - if(any(gelman.keep > 1.5,na.rm = T)) PEcAn.logger::logger.warn(paste('Gelman value > 1.5 for GEF model. Re-assess time point', t)) - - try(save(gelman.keep.tobit2space,gelman.keep,file = file.path(outdir, paste0('gelman.diag',t,'.Rdata')))) - - dat <- do.call(rbind,dat.nchains) + if (any(gelman.keep > 1.5, na.rm = T)) PEcAn.logger::logger.warn(paste("Gelman value > 1.5 for GEF model. 
Re-assess time point", t)) + + try(save(gelman.keep.tobit2space, gelman.keep, file = file.path(outdir, paste0("gelman.diag", t, ".Rdata")))) + + dat <- do.call(rbind, dat.nchains) set.seed(t) - dat_save <- dat[sample(1:nrow(dat),size=500,replace = F),] - save(dat_save, file = file.path(outdir, paste0('dat',t,'.Rdata'))) - + dat_save <- dat[sample(1:nrow(dat), size = 500, replace = F), ] + save(dat_save, file = file.path(outdir, paste0("dat", t, ".Rdata"))) + ## update parameters - iq <- grep("q", colnames(dat)) - iX <- grep("X[", colnames(dat), fixed = TRUE) - iystar <- grep("y_star", colnames(dat), fixed = TRUE) + iq <- grep("q", colnames(dat)) + iX <- grep("X[", colnames(dat), fixed = TRUE) + iystar <- grep("y_star", colnames(dat), fixed = TRUE) iX.mod <- grep("X.mod", colnames(dat), fixed = TRUE) - + mu.a <- colMeans(dat[, iX]) - + ystar.a <- colMeans(dat[, iystar]) - Pa <- stats::cov(dat[, iX]) + Pa <- stats::cov(dat[, iX]) Pa[is.na(Pa)] <- 0 - - mq <- dat[, iq] # Omega, Precision - q.bar <- matrix(apply(mq, 2, mean), length(mu.f), length(mu.f)) # Mean Omega, Precision - - col <- matrix(1:length(mu.f) ^ 2, length(mu.f), length(mu.f)) - WV <- matrix(0, length(mu.f), length(mu.f)) + + mq <- dat[, iq] # Omega, Precision + q.bar <- matrix(apply(mq, 2, mean), length(mu.f), length(mu.f)) # Mean Omega, Precision + + col <- matrix(1:length(mu.f)^2, length(mu.f), length(mu.f)) + WV <- matrix(0, length(mu.f), length(mu.f)) for (i in seq_along(mu.f)) { for (j in seq_along(mu.f)) { WV[i, j] <- wish.df(q.bar, X = mq, i = i, j = j, col = col[i, j]) } } - + n <- mean(WV) if (n < length(mu.f)) { n <- length(mu.f) } - - V <- solve(q.bar) * n - - aqq <- V - bqq <- n - - grDevices::pdf(file.path(outdir, paste0('dat_plot', t, '.pdf'))) + + V <- solve(q.bar) * n + + aqq <- V + bqq <- n + + grDevices::pdf(file.path(outdir, paste0("dat_plot", t, ".pdf"))) graphics::par(mfrow = c(2, 3)) - - for(rr in 1:length(iX)){ - graphics::plot(dat_save[,iX[rr]],typ = 'l',main = paste('X',rr)) - abline(h=mu.f[rr],col='blue') + + for (rr in 1:length(iX)) { + graphics::plot(dat_save[, iX[rr]], typ = "l", main = paste("X", rr)) + abline(h = mu.f[rr], col = "blue") } - + for (rr in 1:length(iystar)) { - graphics::plot(dat_save[,iystar[rr]], type = 'l', main = paste('iystar',rr)) - abline(h=(mu.a)[rr],col='red') + graphics::plot(dat_save[, iystar[rr]], type = "l", main = paste("iystar", rr)) + abline(h = (mu.a)[rr], col = "red") } - + for (rr in 1:length(iX)) { - graphics::plot(dat_save[,iX.mod[rr]], type = 'l', main = paste('iX.mod',rr)) - abline(h=mu.f[rr],col='blue') + graphics::plot(dat_save[, iX.mod[rr]], type = "l", main = paste("iX.mod", rr)) + abline(h = mu.f[rr], col = "blue") } - eigen_save <- matrix(NA,nrow=nrow(dat_save),ncol=ncol(X)) - for(rr in 1:nrow(dat_save)) { - eigen_save[rr,] <- eigen((matrix(dat_save[rr, iq],ncol(X),ncol(X))))$values + eigen_save <- matrix(NA, nrow = nrow(dat_save), ncol = ncol(X)) + for (rr in 1:nrow(dat_save)) { + eigen_save[rr, ] <- eigen((matrix(dat_save[rr, iq], ncol(X), ncol(X))))$values } - apply(eigen_save,2,graphics::plot,typ='l') + apply(eigen_save, 2, graphics::plot, typ = "l") grDevices::dev.off() - - return(list(mu.f = mu.f, - Pf = Pf, - mu.a = mu.a, - Pa = Pa, - q.bar = q.bar, - n = n, - X.new=X.new, - aqq=aqq, - bqq=bqq, - y.censored=y.censored, - R=R - ) - ) + + return(list( + mu.f = mu.f, + Pf = Pf, + mu.a = mu.a, + Pa = Pa, + q.bar = q.bar, + n = n, + X.new = X.new, + aqq = aqq, + bqq = bqq, + y.censored = y.censored, + R = R + )) } @@ -654,25 +683,24 @@ 
GEF<-function(settings, Forecast, Observed, H, extraArg, nitr=50000, nburnin=100 ##' @title Construc_H ##' @name Construc_H ##' @author Hamze Dokoohaki -##' +##' ##' @param choose a vector of observations indices oredered based on their appearances in the list of state variable names. ##' @param Y vector of observations ##' @param X Dataframe or matrix of forecast state variables for different ensembles. -##’ @details -##’ -##' +## ’ @details +## ’ +##' ##' @description This function creates a matrix mapping obsereved data to their forecast state variable. -##' +##' ##' @return This returns a matrix specifying which observation go with which state variables. ##' @export -Construct_H <- function(choose, Y, X){ +Construct_H <- function(choose, Y, X) { ## design matrix - H <- matrix(0, length(Y), ncol(X)) #H maps true state to observed data - #linear + H <- matrix(0, length(Y), ncol(X)) # H maps true state to observed data + # linear for (i in choose) { H[i, i] <- 1 } - + return(H) } - diff --git a/modules/assim.sequential/R/Analysis_sda_block.R b/modules/assim.sequential/R/Analysis_sda_block.R index 6c37b4f251f..0fc52815bfd 100644 --- a/modules/assim.sequential/R/Analysis_sda_block.R +++ b/modules/assim.sequential/R/Analysis_sda_block.R @@ -1,140 +1,146 @@ ##' @title analysis_sda_block ##' @name analysis_sda_block ##' @author Dongchen Zhang -##' -##' @param settings pecan standard multi-site settings list. +##' +##' @param settings pecan standard multi-site settings list. ##' @param block.list.all Lists of forecast and analysis outputs for each time point of each block. If t=1, we initialize those outputs of each block with NULL from the `sda.enkf.multisite` function. -##' @param X A matrix contains ensemble forecasts with the dimensions of `[ensemble number, site number * number of state variables]`. The columns are matched with the site.ids and state variable names of the inside the `FORECAST` object in the `sda.enkf.multisite` script. -##' @param obs.mean Lists of date times named by time points, which contains lists of sites named by site ids, which contains observation means for each state variables of each site for each time point. -##' @param obs.cov Lists of date times named by time points, which contains lists of sites named by site ids, which contains observation covariances for all state variables of each site for each time point. +##' @param X A matrix contains ensemble forecasts with the dimensions of `[ensemble number, site number * number of state variables]`. The columns are matched with the site.ids and state variable names of the inside the `FORECAST` object in the `sda.enkf.multisite` script. +##' @param obs.mean Lists of date times named by time points, which contains lists of sites named by site ids, which contains observation means for each state variables of each site for each time point. +##' @param obs.cov Lists of date times named by time points, which contains lists of sites named by site ids, which contains observation covariances for all state variables of each site for each time point. ##' @param t time point in format of YYYY-MM-DD. ##' @param nt total length of time steps, corresponding to the `nt` variable in the `sda.enkf.multisite` function. ##' @param MCMC.args arguments for the MCMC sampling, details can be found in the roxygen strucutre for control list in the `sda.enkf.multisite` function. ##' @param block.list.all.pre pre-existed block.list.all object for passing the aqq and bqq to the current SDA run, the default is NULL. 
Details can be found in the roxygen structure for `pre_enkf_params` of the `sda.enkf.multisite` function ##' @details This function will add data and constants into each block that are needed for the MCMC sampling. -##' +##' ##' @description This function provides the block-based MCMC sampling approach. -##' +##' ##' @return It returns the `build.block.xy` object and the analysis results. ##' @importFrom dplyr %>% -analysis_sda_block <- function (settings, block.list.all, X, obs.mean, obs.cov, t, nt, MCMC.args, block.list.all.pre = NULL) { - #convert from vector values to block lists. - if ("try-error" %in% class(try(block.results <- build.block.xy(settings = settings, - block.list.all = block.list.all, - X = X, - obs.mean = obs.mean, - obs.cov = obs.cov, - t = t)))) { +analysis_sda_block <- function(settings, block.list.all, X, obs.mean, obs.cov, t, nt, MCMC.args, block.list.all.pre = NULL) { + # convert from vector values to block lists. + if ("try-error" %in% class(try(block.results <- build.block.xy( + settings = settings, + block.list.all = block.list.all, + X = X, + obs.mean = obs.mean, + obs.cov = obs.cov, + t = t + )))) { PEcAn.logger::logger.severe("Something wrong within the build.block.xy function.") return(0) } - #grab block.list and H from the results. + # grab block.list and H from the results. block.list.all <- block.results[[1]] H <- block.results[[2]] Y <- block.results[[3]] R <- block.results[[4]] - - #update q. - if ("try-error" %in% class(try(block.list.all <- update_q(block.list.all, t, nt, aqq.Init = as.numeric(settings$state.data.assimilation$aqq.Init), - bqq.Init = as.numeric(settings$state.data.assimilation$bqq.Init), - MCMC_dat = NULL, - block.list.all.pre)))) { + + # update q. + if ("try-error" %in% class(try(block.list.all <- update_q(block.list.all, t, nt, + aqq.Init = as.numeric(settings$state.data.assimilation$aqq.Init), + bqq.Init = as.numeric(settings$state.data.assimilation$bqq.Init), + MCMC_dat = NULL, + block.list.all.pre + )))) { PEcAn.logger::logger.severe("Something wrong within the update_q function.") return(0) } - - #add initial conditions for the MCMC sampling. + + # add initial conditions for the MCMC sampling. if ("try-error" %in% class(try(block.list.all[[t]] <- MCMC_Init(block.list.all[[t]], X)))) { PEcAn.logger::logger.severe("Something wrong within the MCMC_Init function.") return(0) } - - #update MCMC args. - block.list.all[[t]] <- block.list.all[[t]] %>% - purrr::map(function(l){ + + # update MCMC args. + block.list.all[[t]] <- block.list.all[[t]] %>% + purrr::map(function(l) { l$MCMC <- MCMC.args l }) - - #parallel for loop over each block. + + # parallel for loop over each block. PEcAn.logger::logger.info(paste0("Running MCMC ", "for ", length(block.list.all[[t]]), " blocks")) if ("try-error" %in% class(try(block.list.all[[t]] <- furrr::future_map(block.list.all[[t]], MCMC_block_function, .progress = T)))) { PEcAn.logger::logger.severe("Something wrong within the MCMC_block_function function.") return(0) } PEcAn.logger::logger.info("Completed!") - - #convert from block lists to vector values. + + # convert from block lists to vector values. 
if ("try-error" %in% class(try(V <- block.2.vector(block.list.all[[t]], X, H)))) { PEcAn.logger::logger.severe("Something wrong within the block.2.vector function.") return(0) } - - #return values - return(list(block.list.all = block.list.all, - mu.f = V$mu.f, - Pf = V$Pf, - mu.a = V$mu.a, - Pa = V$Pa, - Y = Y, - R = R)) + + # return values + return(list( + block.list.all = block.list.all, + mu.f = V$mu.f, + Pf = V$Pf, + mu.a = V$mu.a, + Pa = V$Pa, + Y = Y, + R = R + )) } ##' @title build.block.xy ##' @name build.block.xy ##' @author Dongchen Zhang -##' -##' @param settings pecan standard multi-site settings list. +##' +##' @param settings pecan standard multi-site settings list. ##' @param block.list.all List contains nt empty sub-elements. ##' @param X A matrix contains ensemble forecasts. ##' @param obs.mean List of dataframe of observation means, named with observation datetime. ##' @param obs.cov List of covariance matrices of state variables , named with observation datetime. ##' @param t time point. ##' @details This function will add data and constants into each block that are needed for the MCMC sampling. -##' +##' ##' @description This function split long vector and covariance matrix into blocks corresponding to the localization. -##' +##' ##' @return It returns the `build.block.xy` object with data and constants filled in. build.block.xy <- function(settings, block.list.all, X, obs.mean, obs.cov, t) { - #set q.type from settings. + # set q.type from settings. if (settings$state.data.assimilation$q.type == "vector") { q.type <- 3 } else if (settings$state.data.assimilation$q.type == "wishart") { q.type <- 4 } - #grab basic arguments based on X. + # grab basic arguments based on X. site.ids <- unique(attributes(X)$Site) var.names <- unique(attributes(X)$dimnames[[2]]) mu.f <- colMeans(X) Pf <- stats::cov(X) - if (length(diag(Pf)[which(diag(Pf)==0)]) > 0) { - diag(Pf)[which(diag(Pf)==0)] <- min(diag(Pf)[which(diag(Pf) != 0)])/5 #fixing det(Pf)==0 + if (length(diag(Pf)[which(diag(Pf) == 0)]) > 0) { + diag(Pf)[which(diag(Pf) == 0)] <- min(diag(Pf)[which(diag(Pf) != 0)]) / 5 # fixing det(Pf)==0 PEcAn.logger::logger.warn("The zero variances in Pf is being replaced by one fifth of the minimum variance in those matrices respectively.") } - #distance calculations and localization + # distance calculations and localization site.locs <- settings$run %>% - purrr::map('site') %>% - purrr::map_dfr(~c(.x[['lon']],.x[['lat']]) %>% as.numeric)%>% - t %>% - `colnames<-`(c("Lon","Lat")) %>% + purrr::map("site") %>% + purrr::map_dfr(~ c(.x[["lon"]], .x[["lat"]]) %>% as.numeric()) %>% + t() %>% + `colnames<-`(c("Lon", "Lat")) %>% `rownames<-`(site.ids) - #Finding the distance between the sites + # Finding the distance between the sites dis.matrix <- sp::spDists(site.locs, longlat = TRUE) if (!is.null(settings$state.data.assimilation$Localization.FUN)) { Localization.FUN <- get(settings$state.data.assimilation$Localization.FUN) - #turn that into a blocked matrix format + # turn that into a blocked matrix format blocked.dis <- block_matrix(dis.matrix %>% as.numeric(), rep(length(var.names), length(site.ids))) Pf <- Localization.FUN(Pf, blocked.dis, settings$state.data.assimilation$scalef %>% as.numeric()) } - #Handle observation - #observation number per site - #free run special case. + # Handle observation + # observation number per site + # free run special case. 
if (is.null(obs.mean[[t]])) { obs_per_site <- rep(0, length(site.ids)) %>% purrr::set_names(site.ids) } else { obs_per_site <- purrr::map_int(obs.mean[[t]], length) } - #if we do free run or the current obs.mean are all NULL. + # if we do free run or the current obs.mean are all NULL. if (as.logical(settings$state.data.assimilation$free.run) | all(is.null(unlist(obs.mean[[t]])))) { H <- list(ind = seq_along(rep(var.names, length(site.ids)))) Y <- rep(NA, length(H$ind)) @@ -147,68 +153,77 @@ build.block.xy <- function(settings, block.list.all, X, obs.mean, obs.cov, t) { Y <- Obs.cons$Y R <- Obs.cons$R if (length(Y) > 1) { - if (length(diag(R)[which(diag(R)==0)]) > 0) { - diag(R)[which(diag(R)==0)] <- min(diag(R)[which(diag(R) != 0)])/2 + if (length(diag(R)[which(diag(R) == 0)]) > 0) { + diag(R)[which(diag(R) == 0)] <- min(diag(R)[which(diag(R) != 0)]) / 2 PEcAn.logger::logger.warn("The zero variances in R is being replaced by half of the minimum variance in those matrices respectively.") } } - #create matrix the describes the support for each observed state variable at time t - min_max <- settings$state.data.assimilation$state.variables %>% - purrr::map(function(state.variable){ - c(as.numeric(state.variable$min_value), - as.numeric(state.variable$max_value)) - }) %>% unlist() %>% as.vector() %>% + # create matrix the describes the support for each observed state variable at time t + min_max <- settings$state.data.assimilation$state.variables %>% + purrr::map(function(state.variable) { + c( + as.numeric(state.variable$min_value), + as.numeric(state.variable$max_value) + ) + }) %>% + unlist() %>% + as.vector() %>% matrix(length(settings$state.data.assimilation$state.variables), 2, byrow = T) %>% `rownames<-`(var.names) - #Create y.censored and y.ind - #describing if the obs are within the defined range. + # Create y.censored and y.ind + # describing if the obs are within the defined range. y.ind <- y.censored <- c() for (i in seq_along(Y)) { if (Y[i] > min_max[names(Y[i]), 1]) { - y.ind[i] = 1; y.censored[i] = Y[i] - } else {y.ind[i] <- y.censored[i] <- 0} + y.ind[i] <- 1 + y.censored[i] <- Y[i] + } else { + y.ind[i] <- y.censored[i] <- 0 + } } - #create H + # create H # if there is any site that has zero observation. if (any(obs_per_site == 0)) { - #name matching between observation names and state variable names. + # name matching between observation names and state variable names. f.2.y.ind <- obs.mean[[t]] %>% purrr::map(\(x)which(var.names %in% names(x))) %>% - unlist %>% - unique - H <- list(ind = f.2.y.ind %>% purrr::map(function(start){ + unlist() %>% + unique() + H <- list(ind = f.2.y.ind %>% purrr::map(function(start) { seq(start, length(site.ids) * length(var.names), length(var.names)) - }) %>% unlist() %>% sort) + }) %>% unlist() %>% sort()) } else { - H <- construct_nimble_H(site.ids = site.ids, - var.names = var.names, - obs.t = obs.mean[[t]], - pft.path = settings[[1]]$run$inputs$pft.site$path, - by = "block_pft_var") + H <- construct_nimble_H( + site.ids = site.ids, + var.names = var.names, + obs.t = obs.mean[[t]], + pft.path = settings[[1]]$run$inputs$pft.site$path, + by = "block_pft_var" + ) } } - #start the blocking process - #should we consider interactions between sites? - if(as.numeric(settings$state.data.assimilation$scalef) == 0){ + # start the blocking process + # should we consider interactions between sites? 
+ if (as.numeric(settings$state.data.assimilation$scalef) == 0) { block.list <- vector("list", length(site.ids)) - #loop over sites + # loop over sites for (i in seq_along(site.ids)) { - #store which block contains which sites. + # store which block contains which sites. block.list[[i]]$sites.per.block <- i block.list[[i]]$site.ids <- site.ids[i] block.list[[i]]$t <- t - #fill in mu.f and Pf + # fill in mu.f and Pf f.start <- (i - 1) * length(var.names) + 1 f.end <- i * length(var.names) block.list[[i]]$data$muf <- mu.f[f.start:f.end] block.list[[i]]$data$pf <- Pf[f.start:f.end, f.start:f.end] - #find indexs for Y. + # find indexs for Y. y.start <- sum(obs_per_site[1:i]) - obs_per_site[i] + 1 y.end <- sum(obs_per_site[1:i]) - #fill in y and r - #if there is no observation for this site. + # fill in y and r + # if there is no observation for this site. if (y.end < y.start) { - #if every site has zero observation/free run. + # if every site has zero observation/free run. if (max(obs_per_site) == 0) { block.list[[i]]$data$y.censored <- rep(NA, length(var.names)) block.list[[i]]$data$r <- diag(1, length(var.names)) @@ -223,7 +238,7 @@ build.block.xy <- function(settings, block.list.all, X, obs.mean, obs.cov, t) { block.list[[i]]$data$r <- solve(R[y.start:y.end, y.start:y.end]) block.h <- Construct.H.multisite(site.ids[i], var.names, obs.mean[[t]]) } - #fill in constants. + # fill in constants. block.list[[i]]$H <- block.h block.list[[i]]$constant$H <- which(apply(block.h, 2, sum) == 1) block.list[[i]]$constant$N <- length(f.start:f.end) @@ -232,21 +247,23 @@ build.block.xy <- function(settings, block.list.all, X, obs.mean, obs.cov, t) { } names(block.list) <- site.ids } else { - #find networks given TRUE/FALSE matrix representing sites' interactions. + # find networks given TRUE/FALSE matrix representing sites' interactions. block.vec <- matrix_network(dis.matrix <= as.numeric(settings$state.data.assimilation$scalef)) - #check if the matrix_network function is working correctly. - #check if the blocks are calculated correctly. - if (block.vec %>% - purrr::map(function(l){length(l)}) %>% - unlist %>% - sum() != length(site.ids)) { + # check if the matrix_network function is working correctly. + # check if the blocks are calculated correctly. + if (block.vec %>% + purrr::map(function(l) { + length(l) + }) %>% + unlist() %>% + sum() != length(site.ids)) { PEcAn.logger::logger.severe("Block calculation failed, please check the matrix_network function!") return(0) } block.list <- vector("list", length(block.vec)) - #loop over sites - for (i in seq_along(block.vec)) {#i is site index - #store which block contains which sites. + # loop over sites + for (i in seq_along(block.vec)) { # i is site index + # store which block contains which sites. ids <- block.vec[[i]] block.list[[i]]$sites.per.block <- ids block.list[[i]]$site.ids <- site.ids[ids] @@ -259,14 +276,14 @@ build.block.xy <- function(settings, block.list.all, X, obs.mean, obs.cov, t) { y.start <- sum(obs_per_site[1:ids[j]]) - obs_per_site[ids[j]] + 1 y.end <- sum(obs_per_site[1:ids[j]]) f.ind <- c(f.ind, f.start:f.end) - #if the current site has greater or equal than 1 observation. + # if the current site has greater or equal than 1 observation. if (y.end >= y.start) { # y.ind <- c(y.ind, y.start:y.end) y.block <- c(y.block, y.censored[y.start:y.end]) r.block <- c(r.block, diag(R)[y.start:y.end]) } else { - #if the current site has zero observation. - #if for free run. + # if the current site has zero observation. + # if for free run. 
if (max(obs_per_site) == 0) { y.block <- c(y.block, rep(NA, length(var.names))) r.block <- c(r.block, rep(1, length(var.names))) @@ -276,32 +293,34 @@ build.block.xy <- function(settings, block.list.all, X, obs.mean, obs.cov, t) { } } } - #if we have NA for y, we will build H differently. + # if we have NA for y, we will build H differently. if (any(is.na(y.block))) { - block.h <- matrix(0, 1, length(ids)*length(var.names)) - #if for free run. + block.h <- matrix(0, 1, length(ids) * length(var.names)) + # if for free run. if (is.null(obs.mean[[t]])) { f.2.y.ind <- seq_along(var.names) } else { f.2.y.ind <- obs.mean[[t]] %>% purrr::map(\(x)which(var.names %in% names(x))) %>% - unlist %>% - unique + unlist() %>% + unique() } - seq.ind <- f.2.y.ind %>% purrr::map(function(start){ - seq(start, dim(block.h)[2], length(var.names)) - }) %>% unlist() + seq.ind <- f.2.y.ind %>% + purrr::map(function(start) { + seq(start, dim(block.h)[2], length(var.names)) + }) %>% + unlist() block.h[1, seq.ind] <- 1 } else { block.h <- Construct.H.multisite(site.ids[ids], var.names, obs.mean[[t]]) } - #fill in mu.f and Pf + # fill in mu.f and Pf block.list[[i]]$data$muf <- mu.f[f.ind] block.list[[i]]$data$pf <- GrabFillMatrix(Pf, f.ind) - #fill in y and R + # fill in y and R block.list[[i]]$data$y.censored <- y.block - if (length(r.block) == 1) { - block.list[[i]]$data$r <- 1/r.block + if (length(r.block) == 1) { + block.list[[i]]$data$r <- 1 / r.block } else { block.list[[i]]$data$r <- solve(diag(r.block)) } @@ -312,12 +331,12 @@ build.block.xy <- function(settings, block.list.all, X, obs.mean, obs.cov, t) { block.list[[i]]$constant$q.type <- q.type } } - #if it's Wishart Q, we need to replace any NA Y with corresponding muf, and r with Pf. - #also, if length of observation is 1, the Wishart Q is not suitable for the MCMC. - #we will then need to change the Q type to 3, which is the vector Q. + # if it's Wishart Q, we need to replace any NA Y with corresponding muf, and r with Pf. + # also, if length of observation is 1, the Wishart Q is not suitable for the MCMC. + # we will then need to change the Q type to 3, which is the vector Q. if (q.type == 4) { for (i in seq_along(block.list)) { - #check length. + # check length. if (block.list[[i]]$constant$YN == 1) { block.list[[i]]$constant$q.type <- 3 next @@ -342,7 +361,7 @@ build.block.xy <- function(settings, block.list.all, X, obs.mean, obs.cov, t) { # } } } - #return values. + # return values. block.list.all[[t]] <- block.list return(list(block.list.all = block.list.all, H = H, Y = Y, R = R)) } @@ -350,137 +369,147 @@ build.block.xy <- function(settings, block.list.all, X, obs.mean, obs.cov, t) { ##' @title MCMC_Init ##' @name MCMC_Init ##' @author Dongchen Zhang -##' +##' ##' @param block.list lists of blocks generated by the `build.block.xy` function. ##' @param X A matrix contains ensemble forecasts. ##' @details This function helps create initial conditions for the MCMC sampling. -##' +##' ##' @return It returns the `block.list` object with initial conditions filled in. -MCMC_Init <- function (block.list, X) { +MCMC_Init <- function(block.list, X) { var.names <- unique(attributes(X)$dimnames[[2]]) - #sample mu.f from X. + # sample mu.f from X. sample.mu.f <- colMeans(X) for (i in seq_along(block.list)) { - #number of observations. + # number of observations. 
num.obs <- length(block.list[[i]]$data$y.censored) - #loop over each site within each block + # loop over each site within each block for (j in seq_along(block.list[[i]]$sites.per.block)) { - #initialize mu.f + # initialize mu.f start <- (block.list[[i]]$sites.per.block[j] - 1) * length(var.names) + 1 end <- (block.list[[i]]$sites.per.block[j]) * length(var.names) block.list[[i]]$Inits$X.mod <- c(block.list[[i]]$Inits$X.mod, sample.mu.f[start:end]) - #initialize X + # initialize X block.list[[i]]$Inits$X <- block.list[[i]]$data$y.censored - #initialize Xs + # initialize Xs block.list[[i]]$Inits$Xs <- block.list[[i]]$Inits$X.mod[block.list[[i]]$constant$H] } - #initialize q. - #if we want the vector q. + # initialize q. + # if we want the vector q. if (block.list[[i]]$constant$q.type == 3) { for (j in seq_along(block.list[[i]]$data$y.censored)) { block.list[[i]]$Inits$q <- c(block.list[[i]]$Inits$q, stats::rgamma(1, shape = block.list[[i]]$data$aq[j], rate = block.list[[i]]$data$bq[j])) } } else if (block.list[[i]]$constant$q.type == 4) { - #if we want the wishart Q. - if ("try-error" %in% class(try(block.list[[i]]$Inits$q <- - stats::rWishart(1, df = block.list[[i]]$data$bq, Sigma = block.list[[i]]$data$aq)[,,1], silent = T))) { - block.list[[i]]$Inits$q <- - stats::rWishart(1, df = block.list[[i]]$data$bq, Sigma = stats::toeplitz((block.list[[i]]$constant$YN:1)/block.list[[i]]$constant$YN))[,,1] + # if we want the wishart Q. + if ("try-error" %in% class(try(block.list[[i]]$Inits$q <- + stats::rWishart(1, df = block.list[[i]]$data$bq, Sigma = block.list[[i]]$data$aq)[, , 1], silent = T))) { + block.list[[i]]$Inits$q <- + stats::rWishart(1, df = block.list[[i]]$data$bq, Sigma = stats::toeplitz((block.list[[i]]$constant$YN:1) / block.list[[i]]$constant$YN))[, , 1] } } } - #return values. + # return values. return(block.list) } ##' @title MCMC_block_function ##' @name MCMC_block_function ##' @author Dongchen Zhang -##' +##' ##' @param block each block within the `block.list` lists. -##' +##' ##' @return It returns the `block` object with analysis results filled in. MCMC_block_function <- function(block) { - #build nimble model - #TODO: harmonize the MCMC code between block-based and general analysis functions to reduce the complexity of code. + # build nimble model + # TODO: harmonize the MCMC code between block-based and general analysis functions to reduce the complexity of code. model_pred <- nimble::nimbleModel(GEF.MultiSite.Nimble, - data = block$data, - inits = block$Inits, - constants = block$constant, - name = 'base') - #configure MCMC - conf <- nimble::configureMCMC(model_pred, print=FALSE) + data = block$data, + inits = block$Inits, + constants = block$constant, + name = "base" + ) + # configure MCMC + conf <- nimble::configureMCMC(model_pred, print = FALSE) conf$setMonitors(c("X", "X.mod", "q")) - - #Handle samplers - #hear we change the RW_block sampler to the ess sampler - #because it has a better performance of MVN sampling + + # Handle samplers + # hear we change the RW_block sampler to the ess sampler + # because it has a better performance of MVN sampling samplerLists <- conf$getSamplers() samplerNumberOffset <- length(samplerLists) if (block$constant$q.type == 4) { - #if we have wishart q - #everything should be sampled with ess sampler. - samplerLists %>% purrr::map(function(l){l$setName("ess")}) + # if we have wishart q + # everything should be sampled with ess sampler. 
+ samplerLists %>% purrr::map(function(l) { + l$setName("ess") + }) } conf$setSamplers(samplerLists) - - #add Pf as propCov in the control list of the X.mod nodes. + + # add Pf as propCov in the control list of the X.mod nodes. X.mod.ind <- which(grepl("X.mod", samplerLists %>% purrr::map(~ .x$target) %>% unlist())) conf$removeSampler(samplerLists[[X.mod.ind]]$target) - conf$addSampler(target = samplerLists[[X.mod.ind]]$target, type = "ess", - control = list(propCov= block$data$pf, adaptScaleOnly = TRUE, - latents = "X", pfOptimizeNparticles = TRUE)) + conf$addSampler( + target = samplerLists[[X.mod.ind]]$target, type = "ess", + control = list( + propCov = block$data$pf, adaptScaleOnly = TRUE, + latents = "X", pfOptimizeNparticles = TRUE + ) + ) - #add toggle Y sampler. + # add toggle Y sampler. for (i in 1:block$constant$YN) { - conf$addSampler(paste0("y.censored[", i, "]"), 'toggle', control=list(type='RW')) + conf$addSampler(paste0("y.censored[", i, "]"), "toggle", control = list(type = "RW")) } conf$printSamplers() - #compile MCMC + # compile MCMC Rmcmc <- nimble::buildMCMC(conf) Cmodel <- nimble::compileNimble(model_pred) Cmcmc <- nimble::compileNimble(Rmcmc, project = model_pred, showCompilerOutput = FALSE) - - #if we don't have any NA in the Y. + + # if we don't have any NA in the Y. if (!any(is.na(block$data$y.censored))) { - #add toggle Y sampler. - for(i in 1:block$constant$YN) { - valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset+i]], 'toggle', 0) + # add toggle Y sampler. + for (i in 1:block$constant$YN) { + valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset + i]], "toggle", 0) } } - - #run MCMC + + # run MCMC dat <- runMCMC(Cmcmc, niter = block$MCMC$niter, nburnin = block$MCMC$nburnin, thin = block$MCMC$nthin, nchains = block$MCMC$nchain) - #update aq, bq, mua, and pa + # update aq, bq, mua, and pa M <- colMeans(dat) block$update$aq <- block$Inits$q if (block$constant$q.type == 3) { - #if it's a vector q case + # if it's a vector q case aq <- bq <- rep(NA, length(block$data$y.censored)) for (i in seq_along(aq)) { CHAR <- paste0("[", i, "]") - aq[i] <- (mean(dat[, paste0("q", CHAR)]))^2/stats::var(dat[, paste0("q", CHAR)]) - bq[i] <- mean(dat[, paste0("q", CHAR)])/stats::var(dat[, paste0("q", CHAR)]) + aq[i] <- (mean(dat[, paste0("q", CHAR)]))^2 / stats::var(dat[, paste0("q", CHAR)]) + bq[i] <- mean(dat[, paste0("q", CHAR)]) / stats::var(dat[, paste0("q", CHAR)]) } - #update aqq and bqq - block$aqq[,block$t+1] <- block$aqq[, block$t] - block$aqq[block$constant$H, block$t+1] <- aq - block$bqq[,block$t+1] <- block$bqq[, block$t] - block$bqq[block$constant$H, block$t+1] <- bq + # update aqq and bqq + block$aqq[, block$t + 1] <- block$aqq[, block$t] + block$aqq[block$constant$H, block$t + 1] <- aq + block$bqq[, block$t + 1] <- block$bqq[, block$t] + block$bqq[block$constant$H, block$t + 1] <- bq } else if (block$constant$q.type == 4) { - #previous updates - mq <- dat[, grep("q", colnames(dat))] # Omega, Precision - q.bar <- matrix(apply(mq, 2, mean), - length(block$constant$H), - length(block$constant$H) + # previous updates + mq <- dat[, grep("q", colnames(dat))] # Omega, Precision + q.bar <- matrix( + apply(mq, 2, mean), + length(block$constant$H), + length(block$constant$H) ) wish.df <- function(Om, X, i, j, col) { (Om[i, j]^2 + Om[i, i] * Om[j, j]) / stats::var(X[, col]) } - col <- matrix(1:length(block$constant$H) ^ 2, - length(block$constant$H), - length(block$constant$H)) - WV <- matrix(0, length(block$constant$H), 
length(block$constant$H)) + col <- matrix( + 1:length(block$constant$H)^2, + length(block$constant$H), + length(block$constant$H) + ) + WV <- matrix(0, length(block$constant$H), length(block$constant$H)) for (i in seq_along(block$constant$H)) { for (j in seq_along(block$constant$H)) { WV[i, j] <- wish.df(q.bar, X = mq, i = i, j = j, col = col[i, j]) @@ -491,10 +520,10 @@ MCMC_block_function <- function(block) { bq <- block$constant$YN } aq <- solve(q.bar) * bq - block$aqq[,,block$t+1] <- GrabFillMatrix(block$aqq[,,block$t], block$constant$H, aq) - block$bqq[block$t+1] <- bq + block$aqq[, , block$t + 1] <- GrabFillMatrix(block$aqq[, , block$t], block$constant$H, aq) + block$bqq[block$t + 1] <- bq } - #update mua and pa; mufa, and pfa + # update mua and pa; mufa, and pfa iX <- grep("X[", colnames(dat), fixed = TRUE) iX.mod <- grep("X.mod[", colnames(dat), fixed = TRUE) if (length(iX) == 1) { @@ -504,7 +533,7 @@ MCMC_block_function <- function(block) { mua <- colMeans(dat[, iX]) pa <- stats::cov(dat[, iX]) } - + if (length(iX.mod) == 1) { mufa <- mean(dat[, iX.mod]) pfa <- stats::var(dat[, iX.mod]) @@ -512,8 +541,8 @@ MCMC_block_function <- function(block) { mufa <- colMeans(dat[, iX.mod]) pfa <- stats::cov(dat[, iX.mod]) } - - #return values. + + # return values. block$update <- list(aq = aq, bq = bq, mua = mua, pa = pa, mufa = mufa, pfa = pfa) return(block) } @@ -521,7 +550,7 @@ MCMC_block_function <- function(block) { ##' @title update_q ##' @name update_q ##' @author Dongchen Zhang -##' +##' ##' @param block.list.all lists of blocks for each time point. ##' @param t time point. ##' @param nt total length of time steps. @@ -529,19 +558,19 @@ MCMC_block_function <- function(block) { ##' @param bqq.Init the initial values of bqq, the default is NULL. ##' @param MCMC_dat data frame of MCMC samples, the default is NULL. ##' @param block.list.all.pre pre-existing block.list.all object for passing the aqq and bqq to the current SDA run, the default is NULL. -##' +##' ##' @return It returns the `block.list.all` object with initialized/updated Q filled in. -update_q <- function (block.list.all, t, nt, aqq.Init = NULL, bqq.Init = NULL, MCMC_dat = NULL, block.list.all.pre = NULL) { +update_q <- function(block.list.all, t, nt, aqq.Init = NULL, bqq.Init = NULL, MCMC_dat = NULL, block.list.all.pre = NULL) { block.list <- block.list.all[[t]] - #if it's an update. + # if it's an update.
if (is.null(MCMC_dat)) { - #loop over blocks + # loop over blocks if (t == 1) { for (i in seq_along(block.list)) { nvar <- length(block.list[[i]]$data$muf) nobs <- length(block.list[[i]]$data$y.censored) if (block.list[[i]]$constant$q.type == 3) { - #initialize aqq and bqq for nt + # initialize aqq and bqq for nt if (!is.null(aqq.Init) && !is.null(bqq.Init)) { block.list[[i]]$aqq <- array(aqq.Init, dim = c(nvar, nt + 1)) block.list[[i]]$bqq <- array(bqq.Init, dim = c(nvar, nt + 1)) @@ -549,16 +578,16 @@ update_q <- function (block.list.all, t, nt, aqq.Init = NULL, bqq.Init = NULL, M block.list[[i]]$aqq <- array(1, dim = c(nvar, nt + 1)) block.list[[i]]$bqq <- array(1, dim = c(nvar, nt + 1)) } - #update aq and bq based on aqq and bqq + # update aq and bq based on aqq and bqq block.list[[i]]$data$aq <- block.list[[i]]$aqq[block.list[[i]]$constant$H, t] block.list[[i]]$data$bq <- block.list[[i]]$bqq[block.list[[i]]$constant$H, t] } else if (block.list[[i]]$constant$q.type == 4) { - #initialize aqq and bqq for nt + # initialize aqq and bqq for nt block.list[[i]]$aqq <- array(1, dim = c(nvar, nvar, nt + 1)) - block.list[[i]]$aqq[,,t] <- stats::toeplitz((nvar:1)/nvar) + block.list[[i]]$aqq[, , t] <- stats::toeplitz((nvar:1) / nvar) block.list[[i]]$bqq <- rep(nobs, nt + 1) - #update aq and bq based on aqq and bqq - block.list[[i]]$data$aq <- GrabFillMatrix(block.list[[i]]$aqq[,,t], block.list[[i]]$constant$H) + # update aq and bq based on aqq and bqq + block.list[[i]]$data$aq <- GrabFillMatrix(block.list[[i]]$aqq[, , t], block.list[[i]]$constant$H) block.list[[i]]$data$bq <- block.list[[i]]$bqq[t] } } @@ -566,38 +595,38 @@ update_q <- function (block.list.all, t, nt, aqq.Init = NULL, bqq.Init = NULL, M if (!is.null(block.list.all.pre)) { block.list.pre <- block.list.all.pre[[t - 1]] } else { - #if we want to update q from previous SDA runs. + # if we want to update q from previous SDA runs. block.list.pre <- block.list.all[[t - 1]] } for (i in seq_along(block.list)) { nvar <- length(block.list[[i]]$data$muf) nobs <- length(block.list[[i]]$data$y.censored) if (block.list[[i]]$constant$q.type == 3) { - #copy previous aqq and bqq to the current t + # copy previous aqq and bqq to the current t block.list[[i]]$aqq <- block.list.pre[[i]]$aqq block.list[[i]]$bqq <- block.list.pre[[i]]$bqq - #update aq and bq + # update aq and bq block.list[[i]]$data$aq <- block.list[[i]]$aqq[block.list[[i]]$constant$H, t] block.list[[i]]$data$bq <- block.list[[i]]$bqq[block.list[[i]]$constant$H, t] } else if (block.list[[i]]$constant$q.type == 4) { - #initialize aqq and bqq for nt + # initialize aqq and bqq for nt block.list[[i]]$aqq <- block.list.pre[[i]]$aqq block.list[[i]]$bqq <- block.list.pre[[i]]$bqq - #if previous Q is smaller than the actual YN. + # if previous Q is smaller than the actual YN. if (block.list.pre[[i]]$bqq[t] <= block.list[[i]]$constant$YN) { block.list[[i]]$bqq[t] <- block.list[[i]]$constant$YN } - #update aq and bq based on aqq and bqq - block.list[[i]]$data$aq <- GrabFillMatrix(block.list[[i]]$aqq[,,t], block.list[[i]]$constant$H) + # update aq and bq based on aqq and bqq + block.list[[i]]$data$aq <- GrabFillMatrix(block.list[[i]]$aqq[, , t], block.list[[i]]$constant$H) block.list[[i]]$data$bq <- block.list[[i]]$bqq[t] } } } } else { - #TODO: Implement the feature that Q can be updated based on the pft types. + # TODO: Implement the feature that Q can be updated based on the pft types. } - - #return values. + + # return values. 
block.list.all[[t]] <- block.list return(block.list.all) } @@ -605,13 +634,13 @@ update_q <- function (block.list.all, t, nt, aqq.Init = NULL, bqq.Init = NULL, M ##' @title block.2.vector ##' @name block.2.vector ##' @author Dongchen Zhang -##' +##' ##' @param block.list lists of blocks generated by the `build.block.xy` function. ##' @param X A matrix containing ensemble forecasts. ##' @param H H index created by the `construct_nimble_H` function. -##' +##' ##' @return It returns a list of analysis results by MCMC sampling. -block.2.vector <- function (block.list, X, H) { +block.2.vector <- function(block.list, X, H) { site.ids <- attributes(X)$Site mu.f <- mu.a <- c() Pf <- Pa <- matrix(0, length(site.ids), length(site.ids)) @@ -620,16 +649,18 @@ block.2.vector <- function (block.list, X, H) { for (id in L$site.ids) { ind <- c(ind, which(site.ids == id)) } - #convert mu.f and pf + # convert mu.f and pf mu.a[ind] <- mu.f[ind] <- L$update$mufa Pa[ind, ind] <- Pf[ind, ind] <- L$update$pfa - #convert mu.a and pa + # convert mu.a and pa ind <- intersect(ind, H$H.ind) mu.a[ind] <- L$update$mua Pa[ind, ind] <- L$update$pa } - return(list(mu.f = mu.f, - Pf = Pf, - mu.a = mu.a, - Pa = Pa)) -} \ No newline at end of file + return(list( + mu.f = mu.f, + Pf = Pf, + mu.a = mu.a, + Pa = Pa + )) +} diff --git a/modules/assim.sequential/R/Analysis_sda_multiSite.R b/modules/assim.sequential/R/Analysis_sda_multiSite.R index 9e38a466e6f..bdd70bd531e 100644 --- a/modules/assim.sequential/R/Analysis_sda_multiSite.R +++ b/modules/assim.sequential/R/Analysis_sda_multiSite.R @@ -1,70 +1,79 @@ ##' @title EnKF.MultiSite ##' @name EnKF.MultiSite ##' @author Michael Dietze \email{dietze@@bu.edu}, Ann Raiho and Hamze Dokoohaki -##' -##' @param settings pecan standard settings list. +##' +##' @param settings pecan standard settings list. ##' @param Forecast A list containing the forecast variables including Q (process variance) and X (a dataframe of forecast state variables for different ensembles) ##' @param Observed A list containing the observed variables including R (cov of observed state variables) and Y (vector of estimated mean of observed state variables) ##' @param H is a matrix of 1's and 0's specifying which observations go with which state variables. ##' @param extraArg This argument is NOT used inside this function but it is a list containing aqq, bqq and t. The aqq and bqq are shape parameters estimated over time for the process covariance and t gives the time in terms of index of obs.list. ##' @param ... Extra argument sent to the analysis function. ##' @details This function is different from the `EnKF` function in terms of how it creates the Pf matrix. -##' -##' -##' @description Given the Forecast and Observed this function performs the Ensemble Kalamn Filter. -##' +##' +##' +##' @description Given the Forecast and Observed this function performs the Ensemble Kalman Filter. +##' ##' @return It returns a list with estimated mean and cov matrix of forecast state variables as well as mean and cov estimated as a result of assimilation/analysis. ##' @export -EnKF.MultiSite <- function(settings, Forecast, Observed, H, extraArg=NULL, ...){ +EnKF.MultiSite <- function(settings, Forecast, Observed, H, extraArg = NULL, ...)
{ #------------------------------Setup Localization.FUN <- settings$state.data.assimilation$Localization.FUN # localization function scalef <- settings$state.data.assimilation$scalef %>% as.numeric() # scale factor for localization - var.names <- sapply(settings$state.data.assimilation$state.variable, '[[', "variable.name") - site.ids <- settings %>% purrr::map(~.x[['run']] ) %>% purrr::map('site') %>% purrr::map('id') %>% unlist() + var.names <- sapply(settings$state.data.assimilation$state.variable, "[[", "variable.name") + site.ids <- settings %>% + purrr::map(~ .x[["run"]]) %>% + purrr::map("site") %>% + purrr::map("id") %>% + unlist() #-- reading the dots and exposing them to the inside of the function - dots<-list(...) - if (length(dots)>0) lapply(names(dots),function(name){assign(name,dots[[name]])}) - for(i in seq_along(dots)) assign(names(dots)[i],dots[[names(dots)[i]]]) + dots <- list(...) + if (length(dots) > 0) { + lapply(names(dots), function(name) { + assign(name, dots[[name]]) + }) + } + for (i in seq_along(dots)) assign(names(dots)[i], dots[[names(dots)[i]]]) - #Forcast inputs + # Forcast inputs Q <- Forecast$Q # process error - X <- Forecast$X # states - #Observed inputs + X <- Forecast$X # states + # Observed inputs R <- Observed$R Y <- Observed$Y # Enkf--------------------------------------------------- mu.f <- as.numeric(apply(X, 2, mean, na.rm = TRUE)) %>% - `attr<-`('Site', c(rep(site.ids, each=length(var.names)))) + `attr<-`("Site", c(rep(site.ids, each = length(var.names)))) # I make the Pf in a separate function - if(length(site.ids)>1){ + if (length(site.ids) > 1) { # This the function makes the Pf by creating blocks in a matrix for different sites - # We can also send a localization functions to this + # We can also send a localization functions to this # for extra argumnets like distance matrix for localization use elipsis - Pf <- Contruct.Pf (site.ids, var.names, X, - localization.FUN=eval(parse(text = Localization.FUN)), - t=extraArg$t, - blocked.dis, - scalef) - }else{ + Pf <- Contruct.Pf(site.ids, var.names, X, + localization.FUN = eval(parse(text = Localization.FUN)), + t = extraArg$t, + blocked.dis, + scalef + ) + } else { PEcAn.logger::logger.severe("You need to send this function a multisetting object containing multiple sites/runs.") } ## process error if (!is.null(Q)) { Pf <- Pf + Q } - + if (length(Y) > 1) { PEcAn.logger::logger.info("The zero variances in R and Pf is being replaced by half and one fifth of the minimum variance in those matrices respectively.") - diag(R)[which(diag(R)==0)] <- min(diag(R)[which(diag(R) != 0)])/2 - diag(Pf)[which(diag(Pf)==0)] <- min(diag(Pf)[which(diag(Pf) != 0)])/5 + diag(R)[which(diag(R) == 0)] <- min(diag(R)[which(diag(R) != 0)]) / 2 + diag(Pf)[which(diag(Pf) == 0)] <- min(diag(Pf)[which(diag(Pf) != 0)]) / 5 } - + ## Kalman Gain K <- Pf %*% t(H) %*% solve((R + H %*% Pf %*% t(H))) # Analysis mu.a <- mu.f + K %*% (Y - H %*% mu.f) - Pa <- (diag(ncol(X)) - K %*% H) %*% Pf + Pa <- (diag(ncol(X)) - K %*% H) %*% Pf return(list(mu.f = mu.f, Pf = Pf, mu.a = mu.a, Pa = Pa)) } @@ -72,36 +81,40 @@ EnKF.MultiSite <- function(settings, Forecast, Observed, H, extraArg=NULL, ...){ ##' @rdname GEF ##' @export -GEF.MultiSite <- function(settings, Forecast, Observed, H, extraArg,...){ +GEF.MultiSite <- function(settings, Forecast, Observed, H, extraArg, ...) { #-- reading the dots and exposing them to the inside of the function - dots<-list(...) 
- if (length(dots) > 0) lapply(names(dots),function(name){assign(name,dots[[name]], pos = 1 )}) - #General - var.names <- sapply(settings$state.data.assimilation$state.variable, '[[', "variable.name") - - #Define Q type from settings. + dots <- list(...) + if (length(dots) > 0) { + lapply(names(dots), function(name) { + assign(name, dots[[name]], pos = 1) + }) + } + # General + var.names <- sapply(settings$state.data.assimilation$state.variable, "[[", "variable.name") + + # Define Q type from settings. q.type <- toupper(settings$state.data.assimilation$q.type) - single.q <-1 - Site.q <-2 - pft.q <-3 - if (is.null(q.type) | q.type=="SINGLE") { + single.q <- 1 + Site.q <- 2 + pft.q <- 3 + if (is.null(q.type) | q.type == "SINGLE") { q.type <- single.q - } else{ + } else { q.type <- ifelse(q.type == "SITE", Site.q, pft.q) - } - - #Forecast inputs + } + + # Forecast inputs Q <- Forecast$Q # process error - X <- Forecast$X # states - if(!is.null(extraArg$Pf)){ + X <- Forecast$X # states + if (!is.null(extraArg$Pf)) { Pf <- extraArg$Pf - }else{ + } else { Pf <- stats::cov(X) # Cov Forecast - This is used as an initial condition - diag(Pf)[which(diag(Pf)==0)] <- min(diag(Pf)[which(diag(Pf) != 0)])/5 #fixing det(Pf)==0 + diag(Pf)[which(diag(Pf) == 0)] <- min(diag(Pf)[which(diag(Pf) != 0)]) / 5 # fixing det(Pf)==0 } - mu.f <- colMeans(X) #mean Forecast - This is used as an initial condition - - #Observed inputs + mu.f <- colMeans(X) # mean Forecast - This is used as an initial condition + + # Observed inputs R <- Observed$R Y <- Observed$Y wish.df <- function(Om, X, i, j, col) { @@ -109,44 +122,45 @@ GEF.MultiSite <- function(settings, Forecast, Observed, H, extraArg,...){ } #----------------------------------- GEF----------------------------------------------------- interval <- NULL - #added this line in case you don't need to do censoring. + # added this line in case you don't need to do censoring. X.new <- NULL # Reading the extra arguments aqq <- extraArg$aqq bqq <- extraArg$bqq - wts <- extraArg$wts/sum(extraArg$wts) - if(any(is.na(wts))){ + wts <- extraArg$wts / sum(extraArg$wts) + if (any(is.na(wts))) { PEcAn.logger::logger.warn( "We found an NA in the wts for the ensemble members.", - "Is this what you want? For now, we will change the NA to a zero.") + "Is this what you want? For now, we will change the NA to a zero." + ) wts[is.na(wts)] <- 0 } - if(sum(wts==0)){ - wts <- rep(1,nrow(X))/nrow(X) + if (sum(wts == 0)) { + wts <- rep(1, nrow(X)) / nrow(X) } t <- extraArg$t - nitr.GEF<-extraArg$nitr.GEF - nthin<-extraArg$nthin + nitr.GEF <- extraArg$nitr.GEF + nthin <- extraArg$nthin nburnin <- extraArg$nburnin censored.data <- extraArg$censored.data - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### # if we had censored data and we don't have pre-calculated Pf. 
- ###-------------------------------------------------------------------###---- + ### -------------------------------------------------------------------###---- if (censored.data && is.null(extraArg$Pf)) { - out.cens<-tobit_model_censored (settings, X, var.names, mu.f, Pf, t) + out.cens <- tobit_model_censored(settings, X, var.names, mu.f, Pf, t) mu.f <- out.cens$mu.f Pf <- out.cens$Pf iycens <- out.cens$iycens X.new <- out.cens$X.new } # end of if we have censored data - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### # Generalized Ensemble Filter ###----- - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### # if(sum(diag(Pf)-diag(cov(X))) > 10 | sum(diag(Pf)-diag(cov(X))) < -10) logger.severe('Increase Sample Size') #--- This is where the localization needs to happen - After imputing Pf elements.W.Data <- which(apply(H, 2, sum) == 1) - if (exists('blocked.dis') & is.null(extraArg$Pf)){ + if (exists("blocked.dis") & is.null(extraArg$Pf)) { Pf <- Local.support(Pf, blocked.dis, settings$state.data.assimilation$scalef %>% as.numeric()) } @@ -155,122 +169,136 @@ GEF.MultiSite <- function(settings, Forecast, Observed, H, extraArg,...){ if (t == 1) { bqq[1] <- length(elements.W.Data) if (is.null(aqq)) { - if (q.type==Site.q) { # if we wanna estimate a q per site + if (q.type == Site.q) { # if we wanna estimate a q per site aqq <- array(1, dim = c(length(elements.W.Data), length(elements.W.Data), nt)) for (i in 1:nt) { - aqq[,,i] <- diag(length(elements.W.Data)) + aqq[, , i] <- diag(length(elements.W.Data)) } - } else if(q.type == pft.q){ # if we wanna estimate a q per PFT + } else if (q.type == pft.q) { # if we wanna estimate a q per PFT site.pfts <- settings %>% - purrr::map( ~ .x[['run']]) %>% - purrr::map('site') %>% - purrr::map('site.pft') %>% - purrr::map('pft.name') %>% + purrr::map(~ .x[["run"]]) %>% + purrr::map("site") %>% + purrr::map("site.pft") %>% + purrr::map("pft.name") %>% purrr::modify(as.factor) %>% purrr::modify(as.numeric) %>% purrr::modify_if(function(x) { if (length(x) > 0) { return(FALSE) - } else{ + } else { return(TRUE) } - }, ~ 1) %>% + }, ~1) %>% unlist() - aqq<- array(1, dim = c(max(site.pfts), max(site.pfts), nt)) - }else{ # This is where we estimate just one q for all - aqq<- array(1, dim = c(1, 1, nt)) + aqq <- array(1, dim = c(max(site.pfts), max(site.pfts), nt)) + } else { # This is where we estimate just one q for all + aqq <- array(1, dim = c(1, 1, nt)) } - } else{ + } else { if (length(elements.W.Data) != dim(aqq)[1] | - length(elements.W.Data) != dim(aqq)[2]) { - PEcAn.logger::logger.warn('error: X has changed dimensions') + length(elements.W.Data) != dim(aqq)[2]) { + PEcAn.logger::logger.warn("error: X has changed dimensions") } } - } else{ + } else { # if(length(elements.W.Data)==ncol(aqq[, , t])){ - if (ncol(aqq) > 1 & nrow(aqq) > 1) - aqq[, , t] <- Local.support( - aqq[, , t], - distances[ceiling(elements.W.Data/length(var.names)), # finding sites with data - ceiling(elements.W.Data/length(var.names))], - settings$state.data.assimilation$scalef %>% as.numeric() - ) - } + if (ncol(aqq) > 1 & nrow(aqq) > 1) { + aqq[, , t] <- Local.support( + aqq[, , t], + distances[ + ceiling(elements.W.Data / length(var.names)), # finding sites with data + ceiling(elements.W.Data / length(var.names)) + ], + settings$state.data.assimilation$scalef %>% as.numeric() + ) 
+ } + } ### create matrix the describes the support for each observed state variable at time t interval <- matrix(NA, length(unlist(obs.mean[[t]])), 2) - + # if this function is revoked by multisite then the structure of data looks a bit different. - if (exists('blocked.dis')){ - rownames(interval) <- obs.mean[[t]] %>% purrr::flatten() %>% names() - - }else{ + if (exists("blocked.dis")) { + rownames(interval) <- obs.mean[[t]] %>% + purrr::flatten() %>% + names() + } else { rownames(interval) <- names(obs.mean[[t]]) } - + for (i in 1:length(var.names)) { - interval[which(startsWith(rownames(interval), - var.names[i])),] <- - matrix(c( - as.numeric( - settings$state.data.assimilation$state.variables[[i]]$min_value + interval[which(startsWith( + rownames(interval), + var.names[i] + )), ] <- + matrix( + c( + as.numeric( + settings$state.data.assimilation$state.variables[[i]]$min_value + ), + as.numeric( + settings$state.data.assimilation$state.variables[[i]]$max_value + ) ), - as.numeric( - settings$state.data.assimilation$state.variables[[i]]$max_value - ) - ), - length(which(startsWith( - rownames(interval), - var.names[i] - ))), 2, byrow = TRUE) + length(which(startsWith( + rownames(interval), + var.names[i] + ))), 2, + byrow = TRUE + ) } #### These vectors are used to categorize data based on censoring #### from the interval matrix y.ind <- as.numeric(Y > interval[, 1]) y.censored <- as.numeric(ifelse(Y > interval[, 1], Y, 0)) - data <- list(elements.W.Data = elements.W.Data, - X = X, - Pf = Pf, - aqq = aqq, - bqq = bqq, - mu.f = mu.f, - q.type = q.type, - R = R, - y.censored = y.censored, - y.ind = y.ind, - nitr.GEF = extraArg$nitr.GEF, - nburnin = extraArg$nburnin, - nthin = extraArg$nthin, - monitors = c("Xall", "qq")) + data <- list( + elements.W.Data = elements.W.Data, + X = X, + Pf = Pf, + aqq = aqq, + bqq = bqq, + mu.f = mu.f, + q.type = q.type, + R = R, + y.censored = y.censored, + y.ind = y.ind, + nitr.GEF = extraArg$nitr.GEF, + nburnin = extraArg$nburnin, + nthin = extraArg$nthin, + monitors = c("Xall", "qq") + ) outputs <- furrr::future_map(lapply(rep("data", as.numeric(settings$state.data.assimilation$chains)), get), MCMC_function) dat <- do.call(rbind, outputs) - + #---- Saving the chains - save(dat, file=file.path(settings$outdir, paste0('dat',t,'.Rdata'))) - + save(dat, file = file.path(settings$outdir, paste0("dat", t, ".Rdata"))) + ## update parameters - iX <- grep("Xall[", colnames(dat), fixed = TRUE) + iX <- grep("Xall[", colnames(dat), fixed = TRUE) mu.a <- colMeans(dat[, iX]) - Pa <- stats::cov(dat[, iX]) + Pa <- stats::cov(dat[, iX]) Pa[is.na(Pa)] <- 0 - mq <- dat[, grep("q", colnames(dat))] # Omega, Precision - q.bar <- matrix(apply(mq, 2, mean), - length(elements.W.Data), - length(elements.W.Data) - ) # Mean Omega, Precision + mq <- dat[, grep("q", colnames(dat))] # Omega, Precision + q.bar <- matrix( + apply(mq, 2, mean), + length(elements.W.Data), + length(elements.W.Data) + ) # Mean Omega, Precision # Setting up the prior for the next step from the posterior of this step - if (t < nt){ - if (q.type == single.q){ #if it's a gamma case - qq <- dat[, grep("qq", colnames(dat))] - aqq[1, 1, t + 1] <- (mean(qq))^2/stats::var(qq) - bqq[t + 1] <- mean(qq)/stats::var(qq) + if (t < nt) { + if (q.type == single.q) { # if it's a gamma case + qq <- dat[, grep("qq", colnames(dat))] + aqq[1, 1, t + 1] <- (mean(qq))^2 / stats::var(qq) + bqq[t + 1] <- mean(qq) / stats::var(qq) } else { # if it's a wish case - col <- matrix(1:length(elements.W.Data) ^ 2, - length(elements.W.Data), 
- length(elements.W.Data)) - WV <- matrix(0, length(elements.W.Data), length(elements.W.Data)) + col <- matrix( + 1:length(elements.W.Data)^2, + length(elements.W.Data), + length(elements.W.Data) + ) + WV <- matrix(0, length(elements.W.Data), length(elements.W.Data)) for (i in seq_along(elements.W.Data)) { for (j in seq_along(elements.W.Data)) { WV[i, j] <- wish.df(q.bar, X = mq, i = i, j = j, col = col[i, j]) @@ -281,34 +309,35 @@ GEF.MultiSite <- function(settings, Forecast, Observed, H, extraArg,...){ n <- length(mu.f) } V <- solve(q.bar) * n - aqq[, ,t + 1] <- V + aqq[, , t + 1] <- V bqq[t + 1] <- n } } - #---- Trying to release some of the memory back to the os + #---- Trying to release some of the memory back to the os gc() # - return(list(mu.f = mu.f, - Pf = Pf, - mu.a = mu.a, - Pa = Pa, - q.bar = q.bar, - n = n, - X.new=X.new, - aqq=aqq, - bqq=bqq - ) - ) + return(list( + mu.f = mu.f, + Pf = Pf, + mu.a = mu.a, + Pa = Pa, + q.bar = q.bar, + n = n, + X.new = X.new, + aqq = aqq, + bqq = bqq + )) } ##' @title MCMC_function ##' @author Michael Dietze \email{dietze@@bu.edu}, Ann Raiho, Hamze Dokoohaki, and Dongchen Zhang. ##' @param data list containing everything needed for the MCMC sampling. ##' @details This function replace the previous code where implenmented the MCMC sampling part, which allows the MCMC sampling of multiple chains under parallel mode. -MCMC_function <- function(data){ - dimensions.tobit <- list(X = length(data$elements.W.Data), - X.mod = ncol(data$X), - Q = c(nrow(data$aqq), ncol(data$aqq)) +MCMC_function <- function(data) { + dimensions.tobit <- list( + X = length(data$elements.W.Data), + X.mod = ncol(data$X), + Q = c(nrow(data$aqq), ncol(data$aqq)) ) # Contants defined in the model constants.tobit <- @@ -319,26 +348,26 @@ MCMC_function <- function(data){ H = data$elements.W.Data, NotH = which(!(1:ncol(data$X) %in% data$elements.W.Data)), nNotH = which(!(1:ncol(data$X) %in% data$elements.W.Data)) %>% length(), - q.type=data$q.type + q.type = data$q.type ) # Data used for setting the likelihood and other stuff data.tobit <- list( muf = as.vector(data$mu.f), pf = data$Pf, - aq = data$aqq[,,t], + aq = data$aqq[, , t], bq = data$bqq[t], y.ind = data$y.ind, y.censored = data$y.censored, r = solve(data$R) ) - if(constants.tobit$YN == 1){ - #add error message if trying to run SDA with 1 obs and 1 state variable no model currently exists to handle this case, need to remove for loop from GEF_singleobs_nimble for this case and save new model - if(constants.tobit$N == 1){ + if (constants.tobit$YN == 1) { + # add error message if trying to run SDA with 1 obs and 1 state variable no model currently exists to handle this case, need to remove for loop from GEF_singleobs_nimble for this case and save new model + if (constants.tobit$N == 1) { PEcAn.logger::logger.error("No model exists for assimilating 1 observation and 1 state variable, add more state variables or edit GEF_singleobs_nimble to work with 1 state variable") return(0) } - #slight adjustment to inputs for nimble function when running with 1 obs + # slight adjustment to inputs for nimble function when running with 1 obs inits.pred$qq <- 0.368 dimensions.tobit$y.censored <- 1 dimensions.tobit$y.ind <- 1 @@ -352,53 +381,57 @@ MCMC_function <- function(data){ q = diag(1, length(data$elements.W.Data), length(data$elements.W.Data)) ) model_pred <- nimble::nimbleModel(GEF_singleobs_nimble, - data = data.tobit, - dimensions = dimensions.tobit, - constants = constants.tobit, - inits = inits.pred, - name = 'base') - }else{ + 
data = data.tobit, + dimensions = dimensions.tobit, + constants = constants.tobit, + inits = inits.pred, + name = "base" + ) + } else { model_pred <- nimble::nimbleModel(GEF.MultiSite.Nimble, - data = data.tobit, - dimensions = dimensions.tobit, - constants = constants.tobit, - name = 'base') + data = data.tobit, + dimensions = dimensions.tobit, + constants = constants.tobit, + name = "base" + ) } ## Adding X.mod,q,r as data for building model. - conf <- nimble::configureMCMC(model_pred, print=TRUE) - conf$setMonitors(data$monitors) + conf <- nimble::configureMCMC(model_pred, print = TRUE) + conf$setMonitors(data$monitors) samplerNumberOffset <- length(conf$getSamplers()) - - for(i in 1:length(data$y.ind)) { - node <- paste0('y.censored[',i,']') - conf$addSampler(node, 'toggle', control=list(type='RW')) + + for (i in 1:length(data$y.ind)) { + node <- paste0("y.censored[", i, "]") + conf$addSampler(node, "toggle", control = list(type = "RW")) } - #handling samplers + # handling samplers samplerLists <- conf$getSamplers() - samplerLists[[2]]$control <- list(propCov= data$Pf, adaptScaleOnly = TRUE, adaptive = TRUE) + samplerLists[[2]]$control <- list(propCov = data$Pf, adaptScaleOnly = TRUE, adaptive = TRUE) conf$setSamplers(samplerLists) - + conf$printSamplers() Rmcmc <- nimble::buildMCMC(conf) Cmodel <- nimble::compileNimble(model_pred) Cmcmc <- nimble::compileNimble(Rmcmc, project = model_pred, showCompilerOutput = TRUE) - - for(i in 1:length(data$y.ind)) { - valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset+i]], 'toggle', 1-data$y.ind[i]) + + for (i in 1:length(data$y.ind)) { + valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset + i]], "toggle", 1 - data$y.ind[i]) } - inits <- function(){ + inits <- function() { ind <- sample(seq_along(1:nrow(data$X)), 1) - init_muf <- data$X[ind,] - list(X.mod = as.vector(init_muf), - X = as.vector(init_muf)[data$elements.W.Data], - Xall = as.vector(init_muf), - Xs = as.vector(init_muf)[data$elements.W.Data], - q = diag(1, length(data$elements.W.Data), length(data$elements.W.Data))) + init_muf <- data$X[ind, ] + list( + X.mod = as.vector(init_muf), + X = as.vector(init_muf)[data$elements.W.Data], + Xall = as.vector(init_muf), + Xs = as.vector(init_muf)[data$elements.W.Data], + q = diag(1, length(data$elements.W.Data), length(data$elements.W.Data)) + ) } - if(exists("inits.pred")){ + if (exists("inits.pred")) { dat <- runMCMC(Cmcmc, niter = data$nitr.GEF, nburnin = data$nburnin, thin = data$nthin, nchains = 1) - }else{ + } else { dat <- runMCMC(Cmcmc, niter = data$nitr.GEF, nburnin = data$nburnin, thin = data$nthin, nchains = 1, inits = inits) } return(dat) -} \ No newline at end of file +} diff --git a/modules/assim.sequential/R/Create_Site_PFT_CSV.R b/modules/assim.sequential/R/Create_Site_PFT_CSV.R index 3f75b894038..dca1e578eae 100644 --- a/modules/assim.sequential/R/Create_Site_PFT_CSV.R +++ b/modules/assim.sequential/R/Create_Site_PFT_CSV.R @@ -9,22 +9,24 @@ #' #' @examples #' \dontrun{ -#' NLCD <- file.path( -#' "/fs", "data1", "pecan.data", "input", -#' "nlcd_2001_landcover_2011_edition_2014_10_10", -#' "nlcd_2001_landcover_2011_edition_2014_10_10.img") -#' Ecoregion <- file.path( -#' "/projectnb", "dietzelab", "dongchen", -#' "All_NEON_SDA", "NEON42", "eco-region", "us_eco_l3_state_boundaries.shp") -#' settings <- PEcAn.settings::read.settings( -#' "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/pecan.xml") -#' con <- PEcAn.DB::db.open(settings$database$bety) -#' site_pft_info <- 
Create_Site_PFT_CSV(settings, Ecoregion, NLCD, con) +#' NLCD <- file.path( +#' "/fs", "data1", "pecan.data", "input", +#' "nlcd_2001_landcover_2011_edition_2014_10_10", +#' "nlcd_2001_landcover_2011_edition_2014_10_10.img" +#' ) +#' Ecoregion <- file.path( +#' "/projectnb", "dietzelab", "dongchen", +#' "All_NEON_SDA", "NEON42", "eco-region", "us_eco_l3_state_boundaries.shp" +#' ) +#' settings <- PEcAn.settings::read.settings( +#' "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/pecan.xml" +#' ) +#' con <- PEcAn.DB::db.open(settings$database$bety) +#' site_pft_info <- Create_Site_PFT_CSV(settings, Ecoregion, NLCD, con) #' } #' #' @export -Create_Site_PFT_CSV <- function(settings, Ecoregion, NLCD, con){ - +Create_Site_PFT_CSV <- function(settings, Ecoregion, NLCD, con) { # Bail out if packages in Suggests not available suggests_needed <- c("glue", "raster") suggests_found <- sapply(suggests_needed, requireNamespace, quietly = TRUE) @@ -33,113 +35,117 @@ Create_Site_PFT_CSV <- function(settings, Ecoregion, NLCD, con){ "Can't find package(s)", sQuote(suggests_needed[!suggests_found]), ", needed by PEcAnAssimSequential::Create_Site_PFT_CSV().", - "Please install these and try again.") + "Please install these and try again." + ) } - #grab Site IDs from settings + # grab Site IDs from settings observations <- c() for (i in 1:length(settings)) { obs <- settings[[i]]$run$site$id - observations <- c(observations,obs) + observations <- c(observations, obs) } - - + + site_ID <- observations suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) + ids = site_ID, .con = con + )) suppressWarnings(qry_results <- PEcAn.DB::db.query(site_qry, con)) - site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) - - #initialize data pool for NLCD - sites <- as.data.frame(cbind(site_info$site_id,site_info$lon, site_info$lat)) + site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone + ) + + # initialize data pool for NLCD + sites <- as.data.frame(cbind(site_info$site_id, site_info$lon, site_info$lat)) names(sites) <- c("id", "lon", "lat") - sp::coordinates(sites) <- ~lon+lat + sp::coordinates(sites) <- ~ lon + lat raster::projection(sites) <- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " cover <- raster::raster(NLCD) - + # - sites = sp::spTransform(sites, CRS = raster::crs(cover)) + sites <- sp::spTransform(sites, CRS = raster::crs(cover)) # make sure projections match - data = raster::extract(cover, sites) - sites$cover = data - site_data = sites - - ecoregion = raster::shapefile(Ecoregion) - ecoregion = sp::spTransform(ecoregion, CRS = raster::crs(site_data)) - eco_data = raster::extract(ecoregion, site_data) - site_data$region = eco_data$NA_L1CODE - site_data$name = eco_data$NA_L1NAME - - site_data = as.data.frame(site_data) - names(site_data) = c("ID", "cover", "ecoregion", "name", "lon", "lat") - site_data$pft = NA - site_data$cover = as.numeric(site_data$cover) - site_data$ecoregion = as.numeric(site_data$ecoregion) + data <- raster::extract(cover, sites) + sites$cover <- data + site_data <- sites + + ecoregion <- raster::shapefile(Ecoregion) + ecoregion <- sp::spTransform(ecoregion, CRS = raster::crs(site_data)) + eco_data <- raster::extract(ecoregion, site_data) + 
site_data$region <- eco_data$NA_L1CODE + site_data$name <- eco_data$NA_L1NAME + + site_data <- as.data.frame(site_data) + names(site_data) <- c("ID", "cover", "ecoregion", "name", "lon", "lat") + site_data$pft <- NA + site_data$cover <- as.numeric(site_data$cover) + site_data$ecoregion <- as.numeric(site_data$ecoregion) # remove sites that are categorized as unclassified, water, ice/snow, barren - index = which(site_data$cover == 0 | site_data$cover == 11 | site_data$cover == 12 | site_data$cover == 31) - site_data$pft[index] = NA - + index <- which(site_data$cover == 0 | site_data$cover == 11 | site_data$cover == 12 | site_data$cover == 31) + site_data$pft[index] <- NA + # classify deciduous - index = which(site_data$cover == 41) - site_data$pft[index] = "deciduous" - - + index <- which(site_data$cover == 41) + site_data$pft[index] <- "deciduous" + + # classify evergreen/conifer - index = which(site_data$cover == 42) - site_data$pft[index] = "conifer" - - + index <- which(site_data$cover == 42) + site_data$pft[index] <- "conifer" + + # classify mixed forest - index = which(site_data$cover == 43) - site_data$pft[index] = "mixed forest" - + index <- which(site_data$cover == 43) + site_data$pft[index] <- "mixed forest" + # classify developed - index = which(site_data$cover == 21 | site_data$cover == 22 | site_data$cover == 23 | site_data$cover == 24) - site_data$pft[index] = "developed" - + index <- which(site_data$cover == 21 | site_data$cover == 22 | site_data$cover == 23 | site_data$cover == 24) + site_data$pft[index] <- "developed" + # classify shrub/scrub - index = which(site_data$cover == 52 & (site_data$ecoregion == 10 | site_data$ecoregion == 11 | site_data$ecoregion == 12 | site_data$ecoregion == 13 | site_data$ecoregion == 14)) - site_data$pft[index] = "arid grassland" - - index = which(site_data$cover == 52 & (site_data$ecoregion == 9 | site_data$ecoregion == 8 | site_data$ecoregion == 6 | site_data$ecoregion == 7)) - site_data$pft[index] = "mesic grassland" - - + index <- which(site_data$cover == 52 & (site_data$ecoregion == 10 | site_data$ecoregion == 11 | site_data$ecoregion == 12 | site_data$ecoregion == 13 | site_data$ecoregion == 14)) + site_data$pft[index] <- "arid grassland" + + index <- which(site_data$cover == 52 & (site_data$ecoregion == 9 | site_data$ecoregion == 8 | site_data$ecoregion == 6 | site_data$ecoregion == 7)) + site_data$pft[index] <- "mesic grassland" + + # classify herbaceous - index = which(site_data$cover == 71 & (site_data$ecoregion == 10 | site_data$ecoregion == 11 | site_data$ecoregion == 12 | site_data$ecoregion == 13 | site_data$ecoregion == 14)) - site_data$pft[index] = "arid grassland" - - index = which(site_data$cover == 71 & (site_data$ecoregion == 9 | site_data$ecoregion == 15 | site_data$ecoregion == 7 | site_data$ecoregion == 8 | site_data$ecoregion == 5 | site_data$ecoregion == 6)) - site_data$pft[index] = "mesic grassland" - - + index <- which(site_data$cover == 71 & (site_data$ecoregion == 10 | site_data$ecoregion == 11 | site_data$ecoregion == 12 | site_data$ecoregion == 13 | site_data$ecoregion == 14)) + site_data$pft[index] <- "arid grassland" + + index <- which(site_data$cover == 71 & (site_data$ecoregion == 9 | site_data$ecoregion == 15 | site_data$ecoregion == 7 | site_data$ecoregion == 8 | site_data$ecoregion == 5 | site_data$ecoregion == 6)) + site_data$pft[index] <- "mesic grassland" + + # classify hay/pasture crops - index = which((site_data$cover == 81 | site_data$cover == 82) & (site_data$ecoregion == 10 | site_data$ecoregion 
== 11 | site_data$ecoregion == 12 | site_data$ecoregion == 13 | site_data$ecoregion == 14)) - site_data$pft[index] = "arid grassland" - - index = which((site_data$cover == 81 | site_data$cover == 82) & (site_data$ecoregion == 9 | site_data$ecoregion == 8 | site_data$ecoregion == 7)) - site_data$pft[index] = "mesic grassland" - + index <- which((site_data$cover == 81 | site_data$cover == 82) & (site_data$ecoregion == 10 | site_data$ecoregion == 11 | site_data$ecoregion == 12 | site_data$ecoregion == 13 | site_data$ecoregion == 14)) + site_data$pft[index] <- "arid grassland" + + index <- which((site_data$cover == 81 | site_data$cover == 82) & (site_data$ecoregion == 9 | site_data$ecoregion == 8 | site_data$ecoregion == 7)) + site_data$pft[index] <- "mesic grassland" + # classify wetlands - index = which(site_data$cover == 95) - site_data$pft[index] = "mesic grassland" - - index = which(site_data$cover == 90) - site_data$pft[index] = "woody wetland" - - # #rename temporally + index <- which(site_data$cover == 95) + site_data$pft[index] <- "mesic grassland" + + index <- which(site_data$cover == 90) + site_data$pft[index] <- "woody wetland" + + # #rename temporally # site_data$pft[site_data$pft=='mesic grassland'] <- 'semiarid.grassland_HPDA' # site_data$pft[site_data$pft=='woody wetland'] <- 'semiarid.grassland_HPDA' # site_data$pft[site_data$pft=='mixed forest'] <- 'temperate.deciduous.HPDA' # site_data$pft[site_data$pft=='deciduous'] <- 'temperate.deciduous.HPDA' # site_data$pft[site_data$pft=='conifer'] <- 'boreal.coniferous' # site_data$pft[site_data$pft=='arid grassland'] <- 'semiarid.grassland_HPDA' - - #write into csv file + + # write into csv file out.csv <- cbind(site_data$ID, site_data$pft) colnames(out.csv) <- c("site", "pft") - utils::write.csv(out.csv, file = paste0(settings$outdir,"/site_pft.csv"), row.names=FALSE) + utils::write.csv(out.csv, file = paste0(settings$outdir, "/site_pft.csv"), row.names = FALSE) return(site_data) -} \ No newline at end of file +} diff --git a/modules/assim.sequential/R/GEF_Helper.R b/modules/assim.sequential/R/GEF_Helper.R index 9012cc803c5..a456df5f9b3 100644 --- a/modules/assim.sequential/R/GEF_Helper.R +++ b/modules/assim.sequential/R/GEF_Helper.R @@ -14,24 +14,28 @@ tobit_model_censored <- function(settings, X, var.names, mu.f, Pf, t) { intervalX <- matrix(NA, ncol(X), 2) rownames(intervalX) <- colnames(X) - outdir <- settings$modeloutdir - #TO DO: Not working for fcomp + outdir <- settings$modeloutdir + # TO DO: Not working for fcomp for (i in 1:length(var.names)) { - intervalX[which(startsWith(rownames(intervalX), - var.names[i])), ] <- - matrix(c( - as.numeric( - settings$state.data.assimilation$state.variables[[i]]$min_value + intervalX[which(startsWith( + rownames(intervalX), + var.names[i] + )), ] <- + matrix( + c( + as.numeric( + settings$state.data.assimilation$state.variables[[i]]$min_value + ), + as.numeric( + settings$state.data.assimilation$state.variables[[i]]$max_value + ) ), - as.numeric( - settings$state.data.assimilation$state.variables[[i]]$max_value - ) - ), - length(which(startsWith( - rownames(intervalX), - var.names[i] - ))), 2, byrow = TRUE) - + length(which(startsWith( + rownames(intervalX), + var.names[i] + ))), 2, + byrow = TRUE + ) } #### These vectors are used to categorize data based on censoring from the interval matrix x.ind <- @@ -43,33 +47,35 @@ tobit_model_censored <- function(settings, X, var.names, mu.f, Pf, t) { as.numeric(ifelse(X[n, j] > intervalX[j, 2], 0, X[n, j])) # } } - + if (t == 1) { - #The purpose 
of this step is to impute data for mu.f - #where there are zero values so that - #mu.f is in 'tobit space' in the full model - constants.tobit2space = list(N = nrow(X), - J = length(mu.f)) - - data.tobit2space = list( + # The purpose of this step is to impute data for mu.f + # where there are zero values so that + # mu.f is in 'tobit space' in the full model + constants.tobit2space <- list( + N = nrow(X), + J = length(mu.f) + ) + + data.tobit2space <- list( y.ind = x.ind, y.censored = x.censored, mu_0 = rep(0, length(mu.f)), lambda_0 = diag(10, length(mu.f)), nu_0 = 3 - )#some measure of prior obs - + ) # some measure of prior obs + inits.tobit2space <<- - list(pf = Pf, muf = colMeans(X)) #pf = cov(X) - #set.seed(0) - #ptm <- proc.time() + list(pf = Pf, muf = colMeans(X)) # pf = cov(X) + # set.seed(0) + # ptm <- proc.time() tobit2space_pred <<- nimbleModel( tobit2space.model, data = data.tobit2space, constants = constants.tobit2space, inits = inits.tobit2space, - name = 'space' + name = "space" ) ## Adding X.mod,q,r as data for building model. conf_tobit2space <<- @@ -79,70 +85,75 @@ tobit_model_censored <- function(settings, X, var.names, mu.f, Pf, t) { ## this is needed for correct indexing later samplerNumberOffset_tobit2space <<- length(conf_tobit2space$getSamplers()) - + for (j in seq_along(mu.f)) { for (n in seq_len(nrow(X))) { - node <- paste0('y.censored[', n, ',', j, ']') - conf_tobit2space$addSampler(node, 'toggle', control = list(type = - 'RW')) + node <- paste0("y.censored[", n, ",", j, "]") + conf_tobit2space$addSampler(node, "toggle", control = list( + type = + "RW" + )) } } - - #conf_tobit2space$printSamplers() - + + # conf_tobit2space$printSamplers() + Rmcmc_tobit2space <<- buildMCMC(conf_tobit2space) - + Cmodel_tobit2space <<- compileNimble(tobit2space_pred) Cmcmc_tobit2space <<- compileNimble(Rmcmc_tobit2space, project = tobit2space_pred) - + for (i in seq_along(X)) { ## ironically, here we have to "toggle" the value of y.ind[i] ## this specifies that when y.ind[i] = 1, ## indicator variable is set to 0, which specifies *not* to sample - valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space + - i]], - 'toggle', - 1 - x.ind[i]) + valueInCompiledNimbleFunction( + Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space + + i]], + "toggle", + 1 - x.ind[i] + ) } - - } else{ + } else { Cmodel_tobit2space$y.ind <- x.ind Cmodel_tobit2space$y.censored <- x.censored - - inits.tobit2space = list(pf = Pf, muf = colMeans(X)) + + inits.tobit2space <- list(pf = Pf, muf = colMeans(X)) Cmodel_tobit2space$setInits(inits.tobit2space) - + for (i in seq_along(X)) { - valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space + - i]], - 'toggle', - 1 - x.ind[i]) + valueInCompiledNimbleFunction( + Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space + + i]], + "toggle", + 1 - x.ind[i] + ) } - } - + dat.tobit2space <- runMCMC(Cmcmc_tobit2space, - niter = 50000, - progressBar = TRUE) - + niter = 50000, + progressBar = TRUE + ) + ## update parameters mu.f <- colMeans(dat.tobit2space[, grep("muf", colnames(dat.tobit2space))]) Pf <- matrix(colMeans(dat.tobit2space[, grep("pf", colnames(dat.tobit2space))]), ncol(X), ncol(X)) - - - + + + iycens <- grep("y.censored", colnames(dat.tobit2space)) X.new <- matrix(colMeans(dat.tobit2space[, iycens]), nrow(X), ncol(X)) - - return(list(mu.f = mu.f, - Pf=Pf, - iycens=iycens, - X.new=X.new + + return(list( + mu.f = mu.f, + Pf = Pf, + iycens = iycens, + 
X.new = X.new )) - -} \ No newline at end of file +} diff --git a/modules/assim.sequential/R/Helper.functions.R b/modules/assim.sequential/R/Helper.functions.R index 9c3ccfcc31f..f6e1a9473c0 100644 --- a/modules/assim.sequential/R/Helper.functions.R +++ b/modules/assim.sequential/R/Helper.functions.R @@ -1,5 +1,3 @@ - - #' outlier.detector.boxplot #' #' @param X A list of dataframes @@ -9,22 +7,21 @@ #' @export #' @importFrom magrittr %>% #' -outlier.detector.boxplot<-function(X) { - X <- X %>% - purrr::map(function(X.tmp){ - #X.tmp is all the state variables for each element of the list (site) +outlier.detector.boxplot <- function(X) { + X <- X %>% + purrr::map(function(X.tmp) { + # X.tmp is all the state variables for each element of the list (site) X.tmp %>% - purrr::map_dfc(function(col.tmp){ - #naive way of finding the outlier - 3 * IQR + purrr::map_dfc(function(col.tmp) { + # naive way of finding the outlier - 3 * IQR OutVals <- graphics::boxplot(col.tmp, plot = FALSE)$out # if I make this NA then it would stay NA for ever. - #bc adjustment uses X to and comes up with new analysis + # bc adjustment uses X to and comes up with new analysis col.tmp[which((col.tmp %in% OutVals))] <- stats::median(col.tmp, na.rm = TRUE) col.tmp }) - }) - + return(X) } @@ -57,8 +54,7 @@ SDA_control <- debug = FALSE, pause = FALSE, Profiling = FALSE, - OutlierDetection=FALSE) { - + OutlierDetection = FALSE) { return( list( trace = trace, @@ -87,29 +83,29 @@ SDA_control <- #' @return rescaled Matrix #' @export #' @importFrom magrittr %>% -rescaling_stateVars <- function(settings, X, multiply=TRUE) { - - FUN <- ifelse(multiply, .Primitive('*'), .Primitive('/')) - - +rescaling_stateVars <- function(settings, X, multiply = TRUE) { + FUN <- ifelse(multiply, .Primitive("*"), .Primitive("/")) + + # Finding the scaling factors scaling.factors <- settings$state.data.assimilation$state.variables %>% - purrr::map('scaling_factor') %>% + purrr::map("scaling_factor") %>% stats::setNames(settings$state.data.assimilation$state.variables %>% - purrr::map('variable.name')) %>% + purrr::map("variable.name")) %>% purrr::discard(is.null) - - if (length(scaling.factors) == 0) return(X) - - + + if (length(scaling.factors) == 0) { + return(X) + } + + Y <- seq_len(ncol(X)) %>% purrr::map_dfc(function(.x) { - - if(colnames(X)[.x] %in% names(scaling.factors)) { + if (colnames(X)[.x] %in% names(scaling.factors)) { # This function either multiplies or divides - FUN( X[, .x], scaling.factors[[colnames(X)[.x]]] %>% as.numeric()) - }else{ + FUN(X[, .x], scaling.factors[[colnames(X)[.x]]] %>% as.numeric()) + } else { X[, .x] } }) @@ -119,21 +115,23 @@ rescaling_stateVars <- function(settings, X, multiply=TRUE) { colnames(Y) <- colnames(X) } - try({ - # I'm trying to give the new transform variable the attributes of the old one - # X for example has `site` attribute - - attr.X <- names(attributes(X)) %>% - purrr::discard( ~ .x %in% c("dim", "dimnames")) - - if (length(attr.X) > 0) { - for (att in attr.X) { - attr(Y, att) <- attr(X, att) + try( + { + # I'm trying to give the new transform variable the attributes of the old one + # X for example has `site` attribute + + attr.X <- names(attributes(X)) %>% + purrr::discard(~ .x %in% c("dim", "dimnames")) + + if (length(attr.X) > 0) { + for (att in attr.X) { + attr(Y, att) <- attr(X, att) + } } - } - - }, silent = TRUE) - + }, + silent = TRUE + ) + return(Y) } @@ -149,20 +147,20 @@ rescaling_stateVars <- function(settings, X, multiply=TRUE) { #' @export #' @author Dongchen Zhang #' @importFrom 
lubridate %m+% -obs_timestep2timepoint <- function(start.date, end.date, timestep){ +obs_timestep2timepoint <- function(start.date, end.date, timestep) { start.date <- lubridate::ymd(start.date) end.date <- lubridate::ymd(end.date) - if(timestep$unit == "year"){ + if (timestep$unit == "year") { time_points <- seq(start.date, end.date, paste(timestep$num, "year")) - }else if(timestep$unit == "month"){ + } else if (timestep$unit == "month") { time_points <- seq(start.date, end.date, paste(timestep$num, "month")) - }else if(timestep$unit == "week"){ + } else if (timestep$unit == "week") { time_points <- seq(start.date, end.date, paste(timestep$num, "week")) - }else if(timestep$unit == "day"){ + } else if (timestep$unit == "day") { time_points <- seq(start.date, end.date, paste(timestep$num, "day")) - }else{ + } else { PEcAn.logger::logger.error("The Obs_prep functions only support year, month, week, and day as timestep unit!") return(0) } time_points[which(time_points <= end.date & time_points >= start.date)] -} \ No newline at end of file +} diff --git a/modules/assim.sequential/R/Localization.R b/modules/assim.sequential/R/Localization.R index 136f8dfe3b5..3762870dc30 100644 --- a/modules/assim.sequential/R/Localization.R +++ b/modules/assim.sequential/R/Localization.R @@ -1,21 +1,21 @@ ##' @title Local.support ##' @name Local.support ##' @author Hamze Dokoohaki -##' +##' ##' @param Pf Forecast error covariance matrix ##' @param distance.mat is matrix of distances between sites. ##' @param scalef scalef is a numeric value that requires tuning and it controls the shape of the corrolation function -##' @description +##' @description ##' distance.mat matrix doesn't need to be just the physical distance, however it represent a measure of similarity between state variables in different sites. ##' @return It returns a localized covariance matrix by taking a Schur product between Pf and a corrolation function ##' @export -Local.support <-function(Pf, distance.mat, scalef=1){ - #making a matrix as the size of the Pf +Local.support <- function(Pf, distance.mat, scalef = 1) { + # making a matrix as the size of the Pf rho <- exp(((-1) * distance.mat^2) / (2 * scalef^2)) # zeroing out values that are really small - rho[rho <0.001] <-0 - + rho[rho < 0.001] <- 0 + return(Pf * rho) } @@ -23,19 +23,19 @@ Local.support <-function(Pf, distance.mat, scalef=1){ ##' @title simple.local ##' @name simple.local ##' @author Hamze Dokoohaki -##' +##' ##' @param Pf Forecast error covariance matrix ##' @param distance.mat is matrix of distances between sites. 
##' @param scalef scalef is a numeric value that requires tuning and it controls the shape of the corrolation function ##' @description Adopted from Data assimilation for spatio-temporal processes - p250 - Sebastian Reich ##' @return It returns a localized covariance matrix by taking a Schur product between Pf and a corrolation function ##' @export -simple.local <-function(Pf, distance.mat, scalef=2){ +simple.local <- function(Pf, distance.mat, scalef = 2) { ### Data assimilation for spatio-temporal processes - p250 - Sebastian Reich - s <- distance.mat/rloc - s[s <2] <- 1- ((0.5)*(s[s <2])) - s[s>2] <- 0 - return(s*Pf) + s <- distance.mat / rloc + s[s < 2] <- 1 - ((0.5) * (s[s < 2])) + s[s > 2] <- 0 + return(s * Pf) } @@ -43,22 +43,22 @@ simple.local <-function(Pf, distance.mat, scalef=2){ ##' @title piecew.poly.local ##' @name piecew.poly.local ##' @author Hamze Dokoohaki -##' +##' ##' @param Pf Forecast error covariance matrix ##' @param distance.mat is matrix of distances between sites. ##' @param scalef scalef is a numeric value that requires tuning and it controls the shape of the corrolation function ##' @description 5th order piecewise polynomial adopted from Data assimilation for spatio-temporal processes - p250 - Sebastian Reich -##' +##' ##' @return It returns a localized covariance matrix by taking a Schur product between Pf and a corrolation function ##' @export -piecew.poly.local <-function(Pf, distance.mat, scalef=2){ +piecew.poly.local <- function(Pf, distance.mat, scalef = 2) { ### Data assimilation for spatio-temporal processes - p250 - Sebastian Reich - s <- distance.mat/rloc - - s[s < 1] <- 1 - ((5/3)*(s[s < 1])^2)+((5/8)*(s[s < 1])^3) + (0.5*(s[s < 1])^4)-(0.25*(s[s < 1])^5) - - s[s <=2 & s >1] <- (-2/3)*(s[s <=2 & s >1])^(-1)+4-(5*(s[s <=2 & s >1]))+((5/3)*(s[s <=2 & s >1])^2)+((5/8)*(s[s <=2 & s >1])^3)-(0.5*(s[s <=2 & s >1])^4)+((1/12)*(s[s <=2 & s >1])^5) - s[s>2] <- 0 - - return(s*Pf) + s <- distance.mat / rloc + + s[s < 1] <- 1 - ((5 / 3) * (s[s < 1])^2) + ((5 / 8) * (s[s < 1])^3) + (0.5 * (s[s < 1])^4) - (0.25 * (s[s < 1])^5) + + s[s <= 2 & s > 1] <- (-2 / 3) * (s[s <= 2 & s > 1])^(-1) + 4 - (5 * (s[s <= 2 & s > 1])) + ((5 / 3) * (s[s <= 2 & s > 1])^2) + ((5 / 8) * (s[s <= 2 & s > 1])^3) - (0.5 * (s[s <= 2 & s > 1])^4) + ((1 / 12) * (s[s <= 2 & s > 1])^5) + s[s > 2] <- 0 + + return(s * Pf) } diff --git a/modules/assim.sequential/R/Multi_Site_Constructors.R b/modules/assim.sequential/R/Multi_Site_Constructors.R index 88a30bd56fb..ed7a404c5ae 100755 --- a/modules/assim.sequential/R/Multi_Site_Constructors.R +++ b/modules/assim.sequential/R/Multi_Site_Constructors.R @@ -1,8 +1,8 @@ ##' @title Contruct.Pf ##' @name Contruct.Pf ##' @author Hamze Dokoohaki -##' -##' @param site.ids a vector name of site ids. +##' +##' @param site.ids a vector name of site ids. ##' @param var.names vector names of state variable names. ##' @param X a matrix of state variables. In this matrix rows represent ensembles, while columns show the variables for different sites. ##' @param localization.FUN This is the function that performs the localization of the Pf matrix and it returns a localized matrix with the same dimensions. @@ -13,122 +13,121 @@ ##' At the moment, the cov between state variables at blocks defining the cov between two sites are assumed zero. ##' @return It returns the var-cov matrix of state variables at multiple sites. 
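A rough sketch of the distance tapering that Local.support() applies, using an invented three-site distance matrix and a toy forecast covariance; only the taper formula exp(-d^2 / (2 * scalef^2)) and the Schur product come from the function above.

# Illustration only -- the distances and covariance below are made up.
d <- matrix(c(0, 1, 4,
              1, 0, 2,
              4, 2, 0), nrow = 3)          # hypothetical pairwise site distances
Pf <- matrix(0.5, 3, 3) + diag(1, 3)       # toy forecast covariance
scalef <- 1
rho <- exp(-(d^2) / (2 * scalef^2))        # Gaussian correlation taper
rho[rho < 0.001] <- 0                      # drop near-zero correlations
Pf.loc <- Pf * rho                         # element-wise (Schur) product
# Covariance between the two most distant sites (d = 4) is zeroed; the rest shrink.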
##' @importFrom rlang .data -##' @export +##' @export -Contruct.Pf <- function(site.ids, var.names, X, localization.FUN=NULL, t=1, blocked.dis=NULL, ...) { - #setup +Contruct.Pf <- function(site.ids, var.names, X, localization.FUN = NULL, t = 1, blocked.dis = NULL, ...) { + # setup nsite <- length(site.ids) nvariable <- length(var.names) # I will make a big cov matrix and then I will populate it with the cov of each site - pf.matrix <-matrix(0,(nsite*nvariable),(nsite*nvariable)) - + pf.matrix <- matrix(0, (nsite * nvariable), (nsite * nvariable)) + ## This makes the diagonal of our big matrix - first filters out each site, estimates the cov and puts it where it needs to go. - for (site in site.ids){ - #let's find out where this cov (for the current site needs to go in the main cov matrix) - pos.in.matrix <- which(attr(X,"Site") %in% site) - #foreach site let's get the Xs - pf.matrix [pos.in.matrix, pos.in.matrix] <- stats::cov( X [, pos.in.matrix] ,use="complete.obs") + for (site in site.ids) { + # let's find out where this cov (for the current site needs to go in the main cov matrix) + pos.in.matrix <- which(attr(X, "Site") %in% site) + # foreach site let's get the Xs + pf.matrix[pos.in.matrix, pos.in.matrix] <- stats::cov(X[, pos.in.matrix], use = "complete.obs") } - + # This is where we estimate the cov between state variables of different sites - #I put this into a sperate loop so we can have more control over it - site.cov.orders <- expand.grid(site.ids,site.ids) %>% - dplyr::filter( .data$Var1 != .data$Var2) + # I put this into a sperate loop so we can have more control over it + site.cov.orders <- expand.grid(site.ids, site.ids) %>% + dplyr::filter(.data$Var1 != .data$Var2) - for (i in seq_len(nrow(site.cov.orders))){ + for (i in seq_len(nrow(site.cov.orders))) { # first we need to find out where to put it in the big matrix - rows.in.matrix <- which(attr(X,"Site") %in% site.cov.orders[i,1]) - cols.in.matrix <- which(attr(X,"Site") %in% site.cov.orders[i,2]) - #estimated between these two sites - two.site.cov <- stats::cov( X [, c(rows.in.matrix, cols.in.matrix)],use="complete.obs" )[(nvariable+1):(2*nvariable),1:nvariable] - # I'm setting the off diag to zero - two.site.cov [which(lower.tri(two.site.cov, diag = FALSE),TRUE) %>% rbind (which(upper.tri(two.site.cov,FALSE),TRUE))] <- 0 - #putting it back to the main matrix - pf.matrix [rows.in.matrix, cols.in.matrix] <- two.site.cov + rows.in.matrix <- which(attr(X, "Site") %in% site.cov.orders[i, 1]) + cols.in.matrix <- which(attr(X, "Site") %in% site.cov.orders[i, 2]) + # estimated between these two sites + two.site.cov <- stats::cov(X[, c(rows.in.matrix, cols.in.matrix)], use = "complete.obs")[(nvariable + 1):(2 * nvariable), 1:nvariable] + # I'm setting the off diag to zero + two.site.cov[which(lower.tri(two.site.cov, diag = FALSE), TRUE) %>% rbind(which(upper.tri(two.site.cov, FALSE), TRUE))] <- 0 + # putting it back to the main matrix + pf.matrix[rows.in.matrix, cols.in.matrix] <- two.site.cov } - + # if I see that there is a localization function passed to this - I run it by the function. if (!is.null(localization.FUN) && nsite > 1) { - pf.matrix.out <- localization.FUN (pf.matrix, blocked.dis, ...) - } else{ + pf.matrix.out <- localization.FUN(pf.matrix, blocked.dis, ...) 
+ } else { pf.matrix.out <- pf.matrix } - + # adding labels to rownames and colnames - labelss <- paste0(rep(var.names, length(site.ids)) %>% as.character(),"(", - rep(site.ids, each=length(var.names)),")") - - colnames(pf.matrix.out ) <-labelss - rownames(pf.matrix.out ) <-labelss - - return(pf.matrix.out) + labelss <- paste0( + rep(var.names, length(site.ids)) %>% as.character(), "(", + rep(site.ids, each = length(var.names)), ")" + ) + + colnames(pf.matrix.out) <- labelss + rownames(pf.matrix.out) <- labelss + return(pf.matrix.out) } ##' @title Construct.R ##' @name Construct.R ##' @author Hamze Dokoohaki -##' -##' @param site.ids a vector name of site ids +##' +##' @param site.ids a vector name of site ids ##' @param var.names vector names of state variable names ##' @param obs.t.mean list of vector of means for the time t for different sites. ##' @param obs.t.cov list of list of cov for the time for different sites. -##’ -##' +## ’ +##' ##' @description Make sure that both lists are named with siteids. -##' +##' ##' @return This function returns a list with Y and R ready to be sent to the analysis functions. ##' @export -Construct.R<-function(site.ids, var.names, obs.t.mean, obs.t.cov){ - +Construct.R <- function(site.ids, var.names, obs.t.mean, obs.t.cov) { # keeps Hs of sites - site.specific.Rs <-list() + site.specific.Rs <- list() # nsite <- length(site.ids) # nvariable <- length(var.names) - Y<-c() - - for (site in site.ids){ - choose <- sapply(var.names, agrep, x=names(obs.t.mean[[site]]), max=1, USE.NAMES = FALSE) %>% unlist + Y <- c() + + for (site in site.ids) { + choose <- sapply(var.names, agrep, x = names(obs.t.mean[[site]]), max = 1, USE.NAMES = FALSE) %>% unlist() # if there is no obs for this site - if(length(choose) == 0){ - next; - }else{ + if (length(choose) == 0) { + next + } else { Y <- c(Y, unlist(obs.t.mean[[site]][choose])) - #collecting them - if (ncol(obs.t.mean[[site]]) > 1) - { - site.specific.Rs <- c(site.specific.Rs, list(as.matrix(obs.t.cov[[site]][choose,choose]))) + # collecting them + if (ncol(obs.t.mean[[site]]) > 1) { + site.specific.Rs <- c(site.specific.Rs, list(as.matrix(obs.t.cov[[site]][choose, choose]))) } else { site.specific.Rs <- c(site.specific.Rs, list(as.matrix(obs.t.cov[[site]][choose]))) } } - #make block matrix out of our collection - R <- Matrix::bdiag(site.specific.Rs) %>% as.matrix() - } + # make block matrix out of our collection + R <- Matrix::bdiag(site.specific.Rs) %>% as.matrix() + } - return(list(Y=Y, R=R)) + return(list(Y = Y, R = R)) } ##' @title block_matrix ##' @name block_matrix ##' @author Guy J. Abel -##' +##' ##' @param x Vector of numbers to identify each block. ##' @param b Numeric value for the size of the blocks within the matrix ordered depending on byrow ##' @param byrow logical value. If FALSE (the default) the blocks are filled by columns, otherwise the blocks in the matrix are filled by rows. ##' @param dimnames Character string of name attribute for the basis of the block matrix. If NULL a vector of the same length of b provides the basis of row and column names.#'. -##’ -##' +## ’ +##' ##' @description This function is adopted from migest package. -##' +##' ##' @return Returns a matrix with block sizes determined by the b argument. Each block is filled with the same value taken from x. 
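A minimal, hypothetical illustration of the block-diagonal assembly done in Construct.R(): each site's observation covariance sits on the diagonal and sites share no covariance, mirroring the Matrix::bdiag() call above. The numbers are invented.

# Invented values; only the bdiag() pattern is taken from Construct.R above.
R1 <- matrix(c(0.40, 0.10,
               0.10, 0.30), nrow = 2)   # site with two observed variables
R2 <- matrix(0.20, 1, 1)                # site with a single observed variable
R  <- as.matrix(Matrix::bdiag(list(R1, R2)))
# R is 3 x 3: R1 and R2 on the diagonal, zeros in the between-site blocks,
# matching the order in which Y concatenates the per-site observation means.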
##' @export -block_matrix <- function (x = NULL, b = NULL, byrow = FALSE, dimnames = NULL) { +block_matrix <- function(x = NULL, b = NULL, byrow = FALSE, dimnames = NULL) { n <- length(b) bb <- rep(1:n, times = b) dn <- NULL @@ -165,41 +164,41 @@ block_matrix <- function (x = NULL, b = NULL, byrow = FALSE, dimnames = NULL) { ##' @title Construct.H.multisite ##' @name Construct.H.multisite ##' @author Hamze -##' -##' @param site.ids a vector name of site ids +##' +##' @param site.ids a vector name of site ids ##' @param var.names vector names of state variable names -##' @param obs.t.mean list of vector of means for the time t for different sites. -##' +##' @param obs.t.mean list of vector of means for the time t for different sites. +##' ##' @description This function is makes the blocked mapping function. -##' +##' ##' @return Returns a matrix with block sizes determined by the b argument. Each block is filled with the same value taken from x. ##' @export -Construct.H.multisite <- function(site.ids, var.names, obs.t.mean){ - #we first create a matrix containing site.ids, var.names, observations, and the index of observations across obs.mean. - site.ids.matrix <- rep(site.ids, each = length(var.names))#this is replicated site.ids. The number of replication depends on how many vars in total. - var.names.matrix <- rep(var.names, length(site.ids))#this is the state variable names from settings. - H.pre.matrix <- data.frame(site.ids.matrix, var.names.matrix, NA, NA) %>% `colnames<-` (c("site.id", "var.name", "obs", "obs.ind")) +Construct.H.multisite <- function(site.ids, var.names, obs.t.mean) { + # we first create a matrix containing site.ids, var.names, observations, and the index of observations across obs.mean. + site.ids.matrix <- rep(site.ids, each = length(var.names)) # this is replicated site.ids. The number of replication depends on how many vars in total. + var.names.matrix <- rep(var.names, length(site.ids)) # this is the state variable names from settings. + H.pre.matrix <- data.frame(site.ids.matrix, var.names.matrix, NA, NA) %>% `colnames<-`(c("site.id", "var.name", "obs", "obs.ind")) obs.ind <- 1 - #loop over site.ids * var.names + # loop over site.ids * var.names for (i in seq_along(site.ids.matrix)) { - site.id <- H.pre.matrix[i,]$site.id - var.name <- H.pre.matrix[i,]$var.name - site.ind <- which(names(obs.t.mean)==site.id) - if(length(site.ind) > 0){ + site.id <- H.pre.matrix[i, ]$site.id + var.name <- H.pre.matrix[i, ]$var.name + site.ind <- which(names(obs.t.mean) == site.id) + if (length(site.ind) > 0) { obs <- obs.t.mean[[site.ind]] - var.ind <- which(names(obs)==var.name) - if(length(var.ind) > 0){ - #write observation and the index into the matrix. - H.pre.matrix[i,]$obs <- obs[[var.ind]] - H.pre.matrix[i,]$obs.ind <- obs.ind + var.ind <- which(names(obs) == var.name) + if (length(var.ind) > 0) { + # write observation and the index into the matrix. + H.pre.matrix[i, ]$obs <- obs[[var.ind]] + H.pre.matrix[i, ]$obs.ind <- obs.ind obs.ind <- obs.ind + 1 } } } - #convert the matrix into H matrix. - H <- matrix(0, max(H.pre.matrix$obs.ind, na.rm=T), dim(H.pre.matrix)[1]) + # convert the matrix into H matrix. 
+ H <- matrix(0, max(H.pre.matrix$obs.ind, na.rm = T), dim(H.pre.matrix)[1]) for (i in seq_along(site.ids.matrix)) { - H[H.pre.matrix[i,]$obs.ind, i] <- 1 + H[H.pre.matrix[i, ]$obs.ind, i] <- 1 } H } @@ -207,20 +206,20 @@ Construct.H.multisite <- function(site.ids, var.names, obs.t.mean){ ##' @title construct_nimble_H ##' @name construct_nimble_H ##' @author Dongchen Zhang -##' -##' @param site.ids a vector name of site ids +##' +##' @param site.ids a vector name of site ids ##' @param var.names vector names of state variable names ##' @param obs.t list of vector of means for the time t for different sites. ##' @param pft.path physical path to the pft.csv file. ##' @param by criteria, it supports by variable, site, pft, all, and single Q. -##' +##' ##' @description This function is an upgrade to the Construct.H.multisite function which provides the index by different criteria. -##' -##' @return Returns one vector containing index for which Q to be estimated for which variable, +##' +##' @return Returns one vector containing index for which Q to be estimated for which variable, ##' and the other vector gives which state variable has which observation (= element.W.Data). ##' @export -construct_nimble_H <- function(site.ids, var.names, obs.t, pft.path = NULL, by = "single"){ - if(by == "pft" | by == "block_pft_var" & is.null(pft.path)){ +construct_nimble_H <- function(site.ids, var.names, obs.t, pft.path = NULL, by = "single") { + if (by == "pft" | by == "block_pft_var" & is.null(pft.path)) { PEcAn.logger::logger.info("please provide pft path.") return(0) } @@ -248,7 +247,7 @@ construct_nimble_H <- function(site.ids, var.names, obs.t, pft.path = NULL, by = Ind[which(total_pft == pft.names[i])] <- i } } else if (by == "block_pft_var") { - #by pft + # by pft pft <- utils::read.csv(pft.path) rownames(pft) <- pft$site total_site_id <- rep(site.ids, each = length(var.names)) @@ -258,13 +257,13 @@ construct_nimble_H <- function(site.ids, var.names, obs.t, pft.path = NULL, by = for (i in seq_along(pft.names)) { Ind_pft[which(total_pft == pft.names[i])] <- i } - #by var + # by var total_var_name <- rep(var.names, length(site.ids)) Ind_var <- rep(0, dim(H)[2]) for (i in seq_along(var.names)) { Ind_var[which(total_var_name == var.names[i])] <- i } - #by site + # by site total_site_id <- rep(site.ids, each = length(var.names)) Ind_site <- rep(0, dim(H)[2]) for (i in seq_along(site.ids)) { @@ -285,14 +284,17 @@ construct_nimble_H <- function(site.ids, var.names, obs.t, pft.path = NULL, by = return(0) } if (by == "block_pft_var") { - return(list(Ind_pft = Ind_pft[which(apply(H, 2, sum) == 1)], - Ind_site = Ind_site[which(apply(H, 2, sum) == 1)], - Ind_var = Ind_var[which(apply(H, 2, sum) == 1)], - H.ind = which(apply(H, 2, sum) == 1))) + return(list( + Ind_pft = Ind_pft[which(apply(H, 2, sum) == 1)], + Ind_site = Ind_site[which(apply(H, 2, sum) == 1)], + Ind_var = Ind_var[which(apply(H, 2, sum) == 1)], + H.ind = which(apply(H, 2, sum) == 1) + )) } else { - return(list(Q.ind = Ind[which(apply(H, 2, sum) == 1)], - H.ind = which(apply(H, 2, sum) == 1), - H.matrix = H)) + return(list( + Q.ind = Ind[which(apply(H, 2, sum) == 1)], + H.ind = which(apply(H, 2, sum) == 1), + H.matrix = H + )) } - -} \ No newline at end of file +} diff --git a/modules/assim.sequential/R/Nimble_codes.R b/modules/assim.sequential/R/Nimble_codes.R index cc63a7e2968..283be008047 100644 --- a/modules/assim.sequential/R/Nimble_codes.R +++ b/modules/assim.sequential/R/Nimble_codes.R @@ -8,12 +8,12 @@ #' @param X state var #' #' @export 
-y_star_create <- nimbleFunction( +y_star_create <- nimbleFunction( run = function(X = double(1)) { returnType(double(1)) - + y_star <- X - + return(y_star) } ) @@ -21,14 +21,14 @@ y_star_create <- nimbleFunction( #' Additive Log Ratio transform #' @param y state var #' @export -alr <- nimbleFunction( +alr <- nimbleFunction( run = function(y = double(1)) { returnType(double(1)) - + y[y < .00001] <- .000001 - + y_out <- log(y[1:(length(y) - 1)] / y[length(y)]) - + return(y_out) } ) @@ -36,12 +36,12 @@ alr <- nimbleFunction( #' inverse of ALR transform #' @param alr state var #' @export -inv.alr <- nimbleFunction( +inv.alr <- nimbleFunction( run = function(alr = double(1)) { returnType(double(1)) - - y = exp(c(alr, 0)) / sum(exp(c(alr, 0))) - + + y <- exp(c(alr, 0)) / sum(exp(c(alr, 0))) + return(y) } ) @@ -58,8 +58,9 @@ rwtmnorm <- nimbleFunction( prec = double(2), wt = double(0)) { returnType(double(1)) - if (n != 1) + if (n != 1) { nimPrint("rwtmnorm only allows n = 1; using n = 1.") + } Prob <- rmnorm_chol(n = 1, mean, chol(prec), prec_param = TRUE) * wt return(Prob) @@ -80,7 +81,7 @@ dwtmnorm <- nimbleFunction( wt = double(0), log = integer(0, default = 0)) { returnType(double(0)) - + logProb <- dmnorm_chol( x = x, @@ -89,7 +90,7 @@ dwtmnorm <- nimbleFunction( prec_param = TRUE, log = TRUE ) * wt - + if (log) { return((logProb)) } else { @@ -101,141 +102,140 @@ dwtmnorm <- nimbleFunction( registerDistributions(list(dwtmnorm = list( BUGSdist = "dwtmnorm(mean, prec, wt)", types = c( - 'value = double(1)', - 'mean = double(1)', - 'prec = double(2)', - 'wt = double(0)' + "value = double(1)", + "mean = double(1)", + "prec = double(2)", + "wt = double(0)" ) ))) -#tobit2space.model------------------------------------------------------------------------------------------------ +# tobit2space.model------------------------------------------------------------------------------------------------ #' Fit tobit prior to ensemble members #' @format TBD #' @export tobit2space.model <- nimbleCode({ for (i in 1:N) { - y.censored[i, 1:J] ~ dwtmnorm(mean = muf[1:J], - prec = pf[1:J, 1:J], - wt = wts[i]) # + y.censored[i, 1:J] ~ dwtmnorm( + mean = muf[1:J], + prec = pf[1:J, 1:J], + wt = wts[i] + ) # for (j in 1:J) { y.ind[i, j] ~ dinterval(y.censored[i, j], 0) } } - + muf[1:J] ~ dmnorm(mean = mu_0[1:J], prec = Sigma_0[1:J, 1:J]) pf[1:J, 1:J] ~ dwish(S = lambda_0[1:J, 1:J], df = nu_0) - }) -#tobit.model--This does the GEF ---------------------------------------------------- +# tobit.model--This does the GEF ---------------------------------------------------- #' TWEnF #' @export #' @format TBD -tobit.model <- nimbleCode({ - q[1:N, 1:N] ~ dwish(R = aq[1:N, 1:N], df = bq) ## aq and bq are estimated over time +tobit.model <- nimbleCode({ + q[1:N, 1:N] ~ dwish(R = aq[1:N, 1:N], df = bq) ## aq and bq are estimated over time Q[1:N, 1:N] <- inverse(q[1:N, 1:N]) X.mod[1:N] ~ dmnorm(muf[1:N], prec = pf[1:N, 1:N]) ## Model Forecast ##muf and pf are assigned from ensembles - + ## add process error - X[1:N] ~ dmnorm(X.mod[1:N], prec = q[1:N, 1:N]) - - #observation operator - + X[1:N] ~ dmnorm(X.mod[1:N], prec = q[1:N, 1:N]) + + # observation operator + if (direct_TRUE) { y_star[X_direct_start:X_direct_end] <- y_star_create(X[X_direct_start:X_direct_end]) - } else{ - + } else { + } - + if (fcomp_TRUE) { y_star[X_fcomp_start:X_fcomp_end] <- alr(X[X_fcomp_model_start:X_fcomp_model_end]) - } else{ - + } else { + } - + if (pft2total_TRUE) { y_star[X_pft2total_start] <- sum(X[X_pft2total_model_start:X_pft2total_model_end]) - } 
else{ - + } else { + } - - #likelihood + + # likelihood y.censored[1:YN] ~ dmnorm(y_star[1:YN], prec = r[1:YN, 1:YN]) for (i in 1:YN) { y.ind[i] ~ dinterval(y.censored[i], 0) } - - }) -#tobit.model--This does the GEF for multi Site ------------------------------------- +# tobit.model--This does the GEF for multi Site ------------------------------------- #' multisite TWEnF #' @format TBD #' @export -GEF.MultiSite.Nimble <- nimbleCode({ +GEF.MultiSite.Nimble <- nimbleCode({ # X model X.mod[1:N] ~ dmnorm(mean = muf[1:N], cov = pf[1:N, 1:N]) if (q.type == 1 | q.type == 2) { - if (q.type == 1) {#single Q + if (q.type == 1) { # single Q # Sorting out qs qq ~ dgamma(aq, bq) ## aq and bq are estimated over time q[1:YN, 1:YN] <- qq * diag(YN) - } else if (q.type == 2) {#site Q + } else if (q.type == 2) { # site Q # Sorting out qs q[1:YN, 1:YN] ~ dwish(R = aq[1:YN, 1:YN], df = bq) ## aq and bq are estimated over time } for (i in 1:nH) { - tmpX[i] <- X.mod[H[i]] + tmpX[i] <- X.mod[H[i]] Xs[i] <- tmpX[i] } ## add process error to x model but just for the state variables that we have data and H knows who - X[1:YN] ~ dmnorm(Xs[1:YN], prec = q[1:YN, 1:YN]) + X[1:YN] ~ dmnorm(Xs[1:YN], prec = q[1:YN, 1:YN]) ## Likelihood y.censored[1:YN] ~ dmnorm(X[1:YN], prec = r[1:YN, 1:YN]) - + # #puting the ones that they don't have q in Xall - They come from X.model # # If I don't have data on then then their q is not identifiable, so we use the same Xs as Xmodel - if(nNotH > 0){ + if (nNotH > 0) { for (j in 1:nNotH) { - tmpXmod[j] <- X.mod[NotH[j]] + tmpXmod[j] <- X.mod[NotH[j]] Xall[NotH[j]] <- tmpXmod[j] } } - } else if (q.type == 3) {#Vector Q + } else if (q.type == 3) { # Vector Q for (i in 1:YN) { - #sample Q. + # sample Q. q[i] ~ dgamma(shape = aq[i], rate = bq[i]) if (length(H) == 1) { - X[i] ~ dnorm(X.mod[H], sd = 1/sqrt(q[i])) - #likelihood - y.censored[i] ~ dnorm(X[i], sd = 1/sqrt(r[i])) + X[i] ~ dnorm(X.mod[H], sd = 1 / sqrt(q[i])) + # likelihood + y.censored[i] ~ dnorm(X[i], sd = 1 / sqrt(r[i])) } else { - #sample latent variable X. - X[i] ~ dnorm(X.mod[H[i]], sd = 1/sqrt(q[i])) - #likelihood - y.censored[i] ~ dnorm(X[i], sd = 1/sqrt(r[i, i])) + # sample latent variable X. + X[i] ~ dnorm(X.mod[H[i]], sd = 1 / sqrt(q[i])) + # likelihood + y.censored[i] ~ dnorm(X[i], sd = 1 / sqrt(r[i, i])) } } - } else if (q.type == 4) {#Wishart Q - #if it's a Wishart Q. - #sample Q. + } else if (q.type == 4) { # Wishart Q + # if it's a Wishart Q. + # sample Q. q[1:YN, 1:YN] ~ dwishart(R = aq[1:YN, 1:YN], df = bq) - #sample latent variable X. + # sample latent variable X. 
for (i in 1:YN) { Xs[i] <- X.mod[H[i]] } X[1:YN] ~ dmnorm(Xs[1:YN], prec = q[1:YN, 1:YN]) - #likelihood + # likelihood y.censored[1:YN] ~ dmnorm(X[1:YN], prec = r[1:YN, 1:YN]) } }) -#sampler_toggle------------------------------------------------------------------------------------------------ +# sampler_toggle------------------------------------------------------------------------------------------------ #' sampler toggling #' @export #' @param model model @@ -246,22 +246,26 @@ sampler_toggle <- nimbleFunction( contains = sampler_BASE, setup = function(model, mvSaved, target, control) { type <- control$type - nested_sampler_name <- paste0('sampler_', type) - control_new <- nimbleOptions('MCMCcontrolDefaultList') + nested_sampler_name <- paste0("sampler_", type) + control_new <- nimbleOptions("MCMCcontrolDefaultList") control_new[[names(control)]] <- control nested_sampler_list <- nimbleFunctionList(sampler_BASE) nested_sampler_list[[1]] <- - do.call(nested_sampler_name, - list(model, mvSaved, target, control_new)) + do.call( + nested_sampler_name, + list(model, mvSaved, target, control_new) + ) toggle <- 1 }, run = function() { - if (toggle == 1) + if (toggle == 1) { nested_sampler_list[[1]]$run() + } }, methods = list( - reset = function() + reset = function() { nested_sampler_list[[1]]$reset() + } ) ) @@ -271,60 +275,64 @@ sampler_toggle <- nimbleFunction( #' @param target thing being targetted #' @param control unused #' @export -conj_wt_wishart_sampler <- nimbleFunction( +conj_wt_wishart_sampler <- nimbleFunction( contains = sampler_BASE, setup = function(model, mvSaved, target, control) { targetAsScalar <- model$expandNodeNames(target, returnScalarComponents = TRUE) d <- sqrt(length(targetAsScalar)) - + dep_dmnorm_nodeNames <- model$getDependencies(target, self = F, includeData = T) N_dep_dmnorm <- length(dep_dmnorm_nodeNames) - - dep_dmnorm_nodeSize <- d #ragged problem refered to on github? - + + dep_dmnorm_nodeSize <- d # ragged problem refered to on github? 
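A toy sketch (invented numbers) of the sufficient statistics this weighted-Wishart sampler accumulates: each dependent multivariate-normal node contributes its weighted outer product to the Wishart scale and its weight to the degrees of freedom, which is what the contribution_R / contribution_df loop below computes.

# Two weighted ensemble members, two state variables; illustration only.
x  <- rbind(c(1.0, 2.0),
            c(0.5, 1.5))                 # values of the dependent dmnorm nodes
mu <- rbind(c(0.8, 1.8),
            c(0.8, 1.8))                 # their means
wt <- c(0.6, 0.4)                        # per-member weights
contribution_R  <- Reduce(`+`, lapply(1:2, function(i) wt[i] * tcrossprod(x[i, ] - mu[i, ])))
contribution_df <- sum(wt)
# These feed the Wishart draw a few lines below, which combines the prior
# parameters with the contribution terms.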
+ calcNodes <- model$getDependencies(target) - + dep_dmnorm_values <- array(0, c(N_dep_dmnorm, dep_dmnorm_nodeSize)) dep_dmnorm_mean <- array(0, c(N_dep_dmnorm, dep_dmnorm_nodeSize)) dep_dmnorm_wt <- numeric(N_dep_dmnorm) - }, run = function() { - #Find Prior Values + # Find Prior Values prior_R <- model$getParam(target[1], "R") prior_df <- model$getParam(target[1], "df") - - #Loop over multivariate normal + + # Loop over multivariate normal for (iDep in 1:N_dep_dmnorm) { dep_dmnorm_values[iDep, 1:dep_dmnorm_nodeSize] <<- - model$getParam(dep_dmnorm_nodeNames[iDep], - "value") + model$getParam( + dep_dmnorm_nodeNames[iDep], + "value" + ) dep_dmnorm_mean[iDep, 1:dep_dmnorm_nodeSize] <<- - model$getParam(dep_dmnorm_nodeNames[iDep], - "mean") + model$getParam( + dep_dmnorm_nodeNames[iDep], + "mean" + ) dep_dmnorm_wt[iDep] <<- - model$getParam(dep_dmnorm_nodeNames[iDep], - "wt") - + model$getParam( + dep_dmnorm_nodeNames[iDep], + "wt" + ) } - - #Calculate contribution parameters for wishart based on multivariate normal + + # Calculate contribution parameters for wishart based on multivariate normal contribution_R <<- nimArray(0, dim = nimC(d, d)) contribution_df <<- 0 for (iDep in 1:N_dep_dmnorm) { tmp_diff <<- sqrt(dep_dmnorm_wt[iDep]) * asRow(dep_dmnorm_values[iDep, 1:d] - dep_dmnorm_mean[iDep, 1:d]) - + contribution_R <<- contribution_R + t(tmp_diff) %*% tmp_diff - + contribution_df <<- contribution_df + dep_dmnorm_wt[iDep] } - - #Draw a new value based on prior and contribution parameters + + # Draw a new value based on prior and contribution parameters newValue <- rwish_chol( 1, @@ -333,8 +341,8 @@ conj_wt_wishart_sampler <- nimbleFunction( scale_param = 0 ) model[[target]] <<- newValue - - #Calculate probability + + # Calculate probability calculate(model, calcNodes) nimCopy( from = model, @@ -345,41 +353,38 @@ conj_wt_wishart_sampler <- nimbleFunction( ) }, methods = list( - reset = function () { + reset = function() { } ) ) -GEF_singleobs_nimble <- nimbleCode({ - +GEF_singleobs_nimble <- nimbleCode({ # Sorting out qs qq ~ dgamma(aq, bq) ## aq and bq are estimated over time q[1, 1] <- qq * diag(YN) - + # # X model X.mod[1:N] ~ dmnorm(mean = muf[1:N], cov = pf[1:N, 1:N]) # # got rid of for loop no need when nH = 1 Xs[1] <- X.mod[H] - + ## add process error to x model but just for the state variables that we have data and H knows who - #changed model from dmnorm to dnorm to accomodate when only assimilating 1 obs - X[1] ~ dnorm(Xs[1], q[1, 1]) - + # changed model from dmnorm to dnorm to accomodate when only assimilating 1 obs + X[1] ~ dnorm(Xs[1], q[1, 1]) + ## Likelihood - #changed model from dmnorm to dnorm to accomodate when only assimilating 1 obs + # changed model from dmnorm to dnorm to accomodate when only assimilating 1 obs y.censored[1] ~ dnorm(X[1], r[1, 1]) - - #puting the ones that they don't have q in Xall - They come from X.model + + # puting the ones that they don't have q in Xall - They come from X.model # If I don't have data on then then their q is not identifiable, so we use the same Xs as Xmodel for (j in 1:nNotH) { - tmpXmod[j] <- X.mod[NotH[j]] + tmpXmod[j] <- X.mod[NotH[j]] Xall[NotH[j]] <- tmpXmod[j] } # # # #These are the one that they have data and their q can be estimated - #got rid of for loop no need when nH = 1 - Xall[H] <- X[1] - + # got rid of for loop no need when nH = 1 + Xall[H] <- X[1] + y.ind[1] ~ dinterval(y.censored[1], 0) - - -}) \ No newline at end of file +}) diff --git a/modules/assim.sequential/R/Prep_OBS_SDA.R 
b/modules/assim.sequential/R/Prep_OBS_SDA.R index ff048b3e0ea..394336b182c 100644 --- a/modules/assim.sequential/R/Prep_OBS_SDA.R +++ b/modules/assim.sequential/R/Prep_OBS_SDA.R @@ -10,228 +10,241 @@ #' @importFrom magrittr %>% #' @export #' -Prep_OBS_SDA <- function(settings, out_dir, AGB_dir, Search_Window=30){ - ####working on downloading LAI and extraction AGB - - #getting site ID +Prep_OBS_SDA <- function(settings, out_dir, AGB_dir, Search_Window = 30) { + #### working on downloading LAI and extraction AGB + + # getting site ID observations <- c() for (i in 1:length(settings)) { - observations <- c(observations,settings[[i]]$run$site$id) + observations <- c(observations, settings[[i]]$run$site$id) } - - #query site info - bety <- dplyr::src_postgres(dbname = settings$database$bety$dbname, - host = settings$database$bety$host, - user = settings$database$bety$user, - password = settings$database$bety$password) + + # query site info + bety <- dplyr::src_postgres( + dbname = settings$database$bety$dbname, + host = settings$database$bety$host, + user = settings$database$bety$user, + password = settings$database$bety$password + ) con <- bety$con - - #grab site info + + # grab site info site_ID <- observations suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) + ids = site_ID, .con = con + )) suppressWarnings(qry_results <- PEcAn.DB::db.query(site_qry, con)) - site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) - - #convert year to YEARDOY - #setting up start and end date based on settings specs - start_date <- as.Date(settings$state.data.assimilation$start.date, tz="UTC") - end_date <- as.Date(settings$state.data.assimilation$end.date, tz="UTC") - - #converting from date to YEAR-DOY(example: 2012-01-01 to 2012001) - start_YEARDOY <- paste0(lubridate::year(start_date),sprintf("%03d",lubridate::yday(start_date))) - end_YEARDOY <- paste0(lubridate::year(end_date),sprintf("%03d",lubridate::yday(end_date))) - - #assigning ncores - if(length(observations) <= 10){ + site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone + ) + + # convert year to YEARDOY + # setting up start and end date based on settings specs + start_date <- as.Date(settings$state.data.assimilation$start.date, tz = "UTC") + end_date <- as.Date(settings$state.data.assimilation$end.date, tz = "UTC") + + # converting from date to YEAR-DOY(example: 2012-01-01 to 2012001) + start_YEARDOY <- paste0(lubridate::year(start_date), sprintf("%03d", lubridate::yday(start_date))) + end_YEARDOY <- paste0(lubridate::year(end_date), sprintf("%03d", lubridate::yday(end_date))) + + # assigning ncores + if (length(observations) <= 10) { ncores <- length(observations) - }else{ncores <- 10} - - #parallelly download LAI and LAI std bands - round.length <- ceiling(length(observations)/ncores) - - #initialize lai and lai.sd + } else { + ncores <- 10 + } + + # parallelly download LAI and LAI std bands + round.length <- ceiling(length(observations) / ncores) + + # initialize lai and lai.sd lai_data <- c() lai_sd <- c() - + for (i in 1:round.length) { - #prepare for parallel - start = (1+((i-1)*ncores)) - end = start+ncores-1 - - #grab temp site_info for current loop - temp_site_info <- 
furrr::future_pmap(list(site_info, start, end), function(X, start, end){X[start:end]}) - - #download LAI data and LAI std - lai_data <- rbind(PEcAn.data.remote::call_MODIS(outdir = NULL, var = "LAI", site_info = site_info, product_dates = c(start_YEARDOY, end_YEARDOY), - run_parallel = TRUE, ncores = ncores, product = "MOD15A2H", band = "Lai_500m", - package_method = "MODISTools", QC_filter = TRUE, progress = FALSE), lai_data) - - lai_sd = rbind(PEcAn.data.remote::call_MODIS(outdir = NULL, var = "LAI", site_info = site_info, product_dates = c(start_YEARDOY, end_YEARDOY), - run_parallel = TRUE, ncores = ncores, product = "MOD15A2H", band = "LaiStdDev_500m", - package_method = "MODISTools", QC_filter = TRUE, progress = FALSE), lai_sd) + # prepare for parallel + start <- (1 + ((i - 1) * ncores)) + end <- start + ncores - 1 + + # grab temp site_info for current loop + temp_site_info <- furrr::future_pmap(list(site_info, start, end), function(X, start, end) { + X[start:end] + }) + + # download LAI data and LAI std + lai_data <- rbind(PEcAn.data.remote::call_MODIS( + outdir = NULL, var = "LAI", site_info = site_info, product_dates = c(start_YEARDOY, end_YEARDOY), + run_parallel = TRUE, ncores = ncores, product = "MOD15A2H", band = "Lai_500m", + package_method = "MODISTools", QC_filter = TRUE, progress = FALSE + ), lai_data) + + lai_sd <- rbind(PEcAn.data.remote::call_MODIS( + outdir = NULL, var = "LAI", site_info = site_info, product_dates = c(start_YEARDOY, end_YEARDOY), + run_parallel = TRUE, ncores = ncores, product = "MOD15A2H", band = "LaiStdDev_500m", + package_method = "MODISTools", QC_filter = TRUE, progress = FALSE + ), lai_sd) } - - #format LAI data by "Site_ID", "Date", "Median", "qc", "SD" - LAI <- cbind(lai_data[,c(5, 2, 9, 10)], lai_sd$data) + + # format LAI data by "Site_ID", "Date", "Median", "qc", "SD" + LAI <- cbind(lai_data[, c(5, 2, 9, 10)], lai_sd$data) colnames(LAI) <- c("Site_ID", "Date", "Median", "qc", "SD") - - #filter by qc band - LAI <- LAI[-(which(LAI$qc=="001")),] - - #compute peak LAI per year per site - peak_lai = data.frame() - - #loop over each site + + # filter by qc band + LAI <- LAI[-(which(LAI$qc == "001")), ] + + # compute peak LAI per year per site + peak_lai <- data.frame() + + # loop over each site for (i in 1:length(unique(LAI$Site_ID))) { site_ID <- unique(LAI$Site_ID)[i] - site_LAI <- LAI[which(LAI$Site_ID==site_ID),] - - #loop over each year + site_LAI <- LAI[which(LAI$Site_ID == site_ID), ] + + # loop over each year for (j in 1:length(unique(lubridate::year(site_LAI$Date)))) { - site_LAI_year <- site_LAI[which(lubridate::year(site_LAI$Date)==unique(lubridate::year(site_LAI$Date))[j]),] - - #calculate the difference between target date and record date - target_date <- lubridate::date(as.Date(paste0(as.character(unique(lubridate::year(site_LAI$Date))[j]),"/07/15"))) - diff_days <- abs(lubridate::days(lubridate::date(site_LAI_year$Date)-lubridate::date(target_date))@day) - - #find records within search window - max <- site_LAI_year[which(diff_days<=Search_Window),] - - #if no record in search window - if(nrow(max)==0){ - peak <- data.frame(Site_ID=site_ID, Date=paste0("Year_", unique(lubridate::year(site_LAI$Date))[j]), Median=NA, SD=NA) - }else{#we do have records - peak <- data.frame(Site_ID=site_ID, Date=paste0("Year_", unique(lubridate::year(site_LAI$Date))[j]), Median=mean(max$Median), SD=max(max$SD)) + site_LAI_year <- site_LAI[which(lubridate::year(site_LAI$Date) == unique(lubridate::year(site_LAI$Date))[j]), ] + + # calculate the difference 
between target date and record date + target_date <- lubridate::date(as.Date(paste0(as.character(unique(lubridate::year(site_LAI$Date))[j]), "/07/15"))) + diff_days <- abs(lubridate::days(lubridate::date(site_LAI_year$Date) - lubridate::date(target_date))@day) + + # find records within search window + max <- site_LAI_year[which(diff_days <= Search_Window), ] + + # if no record in search window + if (nrow(max) == 0) { + peak <- data.frame(Site_ID = site_ID, Date = paste0("Year_", unique(lubridate::year(site_LAI$Date))[j]), Median = NA, SD = NA) + } else { # we do have records + peak <- data.frame(Site_ID = site_ID, Date = paste0("Year_", unique(lubridate::year(site_LAI$Date))[j]), Median = mean(max$Median), SD = max(max$SD)) } peak_lai <- rbind(peak_lai, peak) } } - #lower boundaries for LAI std - peak_lai$SD[peak_lai$SD < 0.66] = 0.66 - - #extracting AGB data - med_agb_data <- PEcAn.data.remote::extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean", AGB_dir, product_dates=lubridate::year(start_date):lubridate::year(end_date))[[1]] - sdev_agb_data <- PEcAn.data.remote::extract.LandTrendr.AGB(site_info, "stdv", buffer = NULL, fun = "mean", AGB_dir, product_dates=lubridate::year(start_date):lubridate::year(end_date))[[1]] - - #formatting AGB data - ndates = colnames(med_agb_data)[-c(1:2)] # getting dates - med_agb_data$Site_Name = as.character(med_agb_data$Site_Name, stringsAsFactors = FALSE) - med_agb_data = reshape2::melt(med_agb_data, id.vars = "Site_ID", measure.vars = colnames(med_agb_data)[-c(1:2)]) - - sdev_agb_data$Site_Name = as.character(sdev_agb_data$Site_Name, stringsAsFactors = FALSE) - sdev_agb_data = reshape2::melt(sdev_agb_data, id.vars = "Site_ID", measure.vars = colnames(sdev_agb_data)[-c(1:2)]) - - agb_data = as.data.frame(cbind(med_agb_data, sdev_agb_data$value)) - names(agb_data) = c("Site_ID", "Date", "Median", "SD") - agb_data$Date = as.character(agb_data$Date, stringsAsFactors = FALSE) - - - - #making obs.mean and obs.cov - peak_lai$Site_ID = as.numeric(as.character(peak_lai$Site_ID, stringsAsFactors = F)) - peak_lai$Date = as.character(peak_lai$Date, stringsAsFactors = F) - observed_vars = c("AbvGrndWood", "LAI") - observed_data = merge(agb_data, peak_lai, by = c("Site_ID", "Date"), all = T) - names(observed_data) = c("Site_ID", "Date", "med_agb", "sdev_agb", "med_lai", "sdev_lai") - - observed_data = observed_data[order(observed_data$Date),] - dates = sort(unique(observed_data$Date)) - + # lower boundaries for LAI std + peak_lai$SD[peak_lai$SD < 0.66] <- 0.66 + + # extracting AGB data + med_agb_data <- PEcAn.data.remote::extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean", AGB_dir, product_dates = lubridate::year(start_date):lubridate::year(end_date))[[1]] + sdev_agb_data <- PEcAn.data.remote::extract.LandTrendr.AGB(site_info, "stdv", buffer = NULL, fun = "mean", AGB_dir, product_dates = lubridate::year(start_date):lubridate::year(end_date))[[1]] + + # formatting AGB data + ndates <- colnames(med_agb_data)[-c(1:2)] # getting dates + med_agb_data$Site_Name <- as.character(med_agb_data$Site_Name, stringsAsFactors = FALSE) + med_agb_data <- reshape2::melt(med_agb_data, id.vars = "Site_ID", measure.vars = colnames(med_agb_data)[-c(1:2)]) + + sdev_agb_data$Site_Name <- as.character(sdev_agb_data$Site_Name, stringsAsFactors = FALSE) + sdev_agb_data <- reshape2::melt(sdev_agb_data, id.vars = "Site_ID", measure.vars = colnames(sdev_agb_data)[-c(1:2)]) + + agb_data <- as.data.frame(cbind(med_agb_data, sdev_agb_data$value)) + 
names(agb_data) <- c("Site_ID", "Date", "Median", "SD") + agb_data$Date <- as.character(agb_data$Date, stringsAsFactors = FALSE) + + + + # making obs.mean and obs.cov + peak_lai$Site_ID <- as.numeric(as.character(peak_lai$Site_ID, stringsAsFactors = F)) + peak_lai$Date <- as.character(peak_lai$Date, stringsAsFactors = F) + observed_vars <- c("AbvGrndWood", "LAI") + observed_data <- merge(agb_data, peak_lai, by = c("Site_ID", "Date"), all = T) + names(observed_data) <- c("Site_ID", "Date", "med_agb", "sdev_agb", "med_lai", "sdev_lai") + + observed_data <- observed_data[order(observed_data$Date), ] + dates <- sort(unique(observed_data$Date)) + # create the obs.mean list --> this needs to be adjusted to work with load.data in the future (via hackathon) - obs.mean = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, med_agb = observed_data$med_agb, med_lai = observed_data$med_lai) - obs.mean$date = as.character(obs.mean$date, stringsAsFactors = FALSE) - + obs.mean <- data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, med_agb = observed_data$med_agb, med_lai = observed_data$med_lai) + obs.mean$date <- as.character(obs.mean$date, stringsAsFactors = FALSE) + obs.mean <- split(obs.mean, obs.mean$date) - + # change the dates to be middle of the year date.obs <- strsplit(names(obs.mean), "_") %>% - purrr::map_chr(~paste0(.x[2], "/07/15")) + purrr::map_chr(~ paste0(.x[2], "/07/15")) obs.mean <- purrr::map( names(obs.mean), - function(namesl){ + function(namesl) { split( obs.mean[[namesl]], - obs.mean[[namesl]]$site_id) %>% - purrr::map( - ~.x[3:4] %>% - stats::setNames(c("AbvGrndWood", "LAI")) %>% - `row.names<-`(NULL)) + obs.mean[[namesl]]$site_id + ) %>% + purrr::map( + ~ .x[3:4] %>% + stats::setNames(c("AbvGrndWood", "LAI")) %>% + `row.names<-`(NULL) + ) } ) %>% stats::setNames(date.obs) - - #remove NA data as this will crash the SDA. Removes rown numbers (may not be nessesary) - names = date.obs + + # remove NA data as this will crash the SDA. Removes rown numbers (may not be nessesary) + names <- date.obs for (name in names) { for (site in names(obs.mean[[name]])) { - na_index = which(!(is.na(obs.mean[[ name]][[site]]))) - colnames = names(obs.mean[[name]][[site]]) - #we have some records that are not NA - if (length(na_index) > 0) - { - obs.mean[[name]][[site]] = obs.mean[[name]][[site]][na_index] - }else if(length(na_index) == 0){#we don't have any observations (they are all NAs), we then just remove the whole site + na_index <- which(!(is.na(obs.mean[[name]][[site]]))) + colnames <- names(obs.mean[[name]][[site]]) + # we have some records that are not NA + if (length(na_index) > 0) { + obs.mean[[name]][[site]] <- obs.mean[[name]][[site]][na_index] + } else if (length(na_index) == 0) { # we don't have any observations (they are all NAs), we then just remove the whole site obs.mean[[name]][[site]] <- NULL } } } - + # fillers are 0's for the covariance matrix. This will need to change for differing size matrixes when more variables are added in. 
# filler_0 = as.data.frame(matrix(0, ncol = length(observed_vars), nrow = nrow(observed_data))) # names(filler_0) = paste0("h", seq_len(length(observed_vars))) - + # create obs.cov dataframe -->list by date - obs.cov = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, sdev_agb = observed_data$sdev_agb, sdev_lai = observed_data$sdev_lai)#, filler_0) - obs.cov$date = as.character(obs.cov$date, stringsAsFactors = F) - + obs.cov <- data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, sdev_agb = observed_data$sdev_agb, sdev_lai = observed_data$sdev_lai) # , filler_0) + obs.cov$date <- as.character(obs.cov$date, stringsAsFactors = F) + obs.cov <- split(obs.cov, obs.cov$date) obs.cov <- purrr::map( names(obs.cov), - function(namesl){ + function(namesl) { purrr::map( split( obs.cov[[namesl]], - obs.cov[[namesl]]$site_id), - ~.x[3:4]^2 %>% - unlist %>% - diag(nrow = 2, ncol = 2)) + obs.cov[[namesl]]$site_id + ), + ~ .x[3:4]^2 %>% + unlist() %>% + diag(nrow = 2, ncol = 2) + ) } ) %>% stats::setNames(date.obs) - - - names = date.obs + + + names <- date.obs for (name in names) { for (site in names(obs.cov[[name]])) { - #if we don't have any observation (diag(cov)==NA) then we remove the whole site - if(length(which(!is.na(diag(obs.cov[[name]][[site]])))) == 0) - { + # if we don't have any observation (diag(cov)==NA) then we remove the whole site + if (length(which(!is.na(diag(obs.cov[[name]][[site]])))) == 0) { obs.cov[[name]][[site]] <- NULL next } - #else we do have some records - bad = which(apply(obs.cov[[name]][[site]], 2, function(x) any(is.na(x))) == TRUE) - if (length(bad) > 0) - { - obs.cov[[name]][[site]] = obs.cov[[name]][[site]][,-bad] - if (is.null(dim(obs.cov[[name]][[site]]))) - { - obs.cov[[name]][[site]] = obs.cov[[name]][[site]][-bad] + # else we do have some records + bad <- which(apply(obs.cov[[name]][[site]], 2, function(x) any(is.na(x))) == TRUE) + if (length(bad) > 0) { + obs.cov[[name]][[site]] <- obs.cov[[name]][[site]][, -bad] + if (is.null(dim(obs.cov[[name]][[site]]))) { + obs.cov[[name]][[site]] <- obs.cov[[name]][[site]][-bad] } else { - obs.cov[[name]][[site]] = obs.cov[[name]][[site]][-bad,] + obs.cov[[name]][[site]] <- obs.cov[[name]][[site]][-bad, ] } } } } - save(peak_lai, file = paste0(out_dir, '/peak_lai.Rdata')) - save(obs.mean, file = paste0(out_dir, '/obs_mean.Rdata')) - save(obs.cov, file = paste0(out_dir, '/obs_cov.Rdata')) - list(cov=obs.cov, mean=obs.mean) + save(peak_lai, file = paste0(out_dir, "/peak_lai.Rdata")) + save(obs.mean, file = paste0(out_dir, "/obs_mean.Rdata")) + save(obs.cov, file = paste0(out_dir, "/obs_cov.Rdata")) + list(cov = obs.cov, mean = obs.mean) } diff --git a/modules/assim.sequential/R/SDA_OBS_Assembler.R b/modules/assim.sequential/R/SDA_OBS_Assembler.R index 315d1fdfb44..339e1ec4f21 100644 --- a/modules/assim.sequential/R/SDA_OBS_Assembler.R +++ b/modules/assim.sequential/R/SDA_OBS_Assembler.R @@ -14,236 +14,243 @@ #' settings <- PEcAn.settings::read.settings(settings_dir) #' OBS <- SDA_OBS_Assembler(settings) #' } -#' -SDA_OBS_Assembler <- function(settings){ - #extract Obs_Prep object from settings. +#' +SDA_OBS_Assembler <- function(settings) { + # extract Obs_Prep object from settings. Obs_Prep <- settings$state.data.assimilation$Obs_Prep - - #check if we want to proceed the free run without any observations. + + # check if we want to proceed the free run without any observations. 
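A minimal sketch (numbers invented) of the per-site entries the obs.cov list above ends up holding: the AGB and LAI standard deviations squared and placed on a 2 x 2 diagonal.

# Illustration only; real values come from the LandTrendr AGB and MODIS LAI data above.
sdev_agb <- 12.0   # hypothetical AGB standard deviation
sdev_lai <- 0.8    # hypothetical LAI standard deviation
diag(c(sdev_agb, sdev_lai)^2, nrow = 2, ncol = 2)
# A 2 x 2 matrix with 144 and 0.64 on the diagonal and zeros elsewhere.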
if (as.logical(settings$state.data.assimilation$free.run)) { PEcAn.logger::logger.info("Create obs for free run!") - #calculate time points. + # calculate time points. time_points <- obs_timestep2timepoint(Obs_Prep$start.date, Obs_Prep$end.date, Obs_Prep$timestep) - - #generate obs.mean and obs.cov with NULL filled. - obs.mean = vector("list", length(time_points)) %>% `names<-`(time_points) - obs.cov = vector("list", length(time_points)) %>% `names<-`(time_points) - - #save files. + + # generate obs.mean and obs.cov with NULL filled. + obs.mean <- vector("list", length(time_points)) %>% `names<-`(time_points) + obs.cov <- vector("list", length(time_points)) %>% `names<-`(time_points) + + # save files. save(obs.mean, file = file.path(Obs_Prep$outdir, "Rdata", "obs.mean.Rdata")) save(obs.cov, file = file.path(Obs_Prep$outdir, "Rdata", "obs.cov.Rdata")) return(list(obs.mean = obs.mean, obs.cov = obs.cov)) } - - #prepare site_info offline, because we need to submit this to server remotely, which might not support the Bety connection. + + # prepare site_info offline, because we need to submit this to server remotely, which might not support the Bety connection. site_info <- settings$run %>% - purrr::map('site')%>% - purrr::map(function(site.list){ - #conversion from string to number + purrr::map("site") %>% + purrr::map(function(site.list) { + # conversion from string to number site.list$lat <- as.numeric(site.list$lat) site.list$lon <- as.numeric(site.list$lon) - list(site_id=site.list$id, lat=site.list$lat, lon=site.list$lon, site_name=site.list$name) - })%>% - dplyr::bind_rows() %>% + list(site_id = site.list$id, lat = site.list$lat, lon = site.list$lon, site_name = site.list$name) + }) %>% + dplyr::bind_rows() %>% as.list() - - #convert from timestep to time points - if (length(Obs_Prep$timestep)>0){ + + # convert from timestep to time points + if (length(Obs_Prep$timestep) > 0) { diff_dates <- FALSE - }else{ + } else { diff_dates <- TRUE } - - #The order of obs.mean and obs.cov objects are relying on the order of how you organize the Obs_Prep section. + + # The order of obs.mean and obs.cov objects are relying on the order of how you organize the Obs_Prep section. OBS <- timestep <- time_points_all <- list() var_ind <- var <- c() - #test for loop + # test for loop for (i in seq_along(Obs_Prep)) { - #detect if current section is for different variable preparation function or not. - if (names(Obs_Prep)[i] %in% c("timestep", "start.date", "end.date", "outdir")){ + # detect if current section is for different variable preparation function or not. + if (names(Obs_Prep)[i] %in% c("timestep", "start.date", "end.date", "outdir")) { next - }else{ + } else { PEcAn.logger::logger.info(paste("Entering", names(Obs_Prep)[i])) fun_name <- names(Obs_Prep)[i] var_ind <- c(var_ind, i) } - - #if we are dealing with different timestep for different variables. - if (diff_dates){ + + # if we are dealing with different timestep for different variables. 
+ if (diff_dates) { timestep[[i]] <- Obs_Prep[[i]]$timestep - if (!exists("timestep")){ + if (!exists("timestep")) { PEcAn.logger::logger.error(paste0("Please provide timestep under each variable if you didn't provide timestep under Obs_Prep section!")) return(0) } time_points <- obs_timestep2timepoint(Obs_Prep[[i]]$start.date, Obs_Prep[[i]]$end.date, timestep[[i]]) - }else{ + } else { timestep[[i]] <- Obs_Prep$timestep time_points <- obs_timestep2timepoint(Obs_Prep$start.date, Obs_Prep$end.date, timestep[[i]]) } - #Search function inside the data.remote package - if(is.character(try(obs_prep_fun <- getExportedValue("PEcAn.data.remote", paste0(fun_name, "_prep")), silent = T))){ - #Search function inside the data.land package, this is explicit for Soilgrids prep function. - if(is.character(try(obs_prep_fun <- getExportedValue("PEcAn.data.land", paste0(fun_name, "_prep")), silent = T))){ + # Search function inside the data.remote package + if (is.character(try(obs_prep_fun <- getExportedValue("PEcAn.data.remote", paste0(fun_name, "_prep")), silent = T))) { + # Search function inside the data.land package, this is explicit for Soilgrids prep function. + if (is.character(try(obs_prep_fun <- getExportedValue("PEcAn.data.land", paste0(fun_name, "_prep")), silent = T))) { PEcAn.logger::logger.info("Couldn't find the function: ", paste0(fun_name, "_prep"), ". Please Check it!") return(0) } } - - #grab function argument names + + # grab function argument names fun_args <- methods::formalArgs(obs_prep_fun) - - #create args list given function argument names + + # create args list given function argument names args <- list() - #fill in args with what we have so far. + # fill in args with what we have so far. Ind_list_match <- c() for (j in seq_along(fun_args)) { - if (!is.character(try(variable <- get(fun_args[j]), silent = T))){ - if (typeof(variable)!="closure"){ + if (!is.character(try(variable <- get(fun_args[j]), silent = T))) { + if (typeof(variable) != "closure") { args[[fun_args[j]]] <- variable Ind_list_match <- c(Ind_list_match, j) } } } - - #fill in more args in the Obs_Prep object. + + # fill in more args in the Obs_Prep object. Temp_unlist <- unlist(Obs_Prep) for (j in seq_along(fun_args)) { - #if we already assigned then jump to the next. - if (j %in% Ind_list_match){ + # if we already assigned then jump to the next. + if (j %in% Ind_list_match) { next } - - #store complex string operation into temp variables. + + # store complex string operation into temp variables. fun_args_temp <- stringr::str_replace_all(fun_args[j], "[^[:alnum:]]", "") obj_exist_temp <- stringr::str_replace_all(names(Temp_unlist), "[^[:alnum:]]", "") - - #match current names of the unlisted array from Obs_Prep list - #with the string pattern of the names of the function argument + + # match current names of the unlisted array from Obs_Prep list + # with the string pattern of the names of the function argument Ind_single_match <- grep(fun_args_temp, obj_exist_temp, ignore.case = T) - - #if we have multiple matches, then search the string pattern by the fun_name. + + # if we have multiple matches, then search the string pattern by the fun_name. 
funName_args_temp <- stringr::str_replace_all(paste0(fun_name, fun_args[j]), "[^[:alnum:]]", "") - if (length(Ind_single_match)>1){ + if (length(Ind_single_match) > 1) { Ind_single_match <- grep(funName_args_temp, obj_exist_temp, ignore.case = T) } args[[fun_args[j]]] <- as.character(Temp_unlist[Ind_single_match]) } - #clean list (remove any item with length of zero) + # clean list (remove any item with length of zero) cleaned_args <- list() for (j in seq_along(args)) { - if (length(args[[j]])!=0){ + if (length(args[[j]]) != 0) { cleaned_args[[j]] <- args[[j]] } } names(cleaned_args) <- names(args)[seq_along(cleaned_args)] - #function calls + # function calls Temp <- do.call(obs_prep_fun, cleaned_args) OBS[[i]] <- Temp[[1]] time_points_all[[i]] <- Temp[[2]] var <- c(var, Temp[[3]]) } - - #combine different time points from different variables together + + # combine different time points from different variables together time_points <- sort(unique(do.call("c", time_points_all))) - - #Create obs.mean and obs.cov + + # Create obs.mean and obs.cov obs.mean <- obs.cov <- list() - new_diag <- function(vec){ - if (length(vec) == 1){ + new_diag <- function(vec) { + if (length(vec) == 1) { return(vec) - }else{ + } else { return(diag(vec)) } } - - #over time + + # over time for (i in seq_along(time_points)) { t <- time_points[i] dat_all_var <- sd_all_var <- matrix(NA, length(site_info$site_id), length(var)) %>% `colnames<-`(var) - #over variable + # over variable for (j in seq_along(OBS)) { - if (paste0(t, "_", var[j]) %in% colnames(OBS[[j]])){ - dat_all_var[,j] <- OBS[[j]][,paste0(t, "_", var[j])] - sd_all_var[,j] <- OBS[[j]][,paste0(t, "_SD")]^2 #convert from SD to var - }else{ - dat_all_var[,j] <- NA - sd_all_var[,j] <- NA + if (paste0(t, "_", var[j]) %in% colnames(OBS[[j]])) { + dat_all_var[, j] <- OBS[[j]][, paste0(t, "_", var[j])] + sd_all_var[, j] <- OBS[[j]][, paste0(t, "_SD")]^2 # convert from SD to var + } else { + dat_all_var[, j] <- NA + sd_all_var[, j] <- NA } } - #over site + # over site site_dat_var <- site_sd_var <- list() for (j in 1:dim(dat_all_var)[1]) { - site_dat_var[[j]] <- dat_all_var[j,] %>% matrix(1,length(var)) %>% data.frame %>% `colnames<-`(var) - site_sd_var[[j]] <- new_diag(sd_all_var[j,]) + site_dat_var[[j]] <- dat_all_var[j, ] %>% + matrix(1, length(var)) %>% + data.frame() %>% + `colnames<-`(var) + site_sd_var[[j]] <- new_diag(sd_all_var[j, ]) } obs.mean[[i]] <- site_dat_var %>% purrr::set_names(site_info$site_id) obs.cov[[i]] <- site_sd_var %>% purrr::set_names(site_info$site_id) } names(obs.mean) <- names(obs.cov) <- time_points - #remove NA data as this will crash the SDA. - #for soilgrids specifically, calculate the cov multiplier by the sqrt of length of total time steps. - if("TotSoilCarb" %in% var){ + # remove NA data as this will crash the SDA. + # for soilgrids specifically, calculate the cov multiplier by the sqrt of length of total time steps. 
+ if ("TotSoilCarb" %in% var) { Soilgrids_multiplier <- length(time_points_all[[which(var == "TotSoilCarb")]]) } for (i in seq_along(obs.mean)) { for (j in seq_along(obs.mean[[i]])) { - if (sum(is.na(obs.mean[[i]][[j]]))){ + if (sum(is.na(obs.mean[[i]][[j]]))) { na_ind <- which(is.na(obs.mean[[i]][[j]])) - #obs.mean[[i]][[j]] <- obs.mean[[i]][[j]][-na_ind] - if(length(obs.mean[[i]][[j]]) == 1){ + # obs.mean[[i]][[j]] <- obs.mean[[i]][[j]][-na_ind] + if (length(obs.mean[[i]][[j]]) == 1) { obs.cov[[i]][[j]] <- obs.cov[[i]][[j]][-na_ind] - }else{ + } else { obs.cov[[i]][[j]] <- obs.cov[[i]][[j]][-na_ind, -na_ind] } - obs.mean[[i]][[j]] <- obs.mean[[i]][[j]][-na_ind] + obs.mean[[i]][[j]] <- obs.mean[[i]][[j]][-na_ind] } SoilC_ind <- which(names(obs.mean[[i]][[j]]) == "TotSoilCarb") - if (length(SoilC_ind) > 0){ - if(length(obs.mean[[i]][[j]]) > 1){ + if (length(SoilC_ind) > 0) { + if (length(obs.mean[[i]][[j]]) > 1) { diag(obs.cov[[i]][[j]])[SoilC_ind] <- diag(obs.cov[[i]][[j]])[SoilC_ind] * Soilgrids_multiplier - }else{ + } else { obs.cov[[i]][[j]][SoilC_ind] <- obs.cov[[i]][[j]][SoilC_ind] * Soilgrids_multiplier } } } } - - #fill in empty element within obs.mean and obs.cov lists. - #if time steps for all obs are the same - if(length(unique(unlist(timestep))) == 2){ - if(diff_dates){ + + # fill in empty element within obs.mean and obs.cov lists. + # if time steps for all obs are the same + if (length(unique(unlist(timestep))) == 2) { + if (diff_dates) { timepoints_fill <- - purrr::pmap(list(timestep, - Obs_Prep[var_ind] %>% purrr::map(~.x$start.date), - Obs_Prep[var_ind] %>% purrr::map(~.x$end.date)), - function(var_timestep, var_start_date, var_end_date){ - obs_timestep2timepoint(var_start_date, var_end_date, var_timestep) - }) %>% - purrr::map(function(all_timepoints){ + purrr::pmap( + list( + timestep, + Obs_Prep[var_ind] %>% purrr::map(~ .x$start.date), + Obs_Prep[var_ind] %>% purrr::map(~ .x$end.date) + ), + function(var_timestep, var_start_date, var_end_date) { + obs_timestep2timepoint(var_start_date, var_end_date, var_timestep) + } + ) %>% + purrr::map(function(all_timepoints) { all_timepoints[which(!all_timepoints %in% time_points)] - }) %>% - do.call(what = "c") %>% + }) %>% + do.call(what = "c") %>% unique() - }else{ - timepoints_fill <- timestep %>% - purrr::map(function(var_timestep){ + } else { + timepoints_fill <- timestep %>% + purrr::map(function(var_timestep) { obs_timestep2timepoint(Obs_Prep$start.date, Obs_Prep$end.date, var_timestep) - }) %>% - purrr::map(function(all_timepoints){ + }) %>% + purrr::map(function(all_timepoints) { all_timepoints[which(!all_timepoints %in% time_points)] - }) %>% - do.call(what = "c") %>% + }) %>% + do.call(what = "c") %>% unique() } - - if(length(timepoints_fill)>0){ + + if (length(timepoints_fill) > 0) { obs_mean_fill <- obs_cov_fill <- list() time_points_start_end <- sort(c(timepoints_fill, time_points)) for (i in seq_along(time_points_start_end)) { - if(time_points_start_end[i] %in% timepoints_fill){ + if (time_points_start_end[i] %in% timepoints_fill) { obs_mean_fill[[as.character(time_points_start_end[i])]] <- list(NULL) obs_cov_fill[[as.character(time_points_start_end[i])]] <- list(NULL) - }else{ + } else { obs_mean_fill[[as.character(time_points_start_end[i])]] <- obs.mean[[as.character(time_points_start_end[i])]] obs_cov_fill[[as.character(time_points_start_end[i])]] <- obs.cov[[as.character(time_points_start_end[i])]] } @@ -252,12 +259,12 @@ SDA_OBS_Assembler <- function(settings){ obs.cov <- obs_cov_fill } } - - #create folder in 
case it doesn't exist. - if(!file.exists(file.path(Obs_Prep$outdir, "Rdata"))){ + + # create folder in case it doesn't exist. + if (!file.exists(file.path(Obs_Prep$outdir, "Rdata"))) { dir.create(file.path(Obs_Prep$outdir, "Rdata")) } save(obs.mean, file = file.path(Obs_Prep$outdir, "Rdata", "obs.mean.Rdata")) save(obs.cov, file = file.path(Obs_Prep$outdir, "Rdata", "obs.cov.Rdata")) list(obs.mean = obs.mean, obs.cov = obs.cov) -} \ No newline at end of file +} diff --git a/modules/assim.sequential/R/assess.params.R b/modules/assim.sequential/R/assess.params.R index b81eda4b93d..2fb0e504f1c 100644 --- a/modules/assim.sequential/R/assess.params.R +++ b/modules/assim.sequential/R/assess.params.R @@ -1,154 +1,159 @@ ##' @title assess.params ##' @name assess.params ##' @author Michael Dietze and Ann Raiho \email{dietze@@bu.edu} -##' +##' ##' @param dat MCMC output ##' @param Xt ensemble output matrix ##' @param wts ensemble weights ##' @param mu_f_TRUE muf before tobit2space ##' @param P_f_TRUE Pf before tobit2space -##' +##' ##' @description Assessing parameter estimations after mapping model output to tobit space -##' +##' ##' @return make plots ##' @export -##' +##' -assessParams <- function(dat, Xt, wts = NULL, mu_f_TRUE = NULL, P_f_TRUE = NULL){ - #mu_f_TRUE and P_f_TRUE used for simulation +assessParams <- function(dat, Xt, wts = NULL, mu_f_TRUE = NULL, P_f_TRUE = NULL) { + # mu_f_TRUE and P_f_TRUE used for simulation + - #* page 6 looks more like I expected, but I’m not sure how we’re getting negative variances - + #* In general, the first 3 pages of pairs plots doesn’t seem to be producing anything too absurd — there’s no estimates going off to large negative values and nothing TOO far from the sample mean. That said, I do find some of the estimates to be surprising (e.g. 
why is the posterior for mu12 at t=2 lower than the sample mean when the ensemble shouldn’t include any zeros) - - imuf <- grep("muf", colnames(dat)) + + imuf <- grep("muf", colnames(dat)) muf <- colMeans(dat[, imuf]) - iPf <- grep("pf", colnames(dat)) - Pf <- solve(matrix(colMeans(dat[, iPf]),ncol(Xt),ncol(Xt))) + iPf <- grep("pf", colnames(dat)) + Pf <- solve(matrix(colMeans(dat[, iPf]), ncol(Xt), ncol(Xt))) #--- This is where the localization needs to happen - After imputing Pf - - if(is.null(wts)){ - mufT <- apply(Xt,2,mean) + + if (is.null(wts)) { + mufT <- apply(Xt, 2, mean) PfT <- stats::cov(Xt) - }else{ - mufT <- apply(Xt,2,stats::weighted.mean,wts) - PfT <- stats::cov.wt(Xt,wts)$cov + } else { + mufT <- apply(Xt, 2, stats::weighted.mean, wts) + PfT <- stats::cov.wt(Xt, wts)$cov } - - eigen_save <- matrix(NA,nrow=nrow(dat),ncol=ncol(Xt)) - for(rr in 1:nrow(dat)) { - eigen_save[rr,] <- eigen(solve(matrix(dat[rr, iPf],ncol(Xt),ncol(Xt))))$values + + eigen_save <- matrix(NA, nrow = nrow(dat), ncol = ncol(Xt)) + for (rr in 1:nrow(dat)) { + eigen_save[rr, ] <- eigen(solve(matrix(dat[rr, iPf], ncol(Xt), ncol(Xt))))$values } - - - graphics::par(mfrow=c(2,3)) - apply(eigen_save,2,graphics::plot,typ='l',main='Eigen Value') - for(i in seq(1,length(iPf),7)){ - graphics::plot(dat[,iPf[i]],typ='l',main='Variance of Pf') + + + graphics::par(mfrow = c(2, 3)) + apply(eigen_save, 2, graphics::plot, typ = "l", main = "Eigen Value") + for (i in seq(1, length(iPf), 7)) { + graphics::plot(dat[, iPf[i]], typ = "l", main = "Variance of Pf") } - for(i in 1:length(muf)){ - graphics::plot(dat[,imuf[i]],typ='l',main=paste('muf',i)) - graphics::abline(h=mufT[i],col='red') + for (i in 1:length(muf)) { + graphics::plot(dat[, imuf[i]], typ = "l", main = paste("muf", i)) + graphics::abline(h = mufT[i], col = "red") } - + Xt_use <- Xt - rownames(Xt_use)<-colnames(Xt_use) <- NULL - - corrplot::corrplot(stats::cov2cor((PfT)),main='correlation T') - corrplot::corrplot(stats::cov2cor(stats::cov(Xt_use)),main='correlation estimate') - - mufCI <- apply(dat[,imuf],2,stats::quantile,c(0.025,0.975)) - mufTCI <- apply(Xt,2,stats::quantile,c(0.025,0.975)) - - graphics::par(mfrow=c(1,1)) - graphics::plot(mufT,muf,pch=19,ylim=range(mufCI),xlim=range(mufTCI)) - graphics::abline(a=0,b=1,lty=2) - for(i in 1:length(muf)){ - graphics::lines(mufTCI[,i],rep(as.vector(muf)[i],2),col=i,lwd=2) - graphics::lines(rep(as.vector(mufT)[i],2),mufCI[,i],col=i,lwd=2) + rownames(Xt_use) <- colnames(Xt_use) <- NULL + + corrplot::corrplot(stats::cov2cor((PfT)), main = "correlation T") + corrplot::corrplot(stats::cov2cor(stats::cov(Xt_use)), main = "correlation estimate") + + mufCI <- apply(dat[, imuf], 2, stats::quantile, c(0.025, 0.975)) + mufTCI <- apply(Xt, 2, stats::quantile, c(0.025, 0.975)) + + graphics::par(mfrow = c(1, 1)) + graphics::plot(mufT, muf, pch = 19, ylim = range(mufCI), xlim = range(mufTCI)) + graphics::abline(a = 0, b = 1, lty = 2) + for (i in 1:length(muf)) { + graphics::lines(mufTCI[, i], rep(as.vector(muf)[i], 2), col = i, lwd = 2) + graphics::lines(rep(as.vector(mufT)[i], 2), mufCI[, i], col = i, lwd = 2) } - - #muf mufT scatter plot - graphics::par(mfrow=c(2,2)) - for(i in 1:(length(imuf)-1)){ - graphics::plot(dat[,i],dat[,i+1],xlab=paste('mu', i),ylab=paste('mu', i+1)) - #points(mu_f_TRUE[i],mu_f_TRUE[i+1],cex=3,col=2,pch=18) - graphics::points(muf[i],muf[i+1],cex=3,col=3,pch=19) - graphics::points(mufT[i],mufT[i+1],cex=3,col=4,pch=20) + + # muf mufT scatter plot + graphics::par(mfrow = c(2, 2)) + for (i in 
1:(length(imuf) - 1)) { + graphics::plot(dat[, i], dat[, i + 1], xlab = paste("mu", i), ylab = paste("mu", i + 1)) + # points(mu_f_TRUE[i],mu_f_TRUE[i+1],cex=3,col=2,pch=18) + graphics::points(muf[i], muf[i + 1], cex = 3, col = 3, pch = 19) + graphics::points(mufT[i], mufT[i + 1], cex = 3, col = 4, pch = 20) } graphics::plot.new() - graphics::legend("topleft",legend=c("post","sampT"),col=3:4,pch = 19:20) - #legend("topleft",legend=c("TRUE","post","sampT"),col=2:4,pch = 18:20) - - graphics::boxplot(Xt,xlab='State Variables',ylab='X') - graphics::points(muf,col='red',pch=19) - graphics::legend("topleft",legend=c("muf"),col='red',pch = 19) - - #cor(dat[,1:6]) - - #iPf <- grep("pf", colnames(dat)) - #Pf <- matrix(colMeans(dat[, iPf]),ncol(Xt),ncol(Xt)) - - PfCI <- apply(dat[,iPf],2,stats::quantile,c(0.025,0.975)) + graphics::legend("topleft", legend = c("post", "sampT"), col = 3:4, pch = 19:20) + # legend("topleft",legend=c("TRUE","post","sampT"),col=2:4,pch = 18:20) + + graphics::boxplot(Xt, xlab = "State Variables", ylab = "X") + graphics::points(muf, col = "red", pch = 19) + graphics::legend("topleft", legend = c("muf"), col = "red", pch = 19) + + # cor(dat[,1:6]) + + # iPf <- grep("pf", colnames(dat)) + # Pf <- matrix(colMeans(dat[, iPf]),ncol(Xt),ncol(Xt)) + + PfCI <- apply(dat[, iPf], 2, stats::quantile, c(0.025, 0.975)) diag.stopper <- diag(length(muf)) - - graphics::par(mfrow=c(1,1)) - graphics::plot(PfT,Pf,ylim=range(PfCI),pch=19,xlab='Pf Ensemble (True)',ylab='Pf Estimated (tobit2space)') - graphics::abline(0,1,lty=2) - for(i in 1:length(Pf)){ - graphics::lines(rep(as.vector(PfT)[i],2),PfCI[,i],col=i,lwd=2) - if(diag.stopper[i]==1){ - graphics::points(PfT[i],Pf[i],cex=2,pch = 7) + + graphics::par(mfrow = c(1, 1)) + graphics::plot(PfT, Pf, ylim = range(PfCI), pch = 19, xlab = "Pf Ensemble (True)", ylab = "Pf Estimated (tobit2space)") + graphics::abline(0, 1, lty = 2) + for (i in 1:length(Pf)) { + graphics::lines(rep(as.vector(PfT)[i], 2), PfCI[, i], col = i, lwd = 2) + if (diag.stopper[i] == 1) { + graphics::points(PfT[i], Pf[i], cex = 2, pch = 7) } } - graphics::legend('topleft','variance',pch = 7,cex=2) - - diag.stopper2 <- diag.stopper+1 + graphics::legend("topleft", "variance", pch = 7, cex = 2) + + diag.stopper2 <- diag.stopper + 1 diag(diag.stopper2) <- 0 - - graphics::plot(stats::cov2cor(PfT)[which(diag.stopper2==1)], - stats::cov2cor(Pf)[which(diag.stopper2==1)],pch=19, - ylab = 'Pf', xlab = 'Pft', main = 'Correlations') - graphics::abline(a=0,b=1,lty=2) - - corrCI <- apply(dat[,iPf[which(diag.stopper2!=0)]],2,stats::quantile,c(0.025,0.975)) - - graphics::par(mfrow=c(1,1)) - graphics::plot(PfT[which(diag.stopper2!=0)],Pf[which(diag.stopper2!=0)], - ylim=range(corrCI),pch=19,xlab='Pf Ensemble (True)', - ylab='Pf Estimated (tobit2space)', - main='Non-Diagonal Covariance') - graphics::abline(a=0,b=1,lty=2) - for(i in 1:length(Pf)){ - if(diag.stopper2[i]==1){ - graphics::lines(rep(as.vector(PfT)[i],2),PfCI[,i],col=i,lwd=2) + + graphics::plot(stats::cov2cor(PfT)[which(diag.stopper2 == 1)], + stats::cov2cor(Pf)[which(diag.stopper2 == 1)], + pch = 19, + ylab = "Pf", xlab = "Pft", main = "Correlations" + ) + graphics::abline(a = 0, b = 1, lty = 2) + + corrCI <- apply(dat[, iPf[which(diag.stopper2 != 0)]], 2, stats::quantile, c(0.025, 0.975)) + + graphics::par(mfrow = c(1, 1)) + graphics::plot(PfT[which(diag.stopper2 != 0)], Pf[which(diag.stopper2 != 0)], + ylim = range(corrCI), pch = 19, xlab = "Pf Ensemble (True)", + ylab = "Pf Estimated (tobit2space)", + main = "Non-Diagonal 
Covariance" + ) + graphics::abline(a = 0, b = 1, lty = 2) + for (i in 1:length(Pf)) { + if (diag.stopper2[i] == 1) { + graphics::lines(rep(as.vector(PfT)[i], 2), PfCI[, i], col = i, lwd = 2) } } - - graphics::par(mfrow=c(1,1)) - graphics::plot(diag(PfT)-diag(Pf),xlab='State Variable',pch=19, - cex=2,main='Which variance changed the most?') - - - #var.change <- data.frame(mufT = signif(colMeans(Xt),digits=2),muf=signif(muf,digits=2),abs.change.var = abs(diag(PfT)-diag(Pf))) - #var.change[order(var.change$abs.change.var),] - + + graphics::par(mfrow = c(1, 1)) + graphics::plot(diag(PfT) - diag(Pf), + xlab = "State Variable", pch = 19, + cex = 2, main = "Which variance changed the most?" + ) + + + # var.change <- data.frame(mufT = signif(colMeans(Xt),digits=2),muf=signif(muf,digits=2),abs.change.var = abs(diag(PfT)-diag(Pf))) + # var.change[order(var.change$abs.change.var),] + # sort(diag(Pf)-diag(PfT),decreasing = T) - # + # # par(mfrow=c(3,3)) # for(i in 1:length(Pf)) { # if(diag.stopper[i]==1){ # plot(dat[,i+14],ylim=c(0,10)); abline(h=as.vector(PfT)[i],col='red',lwd=2) # } # } - #scatterplots - #var - #corr #pull diags out ==1 - #check mu v var to make sure variance is only changing near 0# shifts in Xt v X on same plot - - #PfT <- cov(Xt) - #points(P_f_TRUE,PfT,col=1:14,pch="-",cex=2) + # scatterplots + # var + # corr #pull diags out ==1 + # check mu v var to make sure variance is only changing near 0# shifts in Xt v X on same plot + + # PfT <- cov(Xt) + # points(P_f_TRUE,PfT,col=1:14,pch="-",cex=2) } diff --git a/modules/assim.sequential/R/build_X.R b/modules/assim.sequential/R/build_X.R index 1dd91638d8d..9621bb5016a 100644 --- a/modules/assim.sequential/R/build_X.R +++ b/modules/assim.sequential/R/build_X.R @@ -1,8 +1,8 @@ #' build_X -#' +#' #' @name build_X #' @author Alexis Helgeson -#' +#' #' @description builds X matrix for SDA #' #' @param new.params object created from sda_matchparam, passed from sda.enkf_MultiSite @@ -17,52 +17,48 @@ #' @param restart_flag flag if it's a restart stage. Default is FALSE. 
#' #' @return X ready to be passed to SDA Analysis code -build_X <- function(out.configs, settings, new.params, nens, read_restart_times, outdir, t = 1, var.names, my.read_restart, restart_flag = FALSE){ - if(t == 1 & restart_flag){ +build_X <- function(out.configs, settings, new.params, nens, read_restart_times, outdir, t = 1, var.names, my.read_restart, restart_flag = FALSE) { + if (t == 1 & restart_flag) { reads <- - furrr::future_pmap(list(out.configs %>% `class<-`(c("list")), settings, new.params),function(configs,my_settings,siteparams) { + furrr::future_pmap(list(out.configs %>% `class<-`(c("list")), settings, new.params), function(configs, my_settings, siteparams) { # Loading the model package - this is required bc of the furrr - #library(paste0("PEcAn.",settings$model$type), character.only = TRUE) - #source("~/pecan/models/sipnet/R/read_restart.SIPNET.R") - + # library(paste0("PEcAn.",settings$model$type), character.only = TRUE) + # source("~/pecan/models/sipnet/R/read_restart.SIPNET.R") + X_tmp <- vector("list", 2) - + for (i in seq_len(nens)) { - X_tmp[[i]] <- do.call( my.read_restart, - args = list( - outdir = outdir, - runid = my_settings$run$id[i] %>% as.character(), - stop.time = read_restart_times[t+1], - settings = my_settings, - var.names = var.names, - params = siteparams[[i]] - ) + X_tmp[[i]] <- do.call(my.read_restart, + args = list( + outdir = outdir, + runid = my_settings$run$id[i] %>% as.character(), + stop.time = read_restart_times[t + 1], + settings = my_settings, + var.names = var.names, + params = siteparams[[i]] + ) ) - } return(X_tmp) }) - - }else{ + } else { reads <- - furrr::future_pmap(list(out.configs %>% `class<-`(c("list")), settings, new.params),function(configs,my_settings,siteparams) { - + furrr::future_pmap(list(out.configs %>% `class<-`(c("list")), settings, new.params), function(configs, my_settings, siteparams) { X_tmp <- vector("list", 2) - + for (i in seq_len(nens)) { - X_tmp[[i]] <- do.call( my.read_restart, - args = list( - outdir = outdir, - runid = configs$runs$id[i] %>% as.character(), - stop.time = read_restart_times[t+1], - var.names = var.names, - params = siteparams[[i]] - ) + X_tmp[[i]] <- do.call(my.read_restart, + args = list( + outdir = outdir, + runid = configs$runs$id[i] %>% as.character(), + stop.time = read_restart_times[t + 1], + var.names = var.names, + params = siteparams[[i]] + ) ) - } return(X_tmp) }) } return(reads) -} \ No newline at end of file +} diff --git a/modules/assim.sequential/R/downscale_function.R b/modules/assim.sequential/R/downscale_function.R index 317926eba1e..a5ca05e69b3 100644 --- a/modules/assim.sequential/R/downscale_function.R +++ b/modules/assim.sequential/R/downscale_function.R @@ -16,36 +16,36 @@ SDA_downscale_preprocess <- function(data_path, coords_path, date, carbon_pool) # Read the input data and site coordinates input_data <- readRDS(data_path) site_coordinates <- readr::read_csv(coords_path) - + # Convert input_data names to Date objects input_date_names <- lubridate::ymd(names(input_data)) names(input_data) <- input_date_names - + # Convert the input date to a Date object standard_date <- lubridate::ymd(date) - + # Ensure the date exists in the input data if (!standard_date %in% input_date_names) { stop(paste("Date", date, "not found in the input data.")) } - + # Extract the carbon data for the specified focus year index <- which(input_date_names == standard_date) data <- input_data[[index]] - + # Ensure the carbon pool exists in the input data if (!carbon_pool %in% names(data)) { 
stop(paste("Carbon pool", carbon_pool, "not found in the input data.")) } - + carbon_data <- as.data.frame(t(data[which(names(data) == carbon_pool)])) names(carbon_data) <- paste0("ensemble", seq(ncol(carbon_data))) - + # Ensure site coordinates have 'lon' and 'lat' columns if (!all(c("lon", "lat") %in% names(site_coordinates))) { stop("Site coordinates must contain 'lon' and 'lat' columns.") } - + # Ensure the number of rows in site coordinates matches the number of rows in carbon data if (nrow(site_coordinates) != nrow(carbon_data)) { message("Number of rows in site coordinates does not match the number of rows in carbon data.") @@ -57,7 +57,7 @@ SDA_downscale_preprocess <- function(data_path, coords_path, date, carbon_pool) carbon_data <- carbon_data[1:nrow(site_coordinates), ] } } - + message("Preprocessing completed successfully.") return(list(input_data = input_data, site_coordinates = site_coordinates, carbon_data = carbon_data)) } @@ -82,17 +82,17 @@ SDA_downscale_preprocess <- function(data_path, coords_path, date, carbon_pool) n <- length(y) indices <- seq_len(n) folds <- split(indices, cut(seq_len(n), breaks = k, labels = FALSE)) - + if (!returnTrain) { - folds <- folds # Test indices are already what we want + folds <- folds # Test indices are already what we want } else { - folds <- lapply(folds, function(x) indices[-x]) # Return training indices + folds <- lapply(folds, function(x) indices[-x]) # Return training indices } - + if (!list) { folds <- unlist(folds) } - + return(folds) } @@ -114,62 +114,63 @@ SDA_downscale_preprocess <- function(data_path, coords_path, date, carbon_pool) SDA_downscale <- function(preprocessed, date, carbon_pool, covariates, model_type = "rf", seed = NULL) { carbon_data <- preprocessed$carbon_data - + # Convert site coordinates to SpatVector site_coordinates <- terra::vect(preprocessed$site_coordinates, geom = c("lon", "lat"), crs = "EPSG:4326") - + # Extract predictors from covariates raster using site coordinates predictors <- as.data.frame(terra::extract(covariates, site_coordinates, ID = FALSE)) - + # Dynamically get covariate names covariate_names <- names(predictors) - + # Create a single data frame with all predictors and ensemble data full_data <- cbind(carbon_data, predictors) - + # Split the observations into training and testing sets if (!is.null(seed)) { - set.seed(seed) # Only set seed if provided + set.seed(seed) # Only set seed if provided } sample <- sample(1:nrow(full_data), size = round(0.75 * nrow(full_data))) train_data <- full_data[sample, ] test_data <- full_data[-sample, ] - + # Prepare data for both RF and CNN x_data <- as.matrix(full_data[, covariate_names]) y_data <- as.matrix(carbon_data) - + # Calculate scaling parameters from all data scaling_params <- list( mean = colMeans(x_data), sd = apply(x_data, 2, stats::sd) ) - + # Normalize the data x_data_scaled <- scale(x_data, center = scaling_params$mean, scale = scaling_params$sd) - + # Split into training and testing sets x_train <- x_data_scaled[sample, ] x_test <- x_data_scaled[-sample, ] y_train <- y_data[sample, ] y_test <- y_data[-sample, ] - + # Initialize lists for outputs models <- list() maps <- list() predictions <- list() - + if (model_type == "rf") { for (i in seq_along(carbon_data)) { ensemble_col <- paste0("ensemble", i) formula <- stats::as.formula(paste(ensemble_col, "~", paste(covariate_names, collapse = " + "))) models[[i]] <- randomForest::randomForest(formula, - data = train_data, - ntree = 1000, - na.action = stats::na.omit, - keep.forest = TRUE, - 
importance = TRUE) - + data = train_data, + ntree = 1000, + na.action = stats::na.omit, + keep.forest = TRUE, + importance = TRUE + ) + maps[[i]] <- terra::predict(covariates, model = models[[i]], na.rm = TRUE) predictions[[i]] <- stats::predict(models[[i]], test_data) } @@ -184,23 +185,23 @@ SDA_downscale <- function(preprocessed, date, carbon_pool, covariates, model_typ for (i in seq_along(carbon_data)) { all_models <- list() - + # Create k-fold indices fold_indices <- .create_folds(y = seq_len(nrow(x_train)), k = k_folds, list = TRUE, returnTrain = FALSE) - #initialise operations for each fold + # initialise operations for each fold for (fold in 1:k_folds) { cat(sprintf("Processing ensemble %d, fold %d of %d\n", i, fold, k_folds)) - + # Split data into training and validation sets for this fold train_indices <- setdiff(seq_len(nrow(x_train)), fold_indices[[fold]]) val_indices <- fold_indices[[fold]] - + x_train_fold <- x_train[train_indices, , drop = FALSE] y_train_fold <- y_train[train_indices, i] x_val_fold <- x_train[val_indices, , drop = FALSE] y_val_fold <- y_train[val_indices, i] - + # Create bagged models for this fold fold_models <- list() for (bag in 1:num_bags) { @@ -208,9 +209,9 @@ SDA_downscale <- function(preprocessed, date, carbon_pool, covariates, model_typ bootstrap_indices <- sample(1:nrow(x_train_fold), size = nrow(x_train_fold), replace = TRUE) x_train_bag <- x_train_fold[bootstrap_indices, ] y_train_bag <- y_train_fold[bootstrap_indices] - + # Define the CNN model architecture - # Used dual batch normalization and dropout as the first set of batch normalization and + # Used dual batch normalization and dropout as the first set of batch normalization and model <- keras3::keras_model_sequential() |> # Layer Reshape : Reshape to fit target shape for the convolutional layer keras3::layer_reshape(target_shape = c(ncol(x_train), 1, 1), input_shape = ncol(x_train)) |> @@ -218,15 +219,15 @@ SDA_downscale <- function(preprocessed, date, carbon_pool, covariates, model_typ keras3::layer_conv_2d( filters = 32, kernel_size = c(3, 1), - activation = 'relu', - padding = 'same' + activation = "relu", + padding = "same" ) |> # Flatten: Converts 3D output to 1D for dense layer input keras3::layer_flatten() |> # Dense layer: Learns complex combinations of features keras3::layer_dense( - units = 64, - activation = 'relu', + units = 64, + activation = "relu", kernel_regularizer = keras3::regularizer_l2(0.01) ) |> # Batch normalization: Normalizes layer inputs, stabilizes learning, reduces internal covariate shift @@ -235,8 +236,8 @@ SDA_downscale <- function(preprocessed, date, carbon_pool, covariates, model_typ keras3::layer_dropout(rate = 0.3) |> # Dense layer: Learns complex combinations of features keras3::layer_dense( - units = 32, - activation = 'relu', + units = 32, + activation = "relu", kernel_regularizer = keras3::regularizer_l2(0.01) ) |> # Batch normalization: Further stabilizes learning in deeper layers @@ -248,26 +249,26 @@ SDA_downscale <- function(preprocessed, date, carbon_pool, covariates, model_typ units = 1, kernel_regularizer = keras3::regularizer_l2(0.01) ) - + # Learning rate scheduler lr_schedule <- keras3::learning_rate_schedule_exponential_decay( initial_learning_rate = 0.001, decay_steps = 1000, decay_rate = 0.9 ) - + # Early stopping callback early_stopping <- keras3::callback_early_stopping( - monitor = 'loss', + monitor = "loss", patience = 10, restore_best_weights = TRUE ) # Compile the model model |> keras3::compile( - loss = 'mean_squared_error', + loss = 
"mean_squared_error", optimizer = keras3::optimizer_adam(learning_rate = lr_schedule), - metrics = c('mean_absolute_error') + metrics = c("mean_absolute_error") ) # Train the model @@ -283,14 +284,14 @@ SDA_downscale <- function(preprocessed, date, carbon_pool, covariates, model_typ # Store the trained model for this bag in the fold_models list fold_models[[bag]] <- model } - + # Add fold models to all_models list all_models <- c(all_models, fold_models) } - + # Store all models for this ensemble models[[i]] <- all_models - + # Use all models for predictions cnn_ensemble_predict <- function(models, newdata, scaling_params) { newdata <- scale(newdata, center = scaling_params$mean, scale = scaling_params$sd) @@ -302,18 +303,19 @@ SDA_downscale <- function(preprocessed, date, carbon_pool, covariates, model_typ prediction_rast <- terra::rast(covariates) # Generate spatial predictions using the trained model - maps[[i]] <- terra::predict(prediction_rast, model = models[[i]], - fun = cnn_ensemble_predict, - scaling_params = scaling_params) + maps[[i]] <- terra::predict(prediction_rast, + model = models[[i]], + fun = cnn_ensemble_predict, + scaling_params = scaling_params + ) # Make predictions on held-out test data predictions[[i]] <- cnn_ensemble_predict(models[[i]], x_data[-sample, ], scaling_params) - } } else { stop("Invalid model_type. Please choose either 'rf' for Random Forest or 'cnn' for Convolutional Neural Network.") } - + # Organize the results into a single output list downscale_output <- list( data = list(training = train_data, testing = test_data), @@ -322,14 +324,14 @@ SDA_downscale <- function(preprocessed, date, carbon_pool, covariates, model_typ predictions = predictions, scaling_params = scaling_params ) - + # Rename each element of the output list with appropriate ensemble numbers for (i in seq_along(carbon_data)) { names(downscale_output$models)[i] <- paste0("ensemble", i) names(downscale_output$maps)[i] <- paste0("ensemble", i) names(downscale_output$predictions)[i] <- paste0("ensemble", i) } - + return(downscale_output) } @@ -344,23 +346,23 @@ SDA_downscale <- function(preprocessed, date, carbon_pool, covariates, model_typ ##' ##' @description This function takes the output from the SDA_downscale function and computes various performance metrics for each ensemble. It provides a way to evaluate the accuracy of the downscaling results without modifying the main downscaling function. 
##' -##' @return A list of metrics for each ensemble, where each element contains MAE , MSE ,R_squared ,actual values from testing data and predicted values for the testing data +##' @return A list of metrics for each ensemble, where each element contains MAE , MSE ,R_squared ,actual values from testing data and predicted values for the testing data SDA_downscale_metrics <- function(downscale_output, carbon_pool) { metrics <- list() - + for (i in 1:length(downscale_output$data)) { actual <- downscale_output$data[[i]]$testing[[paste0(carbon_pool, "_ens", i)]] predicted <- downscale_output$predictions[[i]] - + mse <- mean((actual - predicted)^2) mae <- mean(abs(actual - predicted)) r_squared <- 1 - sum((actual - predicted)^2) / sum((actual - mean(actual))^2) - + metrics[[i]] <- list(MSE = mse, MAE = mae, R_squared = r_squared, actual = actual, predicted = predicted) } - + names(metrics) <- paste0("ensemble", seq_along(metrics)) - + return(metrics) } diff --git a/modules/assim.sequential/R/downscale_function_hrly.R b/modules/assim.sequential/R/downscale_function_hrly.R index 25da4c62150..fd6ef299c56 100644 --- a/modules/assim.sequential/R/downscale_function_hrly.R +++ b/modules/assim.sequential/R/downscale_function_hrly.R @@ -1,7 +1,7 @@ #' SDA Downscale Function for Hourly Data -#' +#' #' This function uses the randomForest model to downscale forecast data (hourly) to unmodeled locations using covariates and site locations -#' +#' #' @author Harunobu Ishii #' @param nc_file In quotes, file path for .nc containing ensemble data. #' @param coords In quotes, file path for .csv file containing the site coordinates, columns named "lon" and "lat". @@ -10,29 +10,28 @@ #' @return It returns the `downscale_output` list containing lists for the training and testing data sets, models, and predicted maps for each ensemble member. 
#' @export -SDA_downscale_hrly <- function(nc_file, coords, yyyy, covariates){ - +SDA_downscale_hrly <- function(nc_file, coords, yyyy, covariates) { # Read the input data and site coordinates nc_data <- ncdf4::nc_open(nc_file) on.exit(ncdf4::nc_close(nc_data)) input_data <- ncdf4::ncvar_get(nc_data, "NEE") covariate_names <- names(covariates) - - + + # Extract time and units time <- nc_data$dim$time$vals time_units <- nc_data$dim$time$units time_origin_str <- substr(time_units, 12, 31) - + # Check if timezone is specified in the time units string if (grepl("UTC|GMT", time_units)) { time_origin <- lubridate::ymd_hm(time_origin_str, tz = "UTC") } else if (grepl("EST", time_units)) { time_origin <- lubridate::ymd_hm(time_origin_str, tz = "EST") } else { - time_origin <- lubridate::ymd_hm(time_origin_str, tz = "UTC") # Default to UTC if not specified + time_origin <- lubridate::ymd_hm(time_origin_str, tz = "UTC") # Default to UTC if not specified } - + # Timereadable if (grepl("hours", time_units)) { time_readable <- time_origin + lubridate::dhours(time) @@ -41,28 +40,28 @@ SDA_downscale_hrly <- function(nc_file, coords, yyyy, covariates){ } else { stop("Unsupported time units") } - + # Extract predictors from covariates raster using site coordinates - site_coordinates <- terra::vect(readr::read_csv(coords), geom=c("lon", "lat"), crs="EPSG:4326") - predictors <- as.data.frame(terra::extract(covariates, site_coordinates,ID = FALSE)) + site_coordinates <- terra::vect(readr::read_csv(coords), geom = c("lon", "lat"), crs = "EPSG:4326") + predictors <- as.data.frame(terra::extract(covariates, site_coordinates, ID = FALSE)) + + downscale_output <- list() - downscale_output<- list() - # Train & Test split - sample <- sample(1:nrow(predictors), size = round(0.75*nrow(predictors))) - + sample <- sample(1:nrow(predictors), size = round(0.75 * nrow(predictors))) + # Predict for each time stamp of the year selected time_indices <- which(year(time_readable) == yyyy) for (index in time_indices) { data <- input_data[index, , ] carbon_data <- as.data.frame(data) - names(carbon_data) <- paste0("ensemble",seq(1:ncol(carbon_data))) + names(carbon_data) <- paste0("ensemble", seq(1:ncol(carbon_data))) # Combine carbon data and covariates/predictors and split into training/test full_data <- cbind(carbon_data, predictors) train_data <- full_data[sample, ] test_data <- full_data[-sample, ] - + # Combine each ensemble member with all predictors models <- list() maps <- list() @@ -72,23 +71,25 @@ SDA_downscale_hrly <- function(nc_file, coords, yyyy, covariates){ ensemble_col <- paste0("ensemble", i) formula <- stats::as.formula(paste(ensemble_col, "~", paste(covariate_names, collapse = " + "))) models[[i]] <- randomForest::randomForest(formula, - data = train_data, - ntree = 1000, - na.action = stats::na.omit, - keep.forest = TRUE, - importance = TRUE) - + data = train_data, + ntree = 1000, + na.action = stats::na.omit, + keep.forest = TRUE, + importance = TRUE + ) + maps[[i]] <- terra::predict(covariates, model = models[[i]], na.rm = TRUE) predictions[[i]] <- stats::predict(models[[i]], test_data) } # Organize the results into a single output list - curr_downscaled <- list( data = list(training = train_data, testing = test_data), - models = models, - maps = maps, - predictions = predictions - ) - + curr_downscaled <- list( + data = list(training = train_data, testing = test_data), + models = models, + maps = maps, + predictions = predictions + ) + # Rename each element of the output list with appropriate ensemble 
numbers for (i in 1:length(curr_downscaled$data)) { names(curr_downscaled$data[[i]]) <- paste0("ensemble", seq(1:ncol(carbon_data))) @@ -96,8 +97,8 @@ SDA_downscale_hrly <- function(nc_file, coords, yyyy, covariates){ names(curr_downscaled$models) <- paste0("ensemble", seq(1:ncol(carbon_data))) names(curr_downscaled$maps) <- paste0("ensemble", seq(1:ncol(carbon_data))) names(curr_downscaled$predictions) <- paste0("ensemble", seq(1:ncol(carbon_data))) - - downscale_output[[as.character(time_readable[index])]]<-curr_downscaled + + downscale_output[[as.character(time_readable[index])]] <- curr_downscaled } return(downscale_output) } diff --git a/modules/assim.sequential/R/get_ensemble_weights.R b/modules/assim.sequential/R/get_ensemble_weights.R index 8423e2eab86..8e426685714 100644 --- a/modules/assim.sequential/R/get_ensemble_weights.R +++ b/modules/assim.sequential/R/get_ensemble_weights.R @@ -1,100 +1,93 @@ #' @title get_ensemble_weights #' @name get_ensemble_weights #' @author Ann Raiho \email{ann.raiho@gmail.com} -#' +#' #' @param settings PEcAn settings object #' @param time_do Give user specific time so you don't have to have it be annual #' #' @description Creates file of ensemble weights in format needed for SDA -#' +#' #' @return NONE -#' +#' #' @import lubridate #' @export -#' - -get_ensemble_weights <- function(settings, time_do){ +#' - +get_ensemble_weights <- function(settings, time_do) { nens <- as.numeric(settings$ensemble$size) - - if(!is.null(try(settings$run$inputs$ensembleweights$path))){ - - ###-------------------------------------------------------------------### + + if (!is.null(try(settings$run$inputs$ensembleweights$path))) { + ### -------------------------------------------------------------------### ### Loading Weights ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### weight_file <- utils::read.csv(settings$run$inputs$ensembleweights$path) start_date <- settings$run$inputs$ensembleweights$start.date end_date <- settings$run$inputs$ensembleweights$end.date - years_get <- lubridate::year(start_date):lubridate::year(end_date) #assuming year time step... would need to change for other analyses possibly going down the load.data path? - - weight_file[weight_file==0] <- .00001 #hack not sure how to deal with zero weights - + years_get <- lubridate::year(start_date):lubridate::year(end_date) # assuming year time step... would need to change for other analyses possibly going down the load.data path? + + weight_file[weight_file == 0] <- .00001 # hack not sure how to deal with zero weights + weight_list <- list() - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### Assigning Weights ### - ###-------------------------------------------------------------------### - #TO DO: Right now just takes snapshot weights. Consider averaging over time period. - for(tt in seq_along(time_do)){ - + ### -------------------------------------------------------------------### + # TO DO: Right now just takes snapshot weights. Consider averaging over time period. 
+ for (tt in seq_along(time_do)) { which_ens <- settings$run$inputs$met$path - - climate_names <- unlist(lapply(which_ens,FUN=function(x) strsplit(x,'/')[[1]][6])) - - #TO DO: make more general for subannual weights + + climate_names <- unlist(lapply(which_ens, FUN = function(x) strsplit(x, "/")[[1]][6])) + + # TO DO: make more general for subannual weights weight_list[[tt]] <- (weight_file[weight_file$year == time_do[tt] & - weight_file$climate_model %in% climate_names, 'weights'] / sum(weight_file[weight_file$year == - time_do[tt] & - weight_file$climate_model %in% climate_names, 'weights'])) * nens - - if(sum(weight_list[[tt]]) != nens) PEcAn.logger::logger.warn(paste('Time',tt,'does not equal the number of ensemble members',nens)) - - #TO DO: will need to have some way of dealing with sampling too if there are more ensemble members than weights or vice versa - - if(sum(weight_list[[tt]])==0){ - weight_list[[tt]] <- rep(1,nens) #no weights + weight_file$climate_model %in% climate_names, "weights"] / sum(weight_file[weight_file$year == + time_do[tt] & + weight_file$climate_model %in% climate_names, "weights"])) * nens + + if (sum(weight_list[[tt]]) != nens) PEcAn.logger::logger.warn(paste("Time", tt, "does not equal the number of ensemble members", nens)) + + # TO DO: will need to have some way of dealing with sampling too if there are more ensemble members than weights or vice versa + + if (sum(weight_list[[tt]]) == 0) { + weight_list[[tt]] <- rep(1, nens) # no weights } - - names(weight_list[[tt]]) <- 1:nens #giving number names because the met names change between files with the '.' and the '-' seps - + + names(weight_list[[tt]]) <- 1:nens # giving number names because the met names change between files with the '.' and the '-' seps } - - }else{ + } else { weight_list <- list() - for(tt in 1:length(years_get)){ - weight_list[[tt]] <- rep(1,nens) #no weights - names(weight_list[[tt]]) <- 1:nens #giving number names because the met names change between files with the '.' and the '-' seps + for (tt in 1:length(years_get)) { + weight_list[[tt]] <- rep(1, nens) # no weights + names(weight_list[[tt]]) <- 1:nens # giving number names because the met names change between files with the '.' and the '-' seps } } - + names(weight_list) <- time_do - + save(weight_list, file = file.path(settings$outdir, "ensemble_weights.Rdata")) - } -#Example script to rename if climate files get messed up seps -if(FALSE){ - - files_get <- list.dirs('~/TENSION_MET/') - - for(ii in 2:length(files_get)){ - file.rename(from = files_get[ii], - to = str_replace_all( - string = files_get[ii], - pattern = '-', - replacement = '.' - )) +# Example script to rename if climate files get messed up seps +if (FALSE) { + files_get <- list.dirs("~/TENSION_MET/") + + for (ii in 2:length(files_get)) { + file.rename( + from = files_get[ii], + to = str_replace_all( + string = files_get[ii], + pattern = "-", + replacement = "." 
+ ) + ) } - - - for(ii in 2:length(files_get)){ - load(paste0(files_get[ii],'/climate.Rdata')) + + + for (ii in 2:length(files_get)) { + load(paste0(files_get[ii], "/climate.Rdata")) rownames(temp.mat) <- rownames(precip.mat) <- 850:2010 - save(temp.mat, precip.mat,file=paste0(files_get[ii],'/climate.Rdata')) + save(temp.mat, precip.mat, file = paste0(files_get[ii], "/climate.Rdata")) } - -} \ No newline at end of file +} diff --git a/modules/assim.sequential/R/hop_test.R b/modules/assim.sequential/R/hop_test.R index 4c79b437e30..f13975f1aac 100644 --- a/modules/assim.sequential/R/hop_test.R +++ b/modules/assim.sequential/R/hop_test.R @@ -1,106 +1,111 @@ ##' @title hop_test ##' @name hop_test ##' @author Ann Raiho \email{araiho@@nd.edu} -##' +##' ##' @param settings SDA PEcAn settings object ##' @param nyear number of years to run hop test over ##' @param ens.runid run id. If not provided, is looked up from [settings$outdir]/runs.txt ##' ##' @description Hop test. This script tests that the model successfully reads it's own restart and can restart without loss of information. -##' +##' ##' @return NONE ##' @export -##' -hop_test <- function(settings, ens.runid = NULL, nyear){ - ##### +##' +hop_test <- function(settings, ens.runid = NULL, nyear) { + ##### ##### Variable to check - ##### - hop_var <- lapply(settings$state.data.assimilation$state.variables,'[[','variable.name') - - ##### + ##### + hop_var <- lapply(settings$state.data.assimilation$state.variables, "[[", "variable.name") + + ##### ##### Regular Run - ##### - if(is.null(ens.runid)){ + ##### + if (is.null(ens.runid)) { PEcAn.workflow::run.write.configs(settings, write = settings$database$bety$write) - + PEcAn.workflow::start_model_runs(settings, settings$database$bety$write) - - ens.runid <- utils::read.table(file.path(settings$rundir,'runs.txt')) + + ens.runid <- utils::read.table(file.path(settings$rundir, "runs.txt")) } if (!requireNamespace("PEcAn.utils", quietly = TRUE)) { PEcAn.logger::logger.error( "Can't find package 'PEcAn.utils',", "needed by `PEcAnAssimSequential::hop_test()`.", - "Please install it and try again.") + "Please install it and try again." 
+ ) } - ens <- PEcAn.utils::read.output(runid = ens.runid, - outdir = file.path(settings$outdir,'out', ens.runid), - start.year = lubridate::year(settings$run$start.date), - end.year = lubridate::year(settings$run$end.date), - variables = hop_var) - ##### + ens <- PEcAn.utils::read.output( + runid = ens.runid, + outdir = file.path(settings$outdir, "out", ens.runid), + start.year = lubridate::year(settings$run$start.date), + end.year = lubridate::year(settings$run$end.date), + variables = hop_var + ) + ##### ##### Create Empty Data - ##### + ##### reg_run_end <- lubridate::year(settings$run$end.date) - settings$run$end.date <- paste0(reg_run_end - (nyear-1),'/12/31') #changing the run end.date so that the .nc files get written after write_restart - - obs.mean <- obs.cov <- rep(list(NA),nyear) - names(obs.mean) <- names(obs.cov) <- paste0(lubridate::year(settings$run$end.date):reg_run_end,'/12/31') - - ##### + settings$run$end.date <- paste0(reg_run_end - (nyear - 1), "/12/31") # changing the run end.date so that the .nc files get written after write_restart + + obs.mean <- obs.cov <- rep(list(NA), nyear) + names(obs.mean) <- names(obs.cov) <- paste0(lubridate::year(settings$run$end.date):reg_run_end, "/12/31") + + ##### ##### Run in SDA code with no data -- HOP Run - ##### + ##### PEcAnAssimSequential::sda.enkf(settings = settings, obs.mean = obs.mean, obs.cov = obs.cov) - - hop.runid <- utils::read.table(file.path(settings$rundir,'runs.txt')) - hop.ens <- PEcAn.utils::read.output(runid = hop.runid, - outdir = file.path(settings$outdir,'out', hop.runid), - start.year = lubridate::year(settings$run$start.date), - end.year = reg_run_end, - variables = hop_var) - - save(list = ls(envir = environment(), all.names = TRUE), - file = file.path(settings$outdir, "hop_test_output.Rdata"), envir = environment()) - - ##### + + hop.runid <- utils::read.table(file.path(settings$rundir, "runs.txt")) + hop.ens <- PEcAn.utils::read.output( + runid = hop.runid, + outdir = file.path(settings$outdir, "out", hop.runid), + start.year = lubridate::year(settings$run$start.date), + end.year = reg_run_end, + variables = hop_var + ) + + save( + list = ls(envir = environment(), all.names = TRUE), + file = file.path(settings$outdir, "hop_test_output.Rdata"), envir = environment() + ) + + ##### ##### Comparison: Hop Run versus Regular Run - ##### - + ##### + plot_years <- lubridate::year(settings$run$start.date):reg_run_end - - grDevices::pdf('hop_test_results.pdf') - graphics::par(mfrow=c(2,1)) - for(p in seq_along(hop_var)){ - + + grDevices::pdf("hop_test_results.pdf") + graphics::par(mfrow = c(2, 1)) + for (p in seq_along(hop_var)) { hop_var_use <- unlist(hop_var[p]) - ens.plot <- ens[[hop_var_use]][(length(plot_years)-nyear):length(plot_years)] - hop.ens.plot <- hop.ens[[hop_var_use]][(length(plot_years)-nyear):length(plot_years)] - + ens.plot <- ens[[hop_var_use]][(length(plot_years) - nyear):length(plot_years)] + hop.ens.plot <- hop.ens[[hop_var_use]][(length(plot_years) - nyear):length(plot_years)] + plot(plot_years, - ens[[hop_var_use]], - pch=19,ylim=c(range(ens,hop.ens)), - ylab = hop_var_use, xlab = 'Years') + ens[[hop_var_use]], + pch = 19, ylim = c(range(ens, hop.ens)), + ylab = hop_var_use, xlab = "Years" + ) graphics::points(plot_years, - hop.ens[[hop_var_use]],col='red') - graphics::abline(v=year(settings$run$end.date),col='blue',lwd=2) - graphics::legend('topleft', c('Regular Run','Hop Run','Test Start'), pch=c(19,1,19),col=c('black','red','blue')) - graphics::title(paste('Hop Test 
Comparision',hop_var[p])) - - hop_cor <- stats::cor(ens.plot,hop.ens.plot) - - plot(ens.plot,hop.ens.plot, - xlab = paste('Regular Run',hop_var_use), - ylab = paste('Hop Run',hop_var_use),pch=19,cex=1.5) - - graphics::abline(a=0,b=1,col='red',lwd=2) - graphics::legend('topleft',paste('Correlation =',hop_cor)) - - graphics::title(paste('Hop Test Correlation',hop_var[p])) - } - grDevices::dev.off() - -} + hop.ens[[hop_var_use]], + col = "red" + ) + graphics::abline(v = year(settings$run$end.date), col = "blue", lwd = 2) + graphics::legend("topleft", c("Regular Run", "Hop Run", "Test Start"), pch = c(19, 1, 19), col = c("black", "red", "blue")) + graphics::title(paste("Hop Test Comparison", hop_var[p])) + hop_cor <- stats::cor(ens.plot, hop.ens.plot) + plot(ens.plot, hop.ens.plot, + xlab = paste("Regular Run", hop_var_use), + ylab = paste("Hop Run", hop_var_use), pch = 19, cex = 1.5 + ) + graphics::abline(a = 0, b = 1, col = "red", lwd = 2) + graphics::legend("topleft", paste("Correlation =", hop_cor)) + graphics::title(paste("Hop Test Correlation", hop_var[p])) + } + grDevices::dev.off() +} diff --git a/modules/assim.sequential/R/load_data_paleon_sda.R b/modules/assim.sequential/R/load_data_paleon_sda.R index a77596796ed..bad937f487b 100644 --- a/modules/assim.sequential/R/load_data_paleon_sda.R +++ b/modules/assim.sequential/R/load_data_paleon_sda.R @@ -1,16 +1,15 @@ ##' @title load_data_paleon_sda ##' @name load_data_paleon_sda ##' @author Ann Raiho \email{araiho@nd.edu} -##' +##' ##' @param settings PEcAn SDA settings object -##' +##' ##' @description Load data function for paleon SDA data products -##' +##' ##' @return obs.mean and obs.cov for sda.enkf ##' @export -##' -load_data_paleon_sda <- function(settings){ - +##' +load_data_paleon_sda <- function(settings) { #### Types of Data needed ## STEPPS Fcomp (Work in progress) ## ReFAB biomass @@ -18,304 +17,316 @@ load_data_paleon_sda <- function(settings){ ## Tree Rings from tree ring module ## PLS and FIA Biomass Snapshot ## Wish list: eddy covariance - #browser() - if(file.exists(file.path(settings$outdir,'sda.obs.Rdata'))){ - load(file.path(settings$outdir,'sda.obs.Rdata')) + # browser() + if (file.exists(file.path(settings$outdir, "sda.obs.Rdata"))) { + load(file.path(settings$outdir, "sda.obs.Rdata")) + return(obs.list) } - + suggests_avail <- c( reshape2 = requireNamespace("reshape2", quietly = TRUE), - plyr = requireNamespace("plyr", quietly = TRUE)) + plyr = requireNamespace("plyr", quietly = TRUE) + ) suggests_missing <- paste( sQuote(names(suggests_avail)[!suggests_avail], q = FALSE), - collapse = ", ") + collapse = ", " + ) if (!all(suggests_avail)) { PEcAn.logger::logger.error( "Can't find package(s)", suggests_missing, ", needed by `PEcAnAssimSequential::load_data_paleon_sda()`.", - "Please install these and try again.")
+ ) } - + d <- settings$database$bety con <- PEcAn.DB::db.open(d) - - if(settings$host$name != 'localhost') PEcAn.logger::logger.severe('ERROR: Code does not support anything but settings$host$name <- localhost at this time.') - + + if (settings$host$name != "localhost") PEcAn.logger::logger.severe("ERROR: Code does not support anything but settings$host$name <- localhost at this time.") + site <- PEcAn.DB::query.site(settings$run$site$id, con) format_id <- settings$state.data.assimilation$data$format_id input.list <- list() input.id <- list() obvs <- list() - - var.names <- unlist(sapply(settings$state.data.assimilation$state.variable, - function(x) { - x$variable.name - }, - USE.NAMES = FALSE), - use.names = FALSE) - + + var.names <- unlist( + sapply(settings$state.data.assimilation$state.variable, + function(x) { + x$variable.name + }, + USE.NAMES = FALSE + ), + use.names = FALSE + ) + start_date <- settings$state.data.assimilation$start.date - end_date <- settings$state.data.assimilation$end.date - + end_date <- settings$state.data.assimilation$end.date + obs.mean <- obs.mean.tmp <- list() obs.cov <- obs.cov.tmp <- list() obs.times <- seq(as.Date(start_date), as.Date(end_date), by = settings$state.data.assimilation$forecast.time.step) obs.times <- formatC(lubridate::year(obs.times), width = 4, format = "d", flag = "0") - + biomass2carbon <- 0.48 - - for(i in seq_along(format_id)){ - input.list[[i]] <- PEcAn.DB::db.query(paste("SELECT * FROM inputs WHERE site_id =",site$id ," AND format_id = ",format_id[[i]]), con) + + for (i in seq_along(format_id)) { + input.list[[i]] <- PEcAn.DB::db.query(paste("SELECT * FROM inputs WHERE site_id =", site$id, " AND format_id = ", format_id[[i]]), con) input.id[[i]] <- input.list[[i]]$id - + data.path <- PEcAn.DB::query.file.path(input.id[[i]], settings$host$name, con) format_full <- format <- PEcAn.DB::query.format.vars( bety = con, input.id = input.id[[i]], format.id = NA, - var.ids=NA) - + var.ids = NA + ) + ### Tree Ring Data Product - if(format_id[[i]] == '1000000040'){ + if (format_id[[i]] == "1000000040") { # hack instead of changing the units in BETY format for now # I don't want load_data to convert anything - # data itself is in Mg / ha - format$vars[1,1] <- format$vars[1,8] <- format$vars[1,10] <- "AbvGrndWood" - format$vars[1,4] <- "kg C m-2" - - format$vars[4,1] <- format$vars[4,8] <- format$vars[4,10] <- "GWBI" - format$vars[4,4] <- "kg C m-2 s-1" + # data itself is in Mg / ha + format$vars[1, 1] <- format$vars[1, 8] <- format$vars[1, 10] <- "AbvGrndWood" + format$vars[1, 4] <- "kg C m-2" + + format$vars[4, 1] <- format$vars[4, 8] <- format$vars[4, 10] <- "GWBI" + format$vars[4, 4] <- "kg C m-2 s-1" } - - format$na.strings <- 'NA' + + format$na.strings <- "NA" time.row <- format$time.row - time.type <- format$vars$input_units[time.row] #THIS WONT WORK IF TIMESTEP ISNT ANNUAL - - # ---- LOAD INPUT DATA ---- # - PEcAn.logger::logger.info(paste('Using PEcAn.benchmark::load_data.R on format_id',format_id[[i]],'-- may take a few minutes')) - obvs[[i]] <- PEcAn.benchmark::load_data(data.path, format, start_year = lubridate::year(start_date), end_year = lubridate::year(end_date), site) - - variable <- intersect(var.names,colnames(obvs[[i]])) + time.type <- format$vars$input_units[time.row] # THIS WONT WORK IF TIMESTEP ISNT ANNUAL + + # ---- LOAD INPUT DATA ---- # + PEcAn.logger::logger.info(paste("Using PEcAn.benchmark::load_data.R on format_id", format_id[[i]], "-- may take a few minutes")) + obvs[[i]] <- PEcAn.benchmark::load_data(data.path, 
format, start_year = lubridate::year(start_date), end_year = lubridate::year(end_date), site) + + variable <- intersect(var.names, colnames(obvs[[i]])) ### Tree Ring Data Product - if(format_id[[i]] == '1000000040'){ - obvs[[i]] <- obvs[[i]][obvs[[i]]$model_type=='Model RW + Census',] - if(!is.null(obvs[[i]]$AbvGrndWood))obvs[[i]]$AbvGrndWood <- obvs[[i]]$AbvGrndWood * biomass2carbon #* kgm2Mgha - if(!is.null(obvs[[i]]$GWBI)) obvs[[i]]$GWBI <- obvs[[i]]$GWBI * biomass2carbon #* kgms2Mghayr + if (format_id[[i]] == "1000000040") { + obvs[[i]] <- obvs[[i]][obvs[[i]]$model_type == "Model RW + Census", ] + if (!is.null(obvs[[i]]$AbvGrndWood)) obvs[[i]]$AbvGrndWood <- obvs[[i]]$AbvGrndWood * biomass2carbon #* kgm2Mgha + if (!is.null(obvs[[i]]$GWBI)) obvs[[i]]$GWBI <- obvs[[i]]$GWBI * biomass2carbon #* kgms2Mghayr arguments <- list( plyr::.(year, MCMC_iteration, site_id), - plyr::.(variable)) + plyr::.(variable) + ) arguments2 <- list(plyr::.(year), plyr::.(variable)) arguments3 <- list( plyr::.(MCMC_iteration), plyr::.(variable), - plyr::.(year)) + plyr::.(year) + ) dataset <- obvs[[i]] - + ### Map species to model specific PFTs - if(any(var.names == 'AGB.pft')){ + if (any(var.names == "AGB.pft")) { # this is the only code path that uses data.land, so we check now instead of at top of function if (!requireNamespace("PEcAn.data.land", quietly = TRUE)) { PEcAn.logger::logger.error( "Can't find package 'PEcAn.data.land',", "needed by `PEcAnAssimSequential::load_data_paleon_sda()`.", - "Please install it and try again.") + "Please install it and try again." + ) } - spp_id <- PEcAn.data.land::match_species_id(unique(dataset$species_id),format_name = 'usda', con) + spp_id <- PEcAn.data.land::match_species_id(unique(dataset$species_id), format_name = "usda", con) pft_mat <- PEcAn.data.land::match_pft(spp_id$bety_species_id, settings$pfts, - con = con, allow_missing = TRUE) - - x <- paste0('AGB.pft.', pft_mat$pft) + con = con, allow_missing = TRUE + ) + + x <- paste0("AGB.pft.", pft_mat$pft) names(x) <- spp_id$input_code - - PEcAn.logger::logger.info('Now, mapping data species to model PFTs') + + PEcAn.logger::logger.info("Now, mapping data species to model PFTs") dataset$pft.cat <- x[dataset$species_id] - dataset <- dataset[dataset$pft.cat!='AGB.pft.NA',] - - variable <- c('AbvGrndWood') + dataset <- dataset[dataset$pft.cat != "AGB.pft.NA", ] + + variable <- c("AbvGrndWood") arguments <- list( plyr::.(year, MCMC_iteration, site_id, pft.cat), - plyr::.(variable)) + plyr::.(variable) + ) arguments2 <- list(plyr::.(year, pft.cat), plyr::.(variable)) arguments3 <- list( plyr::.(MCMC_iteration), plyr::.(pft.cat, variable), - plyr::.(year)) + plyr::.(year) + ) } - PEcAn.logger::logger.info('Now, aggregating data and creating SDA input lists') + PEcAn.logger::logger.info("Now, aggregating data and creating SDA input lists") melt_id <- colnames(dataset)[-which(colnames(dataset) %in% variable)] melt.test <- reshape2::melt(dataset, id = melt_id, na.rm = TRUE) cast.test <- reshape2::dcast(melt.test, arguments, sum, margins = variable) - + melt_id_next <- colnames(cast.test)[-which(colnames(cast.test) %in% variable)] melt.next <- reshape2::melt(cast.test, id = melt_id_next) mean_mat <- reshape2::dcast(melt.next, arguments2, mean) - + iter_mat <- reshape2::acast(melt.next, arguments3, mean) - cov.test <- apply(iter_mat,3,function(x){stats::cov(x)}) - - for(t in seq_along(obs.times)){ - obs.mean.tmp[[t]] <- mean_mat[mean_mat[,time.type]==obs.times[t], -c(1)] #THIS WONT WORK IF TIMESTEP ISNT ANNUAL - - 
if(any(var.names == 'AGB.pft')){ + cov.test <- apply(iter_mat, 3, function(x) { + stats::cov(x) + }) + + for (t in seq_along(obs.times)) { + obs.mean.tmp[[t]] <- mean_mat[mean_mat[, time.type] == obs.times[t], -c(1)] # THIS WONT WORK IF TIMESTEP ISNT ANNUAL + + if (any(var.names == "AGB.pft")) { obs.mean.tmp[[t]] <- rep(NA, length(unique(dataset$pft.cat))) names(obs.mean.tmp[[t]]) <- sort(unique(dataset$pft.cat)) - for(r in seq_along(unique(dataset$pft.cat))){ - k <- mean_mat[mean_mat$year==obs.times[t] & mean_mat$pft.cat==names(obs.mean.tmp[[t]][r]), variable] - if(any(k)){ + for (r in seq_along(unique(dataset$pft.cat))) { + k <- mean_mat[mean_mat$year == obs.times[t] & mean_mat$pft.cat == names(obs.mean.tmp[[t]][r]), variable] + if (any(k)) { obs.mean.tmp[[t]][r] <- k } } } - - obs.cov.tmp[[t]] <- matrix(cov.test[,which(colnames(cov.test) %in% obs.times[t])], - ncol = sqrt(dim(cov.test)[1]), - nrow = sqrt(dim(cov.test)[1])) - if(any(var.names == 'AGB.pft')){ - colnames(obs.cov.tmp[[t]]) <- names(iter_mat[1,,t]) + + obs.cov.tmp[[t]] <- matrix(cov.test[, which(colnames(cov.test) %in% obs.times[t])], + ncol = sqrt(dim(cov.test)[1]), + nrow = sqrt(dim(cov.test)[1]) + ) + if (any(var.names == "AGB.pft")) { + colnames(obs.cov.tmp[[t]]) <- names(iter_mat[1, , t]) } } } - + ### Pollen Data Product (STEPPS) - if(format_id[[i]] == '1000000058'){ + if (format_id[[i]] == "1000000058") { ncin <- ncdf4::nc_open(data.path) - - coords <- data.frame(x=site$lon,y=site$lat) + + coords <- data.frame(x = site$lon, y = site$lat) sp::coordinates(coords) <- ~ x + y - sp::proj4string(coords) <- sp::CRS('+proj=longlat +ellps=WGS84') - + sp::proj4string(coords) <- sp::CRS("+proj=longlat +ellps=WGS84") + ### site utm coordinates utm <- sp::spTransform(coords, sp::CRS("+proj=utm +zone=18N ellps=WGS84")) utm <- as.matrix(data.frame(utm)) - + ### find grid cell - site.x <- which(min(abs(ncdf4::ncvar_get(ncin, 'x') - utm[1])) == abs(ncdf4::ncvar_get(ncin, 'x') - utm[1])) - site.y <- which(min(abs(ncdf4::ncvar_get(ncin, 'y') - utm[2])) == abs(ncdf4::ncvar_get(ncin, 'y') - utm[2])) - years <- formatC(ncdf4::ncvar_get(ncin, 'year'), width = 4, format = "d", flag = "0") - + site.x <- which(min(abs(ncdf4::ncvar_get(ncin, "x") - utm[1])) == abs(ncdf4::ncvar_get(ncin, "x") - utm[1])) + site.y <- which(min(abs(ncdf4::ncvar_get(ncin, "y") - utm[2])) == abs(ncdf4::ncvar_get(ncin, "y") - utm[2])) + years <- formatC(ncdf4::ncvar_get(ncin, "year"), width = 4, format = "d", flag = "0") + taxa <- names(ncin$var) - if('other'%in%taxa) taxa <- taxa[-c(grep('other',taxa))] - - sims.keep <- array(NA,dim=c(length(taxa),length(ncin$dim$year$vals),length(ncin$dim$sample$vals))) - for(n in seq_along(taxa)){ + if ("other" %in% taxa) taxa <- taxa[-c(grep("other", taxa))] + + sims.keep <- array(NA, dim = c(length(taxa), length(ncin$dim$year$vals), length(ncin$dim$sample$vals))) + for (n in seq_along(taxa)) { taxa.start <- ncdf4::ncvar_get(ncin, taxa[n]) - + # input is a matrix 'sims', with rows as time and columns as MCMC samples - sims.keep[n,,] <- taxa.start[site.x,site.y,,] + sims.keep[n, , ] <- taxa.start[site.x, site.y, , ] } - + ##### ##### Calculating Effective Sample Size ##### ##### - - ESS_calc <- function(ntimes, sims){ - row.means.sims <- sims - rowMeans(sims) # center based on mean at each time to remove baseline temporal correlation + + ESS_calc <- function(ntimes, sims) { + row.means.sims <- sims - rowMeans(sims) # center based on mean at each time to remove baseline temporal correlation # (we want to estimate effective sample 
size effect from correlation of the errors) - + # compute all pairwise covariances at different times covars <- NULL - for(lag in 1:(ntimes-1)){ - covars <- c(covars, rowMeans(row.means.sims[(lag+1):ntimes, , drop = FALSE] * row.means.sims[1:(ntimes-lag), , drop = FALSE])) + for (lag in 1:(ntimes - 1)) { + covars <- c(covars, rowMeans(row.means.sims[(lag + 1):ntimes, , drop = FALSE] * row.means.sims[1:(ntimes - lag), , drop = FALSE])) } vars <- apply(row.means.sims, 1, stats::var) # pointwise post variances at each time, might not be homoscedastic - + # nominal sample size scaled by ratio of variance of an average # under independence to variance of average of correlated values neff <- ntimes * sum(vars) / (sum(vars) + 2 * sum(covars)) return(neff) } - + neff.keep <- mean.keep <- list() pecan.pfts <- as.character(lapply(settings$pfts, function(x) x[["name"]])) - - for(n in taxa){ - sims.start <- ncdf4::ncvar_get(ncin,n) - + + for (n in taxa) { + sims.start <- ncdf4::ncvar_get(ncin, n) + # input is a matrix 'sims', with rows as time and columns as MCMC samples - sims <- sims.start[site.x,site.y,,] - + sims <- sims.start[site.x, site.y, , ] + ntimes <- 10 - + neff.keep[[n]] <- ESS_calc(ntimes = ntimes, sims = sims) mean.keep[[n]] <- rowMeans(sims) - } - - var.inf <- 1000/mean(unlist(neff.keep)) - + + var.inf <- 1000 / mean(unlist(neff.keep)) + mean.mat <- as.data.frame(mean.keep) - - for(n in seq_len(ncol(mean.mat))){ - new.name <- pecan.pfts[grep(taxa[n],pecan.pfts,ignore.case = T)] - if(any(nchar(new.name))){ - colnames(mean.mat)[n] <- paste0('Fcomp.',new.name) + + for (n in seq_len(ncol(mean.mat))) { + new.name <- pecan.pfts[grep(taxa[n], pecan.pfts, ignore.case = T)] + if (any(nchar(new.name))) { + colnames(mean.mat)[n] <- paste0("Fcomp.", new.name) } } - + ##### ##### Calculating Mean and Covariance ##### obs.mean <- list() - for(n in 1:nrow(mean.mat)){ - obs.mean[[n]]<- mean.mat[n,] + for (n in 1:nrow(mean.mat)) { + obs.mean[[n]] <- mean.mat[n, ] } - - names(obs.mean) <- paste0(years,'/12/31') + + names(obs.mean) <- paste0(years, "/12/31") rownames(sims.keep) <- colnames(mean.mat) obs.cov <- list() - for(n in 1:length(ncin$dim$year$vals)){ - obs.cov[[n]] <- stats::cov(t(sims.keep[,n,])) #* var.inf + for (n in 1:length(ncin$dim$year$vals)) { + obs.cov[[n]] <- stats::cov(t(sims.keep[, n, ])) #* var.inf } - - names(obs.cov) <- paste0(years,'/12/31') - + + names(obs.cov) <- paste0(years, "/12/31") + #### Interpolate over all years which.keep <- list() - - for(n in obs.times){ + + for (n in obs.times) { min.vec <- stats::na.omit(as.numeric(n) - year(as.Date(names(obs.mean)))) - which.keep[[n]] <- which(min(abs(min.vec))==abs(min.vec)) + which.keep[[n]] <- which(min(abs(min.vec)) == abs(min.vec)) obs.mean.tmp[[n]] <- obs.mean[[which.keep[[n]][1]]] obs.cov.tmp[[n]] <- obs.cov[[which.keep[[n]][1]]] } - - names(obs.mean.tmp)<-paste0(obs.times,'/12/31') - names(obs.cov.tmp)<-paste0(obs.times,'/12/31') - + + names(obs.mean.tmp) <- paste0(obs.times, "/12/31") + names(obs.cov.tmp) <- paste0(obs.times, "/12/31") } - + ### Error Message for no data product - if(format_id[[i]] != '1000000040' & format_id[[i]] != '1000000058'){ - PEcAn.logger::logger.severe('ERROR: This data format has not been added to this function :(') + if (format_id[[i]] != "1000000040" & format_id[[i]] != "1000000058") { + PEcAn.logger::logger.severe("ERROR: This data format has not been added to this function :(") } - } - - ### Combine data if more than one type of data - if(i > 1){ - for(t in seq_along(obs.times)){ - 
obs.mean[[t]] <- c(obs.mean[[t]],unlist(obs.mean.tmp[[t]])) - obs.cov[[t]] <- magic::adiag(obs.cov[[t]],unlist(obs.cov.tmp[[t]])) - } - }else{ - obs.mean <- obs.mean.tmp - obs.cov <- obs.cov.tmp + + ### Combine data if more than one type of data + if (i > 1) { + for (t in seq_along(obs.times)) { + obs.mean[[t]] <- c(obs.mean[[t]], unlist(obs.mean.tmp[[t]])) + obs.cov[[t]] <- magic::adiag(obs.cov[[t]], unlist(obs.cov.tmp[[t]])) } - - names(obs.mean) <- paste0(obs.times,'/12/31') - names(obs.cov) <- paste0(obs.times,'/12/31') - - obs.list <- list(obs.mean = obs.mean, obs.cov = obs.cov) - save(obs.list,file=file.path(settings$outdir,'sda.obs.Rdata')) + } else { + obs.mean <- obs.mean.tmp + obs.cov <- obs.cov.tmp + } + + names(obs.mean) <- paste0(obs.times, "/12/31") + names(obs.cov) <- paste0(obs.times, "/12/31") -return(obs.list) + obs.list <- list(obs.mean = obs.mean, obs.cov = obs.cov) + save(obs.list, file = file.path(settings$outdir, "sda.obs.Rdata")) + return(obs.list) } diff --git a/modules/assim.sequential/R/matrix_operation.R b/modules/assim.sequential/R/matrix_operation.R index c5177c21ece..73501db5b30 100644 --- a/modules/assim.sequential/R/matrix_operation.R +++ b/modules/assim.sequential/R/matrix_operation.R @@ -1,16 +1,16 @@ ##' @title GrabFillMatrix ##' @name GrabFillMatrix ##' @author Dongchen Zhang -##' +##' ##' @param M source matrix that will be either subtracted or filled in. ##' @param ind vector of index that of where to be subtracted or filled in. ##' @param M1 additional matrix used to fill in the source matrix, the default it NULL. ##' @details This function helps subtract or fill in a matrix given the index. -##' +##' ##' @export -GrabFillMatrix <- function (M, ind, M1 = NULL) { +GrabFillMatrix <- function(M, ind, M1 = NULL) { if (is.null(M1)) { - #grab a sub-matrix + # grab a sub-matrix m <- matrix(NA, length(ind), length(ind)) for (i in seq_along(ind)) { for (j in seq_along(ind)) { @@ -18,7 +18,7 @@ GrabFillMatrix <- function (M, ind, M1 = NULL) { } } } else { - #fill into a larger matrix + # fill into a larger matrix m <- M for (i in seq_along(ind)) { for (j in seq_along(ind)) { @@ -32,40 +32,40 @@ GrabFillMatrix <- function (M, ind, M1 = NULL) { ##' @title matrix_network ##' @name matrix_network ##' @author Dongchen Zhang -##' +##' ##' @param mat a boolean matrix representing the interactions between any sites. -##' +##' ##' @return It returns lists of index representing each network. ##' ##' @export -matrix_network <- function (mat) { - #initialize the final returned list. +matrix_network <- function(mat) { + # initialize the final returned list. vec_group <- vector("list", ncol(mat)) - #initialize the vector for sites that are completed. + # initialize the vector for sites that are completed. sites.complete <- c() for (i in 1:ncol(mat)) { - #if we already completed the ith site, go next. + # if we already completed the ith site, go next. if (i %in% sites.complete) { next } - #initialize the arguments for the while loop. + # initialize the arguments for the while loop. vec <- c() stop <- FALSE inits <- i - #while loop + # while loop while (!stop) { Inits <- c() for (init in inits) { - Inits <- c(Inits, which(mat[init,])) + Inits <- c(Inits, which(mat[init, ])) } Inits <- Inits[which(!Inits %in% vec)] vec <- sort(unique(c(vec, Inits))) - #if we don't have any new site that belongs to this network. + # if we don't have any new site that belongs to this network. if (length(Inits) == 0) { - #then stop. + # then stop. 
stop <- !stop } else { - #else we initialize a new round of searching by new sites. + # else we initialize a new round of searching by new sites. inits <- Inits } } @@ -74,4 +74,4 @@ matrix_network <- function (mat) { } vec_group[sapply(vec_group, is.null)] <- NULL return(vec_group) -} \ No newline at end of file +} diff --git a/modules/assim.sequential/R/metSplit.R b/modules/assim.sequential/R/metSplit.R index e48156c0ccc..10c08e32e72 100644 --- a/modules/assim.sequential/R/metSplit.R +++ b/modules/assim.sequential/R/metSplit.R @@ -13,25 +13,24 @@ #' @export #' #' @return input.split object with split met filepaths -#' +#' #' @author Alexis Helgeson #' -metSplit <- function(conf.settings, inputs, settings, model, no_split = FALSE, obs.times, t, nens, restart_flag = FALSE, my.split_inputs){ - - #set start and end date for splitting met - start.time = obs.times[t - 1] #always start timestep before - - if(restart_flag){ - stop.time = settings$run$site$met.end - }else{ - stop.time = obs.times[t] +metSplit <- function(conf.settings, inputs, settings, model, no_split = FALSE, obs.times, t, nens, restart_flag = FALSE, my.split_inputs) { + # set start and end date for splitting met + start.time <- obs.times[t - 1] # always start timestep before + + if (restart_flag) { + stop.time <- settings$run$site$met.end + } else { + stop.time <- obs.times[t] } #-Splitting the input for the models that they don't care about the start and end time of simulations and they run as long as their met file. - inputs.split <- + inputs.split <- furrr::future_pmap(list(conf.settings %>% `class<-`(c("list")), inputs, model), function(settings, inputs, model) { # Loading the model package - this is required bc of the furrr - library(paste0("PEcAn.",model), character.only = TRUE) - + library(paste0("PEcAn.", model), character.only = TRUE) + inputs.split <- list() if (!no_split) { for (i in seq_len(nens)) { @@ -41,15 +40,16 @@ metSplit <- function(conf.settings, inputs, settings, model, no_split = FALSE, o args = list( settings = settings, start.time = (lubridate::ymd_hms(start.time, truncated = 3) + lubridate::second(lubridate::hms("00:00:01"))), - stop.time = lubridate::ymd_hms(stop.time, truncated = 3), - inputs = inputs$samples[[i]]) + stop.time = lubridate::ymd_hms(stop.time, truncated = 3), + inputs = inputs$samples[[i]] + ) ) } - } else{ + } else { inputs.split <- inputs } inputs.split }) - + return(inputs.split) } diff --git a/modules/assim.sequential/R/met_filtering_helpers.R b/modules/assim.sequential/R/met_filtering_helpers.R index 523d75bb685..f8a88ec8893 100644 --- a/modules/assim.sequential/R/met_filtering_helpers.R +++ b/modules/assim.sequential/R/met_filtering_helpers.R @@ -1,65 +1,60 @@ ##' Sample meteorological ensembles -##' +##' ##' @param settings PEcAn settings list ##' @param nens number of ensemble members to be sampled -##' +##' ##' @export -sample_met <- function(settings, nens=1){ - +sample_met <- function(settings, nens = 1) { # path where ensemble met folders are - if(length(settings$run$inputs$met[["path"]]) == 1){ + if (length(settings$run$inputs$met[["path"]]) == 1) { path <- settings$run$inputs$met[["path"]] - }else if(!is.null(settings$run$inputs$met[["path"]])){ # this function will be deprecated soon anyway + } else if (!is.null(settings$run$inputs$met[["path"]])) { # this function will be deprecated soon anyway path <- settings$run$inputs$met[["path"]][[1]] - }else{ + } else { PEcAn.logger::logger.error("Met path not found in settings.") } - - if(settings$host$name == "localhost"){ + + 
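# Illustrative aside, not part of the diff above: a minimal sketch of the
# timestamp arithmetic metSplit() applies, assuming annual assimilation steps.
# Each split starts one second after the previous observation time so that
# consecutive met chunks do not overlap; obs_times_demo is a hypothetical
# example vector, not an object used by the patched code.
obs_times_demo <- c("2004-12-31 23:59:59", "2005-12-31 23:59:59")
split_start <- lubridate::ymd_hms(obs_times_demo[1], truncated = 3) +
  lubridate::second(lubridate::hms("00:00:01")) # 2005-01-01 00:00:00 UTC
split_stop <- lubridate::ymd_hms(obs_times_demo[2], truncated = 3)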
if (settings$host$name == "localhost") { ens_members <- list.files(path, recursive = TRUE) - }else{ + } else { # remote - ens_members <- PEcAn.remote::remote.execute.cmd(host, paste0('ls -d -1 ', path, "/*.*")) + ens_members <- PEcAn.remote::remote.execute.cmd(host, paste0("ls -d -1 ", path, "/*.*")) } - + start_date <- as.POSIXlt((settings$run$site$met.start)) - end_date <- as.POSIXlt((settings$run$site$met.end)) + end_date <- as.POSIXlt((settings$run$site$met.end)) - #start_date <- as.POSIXlt(strptime(settings$run$site$met.start, "%Y/%m/%d")) - #end_date <- as.POSIXlt(strptime(settings$run$site$met.end, "%Y/%m/%d")) + # start_date <- as.POSIXlt(strptime(settings$run$site$met.start, "%Y/%m/%d")) + # end_date <- as.POSIXlt(strptime(settings$run$site$met.end, "%Y/%m/%d")) start_date$zone <- end_date$zone <- NULL - + # only the original (not-splitted) file has start and end date only - tmp_members <- gsub(paste0(".", start_date), "", ens_members) - tmp_members <- gsub(paste0(".", end_date), "", tmp_members) + tmp_members <- gsub(paste0(".", start_date), "", ens_members) + tmp_members <- gsub(paste0(".", end_date), "", tmp_members) member_names <- unique(dirname(ens_members)) - + # this will change from model to model, generalize later # This function is temporary but if we will continue to use this approach for met ensembles (instead of met process workflow) # it might not be a bad idea to have sample_met.model - if(settings$model$type == "ED2"){ + if (settings$model$type == "ED2") { # TODO : it doesn't have to be called ED_MET_DRIVER_HEADER - ens_members <- file.path(basename(ens_members), "ED_MET_DRIVER_HEADER") - ens_ind <- seq_along(ens_members) - }else if(settings$model$type == "SIPNET"){ + ens_members <- file.path(basename(ens_members), "ED_MET_DRIVER_HEADER") + ens_ind <- seq_along(ens_members) + } else if (settings$model$type == "SIPNET") { ens_ind <- unlist(sapply(paste0(member_names, ".clim"), grep, tmp_members)) - }else if(settings$model$type == "LINKAGES"){ + } else if (settings$model$type == "LINKAGES") { ens_ind <- seq_along(ens_members) } - + # ens_members[ens_ind] ens_input <- list() - for(i in seq_len(nens)){ - ens_input[[i]] <- list(met=NULL) + for (i in seq_len(nens)) { + ens_input[[i]] <- list(met = NULL) ens_input[[i]]$met$path <- file.path(path, ens_members[sample(ens_ind, 1)]) } - names(ens_input) <- rep("met",length=nens) + names(ens_input) <- rep("met", length = nens) return(ens_input) } - - - - diff --git a/modules/assim.sequential/R/sample.parameters.R b/modules/assim.sequential/R/sample.parameters.R index 4f745ba4ca1..48937a3a962 100644 --- a/modules/assim.sequential/R/sample.parameters.R +++ b/modules/assim.sequential/R/sample.parameters.R @@ -1,30 +1,33 @@ ##' @name sample.parameters ##' @title sample parameters ##' @author Michael Dietze \email{dietze@@bu.edu} -##' +##' ##' @param ne number of ensemble members ##' @param settings PEcAn settings object ##' @param con PEcAn database connection -##' +##' ##' @return data frame of sampled parameters from the posterior distribution -##' +##' sample.parameters <- function(ne, settings, con) { - ## grab posteriors from database if (is.null(settings$assim.sequential$prior)) { - pft.id <- PEcAn.DB::db.query(paste0("SELECT id from pfts where name = '", settings$pfts$pft$name, "'"), - con) + pft.id <- PEcAn.DB::db.query( + paste0("SELECT id from pfts where name = '", settings$pfts$pft$name, "'"), + con + ) priors <- PEcAn.DB::db.query(paste0("SELECT * from posteriors where pft_id = ", pft.id), con) ## by default, use the 
most recent posterior as the prior settings$assim.sequential$prior <- priors$id[which.max(priors$updated_at)] } - + ## load prior - prior.db <- PEcAn.DB::db.query(paste0("SELECT * from dbfiles where container_type = 'Posterior' and container_id = ", - settings$assim.sequential$prior), con) + prior.db <- PEcAn.DB::db.query(paste0( + "SELECT * from dbfiles where container_type = 'Posterior' and container_id = ", + settings$assim.sequential$prior + ), con) prior.db <- prior.db[grep("post.distns.Rdata", prior.db$file_name), ] load(file.path(prior.db$file_path, "post.distns.Rdata")) - + ## sample from priors nvar <- nrow(post.distns) prior <- as.data.frame(matrix(numeric(), ne, nvar)) @@ -32,12 +35,14 @@ sample.parameters <- function(ne, settings, con) { if (post.distns$distn[i] == "exp") { prior[, i] <- eval(parse(text = paste0("rexp(", ne, ",", post.distns$parama[i], ")"))) } else { - prior[, i] <- eval(parse(text = paste0("r", post.distns$distn[i], - "(", ne, ",", post.distns$parama[i], - ",", post.distns$paramb[i], ")"))) + prior[, i] <- eval(parse(text = paste0( + "r", post.distns$distn[i], + "(", ne, ",", post.distns$parama[i], + ",", post.distns$paramb[i], ")" + ))) } } colnames(prior) <- rownames(post.distns) - + return(prior) } # sample.parameters diff --git a/modules/assim.sequential/R/sda.enkf.R b/modules/assim.sequential/R/sda.enkf.R index 20f1674d034..48c00af11a2 100644 --- a/modules/assim.sequential/R/sda.enkf.R +++ b/modules/assim.sequential/R/sda.enkf.R @@ -6,7 +6,7 @@ ##' The function then dives right into the first Analysis, then continues on like normal. ##' ##' @author Michael Dietze and Ann Raiho \email{dietze@@bu.edu} -##' +##' ##' @param settings PEcAn settings object ##' @param obs.mean list of observations of the means of state variable (time X nstate) ##' @param obs.cov list of observations of covariance matrices of state variables (time X nstate X nstate) @@ -15,130 +15,137 @@ ##' @param adjustment flag for using ensemble adjustment filter or not ##' @param restart Used for iterative updating previous forecasts. This is a list that includes ens.inputs, the list of inputs by ensemble member, params, the parameters, and old_outdir, the output directory from the previous workflow. These three things are needed to ensure that if a new workflow is started that ensemble members keep there run-specific met and params. See Details ##' -##' +##' ##' @return NONE ##' @export -##' -sda.enkf.original <- function(settings, obs.mean, obs.cov, IC = NULL, Q = NULL, adjustment = TRUE, restart=NULL) { - +##' +sda.enkf.original <- function(settings, obs.mean, obs.cov, IC = NULL, Q = NULL, adjustment = TRUE, restart = NULL) { if (!requireNamespace("plyr", quietly = TRUE)) { PEcAn.logger::logger.error( "Can't find package 'plyr',", "needed by `PEcAnAssimSequential::sda.enkf.original()`.", - "Please install it and try again.") + "Please install it and try again." + ) } if (!requireNamespace("PEcAn.visualization", quietly = TRUE)) { PEcAn.logger::logger.error( "Can't find package 'PEcAn.visualization',", "needed by `PEcAnAssimSequential::sda.enkf.original()`.", - "Please install it and try again.") + "Please install it and try again." 
+ ) } ymd_hms <- lubridate::ymd_hms - hms <- lubridate::hms - second <- lubridate::second - - ###-------------------------------------------------------------------### + hms <- lubridate::hms + second <- lubridate::second + + ### -------------------------------------------------------------------### ### read settings ### - ###-------------------------------------------------------------------### - - model <- settings$model$type - write <- settings$database$bety$write - defaults <- settings$pfts - outdir <- settings$modeloutdir # currently model runs locally, this will change if remote is enabled - rundir <- settings$host$rundir - host <- settings$host - forecast.time.step <- settings$state.data.assimilation$forecast.time.step #idea for later generalizing - nens <- as.numeric(settings$state.data.assimilation$n.ensemble) + ### -------------------------------------------------------------------### + + model <- settings$model$type + write <- settings$database$bety$write + defaults <- settings$pfts + outdir <- settings$modeloutdir # currently model runs locally, this will change if remote is enabled + rundir <- settings$host$rundir + host <- settings$host + forecast.time.step <- settings$state.data.assimilation$forecast.time.step # idea for later generalizing + nens <- as.numeric(settings$state.data.assimilation$n.ensemble) processvar <- settings$state.data.assimilation$process.variance sample_parameters <- settings$state.data.assimilation$sample.parameters - var.names <- unlist(sapply(settings$state.data.assimilation$state.variable, - function(x) { - x$variable.name - }, - USE.NAMES = FALSE), - use.names = FALSE) + var.names <- unlist( + sapply(settings$state.data.assimilation$state.variable, + function(x) { + x$variable.name + }, + USE.NAMES = FALSE + ), + use.names = FALSE + ) names(var.names) <- NULL - dir.create(rundir,recursive=TRUE) # remote will give warning - - ###-------------------------------------------------------------------### + dir.create(rundir, recursive = TRUE) # remote will give warning + + ### -------------------------------------------------------------------### ### get model specific functions ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### do.call("require", list(paste0("PEcAn.", model))) - my.write.config <- paste0("write.config.", model) - my.read_restart <- paste0("read_restart.", model) + my.write.config <- paste0("write.config.", model) + my.read_restart <- paste0("read_restart.", model) my.write_restart <- paste0("write_restart.", model) - my.split_inputs <- paste0("split_inputs.", model) - + my.split_inputs <- paste0("split_inputs.", model) + # models that don't need split_inputs, check register file for that register.xml <- system.file(paste0("register.", model, ".xml"), package = paste0("PEcAn.", model)) register <- XML::xmlToList(XML::xmlParse(register.xml)) no_split <- !as.logical(register$exact.dates) - + if (!exists(my.write.config)) { PEcAn.logger::logger.warn(my.write.config, "does not exist") PEcAn.logger::logger.severe("please make sure that the PEcAn interface is loaded for", model) } - - if (!exists(my.split_inputs) & !no_split) { + + if (!exists(my.split_inputs) & !no_split) { PEcAn.logger::logger.warn(my.split_inputs, "does not exist") PEcAn.logger::logger.severe("please make sure that the PEcAn interface is loaded for", model) } - - ###-------------------------------------------------------------------### + + ### 
-------------------------------------------------------------------### ### load model specific input ensembles for initial runs ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### n.inputs <- max(table(names(settings$run$inputs))) - if(n.inputs >= nens){ + if (n.inputs >= nens) { sampleIDs <- 1:nens - }else{ - sampleIDs <- c(1:n.inputs,sample.int(n.inputs, (nens - n.inputs), replace = TRUE)) + } else { + sampleIDs <- c(1:n.inputs, sample.int(n.inputs, (nens - n.inputs), replace = TRUE)) } - - + + ens.inputs <- list() inputs <- list() - if(is.null(restart) & is.null(restart$ens.inputs)){ + if (is.null(restart) & is.null(restart$ens.inputs)) { ens.inputs <- settings$run$inputs$met$path %>% unlist() - }else { + } else { ens.inputs <- restart$ens.inputs } - - for(i in seq_len(nens)){ - - if(no_split){ # currently this is only for ED2, ensemble generator + refactoring will change these soon anyway + + for (i in seq_len(nens)) { + if (no_split) { # currently this is only for ED2, ensemble generator + refactoring will change these soon anyway # note that write configs accepts one "settings" for now, so I'll use the inputs arg to pass IC ensembles - inputs[[i]] <- lapply(settings$run$inputs, function(x) { - return( x %>% purrr::map(function(inputs){return((inputs%>%unlist)[i])})) + inputs[[i]] <- lapply(settings$run$inputs, function(x) { + return(x %>% purrr::map(function(inputs) { + return((inputs %>% unlist())[i]) + })) }) - inputs[[i]]$met <- ens.inputs[[i]]$met - }else{ + inputs[[i]]$met <- ens.inputs[[i]]$met + } else { ### get only necessary ensemble inputs. Do not change in analysis - #ens.inputs[[i]] <- get.ensemble.inputs(settings = settings, ens = sampleIDs[i]) + # ens.inputs[[i]] <- get.ensemble.inputs(settings = settings, ens = sampleIDs[i]) ### model specific split inputs - inputs[[i]] <- do.call(my.split_inputs, - args = list(settings = settings, - start.time = settings$run$start.date, - stop.time = as.Date(names(obs.mean)[1]),#settings$run$end.date, - inputs = ens.inputs[[i]]))#, + inputs[[i]] <- do.call(my.split_inputs, + args = list( + settings = settings, + start.time = settings$run$start.date, + stop.time = as.Date(names(obs.mean)[1]), # settings$run$end.date, + inputs = ens.inputs[[i]] + ) + ) # , # outpath = file.path(rundir,paste0("met",i)))) } -# ### get only necessary ensemble inputs. Do not change in analysis -# ens.inputs[[i]] <- get.ensemble.inputs(settings = settings, ens = sampleIDs[i]) -# ### model specific split inputs -# inputs[[i]] <- do.call(my.split_inputs, -# args = list(settings = settings, -# start.time = settings$run$start.date, -# stop.time = settings$run$end.date, #as.Date(names(obs.mean)[1]), -# inputs = ens.inputs[[i]]))#, -# # outpath = file.path(rundir,paste0("met",i)))) - -} - - ###-------------------------------------------------------------------### + # ### get only necessary ensemble inputs. 
Do not change in analysis + # ens.inputs[[i]] <- get.ensemble.inputs(settings = settings, ens = sampleIDs[i]) + # ### model specific split inputs + # inputs[[i]] <- do.call(my.split_inputs, + # args = list(settings = settings, + # start.time = settings$run$start.date, + # stop.time = settings$run$end.date, #as.Date(names(obs.mean)[1]), + # inputs = ens.inputs[[i]]))#, + # # outpath = file.path(rundir,paste0("met",i)))) + } + + ### -------------------------------------------------------------------### ### open database connection ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### if (write) { con <- try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) if (is(con, "try-error")) { @@ -149,51 +156,53 @@ sda.enkf.original <- function(settings, obs.mean, obs.cov, IC = NULL, Q = NULL, } else { con <- NULL } - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### get new workflow ids ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### if ("workflow" %in% names(settings)) { workflow.id <- settings$workflow$id } else { -# workflow.id <- -1 - settings <- PEcAn.settings::check.workflow.settings(settings,con) + # workflow.id <- -1 + settings <- PEcAn.settings::check.workflow.settings(settings, con) workflow.id <- settings$workflow$id - PEcAn.logger::logger.info("new workflow ID - ",workflow.id) + PEcAn.logger::logger.info("new workflow ID - ", workflow.id) } - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### create ensemble ids ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### if (!is.null(con)) { # write ensemble first result <- PEcAn.DB::db.query( paste( "INSERT INTO ensembles (runtype, workflow_id) ", "values ('EnKF', ", workflow.id, ") returning id", - sep = ""), - con) - ensemble.id <- result[['id']] + sep = "" + ), + con + ) + ensemble.id <- result[["id"]] } else { ensemble.id <- -1 } - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### perform initial set of runs ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### run.id <- list() X <- IC - + ## Load Parameters - if(is.null(restart) & is.null(restart$params)){ + if (is.null(restart) & is.null(restart$params)) { if (sample_parameters == TRUE) { settings$ensemble$size <- settings$state.data.assimilation$n.ensemble } else { settings$ensemble$size <- 1 } - + ###################################################################################################################### - # # + # # # NOTE: It's easiest to try to define priors such that sum of SIPNET allocation params "root_allocation_fraction", # # "wood_allocation_fraction" and "leaf_allocation_fraction" doesnt' exceed 1, # # if it exceeds runs will finish but you'll get 0 for AbvGrndWood which would affect your forecast ensemble # @@ -201,28 +210,28 @@ sda.enkf.original <- function(settings, obs.mean, obs.cov, IC = NULL, Q = NULL, # the commented out 
code below was to force their sum to be <1 leaving as a reminder until refactoring # # # ###################################################################################################################### - - - + + + # cumulative_ensemble_samples <- numeric(0) - # + # # repeat{ # temporary SIPNET hack, I want to make sure sum <1 for SIPNET - PEcAn.uncertainty::get.parameter.samples(settings, ens.sample.method = settings$ensemble$method) ## Aside: if method were set to unscented, would take minimal changes to do UnKF - load(file.path(settings$outdir, "samples.Rdata")) ## loads ensemble.samples + PEcAn.uncertainty::get.parameter.samples(settings, ens.sample.method = settings$ensemble$method) ## Aside: if method were set to unscented, would take minimal changes to do UnKF + load(file.path(settings$outdir, "samples.Rdata")) ## loads ensemble.samples # cumulative_ensemble_samples <- rbind(cumulative_ensemble_samples,ensemble.samples$temperate.deciduous_SDA) # tot_check <- apply(ensemble.samples$temperate.deciduous_SDA[,c(20, 25,27)],1,sum) < 1 # cumulative_ensemble_samples <- cumulative_ensemble_samples[tot_check,] # if(nrow(cumulative_ensemble_samples)>=nens){ # ensemble.samples$temperate.deciduous_SDA <- cumulative_ensemble_samples[seq_len(nens),] # break - # } + # } # } - - + + if ("env" %in% names(ensemble.samples)) { ensemble.samples$env <- NULL } - + params <- list() for (i in seq_len(nens)) { if (sample_parameters == TRUE) { @@ -232,106 +241,111 @@ sda.enkf.original <- function(settings, obs.mean, obs.cov, IC = NULL, Q = NULL, } else { params[[i]] <- ensemble.samples } - } + } } else { ## params exist from restart params <- restart$params } - + ## if a restart, get the old run folders - if(!is.null(restart)){ - if(is.null(restart$old_outdir)){ - old_outdir = settings$outdir ## if old_outdir not specified, restart in place + if (!is.null(restart)) { + if (is.null(restart$old_outdir)) { + old_outdir <- settings$outdir ## if old_outdir not specified, restart in place } else { - old_outdir = restart$old_outdir + old_outdir <- restart$old_outdir } - old_runs <- list.dirs(file.path(old_outdir,"out"),recursive=FALSE) + old_runs <- list.dirs(file.path(old_outdir, "out"), recursive = FALSE) ## select the _last_ nens - old_runs <- utils::tail(old_runs,nens) + old_runs <- utils::tail(old_runs, nens) } - - + + for (i in seq_len(nens)) { - # is this gonna break other model runs? inputs is usually just the met path which is all they need anyway? 
settings$run$inputs <- inputs[[i]] - + ## set RUN.ID if (!is.null(con)) { paramlist <- paste("EnKF:", i) run.id[[i]] <- PEcAn.DB::db.query( paste0( "INSERT INTO runs (", - "model_id, site_id, ", - "start_time, finish_time, ", - "outdir, ensemble_id, parameter_list) ", + "model_id, site_id, ", + "start_time, finish_time, ", + "outdir, ensemble_id, parameter_list) ", "VALUES ('", - settings$model$id, "', '", settings$run$site$id, "', '", - settings$run$start.date, "', '", settings$run$end.date, "', '", - settings$outdir, "', '", ensemble.id, ", '", paramlist, "') ", - "RETURNING id"), - con) + settings$model$id, "', '", settings$run$site$id, "', '", + settings$run$start.date, "', '", settings$run$end.date, "', '", + settings$outdir, "', '", ensemble.id, ", '", paramlist, "') ", + "RETURNING id" + ), + con + ) } else { run.id[[i]] <- paste("EnKF", i, sep = ".") } dir.create(file.path(settings$rundir, run.id[[i]]), recursive = TRUE) dir.create(file.path(settings$modeloutdir, run.id[[i]]), recursive = TRUE) - + ## Write Configs - if(is.null(restart)){ - do.call(what = my.write.config, args = list(defaults = NULL, - trait.values = params[[i]], - settings = settings, - run.id = run.id[[i]], - inputs = inputs[[i]], - IC = IC[i, ])) + if (is.null(restart)) { + do.call(what = my.write.config, args = list( + defaults = NULL, + trait.values = params[[i]], + settings = settings, + run.id = run.id[[i]], + inputs = inputs[[i]], + IC = IC[i, ] + )) } else { ## copy over old run's forecast - old_file <- file.path(old_runs[i],paste0(year(settings$run$start.date),".nc")) - file.copy(old_file,file.path(settings$modeloutdir, run.id[[i]])) - ## should swap this out for a symbolic link -- no need for duplication + old_file <- file.path(old_runs[i], paste0(year(settings$run$start.date), ".nc")) + file.copy(old_file, file.path(settings$modeloutdir, run.id[[i]])) + ## should swap this out for a symbolic link -- no need for duplication } - + ## write a README for the run cat("runtype : sda.enkf\n", - "workflow id : ", as.character(workflow.id), "\n", - "ensemble id : ", as.character(ensemble.id), "\n", - "ensemble : ", i, "\n", - "run id : ", as.character(run.id[[i]]), "\n", - "pft names : ", as.character(lapply(settings$pfts, function(x) x[["name"]])), "\n", - "model : ", model, "\n", - "model id : ", settings$model$id, "\n", - "site : ", settings$run$site$name, "\n", - "site id : ", settings$run$site$id, "\n", - "met data : ", inputs$met$path, "\n", - "start date : ", settings$run$start.date, "\n", - "end date : ", settings$run$end.date, "\n", - "hostname : ", settings$host$name, "\n", - "rundir : ", file.path(settings$host$rundir, run.id[[i]]), "\n", - "outdir : ", file.path(settings$host$outdir, run.id[[i]]), "\n", - file = file.path(settings$rundir, run.id[[i]], "README.txt"), - sep='') + "workflow id : ", as.character(workflow.id), "\n", + "ensemble id : ", as.character(ensemble.id), "\n", + "ensemble : ", i, "\n", + "run id : ", as.character(run.id[[i]]), "\n", + "pft names : ", as.character(lapply(settings$pfts, function(x) x[["name"]])), "\n", + "model : ", model, "\n", + "model id : ", settings$model$id, "\n", + "site : ", settings$run$site$name, "\n", + "site id : ", settings$run$site$id, "\n", + "met data : ", inputs$met$path, "\n", + "start date : ", settings$run$start.date, "\n", + "end date : ", settings$run$end.date, "\n", + "hostname : ", settings$host$name, "\n", + "rundir : ", file.path(settings$host$rundir, run.id[[i]]), "\n", + "outdir : ", file.path(settings$host$outdir, run.id[[i]]), "\n", + 
file = file.path(settings$rundir, run.id[[i]], "README.txt"), + sep = "" + ) } - + ## add the jobs to the list of runs - cat(as.character(unlist(run.id)), - file = file.path(settings$rundir, "runs.txt"), - sep = "\n", - append = FALSE) - + cat(as.character(unlist(run.id)), + file = file.path(settings$rundir, "runs.txt"), + sep = "\n", + append = FALSE + ) + ## start model runs - if(is.null(restart)){ + if (is.null(restart)) { PEcAn.workflow::start_model_runs(settings, settings$database$bety$write) } - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### ### tests before data assimilation ### - ###-------------------------------------------------------------------### - - # at some point add a lot of error checking + ### -------------------------------------------------------------------### + + # at some point add a lot of error checking # read time from data if data is missing you still need # to have NAs or NULL with date name vector to read the correct netcdfs by read_restart - + obs.times <- names(obs.mean) obs.times.POSIX <- ymd_hms(obs.times) @@ -340,217 +354,223 @@ sda.enkf.original <- function(settings, obs.mean, obs.cov, IC = NULL, Q = NULL, if (is.na(lubridate::ymd(obs.times[i]))) { print("Error: no dates associated with observations") } else { - ### Data does not have time associated with dates + ### Data does not have time associated with dates ### Adding 12:59:59PM assuming next time step starts one second later print("Pumpkin Warning: adding one minute before midnight time assumption to dates associated with data") obs.times.POSIX[i] <- ymd_hms(paste(obs.times[i], "23:59:59")) # if(nchar(year(obs.times.POSIX[i]))==3){ # #TODO: BROKEN: need to add leading zeros to years with less than 4 digits # obs.times.POSIX[i] <- paste0('0',ymd_hms(paste(obs.times[i], "23:59:59"))) - # } + # } } } } obs.times <- obs.times.POSIX - + # need explicit forecast length variable in settings start time, stop time, restart time if # restart time is not provided restart in stop time - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### set up for data assimilation ### - ###-------------------------------------------------------------------### - - nt <- length(obs.times) - FORECAST <- ANALYSIS <- list() + ### -------------------------------------------------------------------### + + nt <- length(obs.times) + FORECAST <- ANALYSIS <- list() enkf.params <- list() - aqq <- NULL - bqq <- numeric(nt + 1) - CI.X1 <- matrix(0, 3, nt) - CI.X2 <- CI.X1 - q.bar <- NULL #default process covariance matrix - + aqq <- NULL + bqq <- numeric(nt + 1) + CI.X1 <- matrix(0, 3, nt) + CI.X2 <- CI.X1 + q.bar <- NULL # default process covariance matrix + ##### Creating matrices that describe the bounds of the state variables ##### interval is remade everytime depending on the data at time t ##### state.interval stays constant and converts new.analysis to be within the correct bounds - interval <- NULL - state.interval <- cbind(as.numeric(lapply(settings$state.data.assimilation$state.variables,'[[','min_value')), - as.numeric(lapply(settings$state.data.assimilation$state.variables,'[[','max_value'))) + interval <- NULL + state.interval <- cbind( + as.numeric(lapply(settings$state.data.assimilation$state.variables, "[[", "min_value")), + as.numeric(lapply(settings$state.data.assimilation$state.variables, "[[", "max_value")) + ) 
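# Illustrative aside, not part of the diff above: a minimal sketch of the
# bounds matrix that state.interval holds after the cbind() above, assuming a
# hypothetical two-variable configuration. One row per state variable, columns
# are (min_value, max_value); the row names assigned just below let the
# analysis step keep new.analysis inside each variable's support.
demo_interval <- cbind(c(0, 0), c(Inf, 1))
rownames(demo_interval) <- c("AbvGrndWood", "Fcomp") # hypothetical variables
demo_interval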
rownames(state.interval) <- var.names - + wish.df <- function(Om, X, i, j, col) { (Om[i, j]^2 + Om[i, i] * Om[j, j]) / stats::var(X[, col]) } - + sampler_toggle <- nimble::nimbleFunction( contains = sampler_BASE, setup = function(model, mvSaved, target, control) { type <- control$type - nested_sampler_name <- paste0('sampler_', type) - control_new <- nimble::nimbleOptions('MCMCcontrolDefaultList') + nested_sampler_name <- paste0("sampler_", type) + control_new <- nimble::nimbleOptions("MCMCcontrolDefaultList") control_new[[names(control)]] <- control nested_sampler_list <- nimble::nimbleFunctionList(sampler_BASE) nested_sampler_list[[1]] <- do.call(nested_sampler_name, list(model, mvSaved, target, control_new)) toggle <- 1 }, run = function() { - if(toggle == 1) + if (toggle == 1) { nested_sampler_list[[1]]$run() + } }, methods = list( - reset = function() + reset = function() { nested_sampler_list[[1]]$reset() + } ) ) - - if(var.names=="Fcomp"){ - y_star_create <- nimble::nimbleFunction( + + if (var.names == "Fcomp") { + y_star_create <- nimble::nimbleFunction( run = function(X = double(1)) { returnType(double(1)) - + X_use <- X - X_use[X_use<0] <- 0 - y_star <- X_use/sum(X_use) - + X_use[X_use < 0] <- 0 + y_star <- X_use / sum(X_use) + return(y_star) - }) - }else{ - y_star_create <- nimble::nimbleFunction( + } + ) + } else { + y_star_create <- nimble::nimbleFunction( run = function(X = double(1)) { returnType(double(1)) - + y_star <- X - + return(y_star) - }) + } + ) } - - - tobit.model <- nimble::nimbleCode({ - - q[1:N,1:N] ~ dwish(R = aq[1:N,1:N], df = bq) ## aq and bq are estimated over time - Q[1:N,1:N] <- inverse(q[1:N,1:N]) - X.mod[1:N] ~ dmnorm(muf[1:N], prec = pf[1:N,1:N]) ## Model Forecast ##muf and pf are assigned from ensembles - + + + tobit.model <- nimble::nimbleCode({ + q[1:N, 1:N] ~ dwish(R = aq[1:N, 1:N], df = bq) ## aq and bq are estimated over time + Q[1:N, 1:N] <- inverse(q[1:N, 1:N]) + X.mod[1:N] ~ dmnorm(muf[1:N], prec = pf[1:N, 1:N]) ## Model Forecast ##muf and pf are assigned from ensembles + ## add process error - X[1:N] ~ dmnorm(X.mod[1:N], prec = q[1:N,1:N]) - - #observation operator + X[1:N] ~ dmnorm(X.mod[1:N], prec = q[1:N, 1:N]) + + # observation operator y_star[1:YN] <- y_star_create(X[1:YN]) - + ## Analysis - y.censored[1:YN] ~ dmnorm(y_star[1:YN], prec = r[1:YN,1:YN]) - - #don't flag y.censored as data, y.censored in inits - #remove y.censored samplers and only assign univariate samplers on NAs - - for(i in 1:YN){ + y.censored[1:YN] ~ dmnorm(y_star[1:YN], prec = r[1:YN, 1:YN]) + + # don't flag y.censored as data, y.censored in inits + # remove y.censored samplers and only assign univariate samplers on NAs + + for (i in 1:YN) { y.ind[i] ~ dinterval(y.censored[i], 0) } - }) - + tobit2space.model <- nimble::nimbleCode({ - for(i in 1:N){ - y.censored[i,1:J] ~ dmnorm(muf[1:J], cov = pf[1:J,1:J]) - for(j in 1:J){ - y.ind[i,j] ~ dinterval(y.censored[i,j], 0) + for (i in 1:N) { + y.censored[i, 1:J] ~ dmnorm(muf[1:J], cov = pf[1:J, 1:J]) + for (j in 1:J) { + y.ind[i, j] ~ dinterval(y.censored[i, j], 0) } } - - muf[1:J] ~ dmnorm(mean = mu_0[1:J], cov = pf[1:J,1:J]) - - Sigma[1:J,1:J] <- lambda_0[1:J,1:J]/nu_0 - pf[1:J,1:J] ~ dinvwish(S = Sigma[1:J,1:J], df = J) - + + muf[1:J] ~ dmnorm(mean = mu_0[1:J], cov = pf[1:J, 1:J]) + + Sigma[1:J, 1:J] <- lambda_0[1:J, 1:J] / nu_0 + pf[1:J, 1:J] ~ dinvwish(S = Sigma[1:J, 1:J], df = J) }) - + tobit2space.model <- nimble::nimbleCode({ - for(i in 1:N){ - y.censored[i,1:J] ~ dmnorm(muf[1:J], cov = pf[1:J,1:J]) - for(j in 1:J){ - 
y.ind[i,j] ~ dinterval(y.censored[i,j], 0) + for (i in 1:N) { + y.censored[i, 1:J] ~ dmnorm(muf[1:J], cov = pf[1:J, 1:J]) + for (j in 1:J) { + y.ind[i, j] ~ dinterval(y.censored[i, j], 0) } } - - muf[1:J] ~ dmnorm(mean = mu_0[1:J], cov = pf[1:J,1:J]) - - Sigma[1:J,1:J] <- lambda_0[1:J,1:J]/nu_0 - pf[1:J,1:J] ~ dinvwish(S = Sigma[1:J,1:J], df = J) - + + muf[1:J] ~ dmnorm(mean = mu_0[1:J], cov = pf[1:J, 1:J]) + + Sigma[1:J, 1:J] <- lambda_0[1:J, 1:J] / nu_0 + pf[1:J, 1:J] ~ dinvwish(S = Sigma[1:J, 1:J], df = J) }) - - t1 <- 1 - pink <- grDevices::col2rgb("deeppink") - alphapink <- grDevices::rgb(pink[1], pink[2], pink[3], 180, max = 255) - green <- grDevices::col2rgb("green") + + t1 <- 1 + pink <- grDevices::col2rgb("deeppink") + alphapink <- grDevices::rgb(pink[1], pink[2], pink[3], 180, max = 255) + green <- grDevices::col2rgb("green") alphagreen <- grDevices::rgb(green[1], green[2], green[3], 75, max = 255) - blue <- grDevices::col2rgb("blue") - alphablue <- grDevices::rgb(blue[1], blue[2], blue[3], 75, max = 255) - purple <- grDevices::col2rgb("purple") + blue <- grDevices::col2rgb("blue") + alphablue <- grDevices::rgb(blue[1], blue[2], blue[3], 75, max = 255) + purple <- grDevices::col2rgb("purple") alphapurple <- grDevices::rgb(purple[1], purple[2], purple[3], 75, max = 255) - brown <- grDevices::col2rgb("brown") + brown <- grDevices::col2rgb("brown") alphabrown <- grDevices::rgb(brown[1], brown[2], brown[3], 75, max = 255) # weight matrix wt.mat <- matrix(NA, nrow = nens, ncol = nt) - - save(list = ls(envir = environment(), all.names = TRUE), - file = file.path(outdir, "sda.initial.runs.Rdata"), envir = environment()) - - ###-------------------------------------------------------------------### + + save( + list = ls(envir = environment(), all.names = TRUE), + file = file.path(outdir, "sda.initial.runs.Rdata"), envir = environment() + ) + + ### -------------------------------------------------------------------### ### loop over time ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### -for(t in seq_len(nt)) { # - if(t == 1){ - recompile = TRUE - }else{ - recompile = FALSE + for (t in seq_len(nt)) { # + if (t == 1) { + recompile <- TRUE + } else { + recompile <- FALSE } - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### ### read restart ### - ###-------------------------------------------------------------------### - X_tmp <- vector("list", 2) + ### -------------------------------------------------------------------### + X_tmp <- vector("list", 2) X <- list() new.params <- params - - # var.names <- c("AbvGrndWood", "GWBI", "TotLivBiom", "leaf_carbon_content") + + # var.names <- c("AbvGrndWood", "GWBI", "TotLivBiom", "leaf_carbon_content") for (i in seq_len(nens)) { - X_tmp[[i]] <- do.call(my.read_restart, args = list(outdir = outdir, - runid = run.id[[i]], - stop.time = obs.times[t], - settings = settings, - var.names = var.names, - params = params[[i]])) + X_tmp[[i]] <- do.call(my.read_restart, args = list( + outdir = outdir, + runid = run.id[[i]], + stop.time = obs.times[t], + settings = settings, + var.names = var.names, + params = params[[i]] + )) # states will be in X, but we also want to carry some deterministic relationships to write_restart # these will be stored in params - X[[i]] <- X_tmp[[i]]$X + X[[i]] <- X_tmp[[i]]$X new.params[[i]] <- X_tmp[[i]]$params } - + ## Trying to 
find a way to flag incomplete runs and drop them. - for(i in seq_len(length(run.id))){ - if(is.na(X[[i]][1])) { + for (i in seq_len(length(run.id))) { + if (is.na(X[[i]][1])) { print(i) - #run.id[[i]] <- NULL - #X[[i]] <- NULL + # run.id[[i]] <- NULL + # X[[i]] <- NULL } } - + X <- do.call(rbind, X) FORECAST[[t]] <- X - + obs <- which(!is.na(obs.mean[[t]])) - + mu.f <- as.numeric(apply(X, 2, mean, na.rm = TRUE)) Pf <- stats::cov(X) pmiss <- which(diag(Pf) == 0) diag(Pf)[pmiss] <- 0.1 ## hack for zero variance - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### analysis ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### if (any(obs)) { # if no observations skip analysis # choose <- na.omit(charmatch( @@ -560,28 +580,30 @@ for(t in seq_len(nt)) { # # na.omit(unlist(lapply(strsplit(names(obs.mean[[t]]), # split = var.names), #TO DO don't hardcode this # function(x) x[2]))))) #matches y to model - # - - choose <- stats::na.omit(charmatch(colnames(X),names(obs.mean[[t]]))) - + # + + choose <- stats::na.omit(charmatch(colnames(X), names(obs.mean[[t]]))) + Y <- unlist(obs.mean[[t]][choose]) - Y[is.na(Y)] <- 0 - - R <- as.matrix(obs.cov[[t]][choose,choose]) - R[is.na(R)]<-0 - + Y[is.na(Y)] <- 0 + + R <- as.matrix(obs.cov[[t]][choose, choose]) + R[is.na(R)] <- 0 + if (length(obs.mean[[t]]) > 1) { - diag(R)[which(diag(R)==0)] <- min(diag(R)[which(diag(R) != 0)])/2 - diag(Pf)[which(diag(Pf)==0)] <- min(diag(Pf)[which(diag(Pf) != 0)])/5 + diag(R)[which(diag(R) == 0)] <- min(diag(R)[which(diag(R) != 0)]) / 2 + diag(Pf)[which(diag(Pf) == 0)] <- min(diag(Pf)[which(diag(Pf) != 0)]) / 5 } - + ### TO DO: plotting not going to work because of observation operator i.e. 
y and x are on different scales - - + + #### Plot Data and Forecast - if (FALSE) {#interactive() & t > 1 + if (FALSE) { # interactive() & t > 1 t1 <- 1 - names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { names(x) }))) + names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { + names(x) + }))) Ybar <- t(sapply(obs.mean[t1:t], function(x) { tmp <- rep(NA, length(names.y)) names(tmp) <- names.y @@ -589,7 +611,7 @@ for(t in seq_len(nt)) { # tmp[mch] <- x[mch] return(tmp) })) - + Ybar <- Ybar[, stats::na.omit(pmatch(colnames(X), colnames(Ybar)))] YCI <- t(as.matrix(sapply(obs.cov[t1:t], function(x) { if (is.null(x)) { @@ -597,428 +619,464 @@ for(t in seq_len(nt)) { # } return(sqrt(diag(x))) }))) - + for (i in sample(x = 1:ncol(X), size = 2)) { t1 <- 1 - Xbar <- plyr::laply(FORECAST[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - Xci <- plyr::laply(FORECAST[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975)) }) - - plot(as.Date(obs.times[t1:t]), - Xbar, - ylim = range(c(Ybar, Xci), na.rm = TRUE), - type = "n", - xlab = "Year", - ylab = "kg/m^2", - main = colnames(X)[i]) - + Xbar <- plyr::laply(FORECAST[t1:t], function(x) { + mean(x[, i], na.rm = TRUE) + }) + Xci <- plyr::laply(FORECAST[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975)) + }) + + plot(as.Date(obs.times[t1:t]), + Xbar, + ylim = range(c(Ybar, Xci), na.rm = TRUE), + type = "n", + xlab = "Year", + ylab = "kg/m^2", + main = colnames(X)[i] + ) + # observation / data if (i <= ncol(Ybar)) { - PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), - as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, - as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, - col = alphagreen) - graphics::lines(as.Date(obs.times[t1:t]), as.numeric(Ybar[, i]), type = "l", - col = "darkgreen", lwd = 2) + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), + as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, + as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, + col = alphagreen + ) + graphics::lines(as.Date(obs.times[t1:t]), as.numeric(Ybar[, i]), + type = "l", + col = "darkgreen", lwd = 2 + ) } - + # forecast - PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = alphablue) # col='lightblue') + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = alphablue) # col='lightblue') graphics::lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) } } - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### Kalman Filter ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### if (processvar == FALSE) { ## design matrix - H <- matrix(0, length(Y), ncol(X)) #H maps true state to observed state - #linear + H <- matrix(0, length(Y), ncol(X)) # H maps true state to observed state + # linear for (i in choose) { H[i, i] <- 1 } - #non-linear fcomp + # non-linear fcomp # for (i in choose) { - # H[i, i] <- 1/sum(mu.f) #? this seems to get us on the right track. mu.f[i]/sum(mu.f) doesn't work. + # H[i, i] <- 1/sum(mu.f) #? this seems to get us on the right track. mu.f[i]/sum(mu.f) doesn't work. 
# } ## process error if (!is.null(Q)) { Pf <- Pf + Q } - + ## Kalman Gain K <- Pf %*% t(H) %*% solve((R + H %*% Pf %*% t(H))) # Analysis mu.a <- mu.f + K %*% (Y - H %*% mu.f) - Pa <- (diag(ncol(X)) - K %*% H) %*% Pf + Pa <- (diag(ncol(X)) - K %*% H) %*% Pf enkf.params[[t]] <- list(mu.f = mu.f, Pf = Pf, mu.a = mu.a, Pa = Pa) - } else { - ### create matrix the describes the support for each observed state variable at time t intervalX <- matrix(NA, ncol(X), 2) rownames(intervalX) <- colnames(X) - #TO DO: Not working for fcomp - for(i in 1:length(var.names)){ - intervalX[which(startsWith(rownames(intervalX), - var.names[i])), ] <- matrix(c(as.numeric(settings$state.data.assimilation$state.variables[[i]]$min_value), - as.numeric(settings$state.data.assimilation$state.variables[[i]]$max_value)), - length(which(startsWith(rownames(intervalX), - var.names[i]))),2,byrow = TRUE) + # TO DO: Not working for fcomp + for (i in 1:length(var.names)) { + intervalX[which(startsWith( + rownames(intervalX), + var.names[i] + )), ] <- matrix( + c( + as.numeric(settings$state.data.assimilation$state.variables[[i]]$min_value), + as.numeric(settings$state.data.assimilation$state.variables[[i]]$max_value) + ), + length(which(startsWith( + rownames(intervalX), + var.names[i] + ))), 2, + byrow = TRUE + ) } - + #### These vectors are used to categorize data based on censoring from the interval matrix - x.ind <- x.censored <- matrix(NA, ncol=ncol(X), nrow=nrow(X)) - for(j in seq_along(mu.f)){ - for(n in seq_len(nrow(X))){ - x.ind[n,j] <- as.numeric(X[n,j] > 0) - x.censored[n,j] <- as.numeric(ifelse(X[n,j] > intervalX[j,2], 0, X[n,j])) # + x.ind <- x.censored <- matrix(NA, ncol = ncol(X), nrow = nrow(X)) + for (j in seq_along(mu.f)) { + for (n in seq_len(nrow(X))) { + x.ind[n, j] <- as.numeric(X[n, j] > 0) + x.censored[n, j] <- as.numeric(ifelse(X[n, j] > intervalX[j, 2], 0, X[n, j])) # } } - - - if(recompile == TRUE){ - #The purpose of this step is to impute data for mu.f - #where there are zero values so that - #mu.f is in 'tobit space' in the full model - constants.tobit2space = list(N = nrow(X), - J = length(mu.f)) - - data.tobit2space = list(y.ind = x.ind, - y.censored = x.censored, - mu_0 = rep(0,length(mu.f)), - lambda_0 = diag(10,length(mu.f)), - nu_0 = 3)#some measure of prior obs - - inits.tobit2space = list(pf = Pf, muf = colMeans(X)) #pf = cov(X) - #set.seed(0) - #ptm <- proc.time() - tobit2space_pred <- nimble::nimbleModel(tobit2space.model, data = data.tobit2space, - constants = constants.tobit2space, inits = inits.tobit2space, - name = 'space') + + + if (recompile == TRUE) { + # The purpose of this step is to impute data for mu.f + # where there are zero values so that + # mu.f is in 'tobit space' in the full model + constants.tobit2space <- list( + N = nrow(X), + J = length(mu.f) + ) + + data.tobit2space <- list( + y.ind = x.ind, + y.censored = x.censored, + mu_0 = rep(0, length(mu.f)), + lambda_0 = diag(10, length(mu.f)), + nu_0 = 3 + ) # some measure of prior obs + + inits.tobit2space <- list(pf = Pf, muf = colMeans(X)) # pf = cov(X) + # set.seed(0) + # ptm <- proc.time() + tobit2space_pred <- nimble::nimbleModel(tobit2space.model, + data = data.tobit2space, + constants = constants.tobit2space, inits = inits.tobit2space, + name = "space" + ) ## Adding X.mod,q,r as data for building model. 
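# Illustrative aside, not part of the diff above: a one-dimensional numeric
# check of the no-process-variance Kalman analysis shown earlier in this hunk,
# with hypothetical numbers (forecast mean 10 and variance 4, observation 12
# with error variance 1, identity observation operator H = 1).
K_demo <- 4 / (1 + 4) # Kalman gain K = Pf H' (R + H Pf H')^-1 = 0.8
mu_demo <- 10 + K_demo * (12 - 10) # analysis mean mu.a = 11.6, pulled toward the data
Pa_demo <- (1 - K_demo) * 4 # analysis variance Pa = 0.8, reduced from Pf = 4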
- conf_tobit2space <- configureMCMC(tobit2space_pred, thin = 10, print=TRUE) - conf_tobit2space$addMonitors(c("pf", "muf","y.censored")) + conf_tobit2space <- configureMCMC(tobit2space_pred, thin = 10, print = TRUE) + conf_tobit2space$addMonitors(c("pf", "muf", "y.censored")) ## [1] conjugate_dmnorm_dmnorm sampler: X[1:5] ## important! ## this is needed for correct indexing later samplerNumberOffset_tobit2space <- length(conf_tobit2space$getSamplers()) - - for(j in seq_along(mu.f)){ - for(n in seq_len(nrow(X))){ - node <- paste0('y.censored[',n,',',j,']') - conf_tobit2space$addSampler(node, 'toggle', control=list(type='RW')) + + for (j in seq_along(mu.f)) { + for (n in seq_len(nrow(X))) { + node <- paste0("y.censored[", n, ",", j, "]") + conf_tobit2space$addSampler(node, "toggle", control = list(type = "RW")) ## could instead use slice samplers, or any combination thereof, e.g.: - ##conf$addSampler(node, 'toggle', control=list(type='slice')) + ## conf$addSampler(node, 'toggle', control=list(type='slice')) } } - - #conf_tobit2space$printSamplers() - + + # conf_tobit2space$printSamplers() + Rmcmc_tobit2space <- nimble::buildMCMC(conf_tobit2space) - + Cmodel_tobit2space <- nimble::compileNimble(tobit2space_pred) Cmcmc_tobit2space <- nimble::compileNimble(Rmcmc_tobit2space, project = tobit2space_pred) - - for(i in seq_along(X)) { + + for (i in seq_along(X)) { ## ironically, here we have to "toggle" the value of y.ind[i] ## this specifies that when y.ind[i] = 1, ## indicator variable is set to 0, which specifies *not* to sample - nimble::valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space+i]], 'toggle', 1-x.ind[i]) + nimble::valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space + i]], "toggle", 1 - x.ind[i]) } - - }else{ + } else { Cmodel_tobit2space$y.ind <- x.ind Cmodel_tobit2space$y.censored <- x.censored - - inits.tobit2space = list(pf = Pf, muf = colMeans(X)) + + inits.tobit2space <- list(pf = Pf, muf = colMeans(X)) Cmodel_tobit2space$setInits(inits.tobit2space) - - for(i in seq_along(X)) { + + for (i in seq_along(X)) { ## ironically, here we have to "toggle" the value of y.ind[i] ## this specifies that when y.ind[i] = 1, ## indicator variable is set to 0, which specifies *not* to sample - nimble::valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space+i]], 'toggle', 1-x.ind[i]) + nimble::valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space + i]], "toggle", 1 - x.ind[i]) } - } - + set.seed(0) - dat.tobit2space <- runMCMC(Cmcmc_tobit2space, niter = 50000, progressBar=TRUE) - - grDevices::pdf(file.path(outdir,paste0('assessParams',t,'.pdf'))) - - assessParams(dat = dat.tobit2space[1000:5000,], Xt = X) + dat.tobit2space <- runMCMC(Cmcmc_tobit2space, niter = 50000, progressBar = TRUE) + + grDevices::pdf(file.path(outdir, paste0("assessParams", t, ".pdf"))) + + assessParams(dat = dat.tobit2space[1000:5000, ], Xt = X) grDevices::dev.off() - + ## update parameters - dat.tobit2space <- dat.tobit2space[1000:5000, ] - imuf <- grep("muf", colnames(dat.tobit2space)) + dat.tobit2space <- dat.tobit2space[1000:5000, ] + imuf <- grep("muf", colnames(dat.tobit2space)) mu.f <- colMeans(dat.tobit2space[, imuf]) - iPf <- grep("pf", colnames(dat.tobit2space)) - Pf <- matrix(colMeans(dat.tobit2space[, iPf]),ncol(X),ncol(X)) - - iycens <- grep("y.censored",colnames(dat.tobit2space)) - + iPf <- grep("pf", colnames(dat.tobit2space)) + Pf <- 
matrix(colMeans(dat.tobit2space[, iPf]), ncol(X), ncol(X)) + + iycens <- grep("y.censored", colnames(dat.tobit2space)) + # Why does cov(X.new) != Pf ? - X.new <- matrix(colMeans(dat.tobit2space[,iycens]),nrow(X),ncol(X)) - #Pf <- cov(X.new) - - if (sum(diag(Pf) - diag(stats::cov(X.new))) > 3 - || sum(diag(Pf) - diag(stats::cov(X.new))) < -3) { - PEcAn.logger::logger.warn('Covariance in tobit2space model estimate is too different from original forecast covariance. Consider increasing your number of ensemble members.') + X.new <- matrix(colMeans(dat.tobit2space[, iycens]), nrow(X), ncol(X)) + # Pf <- cov(X.new) + + if (sum(diag(Pf) - diag(stats::cov(X.new))) > 3 || + sum(diag(Pf) - diag(stats::cov(X.new))) < -3) { + PEcAn.logger::logger.warn("Covariance in tobit2space model estimate is too different from original forecast covariance. Consider increasing your number of ensemble members.") } - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### Generalized Ensemble Filter ### - ###-------------------------------------------------------------------### - + ### -------------------------------------------------------------------### + #### initial conditions - bqq[1] <- length(mu.f) - if(is.null(aqq)){ - aqq <- array(0, dim = c(nt,ncol(X),ncol(X))) - }else{ - if(ncol(X)!=dim(aqq)[2]|ncol(X)!=dim(aqq)[3]){ - print('error: X has changed dimensions') + bqq[1] <- length(mu.f) + if (is.null(aqq)) { + aqq <- array(0, dim = c(nt, ncol(X), ncol(X))) + } else { + if (ncol(X) != dim(aqq)[2] | ncol(X) != dim(aqq)[3]) { + print("error: X has changed dimensions") } } - aqq[1, , ] <- diag(length(mu.f)) * bqq[1] #Q - + aqq[1, , ] <- diag(length(mu.f)) * bqq[1] # Q + ### create matrix the describes the support for each observed state variable at time t interval <- matrix(NA, length(obs.mean[[t]]), 2) rownames(interval) <- names(obs.mean[[t]]) - for(i in 1:length(var.names)){ - interval[which(startsWith(rownames(interval), - var.names[i])), ] <- matrix(c(as.numeric(settings$state.data.assimilation$state.variables[[i]]$min_value), - as.numeric(settings$state.data.assimilation$state.variables[[i]]$max_value)), - length(which(startsWith(rownames(interval), - var.names[i]))),2,byrow = TRUE) + for (i in 1:length(var.names)) { + interval[which(startsWith( + rownames(interval), + var.names[i] + )), ] <- matrix( + c( + as.numeric(settings$state.data.assimilation$state.variables[[i]]$min_value), + as.numeric(settings$state.data.assimilation$state.variables[[i]]$max_value) + ), + length(which(startsWith( + rownames(interval), + var.names[i] + ))), 2, + byrow = TRUE + ) } - #### These vectors are used to categorize data based on censoring + #### These vectors are used to categorize data based on censoring #### from the interval matrix - y.ind <- as.numeric(Y > interval[,1]) - y.censored <- as.numeric(ifelse(Y > interval[,1], Y, 0)) - - if(recompile == TRUE){ #TO DO need to make something that works to pick weather to compile or not - - constants.tobit = list(N = ncol(X), YN = length(y.ind)) - dimensions.tobit = list(X = length(mu.f), X.mod = ncol(X), - Q = c(length(mu.f),length(mu.f))) - - data.tobit = list(muf = as.vector(mu.f), - pf = solve(Pf), - aq = aqq[t,,], bq = bqq[t], + y.ind <- as.numeric(Y > interval[, 1]) + y.censored <- as.numeric(ifelse(Y > interval[, 1], Y, 0)) + + if (recompile == TRUE) { # TO DO need to make something that works to pick weather to compile or not + + constants.tobit <- list(N = ncol(X), 
YN = length(y.ind)) + dimensions.tobit <- list( + X = length(mu.f), X.mod = ncol(X), + Q = c(length(mu.f), length(mu.f)) + ) + + data.tobit <- list( + muf = as.vector(mu.f), + pf = solve(Pf), + aq = aqq[t, , ], bq = bqq[t], y.ind = y.ind, y.censored = y.censored, - r = solve(R)) - inits.pred = list(q = diag(length(mu.f)), X.mod = as.vector(mu.f), - X = stats::rnorm(length(mu.f),0,1)) # - - model_pred <- nimble::nimbleModel(tobit.model, data = data.tobit, dimensions = dimensions.tobit, - constants = constants.tobit, inits = inits.pred, - name = 'base') + r = solve(R) + ) + inits.pred <- list( + q = diag(length(mu.f)), X.mod = as.vector(mu.f), + X = stats::rnorm(length(mu.f), 0, 1) + ) # + + model_pred <- nimble::nimbleModel(tobit.model, + data = data.tobit, dimensions = dimensions.tobit, + constants = constants.tobit, inits = inits.pred, + name = "base" + ) ## Adding X.mod,q,r as data for building model. - conf <- configureMCMC(model_pred, print=TRUE) - conf$addMonitors(c("X","q","Q")) + conf <- configureMCMC(model_pred, print = TRUE) + conf$addMonitors(c("X", "q", "Q")) ## [1] conjugate_dmnorm_dmnorm sampler: X[1:5] ## important! ## this is needed for correct indexing later samplerNumberOffset <- length(conf$getSamplers()) - - for(i in 1:length(y.ind)) { - node <- paste0('y.censored[',i,']') - conf$addSampler(node, 'toggle', control=list(type='RW')) + + for (i in 1:length(y.ind)) { + node <- paste0("y.censored[", i, "]") + conf$addSampler(node, "toggle", control = list(type = "RW")) ## could instead use slice samplers, or any combination thereof, e.g.: - ##conf$addSampler(node, 'toggle', control=list(type='slice')) + ## conf$addSampler(node, 'toggle', control=list(type='slice')) } - + conf$printSamplers() - + ## can monitor y.censored, if you wish, to verify correct behaviour - #conf$addMonitors('y.censored') - + # conf$addMonitors('y.censored') + Rmcmc <- nimble::buildMCMC(conf) - + Cmodel <- nimble::compileNimble(model_pred) Cmcmc <- nimble::compileNimble(Rmcmc, project = model_pred) - - for(i in 1:length(y.ind)) { + + for (i in 1:length(y.ind)) { ## ironically, here we have to "toggle" the value of y.ind[i] ## this specifies that when y.ind[i] = 1, ## indicator variable is set to 0, which specifies *not* to sample - nimble::valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset+i]], 'toggle', 1-y.ind[i]) + nimble::valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset + i]], "toggle", 1 - y.ind[i]) } - - }else{ + } else { Cmodel$y.ind <- y.ind Cmodel$y.censored <- y.censored - Cmodel$aq <- aqq[t,,] + Cmodel$aq <- aqq[t, , ] Cmodel$bq <- bqq[t] Cmodel$muf <- mu.f Cmodel$pf <- solve(Pf) Cmodel$r <- solve(R) - - inits.pred = list(q = diag(length(mu.f)), X.mod = as.vector(mu.f), - X = stats::rnorm(ncol(X),0,1)) # + + inits.pred <- list( + q = diag(length(mu.f)), X.mod = as.vector(mu.f), + X = stats::rnorm(ncol(X), 0, 1) + ) # Cmodel$setInits(inits.pred) - - for(i in 1:length(y.ind)) { + + for (i in 1:length(y.ind)) { ## ironically, here we have to "toggle" the value of y.ind[i] ## this specifies that when y.ind[i] = 1, ## indicator variable is set to 0, which specifies *not* to sample - nimble::valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset+i]], 'toggle', 1-y.ind[i]) + nimble::valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset + i]], "toggle", 1 - y.ind[i]) } - } - + set.seed(0) dat <- runMCMC(Cmcmc, niter = 50000) - + ## update parameters - dat <- dat[10000:50000, ] - iq <- grep("q", colnames(dat)) - iX 
<- grep("X[", colnames(dat), fixed = TRUE) + dat <- dat[10000:50000, ] + iq <- grep("q", colnames(dat)) + iX <- grep("X[", colnames(dat), fixed = TRUE) mu.a <- colMeans(dat[, iX]) - Pa <- cov(dat[, iX]) + Pa <- cov(dat[, iX]) Pa[is.na(Pa)] <- 0 - + CI.X1[, t] <- stats::quantile(dat[, iX[1]], c(0.025, 0.5, 0.975)) CI.X2[, t] <- stats::quantile(dat[, iX[2]], c(0.025, 0.5, 0.975)) - - mq <- dat[, iq] # Omega, Precision - q.bar <- matrix(apply(mq, 2, mean), length(mu.f), length(mu.f)) # Mean Omega, Precision - - col <- matrix(1:length(mu.f) ^ 2, length(mu.f), length(mu.f)) - WV <- matrix(0, length(mu.f), length(mu.f)) + + mq <- dat[, iq] # Omega, Precision + q.bar <- matrix(apply(mq, 2, mean), length(mu.f), length(mu.f)) # Mean Omega, Precision + + col <- matrix(1:length(mu.f)^2, length(mu.f), length(mu.f)) + WV <- matrix(0, length(mu.f), length(mu.f)) for (i in seq_along(mu.f)) { for (j in seq_along(mu.f)) { WV[i, j] <- wish.df(q.bar, X = mq, i = i, j = j, col = col[i, j]) } } - + n <- mean(WV) if (n < length(mu.f)) { n <- length(mu.f) } V <- solve(q.bar) * n - - aqq[t + 1, , ] <- V - bqq[t + 1] <- n - enkf.params[[t]] <- list(mu.f = mu.f, Pf = Pf, mu.a = mu.a, - Pa = Pa, q.bar = q.bar, n = n) - + + aqq[t + 1, , ] <- V + bqq[t + 1] <- n + enkf.params[[t]] <- list( + mu.f = mu.f, Pf = Pf, mu.a = mu.a, + Pa = Pa, q.bar = q.bar, n = n + ) } - } else { - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### ### No Observations -- Starts Here ### - ###-------------------------------------------------------------------### - + ### -------------------------------------------------------------------### + ### no process variance -- forecast is the same as the analysis ### ### this logic might require more explanation. Why are we giving Q if there is no process variance? - if (processvar==FALSE) { + if (processvar == FALSE) { mu.a <- mu.f - Pa <- Pf + Q + Pa <- Pf + Q ### yes process variance -- no data } else { mu.a <- mu.f - if(is.null(q.bar)){ + if (is.null(q.bar)) { q.bar <- diag(ncol(X)) - print('Process variance not estimated. Analysis has been given uninformative process variance') - } - Pa <- Pf + solve(q.bar) # should this be V instead of solve(q.bar)? + print("Process variance not estimated. Analysis has been given uninformative process variance") + } + Pa <- Pf + solve(q.bar) # should this be V instead of solve(q.bar)? 
} enkf.params[[t]] <- list(mu.f = mu.f, Pf = Pf, mu.a = mu.a, Pa = Pa) } - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### update state matrix ### - ###-------------------------------------------------------------------### - if(adjustment == TRUE){ - - if(!any(obs)){ + ### -------------------------------------------------------------------### + if (adjustment == TRUE) { + if (!any(obs)) { X.new <- X } - + ## normalize - Z <- X*0 - - for(i in seq_len(nrow(X))){ - if(processvar == TRUE) { - Z[i,] <- 1/sqrt(L_f) * t(V_f)%*%(X.new[i,]-mu.f) - }else{ - Z[i,] <- 1/sqrt(L_f) * t(V_f)%*%(X[i,]-mu.f) + Z <- X * 0 + + for (i in seq_len(nrow(X))) { + if (processvar == TRUE) { + Z[i, ] <- 1 / sqrt(L_f) * t(V_f) %*% (X.new[i, ] - mu.f) + } else { + Z[i, ] <- 1 / sqrt(L_f) * t(V_f) %*% (X[i, ] - mu.f) } } - Z[is.na(Z)]<-0 - - S_f <- svd(Pf) - L_f <- S_f$d - V_f <- S_f$v - + Z[is.na(Z)] <- 0 + + S_f <- svd(Pf) + L_f <- S_f$d + V_f <- S_f$v + ## analysis - S_a <- svd(Pa) - L_a <- S_a$d - V_a <- S_a$v - - + S_a <- svd(Pa) + L_a <- S_a$d + V_a <- S_a$v + + ## analysis ensemble - X_a <- X*0 - for(i in seq_len(nrow(X))){ - X_a[i,] <- V_a %*%diag(sqrt(L_a))%*%Z[i,] + mu.a + X_a <- X * 0 + for (i in seq_len(nrow(X))) { + X_a[i, ] <- V_a %*% diag(sqrt(L_a)) %*% Z[i, ] + mu.a } - + # # calculate likelihoods -# for(i in seq_len(nens)){ -# wt.mat[i,t]<-dmnorm_chol(FORECAST[[t]][i,], mu.a, solve(Pa), log = TRUE) -# } - - if (sum(mu.a - colMeans(X_a)) > 1 - || sum(mu.a - colMeans(X_a)) < -1) { - PEcAn.logger::logger.warn('Problem with ensemble adjustment (1)') + # for(i in seq_len(nens)){ + # wt.mat[i,t]<-dmnorm_chol(FORECAST[[t]][i,], mu.a, solve(Pa), log = TRUE) + # } + + if (sum(mu.a - colMeans(X_a)) > 1 || + sum(mu.a - colMeans(X_a)) < -1) { + PEcAn.logger::logger.warn("Problem with ensemble adjustment (1)") } - if (sum(diag(Pa) - diag(cov(X_a))) > 5 - || sum(diag(Pa) - diag(cov(X_a))) < -5) { - PEcAn.logger::logger.warn('Problem with ensemble adjustment (2)') + if (sum(diag(Pa) - diag(cov(X_a))) > 5 || + sum(diag(Pa) - diag(cov(X_a))) < -5) { + PEcAn.logger::logger.warn("Problem with ensemble adjustment (2)") } - + analysis <- as.data.frame(X_a) - }else{ - - if(length(is.na(Pa)) == length(Pa)){ + } else { + if (length(is.na(Pa)) == length(Pa)) { analysis <- mu.a - }else{ + } else { analysis <- as.data.frame(mvtnorm::rmvnorm(as.numeric(nrow(X)), mu.a, Pa, method = "svd")) - } - - } - - if(nens == 1){ - analysis <- t(as.matrix(analysis)) } - + + if (nens == 1) { + analysis <- t(as.matrix(analysis)) + } + colnames(analysis) <- colnames(X) ##### Mapping analysis vectors to be in bounds of state variables - if(processvar==TRUE){ - for(i in 1:ncol(analysis)){ - int.save <- state.interval[which(startsWith(colnames(analysis)[i], - var.names)),] - analysis[analysis[,i] < int.save[1],i] <- int.save[1] - analysis[analysis[,i] > int.save[2],i] <- int.save[2] + if (processvar == TRUE) { + for (i in 1:ncol(analysis)) { + int.save <- state.interval[which(startsWith( + colnames(analysis)[i], + var.names + )), ] + analysis[analysis[, i] < int.save[1], i] <- int.save[1] + analysis[analysis[, i] > int.save[2], i] <- int.save[2] } } - + ## in the future will have to be separated from analysis - new.state <- analysis + new.state <- analysis + - ANALYSIS[[t]] <- as.matrix(analysis) if (interactive() & t > 1) { # t1 <- 1 - names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { names(x) }))) + names.y <- 
unique(unlist(lapply(obs.mean[t1:t], function(x) { + names(x) + }))) Ybar <- t(sapply(obs.mean[t1:t], function(x) { tmp <- rep(NA, length(names.y)) names(tmp) <- names.y @@ -1026,162 +1084,177 @@ for(t in seq_len(nt)) { # tmp[mch] <- x[mch] tmp })) - - if(any(obs)){ + + if (any(obs)) { Y.order <- stats::na.omit(pmatch(colnames(X), colnames(Ybar))) - Ybar <- Ybar[,Y.order] + Ybar <- Ybar[, Y.order] Ybar[is.na(Ybar)] <- 0 YCI <- t(as.matrix(sapply(obs.cov[t1:t], function(x) { - if (length(x)<2) { + if (length(x) < 2) { rep(NA, length(names.y)) } sqrt(diag(x)) }))) - - YCI <- YCI[,Y.order] - YCI[is.na(YCI)] <- 0 - }else{ - YCI <- matrix(NA,nrow=length(t1:t), ncol=max(length(names.y),1)) + YCI <- YCI[, Y.order] + YCI[is.na(YCI)] <- 0 + } else { + YCI <- matrix(NA, nrow = length(t1:t), ncol = max(length(names.y), 1)) } - + graphics::par(mfrow = c(2, 1)) for (i in 1:ncol(FORECAST[[t]])) { # - - Xbar <- plyr::laply(FORECAST[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - Xci <- plyr::laply(FORECAST[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) }) - - Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) }) - - ylab.names <- unlist(sapply(settings$state.data.assimilation$state.variable, - function(x) { x })[2, ], use.names = FALSE) - + + Xbar <- plyr::laply(FORECAST[t1:t], function(x) { + mean(x[, i], na.rm = TRUE) + }) + Xci <- plyr::laply(FORECAST[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) + }) + + Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { + mean(x[, i], na.rm = TRUE) + }) + XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) + }) + + ylab.names <- unlist(sapply( + settings$state.data.assimilation$state.variable, + function(x) { + x + } + )[2, ], use.names = FALSE) + # observation / data if (i <= ncol(Ybar) & any(obs)) { - plot(as.Date(obs.times[t1:t]), - Xbar, - ylim = range(c(XaCI, Xci, Ybar[,i]), na.rm = TRUE), - type = "n", - xlab = "Year", - ylab = ylab.names[grep(colnames(X)[i], var.names)], - main = colnames(X)[i]) + plot(as.Date(obs.times[t1:t]), + Xbar, + ylim = range(c(XaCI, Xci, Ybar[, i]), na.rm = TRUE), + type = "n", + xlab = "Year", + ylab = ylab.names[grep(colnames(X)[i], var.names)], + main = colnames(X)[i] + ) PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), - as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, - as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, - col = alphagreen) - graphics::lines(as.Date(obs.times[t1:t]), - as.numeric(Ybar[, i]), - type = "l", - col = "darkgreen", - lwd = 2) - }else{ - plot(as.Date(obs.times[t1:t]), - Xbar, - ylim = range(c(XaCI, Xci), na.rm = TRUE), - type = "n", - xlab = "Year", - ylab = ylab.names[grep(colnames(X)[i], var.names)], - main = colnames(X)[i]) + as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, + as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, + col = alphagreen + ) + graphics::lines(as.Date(obs.times[t1:t]), + as.numeric(Ybar[, i]), + type = "l", + col = "darkgreen", + lwd = 2 + ) + } else { + plot(as.Date(obs.times[t1:t]), + Xbar, + ylim = range(c(XaCI, Xci), na.rm = TRUE), + type = "n", + xlab = "Year", + ylab = ylab.names[grep(colnames(X)[i], var.names)], + main = colnames(X)[i] + ) } - + # forecast - PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = alphablue) #col='lightblue') + 
PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = alphablue) # col='lightblue') graphics::lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) - + # analysis PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), XaCI[, 1], XaCI[, 2], col = alphapink) graphics::lines(as.Date(obs.times[t1:t]), Xa, col = "black", lty = 2, lwd = 2) - #legend('topright',c('Forecast','Data','Analysis'),col=c(alphablue,alphagreen,alphapink),lty=1,lwd=5) + # legend('topright',c('Forecast','Data','Analysis'),col=c(alphablue,alphagreen,alphapink),lty=1,lwd=5) } } - #dev.off() - ###-------------------------------------------------------------------### + # dev.off() + ### -------------------------------------------------------------------### ### forecast step ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### if (t < nt) { - - - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### ### split model specific inputs for current runs ### - ###-------------------------------------------------------------------### - - if(!no_split){ - + ### -------------------------------------------------------------------### + + if (!no_split) { inputs <- list() - for(i in seq_len(nens)){ - inputs[[i]] <- do.call(my.split_inputs, - args = list(settings = settings, - start.time = (ymd_hms(obs.times[t],truncated = 3) + second(hms("00:00:01"))), - stop.time = obs.times[t + 1], - inputs = ens.inputs[[i]])) - + for (i in seq_len(nens)) { + inputs[[i]] <- do.call(my.split_inputs, + args = list( + settings = settings, + start.time = (ymd_hms(obs.times[t], truncated = 3) + second(hms("00:00:01"))), + stop.time = obs.times[t + 1], + inputs = ens.inputs[[i]] + ) + ) } } - - - ###-------------------------------------------------------------------### + + + ### -------------------------------------------------------------------### ### write restart by ensemble ### - ###-------------------------------------------------------------------### - + ### -------------------------------------------------------------------### + for (i in seq_len(nens)) { - settings$run$inputs <- inputs[[i]] - - do.call(my.write_restart, - args = list(outdir = outdir, - runid = run.id[[i]], - start.time = strptime(obs.times[t],format="%Y-%m-%d %H:%M:%S"), - stop.time = strptime(obs.times[t + 1],format="%Y-%m-%d %H:%M:%S"), - settings = settings, - new.state = new.state[i, ], - new.params = new.params[[i]], - inputs = inputs[[i]], - RENAME = TRUE)) + + do.call(my.write_restart, + args = list( + outdir = outdir, + runid = run.id[[i]], + start.time = strptime(obs.times[t], format = "%Y-%m-%d %H:%M:%S"), + stop.time = strptime(obs.times[t + 1], format = "%Y-%m-%d %H:%M:%S"), + settings = settings, + new.state = new.state[i, ], + new.params = new.params[[i]], + inputs = inputs[[i]], + RENAME = TRUE + ) + ) } - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### ### Run model ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### print(paste("Running Model for Year", as.Date(obs.times[t]) + 1)) PEcAn.workflow::start_model_runs(settings, settings$database$bety$write) } - - 
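The write-restart loop above invokes the model-specific restart writer by name through do.call(). A tiny generic illustration of that dispatch pattern; write_restart.TOYMODEL and its arguments are hypothetical and not a PEcAn API:

# Dispatch-by-name sketch (hypothetical model and function; illustrative only)
write_restart.TOYMODEL <- function(outdir, runid, new.state, ...) {
  message("writing restart for run ", runid, " to ", outdir)
  invisible(new.state)
}

model <- "TOYMODEL"
my.write_restart <- paste0("write_restart.", model)   # "write_restart.TOYMODEL"
do.call(my.write_restart,
        args = list(
          outdir = tempdir(), runid = "ens_1",
          new.state = c(AbvGrndWood = 12.3, LAI = 4.1)
        ))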
###-------------------------------------------------------------------### - ### save outputs ### - ###-------------------------------------------------------------------### - save(t, FORECAST, ANALYSIS, enkf.params, file = file.path(settings$outdir,'out', "sda.output.Rdata")) + ### -------------------------------------------------------------------### + ### save outputs ### + ### -------------------------------------------------------------------### + save(t, FORECAST, ANALYSIS, enkf.params, file = file.path(settings$outdir, "out", "sda.output.Rdata")) + } ## end loop over time + ### ------------------------------------------- - - } ## end loop over time - ###------------------------------------------- - - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### ### create diagnostics ### - ###-------------------------------------------------------------------### - + ### -------------------------------------------------------------------### + ### LOAD CLIMATE ### HACK ### LINKAGES SPECIFIC if (model == "LINKAGES") { climate_file <- settings$run$inputs$met$path load(climate_file) - temp.mat <- temp.mat[year(obs.times) - 853, ] - precip.mat <- precip.mat[year(obs.times) - 853, ] + temp.mat <- temp.mat[year(obs.times) - 853, ] + precip.mat <- precip.mat[year(obs.times) - 853, ] } else { print("climate diagnostics under development") } - - if(is.null(X)){ + + if (is.null(X)) { X <- as.matrix(FORECAST[[t]]) } - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### time series ### - ###-------------------------------------------------------------------### - - if(nens > 1){ + ### -------------------------------------------------------------------### + + if (nens > 1) { grDevices::pdf(file.path(settings$outdir, "sda.enkf.time-series.pdf")) - - names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { names(x) }))) + + names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { + names(x) + }))) Ybar <- t(sapply(obs.mean[t1:t], function(x) { tmp <- rep(NA, length(names.y)) names(tmp) <- names.y @@ -1190,173 +1263,203 @@ for(t in seq_len(nt)) { # tmp })) Y.order <- stats::na.omit(pmatch(colnames(FORECAST[[t]]), colnames(Ybar))) - Ybar <- Ybar[,Y.order] + Ybar <- Ybar[, Y.order] YCI <- t(as.matrix(sapply(obs.cov[t1:t], function(x) { if (is.null(x)) { rep(NA, length(names.y)) } sqrt(diag(x)) }))) - - Ybar[is.na(Ybar)]<-0 - YCI[is.na(YCI)]<-0 - - YCI <- YCI[,Y.order] - Xsum <- plyr::laply(FORECAST, function(x) { mean(rowSums(x[,1:length(names.y)], na.rm = TRUE)) })[t1:t] - Xasum <- plyr::laply(ANALYSIS, function(x) { mean(rowSums(x[,1:length(names.y)], na.rm = TRUE)) })[t1:t] - + + Ybar[is.na(Ybar)] <- 0 + YCI[is.na(YCI)] <- 0 + + YCI <- YCI[, Y.order] + Xsum <- plyr::laply(FORECAST, function(x) { + mean(rowSums(x[, 1:length(names.y)], na.rm = TRUE)) + })[t1:t] + Xasum <- plyr::laply(ANALYSIS, function(x) { + mean(rowSums(x[, 1:length(names.y)], na.rm = TRUE)) + })[t1:t] + for (i in seq_len(ncol(X))) { Xbar <- plyr::laply(FORECAST[t1:t], function(x) { - mean(x[, i], na.rm = TRUE) }) #/rowSums(x[,1:9],na.rm = T) - Xci <- plyr::laply(FORECAST[t1:t], function(x) { - stats::quantile(x[, i], c(0.025, 0.975),na.rm = T) }) - - Xci[is.na(Xci)]<-0 - + mean(x[, i], na.rm = TRUE) + }) # /rowSums(x[,1:9],na.rm = T) + Xci <- plyr::laply(FORECAST[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975), na.rm 
= T) + }) + + Xci[is.na(Xci)] <- 0 + Xbar <- Xbar Xci <- Xci - - Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { - - mean(x[, i],na.rm = T) }) - XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { - quantile(x[, i], c(0.025, 0.975),na.rm = T )}) - + + Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { + mean(x[, i], na.rm = T) + }) + XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { + quantile(x[, i], c(0.025, 0.975), na.rm = T) + }) + Xa <- Xa XaCI <- XaCI - + plot(as.Date(obs.times[t1:t]), - Xbar, - ylim = range(c(XaCI, Xci), na.rm = TRUE), - type = "n", - xlab = "Year", - ylab = ylab.names[grep(colnames(X)[i], var.names)], - main = colnames(X)[i]) - + Xbar, + ylim = range(c(XaCI, Xci), na.rm = TRUE), + type = "n", + xlab = "Year", + ylab = ylab.names[grep(colnames(X)[i], var.names)], + main = colnames(X)[i] + ) + # observation / data - if (i<10) { # - PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), - as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, - as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, - col = alphagreen) - graphics::lines(as.Date(obs.times[t1:t]), - as.numeric(Ybar[, i]), - type = "l", col = "darkgreen", lwd = 2) + if (i < 10) { # + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), + as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, + as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, + col = alphagreen + ) + graphics::lines(as.Date(obs.times[t1:t]), + as.numeric(Ybar[, i]), + type = "l", col = "darkgreen", lwd = 2 + ) } - + # forecast - PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = alphablue) #col='lightblue') #alphablue - graphics::lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) #"darkblue" - + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = alphablue) # col='lightblue') #alphablue + graphics::lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) # "darkblue" + # analysis - PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), XaCI[, 1], XaCI[, 2], col = alphapink) #alphapink - graphics::lines(as.Date(obs.times[t1:t]), Xa, col = "black", lty = 2, lwd = 2) #"black" - - graphics::legend('topright',c('Forecast','Data','Analysis'),col=c(alphablue,alphagreen,alphapink),lty=1,lwd=5) - + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), XaCI[, 1], XaCI[, 2], col = alphapink) # alphapink + graphics::lines(as.Date(obs.times[t1:t]), Xa, col = "black", lty = 2, lwd = 2) # "black" + + graphics::legend("topright", c("Forecast", "Data", "Analysis"), col = c(alphablue, alphagreen, alphapink), lty = 1, lwd = 5) } - + grDevices::dev.off() - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### ### bias diagnostics ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### grDevices::pdf(file.path(settings$outdir, "bias.diagnostic.pdf")) for (i in seq_along(obs.mean[[1]])) { - Xbar <- plyr::laply(FORECAST[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - Xci <- plyr::laply(FORECAST[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975)) }) - - Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975)) }) - - if(length(which(is.na(Ybar[,i])))>=length(t1:t)) next() + Xbar <- plyr::laply(FORECAST[t1:t], function(x) { + 
mean(x[, i], na.rm = TRUE) + }) + Xci <- plyr::laply(FORECAST[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975)) + }) + + Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { + mean(x[, i], na.rm = TRUE) + }) + XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975)) + }) + + if (length(which(is.na(Ybar[, i]))) >= length(t1:t)) next() reg <- stats::lm(Xbar[t1:t] - unlist(Ybar[, i]) ~ c(t1:t)) - plot(t1:t, - Xbar - unlist(Ybar[, i]), - pch = 16, cex = 1, - ylim = c(min(Xci[, 1] - unlist(Ybar[, i])), max(Xci[,2] - unlist(Ybar[, i]))), - xlab = "Time", - ylab = "Error", - main = paste(colnames(X)[i], " Error = Forecast - Data")) - PEcAn.visualization::ciEnvelope(rev(t1:t), - rev(Xci[, 1] - unlist(Ybar[, i])), - rev(Xci[, 2] - unlist(Ybar[, i])), - col = alphabrown) + plot(t1:t, + Xbar - unlist(Ybar[, i]), + pch = 16, cex = 1, + ylim = c(min(Xci[, 1] - unlist(Ybar[, i])), max(Xci[, 2] - unlist(Ybar[, i]))), + xlab = "Time", + ylab = "Error", + main = paste(colnames(X)[i], " Error = Forecast - Data") + ) + PEcAn.visualization::ciEnvelope(rev(t1:t), + rev(Xci[, 1] - unlist(Ybar[, i])), + rev(Xci[, 2] - unlist(Ybar[, i])), + col = alphabrown + ) graphics::abline(h = 0, lty = 2, lwd = 2) graphics::abline(reg) - graphics::mtext(paste("slope =", signif(summary(reg)$coefficients[2], digits = 3), - "intercept =", signif(summary(reg)$coefficients[1], digits = 3))) + graphics::mtext(paste( + "slope =", signif(summary(reg)$coefficients[2], digits = 3), + "intercept =", signif(summary(reg)$coefficients[1], digits = 3) + )) # d<-density(c(Xbar[t1:t] - unlist(Ybar[t1:t,i]))) lines(d$y+1,d$x) - + # forecast minus analysis = update reg1 <- stats::lm(Xbar - Xa ~ c(t1:t)) - plot(t1:t, - Xbar - Xa, - pch = 16, cex = 1, - ylim = c(min(Xbar - XaCI[, 2]), max(Xbar - XaCI[, 1])), - xlab = "Time", ylab = "Update", - main = paste(colnames(X)[i], - "Update = Forecast - Analysis")) - PEcAn.visualization::ciEnvelope(rev(t1:t), - rev(Xbar - XaCI[, 1]), - rev(Xbar - XaCI[, 2]), - col = alphapurple) + plot(t1:t, + Xbar - Xa, + pch = 16, cex = 1, + ylim = c(min(Xbar - XaCI[, 2]), max(Xbar - XaCI[, 1])), + xlab = "Time", ylab = "Update", + main = paste( + colnames(X)[i], + "Update = Forecast - Analysis" + ) + ) + PEcAn.visualization::ciEnvelope(rev(t1:t), + rev(Xbar - XaCI[, 1]), + rev(Xbar - XaCI[, 2]), + col = alphapurple + ) graphics::abline(h = 0, lty = 2, lwd = 2) graphics::abline(reg1) - graphics::mtext(paste("slope =", signif(summary(reg1)$coefficients[2], digits = 3), - "intercept =", signif(summary(reg1)$coefficients[1], - digits = 3))) + graphics::mtext(paste( + "slope =", signif(summary(reg1)$coefficients[2], digits = 3), + "intercept =", signif(summary(reg1)$coefficients[1], + digits = 3 + ) + )) # d<-density(c(Xbar[t1:t] - Xa[t1:t])) lines(d$y+1,d$x) - - dat <- data.frame(model = Xbar, obvs = Ybar[,i], time = rownames(Ybar)) - dat.stats <- data.frame(rmse = PEcAn.benchmark::metric_RMSE(dat), - r2 = PEcAn.benchmark::metric_R2(dat), - rae = PEcAn.benchmark::metric_RAE(dat), - ame = PEcAn.benchmark::metric_AME(dat)) + + dat <- data.frame(model = Xbar, obvs = Ybar[, i], time = rownames(Ybar)) + dat.stats <- data.frame( + rmse = PEcAn.benchmark::metric_RMSE(dat), + r2 = PEcAn.benchmark::metric_R2(dat), + rae = PEcAn.benchmark::metric_RAE(dat), + ame = PEcAn.benchmark::metric_AME(dat) + ) plot1 <- PEcAn.benchmark::metric_residual_plot(dat, var = colnames(Ybar)[i]) plot2 <- PEcAn.benchmark::metric_scatter_plot(dat, var = colnames(Ybar)[i]) - 
#PEcAn.benchmark::metric_lmDiag_plot(dat, var = colnames(Ybar)[i]) + # PEcAn.benchmark::metric_lmDiag_plot(dat, var = colnames(Ybar)[i]) plot3 <- PEcAn.benchmark::metric_timeseries_plot(dat, var = colnames(Ybar)[i]) - text = paste("\n The following is text that'll appear in a plot window.\n", - " As you can see, it's in the plot window\n", - " One might imagine useful informaiton here") - if(requireNamespace("gridExtra")){ - ss <- gridExtra::tableGrob(signif(dat.stats,digits = 3)) - gridExtra::grid.arrange(plot1,plot2,plot3,ss,ncol=2) + text <- paste( + "\n The following is text that'll appear in a plot window.\n", + " As you can see, it's in the plot window\n", + " One might imagine useful informaiton here" + ) + if (requireNamespace("gridExtra")) { + ss <- gridExtra::tableGrob(signif(dat.stats, digits = 3)) + gridExtra::grid.arrange(plot1, plot2, plot3, ss, ncol = 2) } else { print(plot1) print(plot2) print(plot3) } - } grDevices::dev.off() - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### process variance plots ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### if (processvar) { - - grDevices::pdf('process.var.plots.pdf') - + grDevices::pdf("process.var.plots.pdf") + cor.mat <- stats::cov2cor(solve(enkf.params[[t]]$q.bar)) colnames(cor.mat) <- colnames(X) rownames(cor.mat) <- colnames(X) graphics::par(mfrow = c(1, 1), mai = c(1, 1, 4, 1)) - corrplot::corrplot(cor.mat, type = "upper", tl.srt = 45,order='FPC') - - graphics::par(mfrow=c(1,1)) - plot(as.Date(obs.times[t1:t]), unlist(lapply(enkf.params,'[[','n')), - pch = 16, cex = 1, - ylab = "Degrees of Freedom", xlab = "Time") - + corrplot::corrplot(cor.mat, type = "upper", tl.srt = 45, order = "FPC") + + graphics::par(mfrow = c(1, 1)) + plot(as.Date(obs.times[t1:t]), unlist(lapply(enkf.params, "[[", "n")), + pch = 16, cex = 1, + ylab = "Degrees of Freedom", xlab = "Time" + ) + grDevices::dev.off() - } - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### climate plots ### - ###-------------------------------------------------------------------### - + ### -------------------------------------------------------------------### + # plot(rowMeans(temp.mat[5:t,]), # Xbar[5:t] - unlist(Ybar[5:t,i]), # xlim=range(rowMeans(temp.mat[5:t,])), @@ -1364,26 +1467,27 @@ for(t in seq_len(nt)) { # # xlab="Average Monthly Temp", # ylab="Error", # main=colnames(Ybar)[i]) - # + # # plot(rowSums(precip.mat[5:t,]), # Xbar[5:t] - unlist(Ybar[5:t,i]), # xlim=range(rowSums(precip.mat[5:t,])), # ylim = range(Xbar [5:t]- unlist(Ybar[5:t,i])), # pch=16,cex=1,xlab="Total Yearly Precip", # ylab="Error",main=colnames(Ybar)[i]) - # + # # plot(rowMeans(temp.mat[5:t,]),Xbar[5:t] - Xa[5:t],pch=16, # cex=1,xlab="Average Monthly Temp", # ylab="Update",main=colnames(Ybar)[i]) # plot(rowSums(precip.mat[5:t,]),Xbar[5:t] - Xa[5:t],pch=16, # cex=1, xlab="Total Yearly Precip", # ylab="Update",main=colnames(Ybar)[i]) - } - + grDevices::pdf(file.path(settings$outdir, "sda.enkf.time-series.pdf")) - - names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { names(x) }))) + + names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { + names(x) + }))) Ybar <- t(sapply(obs.mean[t1:t], function(x) { tmp <- rep(NA, length(names.y)) names(tmp) <- names.y @@ 
-1392,151 +1496,178 @@ for(t in seq_len(nt)) { # tmp })) Y.order <- stats::na.omit(pmatch(colnames(FORECAST[[t]]), colnames(Ybar))) - Ybar <- Ybar[,Y.order] + Ybar <- Ybar[, Y.order] YCI <- t(as.matrix(sapply(obs.cov[t1:t], function(x) { if (is.null(x)) { rep(NA, length(names.y)) } sqrt(diag(x)) }))) - - Ybar[is.na(Ybar)]<-0 - YCI[is.na(YCI)]<-0 - - YCI <- YCI[,Y.order] - Xsum <- plyr::laply(FORECAST, function(x) { mean(rowSums(x[,1:length(names.y)], na.rm = TRUE)) })[t1:t] - Xasum <- plyr::laply(ANALYSIS, function(x) { mean(rowSums(x[,1:length(names.y)], na.rm = TRUE)) })[t1:t] - + + Ybar[is.na(Ybar)] <- 0 + YCI[is.na(YCI)] <- 0 + + YCI <- YCI[, Y.order] + Xsum <- plyr::laply(FORECAST, function(x) { + mean(rowSums(x[, 1:length(names.y)], na.rm = TRUE)) + })[t1:t] + Xasum <- plyr::laply(ANALYSIS, function(x) { + mean(rowSums(x[, 1:length(names.y)], na.rm = TRUE)) + })[t1:t] + for (i in seq_len(ncol(X))) { Xbar <- plyr::laply(FORECAST[t1:t], function(x) { - mean(x[, i], na.rm = TRUE) }) #/rowSums(x[,1:9],na.rm = T) - Xci <- plyr::laply(FORECAST[t1:t], function(x) { - stats::quantile(x[, i], c(0.025, 0.975),na.rm = T) }) - - Xci[is.na(Xci)]<-0 - + mean(x[, i], na.rm = TRUE) + }) # /rowSums(x[,1:9],na.rm = T) + Xci <- plyr::laply(FORECAST[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975), na.rm = T) + }) + + Xci[is.na(Xci)] <- 0 + Xbar <- Xbar Xci <- Xci - - Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { - - mean(x[, i],na.rm = T) }) - XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { - stats::quantile(x[, i], c(0.025, 0.975),na.rm = T )}) - + + Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { + mean(x[, i], na.rm = T) + }) + XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975), na.rm = T) + }) + Xa <- Xa XaCI <- XaCI - + plot(as.Date(obs.times[t1:t]), - Xbar, - ylim = range(c(XaCI, Xci), na.rm = TRUE), - type = "n", - xlab = "Year", - ylab = ylab.names[grep(colnames(X)[i], var.names)], - main = colnames(X)[i]) - + Xbar, + ylim = range(c(XaCI, Xci), na.rm = TRUE), + type = "n", + xlab = "Year", + ylab = ylab.names[grep(colnames(X)[i], var.names)], + main = colnames(X)[i] + ) + # observation / data - if (i=length(t1:t)) next() + Xbar <- plyr::laply(FORECAST[t1:t], function(x) { + mean(x[, i], na.rm = TRUE) + }) + Xci <- plyr::laply(FORECAST[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975)) + }) + + Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { + mean(x[, i], na.rm = TRUE) + }) + XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975)) + }) + + if (length(which(is.na(Ybar[, i]))) >= length(t1:t)) next() reg <- stats::lm(Xbar[t1:t] - unlist(Ybar[, i]) ~ c(t1:t)) - plot(t1:t, - Xbar - unlist(Ybar[, i]), - pch = 16, cex = 1, - ylim = c(min(Xci[, 1] - unlist(Ybar[, i])), max(Xci[,2] - unlist(Ybar[, i]))), - xlab = "Time", - ylab = "Error", - main = paste(colnames(X)[i], " Error = Forecast - Data")) - PEcAn.visualization::ciEnvelope(rev(t1:t), - rev(Xci[, 1] - unlist(Ybar[, i])), - rev(Xci[, 2] - unlist(Ybar[, i])), - col = alphabrown) + plot(t1:t, + Xbar - unlist(Ybar[, i]), + pch = 16, cex = 1, + ylim = c(min(Xci[, 1] - unlist(Ybar[, i])), max(Xci[, 2] - unlist(Ybar[, i]))), + xlab = "Time", + ylab = "Error", + main = paste(colnames(X)[i], " Error = Forecast - Data") + ) + PEcAn.visualization::ciEnvelope(rev(t1:t), + rev(Xci[, 1] - unlist(Ybar[, i])), + rev(Xci[, 2] - unlist(Ybar[, i])), + col = alphabrown + ) graphics::abline(h = 0, lty = 2, lwd = 2) graphics::abline(reg) - 
graphics::mtext(paste("slope =", signif(summary(reg)$coefficients[2], digits = 3), - "intercept =", signif(summary(reg)$coefficients[1], digits = 3))) + graphics::mtext(paste( + "slope =", signif(summary(reg)$coefficients[2], digits = 3), + "intercept =", signif(summary(reg)$coefficients[1], digits = 3) + )) # d<-density(c(Xbar[t1:t] - unlist(Ybar[t1:t,i]))) lines(d$y+1,d$x) - + # forecast minus analysis = update reg1 <- stats::lm(Xbar - Xa ~ c(t1:t)) - plot(t1:t, - Xbar - Xa, - pch = 16, cex = 1, - ylim = c(min(Xbar - XaCI[, 2]), max(Xbar - XaCI[, 1])), - xlab = "Time", ylab = "Update", - main = paste(colnames(X)[i], - "Update = Forecast - Analysis")) - PEcAn.visualization::ciEnvelope(rev(t1:t), - rev(Xbar - XaCI[, 1]), - rev(Xbar - XaCI[, 2]), - col = alphapurple) + plot(t1:t, + Xbar - Xa, + pch = 16, cex = 1, + ylim = c(min(Xbar - XaCI[, 2]), max(Xbar - XaCI[, 1])), + xlab = "Time", ylab = "Update", + main = paste( + colnames(X)[i], + "Update = Forecast - Analysis" + ) + ) + PEcAn.visualization::ciEnvelope(rev(t1:t), + rev(Xbar - XaCI[, 1]), + rev(Xbar - XaCI[, 2]), + col = alphapurple + ) graphics::abline(h = 0, lty = 2, lwd = 2) graphics::abline(reg1) - graphics::mtext(paste("slope =", signif(summary(reg1)$coefficients[2], digits = 3), - "intercept =", signif(summary(reg1)$coefficients[1], - digits = 3))) + graphics::mtext(paste( + "slope =", signif(summary(reg1)$coefficients[2], digits = 3), + "intercept =", signif(summary(reg1)$coefficients[1], + digits = 3 + ) + )) # d<-density(c(Xbar[t1:t] - Xa[t1:t])) lines(d$y+1,d$x) } grDevices::dev.off() - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### process variance plots ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### if (processvar) { - - grDevices::pdf('process.var.plots.pdf') - - cor.mat <- stats::cov2cor(aqq[t,,] / bqq[t]) + grDevices::pdf("process.var.plots.pdf") + + cor.mat <- stats::cov2cor(aqq[t, , ] / bqq[t]) colnames(cor.mat) <- colnames(X) rownames(cor.mat) <- colnames(X) graphics::par(mfrow = c(1, 1), mai = c(1, 1, 4, 1)) - corrplot::corrplot(cor.mat, type = "upper", tl.srt = 45,order='FPC') - - graphics::par(mfrow=c(1,1)) + corrplot::corrplot(cor.mat, type = "upper", tl.srt = 45, order = "FPC") + + graphics::par(mfrow = c(1, 1)) plot(as.Date(obs.times[t1:t]), bqq[t1:t], - pch = 16, cex = 1, - ylab = "Degrees of Freedom", xlab = "Time") - + pch = 16, cex = 1, + ylab = "Degrees of Freedom", xlab = "Time" + ) + grDevices::dev.off() - } - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### climate plots ### - ###-------------------------------------------------------------------### - + ### -------------------------------------------------------------------### + # plot(rowMeans(temp.mat[5:t,]), # Xbar[5:t] - unlist(Ybar[5:t,i]), # xlim=range(rowMeans(temp.mat[5:t,])), @@ -1544,20 +1675,18 @@ for(t in seq_len(nt)) { # # xlab="Average Monthly Temp", # ylab="Error", # main=colnames(Ybar)[i]) - # + # # plot(rowSums(precip.mat[5:t,]), # Xbar[5:t] - unlist(Ybar[5:t,i]), # xlim=range(rowSums(precip.mat[5:t,])), # ylim = range(Xbar [5:t]- unlist(Ybar[5:t,i])), # pch=16,cex=1,xlab="Total Yearly Precip", # ylab="Error",main=colnames(Ybar)[i]) - # + # # plot(rowMeans(temp.mat[5:t,]),Xbar[5:t] - Xa[5:t],pch=16, # 
cex=1,xlab="Average Monthly Temp", # ylab="Update",main=colnames(Ybar)[i]) # plot(rowSums(precip.mat[5:t,]),Xbar[5:t] - Xa[5:t],pch=16, # cex=1, xlab="Total Yearly Precip", # ylab="Update",main=colnames(Ybar)[i]) - } # sda.enkf - diff --git a/modules/assim.sequential/R/sda.enkf_MultiSite.R b/modules/assim.sequential/R/sda.enkf_MultiSite.R index 81b77c6ba33..da21569c950 100644 --- a/modules/assim.sequential/R/sda.enkf_MultiSite.R +++ b/modules/assim.sequential/R/sda.enkf_MultiSite.R @@ -8,19 +8,19 @@ #' before the loop is skipped, with the info being populated from the previous #' run. The function then dives right into the first Analysis, then continues #' on like normal. -#' +#' #' @author Michael Dietze, Ann Raiho and Alexis Helgeson \email{dietze@@bu.edu} -#' +#' #' @param settings PEcAn settings object -#' @param obs.mean Lists of date times named by time points, which contains lists of sites named by site ids, which contains observation means for each state variables of each site for each time point. -#' @param obs.cov Lists of date times named by time points, which contains lists of sites named by site ids, which contains observation covariances for all state variables of each site for each time point. +#' @param obs.mean Lists of date times named by time points, which contains lists of sites named by site ids, which contains observation means for each state variables of each site for each time point. +#' @param obs.cov Lists of date times named by time points, which contains lists of sites named by site ids, which contains observation covariances for all state variables of each site for each time point. #' @param Q Process covariance matrix given if there is no data to estimate it. #' @param restart Used for iterative updating previous forecasts. Default NULL. List object includes file path to previous runs and start date for SDA. #' @param pre_enkf_params Used for passing pre-existing time-series of process error into the current SDA runs to ignore the impact by the differences between process errors. #' @param ensemble.samples Pass ensemble.samples from outside to avoid GitHub check issues. -#' @param control List of flags controlling the behavior of the SDA. -#' `trace` for reporting back the SDA outcomes; -#' `TimeseriesPlot` for post analysis examination; +#' @param control List of flags controlling the behavior of the SDA. +#' `trace` for reporting back the SDA outcomes; +#' `TimeseriesPlot` for post analysis examination; #' `debug` decide if we want to pause the code and examining the variables inside the function; #' `pause` decide if we want to pause the SDA workflow at current time point t; #' `Profiling` decide if we want to export the temporal SDA outputs in CSV file; @@ -32,40 +32,42 @@ #' `run_parallel` decide if we want to run the SDA under parallel mode for the `future_map` function; #' `MCMC.args` include lists for controling the MCMC sampling process (iteration, nchains, burnin, and nthin.). #' @param ... 
Additional arguments, currently ignored -#' +#' #' @return NONE #' @import nimble furrr #' @export -#' -sda.enkf.multisite <- function(settings, - obs.mean, - obs.cov, - Q = NULL, - restart = NULL, +#' +sda.enkf.multisite <- function(settings, + obs.mean, + obs.cov, + Q = NULL, + restart = NULL, pre_enkf_params = NULL, ensemble.samples = NULL, - control=list(trace = TRUE, - TimeseriesPlot = FALSE, - debug = FALSE, - pause = FALSE, - Profiling = FALSE, - OutlierDetection=FALSE, - parallel_qsub = TRUE, - send_email = NULL, - keepNC = TRUE, - forceRun = TRUE, - run_parallel = TRUE, - MCMC.args = NULL), + control = list( + trace = TRUE, + TimeseriesPlot = FALSE, + debug = FALSE, + pause = FALSE, + Profiling = FALSE, + OutlierDetection = FALSE, + parallel_qsub = TRUE, + send_email = NULL, + keepNC = TRUE, + forceRun = TRUE, + run_parallel = TRUE, + MCMC.args = NULL + ), ...) { - #add if/else for when restart points to folder instead if T/F set restart as T - if(is.list(restart)){ + # add if/else for when restart points to folder instead if T/F set restart as T + if (is.list(restart)) { old.dir <- restart$filepath start.cut <- restart$start.cut - restart_flag = TRUE - }else{ - restart_flag = FALSE + restart_flag <- TRUE + } else { + restart_flag <- FALSE } - if(control$run_parallel){ + if (control$run_parallel) { if (future::supportsMulticore()) { future::plan(future::multicore) } else { @@ -74,103 +76,113 @@ sda.enkf.multisite <- function(settings, } if (control$debug) browser() tictoc::tic("Preparation") - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### ### read settings ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### adjustment <- settings$state.data.assimilation$adjustment - model <- settings$model$type - write <- settings$database$bety$write - defaults <- settings$pfts - outdir <- settings$modeloutdir # currently model runs locally, this will change if remote is enabled - rundir <- settings$host$rundir - host <- settings$host - - forecast.time.step <- settings$state.data.assimilation$forecast.time.step #idea for later generalizing - nens <- as.numeric(settings$ensemble$size) + model <- settings$model$type + write <- settings$database$bety$write + defaults <- settings$pfts + outdir <- settings$modeloutdir # currently model runs locally, this will change if remote is enabled + rundir <- settings$host$rundir + host <- settings$host + + forecast.time.step <- settings$state.data.assimilation$forecast.time.step # idea for later generalizing + nens <- as.numeric(settings$ensemble$size) processvar <- settings$state.data.assimilation$process.variance - if(processvar=="TRUE"){ + if (processvar == "TRUE") { processvar <- TRUE - }else{ + } else { processvar <- FALSE } Localization.FUN <- settings$state.data.assimilation$Localization.FUN # localization function scalef <- settings$state.data.assimilation$scalef %>% as.numeric() # scale factor for localization - var.names <- sapply(settings$state.data.assimilation$state.variable, '[[', "variable.name") + var.names <- sapply(settings$state.data.assimilation$state.variable, "[[", "variable.name") names(var.names) <- NULL multi.site.flag <- PEcAn.settings::is.MultiSettings(settings) readsFF <- NULL # this keeps the forward forecast - + is.local <- PEcAn.remote::is.localhost(settings$host) #------------------Reading up the MCMC settings - nitr.GEF <- 
ifelse(is.null(settings$state.data.assimilation$nitrGEF), - 5e4, - settings$state.data.assimilation$nitrGEF %>% - as.numeric) - nthin <- ifelse(is.null(settings$state.data.assimilation$nthin), - 10, - settings$state.data.assimilation$nthin %>% - as.numeric) - nburnin<- ifelse(is.null(settings$state.data.assimilation$nburnin), - 1e4, - settings$state.data.assimilation$nburnin %>% - as.numeric) - censored.data<-ifelse(is.null(settings$state.data.assimilation$censored.data), - TRUE, - settings$state.data.assimilation$censored.data %>% - as.logical) + nitr.GEF <- ifelse(is.null(settings$state.data.assimilation$nitrGEF), + 5e4, + settings$state.data.assimilation$nitrGEF %>% + as.numeric() + ) + nthin <- ifelse(is.null(settings$state.data.assimilation$nthin), + 10, + settings$state.data.assimilation$nthin %>% + as.numeric() + ) + nburnin <- ifelse(is.null(settings$state.data.assimilation$nburnin), + 1e4, + settings$state.data.assimilation$nburnin %>% + as.numeric() + ) + censored.data <- ifelse(is.null(settings$state.data.assimilation$censored.data), + TRUE, + settings$state.data.assimilation$censored.data %>% + as.logical() + ) #--------Initialization - FORECAST <- ANALYSIS <- ens_weights <- list() + FORECAST <- ANALYSIS <- ens_weights <- list() enkf.params <- list() restart.list <- NULL - #create SDA folder to store output - if(!dir.exists(settings$outdir)) dir.create(settings$outdir, showWarnings = FALSE) - + # create SDA folder to store output + if (!dir.exists(settings$outdir)) dir.create(settings$outdir, showWarnings = FALSE) + ##### Creating matrices that describe the bounds of the state variables ##### interval is remade everytime depending on the data at time t ##### state.interval stays constant and converts new.analysis to be within the correct bounds - interval <- NULL - state.interval <- cbind(as.numeric(lapply(settings$state.data.assimilation$state.variables,'[[','min_value')), - as.numeric(lapply(settings$state.data.assimilation$state.variables,'[[','max_value'))) + interval <- NULL + state.interval <- cbind( + as.numeric(lapply(settings$state.data.assimilation$state.variables, "[[", "min_value")), + as.numeric(lapply(settings$state.data.assimilation$state.variables, "[[", "max_value")) + ) rownames(state.interval) <- var.names #------------------------------Multi - site specific - settings - #Here I'm trying to make a temp config list name and put it into map to iterate - if(multi.site.flag){ - conf.settings<-settings - site.ids <- conf.settings$run %>% purrr::map('site') %>% purrr::map('id') %>% base::unlist() %>% base::as.character() + # Here I'm trying to make a temp config list name and put it into map to iterate + if (multi.site.flag) { + conf.settings <- settings + site.ids <- conf.settings$run %>% + purrr::map("site") %>% + purrr::map("id") %>% + base::unlist() %>% + base::as.character() # a matrix ready to be sent to spDistsN1 in sp package - first col is the long second is the lat and row names are the site ids - site.locs <- conf.settings$run %>% purrr::map('site') %>% purrr::map_dfr(~c(.x[['lon']],.x[['lat']]) %>% as.numeric)%>% - t %>% - `colnames<-`(c("Lon","Lat")) %>% + site.locs <- conf.settings$run %>% + purrr::map("site") %>% + purrr::map_dfr(~ c(.x[["lon"]], .x[["lat"]]) %>% as.numeric()) %>% + t() %>% + `colnames<-`(c("Lon", "Lat")) %>% `rownames<-`(site.ids) - #Finding the distance between the sites + # Finding the distance between the sites distances <- sp::spDists(site.locs, longlat = TRUE) - #turn that into a blocked matrix format + # turn that into a 
blocked matrix format blocked.dis <- block_matrix(distances %>% as.numeric(), rep(length(var.names), length(site.ids))) - - }else{ + } else { conf.settings <- list(settings) site.ids <- as.character(settings$run$site$id) } - - - ###-------------------------------------------------------------------### + + + ### -------------------------------------------------------------------### ### check dates before data assimilation ### - ###-------------------------------------------------------------------###---- - #filtering obs data based on years specifited in setting > state.data.assimilation + ### -------------------------------------------------------------------###---- + # filtering obs data based on years specifited in setting > state.data.assimilation if (restart_flag) { - start.cut <- lubridate::ymd_hms(start.cut) #start.cut taken from restart list as date to begin runs - Start.year <-lubridate::year(start.cut) - - }else{ + start.cut <- lubridate::ymd_hms(start.cut) # start.cut taken from restart list as date to begin runs + Start.year <- lubridate::year(start.cut) + } else { start.cut <- lubridate::ymd_hms(settings$state.data.assimilation$start.date, truncated = 3) Start.year <- (lubridate::year(settings$state.data.assimilation$start.date)) } - + End.year <- lubridate::year(settings$state.data.assimilation$end.date) # dates that assimilations will be done for - obs will be subsetted based on this assim.sda <- Start.year:End.year - obs.mean <- obs.mean[sapply(lubridate::year(names(obs.mean)), function(obs.year) obs.year %in% (assim.sda))] #checks obs.mean dates against assimyear dates - obs.cov <- obs.cov[sapply(lubridate::year(names(obs.cov)), function(obs.year) obs.year %in% (assim.sda))] #checks obs.cov dates against assimyear dates - #checking that there are dates in obs.mean and adding midnight as the time + obs.mean <- obs.mean[sapply(lubridate::year(names(obs.mean)), function(obs.year) obs.year %in% (assim.sda))] # checks obs.mean dates against assimyear dates + obs.cov <- obs.cov[sapply(lubridate::year(names(obs.cov)), function(obs.year) obs.year %in% (assim.sda))] # checks obs.cov dates against assimyear dates + # checking that there are dates in obs.mean and adding midnight as the time obs.times <- names(obs.mean) obs.times.POSIX <- lubridate::ymd_hms(obs.times) for (i in seq_along(obs.times)) { @@ -178,7 +190,7 @@ sda.enkf.multisite <- function(settings, if (is.na(lubridate::ymd(obs.times[i]))) { PEcAn.logger::logger.warn("Error: no dates associated with observations") } else { - ### Data does not have time associated with dates + ### Data does not have time associated with dates ### Adding 12:59:59PM assuming next time step starts one second later PEcAn.logger::logger.warn("Pumpkin Warning: adding one minute before midnight time assumption to dates associated with data") obs.times.POSIX[i] <- lubridate::ymd_hms(paste(obs.times[i], "23:59:59")) @@ -187,42 +199,41 @@ sda.enkf.multisite <- function(settings, } obs.times <- obs.times.POSIX read_restart_times <- c(lubridate::ymd_hms(start.cut, truncated = 3), obs.times) - nt <- length(obs.times) #sets length of for loop for Forecast/Analysis - if (nt==0) PEcAn.logger::logger.severe('There has to be at least one Obs.') + nt <- length(obs.times) # sets length of for loop for Forecast/Analysis + if (nt == 0) PEcAn.logger::logger.severe("There has to be at least one Obs.") -# Model Specific Setup ---------------------------------------------------- + # Model Specific Setup ---------------------------------------------------- #--get 
model specific functions
- #my.write_restart <- paste0("PEcAn.", model, "::write_restart.", model)
- #my.read_restart <- paste0("PEcAn.", model, "::read_restart.", model)
- #my.split_inputs <- paste0("PEcAn.", model, "::split_inputs.", model)
+ # my.write_restart <- paste0("PEcAn.", model, "::write_restart.", model)
+ # my.read_restart <- paste0("PEcAn.", model, "::read_restart.", model)
+ # my.split_inputs <- paste0("PEcAn.", model, "::split_inputs.", model)
 do.call("library", list(paste0("PEcAn.", model)))
 my.write_restart <- paste0("write_restart.", model)
 my.read_restart <- paste0("read_restart.", model)
- my.split_inputs <- paste0("split_inputs.", model)
+ my.split_inputs <- paste0("split_inputs.", model)
 #- Double checking some of the inputs
 if (is.null(adjustment)) adjustment <- TRUE
 # models that don't need split_inputs, check register file for that
 register.xml <- system.file(paste0("register.", model, ".xml"), package = paste0("PEcAn.", model))
 register <- XML::xmlToList(XML::xmlParse(register.xml))
 no_split <- !as.logical(register$exact.dates)
-
- if (!exists(my.split_inputs) & !no_split) {
+
+ if (!exists(my.split_inputs) & !no_split) {
 PEcAn.logger::logger.warn(my.split_inputs, "does not exist")
 PEcAn.logger::logger.severe("please make sure that the PEcAn interface is loaded for", model)
 PEcAn.logger::logger.warn(my.split_inputs, "If your model does not need the split function you can specify that in register.Model.xml in model's inst folder by adding FALSE tag.")
- }
- #split met if model calls for it
- #create a folder to store extracted met files
- if(!file.exists(paste0(settings$outdir, "/Extracted_met/"))){
+ # split met if model calls for it
+ # create a folder to store extracted met files
+ if (!file.exists(paste0(settings$outdir, "/Extracted_met/"))) {
 dir.create(paste0(settings$outdir, "/Extracted_met/"))
 }
-
- conf.settings <-conf.settings %>%
- `class<-`(c("list")) %>% #until here, it separates all the settings for all sites that listed in the xml file
+
+ conf.settings <- conf.settings %>%
+ `class<-`(c("list")) %>% # until here, it separates all the settings for all sites that are listed in the xml file
 furrr::future_map(function(settings) {
- library(paste0("PEcAn.",settings$model$type), character.only = TRUE)#solved by including the model in the settings
+ library(paste0("PEcAn.", settings$model$type), character.only = TRUE) # solved by including the model in the settings
 inputs.split <- list()
 if (!no_split) {
 for (i in 1:length(settings$run$inputs$met$path)) {
@@ -234,30 +245,29 @@ sda.enkf.multisite <- function(settings,
 settings = settings,
 start.time = lubridate::ymd_hms(settings$run$site$met.start, truncated = 3), # This depends if we are restart or not
 stop.time = lubridate::ymd_hms(settings$run$site$met.end, truncated = 3),
- inputs = settings$run$inputs$met$path[[i]],
+ inputs = settings$run$inputs$met$path[[i]],
 outpath = paste0(paste0(settings$outdir, "/Extracted_met/"), settings$run$site$id),
- overwrite =F
+ overwrite = F
 )
 )
 # changing the start and end date which will be used for model2netcdf.model
 settings$run$start.date <- lubridate::ymd_hms(settings$state.data.assimilation$start.date, truncated = 3)
 settings$run$end.date <- lubridate::ymd_hms(settings$state.data.assimilation$end.date, truncated = 3)
- }
- } else{
+ } else {
 inputs.split <- inputs
 }
 settings
 })
- conf.settings<- PEcAn.settings::as.MultiSettings(conf.settings)
- ###-------------------------------------------------------------------###
+ conf.settings <- 
PEcAn.settings::as.MultiSettings(conf.settings) + ### -------------------------------------------------------------------### ### If this is a restart - Picking up were we left last time ### - ###-------------------------------------------------------------------###---- - if (restart_flag){ - #TO DO grab soil files - #add else for when sda.out.Rdata is missing - if(file.exists(file.path(old.dir,"sda.output.Rdata"))){ - load(file.path(old.dir,"sda.output.Rdata")) + ### -------------------------------------------------------------------###---- + if (restart_flag) { + # TO DO grab soil files + # add else for when sda.out.Rdata is missing + if (file.exists(file.path(old.dir, "sda.output.Rdata"))) { + load(file.path(old.dir, "sda.output.Rdata")) # #this is where the old simulation will be moved to # old.sda <- lubridate::year(names(FORECAST)) %>% tail(1) # #--- Updating the nt and etc @@ -268,32 +278,32 @@ sda.enkf.multisite <- function(settings, # file.copy(file.path(file.path(old.dir,"out"),files.last.sda), # file.path(file.path(settings$outdir,"SDA"),paste0(old.sda,"/",files.last.sda)) # ) - #sim.time <-2:nt # if It's restart I added +1 from the start to nt (which is the last year of old sim) to make the first sim in restart time t=2 - #new.params and params.list are already loaded in the environment only need to grab X - X <-FORECAST[[length(FORECAST)]] - }else{ + # sim.time <-2:nt # if It's restart I added +1 from the start to nt (which is the last year of old sim) to make the first sim in restart time t=2 + # new.params and params.list are already loaded in the environment only need to grab X + X <- FORECAST[[length(FORECAST)]] + } else { PEcAn.logger::logger.info("The SDA output from the older simulation doesn't exist, assuming first SDA run with unconstrainded forecast output") - #loading param info from previous forecast - if(!exists("ensemble.samples") || is.null(ensemble.samples)){ + # loading param info from previous forecast + if (!exists("ensemble.samples") || is.null(ensemble.samples)) { load(file.path(old.dir, "samples.Rdata")) } - #assuming that will only use previous unconstrained forecast runs for first run with SDA which means we are at t=1 - #sim.time<-seq_len(nt) - #create params object using previous forecast ensemble members + # assuming that will only use previous unconstrained forecast runs for first run with SDA which means we are at t=1 + # sim.time<-seq_len(nt) + # create params object using previous forecast ensemble members new.params <- sda_matchparam(settings, ensemble.samples, site.ids, nens) - - #create inputs object for met using previous forecast ensemble members - ####add function here, pause on this feature until we add feature to model runs that saves driver ensemble members - - #build X using previous forecast output - #out.configs object required to build X and restart.list object required for build X - #TODO: there should be an easier way to do this than to rerun write.ensemble.configs + + # create inputs object for met using previous forecast ensemble members + #### add function here, pause on this feature until we add feature to model runs that saves driver ensemble members + + # build X using previous forecast output + # out.configs object required to build X and restart.list object required for build X + # TODO: there should be an easier way to do this than to rerun write.ensemble.configs restart.list <- vector("list", length(conf.settings)) out.configs <- conf.settings %>% `class<-`(c("list")) %>% furrr::future_map2(restart.list, function(settings, 
restart.arg) { # Loading the model package - this is required bc of the furrr - library(paste0("PEcAn.",settings$model$type), character.only = TRUE) + library(paste0("PEcAn.", settings$model$type), character.only = TRUE) # wrtting configs for each settings - this does not make a difference with the old code PEcAn.uncertainty::write.ensemble.configs( defaults = settings$pfts, @@ -305,467 +315,484 @@ sda.enkf.multisite <- function(settings, ) }) %>% stats::setNames(site.ids) - #now all build_X args are properly formatted for the function to return X - reads <- build_X(out.configs = out.configs, - settings = settings, - new.params = new.params, - nens = nens, - read_restart_times = read_restart_times, - outdir = file.path(old.dir, "out/"), - t = 1, - var.names = var.names, - my.read_restart = my.read_restart, - restart_flag = restart_flag) - - #let's read the parameters of each site/ens - params.list <- reads %>% purrr::map(~.x %>% purrr::map("params")) + # now all build_X args are properly formatted for the function to return X + reads <- build_X( + out.configs = out.configs, + settings = settings, + new.params = new.params, + nens = nens, + read_restart_times = read_restart_times, + outdir = file.path(old.dir, "out/"), + t = 1, + var.names = var.names, + my.read_restart = my.read_restart, + restart_flag = restart_flag + ) + + # let's read the parameters of each site/ens + params.list <- reads %>% purrr::map(~ .x %>% purrr::map("params")) # Now let's read the state variables of site/ens - X <- reads %>% purrr::map(~.x %>% purrr::map_df(~.x[["X"]] %>% t %>% as.data.frame)) - + X <- reads %>% purrr::map(~ .x %>% purrr::map_df(~ .x[["X"]] %>% + t() %>% + as.data.frame())) + # Now we have a matrix that columns are state variables and rows are ensembles. # this matrix looks like this # GWBI AbvGrndWood GWBI AbvGrndWood - #[1,] 3.872521 37.2581 3.872521 37.2581 + # [1,] 3.872521 37.2581 3.872521 37.2581 # But therer is an attribute called `Site` which tells yout what column is for what site id - check out attr (X,"Site") - if (multi.site.flag){ + if (multi.site.flag) { X <- X %>% - purrr::map_dfc(~.x) %>% + purrr::map_dfc(~.x) %>% as.matrix() %>% `colnames<-`(c(rep(var.names, length(X)))) %>% - `attr<-`('Site',c(rep(site.ids, each=length(var.names)))) + `attr<-`("Site", c(rep(site.ids, each = length(var.names)))) } } } - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### set up for data assimilation ### - ###-------------------------------------------------------------------###---- + ### -------------------------------------------------------------------###---- # weight matrix wt.mat <- matrix(NA, nrow = nens, ncol = nt) - # Reading param samples------------------------------- - #create params object using samples generated from TRAITS functions - if(restart_flag){ - new.params <- new.params - }else{ - if(!file.exists(file.path(settings$outdir, "samples.Rdata"))) PEcAn.logger::logger.severe("samples.Rdata cannot be found. Make sure you generate samples by running the get.parameter.samples function before running SDA.") - #Generate parameter needs to be run before this to generate the samples. This is hopefully done in the main workflow. 
- if(is.null(ensemble.samples)){ - load(file.path(settings$outdir, "samples.Rdata")) - } - #reformatting params - new.params <- sda_matchparam(settings, ensemble.samples, site.ids, nens) + # Reading param samples------------------------------- + # create params object using samples generated from TRAITS functions + if (restart_flag) { + new.params <- new.params + } else { + if (!file.exists(file.path(settings$outdir, "samples.Rdata"))) PEcAn.logger::logger.severe("samples.Rdata cannot be found. Make sure you generate samples by running the get.parameter.samples function before running SDA.") + # Generate parameter needs to be run before this to generate the samples. This is hopefully done in the main workflow. + if (is.null(ensemble.samples)) { + load(file.path(settings$outdir, "samples.Rdata")) } - #sample met ensemble members - #TODO: incorporate Phyllis's restart work - # sample all inputs specified in the settings$ensemble not just met - inputs <- PEcAn.settings::papply(conf.settings,function(setting) { - PEcAn.uncertainty::input.ens.gen( - settings = setting, - input = "met", - method = setting$ensemble$samplingspace$met$method, - parent_ids = NULL - ) - }) - ###------------------------------------------------------------------------------------------------### + # reformatting params + new.params <- sda_matchparam(settings, ensemble.samples, site.ids, nens) + } + # sample met ensemble members + # TODO: incorporate Phyllis's restart work + # sample all inputs specified in the settings$ensemble not just met + inputs <- PEcAn.settings::papply(conf.settings, function(setting) { + PEcAn.uncertainty::input.ens.gen( + settings = setting, + input = "met", + method = setting$ensemble$samplingspace$met$method, + parent_ids = NULL + ) + }) + ### ------------------------------------------------------------------------------------------------### ### loop over time ### - ###------------------------------------------------------------------------------------------------### - for(t in 1:nt){ - obs.t <- as.character(lubridate::date(obs.times[t])) - obs.year <- lubridate::year(obs.t) - ###-------------------------------------------------------------------------### - ### Taking care of Forecast. Splitting / Writting / running / reading back### - ###-------------------------------------------------------------------------###----- - #- Check to see if this is the first run or not and what inputs needs to be sent to write.ensemble configs - if (t>1){ - #for next time step split the met if model requires - #-Splitting the input for the models that they don't care about the start and end time of simulations and they run as long as their met file. - inputs.split <- metSplit(conf.settings, inputs, settings, model, no_split = FALSE, obs.times, t, nens, restart_flag = FALSE, my.split_inputs) - - #---------------- setting up the restart argument for each site separately and keeping them in a list - restart.list <- - furrr::future_pmap(list(out.configs, conf.settings %>% `class<-`(c("list")), params.list, inputs.split), - function(configs, settings, new.params, inputs) { - #if the new state for each site only has one row/col. - #then we need to convert it to matrix to solve the indexing issue. 
- new_state_site <- new.state[, which(attr(X, "Site") %in% settings$run$site$id)] - if(is.vector(new_state_site)){ - new_state_site <- matrix(new_state_site) - } - list( - runid = configs$runs$id, - start.time = strptime(obs.times[t -1], format = "%Y-%m-%d %H:%M:%S") + lubridate::second(lubridate::hms("00:00:01")), - stop.time = strptime(obs.times[t], format ="%Y-%m-%d %H:%M:%S"), - settings = settings, - new.state = new_state_site, - new.params = new.params, - inputs = inputs, - RENAME = TRUE, - ensemble.id = settings$ensemble$ensemble.id - ) - }) - } else { ## t == 1 - restart.list <- vector("list", length(conf.settings)) - } - #add flag for restart t=1 to skip model runs - if(restart_flag & t == 1){ - #for restart when t=1 do not need to do model runs and X should already exist in environment by this point - X <- X - }else{ - if (control$debug) browser() - out.configs <- conf.settings %>% - `class<-`(c("list")) %>% - furrr::future_map2(restart.list, function(settings, restart.arg) { - # Loading the model package - this is required bc of the furrr - library(paste0("PEcAn.",settings$model$type), character.only = TRUE) - # wrtting configs for each settings - this does not make a difference with the old code - PEcAn.uncertainty::write.ensemble.configs( - defaults = settings$pfts, - ensemble.samples = ensemble.samples, + ### ------------------------------------------------------------------------------------------------### + for (t in 1:nt) { + obs.t <- as.character(lubridate::date(obs.times[t])) + obs.year <- lubridate::year(obs.t) + ### -------------------------------------------------------------------------### + ### Taking care of Forecast. Splitting / Writting / running / reading back### + ### -------------------------------------------------------------------------###----- + #- Check to see if this is the first run or not and what inputs needs to be sent to write.ensemble configs + if (t > 1) { + # for next time step split the met if model requires + #-Splitting the input for the models that they don't care about the start and end time of simulations and they run as long as their met file. + inputs.split <- metSplit(conf.settings, inputs, settings, model, no_split = FALSE, obs.times, t, nens, restart_flag = FALSE, my.split_inputs) + + #---------------- setting up the restart argument for each site separately and keeping them in a list + restart.list <- + furrr::future_pmap( + list(out.configs, conf.settings %>% `class<-`(c("list")), params.list, inputs.split), + function(configs, settings, new.params, inputs) { + # if the new state for each site only has one row/col. + # then we need to convert it to matrix to solve the indexing issue. + new_state_site <- new.state[, which(attr(X, "Site") %in% settings$run$site$id)] + if (is.vector(new_state_site)) { + new_state_site <- matrix(new_state_site) + } + list( + runid = configs$runs$id, + start.time = strptime(obs.times[t - 1], format = "%Y-%m-%d %H:%M:%S") + lubridate::second(lubridate::hms("00:00:01")), + stop.time = strptime(obs.times[t], format = "%Y-%m-%d %H:%M:%S"), settings = settings, - model = settings$model$type, - write.to.db = settings$database$bety$write, - restart = restart.arg, - rename = TRUE + new.state = new_state_site, + new.params = new.params, + inputs = inputs, + RENAME = TRUE, + ensemble.id = settings$ensemble$ensemble.id ) - }) %>% - stats::setNames(site.ids) - - #if it's a rabbitmq job sumbmission, we will first copy and paste the whole run folder within the SDA to the remote host. 
- if (!is.null(settings$host$rabbitmq)) { - settings$host$rabbitmq$prefix <- paste0(obs.year, ".nc") - cp2cmd <- gsub("@RUNDIR@", settings$host$rundir, settings$host$rabbitmq$cp2cmd) - try(system(cp2cmd, intern = TRUE)) - } - - #I'm rewriting the runs because when I use the parallel approach for writing configs the run.txt will get messed up; because multiple cores want to write on it at the same time. - runs.tmp <- list.dirs(rundir, full.names = F) - runs.tmp <- runs.tmp[grepl("ENS-*|[0-9]", runs.tmp)] - writeLines(runs.tmp[runs.tmp != ''], file.path(rundir, 'runs.txt')) - paste(file.path(rundir, 'runs.txt')) ## testing - Sys.sleep(0.01) ## testing - if(control$parallel_qsub){ - if (is.null(control$jobs.per.file)) { - PEcAn.remote::qsub_parallel(settings, prefix = paste0(obs.year, ".nc")) - } else { - PEcAn.remote::qsub_parallel(settings, files=PEcAn.remote::merge_job_files(settings, control$jobs.per.file), prefix = paste0(obs.year, ".nc")) } - }else{ - PEcAn.workflow::start_model_runs(settings, write=settings$database$bety$write) + ) + } else { ## t == 1 + restart.list <- vector("list", length(conf.settings)) + } + # add flag for restart t=1 to skip model runs + if (restart_flag & t == 1) { + # for restart when t=1 do not need to do model runs and X should already exist in environment by this point + X <- X + } else { + if (control$debug) browser() + out.configs <- conf.settings %>% + `class<-`(c("list")) %>% + furrr::future_map2(restart.list, function(settings, restart.arg) { + # Loading the model package - this is required bc of the furrr + library(paste0("PEcAn.", settings$model$type), character.only = TRUE) + # wrtting configs for each settings - this does not make a difference with the old code + PEcAn.uncertainty::write.ensemble.configs( + defaults = settings$pfts, + ensemble.samples = ensemble.samples, + settings = settings, + model = settings$model$type, + write.to.db = settings$database$bety$write, + restart = restart.arg, + rename = TRUE + ) + }) %>% + stats::setNames(site.ids) + + # if it's a rabbitmq job sumbmission, we will first copy and paste the whole run folder within the SDA to the remote host. + if (!is.null(settings$host$rabbitmq)) { + settings$host$rabbitmq$prefix <- paste0(obs.year, ".nc") + cp2cmd <- gsub("@RUNDIR@", settings$host$rundir, settings$host$rabbitmq$cp2cmd) + try(system(cp2cmd, intern = TRUE)) + } + + # I'm rewriting the runs because when I use the parallel approach for writing configs the run.txt will get messed up; because multiple cores want to write on it at the same time. 
+ runs.tmp <- list.dirs(rundir, full.names = F)
+ runs.tmp <- runs.tmp[grepl("ENS-*|[0-9]", runs.tmp)]
+ writeLines(runs.tmp[runs.tmp != ""], file.path(rundir, "runs.txt"))
+ paste(file.path(rundir, "runs.txt")) ## testing
+ Sys.sleep(0.01) ## testing
+ if (control$parallel_qsub) {
+ if (is.null(control$jobs.per.file)) {
+ PEcAn.remote::qsub_parallel(settings, prefix = paste0(obs.year, ".nc"))
+ } else {
+ PEcAn.remote::qsub_parallel(settings, files = PEcAn.remote::merge_job_files(settings, control$jobs.per.file), prefix = paste0(obs.year, ".nc"))
 }
+ } else {
+ PEcAn.workflow::start_model_runs(settings, write = settings$database$bety$write)
+ }
+ #------------- Reading - every iteration and for SDA
+ # put building of X into a function that gets called
+ max_t <- 0
+ while ("try-error" %in% class(
+ try(reads <- build_X(
+ out.configs = out.configs,
+ settings = settings,
+ new.params = new.params,
+ nens = nens,
+ read_restart_times = read_restart_times,
+ outdir = outdir,
+ t = t,
+ var.names = var.names,
+ my.read_restart = my.read_restart,
+ restart_flag = restart_flag
+ ), silent = T)
+ )
+ ) {
+ Sys.sleep(10)
+ max_t <- max_t + 1
+ if (max_t > 3) {
+ PEcAn.logger::logger.info("Can't find outputted NC file! Please rerun the code!")
+ break
+ return(0)
 }
+ PEcAn.logger::logger.info("Empty folder, try again!")
+ }
+
+ if (control$debug) browser()
+ # let's read the parameters of each site/ens
+ params.list <- reads %>% purrr::map(~ .x %>% purrr::map("params"))
+ # Now let's read the state variables of site/ens
+ # don't need to build X when t=1
+ X <- reads %>% purrr::map(~ .x %>% purrr::map_df(~ .x[["X"]] %>%
+ t() %>%
+ as.data.frame()))
+
+
+ # replacing crazy outliers before it's too late
+ if (control$OutlierDetection) {
+ X <- outlier.detector.boxplot(X)
+ PEcAn.logger::logger.info("Outlier Detection.")
+ }
+
+ # Now we have a matrix whose columns are state variables and whose rows are ensembles. 
+ # this matrix looks like this + # GWBI AbvGrndWood GWBI AbvGrndWood + # [1,] 3.872521 37.2581 3.872521 37.2581 + # But therer is an attribute called `Site` which tells yout what column is for what site id - check out attr (X,"Site") + if (multi.site.flag) { + X <- X %>% + purrr::map_dfc(~.x) %>% as.matrix() %>% `colnames<-`(c(rep(var.names, length(X)))) %>% - `attr<-`('Site',c(rep(site.ids, each=length(var.names)))) - } - - } ## end else from restart & t==1 - FORECAST[[obs.t]] <- X - - ###-------------------------------------------------------------------### - ### preparing OBS ### - ###-------------------------------------------------------------------###---- - #To trigger the analysis function with free run, you need to first specify the control$forceRun as TRUE, - #Then specify the settings$state.data.assimilation$scalef as 0, and settings$state.data.assimilation$free.run as TRUE. - if (!is.null(obs.mean[[t]][[1]]) | (as.logical(settings$state.data.assimilation$free.run) & control$forceRun)) { - # TODO: as currently configured, Analysis runs even if all obs are NA, - # which clearly should be triggering the `else` of this if, but the - # `else` has not been invoked in a while an may need updating - - - #decide if we want to estimate the process variance and choose the according function. - if(processvar == FALSE) { - an.method<-EnKF - } else if (processvar == TRUE && settings$state.data.assimilation$q.type %in% c("SINGLE", "SITE")) { - an.method<-GEF.MultiSite + `attr<-`("Site", c(rep(site.ids, each = length(var.names)))) + } + } ## end else from restart & t==1 + FORECAST[[obs.t]] <- X + + ### -------------------------------------------------------------------### + ### preparing OBS ### + ### -------------------------------------------------------------------###---- + # To trigger the analysis function with free run, you need to first specify the control$forceRun as TRUE, + # Then specify the settings$state.data.assimilation$scalef as 0, and settings$state.data.assimilation$free.run as TRUE. + if (!is.null(obs.mean[[t]][[1]]) | (as.logical(settings$state.data.assimilation$free.run) & control$forceRun)) { + # TODO: as currently configured, Analysis runs even if all obs are NA, + # which clearly should be triggering the `else` of this if, but the + # `else` has not been invoked in a while an may need updating + + + # decide if we want to estimate the process variance and choose the according function. + if (processvar == FALSE) { + an.method <- EnKF + } else if (processvar == TRUE && settings$state.data.assimilation$q.type %in% c("SINGLE", "SITE")) { + an.method <- GEF.MultiSite + } + + # decide if we want the block analysis function or multi-site analysis function. + if (processvar == TRUE && settings$state.data.assimilation$q.type %in% c("vector", "wishart")) { + # initialize block.list.all. + if (t == 1 | !exists("block.list.all")) { + block.list.all <- obs.mean %>% purrr::map(function(l) { + NULL + }) } - - #decide if we want the block analysis function or multi-site analysis function. - if (processvar == TRUE && settings$state.data.assimilation$q.type %in% c("vector", "wishart")) { - #initialize block.list.all. - if (t == 1 | !exists("block.list.all")) { - block.list.all <- obs.mean %>% purrr::map(function(l){NULL}) - } - #initialize MCMC arguments. - if (is.null(control$MCMC.args)) { - MCMC.args <- list(niter = 1e5, - nthin = 10, - nchain = 3, - nburnin = 5e4) - } else { - MCMC.args <- control$MCMC.args - } - #running analysis function. 
- enkf.params[[obs.t]] <- analysis_sda_block(settings, block.list.all, X, obs.mean, obs.cov, t, nt, MCMC.args, pre_enkf_params) - enkf.params[[obs.t]] <- c(enkf.params[[obs.t]], RestartList = list(restart.list %>% stats::setNames(site.ids))) - block.list.all <- enkf.params[[obs.t]]$block.list.all - #Forecast - mu.f <- enkf.params[[obs.t]]$mu.f - Pf <- enkf.params[[obs.t]]$Pf - #Analysis - Pa <- enkf.params[[obs.t]]$Pa - mu.a <- enkf.params[[obs.t]]$mu.a - } else if (exists("an.method")) { - #Making R and Y - Obs.cons <- Construct.R(site.ids, var.names, obs.mean[[t]], obs.cov[[t]]) - Y <- Obs.cons$Y - R <- Obs.cons$R - if (length(Y) > 1) { - PEcAn.logger::logger.info("The zero variances in R and Pf is being replaced by half and one fifth of the minimum variance in those matrices respectively.") - diag(R)[which(diag(R)==0)] <- min(diag(R)[which(diag(R) != 0)])/2 - } - # making the mapping operator - H <- Construct.H.multisite(site.ids, var.names, obs.mean[[t]]) - #Pass aqq and bqq. - aqq <- NULL - bqq <- numeric(nt + 1) - Pf <- NULL - #if t>1 - if(is.null(pre_enkf_params) && t>1){ - aqq <- enkf.params[[t-1]]$aqq - bqq <- enkf.params[[t-1]]$bqq - X.new<-enkf.params[[t-1]]$X.new - } - if(!is.null(pre_enkf_params) && t>1){ - aqq <- pre_enkf_params[[t-1]]$aqq - bqq <- pre_enkf_params[[t-1]]$bqq - X.new<-pre_enkf_params[[t-1]]$X.new - } - if(!is.null(pre_enkf_params)){ - Pf <- pre_enkf_params[[t]]$Pf - } - recompileTobit = !exists('Cmcmc_tobit2space') - recompileGEF = !exists('Cmcmc') - #weight list - # This reads ensemble weights generated by `get_ensemble_weights` function from assim.sequential package - weight_list <- list() - if(!file.exists(file.path(settings$outdir, "ensemble_weights.Rdata"))){ - PEcAn.logger::logger.warn("ensemble_weights.Rdata cannot be found. Make sure you generate samples by running the get.ensemble.weights function before running SDA if you want the ensembles to be weighted.") - #create null list - for(tt in 1:length(obs.times)){ - weight_list[[tt]] <- rep(1,nens) #no weights - } - } else{ - load(file.path(settings$outdir, "ensemble_weights.Rdata")) ## loads ensemble.samples - } - wts <- unlist(weight_list[[t]]) - #-analysis function - enkf.params[[obs.t]] <- GEF.MultiSite( - settings, - FUN = an.method, - Forecast = list(Q = Q, X = X), - Observed = list(R = R, Y = Y), - H = H, - extraArg = list( - aqq = aqq, - bqq = bqq, - Pf = Pf, - t = t, - nitr.GEF = nitr.GEF, - nthin = nthin, - nburnin = nburnin, - censored.data = censored.data, - recompileGEF = recompileGEF, - recompileTobit = recompileTobit, - wts = wts - ), - choose = choose, - nt = nt, - obs.mean = obs.mean, - nitr = 100000, - nburnin = 10000, - obs.cov = obs.cov, - site.ids = site.ids, - blocked.dis = blocked.dis, - distances = distances + # initialize MCMC arguments. 
+ if (is.null(control$MCMC.args)) { + MCMC.args <- list( + niter = 1e5, + nthin = 10, + nchain = 3, + nburnin = 5e4 ) - tictoc::tic(paste0("Preparing for Adjustment for cycle = ", t)) - #Forecast - mu.f <- enkf.params[[obs.t]]$mu.f - Pf <- enkf.params[[obs.t]]$Pf - #Analysis - Pa <- enkf.params[[obs.t]]$Pa - mu.a <- enkf.params[[obs.t]]$mu.a - #extracting extra outputs - if (control$debug) browser() - if (processvar) { - aqq <- enkf.params[[obs.t]]$aqq - bqq <- enkf.params[[obs.t]]$bqq - } - # Adding obs elements to the enkf.params - #This can later on help with diagnostics - enkf.params[[obs.t]] <- - c( - enkf.params[[obs.t]], - R = list(R), - Y = list(Y), - RestartList = list(restart.list %>% stats::setNames(site.ids)) - ) + } else { + MCMC.args <- control$MCMC.args } - - ###-------------------------------------------------------------------### - ### Trace ### - ###-------------------------------------------------------------------###---- - #-- writing Trace-------------------- - if(control$trace) { - PEcAn.logger::logger.warn ("\n --------------------------- ",obs.year," ---------------------------\n") - PEcAn.logger::logger.warn ("\n --------------Obs mean----------- \n") - print(enkf.params[[obs.t]]$Y) - PEcAn.logger::logger.warn ("\n --------------Obs Cov ----------- \n") - print(enkf.params[[obs.t]]$R) - PEcAn.logger::logger.warn ("\n --------------Forecast mean ----------- \n") - print(enkf.params[[obs.t]]$mu.f) - PEcAn.logger::logger.warn ("\n --------------Forecast Cov ----------- \n") - print(enkf.params[[obs.t]]$Pf) - PEcAn.logger::logger.warn ("\n --------------Analysis mean ----------- \n") - print(t(enkf.params[[obs.t]]$mu.a)) - PEcAn.logger::logger.warn ("\n --------------Analysis Cov ----------- \n") - print(enkf.params[[obs.t]]$Pa) - PEcAn.logger::logger.warn ("\n ------------------------------------------------------\n") + # running analysis function. + enkf.params[[obs.t]] <- analysis_sda_block(settings, block.list.all, X, obs.mean, obs.cov, t, nt, MCMC.args, pre_enkf_params) + enkf.params[[obs.t]] <- c(enkf.params[[obs.t]], RestartList = list(restart.list %>% stats::setNames(site.ids))) + block.list.all <- enkf.params[[obs.t]]$block.list.all + # Forecast + mu.f <- enkf.params[[obs.t]]$mu.f + Pf <- enkf.params[[obs.t]]$Pf + # Analysis + Pa <- enkf.params[[obs.t]]$Pa + mu.a <- enkf.params[[obs.t]]$mu.a + } else if (exists("an.method")) { + # Making R and Y + Obs.cons <- Construct.R(site.ids, var.names, obs.mean[[t]], obs.cov[[t]]) + Y <- Obs.cons$Y + R <- Obs.cons$R + if (length(Y) > 1) { + PEcAn.logger::logger.info("The zero variances in R and Pf is being replaced by half and one fifth of the minimum variance in those matrices respectively.") + diag(R)[which(diag(R) == 0)] <- min(diag(R)[which(diag(R) != 0)]) / 2 } - if (control$debug) browser() - if (control$pause) readline(prompt="Press [enter] to continue \n") - } else { - ###-------------------------------------------------------------------### - ### No Observations -- ###---- - ###-----------------------------------------------------------------### - ### no process variance -- forecast is the same as the analysis ### - if (processvar==FALSE) { - mu.a <- mu.f - Pa <- Pf + Q - ### yes process variance -- no data - } else { - mu.f <- colMeans(X) #mean Forecast - This is used as an initial condition - mu.a <- mu.f - if(is.null(Q)){ - q.bar <- diag(ncol(X)) - PEcAn.logger::logger.warn('Process variance not estimated. 
Analysis has been given uninformative process variance') + # making the mapping operator + H <- Construct.H.multisite(site.ids, var.names, obs.mean[[t]]) + # Pass aqq and bqq. + aqq <- NULL + bqq <- numeric(nt + 1) + Pf <- NULL + # if t>1 + if (is.null(pre_enkf_params) && t > 1) { + aqq <- enkf.params[[t - 1]]$aqq + bqq <- enkf.params[[t - 1]]$bqq + X.new <- enkf.params[[t - 1]]$X.new + } + if (!is.null(pre_enkf_params) && t > 1) { + aqq <- pre_enkf_params[[t - 1]]$aqq + bqq <- pre_enkf_params[[t - 1]]$bqq + X.new <- pre_enkf_params[[t - 1]]$X.new + } + if (!is.null(pre_enkf_params)) { + Pf <- pre_enkf_params[[t]]$Pf + } + recompileTobit <- !exists("Cmcmc_tobit2space") + recompileGEF <- !exists("Cmcmc") + # weight list + # This reads ensemble weights generated by `get_ensemble_weights` function from assim.sequential package + weight_list <- list() + if (!file.exists(file.path(settings$outdir, "ensemble_weights.Rdata"))) { + PEcAn.logger::logger.warn("ensemble_weights.Rdata cannot be found. Make sure you generate samples by running the get.ensemble.weights function before running SDA if you want the ensembles to be weighted.") + # create null list + for (tt in 1:length(obs.times)) { + weight_list[[tt]] <- rep(1, nens) # no weights } - # Pa <- Pf + matrix(solve(q.bar), dim(Pf)[1], dim(Pf)[2]) - #will throw an error when q.bar and Pf are different sizes i.e. when you are running with no obs and do not variance for all state variables - #Pa <- Pf + solve(q.bar) - #hack have Pa = Pf for now - # if(!is.null(pre_enkf_params)){ - # Pf <- pre_enkf_params[[t]]$Pf - # }else{ - # Pf <- stats::cov(X) # Cov Forecast - This is used as an initial condition - # } - Pf <- stats::cov(X) - Pa <- Pf + } else { + load(file.path(settings$outdir, "ensemble_weights.Rdata")) ## loads ensemble.samples } - enkf.params[[obs.t]] <- list(mu.f = mu.f, Pf = Pf, mu.a = mu.a, Pa = Pa) - } - - ###-------------------------------------------------------------------### - ### adjust/update state matrix ### - ###-------------------------------------------------------------------###---- - tictoc::tic(paste0("Adjustment for cycle = ", t)) - if(adjustment == TRUE){ - analysis <-adj.ens(Pf, X, mu.f, mu.a, Pa) - } else { - analysis <- as.data.frame(mvtnorm::rmvnorm(as.numeric(nrow(X)), mu.a, Pa, method = "svd")) + wts <- unlist(weight_list[[t]]) + #-analysis function + enkf.params[[obs.t]] <- GEF.MultiSite( + settings, + FUN = an.method, + Forecast = list(Q = Q, X = X), + Observed = list(R = R, Y = Y), + H = H, + extraArg = list( + aqq = aqq, + bqq = bqq, + Pf = Pf, + t = t, + nitr.GEF = nitr.GEF, + nthin = nthin, + nburnin = nburnin, + censored.data = censored.data, + recompileGEF = recompileGEF, + recompileTobit = recompileTobit, + wts = wts + ), + choose = choose, + nt = nt, + obs.mean = obs.mean, + nitr = 100000, + nburnin = 10000, + obs.cov = obs.cov, + site.ids = site.ids, + blocked.dis = blocked.dis, + distances = distances + ) + tictoc::tic(paste0("Preparing for Adjustment for cycle = ", t)) + # Forecast + mu.f <- enkf.params[[obs.t]]$mu.f + Pf <- enkf.params[[obs.t]]$Pf + # Analysis + Pa <- enkf.params[[obs.t]]$Pa + mu.a <- enkf.params[[obs.t]]$mu.a + # extracting extra outputs + if (control$debug) browser() + if (processvar) { + aqq <- enkf.params[[obs.t]]$aqq + bqq <- enkf.params[[obs.t]]$bqq + } + # Adding obs elements to the enkf.params + # This can later on help with diagnostics + enkf.params[[obs.t]] <- + c( + enkf.params[[obs.t]], + R = list(R), + Y = list(Y), + RestartList = list(restart.list %>% 
stats::setNames(site.ids)) + ) } - colnames(analysis) <- colnames(X) - ##### Mapping analysis vectors to be in bounds of state variables - for(i in 1:ncol(analysis)){ - int.save <- state.interval[which(startsWith(colnames(analysis)[i], - var.names)),] - analysis[analysis[,i] < int.save[1],i] <- int.save[1] - analysis[analysis[,i] > int.save[2],i] <- int.save[2] + + ### -------------------------------------------------------------------### + ### Trace ### + ### -------------------------------------------------------------------###---- + #-- writing Trace-------------------- + if (control$trace) { + PEcAn.logger::logger.warn("\n --------------------------- ", obs.year, " ---------------------------\n") + PEcAn.logger::logger.warn("\n --------------Obs mean----------- \n") + print(enkf.params[[obs.t]]$Y) + PEcAn.logger::logger.warn("\n --------------Obs Cov ----------- \n") + print(enkf.params[[obs.t]]$R) + PEcAn.logger::logger.warn("\n --------------Forecast mean ----------- \n") + print(enkf.params[[obs.t]]$mu.f) + PEcAn.logger::logger.warn("\n --------------Forecast Cov ----------- \n") + print(enkf.params[[obs.t]]$Pf) + PEcAn.logger::logger.warn("\n --------------Analysis mean ----------- \n") + print(t(enkf.params[[obs.t]]$mu.a)) + PEcAn.logger::logger.warn("\n --------------Analysis Cov ----------- \n") + print(enkf.params[[obs.t]]$Pa) + PEcAn.logger::logger.warn("\n ------------------------------------------------------\n") } - ## in the future will have to be separated from analysis - - new.state <- as.data.frame(analysis) - ANALYSIS[[obs.t]] <- analysis - ens_weights[[obs.t]] <- PEcAnAssimSequential::sda_weights_site(FORECAST, ANALYSIS, t, as.numeric(settings$ensemble$size)) - ###-------------------------------------------------------------------### - ### save outputs ### - ###-------------------------------------------------------------------###---- - Viz.output <- list(settings, obs.mean, obs.cov) #keeping obs data and settings for later visualization in Dashboard - - save(site.locs, - t, - FORECAST, - ANALYSIS, - enkf.params, - new.state, new.params,params.list, ens_weights, - out.configs, ensemble.samples, inputs, Viz.output, - file = file.path(settings$outdir, "sda.output.Rdata")) - - tictoc::tic(paste0("Visulization for cycle = ", t)) - - #writing down the image - either you asked for it or nor :) - if ((t%%2 == 0 | t == nt) & (control$TimeseriesPlot)){ - if (as.logical(settings$state.data.assimilation$free.run)) { - SDA_timeseries_plot(ANALYSIS, FORECAST, obs.mean, obs.cov, settings$outdir, by = "var", types = c("FORECAST", "ANALYSIS")) - } else { - SDA_timeseries_plot(ANALYSIS, FORECAST, obs.mean, obs.cov, settings$outdir, by = "var", types = c("FORECAST", "ANALYSIS", "OBS")) + if (control$debug) browser() + if (control$pause) readline(prompt = "Press [enter] to continue \n") + } else { + ### -------------------------------------------------------------------### + ### No Observations -- ###---- + ### -----------------------------------------------------------------### + ### no process variance -- forecast is the same as the analysis ### + if (processvar == FALSE) { + mu.a <- mu.f + Pa <- Pf + Q + ### yes process variance -- no data + } else { + mu.f <- colMeans(X) # mean Forecast - This is used as an initial condition + mu.a <- mu.f + if (is.null(Q)) { + q.bar <- diag(ncol(X)) + PEcAn.logger::logger.warn("Process variance not estimated. 
Analysis has been given uninformative process variance")
+ }
+ # Pa <- Pf + matrix(solve(q.bar), dim(Pf)[1], dim(Pf)[2])
+ # will throw an error when q.bar and Pf are different sizes i.e. when you are running with no obs and do not variance for all state variables
+ # Pa <- Pf + solve(q.bar)
+ # hack have Pa = Pf for now
+ # if(!is.null(pre_enkf_params)){
+ # Pf <- pre_enkf_params[[t]]$Pf
+ # }else{
+ # Pf <- stats::cov(X) # Cov Forecast - This is used as an initial condition
+ # }
+ Pf <- stats::cov(X)
+ Pa <- Pf
+ }
+ enkf.params[[obs.t]] <- list(mu.f = mu.f, Pf = Pf, mu.a = mu.a, Pa = Pa)
+ }
+
+ ### -------------------------------------------------------------------###
+ ### adjust/update state matrix ###
+ ### -------------------------------------------------------------------###----
+ tictoc::tic(paste0("Adjustment for cycle = ", t))
+ if (adjustment == TRUE) {
+ analysis <- adj.ens(Pf, X, mu.f, mu.a, Pa)
+ } else {
+ analysis <- as.data.frame(mvtnorm::rmvnorm(as.numeric(nrow(X)), mu.a, Pa, method = "svd"))
+ }
+ colnames(analysis) <- colnames(X)
+ ##### Mapping analysis vectors to be in bounds of state variables
+ for (i in 1:ncol(analysis)) {
+ int.save <- state.interval[which(startsWith(
+ colnames(analysis)[i],
+ var.names
+ )), ]
+ analysis[analysis[, i] < int.save[1], i] <- int.save[1]
+ analysis[analysis[, i] > int.save[2], i] <- int.save[2]
+ }
+ ## in the future will have to be separated from analysis
+
+ new.state <- as.data.frame(analysis)
+ ANALYSIS[[obs.t]] <- analysis
+ ens_weights[[obs.t]] <- PEcAnAssimSequential::sda_weights_site(FORECAST, ANALYSIS, t, as.numeric(settings$ensemble$size))
+ ### -------------------------------------------------------------------###
+ ### save outputs ###
+ ### -------------------------------------------------------------------###----
+ Viz.output <- list(settings, obs.mean, obs.cov) # keeping obs data and settings for later visualization in Dashboard
+
+ save(site.locs,
+ t,
+ FORECAST,
+ ANALYSIS,
+ enkf.params,
+ new.state, new.params, params.list, ens_weights,
+ out.configs, ensemble.samples, inputs, Viz.output,
+ file = file.path(settings$outdir, "sda.output.Rdata")
+ )
+
+ tictoc::tic(paste0("Visualization for cycle = ", t))
+
+ # writing down the image - either you asked for it or not :)
+ if ((t %% 2 == 0 | t == nt) & (control$TimeseriesPlot)) {
+ if (as.logical(settings$state.data.assimilation$free.run)) {
+ SDA_timeseries_plot(ANALYSIS, FORECAST, obs.mean, obs.cov, settings$outdir, by = "var", types = c("FORECAST", "ANALYSIS"))
+ } else {
+ SDA_timeseries_plot(ANALYSIS, FORECAST, obs.mean, obs.cov, settings$outdir, by = "var", types = c("FORECAST", "ANALYSIS", "OBS"))
 }
+ }
+ # Saving the profiling result
+ if (control$Profiling) alltocs(file.path(settings$outdir, "SDA", "Profiling.csv"))
+
 # remove files as SDA runs
- if (!(control$keepNC) && t == 1){
+ if (!(control$keepNC) && t == 1) {
 unlink(list.files(outdir, "*.nc", recursive = TRUE, full.names = TRUE))
 }
- if(!is.null(control$send_email)){
+ if (!is.null(control$send_email)) {
 sendmail <- Sys.which("sendmail")
 mailfile <- tempfile("mail")
 cat(paste0("From: ", control$send_email$from, "\n", "Subject: ", "SDA progress report", "\n", "To: ", control$send_email$to, "\n", "\n", paste("Time point:", obs.times[t], "has been completed!")), file = mailfile)
 system2(sendmail, c("-f", paste0("\"", control$send_email$from, "\""), paste0("\"", control$send_email$to, 
"\""), "<", mailfile)) unlink(mailfile) } - gc() + gc() # useful for debugging to keep .nc files for assimilated years. T = 2, because this loops removes the files that were run when starting the next loop -# if (keepNC && t == 1){ -# unlink(list.files(outdir, "*.nc", recursive = TRUE, full.names = TRUE)) -# } - ## MCD: I commented the above "if" out because if you are restarting from a previous forecast, this might delete the files in that earlier folder + # if (keepNC && t == 1){ + # unlink(list.files(outdir, "*.nc", recursive = TRUE, full.names = TRUE)) + # } + ## MCD: I commented the above "if" out because if you are restarting from a previous forecast, this might delete the files in that earlier folder } ### end loop over time -} # sda.enkf \ No newline at end of file +} # sda.enkf diff --git a/modules/assim.sequential/R/sda.enkf_refactored.R b/modules/assim.sequential/R/sda.enkf_refactored.R index f87b5eecc41..ec93e88f774 100644 --- a/modules/assim.sequential/R/sda.enkf_refactored.R +++ b/modules/assim.sequential/R/sda.enkf_refactored.R @@ -8,7 +8,7 @@ #' then continues on like normal. #' #' @author Michael Dietze and Ann Raiho \email{dietze@@bu.edu} -#' +#' #' @param settings PEcAn settings object #' @param obs.mean List of dataframe of observation means, named with #' observation datetime. @@ -27,84 +27,85 @@ #' code and examining the variables inside the function. #' @param ... Additional arguments, currently ignored #' -#' +#' #' @return NONE #' @import nimble #' @export -#' +#' sda.enkf <- function(settings, obs.mean, obs.cov, Q = NULL, - restart=NULL, - control=list(trace=TRUE, - interactivePlot=TRUE, - TimeseriesPlot=TRUE, - BiasPlot=FALSE, - plot.title=NULL, - debug=FALSE, - pause=FALSE), + restart = NULL, + control = list( + trace = TRUE, + interactivePlot = TRUE, + TimeseriesPlot = TRUE, + BiasPlot = FALSE, + plot.title = NULL, + debug = FALSE, + pause = FALSE + ), ...) 
{ - - if (control$debug) browser() - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### ### read settings ### - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### weight_list <- list() adjustment <- settings$state.data.assimilation$adjustment - model <- settings$model$type - write <- settings$database$bety$write - defaults <- settings$pfts - outdir <- settings$modeloutdir # currently model runs locally, this will change if remote is enabled - rundir <- settings$host$rundir - host <- settings$host - forecast.time.step <- settings$state.data.assimilation$forecast.time.step #idea for later generalizing - nens <- as.numeric(settings$ensemble$size) + model <- settings$model$type + write <- settings$database$bety$write + defaults <- settings$pfts + outdir <- settings$modeloutdir # currently model runs locally, this will change if remote is enabled + rundir <- settings$host$rundir + host <- settings$host + forecast.time.step <- settings$state.data.assimilation$forecast.time.step # idea for later generalizing + nens <- as.numeric(settings$ensemble$size) processvar <- as.logical(settings$state.data.assimilation$process.variance) - var.names <- sapply(settings$state.data.assimilation$state.variable, '[[', "variable.name") + var.names <- sapply(settings$state.data.assimilation$state.variable, "[[", "variable.name") names(var.names) <- NULL - - input.vars <- sapply(settings$state.data.assimilation$inputs, '[[', "variable.name") - operators <- sapply(settings$state.data.assimilation$inputs, '[[', "operator") - + + input.vars <- sapply(settings$state.data.assimilation$inputs, "[[", "variable.name") + operators <- sapply(settings$state.data.assimilation$inputs, "[[", "operator") + # Site location first col is the long second is the lat and row names are the site ids site.ids <- settings$run$site$id - - site.locs <- data.frame(Lon = as.numeric(settings$run$site$lon), - Lat = as.numeric(settings$run$site$lat)) - colnames(site.locs) <- c("Lon","Lat") + + site.locs <- data.frame( + Lon = as.numeric(settings$run$site$lon), + Lat = as.numeric(settings$run$site$lat) + ) + colnames(site.locs) <- c("Lon", "Lat") rownames(site.locs) <- site.ids - # start cut determines what is the best year to start spliting the met based on if we start with a restart or not. + # start cut determines what is the best year to start spliting the met based on if we start with a restart or not. 
if (!is.null(restart)) { - start.cut <-lubridate::ymd_hms(settings$state.data.assimilation$start.date, truncated = 3)-1 - Start.Year <-(lubridate::year(settings$state.data.assimilation$start.date)-1) - - }else{ - start.cut <-lubridate::ymd_hms(settings$state.data.assimilation$start.date, truncated = 3) - Start.Year <-(lubridate::year(settings$state.data.assimilation$start.date)) + start.cut <- lubridate::ymd_hms(settings$state.data.assimilation$start.date, truncated = 3) - 1 + Start.Year <- (lubridate::year(settings$state.data.assimilation$start.date) - 1) + } else { + start.cut <- lubridate::ymd_hms(settings$state.data.assimilation$start.date, truncated = 3) + Start.Year <- (lubridate::year(settings$state.data.assimilation$start.date)) } - - End.Year <- lubridate::year(settings$state.data.assimilation$end.date) # years that assimilations will be done for - obs will be subsetted based on this + + End.Year <- lubridate::year(settings$state.data.assimilation$end.date) # years that assimilations will be done for - obs will be subsetted based on this # filtering obs data based on years specifited in setting > state.data.assimilation - assimyears<-Start.Year:End.Year + assimyears <- Start.Year:End.Year obs.mean <- obs.mean[sapply(lubridate::year(names(obs.mean)), function(obs.year) obs.year %in% (assimyears))] obs.cov <- obs.cov[sapply(lubridate::year(names(obs.cov)), function(obs.year) obs.year %in% (assimyears))] # dir address based on the end date - if(!dir.exists("SDA")) dir.create("SDA",showWarnings = F) + if (!dir.exists("SDA")) dir.create("SDA", showWarnings = F) #--get model specific functions do.call("library", list(paste0("PEcAn.", model))) my.write_restart <- paste0("write_restart.", model) my.read_restart <- paste0("read_restart.", model) - my.split_inputs <- paste0("split_inputs.", model) + my.split_inputs <- paste0("split_inputs.", model) #- Double checking some of the inputs - if (is.null(adjustment)) adjustment<-T + if (is.null(adjustment)) adjustment <- T # models that don't need split_inputs, check register file for that register.xml <- system.file(paste0("register.", model, ".xml"), package = paste0("PEcAn.", model)) register <- XML::xmlToList(XML::xmlParse(register.xml)) no_split <- !as.logical(register$exact.dates) - - if (!exists(my.split_inputs) & !no_split) { + + if (!exists(my.split_inputs) & !no_split) { PEcAn.logger::logger.warn(my.split_inputs, "does not exist") PEcAn.logger::logger.severe("please make sure that the PEcAn interface is loaded for", model) PEcAn.logger::logger.warn( @@ -112,135 +113,141 @@ sda.enkf <- function(settings, "If your model does not need the split function you can specify that in register.Model.xml in model's inst folder by adding FALSE tag." 
) } - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### ### Splitting/Cutting the mets to the start and the end of SDA ### - ###-------------------------------------------------------------------### - - if(!no_split){ - for(i in seq_along(settings$run$inputs$met$path)){ - + ### -------------------------------------------------------------------### + + if (!no_split) { + for (i in seq_along(settings$run$inputs$met$path)) { ### model specific split inputs - settings$run$inputs$met$path[[i]] <- do.call(my.split_inputs, - args = list(settings = settings, - start.time = start.cut, - stop.time = lubridate::ymd_hms(settings$state.data.assimilation$end.date, truncated = 3, tz="UTC"), - inputs = settings$run$inputs$met$path[[i]], - overwrite=T)) + settings$run$inputs$met$path[[i]] <- do.call(my.split_inputs, + args = list( + settings = settings, + start.time = start.cut, + stop.time = lubridate::ymd_hms(settings$state.data.assimilation$end.date, truncated = 3, tz = "UTC"), + inputs = settings$run$inputs$met$path[[i]], + overwrite = T + ) + ) } } if (control$debug) browser() - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### ### tests before data assimilation ### - ###-------------------------------------------------------------------###---- + ### -------------------------------------------------------------------###---- obs.times <- names(obs.mean) - + obs.times.POSIX <- lubridate::ymd_hms(obs.times) - + ### TO DO: Need to find a way to deal with years before 1000 for paleon ### need a leading zero for (i in seq_along(obs.times)) { if (is.na(obs.times.POSIX[i])) { if (is.na(lubridate::ymd(obs.times[i]))) { PEcAn.logger::logger.warn("Error: no dates associated with observations") } else { - ### Data does not have time associated with dates + ### Data does not have time associated with dates ### Adding 12:59:59PM assuming next time step starts one second later PEcAn.logger::logger.warn("Pumpkin Warning: adding one minute before midnight time assumption to dates associated with data") - obs.times.POSIX[i] <- strptime(paste(obs.times[i], "23:59:59"),format="%Y-%m-%d %H:%M:%S",tz='UTC')#lubridate::ymd_hms(paste(obs.times[i], "23:59:59")) + obs.times.POSIX[i] <- strptime(paste(obs.times[i], "23:59:59"), format = "%Y-%m-%d %H:%M:%S", tz = "UTC") # lubridate::ymd_hms(paste(obs.times[i], "23:59:59")) } } } obs.times <- obs.times.POSIX - #obs.times[1] <- strptime('0950-12-31 23:59:59',format="%Y-%m-%d %H:%M:%S",tz="UTC") - #obs.times[2] <- strptime('0970-12-31 23:59:59',format="%Y-%m-%d %H:%M:%S",tz="UTC") - #obs.times[3] <- strptime('0990-12-31 23:59:59',format="%Y-%m-%d %H:%M:%S",tz="UTC") - - ###-------------------------------------------------------------------### + # obs.times[1] <- strptime('0950-12-31 23:59:59',format="%Y-%m-%d %H:%M:%S",tz="UTC") + # obs.times[2] <- strptime('0970-12-31 23:59:59',format="%Y-%m-%d %H:%M:%S",tz="UTC") + # obs.times[3] <- strptime('0990-12-31 23:59:59',format="%Y-%m-%d %H:%M:%S",tz="UTC") + + ### -------------------------------------------------------------------### ### set up for data assimilation ### - ###-------------------------------------------------------------------###----- - nt <- length(obs.times) - if (nt==0) PEcAn.logger::logger.severe('There has to be at least one observation, before you can start the SDA code.') - FORECAST <- ANALYSIS <- list() + ### 
-------------------------------------------------------------------###----- + nt <- length(obs.times) + if (nt == 0) PEcAn.logger::logger.severe("There has to be at least one observation, before you can start the SDA code.") + FORECAST <- ANALYSIS <- list() enkf.params <- list() - #The aqq and bqq are shape parameters estimated over time for the proccess covariance. #see GEF help - aqq <- NULL - bqq <- numeric(nt + 1) + # The aqq and bqq are shape parameters estimated over time for the proccess covariance. #see GEF help + aqq <- NULL + bqq <- numeric(nt + 1) ##### Creating matrices that describe the bounds of the state variables ##### interval is remade everytime depending on the data at time t ##### state.interval stays constant and converts new.analysis to be within the correct bounds #### This needs to be moved to GEF - interval <- NULL - state.interval <- cbind(as.numeric(lapply(settings$state.data.assimilation$state.variables, '[[', 'min_value')), - as.numeric(lapply(settings$state.data.assimilation$state.variables, '[[', 'max_value'))) + interval <- NULL + state.interval <- cbind( + as.numeric(lapply(settings$state.data.assimilation$state.variables, "[[", "min_value")), + as.numeric(lapply(settings$state.data.assimilation$state.variables, "[[", "max_value")) + ) rownames(state.interval) <- var.names - - + + # This reads ensemble weights generated by `get_ensemble_weights` function from AssimSequential package - if(!file.exists(file.path(settings$outdir, "ensemble_weights.Rdata"))){ + if (!file.exists(file.path(settings$outdir, "ensemble_weights.Rdata"))) { PEcAn.logger::logger.warn("ensemble_weights.Rdata cannot be found. Make sure you generate samples by running the get.ensemble.weights function before running SDA if you want the ensembles to be weighted.") - #create null list - for(tt in 1:length(obs.times)){ - weight_list[[tt]] <- rep(1,nens) #no weights + # create null list + for (tt in 1:length(obs.times)) { + weight_list[[tt]] <- rep(1, nens) # no weights } - } else{ - load(file.path(settings$outdir, "ensemble_weights.Rdata")) ## loads ensemble.samples + } else { + load(file.path(settings$outdir, "ensemble_weights.Rdata")) ## loads ensemble.samples } - - #Generate parameter needs to be run before this to generate the samples. This is hopefully done in the main workflow. - if(!file.exists(file.path(settings$outdir, "samples.Rdata"))) PEcAn.logger::logger.severe("samples.Rdata cannot be found. Make sure you generate samples by running the get.parameter.samples function before running SDA.") - load(file.path(settings$outdir, "samples.Rdata")) ## loads ensemble.samples - #reformatting params + + # Generate parameter needs to be run before this to generate the samples. This is hopefully done in the main workflow. + if (!file.exists(file.path(settings$outdir, "samples.Rdata"))) PEcAn.logger::logger.severe("samples.Rdata cannot be found. 
Make sure you generate samples by running the get.parameter.samples function before running SDA.") + load(file.path(settings$outdir, "samples.Rdata")) ## loads ensemble.samples + # reformatting params new.params <- list() for (i in seq_len(nens)) { new.params[[i]] <- lapply(ensemble.samples, function(x, n) { - x[i, ] }, n = i) - } - - ###-------------------------------------------------------------------### + x[i, ] + }, n = i) + } + + ### -------------------------------------------------------------------### ### If this is a restart - Picking up were we left last time ### - ###-------------------------------------------------------------------### - if (restart){ - if(!file.exists(file.path(settings$outdir,"SDA", "sda.output.Rdata"))){ + ### -------------------------------------------------------------------### + if (restart) { + if (!file.exists(file.path(settings$outdir, "SDA", "sda.output.Rdata"))) { PEcAn.logger::logger.warn("The SDA output from the older simulation doesn't exist.") t <- 1 } else { - load(file.path(settings$outdir,"SDA", "sda.output.Rdata")) + load(file.path(settings$outdir, "SDA", "sda.output.Rdata")) } - - load(file.path(settings$outdir,"SDA", "outconfig.Rdata")) + + load(file.path(settings$outdir, "SDA", "outconfig.Rdata")) run.id <- outconfig$runs$id ensemble.id <- outconfig$ensemble.id - - if(FALSE){ # I think let's do this outside of the sda function because sometimes you might want to restart from what you've set up to restart from and it's confusing if your file systems change within the function + + if (FALSE) { # I think let's do this outside of the sda function because sometimes you might want to restart from what you've set up to restart from and it's confusing if your file systems change within the function #--- Updating the nt and etc - if(!dir.exists(file.path(settings$outdir,"SDA",assimyears[t]))) dir.create(file.path(settings$outdir,"SDA",assimyears[t])) - + if (!dir.exists(file.path(settings$outdir, "SDA", assimyears[t]))) dir.create(file.path(settings$outdir, "SDA", assimyears[t])) + # finding/moving files to it's end year dir - files.last.sda<-list.files.nodir(file.path(settings$outdir,"SDA")) - - #copying - file.copy(file.path(file.path(settings$outdir,"SDA"),files.last.sda), - file.path(file.path(settings$outdir,"SDA"),paste0(assimyears[t],"/",files.last.sda))) + files.last.sda <- list.files.nodir(file.path(settings$outdir, "SDA")) + + # copying + file.copy( + file.path(file.path(settings$outdir, "SDA"), files.last.sda), + file.path(file.path(settings$outdir, "SDA"), paste0(assimyears[t], "/", files.last.sda)) + ) } - - if(length(FORECAST) == length(ANALYSIS) && length(FORECAST) > 0) t = 1 + length(FORECAST) #if you made it through the forecast and the analysis in t and failed on the analysis in t+1 so you didn't save t - - }else{ - t = 1 + + if (length(FORECAST) == length(ANALYSIS) && length(FORECAST) > 0) t <- 1 + length(FORECAST) # if you made it through the forecast and the analysis in t and failed on the analysis in t+1 so you didn't save t + } else { + t <- 1 } - ###------------------------------------------------------------------------------------------------### + ### ------------------------------------------------------------------------------------------------### ### loop over time ### - ###------------------------------------------------------------------------------------------------###---- - for(t in t:nt){ + ### ------------------------------------------------------------------------------------------------###---- + for (t in t:nt) 
{ if (control$debug) browser() # do we have obs for this time - what year is it ? obs <- which(!is.na(obs.mean[[t]])) obs.year <- lubridate::year(names(obs.mean)[t]) - ###-------------------------------------------------------------------------### + ### -------------------------------------------------------------------------### ### Taking care of Forecast. Splitting / Writting / running / reading back ### - ###-------------------------------------------------------------------------###----- + ### -------------------------------------------------------------------------###----- #- Check to see if this is the first run or not and what inputs needs to be sent to write.ensemble configs # Why t>1 is different ? Because the ensemble.write.config would be different. It has the restart argument and it needs it's own setup. # Also, assumes that sda has gotten through at least one analysis step @@ -249,109 +256,108 @@ sda.enkf <- function(settings, ## First question, do we have forecast output to compare to our data? ## If not, need to run forecast ## using paste because dont want to confuse with ensemble ids - if(file.exists('run') & file.exists(file.path(settings$outdir,"SDA", "outconfig.Rdata"))){ - - load(file.path(settings$outdir,"SDA", "outconfig.Rdata")) #need to load these in case during t==1 the analysis crashed so you have a forecast but didn't get to save the sda.output.Rdata + if (file.exists("run") & file.exists(file.path(settings$outdir, "SDA", "outconfig.Rdata"))) { + load(file.path(settings$outdir, "SDA", "outconfig.Rdata")) # need to load these in case during t==1 the analysis crashed so you have a forecast but didn't get to save the sda.output.Rdata run.id <- outconfig$runs$id ensemble.id <- outconfig$ensemble.id - if(t==1) inputs <- outconfig$samples$met - + if (t == 1) inputs <- outconfig$samples$met + sum_files <- sum(unlist(sapply( X = run.id, - FUN = function(x){ - pattern = paste0(x, '/*.nc$')[1] + FUN = function(x) { + pattern <- paste0(x, "/*.nc$")[1] grep( pattern = pattern, - x = list.files(file.path(outdir,x), "*.nc$", recursive = F, full.names = T) + x = list.files(file.path(outdir, x), "*.nc$", recursive = F, full.names = T) ) }, simplify = T ))) - - }else{ - sum_files <- 0 #if rundir hasn't been created yet + } else { + sum_files <- 0 # if rundir hasn't been created yet } - - - if (sum_files == 0){ #removing:t > 1 - #removing old simulations #why? don't we need them to restart? - #unlink(list.files(outdir,"*.nc",recursive = T,full.names = T)) + + + if (sum_files == 0) { # removing:t > 1 + # removing old simulations #why? don't we need them to restart? + # unlink(list.files(outdir,"*.nc",recursive = T,full.names = T)) #-Splitting the input for the models that they don't care about the start and end time of simulations and they run as long as their met file. 
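# A minimal standalone sketch (not part of this function) of the time window the
# met splitting below targets: each SDA cycle's forecast runs only from the
# previous observation time to the current one. The dates and object names here
# are made up for illustration.
demo_obs_times <- lubridate::ymd_hms(
  c("1961-12-31 23:59:59", "1962-12-31 23:59:59", "1963-12-31 23:59:59"),
  tz = "UTC"
)
t_demo <- 2
demo_window <- list(
  start.time = demo_obs_times[t_demo - 1], # end of the previous assimilation step
  stop.time  = demo_obs_times[t_demo]      # current observation time
)
demo_window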
inputs.split <- list() - if(!no_split & exists('outconfig')){ - for(i in seq_len(nens)){ + if (!no_split & exists("outconfig")) { + for (i in seq_len(nens)) { #---------------- model specific split inputs inputs.split$samples[i] <- do.call( my.split_inputs, args = list( settings = settings, start.time = (lubridate::ymd_hms( - obs.times[t - 1], truncated = 3, tz = "UTC" + obs.times[t - 1], + truncated = 3, tz = "UTC" )), stop.time = (lubridate::ymd_hms( - obs.times[t], truncated = 3, tz = "UTC" + obs.times[t], + truncated = 3, tz = "UTC" )), inputs = inputs$samples[[i]] ) ) - - } - - }else{ - if(t > 1) inputs.split <- inputs + } + } else { + if (t > 1) inputs.split <- inputs } #---------------- setting up the restart argument - - if(exists('new.state')){ #Has the analysis been run? Yes, then restart from analysis. - + + if (exists("new.state")) { # Has the analysis been run? Yes, then restart from analysis. + if (t == 2) { - start.time = lubridate::ymd_hms(settings$run$start.date, truncated = 3) - } else { - start.time = lubridate::ymd_hms(obs.times[t - 1], truncated = 3) - } - restart.arg<-list(runid = run.id, - start.time = start.time, - stop.time = lubridate::ymd_hms(obs.times[t], truncated = 3), - settings = settings, - new.state = new.state, - new.params = new.params, - inputs = inputs.split, - RENAME = TRUE, - ensemble.id=ensemble.id) - }else{ #The analysis has not been run. Start from beginning with no restart. - restart.arg = NULL + start.time <- lubridate::ymd_hms(settings$run$start.date, truncated = 3) + } else { + start.time <- lubridate::ymd_hms(obs.times[t - 1], truncated = 3) + } + restart.arg <- list( + runid = run.id, + start.time = start.time, + stop.time = lubridate::ymd_hms(obs.times[t], truncated = 3), + settings = settings, + new.state = new.state, + new.params = new.params, + inputs = inputs.split, + RENAME = TRUE, + ensemble.id = ensemble.id + ) + } else { # The analysis has not been run. Start from beginning with no restart. 
+ restart.arg <- NULL + } + + if (t == 1) { + config.settings <- settings + config.settings$run$end.date <- format(lubridate::ymd_hms(obs.times[t], truncated = 3), "%Y/%m/%d") + } else { + config.settings <- settings } - - if(t == 1){ - config.settings = settings - config.settings$run$end.date = format(lubridate::ymd_hms(obs.times[t], truncated = 3), "%Y/%m/%d") - } else { - config.settings = settings - } - - - + + + #-------------------------- Writing the config/Running the model and reading the outputs for each ensemble - outconfig <- PEcAn.uncertainty::write.ensemble.configs(defaults = config.settings$pfts, - ensemble.samples = ensemble.samples, - settings = config.settings, - model = config.settings$model$type, - write.to.db = config.settings$database$bety$write, - restart = restart.arg) - - save(outconfig, file = file.path(settings$outdir,"SDA", "outconfig.Rdata")) - + outconfig <- PEcAn.uncertainty::write.ensemble.configs( + defaults = config.settings$pfts, + ensemble.samples = ensemble.samples, + settings = config.settings, + model = config.settings$model$type, + write.to.db = config.settings$database$bety$write, + restart = restart.arg + ) + + save(outconfig, file = file.path(settings$outdir, "SDA", "outconfig.Rdata")) + run.id <- outconfig$runs$id ensemble.id <- outconfig$ensemble.id - if(t==1) inputs <- outconfig$samples$met # for any time after t==1 the met is the split met - - if(control$debug) browser() + if (t == 1) inputs <- outconfig$samples$met # for any time after t==1 the met is the split met + + if (control$debug) browser() #-------------------------------------------- RUN PEcAn.workflow::start_model_runs(settings, settings$database$bety$write) - - - } #------------------------------------------- Reading the output X_tmp <- vector("list", 2) @@ -368,246 +374,266 @@ sda.enkf <- function(settings, params = new.params[[i]] ) ) - + # states will be in X, but we also want to carry some deterministic relationships to write_restart # these will be stored in params - X[[i]] <- X_tmp[[i]]$X - if (!is.null(X_tmp[[i]]$params)) + X[[i]] <- X_tmp[[i]]$X + if (!is.null(X_tmp[[i]]$params)) { new.params[[i]] <- X_tmp[[i]]$params - + } } - + #----chaning the extension of nc files to a more specific date related name - files <- list.files( + files <- list.files( path = file.path(settings$outdir, "out"), "*.nc$", recursive = TRUE, - full.names = TRUE) - files <- files[grep(pattern = "SDA*", basename(files), invert = TRUE)] - - - file.rename(files, - file.path(dirname(files), - paste0("SDA_", basename(files), "_", gsub(" ", "", names(obs.mean)[t]), ".nc") ) ) - + full.names = TRUE + ) + files <- files[grep(pattern = "SDA*", basename(files), invert = TRUE)] + + + file.rename( + files, + file.path( + dirname(files), + paste0("SDA_", basename(files), "_", gsub(" ", "", names(obs.mean)[t]), ".nc") + ) + ) + #--- Reformating X X <- do.call(rbind, X) - - - #unit scaling if needed - - X <- rescaling_stateVars(settings, X, multiply = TRUE) - - - if(sum(X,na.rm=T) == 0){ - PEcAn.logger::logger.severe(paste('NO FORECAST for',obs.times[t],'Check outdir logfiles or read restart. Do you have the right variable names?')) + + + # unit scaling if needed + + X <- rescaling_stateVars(settings, X, multiply = TRUE) + + + if (sum(X, na.rm = T) == 0) { + PEcAn.logger::logger.severe(paste("NO FORECAST for", obs.times[t], "Check outdir logfiles or read restart. 
Do you have the right variable names?")) } - - ###-------------------------------------------------------------------### + + ### -------------------------------------------------------------------### ### preparing OBS ### - ###-------------------------------------------------------------------### - + ### -------------------------------------------------------------------### + if (any(obs)) { # finding obs data - - #which type of observation do we have at this time point? - input.order <- sapply(input.vars, agrep, x=names(obs.mean[[t]])) + + # which type of observation do we have at this time point? + input.order <- sapply(input.vars, agrep, x = names(obs.mean[[t]])) names(input.order) <- operators - input.order.cov <- sapply(input.vars, agrep, x=colnames(obs.cov[[t]])) + input.order.cov <- sapply(input.vars, agrep, x = colnames(obs.cov[[t]])) names(input.order.cov) <- operators - + ### this is for pfts not sure if it's always nessecary? - choose <- unlist(sapply(colnames(X), agrep, x=names(obs.mean[[t]]), max=1, USE.NAMES = F)) - choose.cov <- unlist(sapply(colnames(X), agrep, x=colnames(obs.cov[[t]]), max=1, USE.NAMES = F)) - - if(!any(choose)){ + choose <- unlist(sapply(colnames(X), agrep, x = names(obs.mean[[t]]), max = 1, USE.NAMES = F)) + choose.cov <- unlist(sapply(colnames(X), agrep, x = colnames(obs.cov[[t]]), max = 1, USE.NAMES = F)) + + if (!any(choose)) { choose <- unlist(input.order) choose <- order(names(obs.mean[[t]])) choose.cov <- unlist(input.order.cov) choose.cov <- order(colnames(obs.cov[[t]])) - #substr(names(obs.mean[[t]]),nchar(names(choose)[1])+1,max(nchar(names(obs.mean[[t]])))) + # substr(names(obs.mean[[t]]),nchar(names(choose)[1])+1,max(nchar(names(obs.mean[[t]])))) } - # droping the ones that their means are zero + # droping the ones that their means are zero na.obs.mean <- which(is.na(unlist(obs.mean[[t]][choose]))) na.obs.cov <- which(is.na(unlist(obs.cov[[t]][choose]))) - if (length(na.obs.mean) > 0) choose <- choose [-na.obs.mean] + if (length(na.obs.mean) > 0) choose <- choose[-na.obs.mean] if (length(na.obs.cov) > 0) choose.cov <- choose[-na.obs.cov] - + Y <- unlist(obs.mean[[t]][choose]) - - R <- as.matrix(obs.cov[[t]][choose.cov,choose.cov]) - R[is.na(R)]<-0.1 - + + R <- as.matrix(obs.cov[[t]][choose.cov, choose.cov]) + R[is.na(R)] <- 0.1 + if (control$debug) browser() - + # making the mapping matrix - #TO DO: doesn't work unless it's one to one - if(length(operators)==0) H <- Construct_H(choose, Y, X) - ###-------------------------------------------------------------------### + # TO DO: doesn't work unless it's one to one + if (length(operators) == 0) H <- Construct_H(choose, Y, X) + ### -------------------------------------------------------------------### ### Analysis ### - ###-------------------------------------------------------------------###---- - if(processvar == FALSE){an.method<-EnKF }else{ an.method<-GEF } + ### -------------------------------------------------------------------###---- + if (processvar == FALSE) { + an.method <- EnKF + } else { + an.method <- GEF + } #-extraArgs if (processvar && t > 1) { - aqq <- enkf.params[[t-1]]$aqq - bqq <- enkf.params[[t-1]]$bqq - X.new<-enkf.params[[t-1]]$X.new + aqq <- enkf.params[[t - 1]]$aqq + bqq <- enkf.params[[t - 1]]$bqq + X.new <- enkf.params[[t - 1]]$X.new } - - if(!exists('Cmcmc_tobit2space')) { - recompileTobit = TRUE - }else{ - recompileTobit = FALSE + + if (!exists("Cmcmc_tobit2space")) { + recompileTobit <- TRUE + } else { + recompileTobit <- FALSE } - - if(!exists('Cmcmc')) { - 
recompileGEF = TRUE - }else{ - recompileGEF = FALSE + + if (!exists("Cmcmc")) { + recompileGEF <- TRUE + } else { + recompileGEF <- FALSE } - - + + if (is.null(outconfig$samples$met$ids)) { wts <- unlist(weight_list[[t]]) } else { wts <- unlist(weight_list[[t]][outconfig$samples$met$ids]) } - + #-analysis function enkf.params[[t]] <- Analysis.sda(settings, - FUN=an.method, - Forecast=list(Q=Q, X=X), - Observed=list(R=R, Y=Y), - H=H, - extraArg=list(aqq=aqq, bqq=bqq, t=t, - recompileTobit=recompileTobit, - recompileGEF=recompileGEF, - wts = wts), - nt=nt, - obs.mean=obs.mean, - obs.cov=obs.cov) - - #Reading back mu.f/Pf and mu.a/Pa + FUN = an.method, + Forecast = list(Q = Q, X = X), + Observed = list(R = R, Y = Y), + H = H, + extraArg = list( + aqq = aqq, bqq = bqq, t = t, + recompileTobit = recompileTobit, + recompileGEF = recompileGEF, + wts = wts + ), + nt = nt, + obs.mean = obs.mean, + obs.cov = obs.cov + ) + + # Reading back mu.f/Pf and mu.a/Pa FORECAST[[t]] <- X - #Forecast + # Forecast mu.f <- enkf.params[[t]]$mu.f Pf <- enkf.params[[t]]$Pf - #Analysis + # Analysis Pa <- enkf.params[[t]]$Pa mu.a <- enkf.params[[t]]$mu.a - + diag(Pf)[which(diag(Pf) == 0)] <- 0.1 ## hack for zero variance - #extracting extra outputs + # extracting extra outputs if (processvar) { aqq <- enkf.params[[t]]$aqq bqq <- enkf.params[[t]]$bqq X.new <- enkf.params[[t]]$X.new } - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### ### Trace ### - ###-------------------------------------------------------------------###---- + ### -------------------------------------------------------------------###---- #-- writing Trace-------------------- if (control$trace) { - PEcAn.logger::logger.info ("\n --------------------------- ", - obs.year, - " ---------------------------\n") - PEcAn.logger::logger.info ("\n --------------Obs mean----------- \n") + PEcAn.logger::logger.info( + "\n --------------------------- ", + obs.year, + " ---------------------------\n" + ) + PEcAn.logger::logger.info("\n --------------Obs mean----------- \n") print(Y) - PEcAn.logger::logger.info ("\n --------------Obs Cov ----------- \n") + PEcAn.logger::logger.info("\n --------------Obs Cov ----------- \n") print(R) - PEcAn.logger::logger.info ("\n --------------Forecast mean ----------- \n") + PEcAn.logger::logger.info("\n --------------Forecast mean ----------- \n") print(enkf.params[[t]]$mu.f) - PEcAn.logger::logger.info ("\n --------------Forecast Cov ----------- \n") + PEcAn.logger::logger.info("\n --------------Forecast Cov ----------- \n") print(enkf.params[[t]]$Pf) - PEcAn.logger::logger.info ("\n --------------Analysis mean ----------- \n") + PEcAn.logger::logger.info("\n --------------Analysis mean ----------- \n") print(t(enkf.params[[t]]$mu.a)) - PEcAn.logger::logger.info ("\n --------------Analysis Cov ----------- \n") + PEcAn.logger::logger.info("\n --------------Analysis Cov ----------- \n") print(enkf.params[[t]]$Pa) - PEcAn.logger::logger.info ("\n ------------------------------------------------------\n") + PEcAn.logger::logger.info("\n ------------------------------------------------------\n") } if (control$debug) browser() - if (control$pause) readline(prompt="Press [enter] to continue \n") - + if (control$pause) readline(prompt = "Press [enter] to continue \n") } else { mu.f <- as.numeric(apply(X, 2, mean, na.rm = TRUE)) Pf <- stats::cov(X) - ###-------------------------------------------------------------------### + ### 
-------------------------------------------------------------------### ### No Observations -- ###---- - ###-----------------------------------------------------------------### + ### -----------------------------------------------------------------### ### no process variance -- forecast is the same as the analysis ### - if (processvar==FALSE) { + if (processvar == FALSE) { mu.a <- mu.f - Pa <- Pf + Q + Pa <- Pf + Q ### yes process variance -- no data } else { mu.a <- mu.f - if(!exists('q.bar')){ + if (!exists("q.bar")) { q.bar <- diag(ncol(X)) - PEcAn.logger::logger.info('Process variance not estimated. Analysis has been given uninformative process variance') - } - Pa <- Pf + solve(q.bar) + PEcAn.logger::logger.info("Process variance not estimated. Analysis has been given uninformative process variance") + } + Pa <- Pf + solve(q.bar) } enkf.params[[t]] <- list(mu.f = mu.f, Pf = Pf, mu.a = mu.a, Pa = Pa) } - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### ### adjustement/update state matrix ### - ###-------------------------------------------------------------------###---- - - if(adjustment == TRUE){ - #if we have process var then x is x.new - if (processvar & exists('X.new')) {X.adj.arg <- X.new }else{ X.adj.arg <- X ; print('using X not X.new. Assuming GEF was skipped this iteration?')} - analysis <-adj.ens(Pf, X.adj.arg, mu.f, mu.a, Pa) - }else{ + ### -------------------------------------------------------------------###---- + + if (adjustment == TRUE) { + # if we have process var then x is x.new + if (processvar & exists("X.new")) { + X.adj.arg <- X.new + } else { + X.adj.arg <- X + print("using X not X.new. Assuming GEF was skipped this iteration?") + } + analysis <- adj.ens(Pf, X.adj.arg, mu.f, mu.a, Pa) + } else { analysis <- as.data.frame(mvtnorm::rmvnorm(as.numeric(nrow(X)), mu.a, Pa, method = "svd")) } - + colnames(analysis) <- colnames(X) ##### Mapping analysis vectors to be in bounds of state variables - if(processvar==TRUE){ - for(i in 1:ncol(analysis)){ - int.save <- state.interval[which(startsWith(colnames(analysis)[i], - var.names)),] - analysis[analysis[,i] < int.save[1],i] <- int.save[1] - analysis[analysis[,i] > int.save[2],i] <- int.save[2] + if (processvar == TRUE) { + for (i in 1:ncol(analysis)) { + int.save <- state.interval[which(startsWith( + colnames(analysis)[i], + var.names + )), ] + analysis[analysis[, i] < int.save[1], i] <- int.save[1] + analysis[analysis[, i] > int.save[2], i] <- int.save[2] } } - + ## in the future will have to be separated from analysis - - new.state <- as.data.frame(analysis) + + new.state <- as.data.frame(analysis) ANALYSIS[[t]] <- analysis FORECAST[[t]] <- X - - - ###-------------------------------------------------------------------### + + + ### -------------------------------------------------------------------### ### save outputs ### - ###-------------------------------------------------------------------###---- - Viz.output <- list(settings, obs.mean, obs.cov) #keeping obs data and settings for later visualization in Dashboard - + ### -------------------------------------------------------------------###---- + Viz.output <- list(settings, obs.mean, obs.cov) # keeping obs data and settings for later visualization in Dashboard + save(site.locs, t, X, FORECAST, ANALYSIS, enkf.params, new.state, new.params, run.id, - ensemble.id, ensemble.samples, inputs, Viz.output, file = file.path(settings$outdir,"SDA", "sda.output.Rdata")) - - - ### 
Interactive plotting ------------------------------------------------------ + ensemble.id, ensemble.samples, inputs, Viz.output, + file = file.path(settings$outdir, "SDA", "sda.output.Rdata") + ) + + + ### Interactive plotting ------------------------------------------------------ if (t > 1 & control$interactivePlot) { # - print(interactive.plotting.sda(settings,t,obs.times,obs.mean,obs.cov,obs,X,FORECAST,ANALYSIS)) + print(interactive.plotting.sda(settings, t, obs.times, obs.mean, obs.cov, obs, X, FORECAST, ANALYSIS)) } - } ### end loop over time if (control$debug) browser() - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------### ### time series plots ### - ###-------------------------------------------------------------------###----- - if(control$TimeseriesPlot) post.analysis.ggplot(settings,t,obs.times,obs.mean,obs.cov,obs,X,FORECAST,ANALYSIS,plot.title=control$plot.title) - if(control$TimeseriesPlot) PEcAnAssimSequential::post.analysis.ggplot.violin(settings, t, obs.times, obs.mean, obs.cov, obs, X, FORECAST, ANALYSIS) - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------###----- + if (control$TimeseriesPlot) post.analysis.ggplot(settings, t, obs.times, obs.mean, obs.cov, obs, X, FORECAST, ANALYSIS, plot.title = control$plot.title) + if (control$TimeseriesPlot) PEcAnAssimSequential::post.analysis.ggplot.violin(settings, t, obs.times, obs.mean, obs.cov, obs, X, FORECAST, ANALYSIS) + ### -------------------------------------------------------------------### ### bias diagnostics ### - ###-------------------------------------------------------------------###---- - if(control$BiasPlot) PEcAnAssimSequential::postana.bias.plotting.sda(settings,t,obs.times,obs.mean,obs.cov,obs,X,FORECAST,ANALYSIS) - ###-------------------------------------------------------------------### + ### -------------------------------------------------------------------###---- + if (control$BiasPlot) PEcAnAssimSequential::postana.bias.plotting.sda(settings, t, obs.times, obs.mean, obs.cov, obs, X, FORECAST, ANALYSIS) + ### -------------------------------------------------------------------### ### process variance plots ### - ###-------------------------------------------------------------------###----- - if (processvar & control$BiasPlot) postana.bias.plotting.sda.corr(t,obs.times,X,aqq,bqq) - + ### -------------------------------------------------------------------###----- + if (processvar & control$BiasPlot) postana.bias.plotting.sda.corr(t, obs.times, X, aqq, bqq) } # sda.enkf diff --git a/modules/assim.sequential/R/sda_matchparam.R b/modules/assim.sequential/R/sda_matchparam.R index 13a0ea4d3b1..559ce0c7f81 100644 --- a/modules/assim.sequential/R/sda_matchparam.R +++ b/modules/assim.sequential/R/sda_matchparam.R @@ -2,37 +2,38 @@ #' #' @name sda_matchparam #' @author Alexis Helgeson -#' +#' #' @param settings settings object passed from sda.enkf_MultiSite #' @param ensemble.samples taken from sample.Rdata object #' @param site.ids character object passed from sda.enkf_MultiSite #' @param nens number of ensemble members in model runs, taken from restart$runids #' -#' @return new.params object used to -sda_matchparam <- function(settings, ensemble.samples, site.ids, nens){ - #reformatting params +#' @return new.params object used to +sda_matchparam <- function(settings, ensemble.samples, site.ids, nens) { + # reformatting 
params new.params <- list() all.pft.names <- names(ensemble.samples) - - #loop over each site. + + # loop over each site. for (i in seq_along(site.ids)) { - #match pft name + # match pft name site.pft.name <- settings[[i]]$run$site$site.pft$pft.name - if(is.null(site.pft.name)){ - site_pft = utils::read.csv(settings[[i]]$run$inputs$pft.site$path) - site.pft.name = site_pft$pft[site_pft$site == settings[[i]]$run$site$id] + if (is.null(site.pft.name)) { + site_pft <- utils::read.csv(settings[[i]]$run$inputs$pft.site$path) + site.pft.name <- site_pft$pft[site_pft$site == settings[[i]]$run$site$id] } - which.pft <- which(all.pft.names==site.pft.name) - + which.pft <- which(all.pft.names == site.pft.name) + site.param <- list() site.samples <- ensemble.samples[which.pft] for (j in seq_len(nens)) { site.param[[j]] <- lapply(site.samples, function(x, n) { - x[j, ] }, n = j) - } + x[j, ] + }, n = j) + } new.params[[i]] <- site.param } names(new.params) <- site.ids - + return(new.params) -} \ No newline at end of file +} diff --git a/modules/assim.sequential/R/sda_plotting.R b/modules/assim.sequential/R/sda_plotting.R index a9a33b614f0..7a85aa38753 100755 --- a/modules/assim.sequential/R/sda_plotting.R +++ b/modules/assim.sequential/R/sda_plotting.R @@ -1,29 +1,30 @@ # @author Ann Raiho # @description This function generates a series of colors. This is mainly used in AssimSequential package. -generate_colors_sda <-function(){ - pink <- col2rgb("deeppink") - alphapink <- rgb(pink[1], pink[2], pink[3], 180, max = 255) - green <- col2rgb("green") - alphagreen <- rgb(green[1], green[2], green[3], 75, max = 255) - blue <- col2rgb("blue") - alphablue <- rgb(blue[1], blue[2], blue[3], 75, max = 255) - purple <- col2rgb("purple") +generate_colors_sda <- function() { + pink <- col2rgb("deeppink") + alphapink <- rgb(pink[1], pink[2], pink[3], 180, max = 255) + green <- col2rgb("green") + alphagreen <- rgb(green[1], green[2], green[3], 75, max = 255) + blue <- col2rgb("blue") + alphablue <- rgb(blue[1], blue[2], blue[3], 75, max = 255) + purple <- col2rgb("purple") alphapurple <- rgb(purple[1], purple[2], purple[3], 75, max = 255) - brown <- col2rgb("brown") - alphabrown <- rgb(brown[1], brown[2], brown[3], 30, max = 255) + brown <- col2rgb("brown") + alphabrown <- rgb(brown[1], brown[2], brown[3], 30, max = 255) return(list( pink = alphapink, green = alphagreen, blue = alphablue, purple = alphapurple, - brown = alphabrown)) + brown = alphabrown + )) } ##' Internal functions for plotting SDA outputs. Interactive, post analysis time-series and bias plots in base plotting system and ggplot -##' @param settings pecan standard settings list. -##' @param t current time - int number giving the position of the current time in obs.time. +##' @param settings pecan standard settings list. +##' @param t current time - int number giving the position of the current time in obs.time. ##' @param obs.times vector of dates of measurements ##' @param obs.mean list of vectors of the means of observed data named by the measured date. ##' @param obs.cov list of cov matrices of the observed data named by the measured date. @@ -35,26 +36,29 @@ generate_colors_sda <-function(){ ##' @param Add_Map Bool variable decide if we want to export the GIS map of Ecoregion. 
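# A hedged sketch of the structure the shared `obs.mean` / `obs.cov` arguments
# are expected to have (variable names and numbers are made up; the real lists
# are assembled upstream of the SDA workflow): a list named by observation date,
# holding a named mean vector and a matching covariance matrix for each date.
demo_obs.mean <- list(
  "2004-12-31 23:59:59" = c(AbvGrndWood = 9.3, LAI = 3.1),
  "2005-12-31 23:59:59" = c(AbvGrndWood = 9.8, LAI = 3.4)
)
demo_obs.cov <- lapply(demo_obs.mean, function(x) {
  v <- diag(0.1 * x, nrow = length(x))     # toy diagonal covariance
  dimnames(v) <- list(names(x), names(x))  # named like the observed variables
  v
})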
##' @export -interactive.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov, obs, X, FORECAST, ANALYSIS){ - +interactive.plotting.sda <- function(settings, t, obs.times, obs.mean, obs.cov, obs, X, FORECAST, ANALYSIS) { if (!requireNamespace("plyr", quietly = TRUE)) { PEcAn.logger::logger.error( "Can't find package 'plyr',", "needed by `PEcAnAssimSequential::interactive.plotting.sda()`.", - "Please install it and try again.") + "Please install it and try again." + ) } if (!requireNamespace("PEcAn.visualization", quietly = TRUE)) { PEcAn.logger::logger.error( "Can't find package 'PEcAn.visualization',", "needed by `PEcAnAssimSequential::interactive.plotting.sda()`.", - "Please install it and try again.") + "Please install it and try again." + ) } - #Defining some colors + # Defining some colors sda_colors <- generate_colors_sda() t1 <- 1 - var.names <- var.names <- sapply(settings$state.data.assimilation$state.variable, '[[', "variable.name") - names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { names(x) }))) - + var.names <- var.names <- sapply(settings$state.data.assimilation$state.variable, "[[", "variable.name") + names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { + names(x) + }))) + Ybar <- t(sapply(obs.mean[t1:t], function(x) { tmp <- rep(NA, length(names.y)) names(tmp) <- names.y @@ -62,103 +66,121 @@ interactive.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov, ob tmp[mch] <- x[mch] tmp })) - - if(any(obs)){ + + if (any(obs)) { Y.order <- stats::na.omit(pmatch(colnames(X), colnames(Ybar))) - Ybar <- Ybar[,Y.order] + Ybar <- Ybar[, Y.order] Ybar[is.na(Ybar)] <- 0 YCI <- t(as.matrix(sapply(obs.cov[t1:t], function(x) { - if (length(x)<2) { + if (length(x) < 2) { rep(NA, length(names.y)) } sqrt(diag(x)) }))) - - YCI <- YCI[,Y.order] + + YCI <- YCI[, Y.order] YCI[is.na(YCI)] <- 0 - - }else{ - YCI <- matrix(NA,nrow=length(t1:t), ncol=max(length(names.y),1)) + } else { + YCI <- matrix(NA, nrow = length(t1:t), ncol = max(length(names.y), 1)) } - + graphics::par(mfrow = c(2, 1)) - colmax<-2 + colmax <- 2 for (i in 1:ncol(FORECAST[[t]])) { # - - Xbar <- plyr::laply(FORECAST[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - Xci <- plyr::laply(FORECAST[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) }) - - Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) }) - - ylab.names <- unlist(sapply(settings$state.data.assimilation$state.variable, - function(x) { x })[2, ], use.names = FALSE) - + + Xbar <- plyr::laply(FORECAST[t1:t], function(x) { + mean(x[, i], na.rm = TRUE) + }) + Xci <- plyr::laply(FORECAST[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) + }) + + Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { + mean(x[, i], na.rm = TRUE) + }) + XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975), na.rm = TRUE) + }) + + ylab.names <- unlist(sapply( + settings$state.data.assimilation$state.variable, + function(x) { + x + } + )[2, ], use.names = FALSE) + # observation / data if (i <= ncol(Ybar) & any(obs)) { - #browser() - plot(as.Date(obs.times[t1:t]), - Xbar, - ylim = range(c(XaCI, Xci, Ybar[,i]), na.rm = TRUE), - type = "n", - xlab = "Year", - ylab = ylab.names[grep(colnames(X)[i], var.names)], - main = colnames(X)[i]) + # browser() + plot(as.Date(obs.times[t1:t]), + Xbar, + ylim = range(c(XaCI, Xci, Ybar[, i]), na.rm = 
TRUE), + type = "n", + xlab = "Year", + ylab = ylab.names[grep(colnames(X)[i], var.names)], + main = colnames(X)[i] + ) PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), - as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, - as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, - col = sda_colors$green) - graphics::lines(as.Date(obs.times[t1:t]), - as.numeric(Ybar[, i]), - type = "l", - col = "darkgreen", - lwd = 2) - }else{ - plot(as.Date(obs.times[t1:t]), - Xbar, - ylim = range(c(XaCI, Xci), na.rm = TRUE), - type = "n", - xlab = "Year", - ylab = ylab.names[grep(colnames(X)[i], var.names)], - main = colnames(X)[i]) + as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, + as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, + col = sda_colors$green + ) + graphics::lines(as.Date(obs.times[t1:t]), + as.numeric(Ybar[, i]), + type = "l", + col = "darkgreen", + lwd = 2 + ) + } else { + plot(as.Date(obs.times[t1:t]), + Xbar, + ylim = range(c(XaCI, Xci), na.rm = TRUE), + type = "n", + xlab = "Year", + ylab = ylab.names[grep(colnames(X)[i], var.names)], + main = colnames(X)[i] + ) } - + # forecast - PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = sda_colors$blue) #col='lightblue') + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = sda_colors$blue) # col='lightblue') graphics::lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) - + # analysis PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), XaCI[, 1], XaCI[, 2], col = sda_colors$pink) graphics::lines(as.Date(obs.times[t1:t]), Xa, col = "black", lty = 2, lwd = 2) - #legend('topright', c('Forecast','Data','Analysis'), col=c(sda_colors$blue, sda_colors$green, sda_colors$pink), lty=1, lwd=5) + # legend('topright', c('Forecast','Data','Analysis'), col=c(sda_colors$blue, sda_colors$green, sda_colors$pink), lty=1, lwd=5) } } ##' @rdname interactive.plotting.sda ##' @export -postana.timeser.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov, obs, X, FORECAST, ANALYSIS){ - +postana.timeser.plotting.sda <- function(settings, t, obs.times, obs.mean, obs.cov, obs, X, FORECAST, ANALYSIS) { if (!requireNamespace("plyr", quietly = TRUE)) { PEcAn.logger::logger.error( "Can't find package 'plyr',", "needed by `PEcAnAssimSequential::postana.timeser.plotting.sda()`.", - "Please install it and try again.") + "Please install it and try again." + ) } if (!requireNamespace("PEcAn.visualization", quietly = TRUE)) { PEcAn.logger::logger.error( "Can't find package 'PEcAn.visualization',", "needed by `PEcAnAssimSequential::postana.timeser.plotting.sda()`.", - "Please install it and try again.") + "Please install it and try again." 
+ ) } - #Defining some colors + # Defining some colors sda_colors <- generate_colors_sda() t1 <- 1 - var.names <- sapply(settings$state.data.assimilation$state.variable, '[[', "variable.name") + var.names <- sapply(settings$state.data.assimilation$state.variable, "[[", "variable.name") #---- - grDevices::pdf(file.path(settings$outdir,"SDA", "sda.enkf.time-series.pdf")) - names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { names(x) }))) + grDevices::pdf(file.path(settings$outdir, "SDA", "sda.enkf.time-series.pdf")) + names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { + names(x) + }))) Ybar <- t(sapply(obs.mean[t1:t], function(x) { tmp <- rep(NA, length(names.y)) names(tmp) <- names.y @@ -166,112 +188,129 @@ postana.timeser.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov tmp[mch] <- x[mch] tmp })) - #Y.order <- na.omit(pmatch(colnames(FORECAST[[t]]), colnames(Ybar))) - Y.order <- sapply(colnames(FORECAST[[t]]),agrep,x=colnames(Ybar),max=2,USE.NAMES = F)%>%unlist - Ybar <- Ybar[,Y.order] + # Y.order <- na.omit(pmatch(colnames(FORECAST[[t]]), colnames(Ybar))) + Y.order <- sapply(colnames(FORECAST[[t]]), agrep, x = colnames(Ybar), max = 2, USE.NAMES = F) %>% unlist() + Ybar <- Ybar[, Y.order] YCI <- t(as.matrix(sapply(obs.cov[t1:t], function(x) { if (is.na(x)) { rep(NA, length(names.y)) } else { - sqrt(diag(x)) + sqrt(diag(x)) } }))) - - Ybar[is.na(Ybar)]<-0 - YCI[is.na(YCI)]<-0 - - YCI <- YCI[,c(Y.order)] - - - - Xsum <- plyr::laply(FORECAST, function(x) { mean(rowSums(x[,1:length(names.y)], na.rm = TRUE)) })[t1:t] - Xasum <- plyr::laply(ANALYSIS, function(x) { mean(rowSums(x[,1:length(names.y)], na.rm = TRUE)) })[t1:t] - - #------For each state variable + + Ybar[is.na(Ybar)] <- 0 + YCI[is.na(YCI)] <- 0 + + YCI <- YCI[, c(Y.order)] + + + + Xsum <- plyr::laply(FORECAST, function(x) { + mean(rowSums(x[, 1:length(names.y)], na.rm = TRUE)) + })[t1:t] + Xasum <- plyr::laply(ANALYSIS, function(x) { + mean(rowSums(x[, 1:length(names.y)], na.rm = TRUE)) + })[t1:t] + + #------For each state variable for (i in seq_len(ncol(X))) { Xbar <- plyr::laply(FORECAST[t1:t], function(x) { - mean(x[, i], na.rm = TRUE) }) #/rowSums(x[,1:9],na.rm = T) - Xci <- plyr::laply(FORECAST[t1:t], function(x) { - stats::quantile(x[, i], c(0.025, 0.975),na.rm = T) }) - - Xci[is.na(Xci)]<-0 - + mean(x[, i], na.rm = TRUE) + }) # /rowSums(x[,1:9],na.rm = T) + Xci <- plyr::laply(FORECAST[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975), na.rm = T) + }) + + Xci[is.na(Xci)] <- 0 + Xbar <- Xbar Xci <- Xci - - Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { - mean(x[, i],na.rm = T) }) - - XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { - stats::quantile(x[, i], c(0.025, 0.975),na.rm = T )}) - + + Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { + mean(x[, i], na.rm = T) + }) + + XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975), na.rm = T) + }) + Xa <- Xa XaCI <- XaCI - + plot(as.Date(obs.times[t1:t]), - Xbar, - ylim = range(c(XaCI, Xci,Ybar[, 1]), na.rm = TRUE), - type = "n", - xlab = "Year", - #ylab = ylab.names[grep(colnames(X)[i], var.names)], - main = colnames(X)[i]) - + Xbar, + ylim = range(c(XaCI, Xci, Ybar[, 1]), na.rm = TRUE), + type = "n", + xlab = "Year", + # ylab = ylab.names[grep(colnames(X)[i], var.names)], + main = colnames(X)[i] + ) + # observation / data - if (i<=ncol(X)) { # - PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), - as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, - as.numeric(Ybar[, i]) + 
as.numeric(YCI[, i]) * 1.96, - col = sda_colors$green) - graphics::lines(as.Date(obs.times[t1:t]), - as.numeric(Ybar[, i]), - type = "l", col = "darkgreen", lwd = 2) + if (i <= ncol(X)) { # + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), + as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, + as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, + col = sda_colors$green + ) + graphics::lines(as.Date(obs.times[t1:t]), + as.numeric(Ybar[, i]), + type = "l", col = "darkgreen", lwd = 2 + ) } - + # forecast - PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = sda_colors$blue) #col='lightblue') #alphablue - graphics::lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) #"darkblue" - + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), Xci[, 1], Xci[, 2], col = sda_colors$blue) # col='lightblue') #alphablue + graphics::lines(as.Date(obs.times[t1:t]), Xbar, col = "darkblue", type = "l", lwd = 2) # "darkblue" + # analysis - PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), XaCI[, 1], XaCI[, 2], col = sda_colors$pink) #alphapink - graphics::lines(as.Date(obs.times[t1:t]), Xa, col = "black", lty = 2, lwd = 2) #"black" - + PEcAn.visualization::ciEnvelope(as.Date(obs.times[t1:t]), XaCI[, 1], XaCI[, 2], col = sda_colors$pink) # alphapink + graphics::lines(as.Date(obs.times[t1:t]), Xa, col = "black", lty = 2, lwd = 2) # "black" + graphics::legend( - 'topright', - c('Forecast', 'Data', 'Analysis'), - col=c(sda_colors$blue, sda_colors$green, sda_colors$pink), - lty=1, - lwd=5) - + "topright", + c("Forecast", "Data", "Analysis"), + col = c(sda_colors$blue, sda_colors$green, sda_colors$pink), + lty = 1, + lwd = 5 + ) } - + grDevices::dev.off() - } ##' @rdname interactive.plotting.sda ##' @export -postana.bias.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov, obs, X, FORECAST, ANALYSIS){ - +postana.bias.plotting.sda <- function(settings, t, obs.times, obs.mean, obs.cov, obs, X, FORECAST, ANALYSIS) { if (!requireNamespace("plyr", quietly = TRUE)) { PEcAn.logger::logger.error( "Can't find package 'plyr',", "needed by `PEcAnAssimSequential::postana.bias.plotting.sda()`.", - "Please install it and try again.") + "Please install it and try again." + ) } if (!requireNamespace("PEcAn.visualization", quietly = TRUE)) { PEcAn.logger::logger.error( "Can't find package 'PEcan.visualization',", "needed by `PEcAnAssimSequential::postana.bias.plotting.sda()`.", - "Please install it and try again.") + "Please install it and try again." 
+ ) } - #Defining some colors + # Defining some colors sda_colors <- generate_colors_sda() t1 <- 1 - ylab.names <- unlist(sapply(settings$state.data.assimilation$state.variable, - function(x) { x })[2, ], use.names = FALSE) - names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { names(x) }))) + ylab.names <- unlist(sapply( + settings$state.data.assimilation$state.variable, + function(x) { + x + } + )[2, ], use.names = FALSE) + names.y <- unique(unlist(lapply(obs.mean[t1:t], function(x) { + names(x) + }))) Ybar <- t(sapply(obs.mean[t1:t], function(x) { tmp <- rep(NA, length(names.y)) names(tmp) <- names.y @@ -280,256 +319,279 @@ postana.bias.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov, o tmp })) #---- - grDevices::pdf(file.path(settings$outdir,"SDA", "bias.diagnostic.pdf")) + grDevices::pdf(file.path(settings$outdir, "SDA", "bias.diagnostic.pdf")) for (i in seq_along(obs.mean[[1]])) { - Xbar <- plyr::laply(FORECAST[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - Xci <- plyr::laply(FORECAST[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975)) }) - - Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { mean(x[, i], na.rm = TRUE) }) - XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { stats::quantile(x[, i], c(0.025, 0.975)) }) - - if(length(which(is.na(Ybar[,i])))>=length(t1:t)) next() + Xbar <- plyr::laply(FORECAST[t1:t], function(x) { + mean(x[, i], na.rm = TRUE) + }) + Xci <- plyr::laply(FORECAST[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975)) + }) + + Xa <- plyr::laply(ANALYSIS[t1:t], function(x) { + mean(x[, i], na.rm = TRUE) + }) + XaCI <- plyr::laply(ANALYSIS[t1:t], function(x) { + stats::quantile(x[, i], c(0.025, 0.975)) + }) + + if (length(which(is.na(Ybar[, i]))) >= length(t1:t)) next() reg <- stats::lm(Xbar[t1:t] - unlist(Ybar[, i]) ~ c(t1:t)) - plot(t1:t, - Xbar - unlist(Ybar[, i]), - pch = 16, cex = 1, - ylim = c(min(Xci[, 1] - unlist(Ybar[, i])), max(Xci[,2] - unlist(Ybar[, i]))), - xlab = "Time", - ylab = "Error", - main = paste(colnames(X)[i], " Error = Forecast - Data")) - PEcAn.visualization::ciEnvelope(rev(t1:t), - rev(Xci[, 1] - unlist(Ybar[, i])), - rev(Xci[, 2] - unlist(Ybar[, i])), - col = sda_colors$brown) + plot(t1:t, + Xbar - unlist(Ybar[, i]), + pch = 16, cex = 1, + ylim = c(min(Xci[, 1] - unlist(Ybar[, i])), max(Xci[, 2] - unlist(Ybar[, i]))), + xlab = "Time", + ylab = "Error", + main = paste(colnames(X)[i], " Error = Forecast - Data") + ) + PEcAn.visualization::ciEnvelope(rev(t1:t), + rev(Xci[, 1] - unlist(Ybar[, i])), + rev(Xci[, 2] - unlist(Ybar[, i])), + col = sda_colors$brown + ) graphics::abline(h = 0, lty = 2, lwd = 2) graphics::abline(reg) graphics::mtext(paste( "slope =", signif(summary(reg)$coefficients[2], digits = 3), - "intercept =", signif(summary(reg)$coefficients[1], digits = 3))) + "intercept =", signif(summary(reg)$coefficients[1], digits = 3) + )) # d<-density(c(Xbar[t1:t] - unlist(Ybar[t1:t,i]))) lines(d$y+1,d$x) - + # forecast minus analysis = update reg1 <- stats::lm(Xbar - Xa ~ c(t1:t)) - plot(t1:t, - Xbar - Xa, - pch = 16, cex = 1, - ylim = c(min(Xbar - XaCI[, 2]), max(Xbar - XaCI[, 1])), - xlab = "Time", ylab = "Update", - main = paste(colnames(X)[i], - "Update = Forecast - Analysis")) - PEcAn.visualization::ciEnvelope(rev(t1:t), - rev(Xbar - XaCI[, 1]), - rev(Xbar - XaCI[, 2]), - col = sda_colors$purple) + plot(t1:t, + Xbar - Xa, + pch = 16, cex = 1, + ylim = c(min(Xbar - XaCI[, 2]), max(Xbar - XaCI[, 1])), + xlab = "Time", ylab = "Update", + main = paste( + colnames(X)[i], + "Update = 
Forecast - Analysis" + ) + ) + PEcAn.visualization::ciEnvelope(rev(t1:t), + rev(Xbar - XaCI[, 1]), + rev(Xbar - XaCI[, 2]), + col = sda_colors$purple + ) graphics::abline(h = 0, lty = 2, lwd = 2) graphics::abline(reg1) graphics::mtext(paste( "slope =", signif(summary(reg1)$coefficients[2], digits = 3), "intercept =", signif(summary(reg1)$coefficients[1], - digits = 3))) + digits = 3 + ) + )) # d<-density(c(Xbar[t1:t] - Xa[t1:t])) lines(d$y+1,d$x) } grDevices::dev.off() - } ##' @rdname interactive.plotting.sda #' @param aqq,bqq shape parameters estimated over time for the process covariance ##' @export -postana.bias.plotting.sda.corr<-function(t, obs.times, X, aqq, bqq){ - - t1<- 1 - #Defining some colors +postana.bias.plotting.sda.corr <- function(t, obs.times, X, aqq, bqq) { + t1 <- 1 + # Defining some colors sda_colors <- generate_colors_sda() #--- - grDevices::pdf('SDA/process.var.plots.pdf') - - cor.mat <- stats::cov2cor(aqq[t,,] / bqq[t]) + grDevices::pdf("SDA/process.var.plots.pdf") + + cor.mat <- stats::cov2cor(aqq[t, , ] / bqq[t]) colnames(cor.mat) <- colnames(X) rownames(cor.mat) <- colnames(X) graphics::par(mfrow = c(1, 1), mai = c(1, 1, 4, 1)) - corrplot::corrplot(cor.mat, type = "upper", tl.srt = 45,order='FPC') - - graphics::par(mfrow=c(1,1)) + corrplot::corrplot(cor.mat, type = "upper", tl.srt = 45, order = "FPC") + + graphics::par(mfrow = c(1, 1)) plot(as.Date(obs.times[t1:t]), bqq[t1:t], - pch = 16, cex = 1, - ylab = "Degrees of Freedom", xlab = "Time") - + pch = 16, cex = 1, + ylab = "Degrees of Freedom", xlab = "Time" + ) + grDevices::dev.off() } ##' @rdname interactive.plotting.sda ##' @export -post.analysis.ggplot <- function(settings, t, obs.times, obs.mean, obs.cov, obs, X, FORECAST, ANALYSIS, plot.title=NULL){ - +post.analysis.ggplot <- function(settings, t, obs.times, obs.mean, obs.cov, obs, X, FORECAST, ANALYSIS, plot.title = NULL) { t1 <- 1 - #Defining some colors - ready.OBS<-NULL + # Defining some colors + ready.OBS <- NULL sda_colors <- generate_colors_sda() - var.names <- sapply(settings$state.data.assimilation$state.variable, '[[', "variable.name") + var.names <- sapply(settings$state.data.assimilation$state.variable, "[[", "variable.name") #---- - #Analysis & Forcast cleaning and STAT - All.my.data <- list(FORECAST=FORECAST,ANALYSIS=ANALYSIS) - + # Analysis & Forcast cleaning and STAT + All.my.data <- list(FORECAST = FORECAST, ANALYSIS = ANALYSIS) + ready.FA <- c("FORECAST", "ANALYSIS") %>% purrr::map_df(function(listFA) { - All.my.data[[listFA]] %>% purrr::map_df(function(state.vars) { - means <- apply(state.vars, 2, mean, na.rm = T) - CI <- apply(state.vars, 2, stats::quantile, c(0.025, 0.975), - na.rm = T) - rbind(means, CI) %>% t %>% as.data.frame() %>% dplyr::mutate(Variables = paste(colnames(state.vars))) %>% - tidyr::replace_na(list(0)) - }) %>% dplyr::mutate( - Type = listFA, - Date = rep( - lubridate::ymd_hms(obs.times[t1:t], truncated = 3, tz = "EST"), - each = colnames((All.my.data[[listFA]])[[1]]) %>% - length() + All.my.data[[listFA]] %>% + purrr::map_df(function(state.vars) { + means <- apply(state.vars, 2, mean, na.rm = T) + CI <- apply(state.vars, 2, stats::quantile, c(0.025, 0.975), + na.rm = T + ) + rbind(means, CI) %>% + t() %>% + as.data.frame() %>% + dplyr::mutate(Variables = paste(colnames(state.vars))) %>% + tidyr::replace_na(list(0)) + }) %>% + dplyr::mutate( + Type = listFA, + Date = rep( + lubridate::ymd_hms(obs.times[t1:t], truncated = 3, tz = "EST"), + each = colnames((All.my.data[[listFA]])[[1]]) %>% + length() + ) ) - ) }) - - - 
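# A hedged sketch (toy numbers) of the shape `ready.FA` takes after the pipeline
# above: one row per state variable, per assimilation date, per Type
# ("FORECAST" or "ANALYSIS"), with the ensemble mean and 2.5%/97.5% quantiles.
data.frame(
  means = c(9.4, 9.6),
  `2.5%` = c(8.1, 8.9),
  `97.5%` = c(10.6, 10.2),
  Variables = "AbvGrndWood",
  Type = c("FORECAST", "ANALYSIS"),
  Date = as.POSIXct("2004-12-31 23:59:59", tz = "EST"),
  check.names = FALSE
)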
- #Observed data - #first merging mean and conv based on the day - - tryCatch({ - ready.OBS<- names(obs.mean)%>% - purrr::map(~c(obs.mean[.x],obs.cov[.x],.x)%>% - stats::setNames(c('means','covs','Date')))%>% - stats::setNames(names(obs.mean))%>% - purrr::map_df(function(one.day.data){ - #CI - - purrr::map2_df(sqrt(diag(one.day.data$covs)), one.day.data$means, - function(sd, mean){ - data.frame(mean-(sd*1.96), mean+(sd*1.96)) - - })%>% - dplyr::mutate(Variables=names(one.day.data$means))%>% - `colnames<-`(c('2.5%', '97.5%', 'Variables'))%>% - dplyr::mutate(means=one.day.data$means%>%unlist, - Type="Data", - Date=one.day.data$Date%>%as.POSIXct(tz="EST")) - - + + + + # Observed data + # first merging mean and conv based on the day + + tryCatch( + { + ready.OBS <- names(obs.mean) %>% + purrr::map(~ c(obs.mean[.x], obs.cov[.x], .x) %>% + stats::setNames(c("means", "covs", "Date"))) %>% + stats::setNames(names(obs.mean)) %>% + purrr::map_df(function(one.day.data) { + # CI + + purrr::map2_df( + sqrt(diag(one.day.data$covs)), one.day.data$means, + function(sd, mean) { + data.frame(mean - (sd * 1.96), mean + (sd * 1.96)) + } + ) %>% + dplyr::mutate(Variables = names(one.day.data$means)) %>% + `colnames<-`(c("2.5%", "97.5%", "Variables")) %>% + dplyr::mutate( + means = one.day.data$means %>% unlist(), + Type = "Data", + Date = one.day.data$Date %>% as.POSIXct(tz = "EST") + ) }) }, error = function(e) { - ready.OBS<-NULL + ready.OBS <- NULL } ) ready.to.plot <- ready.OBS %>% dplyr::bind_rows(ready.FA) - - #Adding the units to the variables - ready.to.plot$Variable %>% unique() %>% - purrr::walk(function(varin){ - #find the unit - unitp <- which(lapply(settings$state.data.assimilation$state.variable, "[", 'variable.name') %>% unlist %in% varin) - if (length(unitp)>0) { + + # Adding the units to the variables + ready.to.plot$Variable %>% + unique() %>% + purrr::walk(function(varin) { + # find the unit + unitp <- which(lapply(settings$state.data.assimilation$state.variable, "[", "variable.name") %>% unlist() %in% varin) + if (length(unitp) > 0) { unit <- settings$state.data.assimilation$state.variable[[unitp]]$unit - - #replace it in the dataframe - ready.to.plot$Variable[ready.to.plot$Variable==varin] <<- paste(varin,"(",unit,")") + + # replace it in the dataframe + ready.to.plot$Variable[ready.to.plot$Variable == varin] <<- paste(varin, "(", unit, ")") } - }) - - + + p <- ready.to.plot %>% ggplot2::ggplot(ggplot2::aes(x = Date)) + ggplot2::geom_ribbon( ggplot2::aes(ymin = .data$`2.5%`, ymax = .data$`97.5%`, fill = .data$Type), - color = "black") + + color = "black" + ) + ggplot2::geom_line(ggplot2::aes(y = .data$means, color = .data$Type), lwd = 1.02, linetype = 2) + ggplot2::geom_point(ggplot2::aes(y = .data$means, color = .data$Type), size = 3, alpha = 0.75) + ggplot2::scale_fill_manual(values = c(sda_colors$pink, sda_colors$green, sda_colors$blue), name = "") + ggplot2::scale_color_manual(values = c(sda_colors$pink, sda_colors$green, sda_colors$blue), name = "") + ggplot2::theme_bw(base_size = 17) + - ggplot2::facet_wrap(~.data$Variables, scales = "free", ncol = 2) + + ggplot2::facet_wrap(~ .data$Variables, scales = "free", ncol = 2) + ggplot2::theme(legend.position = "top", strip.background = ggplot2::element_blank()) if (!is.null(plot.title)) { p <- p + ggplot2::labs(title = plot.title) } - - + + grDevices::pdf("SDA/SDA.pdf", width = 14, height = 10, onefile = TRUE) print(p) grDevices::dev.off() - - #saving plot data - save(p, ready.to.plot, file = file.path(settings$outdir,"SDA", 
"timeseries.plot.data.Rdata")) - - + + # saving plot data + save(p, ready.to.plot, file = file.path(settings$outdir, "SDA", "timeseries.plot.data.Rdata")) } ##' @rdname interactive.plotting.sda ##' @export -post.analysis.ggplot.violin <- function(settings, t, obs.times, obs.mean, obs.cov, obs, X, FORECAST, ANALYSIS, plot.title=NULL){ - - t1 <- 1 - #Defining some colors +post.analysis.ggplot.violin <- function(settings, t, obs.times, obs.mean, obs.cov, obs, X, FORECAST, ANALYSIS, plot.title = NULL) { + t1 <- 1 + # Defining some colors sda_colors <- generate_colors_sda() - var.names <- sapply(settings$state.data.assimilation$state.variable, '[[', "variable.name") - -#rearranging the forcast and analysis data - - All.my.data <- list(FORECAST=FORECAST,ANALYSIS=ANALYSIS) - - ready.FA <- c('FORECAST','ANALYSIS')%>% - purrr::map_df(function(listFA){ - All.my.data[[listFA]]%>% - purrr::map_df(function(state.vars){ - state.vars%>%as.data.frame() - })%>%dplyr::mutate(Type=listFA, - Date=rep(obs.times[t1:t], each=((All.my.data[[listFA]])[[1]]) %>% nrow()) + var.names <- sapply(settings$state.data.assimilation$state.variable, "[[", "variable.name") + + # rearranging the forcast and analysis data + + All.my.data <- list(FORECAST = FORECAST, ANALYSIS = ANALYSIS) + + ready.FA <- c("FORECAST", "ANALYSIS") %>% + purrr::map_df(function(listFA) { + All.my.data[[listFA]] %>% + purrr::map_df(function(state.vars) { + state.vars %>% as.data.frame() + }) %>% + dplyr::mutate( + Type = listFA, + Date = rep(obs.times[t1:t], each = ((All.my.data[[listFA]])[[1]]) %>% nrow()) ) - - })%>% + }) %>% tidyr::gather(key = "Variables", value = "Value", -c("Type", "Date")) - #Observed data - #first merging mean and conv based on the day - obs.df <- names(obs.mean)%>% - purrr::map(~c(obs.mean[.x], obs.cov[.x], .x)%>% - stats::setNames(c('means','covs','Date')))%>% - stats::setNames(names(obs.mean))%>% - purrr::map_df(function(one.day.data){ - #CI - purrr::map2_df(sqrt(one.day.data$covs %>% purrr::map( ~ diag(.x)) %>% unlist), one.day.data$means, - function(sd,mean){ - data.frame(mean-(sd*1.96), mean+(sd*1.96)) - - })%>% - dplyr::mutate(Variables=names(one.day.data$means)) %>% - `colnames<-`(c('2.5%', '97.5%', 'Variables')) %>% - dplyr::mutate(means=one.day.data$means %>% unlist, - Type="Data", - Date=one.day.data$Date%>%as.POSIXct(tz="UTC")) - - - })#%>% - #filter(Variables %in% var.names) - - #Adding the units to the variables - ready.FA$Variable %>% unique() %>% - purrr::walk(function(varin){ - #find the unit - unitp <- which(lapply(settings$state.data.assimilation$state.variable, "[", 'variable.name') %>% unlist %in% varin) - if (length(unitp)>0) { + # Observed data + # first merging mean and conv based on the day + obs.df <- names(obs.mean) %>% + purrr::map(~ c(obs.mean[.x], obs.cov[.x], .x) %>% + stats::setNames(c("means", "covs", "Date"))) %>% + stats::setNames(names(obs.mean)) %>% + purrr::map_df(function(one.day.data) { + # CI + purrr::map2_df( + sqrt(one.day.data$covs %>% purrr::map(~ diag(.x)) %>% unlist()), one.day.data$means, + function(sd, mean) { + data.frame(mean - (sd * 1.96), mean + (sd * 1.96)) + } + ) %>% + dplyr::mutate(Variables = names(one.day.data$means)) %>% + `colnames<-`(c("2.5%", "97.5%", "Variables")) %>% + dplyr::mutate( + means = one.day.data$means %>% unlist(), + Type = "Data", + Date = one.day.data$Date %>% as.POSIXct(tz = "UTC") + ) + }) # %>% + # filter(Variables %in% var.names) + + # Adding the units to the variables + ready.FA$Variable %>% + unique() %>% + purrr::walk(function(varin) { + # 
find the unit + unitp <- which(lapply(settings$state.data.assimilation$state.variable, "[", "variable.name") %>% unlist() %in% varin) + if (length(unitp) > 0) { unit <- settings$state.data.assimilation$state.variable[[unitp]]$unit - - #replace it in the dataframe - ready.FA$Variable[ready.FA$Variable==varin] <<- paste(varin,"(",unit,")") + + # replace it in the dataframe + ready.FA$Variable[ready.FA$Variable == varin] <<- paste(varin, "(", unit, ")") } - }) @@ -538,88 +600,91 @@ post.analysis.ggplot.violin <- function(settings, t, obs.times, obs.mean, obs.co ggplot2::geom_ribbon( ggplot2::aes(x = .data$Date, y = .data$means, ymin = .data$`2.5%`, ymax = .data$`97.5%`, fill = .data$Type), data = obs.df, - color = "black") + + color = "black" + ) + ggplot2::geom_line( ggplot2::aes(y = .data$means, color = .data$Type), data = obs.df, lwd = 1.02, - linetype = 2) + + linetype = 2 + ) + ggplot2::geom_violin( ggplot2::aes(x = .data$Date, fill = .data$Type, group = interaction(.data$Date, .data$Type)), - position = ggplot2::position_dodge(width = 0.9)) + + position = ggplot2::position_dodge(width = 0.9) + ) + ggplot2::geom_jitter( ggplot2::aes(color = .data$Type), - position = ggplot2::position_jitterdodge(dodge.width = 0.9)) + + position = ggplot2::position_jitterdodge(dodge.width = 0.9) + ) + ggplot2::scale_fill_manual(values = c(sda_colors$pink, sda_colors$green, sda_colors$blue)) + ggplot2::scale_color_manual(values = c(sda_colors$pink, sda_colors$green, sda_colors$blue)) + - ggplot2::facet_wrap(~.data$Variables, scales = "free", ncol = 2) + + ggplot2::facet_wrap(~ .data$Variables, scales = "free", ncol = 2) + ggplot2::theme_bw(base_size = 17) + ggplot2::theme(legend.position = "top", strip.background = ggplot2::element_blank()) if (!is.null(plot.title)) { p <- p + ggplot2::labs(title = plot.title) } - + grDevices::pdf("SDA/SDA.Violin.pdf", width = 14, height = 10, onefile = TRUE) - print(p) - grDevices::dev.off() - - #saving plot data - save(p, ready.FA, obs.df, file = file.path(settings$outdir,"SDA", "timeseries.violin.plot.data.Rdata")) - + print(p) + grDevices::dev.off() + + # saving plot data + save(p, ready.FA, obs.df, file = file.path(settings$outdir, "SDA", "timeseries.violin.plot.data.Rdata")) } ##' @rdname interactive.plotting.sda #' @param facetg logical: Create a subpanel for each variable? #' @param readsFF optional forward forecast ##' @export -post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs.cov, FORECAST, ANALYSIS, plot.title=NULL, facetg=FALSE, readsFF=NULL, Add_Map=FALSE){ - +post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs.cov, FORECAST, ANALYSIS, plot.title = NULL, facetg = FALSE, readsFF = NULL, Add_Map = FALSE) { if (!requireNamespace("ggrepel", quietly = TRUE)) { PEcAn.logger::logger.error( "Package `ggrepel` not found, but needed by", "PEcAnAssimSequential::post.analysis.multisite.ggplot.", - "Please install it and try again.") + "Please install it and try again." + ) } - + # fix obs.mean/obs.cov for multivariable plotting issues when there is NA data. When more than 1 data set is assimilated, but there are missing data # for some sites/years/etc. the plotting will fail and crash the SDA because the numbers of columns are not consistent across all sublists within obs.mean # or obs.cov. 
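# A hedged, standalone illustration of the padding problem described above (not
# necessarily the exact logic used below): each date/site entry is expanded to
# the union of observed variables so column counts match across sublists. Names
# and numbers are made up.
demo_vars <- c("AbvGrndWood", "LAI")
demo_site_mean <- c(AbvGrndWood = 9.3) # this site/date is missing LAI
padded_mean <- stats::setNames(rep(NA_real_, length(demo_vars)), demo_vars)
padded_mean[names(demo_site_mean)] <- demo_site_mean
padded_cov <- matrix(NA_real_, length(demo_vars), length(demo_vars),
  dimnames = list(demo_vars, demo_vars)
)
padded_cov[names(demo_site_mean), names(demo_site_mean)] <- 0.1 * demo_site_mean
padded_mean
padded_cov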
- observed_vars = vector() + observed_vars <- vector() for (date in names(obs.mean)) { for (site in names(obs.mean[[date]])) { - vars = names(obs.mean[[date]][[site]]) - observed_vars = c(observed_vars, vars) + vars <- names(obs.mean[[date]][[site]]) + observed_vars <- c(observed_vars, vars) } } - observed_vars = unique(observed_vars) - - #new diag function: fixed the bug when length==1 then it will return 0x0 matrix - diag_fix <- function(vector){ - if (length(vector)>1){ + observed_vars <- unique(observed_vars) + + # new diag function: fixed the bug when length==1 then it will return 0x0 matrix + diag_fix <- function(vector) { + if (length(vector) > 1) { return(diag(vector)) - }else if (length(vector)==1){ + } else if (length(vector) == 1) { return(vector) } } - #bug fixing: detailed commends - for (name in names(obs.mean)){ - for (site in names(obs.mean[[1]])){ + # bug fixing: detailed commends + for (name in names(obs.mean)) { + for (site in names(obs.mean[[1]])) { obs_mean <- obs.mean[[name]][[site]] obs_cov <- obs.cov[[name]][[site]] - if(length(names(obs_mean))% purrr::map(~.x[['run']] ) %>% purrr::map('site') %>% purrr::map('name') %>% unlist() %>% as.character() - + + ylab.names <- unlist(sapply( + varnames, + function(x) { + x + } + )[2, ], use.names = FALSE) + + var.names <- sapply(settings$state.data.assimilation$state.variable, "[[", "variable.name") + site.ids <- attr(FORECAST[[1]], "Site") + site.names <- settings %>% + purrr::map(~ .x[["run"]]) %>% + purrr::map("site") %>% + purrr::map("name") %>% + unlist() %>% + as.character() + #------------------------------------------------Data prepration - #Analysis & Forcast cleaning and STAT - All.my.data <- list(FORECAST=FORECAST,ANALYSIS=ANALYSIS) - - ready.FA <- c('FORECAST','ANALYSIS')%>% - purrr::map_df(function(listFA){ - All.my.data[[listFA]]%>% - purrr::map_df(function(state.vars){ - - #finding the mean and Ci for all the state variables - site.ids %>% unique() %>% - purrr::map_df(function(site){ - (state.vars)[,which(site.ids %in% site)] %>% - as.data.frame %>% - dplyr::mutate(Site=site) + # Analysis & Forcast cleaning and STAT + All.my.data <- list(FORECAST = FORECAST, ANALYSIS = ANALYSIS) + + ready.FA <- c("FORECAST", "ANALYSIS") %>% + purrr::map_df(function(listFA) { + All.my.data[[listFA]] %>% + purrr::map_df(function(state.vars) { + # finding the mean and Ci for all the state variables + site.ids %>% + unique() %>% + purrr::map_df(function(site) { + (state.vars)[, which(site.ids %in% site)] %>% + as.data.frame() %>% + dplyr::mutate(Site = site) }) %>% tidyr::gather(key = "Variable", value = "Value", -c("Site")) %>% - dplyr::group_by(.data$Site,.data$Variable) %>% + dplyr::group_by(.data$Site, .data$Variable) %>% dplyr::summarise( Means = mean(.data$Value, na.rm = TRUE), - Lower = stats::quantile(.data$Value,0.025, na.rm = TRUE), - Upper = stats::quantile(.data$Value, 0.975, na.rm = TRUE)) - }) %>% dplyr::mutate(Type = paste0("SDA_", listFA), - Date = rep(as.Date(names(FORECAST)), each = colnames((All.my.data[[listFA]])[[1]]) %>% length() / length(unique(site.ids))) %>% as.POSIXct() + Lower = stats::quantile(.data$Value, 0.025, na.rm = TRUE), + Upper = stats::quantile(.data$Value, 0.975, na.rm = TRUE) + ) + }) %>% + dplyr::mutate( + Type = paste0("SDA_", listFA), + Date = rep(as.Date(names(FORECAST)), each = colnames((All.my.data[[listFA]])[[1]]) %>% length() / length(unique(site.ids))) %>% as.POSIXct() ) - }) - + obs.var.names <- (obs.mean[[1]])[[1]] %>% names() - #Observed data - #first merging mean and conv based on 
the day - ready.to.plot <- names(obs.mean)%>% - purrr::map(~c(obs.mean[.x],obs.cov[.x],.x)%>% - stats::setNames(c('means','covs','Date')))%>% - stats::setNames(names(obs.mean))%>% - purrr::map_df(function(one.day.data){ - one.day.data$means %>% - purrr::map_dfr(~.x) %>% + # Observed data + # first merging mean and conv based on the day + ready.to.plot <- names(obs.mean) %>% + purrr::map(~ c(obs.mean[.x], obs.cov[.x], .x) %>% + stats::setNames(c("means", "covs", "Date"))) %>% + stats::setNames(names(obs.mean)) %>% + purrr::map_df(function(one.day.data) { + one.day.data$means %>% + purrr::map_dfr(~.x) %>% dplyr::mutate(Site = names(one.day.data$means)) %>% tidyr::gather(key = "Variable", value = "Means", -c("Site")) %>% - dplyr::right_join(one.day.data$covs %>% - purrr::map_dfr(~ t(sqrt(as.numeric(diag_fix(.x)))) %>% - data.frame %>% `colnames<-`(c(obs.var.names))) %>% - dplyr::mutate(Site = names(one.day.data$covs)) %>% - tidyr::gather(key = "Variable", value = "Sd", -c("Site")), - by = c('Site', 'Variable')) %>% + dplyr::right_join( + one.day.data$covs %>% + purrr::map_dfr(~ t(sqrt(as.numeric(diag_fix(.x)))) %>% + data.frame() %>% + `colnames<-`(c(obs.var.names))) %>% + dplyr::mutate(Site = names(one.day.data$covs)) %>% + tidyr::gather(key = "Variable", value = "Sd", -c("Site")), + by = c("Site", "Variable") + ) %>% dplyr::mutate( - Upper = .data$Means + (.data$Sd*1.96), - Lower = .data$Means - (.data$Sd*1.96)) %>% + Upper = .data$Means + (.data$Sd * 1.96), + Lower = .data$Means - (.data$Sd * 1.96) + ) %>% # dropped the "_" from "SDA_Data" - dplyr::mutate(Type="Data", - Date=one.day.data$Date %>% as.POSIXct()) - # mutate(Type="SDA_Data", - # Date=one.day.data$Date %>% as.POSIXct()) - - - })%>% + dplyr::mutate( + Type = "Data", + Date = one.day.data$Date %>% as.POSIXct() + ) + # mutate(Type="SDA_Data", + # Date=one.day.data$Date %>% as.POSIXct()) + }) %>% dplyr::select(-.data$Sd) %>% dplyr::bind_rows(ready.FA) - + #--- Adding the forward forecast - if (!is.null(readsFF)){ - - readsFF.df<-readsFF %>% - purrr::map_df(function(siteX){ - - siteX %>% purrr::map_df(function(DateX){ - DateX %>% - purrr::map_df(~.x %>% t ) %>% + if (!is.null(readsFF)) { + readsFF.df <- readsFF %>% + purrr::map_df(function(siteX) { + siteX %>% purrr::map_df(function(DateX) { + DateX %>% + purrr::map_df(~ .x %>% t()) %>% tidyr::gather(key = "Variable", value = "Value", -c("Date", "Site")) %>% - dplyr::group_by(.data$Variable,.data$Date, .data$Site) %>% + dplyr::group_by(.data$Variable, .data$Date, .data$Site) %>% dplyr::summarise( - Means = mean(.data$Value, na.rm = TRUE), - Lower = stats::quantile(.data$Value, 0.025, na.rm = TRUE), - Upper = stats::quantile(.data$Value, 0.975, na.rm = TRUE)) %>% - dplyr::mutate(Type="ForwardForecast") + Means = mean(.data$Value, na.rm = TRUE), + Lower = stats::quantile(.data$Value, 0.025, na.rm = TRUE), + Upper = stats::quantile(.data$Value, 0.975, na.rm = TRUE) + ) %>% + dplyr::mutate(Type = "ForwardForecast") }) }) - + ready.to.plot <- ready.to.plot %>% dplyr::bind_rows(readsFF.df) - } - - ready.to.plot$Variable[ready.to.plot$Variable=="LeafC"] <-"leaf_carbon_content" - - - #Adding the units to the variables - ready.to.plot$Variable %>% unique() %>% - purrr::walk(function(varin){ - #find the unit - unitp <- which(lapply(settings$state.data.assimilation$state.variable, "[", 'variable.name') %>% unlist %in% varin) - if (length(unitp)>0) { + + ready.to.plot$Variable[ready.to.plot$Variable == "LeafC"] <- "leaf_carbon_content" + + + # Adding the units to the variables + 
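# Illustrative sketch (not from the patch) of the relabelling that the walk()
# just below performs, using a state-variable definition shaped like the
# example settings later in this patch (variable.name = "AbvGrndWood",
# unit = "MgC/ha"):
sv <- list(list(variable.name = "AbvGrndWood", unit = "MgC/ha"))
varin <- "AbvGrndWood"
unitp <- which(unlist(lapply(sv, "[[", "variable.name")) %in% varin)
if (length(unitp) > 0) paste(varin, "(", sv[[unitp]]$unit, ")")   # "AbvGrndWood ( MgC/ha )"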
ready.to.plot$Variable %>% + unique() %>% + purrr::walk(function(varin) { + # find the unit + unitp <- which(lapply(settings$state.data.assimilation$state.variable, "[", "variable.name") %>% unlist() %in% varin) + if (length(unitp) > 0) { unit <- settings$state.data.assimilation$state.variable[[unitp]]$unit - - #replace it in the dataframe - ready.to.plot$Variable[ready.to.plot$Variable==varin] <<- paste(varin,"(",unit,")") + + # replace it in the dataframe + ready.to.plot$Variable[ready.to.plot$Variable == varin] <<- paste(varin, "(", unit, ")") } - }) - + #------------------------------------------- Time series plots if (facetg) { filew <- 14 fileh <- 10 - #for each site and for each variable - all.plots <- ready.to.plot$Site%>%unique() %>% - purrr::map(function(site){ - #plotting - p <- ready.to.plot %>% - dplyr::filter(.data$Site == site) %>% - ggplot2::ggplot(ggplot2::aes(x = Date)) + - ggplot2::geom_ribbon( - ggplot2::aes(ymin = .data$Lower, ymax = .data$Upper, fill = .data$Type), - color = "black") + - ggplot2::geom_line(ggplot2::aes(y = .data$Means, color = .data$Type), lwd = 1.02, linetype = 2) + - ggplot2::geom_point(ggplot2::aes(y = .data$Means, color = .data$Type), size = 3, alpha = 0.75) + - ggplot2::scale_fill_manual( - values = c(sda_colors$brown, sda_colors$pink, sda_colors$green, sda_colors$blue), - name = "") + - ggplot2::scale_color_manual( - values = c(sda_colors$brown, sda_colors$pink, sda_colors$green, sda_colors$blue), - name = "") + - ggplot2::theme_bw(base_size = 17) + - ggplot2::labs(y = "", subtitle=paste0("Site id: ",site)) + - ggplot2::theme(legend.position = "top", strip.background = ggplot2::element_blank()) - if (!is.null(plot.title)) { - p <- p + ggplot2::labs(title=plot.title) - } - p <- p + ggplot2::facet_wrap(~.data$Variable, ncol = 2, scales = "free_y") + # for each site and for each variable + all.plots <- ready.to.plot$Site %>% + unique() %>% + purrr::map(function(site) { + # plotting + p <- ready.to.plot %>% + dplyr::filter(.data$Site == site) %>% + ggplot2::ggplot(ggplot2::aes(x = Date)) + + ggplot2::geom_ribbon( + ggplot2::aes(ymin = .data$Lower, ymax = .data$Upper, fill = .data$Type), + color = "black" + ) + + ggplot2::geom_line(ggplot2::aes(y = .data$Means, color = .data$Type), lwd = 1.02, linetype = 2) + + ggplot2::geom_point(ggplot2::aes(y = .data$Means, color = .data$Type), size = 3, alpha = 0.75) + + ggplot2::scale_fill_manual( + values = c(sda_colors$brown, sda_colors$pink, sda_colors$green, sda_colors$blue), + name = "" + ) + + ggplot2::scale_color_manual( + values = c(sda_colors$brown, sda_colors$pink, sda_colors$green, sda_colors$blue), + name = "" + ) + + ggplot2::theme_bw(base_size = 17) + + ggplot2::labs(y = "", subtitle = paste0("Site id: ", site)) + + ggplot2::theme(legend.position = "top", strip.background = ggplot2::element_blank()) + if (!is.null(plot.title)) { + p <- p + ggplot2::labs(title = plot.title) + } + p <- p + ggplot2::facet_wrap(~ .data$Variable, ncol = 2, scales = "free_y") - list(p) + list(p) }) - - }else{ + } else { filew <- 10 fileh <- 8 - #for each site and for each variable - all.plots<-ready.to.plot$Site%>%unique() %>% - purrr::map(function(site){ - ready.to.plot$Variable%>%unique()%>% - purrr::map(function(vari){ - varin<-vari - unit<-"" - if (substr(vari,1,8)=="AGB.pft.") varin <- "AGB.pft" - #finding the unit - unitp <- which(lapply(settings$state.data.assimilation$state.variable, "[", 'variable.name') %>% unlist %in% varin) - if (length(unitp)>0) unit <- 
settings$state.data.assimilation$state.variable[[unitp]]$unit - #plotting - p<- ready.to.plot %>% - dplyr::filter(.data$Variable == vari, .data$Sitev== site) %>% + # for each site and for each variable + all.plots <- ready.to.plot$Site %>% + unique() %>% + purrr::map(function(site) { + ready.to.plot$Variable %>% + unique() %>% + purrr::map(function(vari) { + varin <- vari + unit <- "" + if (substr(vari, 1, 8) == "AGB.pft.") varin <- "AGB.pft" + # finding the unit + unitp <- which(lapply(settings$state.data.assimilation$state.variable, "[", "variable.name") %>% unlist() %in% varin) + if (length(unitp) > 0) unit <- settings$state.data.assimilation$state.variable[[unitp]]$unit + # plotting + p <- ready.to.plot %>% + dplyr::filter(.data$Variable == vari, .data$Sitev == site) %>% ggplot2::ggplot(ggplot2::aes(x = Date)) + ggplot2::geom_ribbon( ggplot2::aes(ymin = .data$Lower, ymax = .data$Upper, fill = .data$Type), - color = "black") + + color = "black" + ) + ggplot2::geom_line(ggplot2::aes(y = .data$Means, color = .data$Type), lwd = 1.02, linetype = 2) + ggplot2::geom_point(ggplot2::aes(y = .data$Means, color = .data$Type), size = 3, alpha = 0.75) + ggplot2::scale_fill_manual( values = c(sda_colors$brown, sda_colors$pink, sda_colors$green, sda_colors$blue), - name = "") + + name = "" + ) + ggplot2::scale_color_manual( values = c(sda_colors$brown, sda_colors$pink, sda_colors$green, sda_colors$blue), - name = "") + + name = "" + ) + ggplot2::theme_bw(base_size = 17) + - ggplot2::labs(y = paste(vari,'(',unit,')'), subtitle = paste0("Site id: ",site)) + + ggplot2::labs(y = paste(vari, "(", unit, ")"), subtitle = paste0("Site id: ", site)) + ggplot2::theme(legend.position = "top", strip.background = ggplot2::element_blank()) if (!is.null(plot.title)) { - p <- p + ggplot2::labs(title=plot.title) + p <- p + ggplot2::labs(title = plot.title) } p @@ -819,45 +905,52 @@ post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs }) } - if(Add_Map){ + if (Add_Map) { #------------------------------------------------ map - site.locs <- settings %>% - purrr::map(~.x[['run']] ) %>% - purrr::map('site') %>% - purrr::map_dfr(~c(.x[['lon']],.x[['lat']]) %>% - as.numeric)%>% - t %>% - as.data.frame()%>% - `colnames<-`(c("Lon","Lat")) %>% - dplyr::mutate(Site=.data$site.ids %>% unique(), - Name=.data$site.names) - + site.locs <- settings %>% + purrr::map(~ .x[["run"]]) %>% + purrr::map("site") %>% + purrr::map_dfr(~ c(.x[["lon"]], .x[["lat"]]) %>% + as.numeric()) %>% + t() %>% + as.data.frame() %>% + `colnames<-`(c("Lon", "Lat")) %>% + dplyr::mutate( + Site = .data$site.ids %>% unique(), + Name = .data$site.names + ) + suppressMessages({ aoi_boundary_HARV <- sf::st_read(system.file("extdata", "eco-regionl2.json", package = "PEcAn.data.land")) }) - - #transform site locs into new projection - UTM 2163 - site.locs.sp<-site.locs + + # transform site locs into new projection - UTM 2163 + site.locs.sp <- site.locs sp::coordinates(site.locs.sp) <- c("Lon", "Lat") - sp::proj4string(site.locs.sp) <- sp::CRS("+proj=longlat +datum=WGS84") ## for example + sp::proj4string(site.locs.sp) <- sp::CRS("+proj=longlat +datum=WGS84") ## for example res <- sp::spTransform(site.locs.sp, sp::CRS("+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +a=6370997 +b=6370997 +units=m +no_defs")) - site.locs[,c(1,2)] <-res@coords - - #finding site with data + site.locs[, c(1, 2)] <- res@coords + + # finding site with data sites.w.data <- - obs.mean %>% purrr::map(names) %>% unlist() %>% as.character() %>% unique() - #adding the 
column to site + obs.mean %>% + purrr::map(names) %>% + unlist() %>% + as.character() %>% + unique() + # adding the column to site site.locs <- site.locs %>% dplyr::mutate(Data = .data$Site %in% sites.w.data) - - #plotting - map.plot<- ggplot2::ggplot() + + + # plotting + map.plot <- ggplot2::ggplot() + ggplot2::geom_sf( ggplot2::aes(fill = .data$NA_L1CODE), data = aoi_boundary_HARV, - alpha=0.35, - lwd=0, - color="black") + + alpha = 0.35, + lwd = 0, + color = "black" + ) + ggplot2::geom_point(data = site.locs, size = 2) + ggrepel::geom_label_repel( data = site.locs, @@ -870,41 +963,41 @@ post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs vjust = 1.2, fontface = "bold", size = 3.5 - ) + - #coord_sf(datum = sf::st_crs(2163),default = F)+ - ggplot2::scale_fill_manual(values = c("#a6cee3", - "#1f78b4","#b2df8a", - "#33a02c","#fb9a99", - "#e31a1c","#fdbf6f", - "#ff7f00","#cab2d6", - "#6a3d9a","#ffff99", - "#b15928","#fccde5", - "#d9d9d9","#66c2a5", - "#ffd92f","#8dd3c7", - "#80b1d3","#d9d9d9", - "#fdbf6f"),name="Eco-Region")+ - ggplot2::scale_color_manual(values= c("#e31a1c","#33a02c"))+ - ggplot2::theme_minimal()+ + ) + + # coord_sf(datum = sf::st_crs(2163),default = F)+ + ggplot2::scale_fill_manual(values = c( + "#a6cee3", + "#1f78b4", "#b2df8a", + "#33a02c", "#fb9a99", + "#e31a1c", "#fdbf6f", + "#ff7f00", "#cab2d6", + "#6a3d9a", "#ffff99", + "#b15928", "#fccde5", + "#d9d9d9", "#66c2a5", + "#ffd92f", "#8dd3c7", + "#80b1d3", "#d9d9d9", + "#fdbf6f" + ), name = "Eco-Region") + + ggplot2::scale_color_manual(values = c("#e31a1c", "#33a02c")) + + ggplot2::theme_minimal() + ggplot2::theme(axis.text = ggplot2::element_blank()) - + #----- Reordering the plots - all.plots.print <-list(map.plot) - for (i in seq_along(all.plots)) all.plots.print <-c(all.plots.print,all.plots[[i]]) - - grDevices::pdf(paste0(settings$outdir,"/SDA.pdf"),width = filew, height = fileh) - all.plots.print %>% purrr::map(~print(.x)) + all.plots.print <- list(map.plot) + for (i in seq_along(all.plots)) all.plots.print <- c(all.plots.print, all.plots[[i]]) + + grDevices::pdf(paste0(settings$outdir, "/SDA.pdf"), width = filew, height = fileh) + all.plots.print %>% purrr::map(~ print(.x)) grDevices::dev.off() - }else{ - grDevices::pdf(paste0(settings$outdir,"/SDA.pdf"),width = filew, height = fileh) - all.plots %>% purrr::map(~print(.x)) + } else { + grDevices::pdf(paste0(settings$outdir, "/SDA.pdf"), width = filew, height = fileh) + all.plots %>% purrr::map(~ print(.x)) grDevices::dev.off() } - - - #saving plot data + + + # saving plot data save(all.plots, ready.to.plot, file = file.path(settings$outdir, "timeseries.plot.data.Rdata")) - - } @@ -926,52 +1019,54 @@ post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs ##' @param t.inds index of period that will be plotted. 
##' @export ##' @author Dongchen Zhang -SDA_timeseries_plot <- function(ANALYSIS, FORECAST, obs.mean = NULL, obs.cov = NULL, outdir, pft.path = NULL, by = "site", types = c("FORECAST", "ANALYSIS", "OBS"), CI = c(0.025, 0.975), +SDA_timeseries_plot <- function(ANALYSIS, FORECAST, obs.mean = NULL, obs.cov = NULL, outdir, pft.path = NULL, by = "site", types = c("FORECAST", "ANALYSIS", "OBS"), CI = c(0.025, 0.975), unit = list(AbvGrndWood = "Mg/ha", LAI = "m2/m2", SoilMoistFrac = "", TotSoilCarb = "kg/m2"), - style = list(general_color = c("FORECAST" = "blue", "ANALYSIS" = "red", "OBS" = "black"), - fill_color = c("FORECAST" = "yellow", "ANALYSIS" = "green", "OBS" = "grey"), - title_color = "red"), + style = list( + general_color = c("FORECAST" = "blue", "ANALYSIS" = "red", "OBS" = "black"), + fill_color = c("FORECAST" = "yellow", "ANALYSIS" = "green", "OBS" = "grey"), + title_color = "red" + ), PDF_w = 20, PDF_h = 16, - t.inds = NULL){ - #Check package availability. - if("try-error" %in% class(try(find.package("ggpubr"), silent = T))){ + t.inds = NULL) { + # Check package availability. + if ("try-error" %in% class(try(find.package("ggpubr"), silent = T))) { PEcAn.logger::logger.info("Package ggpubr is not installed! Please install it and rerun the function!") return(0) } - #TODO: make page, font, line, point sizes adjustable. + # TODO: make page, font, line, point sizes adjustable. time_points <- names(FORECAST) if (!is.null(t.inds)) { time_points <- time_points[t.inds] } site_ids <- attributes(FORECAST[[1]])$Site var_names <- attributes(FORECAST[[1]])$dimnames[[2]] - #new diag function: fixed the bug when length==1 then it will return 0x0 matrix - diag_fix <- function(vector){ - if (length(vector)>1){ + # new diag function: fixed the bug when length==1 then it will return 0x0 matrix + diag_fix <- function(vector) { + if (length(vector) > 1) { return(diag(vector)) - }else if (length(vector)==1){ + } else if (length(vector) == 1) { return(vector) } } - #read pft.csv file for the option by == pft. - if(!is.null(pft.path)){ + # read pft.csv file for the option by == pft. 
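# Hypothetical sketch (column names and values are assumptions, not taken from
# this patch) of the site-to-PFT mapping read from `pft.path` for the
# by == "pft" option; the real format is defined outside this hunk, e.g.:
#   site_id,pft
#   1000000074,temperate.deciduous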
+ if (!is.null(pft.path)) { pft <- utils::read.csv(pft.path) } - #create database + # create database DB <- data.frame() for (id in sort(unique(site_ids))) { for (time_point in time_points) { for (var_name in sort(unique(var_names))) { for (type in types) { - if(type == "OBS") { + if (type == "OBS") { obs_mean <- obs.mean[[time_point]][[id]][[var_name]] - if(length(obs_mean) == 0 | is.null(obs_mean)){ + if (length(obs_mean) == 0 | is.null(obs_mean)) { next - }else{ + } else { obs_cov <- diag_fix(obs.cov[[time_point]][[id]])[which(var_name == names(obs.mean[[time_point]][[id]]))] - MIN <- obs_mean - 1.96*sqrt(obs_cov) - MAX <- obs_mean + 1.96*sqrt(obs_cov) + MIN <- obs_mean - 1.96 * sqrt(obs_cov) + MAX <- obs_mean + 1.96 * sqrt(obs_cov) MEAN <- obs_mean } } else { @@ -979,58 +1074,60 @@ SDA_timeseries_plot <- function(ANALYSIS, FORECAST, obs.mean = NULL, obs.cov = N site_ind <- which(id == site_ids) var_ind <- which(var_name == var_names) ind <- var_ind[which(var_ind %in% site_ind)] - MEAN <- mean(temp_Dat[,ind]) - MIN <- stats::quantile(temp_Dat[,ind], CI[1]) - MAX <- stats::quantile(temp_Dat[,ind], CI[2]) + MEAN <- mean(temp_Dat[, ind]) + MIN <- stats::quantile(temp_Dat[, ind], CI[1]) + MAX <- stats::quantile(temp_Dat[, ind], CI[2]) } - if(MIN < 0) MIN <- 0 + if (MIN < 0) MIN <- 0 DB <- rbind(DB, list(id = id, date = time_point, var_name = var_name, type = type, upper = MAX, lower = MIN, mean = MEAN)) } } } } - #if we plot by each site. - if(by == "site") { + # if we plot by each site. + if (by == "site") { p <- list() for (site.id in sort(unique(site_ids))) { site_p <- list() for (var.name in sort(unique(var_names))) { - site_p <- rlist::list.append(site_p, dplyr::filter(DB, id == site.id & var_name == var.name) %>% - dplyr::select(-c(id, var_name)) %>% - dplyr::mutate(date = lubridate::ymd(date)) %>% - ggplot2::ggplot(ggplot2::aes(x=date)) + - ggplot2::geom_ribbon(ggplot2::aes(x = .data$date, ymin = .data$lower, ymax = .data$upper, fill=.data$type), inherit.aes = FALSE, alpha = 0.5) + - ggplot2::geom_line(ggplot2::aes(y=mean, color=type),lwd=0.5,linetype=2) + - ggplot2::geom_point(ggplot2::aes(y=mean, color=type), size=1.5, alpha=0.75) + - ggplot2::scale_fill_manual(values = style$fill_color) + - ggplot2::scale_color_manual(values = style$general_color) + - ggplot2::ylab(paste0(var.name, " (", unit[var.name], ")"))) + site_p <- rlist::list.append(site_p, dplyr::filter(DB, id == site.id & var_name == var.name) %>% + dplyr::select(-c(id, var_name)) %>% + dplyr::mutate(date = lubridate::ymd(date)) %>% + ggplot2::ggplot(ggplot2::aes(x = date)) + + ggplot2::geom_ribbon(ggplot2::aes(x = .data$date, ymin = .data$lower, ymax = .data$upper, fill = .data$type), inherit.aes = FALSE, alpha = 0.5) + + ggplot2::geom_line(ggplot2::aes(y = mean, color = type), lwd = 0.5, linetype = 2) + + ggplot2::geom_point(ggplot2::aes(y = mean, color = type), size = 1.5, alpha = 0.75) + + ggplot2::scale_fill_manual(values = style$fill_color) + + ggplot2::scale_color_manual(values = style$general_color) + + ggplot2::ylab(paste0(var.name, " (", unit[var.name], ")"))) } - p <- rlist::list.append(p, ggpubr::annotate_figure(ggpubr::ggarrange(plotlist = site_p, common.legend = TRUE), - top = ggpubr::text_grob(site.id, color = style$title_color, face = "bold", size = 14))) + p <- rlist::list.append(p, ggpubr::annotate_figure(ggpubr::ggarrange(plotlist = site_p, common.legend = TRUE), + top = ggpubr::text_grob(site.id, color = style$title_color, face = "bold", size = 14) + )) } - #if we plot by each state variable + # if we 
plot by each state variable } else if (by == "var") { p <- list() for (var.name in sort(unique(var_names))) { var_p <- list() for (site.id in sort(unique(site_ids))) { - var_p <- rlist::list.append(var_p, dplyr::filter(DB, id == site.id & var_name == var.name) %>% - dplyr::select(-c(id, var_name)) %>% - dplyr::mutate(date = lubridate::ymd(date)) %>% - ggplot2::ggplot(ggplot2::aes(x=date)) + - ggplot2::geom_ribbon(ggplot2::aes(x = .data$date, ymin = .data$lower, ymax = .data$upper, fill=.data$type), inherit.aes = FALSE, alpha = 0.5) + - ggplot2::geom_line(ggplot2::aes(y=mean, color=type),lwd=0.5,linetype=2) + - ggplot2::geom_point(ggplot2::aes(y=mean, color=type), size=1.5, alpha=0.75) + - ggplot2::scale_fill_manual(values = style$fill_color) + - ggplot2::scale_color_manual(values = style$general_color) + - ggplot2::ylab(paste0(var.name, " (", unit[var.name], ")")) + - ggplot2::ggtitle(site.id)) + var_p <- rlist::list.append(var_p, dplyr::filter(DB, id == site.id & var_name == var.name) %>% + dplyr::select(-c(id, var_name)) %>% + dplyr::mutate(date = lubridate::ymd(date)) %>% + ggplot2::ggplot(ggplot2::aes(x = date)) + + ggplot2::geom_ribbon(ggplot2::aes(x = .data$date, ymin = .data$lower, ymax = .data$upper, fill = .data$type), inherit.aes = FALSE, alpha = 0.5) + + ggplot2::geom_line(ggplot2::aes(y = mean, color = type), lwd = 0.5, linetype = 2) + + ggplot2::geom_point(ggplot2::aes(y = mean, color = type), size = 1.5, alpha = 0.75) + + ggplot2::scale_fill_manual(values = style$fill_color) + + ggplot2::scale_color_manual(values = style$general_color) + + ggplot2::ylab(paste0(var.name, " (", unit[var.name], ")")) + + ggplot2::ggtitle(site.id)) } - p <- rlist::list.append(p, ggpubr::annotate_figure(ggpubr::ggarrange(plotlist = var_p, common.legend = TRUE), - top = ggpubr::text_grob(var.name, color = style$title_color, face = "bold", size = 14))) + p <- rlist::list.append(p, ggpubr::annotate_figure(ggpubr::ggarrange(plotlist = var_p, common.legend = TRUE), + top = ggpubr::text_grob(var.name, color = style$title_color, face = "bold", size = 14) + )) } - #if we plot by each (pft * state variable) + # if we plot by each (pft * state variable) } else if (by == "pft") { if (!exists("pft")) { PEcAn.logger::logger.info("Please provide the pdf path!") @@ -1043,27 +1140,28 @@ SDA_timeseries_plot <- function(ANALYSIS, FORECAST, obs.mean = NULL, obs.cov = N for (var.name in sort(unique(var_names))) { site_p <- list() for (site.id in sort(site_id_pft)) { - site_p <- rlist::list.append(site_p, dplyr::filter(DB, id == site.id & var_name == var.name) %>% - dplyr::select(-c(id, var_name)) %>% - dplyr::mutate(date = lubridate::ymd(date)) %>% - ggplot2::ggplot(ggplot2::aes(x=date)) + - ggplot2::geom_ribbon(ggplot2::aes(x = .data$date, ymin = .data$lower, ymax = .data$upper, fill=.data$type), inherit.aes = FALSE, alpha = 0.5) + - ggplot2::geom_line(ggplot2::aes(y=mean, color=type),lwd=0.5,linetype=2) + - ggplot2::geom_point(ggplot2::aes(y=mean, color=type), size=1.5, alpha=0.75) + - ggplot2::scale_fill_manual(values = style$fill_color) + - ggplot2::scale_color_manual(values = style$general_color) + - ggplot2::ylab(paste0(var.name, " (", unit[var.name], ")")) + - ggplot2::ggtitle(site.id)) + site_p <- rlist::list.append(site_p, dplyr::filter(DB, id == site.id & var_name == var.name) %>% + dplyr::select(-c(id, var_name)) %>% + dplyr::mutate(date = lubridate::ymd(date)) %>% + ggplot2::ggplot(ggplot2::aes(x = date)) + + ggplot2::geom_ribbon(ggplot2::aes(x = .data$date, ymin = .data$lower, ymax = .data$upper, fill = 
.data$type), inherit.aes = FALSE, alpha = 0.5) + + ggplot2::geom_line(ggplot2::aes(y = mean, color = type), lwd = 0.5, linetype = 2) + + ggplot2::geom_point(ggplot2::aes(y = mean, color = type), size = 1.5, alpha = 0.75) + + ggplot2::scale_fill_manual(values = style$fill_color) + + ggplot2::scale_color_manual(values = style$general_color) + + ggplot2::ylab(paste0(var.name, " (", unit[var.name], ")")) + + ggplot2::ggtitle(site.id)) } - var_p <- rlist::list.append(var_p, ggpubr::annotate_figure(ggpubr::ggarrange(plotlist = site_p, common.legend = TRUE), - top = ggpubr::text_grob(paste(PFT, var.name), color = style$title_color, face = "bold", size = 14))) + var_p <- rlist::list.append(var_p, ggpubr::annotate_figure(ggpubr::ggarrange(plotlist = site_p, common.legend = TRUE), + top = ggpubr::text_grob(paste(PFT, var.name), color = style$title_color, face = "bold", size = 14) + )) } p <- rlist::list.append(p, var_p) } } } - #print pdf - grDevices::pdf(file.path(outdir, paste0("SDA_", by, ".pdf")),width = PDF_w, height = PDF_h) + # print pdf + grDevices::pdf(file.path(outdir, paste0("SDA_", by, ".pdf")), width = PDF_w, height = PDF_h) print(p) grDevices::dev.off() -} \ No newline at end of file +} diff --git a/modules/assim.sequential/R/sda_weights_site.R b/modules/assim.sequential/R/sda_weights_site.R index d3938695b5e..6507dca9070 100644 --- a/modules/assim.sequential/R/sda_weights_site.R +++ b/modules/assim.sequential/R/sda_weights_site.R @@ -1,7 +1,7 @@ #' Calculate ensemble weights for each site at time t. #' -#' @param FORECAST FORECAST object built within the sda.enkf_MultiSite function. -#' @param ANALYSIS ANALYSIS object built within the Analysis_sda_multisite function. +#' @param FORECAST FORECAST object built within the sda.enkf_MultiSite function. +#' @param ANALYSIS ANALYSIS object built within the Analysis_sda_multisite function. #' @param t exact number of t inside the sda.enkf_MultiSite function. #' @param ens number of ensemble members. #' @@ -9,51 +9,55 @@ #' @export #' #' @author Dongchen Zhang and Hamze Dokoohaki -sda_weights_site <- function(FORECAST, ANALYSIS, t, ens){ - #This function is the refactored version - #of the original code "Weights_Site.R" written by Hamzed. - #read site ids from forecast results. - site.ids <- attr(FORECAST[[1]],'Site') %>% unique() - - #calculate weights for each ensemble member of each site at time point t. - Weights.new <- purrr::pmap(list(ANALYSIS[t], - FORECAST[t], - names(FORECAST)[t]), - function(ANALYSIS.r, FORECAST.r, Year.applid.weight) { - #loop over each site - site.ids %>% - future_map_dfr(function(one.site){ - #match site id - site.ind <- which( attr(FORECAST[[1]],'Site') %in% one.site) - #match date - ind <- which( names(FORECAST) %in% Year.applid.weight) - - #if we only have single variable. 
- if(length(site.ind) == 1){ - #calculate analysis mean value - mu.a <- mean(ANALYSIS.r[,site.ind]) - #calculate analysis variance - Pa <- stats::sd(ANALYSIS.r[,site.ind]) - #calculate weights - w <- stats::dnorm(FORECAST.r[,site.ind], mu.a, Pa, TRUE) - }else{ - #calculate analysis mean value - mu.a <- apply(ANALYSIS.r[,site.ind],2 ,mean) - #calculate analysis covariance matrix - Pa <- stats::cov(ANALYSIS.r[,site.ind]) - #calculate weights - w <- emdbook::dmvnorm(FORECAST.r[,site.ind], mu.a, Pa, TRUE) - } - - #return outputs - data.frame( - ens = 1:ens, - raw_weight=w, - Site= one.site, - Relative_weight=abs(w)/sum(abs(w)), - Year=lubridate::year(Year.applid.weight) - ) - }, .progress = TRUE) - }) +sda_weights_site <- function(FORECAST, ANALYSIS, t, ens) { + # This function is the refactored version + # of the original code "Weights_Site.R" written by Hamzed. + # read site ids from forecast results. + site.ids <- attr(FORECAST[[1]], "Site") %>% unique() + + # calculate weights for each ensemble member of each site at time point t. + Weights.new <- purrr::pmap( + list( + ANALYSIS[t], + FORECAST[t], + names(FORECAST)[t] + ), + function(ANALYSIS.r, FORECAST.r, Year.applid.weight) { + # loop over each site + site.ids %>% + future_map_dfr(function(one.site) { + # match site id + site.ind <- which(attr(FORECAST[[1]], "Site") %in% one.site) + # match date + ind <- which(names(FORECAST) %in% Year.applid.weight) + + # if we only have single variable. + if (length(site.ind) == 1) { + # calculate analysis mean value + mu.a <- mean(ANALYSIS.r[, site.ind]) + # calculate analysis variance + Pa <- stats::sd(ANALYSIS.r[, site.ind]) + # calculate weights + w <- stats::dnorm(FORECAST.r[, site.ind], mu.a, Pa, TRUE) + } else { + # calculate analysis mean value + mu.a <- apply(ANALYSIS.r[, site.ind], 2, mean) + # calculate analysis covariance matrix + Pa <- stats::cov(ANALYSIS.r[, site.ind]) + # calculate weights + w <- emdbook::dmvnorm(FORECAST.r[, site.ind], mu.a, Pa, TRUE) + } + + # return outputs + data.frame( + ens = 1:ens, + raw_weight = w, + Site = one.site, + Relative_weight = abs(w) / sum(abs(w)), + Year = lubridate::year(Year.applid.weight) + ) + }, .progress = TRUE) + } + ) Weights.new -} \ No newline at end of file +} diff --git a/modules/assim.sequential/inst/MultiSite-Exs/SDA/Create_Multi_settings.R b/modules/assim.sequential/inst/MultiSite-Exs/SDA/Create_Multi_settings.R index 0202d56cede..08298a47036 100644 --- a/modules/assim.sequential/inst/MultiSite-Exs/SDA/Create_Multi_settings.R +++ b/modules/assim.sequential/inst/MultiSite-Exs/SDA/Create_Multi_settings.R @@ -8,7 +8,7 @@ start_date <- "2012/01/01" end_date <- "2021/12/31" -#setup working space +# setup working space outdir <- "/projectnb/dietzelab/dongchen/anchorSites/SDA/" SDA_run_dir <- "/projectnb/dietzelab/dongchen/anchorSites/SDA/run/" SDA_out_dir <- "/projectnb/dietzelab/dongchen/anchorSites/SDA/out/" @@ -19,43 +19,43 @@ XML_out_dir <- "/projectnb/dietzelab/dongchen/anchorSites/SDA/pecan.xml" pft_csv_dir <- "/projectnb/dietzelab/dongchen/anchorSites/site_pft.csv" modis_phenology_dir <- "/projectnb/dietzelab/Cherry/pft_files/leaf_phenology.csv" -#Obs_prep part -#AGB +# Obs_prep part +# AGB AGB_indir <- "/projectnb/dietzelab/dongchen/Multi-site/download_500_sites/AGB" allow_download <- TRUE AGB_export_csv <- TRUE -AGB_timestep <- list(unit="year", num=1) +AGB_timestep <- list(unit = "year", num = 1) -#LAI +# LAI LAI_search_window <- 30 -LAI_timestep <- list(unit="year", num=1) +LAI_timestep <- list(unit = "year", num = 1) 
LAI_export_csv <- TRUE run_parallel <- TRUE -#SMP +# SMP SMP_search_window <- 30 -SMP_timestep <- list(unit="year", num=1) +SMP_timestep <- list(unit = "year", num = 1) SMP_export_csv <- TRUE update_csv <- FALSE -#SoilC -SoilC_timestep <- list(unit="year", num=1) +# SoilC +SoilC_timestep <- list(unit = "year", num = 1) SoilC_export_csv <- TRUE -#Obs Date +# Obs Date obs_start_date <- "2012-07-15" obs_end_date <- "2021-07-15" obs_outdir <- "/projectnb/dietzelab/dongchen/anchorSites/Obs" -timestep <- list(unit="year", num=1) +timestep <- list(unit = "year", num = 1) -#specify model binary +# specify model binary model_binary <- "/usr2/postdoc/istfer/SIPNET/trunk//sipnet_if" -#specify host section +# specify host section host.flag <- "local" if (host.flag == "remote") { - #if we submit jobs through tunnel remotely. - host = structure(list( + # if we submit jobs through tunnel remotely. + host <- structure(list( name = "geo.bu.edu", usr = "zhangdc", folder = SDA_out_dir, @@ -69,14 +69,14 @@ if (host.flag == "remote") { rundir = SDA_run_dir )) } else if (host.flag == "local") { - host = structure(list( + host <- structure(list( name = "localhost", folder = SDA_out_dir, outdir = SDA_out_dir, rundir = SDA_run_dir )) } else if (host.flag == "rabbitmq") { - host = structure(list( + host <- structure(list( name = "localhost", rabbitmq = structure(list( uri = "amqp://guest:guest@pecan-rabbitmq:15672/%2F", @@ -90,7 +90,7 @@ if (host.flag == "remote") { )) model_binary <- "/usr/local/bin/sipnet.r136" } -#Start building template +# Start building template template <- PEcAn.settings::Settings(list( ############################################################################ ############################################################################ @@ -116,16 +116,15 @@ template <- PEcAn.settings::Settings(list( chains = 1, data = structure(list(format_id = 1000000040, input.id = 1000013298)), state.variables = structure(list( - #you could add more state variables here + # you could add more state variables here variable = structure(list(variable.name = "AbvGrndWood", unit = "MgC/ha", min_value = 0, max_value = 9999)), variable = structure(list(variable.name = "LAI", unit = "", min_value = 0, max_value = 9999)), - variable = structure(list(variable.name = "SoilMoistFrac", unit = "", min_value = 0, max_value = 1)),#soilWFracInit + variable = structure(list(variable.name = "SoilMoistFrac", unit = "", min_value = 0, max_value = 1)), # soilWFracInit variable = structure(list(variable.name = "TotSoilCarb", unit = "kg/m^2", min_value = 0, max_value = 9999)) )), forecast.time.step = "year", start.date = start_date, end.date = end_date, - Obs_Prep = structure(list( Landtrendr_AGB = structure(list(AGB_indir = AGB_indir, timestep = AGB_timestep, allow_download = allow_download, export_csv = AGB_export_csv)), MODIS_LAI = structure(list(search_window = LAI_search_window, timestep = LAI_timestep, export_csv = LAI_export_csv, run_parallel = run_parallel)), @@ -137,7 +136,7 @@ template <- PEcAn.settings::Settings(list( timestep = timestep )) )), - + ########################################################################### ########################################################################### ### ### @@ -149,7 +148,7 @@ template <- PEcAn.settings::Settings(list( notes = NULL, userid = "-1", username = "", date = "2017/12/06 21:19:33 +0000" )), - + ########################################################################### ########################################################################### ### ### @@ 
-160,7 +159,7 @@ template <- PEcAn.settings::Settings(list( outdir = outdir, rundir = SDA_run_dir, modeloutdir = SDA_out_dir, - + ########################################################################### ########################################################################### ### ### @@ -170,11 +169,13 @@ template <- PEcAn.settings::Settings(list( ########################################################################### database = structure(list( bety = structure( - list(user = "bety", password = "bety", host = "10.241.76.27", - dbname = "bety", driver = "PostgreSQL", write = "FALSE" - )) + list( + user = "bety", password = "bety", host = "10.241.76.27", + dbname = "bety", driver = "PostgreSQL", write = "FALSE" + ) + ) )), - + ########################################################################### ########################################################################### ### ### @@ -183,27 +184,31 @@ template <- PEcAn.settings::Settings(list( ########################################################################### ########################################################################### pfts = structure(list( - #you could add more pfts you needed and make sure to modify the outdir of each pft!!! + # you could add more pfts you needed and make sure to modify the outdir of each pft!!! pft = structure( - list(name = "temperate.deciduous.HPDA", - constants = structure(list(num = "1")), - posteriorid = "1000022311", - outdir = "/fs/data2/output//PEcAn_1000010530/pft/temperate.deciduous.HPDA" - )), - + list( + name = "temperate.deciduous.HPDA", + constants = structure(list(num = "1")), + posteriorid = "1000022311", + outdir = "/fs/data2/output//PEcAn_1000010530/pft/temperate.deciduous.HPDA" + ) + ), pft = structure( - list(name = "boreal.coniferous", - outdir = "/projectnb/dietzelab/hamzed/SDA/ProductionRun/50Sites/SDA_50Sites_1000008768/pft/Conifer/boreal.coniferous" - )), - + list( + name = "boreal.coniferous", + outdir = "/projectnb/dietzelab/hamzed/SDA/ProductionRun/50Sites/SDA_50Sites_1000008768/pft/Conifer/boreal.coniferous" + ) + ), pft = structure( - list(name = "semiarid.grassland_HPDA", - constants = structure(list(num = "1")), - posteriorid = "1000016525", - outdir = "/projectnb/dietzelab/hamzed/SDA/ProductionRun/50Sites/SDA_50Sites_1000008768/pft/GrassA/semiarid.grassland" - )) + list( + name = "semiarid.grassland_HPDA", + constants = structure(list(num = "1")), + posteriorid = "1000016525", + outdir = "/projectnb/dietzelab/hamzed/SDA/ProductionRun/50Sites/SDA_50Sites_1000008768/pft/GrassA/semiarid.grassland" + ) + ) )), - + ############################################################################ ############################################################################ ### ### @@ -214,7 +219,7 @@ template <- PEcAn.settings::Settings(list( meta.analysis = structure(list( iter = "3000", random.effects = FALSE )), - + ########################################################################### ########################################################################### ### ### @@ -222,13 +227,14 @@ template <- PEcAn.settings::Settings(list( ### ### ########################################################################### ########################################################################### - ensemble = structure(list(size = 25, variable = "NPP", - samplingspace = structure(list( - parameters = structure(list(method = "lhc")), - met = structure(list(method = "sampling")) - )) + ensemble = structure(list( + size = 25, variable = "NPP", + samplingspace = 
structure(list( + parameters = structure(list(method = "lhc")), + met = structure(list(method = "sampling")) + )) )), - + ############################################################################ ############################################################################ ### ### @@ -236,14 +242,15 @@ template <- PEcAn.settings::Settings(list( ### ### ############################################################################ ############################################################################ - model = structure(list(id = "1000000022", - type = "SIPNET", - revision = "ssr", - delete.raw = FALSE, - binary = model_binary, - jobtemplate = "~/sipnet_geo.job" + model = structure(list( + id = "1000000022", + type = "SIPNET", + revision = "ssr", + delete.raw = FALSE, + binary = model_binary, + jobtemplate = "~/sipnet_geo.job" )), - + ########################################################################### ########################################################################### ### ### @@ -251,9 +258,9 @@ template <- PEcAn.settings::Settings(list( ### ### ########################################################################### ########################################################################### - #be carefull of the host section, you need to specify the host of your own!!! + # be carefull of the host section, you need to specify the host of your own!!! host = host, - + ############################################################################ ############################################################################ ### ### @@ -266,7 +273,7 @@ template <- PEcAn.settings::Settings(list( settings.updated = TRUE, checked = TRUE )), - + ############################################################################ ############################################################################ ### ### @@ -277,13 +284,13 @@ template <- PEcAn.settings::Settings(list( run = structure(list( inputs = structure(list( met = structure(list( - source = "ERA5", + source = "ERA5", output = "SIPNET", id = "", path = ERA5_dir )), - - #Saved for latter use of initial condition files. + + # Saved for latter use of initial condition files. # poolinitcond = structure(list(source = "NEON_veg", # output = "poolinitcond", # ensemble = 31, @@ -311,8 +318,9 @@ nSite <- 330 multiRunSettings <- PEcAn.settings::createSitegroupMultiSettings( template, sitegroupId = sitegroupId, - nSite = nSite) -if(file.exists(XML_out_dir)){ + nSite = nSite +) +if (file.exists(XML_out_dir)) { unlink(XML_out_dir) } @@ -320,42 +328,43 @@ if(file.exists(XML_out_dir)){ PEcAn.settings::write.settings(multiRunSettings, outputfile = "pecan.xml") -#here we re-read the xml file to fix issues of some special character within the Host section. -tmp = readChar(XML_out_dir,100000000) -tmp = gsub("&","&",tmp) +# here we re-read the xml file to fix issues of some special character within the Host section. 
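# Sketch of the clean-up described above, assuming the special character in
# question is an XML-escaped ampersand introduced when the settings are
# serialized; the "&amp;" entity here is an assumption, not taken from this
# patch.
xml_txt <- readChar(XML_out_dir, file.info(XML_out_dir)$size)
xml_txt <- gsub("&amp;", "&", xml_txt, fixed = TRUE)
writeChar(xml_txt, XML_out_dir)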
+tmp <- readChar(XML_out_dir, 100000000) +tmp <- gsub("&", "&", tmp) writeChar(tmp, XML_out_dir) settings <- PEcAn.settings::read.settings(XML_out_dir) -#add Lat and Lon to each site -#grab Site IDs from settings +# add Lat and Lon to each site +# grab Site IDs from settings site_ID <- c() for (i in 1:length(settings)) { obs <- settings[[i]]$run$site$id - site_ID <- c(site_ID,obs) + site_ID <- c(site_ID, obs) } -#query site info -#open a connection to bety and grab site info based on site IDs +# query site info +# open a connection to bety and grab site info based on site IDs con <- PEcAn.DB::db.open(settings$database$bety) site_info <- db.query(paste("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, - ST_Y(ST_CENTROID(geometry)) AS lat - FROM sites WHERE id IN (",paste(site_ID,collapse=", "),")"),con = con) + ST_Y(ST_CENTROID(geometry)) AS lat + FROM sites WHERE id IN (", paste(site_ID, collapse = ", "), ")"), con = con) -#write Lat and Lon into the settings +# write Lat and Lon into the settings for (i in 1:nSite) { temp_ID <- settings[[i]]$run$site$id - index_site_info <- which(site_info$id==temp_ID) + index_site_info <- which(site_info$id == temp_ID) settings[[i]]$run$site$lat <- site_info$lat[index_site_info] settings[[i]]$run$site$lon <- site_info$lon[index_site_info] - settings[[i]]$run$site$name <- site_info$sitename[index_site_info]#temp_ID + settings[[i]]$run$site$name <- site_info$sitename[index_site_info] # temp_ID } -#remove overlapped sites -site.locs <- settings$run %>% - purrr::map('site') %>% - purrr::map_dfr(~c(.x[['lon']],.x[['lat']]) %>% as.numeric)%>% - t %>% - `colnames<-`(c("lon","lat")) %>% data.frame +# remove overlapped sites +site.locs <- settings$run %>% + purrr::map("site") %>% + purrr::map_dfr(~ c(.x[["lon"]], .x[["lat"]]) %>% as.numeric()) %>% + t() %>% + `colnames<-`(c("lon", "lat")) %>% + data.frame() del.ind <- c() for (i in 1:nrow(site.locs)) { for (j in i:nrow(site.locs)) { @@ -363,7 +372,7 @@ for (i in 1:nrow(site.locs)) { next } if (site.locs$lon[i] == site.locs$lon[j] && - site.locs$lat[i] == site.locs$lat[j]) { + site.locs$lat[i] == site.locs$lat[j]) { del.ind <- c(del.ind, j) } } @@ -371,9 +380,9 @@ for (i in 1:nrow(site.locs)) { settings <- settings[-del.ind] ##### -unlink(paste0(settings$outdir,"/pecan.xml")) +unlink(paste0(settings$outdir, "/pecan.xml")) PEcAn.settings::write.settings(settings, outputfile = "pecan.xml") -#test create site pft function -#read the settings already done previously -settings <- PEcAn.settings::read.settings(file.path(settings$outdir, "pecan.xml")) \ No newline at end of file +# test create site pft function +# read the settings already done previously +settings <- PEcAn.settings::read.settings(file.path(settings$outdir, "pecan.xml")) diff --git a/modules/assim.sequential/inst/MultiSite-Exs/SDA/Multisite-4sites.R b/modules/assim.sequential/inst/MultiSite-Exs/SDA/Multisite-4sites.R index 1b09f4b6299..944eec16968 100644 --- a/modules/assim.sequential/inst/MultiSite-Exs/SDA/Multisite-4sites.R +++ b/modules/assim.sequential/inst/MultiSite-Exs/SDA/Multisite-4sites.R @@ -6,7 +6,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -14,65 +14,81 @@ library(listviewer) library(dplyr) #------------------------------------------ Setup ------------------------------------- setwd("/fs/data3/hamzed/MultiSite_Project/SDA") 
-unlink(c('run','out','SDA'),recursive = T) -rm(list=ls()) +unlink(c("run", "out", "SDA"), recursive = T) +rm(list = ls()) settings <- read.settings("pecan.SDA.4sites.xml") -if (inherits(settings, "MultiSettings")) site.ids <- settings %>% map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() -#sample from parameters used for both sensitivity analysis and Ens -get.parameter.samples(settings, ens.sample.method = settings$ensemble$samplingspace$parameters$method) ## Aside: if method were set to unscented, would take minimal changes to do UnKF +if (inherits(settings, "MultiSettings")) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} +# sample from parameters used for both sensitivity analysis and Ens +get.parameter.samples(settings, ens.sample.method = settings$ensemble$samplingspace$parameters$method) ## Aside: if method were set to unscented, would take minimal changes to do UnKF #---------------------------------------------------------------- # OBS data preparation #--------------------------------------------------------------- load("../Obs/LandTrendr_AGB_output-4sites.RData") -site1<-point_list +site1 <- point_list load("../Obs/LandTrendr_AGB_output_796-769.RData") -site2<-point_list -site2$median_AGB[[1]] %>% - filter(Site_ID!='1000000074') ->site2$median_AGB[[1]] +site2 <- point_list +site2$median_AGB[[1]] %>% + filter(Site_ID != "1000000074") -> site2$median_AGB[[1]] -site2$stdv_AGB[[1]] %>% - filter(Site_ID!='1000000074') ->site2$stdv_AGB[[1]] -#listviewer::jsonedit(point_list) +site2$stdv_AGB[[1]] %>% + filter(Site_ID != "1000000074") -> site2$stdv_AGB[[1]] +# listviewer::jsonedit(point_list) #-------------------------------------------------------------------------------- -#for multi site both mean and cov needs to be a list like this +# for multi site both mean and cov needs to be a list like this # +date # +siteid # c(state variables)/matrix(cov state variables) -# -#reorder sites in obs -point_list$median_AGB <-rbind(site1$median_AGB[[1]], - site2$median_AGB[[1]]) %>% filter(Site_ID %in% site.ids) -point_list$stdv_AGB <-rbind(site1$stdv_AGB[[1]], - site2$stdv_AGB[[1]])%>% filter(Site_ID %in% site.ids) +# +# reorder sites in obs +point_list$median_AGB <- rbind( + site1$median_AGB[[1]], + site2$median_AGB[[1]] +) %>% filter(Site_ID %in% site.ids) +point_list$stdv_AGB <- rbind( + site1$stdv_AGB[[1]], + site2$stdv_AGB[[1]] +) %>% filter(Site_ID %in% site.ids) -site.order <- sapply(site.ids,function(x) which(point_list$median_AGB$Site_ID %in% x)) %>% - as.numeric() %>% na.omit() +site.order <- sapply(site.ids, function(x) which(point_list$median_AGB$Site_ID %in% x)) %>% + as.numeric() %>% + na.omit() -point_list$median_AGB <- point_list$median_AGB[site.order,] -point_list$stdv_AGB <- point_list$stdv_AGB[site.order,] +point_list$median_AGB <- point_list$median_AGB[site.order, ] +point_list$stdv_AGB <- point_list$stdv_AGB[site.order, ] # truning lists to dfs for both mean and cov -date.obs <- strsplit(names(site1$median_AGB[[1]]),"_")[3:length(site1$median_AGB[[1]])] %>% - map_chr(~.x[2]) %>% paste0(.,"/12/31") +date.obs <- strsplit(names(site1$median_AGB[[1]]), "_")[3:length(site1$median_AGB[[1]])] %>% + map_chr(~ .x[2]) %>% + paste0(., "/12/31") -obs.mean <-names(point_list$median_AGB)[3:length(point_list$median_AGB)] %>% - map(function(namesl){ - ((point_list$median_AGB)[[namesl]] %>% - map(~.x %>% as.data.frame %>% `colnames<-`(c('AbvGrndWood'))) %>% - 
setNames(site.ids[1:length(.)]) - ) - }) %>% setNames(date.obs) +obs.mean <- names(point_list$median_AGB)[3:length(point_list$median_AGB)] %>% + map(function(namesl) { + ((point_list$median_AGB)[[namesl]] %>% + map(~ .x %>% + as.data.frame() %>% + `colnames<-`(c("AbvGrndWood"))) %>% + setNames(site.ids[1:length(.)]) + ) + }) %>% + setNames(date.obs) -obs.cov <-names(point_list$stdv_AGB)[3:length(point_list$median_AGB)] %>% +obs.cov <- names(point_list$stdv_AGB)[3:length(point_list$median_AGB)] %>% map(function(namesl) { ((point_list$stdv_AGB)[[namesl]] %>% - map( ~ (.x) ^ 2%>% as.matrix()) %>% - setNames(site.ids[1:length(.)])) - - }) %>% setNames(date.obs) + map(~ (.x)^2 %>% as.matrix()) %>% + setNames(site.ids[1:length(.)])) + }) %>% + setNames(date.obs) #---------------------------------------------------------------- # end OBS data preparation @@ -80,17 +96,17 @@ obs.cov <-names(point_list$stdv_AGB)[3:length(point_list$median_AGB)] %>% new.settings <- PEcAn.settings::prepare.settings(settings) jsonedit(new.settings) #------------------------------------------ SDA ------------------------------------- -sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, - control=list(trace=T, - FF=F, - interactivePlot=F, - TimeseriesPlot=T, - BiasPlot=F, - plot.title="lhc sampling - 4sites - SF50 - ALL PFTs - small sample size", - facet.plots=T, - debug=T, - pause=F) - ) - - - \ No newline at end of file +sda.enkf.multisite(new.settings, + obs.mean = obs.mean, obs.cov = obs.cov, + control = list( + trace = T, + FF = F, + interactivePlot = F, + TimeseriesPlot = T, + BiasPlot = F, + plot.title = "lhc sampling - 4sites - SF50 - ALL PFTs - small sample size", + facet.plots = T, + debug = T, + pause = F + ) +) diff --git a/modules/assim.sequential/inst/MultiSite-Exs/SDA/Weights.R b/modules/assim.sequential/inst/MultiSite-Exs/SDA/Weights.R index b4966c12e28..b3bba079731 100644 --- a/modules/assim.sequential/inst/MultiSite-Exs/SDA/Weights.R +++ b/modules/assim.sequential/inst/MultiSite-Exs/SDA/Weights.R @@ -1,160 +1,183 @@ - library(tidyverse) - library(mvtnorm) - library(nimble) - library(furrr) - rm(list = ls()) - setwd("/projectnb/dietzelab/hamzed/SDA/ProductionRun/500Sites/Weights") - load('FORECAST.RData') - load('ANALYSIS.RData') - plan(multisession) - #------------------------------------------------------ - #Loading SDA outputs----------------------------------- - #------------------------------------------------------ - args <- commandArgs(trailingOnly = TRUE) - ind <- args[1] %>% as.numeric() - if(is.na(ind))ind <- 1 - print(ind) - - - Weights.new <- pmap(list(ANALYSIS[ind], - FORECAST[ind], - names(FORECAST)[ind]), - function(ANALYSIS, FORECAST, Year.applid.weight) { - - library(tidyverse) - library(mvtnorm) - library(nimble) - X.original <- FORECAST - #--- state variables with zeros - s.v.z <-which(apply(X.original, 2, function(x) any(x==0))) - - site.need <- attr(X.original, 'Site')[s.v.z]%>% unique() - #------------------------------ For each site - imputed <- site.need %>% - future_map(possibly( - function(site.id){ +library(tidyverse) +library(mvtnorm) +library(nimble) +library(furrr) +rm(list = ls()) +setwd("/projectnb/dietzelab/hamzed/SDA/ProductionRun/500Sites/Weights") +load("FORECAST.RData") +load("ANALYSIS.RData") +plan(multisession) +#------------------------------------------------------ +# Loading SDA outputs----------------------------------- +#------------------------------------------------------ +args <- commandArgs(trailingOnly = TRUE) +ind <- args[1] %>% 
as.numeric() +if (is.na(ind)) ind <- 1 +print(ind) + + +Weights.new <- pmap( + list( + ANALYSIS[ind], + FORECAST[ind], + names(FORECAST)[ind] + ), + function(ANALYSIS, FORECAST, Year.applid.weight) { + library(tidyverse) + library(mvtnorm) + library(nimble) + X.original <- FORECAST + #--- state variables with zeros + s.v.z <- which(apply(X.original, 2, function(x) any(x == 0))) + + site.need <- attr(X.original, "Site")[s.v.z] %>% unique() + #------------------------------ For each site + imputed <- site.need %>% + future_map( + possibly( + function(site.id) { file.create(file.path("testF", site.id)) - s.v.z.p <- which(attr(X.original, 'Site') == site.id) - + s.v.z.p <- which(attr(X.original, "Site") == site.id) + X <- X.original[, s.v.z.p] #--- Pf <- cov(X) mu.f <- apply(X, 2, mean) #-------------------------------------------------------------------------- - #Estimating the weights -- + # Estimating the weights -- #-------------------------------------------------------------------------- PEcAn.assim.sequential:::load_nimble() - + intervalX <- matrix(NA, ncol(X), 2) rownames(intervalX) <- colnames(X) - + intervalX[, 1] <- 0 # lower bound - intervalX[, 2] <- 300 #upper bound - + intervalX[, 2] <- 300 # upper bound + weight_list <- rep(0, nrow(X)) wts <- unlist(weight_list) #### These vectors are used to categorize data based on censoring from the interval matrix - x.ind <- x.censored <- matrix(NA, ncol=ncol(X), nrow=nrow(X)) - for(j in seq_along(mu.f)){ - for(n in seq_len(nrow(X))){ - x.ind[n,j] <- as.numeric(X[n,j] > 0) - x.censored[n,j] <- as.numeric(ifelse(X[n,j] > intervalX[j,2], 0, X[n,j])) # + x.ind <- x.censored <- matrix(NA, ncol = ncol(X), nrow = nrow(X)) + for (j in seq_along(mu.f)) { + for (n in seq_len(nrow(X))) { + x.ind[n, j] <- as.numeric(X[n, j] > 0) + x.censored[n, j] <- as.numeric(ifelse(X[n, j] > intervalX[j, 2], 0, X[n, j])) # } } - - #The purpose of this step is to impute data for mu.f - #where there are zero values so that - #mu.f is in 'tobit space' in the full model - constants.tobit2space <- list(N = nrow(X), - J = length(mu.f)) - - data.tobit2space <- list(y.ind = x.ind, - y.censored = x.censored, - mu_0 = rep(0,length(mu.f)), - lambda_0 = diag(length(mu.f), - length(mu.f)+1), - nu_0 = 3, - wts = wts)#some measure of prior obs - - inits.tobit2space <- list(pf = cov(X), - muf = colMeans(X)) - + + # The purpose of this step is to impute data for mu.f + # where there are zero values so that + # mu.f is in 'tobit space' in the full model + constants.tobit2space <- list( + N = nrow(X), + J = length(mu.f) + ) + + data.tobit2space <- list( + y.ind = x.ind, + y.censored = x.censored, + mu_0 = rep(0, length(mu.f)), + lambda_0 = diag( + length(mu.f), + length(mu.f) + 1 + ), + nu_0 = 3, + wts = wts + ) # some measure of prior obs + + inits.tobit2space <- list( + pf = cov(X), + muf = colMeans(X) + ) + tobit2space_pred <- nimbleModel(tobit2space.model, - data = data.tobit2space, - constants = constants.tobit2space, - inits = inits.tobit2space, - name = 'space') + data = data.tobit2space, + constants = constants.tobit2space, + inits = inits.tobit2space, + name = "space" + ) ## Adding X.mod,q,r as data for building model. 
conf_tobit2space <- configureMCMC(tobit2space_pred, - thin = 10, - print=TRUE) - conf_tobit2space$addMonitors(c("pf", "muf","y.censored")) - + thin = 10, + print = TRUE + ) + conf_tobit2space$addMonitors(c("pf", "muf", "y.censored")) + samplerNumberOffset_tobit2space <- length(conf_tobit2space$getSamplers()) - - for(j in seq_along(mu.f)){ - for(n in seq_len(nrow(X))){ - node <- paste0('y.censored[',n,',',j,']') - conf_tobit2space$addSampler(node, 'toggle', control=list(type='RW_block')) + + for (j in seq_along(mu.f)) { + for (n in seq_len(nrow(X))) { + node <- paste0("y.censored[", n, ",", j, "]") + conf_tobit2space$addSampler(node, "toggle", control = list(type = "RW_block")) } } - - #conf_tobit2space$printSamplers() - + + # conf_tobit2space$printSamplers() + Rmcmc_tobit2space <- buildMCMC(conf_tobit2space) - + Cmodel_tobit2space <- compileNimble(tobit2space_pred) Cmcmc_tobit2space <- compileNimble(Rmcmc_tobit2space, project = tobit2space_pred) - - for(i in seq_along(X)) { - valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space+i]], 'toggle', 1-x.ind[i]) + + for (i in seq_along(X)) { + valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space + i]], "toggle", 1 - x.ind[i]) } - #browser() + # browser() dat.tobit2space <- runMCMC(Cmcmc_tobit2space, - niter = 1000000, - nburnin=400000, - progressBar=TRUE) - - iycens <- grep("y.censored",colnames(dat.tobit2space)) - X.new <- matrix(colMeans(dat.tobit2space[,iycens]), nrow(X), ncol(X)) - - list(list(X.new, - X)) %>% + niter = 1000000, + nburnin = 400000, + progressBar = TRUE + ) + + iycens <- grep("y.censored", colnames(dat.tobit2space)) + X.new <- matrix(colMeans(dat.tobit2space[, iycens]), nrow(X), ncol(X)) + + list(list( + X.new, + X + )) %>% setNames(site.id) - }, otherwise = NULL), - .progress = TRUE) - - #------------------------------------------------ Replacing and estimating the X - imputed <- imputed %>% flatten() - - X.new <- imputed %>% - map(~ .x[[1]]) %>% - do.call('cbind', .) - - ind.rep <- which(attr(X.original, 'Site') %in% names(imputed)) - X.original.clean <- X.original - X.original.clean[, ind.rep] <- X.new - - - mu.a <- apply(ANALYSIS,2 ,mean) - Pa <- cov(ANALYSIS) - - flux.weights <- dmvnorm(X.original, - mean = mu.a, - sigma = Pa, - log = FALSE) - - flux.weights.new <- dmvnorm(X.original.clean, - mean = mu.a, - sigma = Pa, - log = FALSE) - - gc() - list(x.ori = X.original, - x.ori.new = X.original.clean, - weights.new = flux.weights.new, - weights = flux.weights - ) - }) - - saveRDS(Weights.new, file=paste0("W_",ind,".RDS")) + }, + otherwise = NULL + ), + .progress = TRUE + ) + + #------------------------------------------------ Replacing and estimating the X + imputed <- imputed %>% flatten() + + X.new <- imputed %>% + map(~ .x[[1]]) %>% + do.call("cbind", .) 
+ + ind.rep <- which(attr(X.original, "Site") %in% names(imputed)) + X.original.clean <- X.original + X.original.clean[, ind.rep] <- X.new + + + mu.a <- apply(ANALYSIS, 2, mean) + Pa <- cov(ANALYSIS) + + flux.weights <- dmvnorm(X.original, + mean = mu.a, + sigma = Pa, + log = FALSE + ) + + flux.weights.new <- dmvnorm(X.original.clean, + mean = mu.a, + sigma = Pa, + log = FALSE + ) + + gc() + list( + x.ori = X.original, + x.ori.new = X.original.clean, + weights.new = flux.weights.new, + weights = flux.weights + ) + } +) + +saveRDS(Weights.new, file = paste0("W_", ind, ".RDS")) diff --git a/modules/assim.sequential/inst/MultiSite-Exs/SDA/Weights_RDS.R b/modules/assim.sequential/inst/MultiSite-Exs/SDA/Weights_RDS.R index 413d08a0ccd..d99e46f1db3 100644 --- a/modules/assim.sequential/inst/MultiSite-Exs/SDA/Weights_RDS.R +++ b/modules/assim.sequential/inst/MultiSite-Exs/SDA/Weights_RDS.R @@ -1,52 +1,56 @@ - library(tidyverse) - library(mvtnorm) - library(nimble) - library(scales) - library(furrr) - rm(list = ls()) - setwd("/projectnb/dietzelab/hamzed/SDA/ProductionRun/500Sites/Weights") - load('FORECAST.RData') - load('ANALYSIS.RData') - plan(multisession) - #------------------------------------------------------ - #Loading SDA outputs----------------------------------- - #------------------------------------------------------ - args <- commandArgs(trailingOnly = TRUE) - ind <- args[1] %>% as.numeric() - if(is.na(ind))ind <- 1 - print(ind) - ind <- 1:33 - - Weights.new <- pmap(list(ANALYSIS[ind], - FORECAST[ind], - names(FORECAST)[ind]), - function(ANALYSIS.r, FORECAST.r, Year.applid.weight) { - #Read tobit outputs - ind <- which( names(FORECAST) %in% Year.applid.weight) - wr <- readRDS(paste0("RDS/W_",ind,".RDS")) - # Mu.a, Pa - mu.a <- apply(ANALYSIS.r,2 ,mean) - Pa <- cov(ANALYSIS.r) - # browser() - w <- emdbook::dmvnorm(wr$x.original.new, mu.a, Pa, TRUE) - - data.frame( - ens = 1:20, - raw_weight=w, - Relative_weight=abs(w)/sum(abs(w)), - Year=lubridate::year(Year.applid.weight) - ) - }) - - #saveRDS(Weights.new, 'Weights.new.RDS') +library(tidyverse) +library(mvtnorm) +library(nimble) +library(scales) +library(furrr) +rm(list = ls()) +setwd("/projectnb/dietzelab/hamzed/SDA/ProductionRun/500Sites/Weights") +load("FORECAST.RData") +load("ANALYSIS.RData") +plan(multisession) +#------------------------------------------------------ +# Loading SDA outputs----------------------------------- +#------------------------------------------------------ +args <- commandArgs(trailingOnly = TRUE) +ind <- args[1] %>% as.numeric() +if (is.na(ind)) ind <- 1 +print(ind) +ind <- 1:33 - Weights.new %>% - map_dfr(~.x) %>% - # filter(!(Year%in%c(2013,2014)))%>% - ggplot(aes(Year,Relative_weight))+ - geom_area(stat="identity", aes(fill=ens %>% as.factor()))+ - scale_fill_viridis_d(name="Ensemble", option = "A")+ - scale_x_continuous(breaks = seq(1986,2018,2))+ - theme_minimal(base_size = 15) - -ggsave("Weights2.png", width = 10, height = 6) +Weights.new <- pmap( + list( + ANALYSIS[ind], + FORECAST[ind], + names(FORECAST)[ind] + ), + function(ANALYSIS.r, FORECAST.r, Year.applid.weight) { + # Read tobit outputs + ind <- which(names(FORECAST) %in% Year.applid.weight) + wr <- readRDS(paste0("RDS/W_", ind, ".RDS")) + # Mu.a, Pa + mu.a <- apply(ANALYSIS.r, 2, mean) + Pa <- cov(ANALYSIS.r) + # browser() + w <- emdbook::dmvnorm(wr$x.original.new, mu.a, Pa, TRUE) + + data.frame( + ens = 1:20, + raw_weight = w, + Relative_weight = abs(w) / sum(abs(w)), + Year = lubridate::year(Year.applid.weight) + ) + } +) + +# 
saveRDS(Weights.new, 'Weights.new.RDS') + +Weights.new %>% + map_dfr(~.x) %>% + # filter(!(Year%in%c(2013,2014)))%>% + ggplot(aes(Year, Relative_weight)) + + geom_area(stat = "identity", aes(fill = ens %>% as.factor())) + + scale_fill_viridis_d(name = "Ensemble", option = "A") + + scale_x_continuous(breaks = seq(1986, 2018, 2)) + + theme_minimal(base_size = 15) + +ggsave("Weights2.png", width = 10, height = 6) diff --git a/modules/assim.sequential/inst/MultiSite-Exs/SDA/Weights_Site.R b/modules/assim.sequential/inst/MultiSite-Exs/SDA/Weights_Site.R index fc144a24eca..c4f6fdeb96d 100644 --- a/modules/assim.sequential/inst/MultiSite-Exs/SDA/Weights_Site.R +++ b/modules/assim.sequential/inst/MultiSite-Exs/SDA/Weights_Site.R @@ -1,91 +1,91 @@ - library(tidyverse) - library(mvtnorm) - library(nimble) - library(scales) - library(furrr) - rm(list = ls()) - setwd("/projectnb/dietzelab/hamzed/SDA/ProductionRun/500Sites/Weights") - load('FORECAST.RData') - load('ANALYSIS.RData') - plan(multisession) - #------------------------------------------------------ - #Loading SDA outputs----------------------------------- - #------------------------------------------------------ - args <- commandArgs(trailingOnly = TRUE) - ind <- args[1] %>% as.numeric() - if(is.na(ind))ind <- 1 - print(ind) - ind <- 1:33 - - site.ids <- attr(FORECAST[[1]],'Site') %>% unique() - - Weights.new <- pmap(list(ANALYSIS[ind], - FORECAST[ind], - names(FORECAST)[ind]), - function(ANALYSIS.r, FORECAST.r, Year.applid.weight) { - - - site.ids %>% - future_map_dfr(function(one.site){ - # browser() - site.ind <- which( attr(FORECAST[[1]],'Site') %in% one.site) - #Read tobit outputs - ind <- which( names(FORECAST) %in% Year.applid.weight) - wr <- readRDS(paste0("RDS/W_",ind,".RDS")) - # Mu.a, Pa - mu.a <- apply(ANALYSIS.r[,site.ind],2 ,mean) - Pa <- cov(ANALYSIS.r[,site.ind]) - # browser() - w <- emdbook::dmvnorm(FORECAST.r[,site.ind], mu.a, Pa, TRUE) - - data.frame( - ens = 1:20, - raw_weight=w, - Site= one.site, - Relative_weight=abs(w)/sum(abs(w)), - Year=lubridate::year(Year.applid.weight) - ) - }, .progress = TRUE) - - }) - - #saveRDS(Weights.new, 'Weights.new.RDS') - site.ids %>% - walk(function(site.id.one){ - Weights.new %>% - map_dfr(~.x) %>% - filter(Site %in% site.id.one)%>% - ggplot(aes(Year,Relative_weight))+ - geom_area(stat="identity", aes(fill=ens %>% as.factor()))+ - scale_fill_viridis_d(name="Ensemble", option = "A")+ - scale_x_continuous(breaks = seq(1986,2018,2))+ - theme_minimal(base_size = 15) - - ggsave(paste0("SiteL_Plots/Weights_",site.id.one,".png"), width = 10, height = 6) - }) +library(tidyverse) +library(mvtnorm) +library(nimble) +library(scales) +library(furrr) +rm(list = ls()) +setwd("/projectnb/dietzelab/hamzed/SDA/ProductionRun/500Sites/Weights") +load("FORECAST.RData") +load("ANALYSIS.RData") +plan(multisession) +#------------------------------------------------------ +# Loading SDA outputs----------------------------------- +#------------------------------------------------------ +args <- commandArgs(trailingOnly = TRUE) +ind <- args[1] %>% as.numeric() +if (is.na(ind)) ind <- 1 +print(ind) +ind <- 1:33 - Weights.new %>% +site.ids <- attr(FORECAST[[1]], "Site") %>% unique() + +Weights.new <- pmap( + list( + ANALYSIS[ind], + FORECAST[ind], + names(FORECAST)[ind] + ), + function(ANALYSIS.r, FORECAST.r, Year.applid.weight) { + site.ids %>% + future_map_dfr(function(one.site) { + # browser() + site.ind <- which(attr(FORECAST[[1]], "Site") %in% one.site) + # Read tobit outputs + ind <- which(names(FORECAST) 
%in% Year.applid.weight) + wr <- readRDS(paste0("RDS/W_", ind, ".RDS")) + # Mu.a, Pa + mu.a <- apply(ANALYSIS.r[, site.ind], 2, mean) + Pa <- cov(ANALYSIS.r[, site.ind]) + # browser() + w <- emdbook::dmvnorm(FORECAST.r[, site.ind], mu.a, Pa, TRUE) + + data.frame( + ens = 1:20, + raw_weight = w, + Site = one.site, + Relative_weight = abs(w) / sum(abs(w)), + Year = lubridate::year(Year.applid.weight) + ) + }, .progress = TRUE) + } +) + +# saveRDS(Weights.new, 'Weights.new.RDS') +site.ids %>% + walk(function(site.id.one) { + Weights.new %>% map_dfr(~.x) %>% - saveRDS(file="site_level_shit.RDS") - - - site_level_shit[ which(is.nan(site_level_shit$Relative_weight)), 4] <- 0.05 - - saveRDS(site_level_shit, file="site_level_shit.RDS") - - - - site.ids %>% - future_map(function(site.id.one){ - site_level_shit %>% - filter(Site %in% site.id.one)%>% - ggplot(aes(Year,Relative_weight))+ - geom_area(stat="identity", aes(fill=ens %>% as.factor()))+ - scale_fill_viridis_d(name="Ensemble", option = "A")+ - scale_x_continuous(breaks = seq(1986,2018,2))+ - labs(title=site.id.one)+ - theme_minimal(base_size = 15) - - ggsave(paste0("SiteL_Plots/Weights_",site.id.one,".png"), width = 10, height = 6) - }) - \ No newline at end of file + filter(Site %in% site.id.one) %>% + ggplot(aes(Year, Relative_weight)) + + geom_area(stat = "identity", aes(fill = ens %>% as.factor())) + + scale_fill_viridis_d(name = "Ensemble", option = "A") + + scale_x_continuous(breaks = seq(1986, 2018, 2)) + + theme_minimal(base_size = 15) + + ggsave(paste0("SiteL_Plots/Weights_", site.id.one, ".png"), width = 10, height = 6) + }) + +Weights.new %>% + map_dfr(~.x) %>% + saveRDS(file = "site_level_shit.RDS") + + +site_level_shit[which(is.nan(site_level_shit$Relative_weight)), 4] <- 0.05 + +saveRDS(site_level_shit, file = "site_level_shit.RDS") + + + +site.ids %>% + future_map(function(site.id.one) { + site_level_shit %>% + filter(Site %in% site.id.one) %>% + ggplot(aes(Year, Relative_weight)) + + geom_area(stat = "identity", aes(fill = ens %>% as.factor())) + + scale_fill_viridis_d(name = "Ensemble", option = "A") + + scale_x_continuous(breaks = seq(1986, 2018, 2)) + + labs(title = site.id.one) + + theme_minimal(base_size = 15) + + ggsave(paste0("SiteL_Plots/Weights_", site.id.one, ".png"), width = 10, height = 6) + }) diff --git a/modules/assim.sequential/inst/NEFI/US_Harvard/download_Harvard.R b/modules/assim.sequential/inst/NEFI/US_Harvard/download_Harvard.R index ddfc4e8a935..f45bcd18316 100644 --- a/modules/assim.sequential/inst/NEFI/US_Harvard/download_Harvard.R +++ b/modules/assim.sequential/inst/NEFI/US_Harvard/download_Harvard.R @@ -3,60 +3,54 @@ library(tidyverse) library(lubridate) library(PEcAn.all) -download_US_Harvard <- function(start_date, end_date) { - - if(end_date > Sys.Date()) end_date = Sys.Date() - date = seq(from = start_date, to = end_date, by = 'days') - data = NA - for(i in 1:length(date)){ - +download_US_Harvard <- function(start_date, end_date) { + if (end_date > Sys.Date()) end_date <- Sys.Date() + date <- seq(from = start_date, to = end_date, by = "days") + data <- NA + for (i in 1:length(date)) { yy <- strftime(date[i], format = "%y") doy <- strftime(date[i], format = "%j") - my_host <- list(name = "geo.bu.edu", user = 'kzarada', tunnel = "/tmp/tunnel") - - try(remote.copy.from(host = my_host, src = paste0('/projectnb/dietzelab/NEFI_data/HFEMS_prelim_', yy, '_', doy, '_dat.csv'), - dst = paste0('/fs/data3/kzarada/NEFI/US_Harvard/Flux_data/', yy, doy,'.csv'), delete=FALSE)) - - - - 
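As a side note for the download_* helpers in this and the following files: they share one alignment pattern — build a regular half-hourly timestamp grid for the requested window, then slot whatever observations were actually retrieved into it by matching timestamps, leaving NA elsewhere. A minimal sketch of that pattern (toy timestamps and a hypothetical NEE column):

# Sketch of the grid-and-match alignment used by the download_* functions.
obs  <- data.frame(Time = as.POSIXct("2020-06-01 00:30", tz = "UTC"), NEE = -2.1)
grid <- seq(from = as.POSIXct("2020-06-01 00:00", tz = "UTC"),
            to   = as.POSIXct("2020-06-01 02:00", tz = "UTC"),
            by   = "30 mins")
full <- data.frame(Time = grid, NEE = obs$NEE[match(grid, obs$Time)])  # NA where nothing was downloaded
full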
if(file.exists(paste0('/fs/data3/kzarada/NEFI/US_Harvard/Flux_data/', yy, doy,'.csv'))){ - data1 = read.csv(paste0('/fs/data3/kzarada/NEFI/US_Harvard/Flux_data/', yy,doy,'.csv'), header = T, sep = "") - data = rbind(data, data1)} - + my_host <- list(name = "geo.bu.edu", user = "kzarada", tunnel = "/tmp/tunnel") + + try(remote.copy.from( + host = my_host, src = paste0("/projectnb/dietzelab/NEFI_data/HFEMS_prelim_", yy, "_", doy, "_dat.csv"), + dst = paste0("/fs/data3/kzarada/NEFI/US_Harvard/Flux_data/", yy, doy, ".csv"), delete = FALSE + )) + + + + if (file.exists(paste0("/fs/data3/kzarada/NEFI/US_Harvard/Flux_data/", yy, doy, ".csv"))) { + data1 <- read.csv(paste0("/fs/data3/kzarada/NEFI/US_Harvard/Flux_data/", yy, doy, ".csv"), header = T, sep = "") + data <- rbind(data, data1) + } } - - - data <- data %>% - drop_na(TIME_START.YYYYMMDDhhmm) %>% - mutate(Time = lubridate::with_tz(as.POSIXct(strptime(TIME_START.YYYYMMDDhhmm, format = "%Y%m%d%H%M", tz = "EST"),tz = "EST"), tz = "UTC")) %>% + + + data <- data %>% + drop_na(TIME_START.YYYYMMDDhhmm) %>% + mutate(Time = lubridate::with_tz(as.POSIXct(strptime(TIME_START.YYYYMMDDhhmm, format = "%Y%m%d%H%M", tz = "EST"), tz = "EST"), tz = "UTC")) %>% dplyr::select(Time, LHF.W.m.2, Fco2.e.6mol.m.2.s.1) - + colnames(data) <- c("Time", "LE", "NEE") - - - Time = lubridate::force_tz(seq(from = as.POSIXct(start_date), to = as.POSIXct(end_date), by = "30 mins"), tz = "UTC") - - data.full = data.frame(Time, NEE = rep(NA, length(Time)), LE = rep(NA, length(Time))) - - - - - match(Time, data$Time) - - data.full$NEE <- data$NEE[match(Time, data$Time)] - data.full$LE <- data$NEE[match(Time, data$Time)] - data.full$NEE <- PEcAn.utils::misc.convert(data.full$NEE, "umol C m-2 s-1", "kg C m-2 s-1") - - - return(data.full) - } -#manually check if files are available -#read.csv('ftp://ftp.as.harvard.edu/pub/exchange/jwm/Forecast_data/HFEMS_prelim_19_330_dat.csv') + Time <- lubridate::force_tz(seq(from = as.POSIXct(start_date), to = as.POSIXct(end_date), by = "30 mins"), tz = "UTC") + + data.full <- data.frame(Time, NEE = rep(NA, length(Time)), LE = rep(NA, length(Time))) + + + match(Time, data$Time) + data.full$NEE <- data$NEE[match(Time, data$Time)] + data.full$LE <- data$NEE[match(Time, data$Time)] + data.full$NEE <- PEcAn.utils::misc.convert(data.full$NEE, "umol C m-2 s-1", "kg C m-2 s-1") + return(data.full) +} + +# manually check if files are available +# read.csv('ftp://ftp.as.harvard.edu/pub/exchange/jwm/Forecast_data/HFEMS_prelim_19_330_dat.csv') diff --git a/modules/assim.sequential/inst/NEFI/US_Harvard/download_Harvard_met.R b/modules/assim.sequential/inst/NEFI/US_Harvard/download_Harvard_met.R index 14af3bf5ff4..4c025c4a5a2 100644 --- a/modules/assim.sequential/inst/NEFI/US_Harvard/download_Harvard_met.R +++ b/modules/assim.sequential/inst/NEFI/US_Harvard/download_Harvard_met.R @@ -3,45 +3,40 @@ library(tidyverse) library(lubridate) download_US_Harvard_met <- function(start_date, end_date) { - - - date = seq(from = start_date, to = end_date, by = 'days') - data = NA - for(i in 1:length(date)){ - + date <- seq(from = start_date, to = end_date, by = "days") + data <- NA + for (i in 1:length(date)) { yy <- strftime(date[i], format = "%y") doy <- strftime(date[i], format = "%j") - if(file.exists(paste0('/fs/data3/kzarada/NEFI/US_Harvard/Flux_data/', yy, doy,'.csv'))){ - data1 = read.csv(paste0('/fs/data3/kzarada/NEFI/US_Harvard/Flux_data/', yy,doy,'.csv'), header = T, sep = "") - data = rbind(data, data1)} - + if 
(file.exists(paste0("/fs/data3/kzarada/NEFI/US_Harvard/Flux_data/", yy, doy, ".csv"))) { + data1 <- read.csv(paste0("/fs/data3/kzarada/NEFI/US_Harvard/Flux_data/", yy, doy, ".csv"), header = T, sep = "") + data <- rbind(data, data1) + } } - - - data <- data %>% - drop_na(TIME_START.YYYYMMDDhhmm) %>% - mutate(Time = lubridate::with_tz(as.POSIXct(strptime(TIME_START.YYYYMMDDhhmm, format = "%Y%m%d%H%M", tz = "EST"),tz = "EST"), tz = "UTC")) %>% + + + data <- data %>% + drop_na(TIME_START.YYYYMMDDhhmm) %>% + mutate(Time = lubridate::with_tz(as.POSIXct(strptime(TIME_START.YYYYMMDDhhmm, format = "%Y%m%d%H%M", tz = "EST"), tz = "EST"), tz = "UTC")) %>% dplyr::select(Time, Wspd.m.s.1, Ta.C, RH) - + colnames(data) <- c("date", "ws", "Tair", "rH") - - - date = lubridate::force_tz(seq(from = as.POSIXct(start_date), to = as.POSIXct(end_date), by = "30 mins"), tz = "UTC") - - data.full = data.frame(date, ws = rep(NA, length(date)), Tair = rep(NA, length(date)), rH = rep(NA, length(date)) ) - - - - - - + + + date <- lubridate::force_tz(seq(from = as.POSIXct(start_date), to = as.POSIXct(end_date), by = "30 mins"), tz = "UTC") + + data.full <- data.frame(date, ws = rep(NA, length(date)), Tair = rep(NA, length(date)), rH = rep(NA, length(date))) + + + + + + data.full$ws <- data$ws[match(date, data$date)] data.full$Tair <- data$Tair[match(date, data$date)] data.full$rH <- data$rH[match(date, data$date)] - - + + return(data.full) - -} #end function - \ No newline at end of file +} # end function diff --git a/modules/assim.sequential/inst/NEFI/US_Harvard/download_soilmoist_harvard.R b/modules/assim.sequential/inst/NEFI/US_Harvard/download_soilmoist_harvard.R index b6ba2937b2c..de53a055c46 100644 --- a/modules/assim.sequential/inst/NEFI/US_Harvard/download_soilmoist_harvard.R +++ b/modules/assim.sequential/inst/NEFI/US_Harvard/download_soilmoist_harvard.R @@ -3,54 +3,52 @@ library(tidyverse) library(lubridate) library(PEcAn.all) -download_soilmoist_Harvard <- function(start_date, end_date) { - - if(end_date > Sys.Date()) end_date = Sys.Date() - if(start_date < as.Date("2019-11-06")) start_date = "2019-11-06" - date = seq(from = as.Date(start_date), to = as.Date(end_date), by = 'days') - data = NA - for(i in 1:length(date)){ - +download_soilmoist_Harvard <- function(start_date, end_date) { + if (end_date > Sys.Date()) end_date <- Sys.Date() + if (start_date < as.Date("2019-11-06")) start_date <- "2019-11-06" + date <- seq(from = as.Date(start_date), to = as.Date(end_date), by = "days") + data <- NA + for (i in 1:length(date)) { yy <- strftime(date[i], format = "%y") doy <- strftime(date[i], format = "%j") - #my_host <- list(name = "geo.bu.edu", user = 'kzarada', tunnel = "/tmp/tunnel") - - #try(remote.copy.from(host = my_, src = paste0('/projectnb/dietzelab/NEFI_data/HFEMS_prelim_', yy, '_', doy, '_dat.csv'), - #dst = paste0('/fs/data3/kzarada/NEFI/US_Harvard/Flux_data/', yy, doy,'.csv'), delete=FALSE)) - - - - if(file.exists(paste0('/projectnb/dietzelab/NEFI_data/HFEMS_prelim_', yy, '_', doy, '_dat.csv'))){ - data1 = read.csv(paste0('/projectnb/dietzelab/NEFI_data/HFEMS_prelim_', yy, '_', doy, '_dat.csv'), header = T, sep = "") - data = rbind(data, data1)} - + # my_host <- list(name = "geo.bu.edu", user = 'kzarada', tunnel = "/tmp/tunnel") + + # try(remote.copy.from(host = my_, src = paste0('/projectnb/dietzelab/NEFI_data/HFEMS_prelim_', yy, '_', doy, '_dat.csv'), + # dst = paste0('/fs/data3/kzarada/NEFI/US_Harvard/Flux_data/', yy, doy,'.csv'), delete=FALSE)) + + + + if 
(file.exists(paste0("/projectnb/dietzelab/NEFI_data/HFEMS_prelim_", yy, "_", doy, "_dat.csv"))) { + data1 <- read.csv(paste0("/projectnb/dietzelab/NEFI_data/HFEMS_prelim_", yy, "_", doy, "_dat.csv"), header = T, sep = "") + data <- rbind(data, data1) + } } - - - data <- data %>% - drop_na(TIME_START.YYYYMMDDhhmm) %>% - mutate(Time = lubridate::with_tz(as.POSIXct(strptime(TIME_START.YYYYMMDDhhmm, format = "%Y%m%d%H%M", tz = "EST"),tz = "EST"), tz = "UTC")) %>% + + + data <- data %>% + drop_na(TIME_START.YYYYMMDDhhmm) %>% + mutate(Time = lubridate::with_tz(as.POSIXct(strptime(TIME_START.YYYYMMDDhhmm, format = "%Y%m%d%H%M", tz = "EST"), tz = "EST"), tz = "UTC")) %>% dplyr::select(Time, SWC15) - + colnames(data) <- c("Time", "SWC15") - - - Time = lubridate::force_tz(seq(from = as.POSIXct(start_date), to = as.POSIXct(end_date), by = "30 mins"), tz = "UTC") - - data.full = data.frame(Time, SWC15 = rep(NA, length(Time))) - - - - + + + Time <- lubridate::force_tz(seq(from = as.POSIXct(start_date), to = as.POSIXct(end_date), by = "30 mins"), tz = "UTC") + + data.full <- data.frame(Time, SWC15 = rep(NA, length(Time))) + + + + match(Time, data$Time) - + data.full$SWC15 <- data$SWC15[match(Time, data$Time)] - - + + return(data.full) } -#manually check if files are available -#read.csv('ftp://ftp.as.harvard.edu/pub/exchange/jwm/Forecast_data/HFEMS_prelim_20_196_dat.csv') +# manually check if files are available +# read.csv('ftp://ftp.as.harvard.edu/pub/exchange/jwm/Forecast_data/HFEMS_prelim_20_196_dat.csv') diff --git a/modules/assim.sequential/inst/NEFI/US_Los/download_Los.R b/modules/assim.sequential/inst/NEFI/US_Los/download_Los.R index 8d4bf4e6533..46ad41fd8bb 100644 --- a/modules/assim.sequential/inst/NEFI/US_Los/download_Los.R +++ b/modules/assim.sequential/inst/NEFI/US_Los/download_Los.R @@ -3,18 +3,17 @@ library(tidyverse) library(lubridate) download_US_Los <- function(start_date, end_date) { - - base_url <- "http://co2.aos.wisc.edu/data/cheas/lcreek/flux/current/ameriflux/US-Los_HH_" + base_url <- "http://co2.aos.wisc.edu/data/cheas/lcreek/flux/current/ameriflux/US-Los_HH_" start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + # Reading in the data data <- start_year:end_year %>% - purrr::map_df(function(syear){ + purrr::map_df(function(syear) { influx <- tryCatch( read.table( - paste0(base_url, syear, '01010000_', syear + 1, "01010000.csv"), + paste0(base_url, syear, "01010000_", syear + 1, "01010000.csv"), sep = ",", header = TRUE ) %>% @@ -29,12 +28,12 @@ download_US_Los <- function(start_date, end_date) { } ) }) %>% - mutate_all(funs(as.numeric)) %>% - mutate(Time = lubridate::ymd_hm(TIMESTAMP_START)) %>% - dplyr::select(Time, NEE, LE = LE_1_1_1) %>% - dplyr::na_if(-9999) %>% - mutate(NEE = PEcAn.utils::misc.convert(NEE, "umol C m-2 s-1", "kg C m-2 s-1")) %>% - filter(Time >= start_date & Time <=end_date) - + mutate_all(funs(as.numeric)) %>% + mutate(Time = lubridate::ymd_hm(TIMESTAMP_START)) %>% + dplyr::select(Time, NEE, LE = LE_1_1_1) %>% + dplyr::na_if(-9999) %>% + mutate(NEE = PEcAn.utils::misc.convert(NEE, "umol C m-2 s-1", "kg C m-2 s-1")) %>% + filter(Time >= start_date & Time <= end_date) + return(data) } diff --git a/modules/assim.sequential/inst/NEFI/US_Los/download_Los_met.R b/modules/assim.sequential/inst/NEFI/US_Los/download_Los_met.R index b45f4651c9f..22b4109e4cd 100644 --- a/modules/assim.sequential/inst/NEFI/US_Los/download_Los_met.R +++ b/modules/assim.sequential/inst/NEFI/US_Los/download_Los_met.R @@ -3,18 +3,17 @@ library(tidyverse) 
library(lubridate) download_US_Los_met <- function(start_date, end_date) { - - base_url <- "http://co2.aos.wisc.edu/data/cheas/lcreek/flux/current/ameriflux/US-Los_HH_" + base_url <- "http://co2.aos.wisc.edu/data/cheas/lcreek/flux/current/ameriflux/US-Los_HH_" start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + # Reading in the data data <- start_year:end_year %>% - purrr::map_df(function(syear){ + purrr::map_df(function(syear) { influx <- tryCatch( read.table( - paste0(base_url, syear, '01010000_', syear + 1, "01010000.csv"), + paste0(base_url, syear, "01010000_", syear + 1, "01010000.csv"), sep = ",", header = TRUE ) %>% @@ -29,12 +28,12 @@ download_US_Los_met <- function(start_date, end_date) { } ) }) %>% - mutate_all(funs(as.numeric)) %>% - mutate(Time = lubridate::ymd_hm(TIMESTAMP_START)) %>% - dplyr::select(Time, WS_1_1_1, TA_1_1_1) %>% - dplyr::na_if(-9999) %>% - add_column(rH = NA)%>% - filter(Time >= start_date & Time <=end_date) - colnames(data) = c("date", "ws", 'Tair', "rH") + mutate_all(funs(as.numeric)) %>% + mutate(Time = lubridate::ymd_hm(TIMESTAMP_START)) %>% + dplyr::select(Time, WS_1_1_1, TA_1_1_1) %>% + dplyr::na_if(-9999) %>% + add_column(rH = NA) %>% + filter(Time >= start_date & Time <= end_date) + colnames(data) <- c("date", "ws", "Tair", "rH") return(data) } diff --git a/modules/assim.sequential/inst/NEFI/US_Potato/download_Potato.R b/modules/assim.sequential/inst/NEFI/US_Potato/download_Potato.R index 2626e44a6f0..24caec430bc 100644 --- a/modules/assim.sequential/inst/NEFI/US_Potato/download_Potato.R +++ b/modules/assim.sequential/inst/NEFI/US_Potato/download_Potato.R @@ -1,85 +1,77 @@ -#if then statement for date? Only the last 4 days are available in the current folder -#second part would be in the year folder that corresponds with the start date. -#only need the current system date from the current folder. The rest are in the year folder +# if then statement for date? Only the last 4 days are available in the current folder +# second part would be in the year folder that corresponds with the start date. +# only need the current system date from the current folder. The rest are in the year folder -download_US_Potato<- function(start_date, end_date) { - - - #Each .dat file is a seperate half hour file. - date = seq.Date(from = as.Date(start_date), to = as.Date(end_date), by = 'day') - - t <- format(seq(from=as.POSIXct("0000","%H%M", tz="UTC"), - to=as.POSIXct("2330", "%H%M", tz="UTC"), - by="30 min", format = "%H%M"), "%H%M") - #read in the headers to start the data frame - data = read.delim("http://co2.aos.wisc.edu/data/potato/2019/20190128/Potato_flux_2019_01_28_0000.dat", sep = ",", skip = 1, stringsAsFactors = FALSE)[1,] - - #the current day is saved in a different folder so we need a different url - if(end_date == Sys.Date()){ - - for(j in 1: (length(date)-1)){ - - for( i in 1: length(t)){ - - +download_US_Potato <- function(start_date, end_date) { + # Each .dat file is a seperate half hour file. 
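Because the Potato tower writes one .dat file per half hour (as the comment above notes), the loop that follows needs every HHMM label in a day. A minimal sketch of generating those labels — the same idea as the t <- format(seq(...)) line below, written with an explicit format = argument for clarity:

# Sketch: the 48 half-hour labels "0000", "0030", ..., "2330" used to build file names.
half_hours <- format(
  seq(from = as.POSIXct("0000", format = "%H%M", tz = "UTC"),
      to   = as.POSIXct("2330", format = "%H%M", tz = "UTC"),
      by   = "30 min"),
  "%H%M")
head(half_hours)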
+ date <- seq.Date(from = as.Date(start_date), to = as.Date(end_date), by = "day") + + t <- format(seq( + from = as.POSIXct("0000", "%H%M", tz = "UTC"), + to = as.POSIXct("2330", "%H%M", tz = "UTC"), + by = "30 min", format = "%H%M" + ), "%H%M") + # read in the headers to start the data frame + data <- read.delim("http://co2.aos.wisc.edu/data/potato/2019/20190128/Potato_flux_2019_01_28_0000.dat", sep = ",", skip = 1, stringsAsFactors = FALSE)[1, ] + + # the current day is saved in a different folder so we need a different url + if (end_date == Sys.Date()) { + for (j in 1:(length(date) - 1)) { + for (i in 1:length(t)) { baseurl <- "http://co2.aos.wisc.edu/data/potato/" url <- paste0(baseurl, lubridate::year(date[j]), "/", gsub("-", "", (date[j])), "/Potato_flux_", gsub("-", "_", (date[j])), "_", t[i], ".dat") - if(url.exists(url)){ - data<- rbind(data, read.delim(url, sep = ",", skip = 1, stringsAsFactors = FALSE)[3,]) - }else { + if (url.exists(url)) { + data <- rbind(data, read.delim(url, sep = ",", skip = 1, stringsAsFactors = FALSE)[3, ]) + } else { index.time <- strsplit(t[i], "") - index <- c(paste0(date[j], " ", index.time[[1]][1], index.time[[1]][2] , ":", index.time[[1]][3], index.time[[1]][4] , ":00"), rep("NA", 97)) - data <- rbind(data, index ) + index <- c(paste0(date[j], " ", index.time[[1]][1], index.time[[1]][2], ":", index.time[[1]][3], index.time[[1]][4], ":00"), rep("NA", 97)) + data <- rbind(data, index) } - } } - - #Have to adjust the time length because it's current. Going back 2 hours just to be safe with lag and errors - p <- format(seq(from=as.POSIXct("0000","%H%M", tz="UTC"), - to=as.POSIXct(format(lubridate::round_date(Sys.time() - lubridate::hours(2), unit = "30 min"), "%H%M"), "%H%M", tz="UTC"), - by="30 min", format = "%H%M"), "%H%M") - - for(i in 1:length(p)){ - + + # Have to adjust the time length because it's current. 
Going back 2 hours just to be safe with lag and errors + p <- format(seq( + from = as.POSIXct("0000", "%H%M", tz = "UTC"), + to = as.POSIXct(format(lubridate::round_date(Sys.time() - lubridate::hours(2), unit = "30 min"), "%H%M"), "%H%M", tz = "UTC"), + by = "30 min", format = "%H%M" + ), "%H%M") + + for (i in 1:length(p)) { url1 <- paste0("http://co2.aos.wisc.edu/data/potato/current/Potato_flux_", gsub("-", "_", end_date), "_", p[i], ".dat") - data<- rbind(data, read.delim(url1, sep = ",", skip = 1, stringsAsFactors = FALSE)[3,]) - + data <- rbind(data, read.delim(url1, sep = ",", skip = 1, stringsAsFactors = FALSE)[3, ]) } - } else{ - - for(j in 1: (length(date))){ - - for( i in 1: length(t)){ - + } else { + for (j in 1:(length(date))) { + for (i in 1:length(t)) { baseurl <- "http://co2.aos.wisc.edu/data/potato/" url <- paste0(baseurl, lubridate::year(date[j]), "/", gsub("-", "", (date[j])), "/Potato_flux_", gsub("-", "_", (date[j])), "_", t[i], ".dat") - if(url.exists(url)){ - data<- rbind(data, read.delim(url, sep = ",", skip = 1, stringsAsFactors = FALSE)[3,]) - }else { + if (url.exists(url)) { + data <- rbind(data, read.delim(url, sep = ",", skip = 1, stringsAsFactors = FALSE)[3, ]) + } else { index.time <- strsplit(t[i], "") - index <- c(paste0(date[j], " ", index.time[[1]][1], index.time[[1]][2] , ":", index.time[[1]][3], index.time[[1]][4] , ":00"), rep("NA", 97)) - data <- rbind(data, index ) + index <- c(paste0(date[j], " ", index.time[[1]][1], index.time[[1]][2], ":", index.time[[1]][3], index.time[[1]][4], ":00"), rep("NA", 97)) + data <- rbind(data, index) } - } } } - #want to pull out timestamp, u_star, - - #clean data - - data <- data[-1,] #remove units - - data <- data %>% dplyr::select("TIMESTAMP", "u_star" ,"LE_wpl", "Fc_wpl", "CO2_sig_strgth_mean", "H2O_sig_strgth_mean") %>% - mutate(NEE = replace(Fc_wpl, u_star < 0.1, "NA"), LE = replace(LE_wpl, u_star < 0.1, "NA")) %>% - mutate(NEE = replace(NEE, CO2_sig_strgth_mean < 0.6, "NA"), LE = replace(LE, H2O_sig_strgth_mean < 0.6, "NA")) %>% - dplyr::select("TIMESTAMP", "NEE", "LE") %>% - mutate(NEE = as.numeric(NEE), LE = as.numeric(LE)) %>% - na_if( -999) %>% - mutate(NEE = PEcAn.utils::misc.convert(NEE, "umol C m-2 s-1", "kg C m-2 s-1"), LE = as.numeric(LE)) - - + # want to pull out timestamp, u_star, + + # clean data + + data <- data[-1, ] # remove units + + data <- data %>% + dplyr::select("TIMESTAMP", "u_star", "LE_wpl", "Fc_wpl", "CO2_sig_strgth_mean", "H2O_sig_strgth_mean") %>% + mutate(NEE = replace(Fc_wpl, u_star < 0.1, "NA"), LE = replace(LE_wpl, u_star < 0.1, "NA")) %>% + mutate(NEE = replace(NEE, CO2_sig_strgth_mean < 0.6, "NA"), LE = replace(LE, H2O_sig_strgth_mean < 0.6, "NA")) %>% + dplyr::select("TIMESTAMP", "NEE", "LE") %>% + mutate(NEE = as.numeric(NEE), LE = as.numeric(LE)) %>% + na_if(-999) %>% + mutate(NEE = PEcAn.utils::misc.convert(NEE, "umol C m-2 s-1", "kg C m-2 s-1"), LE = as.numeric(LE)) + + colnames(data) <- c("Time", "NEE", "LE") return(data) @@ -87,8 +79,6 @@ download_US_Potato<- function(start_date, end_date) { -#download_potato(start_date, end_date) -#start_date = "2019-07-22" -#end_date = "2019-07-24" - - +# download_potato(start_date, end_date) +# start_date = "2019-07-22" +# end_date = "2019-07-24" diff --git a/modules/assim.sequential/inst/NEFI/US_Syv/download_Syv.R b/modules/assim.sequential/inst/NEFI/US_Syv/download_Syv.R index e4c854997ac..c3ec77a46d6 100644 --- a/modules/assim.sequential/inst/NEFI/US_Syv/download_Syv.R +++ b/modules/assim.sequential/inst/NEFI/US_Syv/download_Syv.R @@ 
-3,40 +3,37 @@ library(tidyverse) library(lubridate) download_US_Syv <- function(start_date, end_date) { - - base_url <- "http://co2.aos.wisc.edu/data/cheas/sylvania/flux/current/ameriflux/US-Syv_HH_" + base_url <- "http://co2.aos.wisc.edu/data/cheas/sylvania/flux/current/ameriflux/US-Syv_HH_" start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + # Reading in the data data <- start_year:end_year %>% - purrr::map_df(function(syear){ + purrr::map_df(function(syear) { influx <- tryCatch( read.table( - paste0(base_url, syear, '01010000_', syear + 1, "01010000.csv"), + paste0(base_url, syear, "01010000_", syear + 1, "01010000.csv"), sep = ",", header = TRUE ) %>% - apply(2, trimws) %>% - apply(2, as.character) %>% - data.frame(stringsAsFactors = F), - error = function(e) { - NULL - }, - warning = function(e) { - NULL - } - ) + apply(2, trimws) %>% + apply(2, as.character) %>% + data.frame(stringsAsFactors = F), + error = function(e) { + NULL + }, + warning = function(e) { + NULL + } + ) }) %>% - mutate_all(funs(as.numeric)) %>% - mutate(Time = lubridate::ymd_hm(TIMESTAMP_START)) %>% - dplyr::select(Time, NEE, LE = LE_1_1_1) %>% - dplyr::na_if(-9999) %>% - mutate(NEE = PEcAn.utils::misc.convert(NEE, "umol C m-2 s-1", "kg C m-2 s-1")) %>% - filter(Time >= start_date & Time <=end_date) - + mutate_all(funs(as.numeric)) %>% + mutate(Time = lubridate::ymd_hm(TIMESTAMP_START)) %>% + dplyr::select(Time, NEE, LE = LE_1_1_1) %>% + dplyr::na_if(-9999) %>% + mutate(NEE = PEcAn.utils::misc.convert(NEE, "umol C m-2 s-1", "kg C m-2 s-1")) %>% + filter(Time >= start_date & Time <= end_date) + return(data) } - - \ No newline at end of file diff --git a/modules/assim.sequential/inst/NEFI/US_Syv/download_Syv_met.R b/modules/assim.sequential/inst/NEFI/US_Syv/download_Syv_met.R index cefa42bdb9f..64106a5df75 100644 --- a/modules/assim.sequential/inst/NEFI/US_Syv/download_Syv_met.R +++ b/modules/assim.sequential/inst/NEFI/US_Syv/download_Syv_met.R @@ -3,18 +3,17 @@ library(tidyverse) library(lubridate) download_US_Syv_met <- function(start_date, end_date) { - - base_url <- "http://co2.aos.wisc.edu/data/cheas/sylvania/flux/current/ameriflux/US-Syv_HH_" + base_url <- "http://co2.aos.wisc.edu/data/cheas/sylvania/flux/current/ameriflux/US-Syv_HH_" start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + # Reading in the data data <- start_year:end_year %>% - purrr::map_df(function(syear){ + purrr::map_df(function(syear) { influx <- tryCatch( read.table( - paste0(base_url, syear, '01010000_', syear + 1, "01010000.csv"), + paste0(base_url, syear, "01010000_", syear + 1, "01010000.csv"), sep = ",", header = TRUE ) %>% @@ -29,12 +28,11 @@ download_US_Syv_met <- function(start_date, end_date) { } ) }) %>% - mutate_all(funs(as.numeric)) %>% - mutate(Time = lubridate::ymd_hm(TIMESTAMP_START)) %>% - dplyr::select(Time, WS_1_1_1, TA_1_1_1, RH_1_1_1, TS_1_1_1) %>% - dplyr::na_if(-9999) %>% - filter(Time >= start_date & Time <=end_date) - colnames(data) = c("date", "ws", 'Tair', "rH", "Tsoil") + mutate_all(funs(as.numeric)) %>% + mutate(Time = lubridate::ymd_hm(TIMESTAMP_START)) %>% + dplyr::select(Time, WS_1_1_1, TA_1_1_1, RH_1_1_1, TS_1_1_1) %>% + dplyr::na_if(-9999) %>% + filter(Time >= start_date & Time <= end_date) + colnames(data) <- c("date", "ws", "Tair", "rH", "Tsoil") return(data) } - diff --git a/modules/assim.sequential/inst/NEFI/US_Syv/download_soilmoist_Syv.R b/modules/assim.sequential/inst/NEFI/US_Syv/download_soilmoist_Syv.R index e8c022c8c45..cb782e128cc 
100644 --- a/modules/assim.sequential/inst/NEFI/US_Syv/download_soilmoist_Syv.R +++ b/modules/assim.sequential/inst/NEFI/US_Syv/download_soilmoist_Syv.R @@ -1,16 +1,16 @@ download_soilmoist_Syv <- function(start_date, end_date) { base_url <- "http://co2.aos.wisc.edu/data/cheas/sylvania/flux/prelim/clean/ameriflux/US-Syv_HH_" - + start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + # Reading in the data raw.data <- start_year:end_year %>% purrr::map_df(function(syear) { influx <- tryCatch( read.table( - paste0(base_url, syear, "01010000_", syear+1, "01010000.csv"), + paste0(base_url, syear, "01010000_", syear + 1, "01010000.csv"), sep = ",", header = TRUE, stringsAsFactors = F ) %>% @@ -26,15 +26,16 @@ download_soilmoist_Syv <- function(start_date, end_date) { ) }) %>% mutate_all(funs(as.numeric)) - - #Constructing the date based on the columns we have - raw.data$Time <-as.POSIXct(as.character(raw.data$TIMESTAMP_START), - format="%Y%m%d%H%M", tz="UTC") - # Some cleaning and filtering - raw.data <- raw.data %>% + + # Constructing the date based on the columns we have + raw.data$Time <- as.POSIXct(as.character(raw.data$TIMESTAMP_START), + format = "%Y%m%d%H%M", tz = "UTC" + ) + # Some cleaning and filtering + raw.data <- raw.data %>% dplyr::select(SWC_1_1_1, Time) %>% - na_if(-9999) %>% - filter(Time >= start_date & Time <=end_date) - colnames(raw.data) <- c('avgsoil', 'Time') + na_if(-9999) %>% + filter(Time >= start_date & Time <= end_date) + colnames(raw.data) <- c("avgsoil", "Time") return(raw.data) } diff --git a/modules/assim.sequential/inst/NEFI/US_WCr/download_WCr.R b/modules/assim.sequential/inst/NEFI/US_WCr/download_WCr.R index ddb3ec75e11..1418b9f083a 100644 --- a/modules/assim.sequential/inst/NEFI/US_WCr/download_WCr.R +++ b/modules/assim.sequential/inst/NEFI/US_WCr/download_WCr.R @@ -1,9 +1,9 @@ download_US_WCr <- function(start_date, end_date) { - base_url <- "http://co2.aos.wisc.edu/data/cheas/wcreek/flux/prelim/wcreek" + base_url <- "http://co2.aos.wisc.edu/data/cheas/wcreek/flux/prelim/wcreek" start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - - + + # Reading in the data raw.data <- start_year:end_year %>% purrr::map_df(function(syear) { @@ -27,29 +27,33 @@ download_US_WCr <- function(start_date, end_date) { }) %>% mutate_all(funs(as.numeric)) - if(dim(raw.data)[1] > 0 & dim(raw.data)[2] > 0){ - #Constructing the date based on the columns we have - raw.data$date <-as.POSIXct(paste0(raw.data$V1,"/",raw.data$V2,"/",raw.data$V3," ", raw.data$V4 %>% as.integer(), ":",(raw.data$V4-as.integer(raw.data$V4))*60), - format="%Y/%m/%d %H:%M", tz="UTC") - - raw.data <- raw.data %>% dplyr::select(date, V9, V10) %>% - filter(date >= start_date & date <=end_date) %>% - na_if( -999) %>% - mutate(V9 = PEcAn.utils::misc.convert(V9, "umol C m-2 s-1", "kg C m-2 s-1") ) - colnames(raw.data) <- c("Time", "NEE", "LE") - }else(raw.data = NULL) #end if statment - # Some cleaning and filtering - #raw.data <- raw.data %>% - # select(-V5, -V6) %>% + if (dim(raw.data)[1] > 0 & dim(raw.data)[2] > 0) { + # Constructing the date based on the columns we have + raw.data$date <- as.POSIXct(paste0(raw.data$V1, "/", raw.data$V2, "/", raw.data$V3, " ", raw.data$V4 %>% as.integer(), ":", (raw.data$V4 - as.integer(raw.data$V4)) * 60), + format = "%Y/%m/%d %H:%M", tz = "UTC" + ) + + raw.data <- raw.data %>% + dplyr::select(date, V9, V10) %>% + filter(date >= start_date & date <= end_date) %>% + na_if(-999) %>% + mutate(V9 = PEcAn.utils::misc.convert(V9, 
"umol C m-2 s-1", "kg C m-2 s-1")) + colnames(raw.data) <- c("Time", "NEE", "LE") + } else { + (raw.data <- NULL) + } # end if statment + # Some cleaning and filtering + # raw.data <- raw.data %>% + # select(-V5, -V6) %>% # filter(date <=end_date) - - #Colnames changed - + + # Colnames changed + return(raw.data) } # start_date <- as.Date("2017-01-01") # end_date <- as.Date("2018-10-01") -# +# # download_US_WCr(start_date, end_date) ->pp -# -# tail(pp) \ No newline at end of file +# +# tail(pp) diff --git a/modules/assim.sequential/inst/NEFI/US_WCr/download_WCr_met.R b/modules/assim.sequential/inst/NEFI/US_WCr/download_WCr_met.R index dc6f3fad9f1..621d377885b 100644 --- a/modules/assim.sequential/inst/NEFI/US_WCr/download_WCr_met.R +++ b/modules/assim.sequential/inst/NEFI/US_WCr/download_WCr_met.R @@ -1,10 +1,10 @@ download_US_WCr_met <- function(start_date, end_date) { base_url <- "http://co2.aos.wisc.edu/data/cheas/wcreek/flux/prelim/wcreek" - + start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + # Reading in the data raw.data <- start_year:end_year %>% purrr::map_df(function(syear) { @@ -27,25 +27,28 @@ download_US_WCr_met <- function(start_date, end_date) { ) }) %>% mutate_all(funs(as.numeric)) - - - if(dim(raw.data)[1] > 0 & dim(raw.data)[2] > 0){ - #Constructing the date based on the columns we have - raw.data$date <-as.POSIXct(paste0(raw.data$V1,"/",raw.data$V2,"/",raw.data$V3," ", raw.data$V4 %>% as.integer(), ":",(raw.data$V4-as.integer(raw.data$V4))*60), - format="%Y/%m/%d %H:%M", tz="UTC") - # Some cleaning and filtering - raw.data <- raw.data %>% - dplyr::select(V1,V2,V3,V4,V5, V6, V26, V35, V40, V59, V51, V61, V17, V58, date) %>% - filter(date >= start_date & date <=end_date) - - #Colnames changed - colnames(raw.data) <- c("Year", "Month", "Day", "Hour", "DoY", "FjDay", "Tair", "rH", "Tsoil", "Rg", "P_atm", "LW", "WS" , "Rain", "date") - }else(raw.data = NULL) + + + if (dim(raw.data)[1] > 0 & dim(raw.data)[2] > 0) { + # Constructing the date based on the columns we have + raw.data$date <- as.POSIXct(paste0(raw.data$V1, "/", raw.data$V2, "/", raw.data$V3, " ", raw.data$V4 %>% as.integer(), ":", (raw.data$V4 - as.integer(raw.data$V4)) * 60), + format = "%Y/%m/%d %H:%M", tz = "UTC" + ) + # Some cleaning and filtering + raw.data <- raw.data %>% + dplyr::select(V1, V2, V3, V4, V5, V6, V26, V35, V40, V59, V51, V61, V17, V58, date) %>% + filter(date >= start_date & date <= end_date) + + # Colnames changed + colnames(raw.data) <- c("Year", "Month", "Day", "Hour", "DoY", "FjDay", "Tair", "rH", "Tsoil", "Rg", "P_atm", "LW", "WS", "Rain", "date") + } else { + (raw.data <- NULL) + } return(raw.data) } # start_date <- as.Date("2017-01-01") # end_date <- as.Date("2018-10-01") -# -#download_US_WCr_met(start_date, end_date) ->met -# -# tail(pp) \ No newline at end of file +# +# download_US_WCr_met(start_date, end_date) ->met +# +# tail(pp) diff --git a/modules/assim.sequential/inst/NEFI/US_WCr/download_soilmoist_WCr.R b/modules/assim.sequential/inst/NEFI/US_WCr/download_soilmoist_WCr.R index 6b64599b979..f718fb10bfb 100644 --- a/modules/assim.sequential/inst/NEFI/US_WCr/download_soilmoist_WCr.R +++ b/modules/assim.sequential/inst/NEFI/US_WCr/download_soilmoist_WCr.R @@ -1,16 +1,16 @@ download_soilmoist_WCr <- function(start_date, end_date) { base_url <- "http://co2.aos.wisc.edu/data/cheas/wcreek/flux/prelim/clean/ameriflux/US-WCr_HH_" - + start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + # Reading in the data raw.data <- 
start_year:end_year %>% purrr::map_df(function(syear) { influx <- tryCatch( read.table( - paste0(base_url, syear, "01010000_", syear+1, "01010000.csv"), + paste0(base_url, syear, "01010000_", syear + 1, "01010000.csv"), sep = ",", header = TRUE, stringsAsFactors = F ) %>% @@ -26,27 +26,30 @@ download_soilmoist_WCr <- function(start_date, end_date) { ) }) %>% mutate_all(funs(as.numeric)) - - #Constructing the date based on the columns we have - if(dim(raw.data)[1] > 0 & dim(raw.data)[2] > 0){ - raw.data$Time <-as.POSIXct(as.character(raw.data$TIMESTAMP_START), - format="%Y%m%d%H%M", tz="UTC") - # Some cleaning and filtering + + # Constructing the date based on the columns we have + if (dim(raw.data)[1] > 0 & dim(raw.data)[2] > 0) { + raw.data$Time <- as.POSIXct(as.character(raw.data$TIMESTAMP_START), + format = "%Y%m%d%H%M", tz = "UTC" + ) + # Some cleaning and filtering # SWC origionally has units = % at depths 2_1 = 5cm, 2_2 = 10cm, 2_3 = 20cm, 2_4 = 30cm, 2_5 = 40cm, 2_6 = 50cm - raw.data <- raw.data %>% + raw.data <- raw.data %>% dplyr::select(SWC_1_1_1, SWC_1_2_1, SWC_1_3_1, SWC_1_4_1, SWC_1_5_1, SWC_2_1_1, SWC_2_2_1, SWC_2_3_1, SWC_2_4_1, SWC_2_5_1, SWC_2_6_1, Time) %>% - na_if(-9999) %>% - filter(Time >= start_date & Time <=end_date) - - #get average soil moisture - - #with all depths - #raw.data$avgsoil <- raw.data$SWC_2_1_1*.05 + raw.data$SWC_2_2_1*.10 + raw.data$SWC_2_3_1*.20 + raw.data$SWC_2_4_1*.30 + raw.data$SWC_2_5_1*.40 + raw.data$SWC_2_6_1*.50 - - #shallow depths (>30cm) - raw.data$avgsoil2 <- raw.data$SWC_2_1_1 #*.05 + raw.data$SWC_2_2_1*.10 + raw.data$SWC_2_3_1*.20 - raw.data$avgsoil1 <- raw.data$SWC_1_2_1*0.12 + raw.data$SWC_1_3_1*0.16 + raw.data$SWC_1_4_1*0.32 + raw.data$SWC_1_5_1*0.4 #old sensor + na_if(-9999) %>% + filter(Time >= start_date & Time <= end_date) + + # get average soil moisture + + # with all depths + # raw.data$avgsoil <- raw.data$SWC_2_1_1*.05 + raw.data$SWC_2_2_1*.10 + raw.data$SWC_2_3_1*.20 + raw.data$SWC_2_4_1*.30 + raw.data$SWC_2_5_1*.40 + raw.data$SWC_2_6_1*.50 + + # shallow depths (>30cm) + raw.data$avgsoil2 <- raw.data$SWC_2_1_1 #* .05 + raw.data$SWC_2_2_1*.10 + raw.data$SWC_2_3_1*.20 + raw.data$avgsoil1 <- raw.data$SWC_1_2_1 * 0.12 + raw.data$SWC_1_3_1 * 0.16 + raw.data$SWC_1_4_1 * 0.32 + raw.data$SWC_1_5_1 * 0.4 # old sensor raw.data <- raw.data %>% dplyr::select(Time, avgsoil1, avgsoil2) - }else(raw.data <- NULL) + } else { + (raw.data <- NULL) + } return(raw.data) } diff --git a/modules/assim.sequential/inst/NEFI/US_WLEF/download_WLEF.R b/modules/assim.sequential/inst/NEFI/US_WLEF/download_WLEF.R index 1633e04cf7c..dee5bbb3ad1 100644 --- a/modules/assim.sequential/inst/NEFI/US_WLEF/download_WLEF.R +++ b/modules/assim.sequential/inst/NEFI/US_WLEF/download_WLEF.R @@ -1,9 +1,9 @@ download_US_WLEF <- function(start_date, end_date) { - base_url <- "http://co2.aos.wisc.edu/data/cheas/wlef/flux/prelim/" + base_url <- "http://co2.aos.wisc.edu/data/cheas/wlef/flux/prelim/" start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + # Reading in the data raw.data <- start_year:end_year %>% purrr::map_df(function(syear) { @@ -26,29 +26,31 @@ download_US_WLEF <- function(start_date, end_date) { ) }) %>% mutate_all(funs(as.numeric)) - - #Constructing the date based on the columns we have - raw.data$date <-as.POSIXct(paste0(raw.data$Year,"/",raw.data$MO,"/",raw.data$DD," ", raw.data$HH), - format="%Y/%m/%d %H", tz="UTC") - - raw.data <- raw.data %>% dplyr::select(date, NEE_122, LE_122) %>% - filter(date >= start_date & date 
<=end_date) %>% - na_if( -999) %>% + + # Constructing the date based on the columns we have + raw.data$date <- as.POSIXct(paste0(raw.data$Year, "/", raw.data$MO, "/", raw.data$DD, " ", raw.data$HH), + format = "%Y/%m/%d %H", tz = "UTC" + ) + + raw.data <- raw.data %>% + dplyr::select(date, NEE_122, LE_122) %>% + filter(date >= start_date & date <= end_date) %>% + na_if(-999) %>% mutate(NEE_122 = PEcAn.utils::misc.convert(NEE_122, "umol C m-2 s-1", "kg C m-2 s-1")) - + colnames(raw.data) <- c("Time", "NEE", "LE") - # Some cleaning and filtering - #raw.data <- raw.data %>% + # Some cleaning and filtering + # raw.data <- raw.data %>% # select(-V5, -V6) %>% # filter(date <=end_date) - - #Colnames changed - + + # Colnames changed + return(raw.data) } # start_date <- as.Date("2017-01-01") # end_date <- as.Date("2018-10-01") -# +# # download_US_WCr(start_date, end_date) ->pp -# -# tail(pp) \ No newline at end of file +# +# tail(pp) diff --git a/modules/assim.sequential/inst/NEFI/US_WLEF/download_WLEF_met.R b/modules/assim.sequential/inst/NEFI/US_WLEF/download_WLEF_met.R index d7aa10548ca..6cffbc4c887 100644 --- a/modules/assim.sequential/inst/NEFI/US_WLEF/download_WLEF_met.R +++ b/modules/assim.sequential/inst/NEFI/US_WLEF/download_WLEF_met.R @@ -1,10 +1,10 @@ download_US_WLEF_met <- function(start_date, end_date) { base_url <- "http://co2.aos.wisc.edu/data/cheas/wlef/flux/prelim/" - - + + start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + # Reading in the data raw.data <- start_year:end_year %>% purrr::map_df(function(syear) { @@ -27,17 +27,18 @@ download_US_WLEF_met <- function(start_date, end_date) { ) }) %>% mutate_all(funs(as.numeric)) - - #Constructing the date based on the columns we have - raw.data$date <-as.POSIXct(paste0(raw.data$Year,"/",raw.data$MO,"/",raw.data$DD," ", raw.data$HH), - format="%Y/%m/%d %H", tz="UTC") - - # Some cleaning and filtering - raw.data <- raw.data %>% - dplyr::select(Year,MO,DD,HH,DOY, fDOY, T122, RH122, Patm, Precip, PAR, date) %>% - filter(date >= start_date & date <=end_date) - + + # Constructing the date based on the columns we have + raw.data$date <- as.POSIXct(paste0(raw.data$Year, "/", raw.data$MO, "/", raw.data$DD, " ", raw.data$HH), + format = "%Y/%m/%d %H", tz = "UTC" + ) + + # Some cleaning and filtering + raw.data <- raw.data %>% + dplyr::select(Year, MO, DD, HH, DOY, fDOY, T122, RH122, Patm, Precip, PAR, date) %>% + filter(date >= start_date & date <= end_date) + colnames(raw.data) <- c("Year", "Month", "Day", "Hour", "DOY", "FjDay", "Tair", "rH", "P_atm", "Rain", "PAR", "date") - + return(raw.data) -} \ No newline at end of file +} diff --git a/modules/assim.sequential/inst/NEFI/US_WLEF/download_soilmoist_WLEF.R b/modules/assim.sequential/inst/NEFI/US_WLEF/download_soilmoist_WLEF.R index d52daa6a8b1..c392023ebb0 100644 --- a/modules/assim.sequential/inst/NEFI/US_WLEF/download_soilmoist_WLEF.R +++ b/modules/assim.sequential/inst/NEFI/US_WLEF/download_soilmoist_WLEF.R @@ -1,16 +1,16 @@ download_soilmoist_WLEF <- function(start_date, end_date) { base_url <- "http://co2.aos.wisc.edu/data/cheas/wlef/flux/prelim/clean/ameriflux/US-PFa_HR_" - + start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + # Reading in the data raw.data <- start_year:end_year %>% purrr::map_df(function(syear) { influx <- tryCatch( read.table( - paste0(base_url, syear, "01010000_", syear+1, "01010000.csv"), + paste0(base_url, syear, "01010000_", syear + 1, "01010000.csv"), sep = ",", header = TRUE, stringsAsFactors = 
F ) %>% @@ -26,15 +26,16 @@ download_soilmoist_WLEF <- function(start_date, end_date) { ) }) %>% mutate_all(funs(as.numeric)) - - #Constructing the date based on the columns we have - raw.data$Time <-as.POSIXct(as.character(raw.data$TIMESTAMP_START), - format="%Y%m%d%H%M", tz="UTC") - # Some cleaning and filtering - raw.data <- raw.data %>% + + # Constructing the date based on the columns we have + raw.data$Time <- as.POSIXct(as.character(raw.data$TIMESTAMP_START), + format = "%Y%m%d%H%M", tz = "UTC" + ) + # Some cleaning and filtering + raw.data <- raw.data %>% dplyr::select(SWC_1_1_1, Time) %>% - na_if(-9999) %>% - filter(Time >= start_date & Time <=end_date) - colnames(raw.data) <- c('avgsoil', 'Time') + na_if(-9999) %>% + filter(Time >= start_date & Time <= end_date) + colnames(raw.data) <- c("avgsoil", "Time") return(raw.data) } diff --git a/modules/assim.sequential/inst/NEFI/email.R b/modules/assim.sequential/inst/NEFI/email.R index 2fe56ae5422..2810960c0b4 100644 --- a/modules/assim.sequential/inst/NEFI/email.R +++ b/modules/assim.sequential/inst/NEFI/email.R @@ -1,23 +1,19 @@ -####Send Emails of WCr Forecasts #### +#### Send Emails of WCr Forecasts #### library(EasyHTMLReport) emails <- read.csv("/fs/data3/kzarada/NEFI/Willow_Creek/emails.csv", header = T) -for(i in 1:dim(emails)[1]){ -from <- 'kzarada428@gmail.com' -to <- dput(as.character(emails$Email[i])) -subject <- paste0("Willow Creek Forecast for ", Sys.Date()) -mailControl = list(host.name = 'smtp.gmail.com', port = 465, user.name = 'kzarada428', passwd = "Poland5178", ssl = T) - -easyHtmlReport(rmd.file = "/fs/data3/kzarada/NEFI/Willow_Creek/forecast.Rmd", - from = from, - to = to, - subject = subject, - control = mailControl) - +for (i in 1:dim(emails)[1]) { + from <- "kzarada428@gmail.com" + to <- dput(as.character(emails$Email[i])) + subject <- paste0("Willow Creek Forecast for ", Sys.Date()) + mailControl <- list(host.name = "smtp.gmail.com", port = 465, user.name = "kzarada428", passwd = "Poland5178", ssl = T) + + easyHtmlReport( + rmd.file = "/fs/data3/kzarada/NEFI/Willow_Creek/forecast.Rmd", + from = from, + to = to, + subject = subject, + control = mailControl + ) } - - - - - diff --git a/modules/assim.sequential/inst/NEFI/email_graphs.R b/modules/assim.sequential/inst/NEFI/email_graphs.R index 52eb0650dc9..447d93146f6 100644 --- a/modules/assim.sequential/inst/NEFI/email_graphs.R +++ b/modules/assim.sequential/inst/NEFI/email_graphs.R @@ -1,4 +1,4 @@ -setwd('/fs/data3/kzarada/NEFI/Willow_Creek') +setwd("/fs/data3/kzarada/NEFI/Willow_Creek") library("ggplot2") library("plotly") library("gganimate") @@ -6,19 +6,17 @@ library("tidyverse") library("htmltools") source("/fs/data3/kzarada/NEFI/Willow_Creek/wcr.graphs.R") -### FORECAST -vars = c("NEE", "LE") -site = 676 -for(j in 1:length(vars)){ - - args = c(as.character(Sys.Date()), vars[j], site) - - assign(paste0("data_", vars[j]), wcr.graphs(args)) - - } +### FORECAST +vars <- c("NEE", "LE") +site <- 676 +for (j in 1:length(vars)) { + args <- c(as.character(Sys.Date()), vars[j], site) -nee.data = get("data_NEE") -le.data = get("data_LE") + assign(paste0("data_", vars[j]), wcr.graphs(args)) +} + +nee.data <- get("data_NEE") +le.data <- get("data_LE") nee.data$Time <- as.factor(paste(nee.data$date, nee.data$Time, sep = "_")) nee.data$start_date <- as.factor(nee.data$start_date) @@ -29,29 +27,29 @@ le.data$start_date <- as.factor(le.data$start_date) x.breaks <- unique(nee.data$time[seq(1, length(nee.data$time), by = 4)]) labels <- format(seq.Date(from = Sys.Date(), by = 
"day", length.out = 16), "%m-%d") -nee <- ggplot(nee.data) + - geom_ribbon(aes(x = time, ymin=Lower, ymax=Upper, fill="95% confidence interval"), alpha = 0.4) + - geom_line(aes(x=time, y=Predicted, color="Predicted"), size = 1) + +nee <- ggplot(nee.data) + + geom_ribbon(aes(x = time, ymin = Lower, ymax = Upper, fill = "95% confidence interval"), alpha = 0.4) + + geom_line(aes(x = time, y = Predicted, color = "Predicted"), size = 1) + ggtitle(paste0("Net Ecosystem Exchange Forcast for ", Sys.Date())) + - scale_x_continuous(name="Time (days)", breaks = x.breaks, labels = labels) + - scale_y_continuous(name="NEE (kg C m-2 s-1)") + - scale_colour_manual(name='Legend', values=c("Predicted"="lightskyblue1")) + - scale_fill_manual(name=element_blank(), values=c("95% confidence interval" = "blue3")) + - theme_minimal() + - theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) + scale_x_continuous(name = "Time (days)", breaks = x.breaks, labels = labels) + + scale_y_continuous(name = "NEE (kg C m-2 s-1)") + + scale_colour_manual(name = "Legend", values = c("Predicted" = "lightskyblue1")) + + scale_fill_manual(name = element_blank(), values = c("95% confidence interval" = "blue3")) + + theme_minimal() + + theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) -le <- ggplot(le.data) + - geom_ribbon(aes(x = time, ymin=Lower, ymax=Upper, fill="95% confidence interval"), alpha = 0.4) + - geom_line(aes(x=time, y=Predicted, color="Predicted"), size = 1) + +le <- ggplot(le.data) + + geom_ribbon(aes(x = time, ymin = Lower, ymax = Upper, fill = "95% confidence interval"), alpha = 0.4) + + geom_line(aes(x = time, y = Predicted, color = "Predicted"), size = 1) + ggtitle(paste0("Latent Energy Forcast for ", Sys.Date())) + - scale_x_continuous(name="Time (days)", breaks = x.breaks, labels = labels) + - scale_y_continuous(name="LE (W m-2 s-1)") + - scale_colour_manual(name='Legend', values=c("Predicted"="lightskyblue1")) + - scale_fill_manual(name=element_blank(), values=c("95% confidence interval" = "blue3")) + - theme_minimal() + - theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) + scale_x_continuous(name = "Time (days)", breaks = x.breaks, labels = labels) + + scale_y_continuous(name = "LE (W m-2 s-1)") + + scale_colour_manual(name = "Legend", values = c("Predicted" = "lightskyblue1")) + + scale_fill_manual(name = element_blank(), values = c("95% confidence interval" = "blue3")) + + theme_minimal() + + theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) ### Forecast Horizon @@ -59,22 +57,18 @@ source("/fs/data3/kzarada/NEFI/Willow_Creek/animated_WCr_graphs.R") sub.nee <- subset(nee.data, nee.data$Time == paste0(Sys.Date() - lubridate::days(1), "_12")) -x.breaks = sub.nee$start_date -labels = rev(seq(from 
= 1, to = length(x.breaks), by = 1)) +x.breaks <- sub.nee$start_date +labels <- rev(seq(from = 1, to = length(x.breaks), by = 1)) -horiz <- ggplot(sub.nee, aes(group = 1)) + - geom_ribbon(aes(x = start_date, ymin = Lower, ymax = Upper, fill="95% Confidence Interval"), alpha = 0.4) + - geom_line(aes(x = start_date, y = Predicted, color = "Predicted")) + - geom_line(aes(x = start_date, y = NEE, color = "Observed Data"), size = 1) + +horiz <- ggplot(sub.nee, aes(group = 1)) + + geom_ribbon(aes(x = start_date, ymin = Lower, ymax = Upper, fill = "95% Confidence Interval"), alpha = 0.4) + + geom_line(aes(x = start_date, y = Predicted, color = "Predicted")) + + geom_line(aes(x = start_date, y = NEE, color = "Observed Data"), size = 1) + ggtitle(paste0("Forecast Horizon for ", Sys.Date() - lubridate::days(1))) + - scale_color_manual(name = "Legend", labels = c("Observed Data", "Predicted"), values=c("Observed Data" = "firebrick4", "Predicted" = "skyblue1")) + - scale_fill_manual(labels = c("95% Confidence Interval"), values=c("95% Confidence Interval" = "blue1")) + - scale_y_continuous(name="NEE (kg C m-2 s-1)") + - scale_x_discrete(name = "Days from Observed Date", breaks = x.breaks, labels = labels) + - theme_minimal() + - theme(plot.title = element_text(hjust = 0.5, size = 12), legend.title = element_blank(), legend.text = element_text(size = 10), axis.text.x = element_text(size = 10), axis.text.y = element_text(size = 11), axis.title.y = element_text(size = 12)) - - - - + scale_color_manual(name = "Legend", labels = c("Observed Data", "Predicted"), values = c("Observed Data" = "firebrick4", "Predicted" = "skyblue1")) + + scale_fill_manual(labels = c("95% Confidence Interval"), values = c("95% Confidence Interval" = "blue1")) + + scale_y_continuous(name = "NEE (kg C m-2 s-1)") + + scale_x_discrete(name = "Days from Observed Date", breaks = x.breaks, labels = labels) + + theme_minimal() + + theme(plot.title = element_text(hjust = 0.5, size = 12), legend.title = element_blank(), legend.text = element_text(size = 10), axis.text.x = element_text(size = 10), axis.text.y = element_text(size = 11), axis.title.y = element_text(size = 12)) diff --git a/modules/assim.sequential/inst/NEFI/forecast.graphs.R b/modules/assim.sequential/inst/NEFI/forecast.graphs.R index d57b3325ed7..84fe4a56658 100644 --- a/modules/assim.sequential/inst/NEFI/forecast.graphs.R +++ b/modules/assim.sequential/inst/NEFI/forecast.graphs.R @@ -1,31 +1,36 @@ #### need to create a graph funciton here to call with the args of start time -forecast.graphs <- function(args){ - start_date <- tryCatch(as.POSIXct(args[1]), error = function(e) {NULL} ) +forecast.graphs <- function(args) { + start_date <- tryCatch(as.POSIXct(args[1]), error = function(e) { + NULL + }) if (is.null(start_date)) { in_wid <- as.integer(args[1]) } - dbparms = list( + dbparms <- list( dbname = "bety", host = "128.197.168.114", user = "bety", password = "bety", - driver = "Postgres") + driver = "Postgres" + ) con <- PEcAn.DB::db.open(dbparms) on.exit(PEcAn.DB::db.close(con), add = TRUE) # Identify the workflow with the proper information if (!is.null(start_date)) { - workflows <- PEcAn.DB::db.query(paste0("SELECT * FROM workflows WHERE start_date='", format(start_date, "%Y-%m-%d %H:%M:%S"), - "' ORDER BY id"), con) + workflows <- PEcAn.DB::db.query(paste0( + "SELECT * FROM workflows WHERE start_date='", format(start_date, "%Y-%m-%d %H:%M:%S"), + "' ORDER BY id" + ), con) } else { workflows <- PEcAn.DB::db.query(paste0("SELECT * FROM workflows WHERE id='", in_wid, 
"'"), con) } print(workflows) - workflows <- workflows[which(workflows$site_id == args[3]),] + workflows <- workflows[which(workflows$site_id == args[3]), ] if (nrow(workflows) > 1) { - workflow <- workflows[1,] + workflow <- workflows[1, ] } else { workflow <- workflows } @@ -34,97 +39,116 @@ forecast.graphs <- function(args){ print(paste0("Using workflow ", workflow$id)) wid <- workflow$id outdir <- args[4] - pecan_out_dir <- paste0(outdir, "PEcAn_", wid, "/out"); + pecan_out_dir <- paste0(outdir, "PEcAn_", wid, "/out") pecan_out_dirs <- list.dirs(path = pecan_out_dir) if (is.na(pecan_out_dirs[1])) { print(paste0(pecan_out_dirs, " does not exist.")) } - #neemat <- matrix(1:64, nrow=1, ncol=64) # Proxy row, will be deleted later. - #qlemat <- matrix(1:64, nrow=1, ncol=64)# Proxy row, will be deleted later. + # neemat <- matrix(1:64, nrow=1, ncol=64) # Proxy row, will be deleted later. + # qlemat <- matrix(1:64, nrow=1, ncol=64)# Proxy row, will be deleted later. neemat <- vector() qlemat <- vector() soilmoist <- vector() time <- vector() - num_results <- 0; + num_results <- 0 for (i in 2:length(pecan_out_dirs)) { - #datafile <- file.path(pecan_out_dirs[i], format(workflow$start_date, "%Y.nc")) + # datafile <- file.path(pecan_out_dirs[i], format(workflow$start_date, "%Y.nc")) datafiles <- list.files(pecan_out_dirs[i]) datafiles <- datafiles[grep("*.nc$", datafiles)] - if (length(datafiles) == 0) { + if (length(datafiles) == 0) { print(paste0("File ", pecan_out_dirs[i], " does not exist.")) next } - if(length(datafiles) == 1){ - - file = paste0(pecan_out_dirs[i],'/', datafiles[1]) - - num_results <- num_results + 1 - - #open netcdf file - ncptr <- ncdf4::nc_open(file); - - # Attach data to matricies - nee <- ncdf4::ncvar_get(ncptr, "NEE") - if(i == 2){ neemat <- nee} else{neemat <- cbind(neemat,nee)} - - qle <- ncdf4::ncvar_get(ncptr, "Qle") - if(i == 2){ qlemat <- qle} else{qlemat <- cbind(qlemat,qle)} - - soil <- ncdf4::ncvar_get(ncptr, "SoilMoistFrac") - if(i == 2){ soilmoist <- soil} else{soilmoist <- cbind(soilmoist,soil)} - - sec <- ncptr$dim$time$vals - origin <- strsplit(ncptr$dim$time$units, " ")[[1]][3] - - # Close netcdf file - ncdf4::nc_close(ncptr) - } - - if(length(datafiles) > 1){ - - - file = paste0(pecan_out_dirs[i],'/', datafiles[1]) - file2 = paste0(pecan_out_dirs[i],'/', datafiles[2]) - - num_results <- num_results + 1 - - #open netcdf file - ncptr1 <- ncdf4::nc_open(file); - ncptr2 <- ncdf4::nc_open(file2); - # Attach data to matricies - nee1 <- ncdf4::ncvar_get(ncptr1, "NEE") - nee2 <- ncdf4::ncvar_get(ncptr2, "NEE") - nee <- c(nee1, nee2) - if(i == 2){ neemat <- nee} else{neemat <- cbind(neemat,nee)} - - qle1 <- ncdf4::ncvar_get(ncptr1, "Qle") - qle2 <- ncdf4::ncvar_get(ncptr2, "Qle") - qle <- c(qle1, qle2) - - if(i == 2){ qlemat <- qle} else{qlemat <- cbind(qlemat,qle)} - - soil1 <- ncdf4::ncvar_get(ncptr1, "SoilMoistFrac") - soil2 <- ncdf4::ncvar_get(ncptr2, "SoilMoistFrac") - soil <- c(soil1, soil2) - if(i == 2){ soilmoist <- soil} else{soilmoist <- cbind(soilmoist,soil)} - - - sec <- c(ncptr1$dim$time$vals, ncptr2$dim$time$vals+ last(ncptr1$dim$time$vals)) - origin <- strsplit(ncptr1$dim$time$units, " ")[[1]][3] - - - # Close netcdf file - ncdf4::nc_close(ncptr1) - ncdf4::nc_close(ncptr2) - + if (length(datafiles) == 1) { + file <- paste0(pecan_out_dirs[i], "/", datafiles[1]) + + num_results <- num_results + 1 + + # open netcdf file + ncptr <- ncdf4::nc_open(file) + + # Attach data to matricies + nee <- ncdf4::ncvar_get(ncptr, "NEE") + if (i == 2) { + neemat <- 
nee + } else { + neemat <- cbind(neemat, nee) + } + + qle <- ncdf4::ncvar_get(ncptr, "Qle") + if (i == 2) { + qlemat <- qle + } else { + qlemat <- cbind(qlemat, qle) + } + + soil <- ncdf4::ncvar_get(ncptr, "SoilMoistFrac") + if (i == 2) { + soilmoist <- soil + } else { + soilmoist <- cbind(soilmoist, soil) + } + + sec <- ncptr$dim$time$vals + origin <- strsplit(ncptr$dim$time$units, " ")[[1]][3] + + # Close netcdf file + ncdf4::nc_close(ncptr) } + if (length(datafiles) > 1) { + file <- paste0(pecan_out_dirs[i], "/", datafiles[1]) + file2 <- paste0(pecan_out_dirs[i], "/", datafiles[2]) + + num_results <- num_results + 1 + + # open netcdf file + ncptr1 <- ncdf4::nc_open(file) + ncptr2 <- ncdf4::nc_open(file2) + # Attach data to matricies + nee1 <- ncdf4::ncvar_get(ncptr1, "NEE") + nee2 <- ncdf4::ncvar_get(ncptr2, "NEE") + nee <- c(nee1, nee2) + if (i == 2) { + neemat <- nee + } else { + neemat <- cbind(neemat, nee) + } + + qle1 <- ncdf4::ncvar_get(ncptr1, "Qle") + qle2 <- ncdf4::ncvar_get(ncptr2, "Qle") + qle <- c(qle1, qle2) + + if (i == 2) { + qlemat <- qle + } else { + qlemat <- cbind(qlemat, qle) + } + + soil1 <- ncdf4::ncvar_get(ncptr1, "SoilMoistFrac") + soil2 <- ncdf4::ncvar_get(ncptr2, "SoilMoistFrac") + soil <- c(soil1, soil2) + if (i == 2) { + soilmoist <- soil + } else { + soilmoist <- cbind(soilmoist, soil) + } + + + sec <- c(ncptr1$dim$time$vals, ncptr2$dim$time$vals + last(ncptr1$dim$time$vals)) + origin <- strsplit(ncptr1$dim$time$units, " ")[[1]][3] + + + # Close netcdf file + ncdf4::nc_close(ncptr1) + ncdf4::nc_close(ncptr2) + } } if (num_results == 0) { @@ -135,54 +159,57 @@ forecast.graphs <- function(args){ } # Time - time <- seq(1, length.out= length(sec)) + time <- seq(1, length.out = length(sec)) # Caluclate means neemins <- NULL neemaxes <- NULL - quantiles <- apply(neemat,1,quantile,c(0.025,0.5,0.975), na.rm=TRUE) - neelower95 <- quantiles[1,] - neemeans <- quantiles[2,] - neeupper95 <- quantiles[3,] + quantiles <- apply(neemat, 1, quantile, c(0.025, 0.5, 0.975), na.rm = TRUE) + neelower95 <- quantiles[1, ] + neemeans <- quantiles[2, ] + neeupper95 <- quantiles[3, ] needf <- data.frame(time = time, Lower = neelower95, Predicted = neemeans, Upper = neeupper95) needf$date <- as.Date(sec, origin = origin) - #$needf$Time <- c(6,12,18, rep(c(0,6,12,18),length.out = (length(needf$date) - 3))) + # $needf$Time <- c(6,12,18, rep(c(0,6,12,18),length.out = (length(needf$date) - 3))) needf$start_date <- rep(start_date, each = length(sec)) needf$Time <- round(abs(sec - floor(sec)) * 24) - quantiles <- apply(qlemat,1,quantile,c(0.025,0.5,0.975), na.rm=TRUE) - qlelower95 <- quantiles[1,] - qlemeans <- quantiles[2,] - qleupper95 <- quantiles[3,] + quantiles <- apply(qlemat, 1, quantile, c(0.025, 0.5, 0.975), na.rm = TRUE) + qlelower95 <- quantiles[1, ] + qlemeans <- quantiles[2, ] + qleupper95 <- quantiles[3, ] qledf <- data.frame(time = time, Lower = qlelower95, Predicted = qlemeans, Upper = qleupper95) - qledf$date <- as.Date(sec, origin = origin) - #qledf$Time <- c(6,12,18, rep(c(0,6,12,18),length.out = (length(qledf$date) - 3))) + qledf$date <- as.Date(sec, origin = origin) + # qledf$Time <- c(6,12,18, rep(c(0,6,12,18),length.out = (length(qledf$date) - 3))) qledf$start_date <- rep(start_date, each = length(sec)) qledf$Time <- round(abs(sec - floor(sec)) * 24) - #soil moisture + # soil moisture soilmins <- NULL soilmaxes <- NULL - quantiles <- apply(soilmoist,1,quantile,c(0.025,0.5,0.975), na.rm=TRUE) - soillower95 <- quantiles[1,] - soilmeans <- quantiles[2,] - soilupper95 <- 
quantiles[3,] + quantiles <- apply(soilmoist, 1, quantile, c(0.025, 0.5, 0.975), na.rm = TRUE) + soillower95 <- quantiles[1, ] + soilmeans <- quantiles[2, ] + soilupper95 <- quantiles[3, ] soildf <- data.frame(time = time, Lower = soillower95, Predicted = soilmeans, Upper = soilupper95) soildf$date <- as.Date(sec, origin = origin) - #$needf$Time <- c(6,12,18, rep(c(0,6,12,18),length.out = (length(needf$date) - 3))) + # $needf$Time <- c(6,12,18, rep(c(0,6,12,18),length.out = (length(needf$date) - 3))) soildf$start_date <- rep(start_date, each = length(sec)) soildf$Time <- round(abs(sec - floor(sec)) * 24) - if(args[2] == "NEE"){ - return(needf)} - if(args[2]== "LE"){ - return(qledf)} - else(return(soildf)) + if (args[2] == "NEE") { + return(needf) + } + if (args[2] == "LE") { + return(qledf) + } else { + (return(soildf)) + } } diff --git a/modules/assim.sequential/inst/NEFI/generate.gefs.xml.R b/modules/assim.sequential/inst/NEFI/generate.gefs.xml.R index a644139710f..f130718598c 100644 --- a/modules/assim.sequential/inst/NEFI/generate.gefs.xml.R +++ b/modules/assim.sequential/inst/NEFI/generate.gefs.xml.R @@ -1,6 +1,6 @@ ## Some global environment variables .wd <- getwd() # Set this to whatever you want the working directory to be. If this is run off a cron job, - # you probably do not want the working directory to be cron's working directory, which is what getwd will return. +# you probably do not want the working directory to be cron's working directory, which is what getwd will return. ##' @@ -9,26 +9,28 @@ ##' @author Luke Dramko round.to.six.hours <- function(date = Sys.time() - lubridate::hours(2)) { if (is.character(date)) { - date <- as.POSIXct(date, tz="UTC") + date <- as.POSIXct(date, tz = "UTC") } - forecast_hour = (lubridate::hour(date) %/% 6) * 6 #Integer division by 6 followed by re-multiplication acts like a "floor function" for multiples of 6 - forecast_hour = sprintf("%04d", forecast_hour * 100) - date = as.POSIXct(paste0(lubridate::year(date), "-", lubridate::month(date), "-", lubridate::day(date), " ", - substring(forecast_hour, 1,2), ":00:00"), tz="UTC") + forecast_hour <- (lubridate::hour(date) %/% 6) * 6 # Integer division by 6 followed by re-multiplication acts like a "floor function" for multiples of 6 + forecast_hour <- sprintf("%04d", forecast_hour * 100) + date <- as.POSIXct(paste0( + lubridate::year(date), "-", lubridate::month(date), "-", lubridate::day(date), " ", + substring(forecast_hour, 1, 2), ":00:00" + ), tz = "UTC") return(date) } ##' -##'This script sets up the xml for, and runs PEcAn for the following settings: +##' This script sets up the xml for, and runs PEcAn for the following settings: ##' -##'NOAA_GEFS data -##'Start date: current date/time -##'End date: 16 days in the future -##'model: SIPNET +##' NOAA_GEFS data +##' Start date: current date/time +##' End date: 16 days in the future +##' model: SIPNET ##' -##'@author Luke Dramko +##' @author Luke Dramko # Settings file (including path) is passed in on the command line. args <- commandArgs(trailingOnly = TRUE) @@ -50,19 +52,22 @@ settings <- PEcAn.settings::read.settings(args[1]) xmloutdir <- regmatches(args[1], regexpr("(~|\\./)?(/)?([^/]*/)*", args[1])) # extract xmloutdir from args[1] filename <- basename(args[1]) # Extract file name from args[1] -if(xmloutdir == "") {xmloutdir <- "."} +if (xmloutdir == "") { + xmloutdir <- "." 
+} -dbparms = list( +dbparms <- list( dbname = "bety", host = "128.197.168.114", user = "bety", password = "bety", - driver = "Postgres") + driver = "Postgres" +) con <- PEcAn.DB::db.open(dbparms) if (is.null(con)) { print("Database connection failed.") - quit("no", status=12) + quit("no", status = 12) } # Set the run dates @@ -76,8 +81,8 @@ end_date <- start_date + lubridate::days(16) settings$run$start.date <- as.character(start_date) settings$run$end.date <- as.character(end_date) -#settings$ensemble$start.year <- start_date -#settings$ensemble$end.year <- end_date +# settings$ensemble$start.year <- start_date +# settings$ensemble$end.year <- end_date # Update the time of the run settings$info$date <- paste0(format(Sys.time(), "%Y/%m/%d %H:%M:%S"), " +0000") @@ -92,19 +97,21 @@ settings$ensemble$end.year <- as.character(end_date, "%Y") # Create new workflow ID and register it with the database hostname <- PEcAn.remote::fqdn() -query <- paste0("INSERT INTO workflows (site_id, model_id, notes, folder, hostname, start_date,", - "end_date, params, advanced_edit) ", - "values (", settings$run$site$id, ", ", settings$model$id, ", ", "''", ", '', '", hostname, "', '", - format(start_date, "%Y/%m/%d %H:%M:%S"), "', '", format(end_date, "%Y/%m/%d %H:%M:%S"), "', ", "''", ", ", "true) RETURNING id") +query <- paste0( + "INSERT INTO workflows (site_id, model_id, notes, folder, hostname, start_date,", + "end_date, params, advanced_edit) ", + "values (", settings$run$site$id, ", ", settings$model$id, ", ", "''", ", '', '", hostname, "', '", + format(start_date, "%Y/%m/%d %H:%M:%S"), "', '", format(end_date, "%Y/%m/%d %H:%M:%S"), "', ", "''", ", ", "true) RETURNING id" +) workflowid <- PEcAn.DB::db.query(query, con = con) workflowid <- as.character(unlist(workflowid)) settings$workflow$id <- workflowid -#The outdirectory is specific to a particular workflow +# The outdirectory is specific to a particular workflow outdir <- settings$outdir if (substr(outdir, nchar(outdir), nchar(outdir)) == "/") { - outdir <- substr(outdir, 1, nchar(outdir) -1 ) + outdir <- substr(outdir, 1, nchar(outdir) - 1) } basedir <- regmatches(outdir, regexpr("(~|\\./)?(/)?([^/]*/)*", outdir)) @@ -114,7 +121,7 @@ settings$outdir <- outdir # Create the output directory. PEcAn does not do this for you. It's normally the job of the # web interface's php code. 
if (!dir.exists(outdir)) { - dir.create(outdir, recursive=TRUE, showWarnings = FALSE) + dir.create(outdir, recursive = TRUE, showWarnings = FALSE) } diff --git a/modules/assim.sequential/inst/NEFI/graph_SDA_fluxtowers.R b/modules/assim.sequential/inst/NEFI/graph_SDA_fluxtowers.R index a6a9754b4e9..acf9d6570b5 100644 --- a/modules/assim.sequential/inst/NEFI/graph_SDA_fluxtowers.R +++ b/modules/assim.sequential/inst/NEFI/graph_SDA_fluxtowers.R @@ -1,317 +1,312 @@ - -#setwd('/fs/data3/kzarada/NEFI/Willow_Creek') +# setwd('/fs/data3/kzarada/NEFI/Willow_Creek') library("ggplot2") library("plotly") library("gganimate") library("tidyverse") -library('PEcAn.all') +library("PEcAn.all") library("RCurl") source("/fs/data3/kzarada/NEFI/sda.graphs.R") -#source("/fs/data3/kzarada/NEFI/Willow_Creek/download_WCr_met.R") +# source("/fs/data3/kzarada/NEFI/Willow_Creek/download_WCr_met.R") -#WCR -WCR.num.SDA = 676 -WCR.abv.SDA = "WCr" -WCR.outdir.SDA = '/fs/data3/kzarada/ouput/' -WCR.db.num.SDA = "0-676" +# WCR +WCR.num.SDA <- 676 +WCR.abv.SDA <- "WCr" +WCR.outdir.SDA <- "/fs/data3/kzarada/ouput/" +WCR.db.num.SDA <- "0-676" -sda.tower.graphs <- function(site.num, site.abv, outdir, db.num){ - - +sda.tower.graphs <- function(site.num, site.abv, outdir, db.num) { ### Site numbers ### # WCr = 676 # Syv = 622 # Wlef = 678 # Los = 679 - frame_end = Sys.Date() + lubridate::days(16) - frame_start = Sys.Date() - lubridate::days(10) - - ftime = seq(as.Date(frame_start), as.Date(frame_end), by="days") - ctime = seq(as.Date(frame_start), Sys.Date(), by = "days") - lubridate::days(4) - vars = c("NEE", "LE", "soil") - - for(j in 1:length(vars)){ - - - for(i in 1:length(ctime)){ - - args = c(as.character(ctime[i]), vars[j], site.num, outdir) - + frame_end <- Sys.Date() + lubridate::days(16) + frame_start <- Sys.Date() - lubridate::days(10) + + ftime <- seq(as.Date(frame_start), as.Date(frame_end), by = "days") + ctime <- seq(as.Date(frame_start), Sys.Date(), by = "days") - lubridate::days(4) + vars <- c("NEE", "LE", "soil") + + for (j in 1:length(vars)) { + for (i in 1:length(ctime)) { + args <- c(as.character(ctime[i]), vars[j], site.num, outdir) + assign(paste0(ctime[i], "_", vars[j]), sda.graphs(args)) - } } - NEE.index <- ls(pattern = paste0("_NEE"), envir=environment()) - LE.index <- ls(pattern = paste0("_LE"), envir=environment()) - soil.index <- ls(pattern = paste0("_soil"), envir=environment()) - - - nee.data = get(NEE.index[1]) - for(i in 2:length(NEE.index)){ - - nee.data = rbind(nee.data, get(NEE.index[i])) + NEE.index <- ls(pattern = paste0("_NEE"), envir = environment()) + LE.index <- ls(pattern = paste0("_LE"), envir = environment()) + soil.index <- ls(pattern = paste0("_soil"), envir = environment()) + + + nee.data <- get(NEE.index[1]) + for (i in 2:length(NEE.index)) { + nee.data <- rbind(nee.data, get(NEE.index[i])) } - - le.data = get(LE.index[1]) - for(i in 2:length(LE.index)){ - - le.data = rbind(le.data, get(LE.index[i])) + + le.data <- get(LE.index[1]) + for (i in 2:length(LE.index)) { + le.data <- rbind(le.data, get(LE.index[i])) } - - soil.data = get(soil.index[1]) - for(i in 2:length(LE.index)){ - - soil.data = rbind(soil.data, get(soil.index[i])) + + soil.data <- get(soil.index[1]) + for (i in 2:length(LE.index)) { + soil.data <- rbind(soil.data, get(soil.index[i])) } - + nee.data$Time <- as.POSIXct(paste(nee.data$date, nee.data$Time, sep = " "), format = "%Y-%m-%d %H") nee.data$Time <- lubridate::force_tz(nee.data$Time, "UTC") nee.data$start_date <- as.factor(nee.data$start_date) - + le.data$Time 
<- as.POSIXct(paste(le.data$date, le.data$Time, sep = " "), format = "%Y-%m-%d %H") le.data$Time <- lubridate::force_tz(le.data$Time, "UTC") le.data$start_date <- as.factor(le.data$start_date) - + soil.data$Time <- as.POSIXct(paste(soil.data$date, soil.data$Time, sep = " "), format = "%Y-%m-%d %H") soil.data$Time <- lubridate::force_tz(soil.data$Time, "UTC") soil.data$start_date <- as.factor(soil.data$start_date) - - Time = seq(from = head(unique(nee.data$date), 1), to = tail(unique(nee.data$date), 1), by = 1) - #Download observed data - source(paste0('/fs/data3/kzarada/NEFI/US_', site.abv,"/download_", site.abv, ".R")) + + Time <- seq(from = head(unique(nee.data$date), 1), to = tail(unique(nee.data$date), 1), by = 1) + # Download observed data + source(paste0("/fs/data3/kzarada/NEFI/US_", site.abv, "/download_", site.abv, ".R")) real_data <- do.call(paste0("download_US_", site.abv), list(Time[1], last(Time))) - real_data$Time = lubridate::with_tz(as.POSIXct(real_data$Time, format = "%Y-%m-%d %H:%M:%S", tz = "UTC"), "UTC") - - - - #Time1 <- lubridate::with_tz(seq(from = as.POSIXct(frame_start, tz = "UTC"), to = as.POSIXct(frame_end, tz = "UTC"), by ="hour"), "UTC") - #Time1 <- Time1[-1] #first time isn't included - - #combine observed with predicted data + real_data$Time <- lubridate::with_tz(as.POSIXct(real_data$Time, format = "%Y-%m-%d %H:%M:%S", tz = "UTC"), "UTC") + + + + # Time1 <- lubridate::with_tz(seq(from = as.POSIXct(frame_start, tz = "UTC"), to = as.POSIXct(frame_end, tz = "UTC"), by ="hour"), "UTC") + # Time1 <- Time1[-1] #first time isn't included + + # combine observed with predicted data real_data_nee <- as_tibble(real_data %>% dplyr::select(Time, NEE)) real_data_le <- as_tibble(real_data %>% dplyr::select(Time, LE)) - - nee.data <- left_join(as_tibble(nee.data), real_data_nee, by = c("Time"), suffix = c("nee", "real")) + + nee.data <- left_join(as_tibble(nee.data), real_data_nee, by = c("Time"), suffix = c("nee", "real")) le.data <- left_join(as_tibble(le.data), real_data_le, by = c("Time"), suffix = c("le", "real")) - - if(file.exists(paste0('/fs/data3/kzarada/NEFI/US_', site.abv, '/download_soilmoist_', site.abv, '.R'))){ - source(paste0('/fs/data3/kzarada/NEFI/US_', site.abv, '/download_soilmoist_', site.abv, '.R')) + + if (file.exists(paste0("/fs/data3/kzarada/NEFI/US_", site.abv, "/download_soilmoist_", site.abv, ".R"))) { + source(paste0("/fs/data3/kzarada/NEFI/US_", site.abv, "/download_soilmoist_", site.abv, ".R")) real_soil <- do.call(paste0("download_soilmoist_", site.abv), list(frame_start, frame_end)) soil.data <- left_join(as_tibble(soil.data), real_soil, by = c("Time"), suffic = c("soil", "real")) - soil.data$avgsoil = soil.data$avgsoil/100 - - Time = lubridate::with_tz(as.POSIXct(Time), tz = "UTC") + soil.data$avgsoil <- soil.data$avgsoil / 100 + + Time <- lubridate::with_tz(as.POSIXct(Time), tz = "UTC") x.breaks <- match(Time, nee.data$Time) x.breaks <- x.breaks[!is.na(x.breaks)] - - s <-ggplot(soil.data, aes(group = start_date, ids = start_date, frame = start_date)) + #, label= LE - Predicted - geom_ribbon(aes(x = Time, ymin=Lower, ymax=Upper, fill="95% Confidence Interval"), alpha = 0.4) + - geom_line(aes(x = Time, y = avgsoil, color = "Observed Data"), size = 1) + - geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + - ggtitle(paste0("Soil Moisture for ", frame_start, " to ", frame_end,", at ", site.abv)) + - scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values=c("Predicted Mean" = "skyblue1", "Observed 
Data" = "firebrick4")) + - scale_fill_manual(labels = c("95% Confidence Interval"), values=c("95% Confidence Interval" = "blue1")) + - scale_y_continuous(name="Soil Moisture (%)") + #, limits = c(qle_lower, qle_upper)) + - scale_x_discrete(name = "", breaks = x.breaks, labels = format(Time, "%m-%d")) + - theme_minimal() + - theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) - - - - ggplot.soil<-ggplotly(s, tooltip = 'all', layerData = 2) %>% - animation_opts(frame = 1200, easing = 'linear-in', transition = 0, redraw = F, mode = "next") %>% - animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = 'black'))) %>% - animation_button(x = 0, xanchor = "left", y = 1.5, yanchor= "top") %>% - layout(legend = list(orientation = "h", x = 0.25, y = 1.1)) %>% - layout(showlegend = T, margin = c(30,50,30,50)) - - ggplot.soil$x$data[[1]]$name <-"95% Confidence Interval" + + s <- ggplot(soil.data, aes(group = start_date, ids = start_date, frame = start_date)) + # , label= LE - Predicted + geom_ribbon(aes(x = Time, ymin = Lower, ymax = Upper, fill = "95% Confidence Interval"), alpha = 0.4) + + geom_line(aes(x = Time, y = avgsoil, color = "Observed Data"), size = 1) + + geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + + ggtitle(paste0("Soil Moisture for ", frame_start, " to ", frame_end, ", at ", site.abv)) + + scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values = c("Predicted Mean" = "skyblue1", "Observed Data" = "firebrick4")) + + scale_fill_manual(labels = c("95% Confidence Interval"), values = c("95% Confidence Interval" = "blue1")) + + scale_y_continuous(name = "Soil Moisture (%)") + # , limits = c(qle_lower, qle_upper)) + + scale_x_discrete(name = "", breaks = x.breaks, labels = format(Time, "%m-%d")) + + theme_minimal() + + theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) + + + + ggplot.soil <- ggplotly(s, tooltip = "all", layerData = 2) %>% + animation_opts(frame = 1200, easing = "linear-in", transition = 0, redraw = F, mode = "next") %>% + animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = "black"))) %>% + animation_button(x = 0, xanchor = "left", y = 1.5, yanchor = "top") %>% + layout(legend = list(orientation = "h", x = 0.25, y = 1.1)) %>% + layout(showlegend = T, margin = c(30, 50, 30, 50)) + + ggplot.soil$x$data[[1]]$name <- "95% Confidence Interval" ggplot.soil$x$data[[2]]$name <- "Observed Data" ggplot.soil$x$data[[3]]$name <- "Predicted Mean" - - soil.data$error = soil.data$avgsoil - soil.data$Predicted - - } else(s = "NA") - - - Time = lubridate::with_tz(as.POSIXct(Time), tz = "UTC") + + soil.data$error <- soil.data$avgsoil - soil.data$Predicted + } else { + (s <- "NA") + } + + + Time <- lubridate::with_tz(as.POSIXct(Time), tz = "UTC") x.breaks <- match(Time, nee.data$Time) x.breaks <- x.breaks[!is.na(x.breaks)] - - + + # These variables control the start and end dates of the y axis - nee_upper = max(nee.data %>% dplyr::select(Upper, Lower, Predicted, NEE), na.rm = TRUE) - nee_lower = min(nee.data %>% 
dplyr::select(Upper, Lower, Predicted, NEE), na.rm = TRUE) - - qle_upper = max(le.data %>% dplyr::select(Upper, Predicted, LE) %>% drop_na()) - qle_lower = min(le.data %>% dplyr::select(Lower, Predicted, LE) %>% drop_na()) - - p <-ggplot(nee.data, aes(group = start_date, ids = start_date, frame = start_date)) + - geom_ribbon(aes(x = Time, ymin=Lower, ymax=Upper, fill="95% Confidence Interval"), alpha = 0.4) + - geom_line(aes(x = Time, y = NEE, color = "Observed Data"), size = 1) + - geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + + nee_upper <- max(nee.data %>% dplyr::select(Upper, Lower, Predicted, NEE), na.rm = TRUE) + nee_lower <- min(nee.data %>% dplyr::select(Upper, Lower, Predicted, NEE), na.rm = TRUE) + + qle_upper <- max(le.data %>% dplyr::select(Upper, Predicted, LE) %>% drop_na()) + qle_lower <- min(le.data %>% dplyr::select(Lower, Predicted, LE) %>% drop_na()) + + p <- ggplot(nee.data, aes(group = start_date, ids = start_date, frame = start_date)) + + geom_ribbon(aes(x = Time, ymin = Lower, ymax = Upper, fill = "95% Confidence Interval"), alpha = 0.4) + + geom_line(aes(x = Time, y = NEE, color = "Observed Data"), size = 1) + + geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + ggtitle(paste0("Net Ecosystem Exchange for ", frame_start, " to ", frame_end, ", at ", site.abv)) + - scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values=c("Predicted Mean" = "skyblue1", "Observed Data" = "firebrick4")) + - scale_fill_manual(labels = c("95% Confidence Interval"), values=c("95% Confidence Interval" = "blue1")) + - scale_y_continuous(name="NEE (kg C m-2 s-1)", limits = c(nee_lower, nee_upper)) + - scale_x_discrete(name = "", breaks = x.breaks, labels = format(Time, "%m-%d")) + - theme_minimal() + - theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) - - q <- ggplot(le.data, aes(group = start_date, ids = start_date, frame = start_date)) + #, label= LE - Predicted - geom_ribbon(aes(x = Time, ymin=Lower, ymax=Upper, fill="95% Confidence Interval"), alpha = 0.4) + - geom_line(aes(x = Time, y = LE, color = "Observed Data"), size = 1) + - geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + + scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values = c("Predicted Mean" = "skyblue1", "Observed Data" = "firebrick4")) + + scale_fill_manual(labels = c("95% Confidence Interval"), values = c("95% Confidence Interval" = "blue1")) + + scale_y_continuous(name = "NEE (kg C m-2 s-1)", limits = c(nee_lower, nee_upper)) + + scale_x_discrete(name = "", breaks = x.breaks, labels = format(Time, "%m-%d")) + + theme_minimal() + + theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) + + q <- ggplot(le.data, aes(group = start_date, ids = start_date, frame = start_date)) + # , label= LE - Predicted + geom_ribbon(aes(x = Time, ymin = Lower, ymax = Upper, fill = "95% Confidence Interval"), alpha = 0.4) + + geom_line(aes(x = Time, y = LE, color = "Observed Data"), size = 1) + + geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + ggtitle(paste0("Latent Energy for ", frame_start, " 
to ", frame_end, ", at ", site.abv)) + - scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values=c("Predicted Mean" = "skyblue1", "Observed Data" = "firebrick4")) + - scale_fill_manual(labels = c("95% Confidence Interval"), values=c("95% Confidence Interval" = "blue1")) + - scale_y_continuous(name="LE (W m-2 s-1)") + #, limits = c(qle_lower, qle_upper)) + - scale_x_discrete(name = "", breaks = x.breaks, labels = format(Time, "%m-%d")) + - theme_minimal() + - theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) - - - - ggplot.nee<-ggplotly(p, tooltip = 'all') %>% - animation_opts(frame = 1200, easing = 'linear-in', transition = 0, redraw = F, mode = "next") %>% - animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = 'black'))) %>% - animation_button(x = 0, xanchor = "left", y = 1.5, yanchor= "top") %>% - layout(legend = list(orientation = "h", x = 0.25, y = 1.1)) %>% - layout(showlegend = T, margin = c(30,50,30,50)) - - ggplot.nee$x$data[[1]]$name <-"95% Confidence Interval" + scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values = c("Predicted Mean" = "skyblue1", "Observed Data" = "firebrick4")) + + scale_fill_manual(labels = c("95% Confidence Interval"), values = c("95% Confidence Interval" = "blue1")) + + scale_y_continuous(name = "LE (W m-2 s-1)") + # , limits = c(qle_lower, qle_upper)) + + scale_x_discrete(name = "", breaks = x.breaks, labels = format(Time, "%m-%d")) + + theme_minimal() + + theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) + + + + ggplot.nee <- ggplotly(p, tooltip = "all") %>% + animation_opts(frame = 1200, easing = "linear-in", transition = 0, redraw = F, mode = "next") %>% + animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = "black"))) %>% + animation_button(x = 0, xanchor = "left", y = 1.5, yanchor = "top") %>% + layout(legend = list(orientation = "h", x = 0.25, y = 1.1)) %>% + layout(showlegend = T, margin = c(30, 50, 30, 50)) + + ggplot.nee$x$data[[1]]$name <- "95% Confidence Interval" ggplot.nee$x$data[[2]]$name <- "Observed Data" ggplot.nee$x$data[[3]]$name <- "Predicted Mean" - - - ggplot.le<-ggplotly(q, tooltip = 'all', layerData = 2) %>% - animation_opts(frame = 1200, easing = 'linear-in', transition = 0, redraw = F, mode = "next") %>% - animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = 'black'))) %>% - animation_button(x = 0, xanchor = "left", y = 1.5, yanchor= "top") %>% - layout(legend = list(orientation = "h", x = 0.25, y = 1.1)) %>% - layout(showlegend = T, margin = c(30,50,30,50)) - - ggplot.le$x$data[[1]]$name <-"95% Confidence Interval" + + + ggplot.le <- ggplotly(q, tooltip = "all", layerData = 2) %>% + animation_opts(frame = 1200, easing = "linear-in", transition = 0, redraw = F, mode = "next") %>% + animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = "black"))) %>% + animation_button(x = 0, xanchor = "left", y = 1.5, yanchor = 
"top") %>% + layout(legend = list(orientation = "h", x = 0.25, y = 1.1)) %>% + layout(showlegend = T, margin = c(30, 50, 30, 50)) + + ggplot.le$x$data[[1]]$name <- "95% Confidence Interval" ggplot.le$x$data[[2]]$name <- "Observed Data" ggplot.le$x$data[[3]]$name <- "Predicted Mean" - - - - #for shiny app - if(file.exists(paste0("/fs/data3/kzarada/NEFI/US_", site.abv, "/download_", site.abv,"_met.R"))){ - source(paste0("/fs/data3/kzarada/NEFI/US_", site.abv, "/download_", site.abv,"_met.R")) - met = do.call(paste0("download_US_", site.abv,"_met"), list(frame_start, Sys.Date())) - - if("Tsoil" %in% names(met)){ - met <- as_tibble(met) %>% mutate(Time = as.POSIXct(date)) %>% dplyr::select(Time, Tair,Tsoil, rH) - }else{met <- as_tibble(met) %>% mutate(Time = as.POSIXct(date)) %>% dplyr::select(Time, Tair, rH)} - - nee.met <- nee.data %>% inner_join(met,nee.data, by = c("Time")) - - #Calculate Error + + + + # for shiny app + if (file.exists(paste0("/fs/data3/kzarada/NEFI/US_", site.abv, "/download_", site.abv, "_met.R"))) { + source(paste0("/fs/data3/kzarada/NEFI/US_", site.abv, "/download_", site.abv, "_met.R")) + met <- do.call(paste0("download_US_", site.abv, "_met"), list(frame_start, Sys.Date())) + + if ("Tsoil" %in% names(met)) { + met <- as_tibble(met) %>% + mutate(Time = as.POSIXct(date)) %>% + dplyr::select(Time, Tair, Tsoil, rH) + } else { + met <- as_tibble(met) %>% + mutate(Time = as.POSIXct(date)) %>% + dplyr::select(Time, Tair, rH) + } + + nee.met <- nee.data %>% inner_join(met, nee.data, by = c("Time")) + + # Calculate Error nee.met$error <- (nee.met$NEE - nee.met$Predicted) } - - nee.data$error = nee.data$NEE - nee.data$Predicted - le.data$error = le.data$LE - le.data$Predicted - - #for met comparison - + + nee.data$error <- nee.data$NEE - nee.data$Predicted + le.data$error <- le.data$LE - le.data$Predicted + + # for met comparison + library(ncdf4) forecast.path <- paste0("/fs/data3/kzarada/pecan.data/dbfiles/NOAA_GEFS_downscale_site_", db.num, "/") - + forecasted_data <- data.frame() #### stopping here-- need to make sure that it goes through each dir index and saves and then moves on dirs <- list.dirs(path = forecast.path) dir.1 <- dirs[grepl(paste0(".21.", Sys.Date(), "T*"), dirs)] - nc.files = list() - index = list() - dir.index = list() - - index= strsplit(dir.1[1], split = ".21.20")[[1]][2] - dir.index= dirs[grepl(index[1], dirs)] - - - - for(k in 1:21){ - nc.files[k]<- list.files(path = dir.index[k], pattern = '*.nc' ) + nc.files <- list() + index <- list() + dir.index <- list() + + index <- strsplit(dir.1[1], split = ".21.20")[[1]][2] + dir.index <- dirs[grepl(index[1], dirs)] + + + + for (k in 1:21) { + nc.files[k] <- list.files(path = dir.index[k], pattern = "*.nc") } - + forecasted_data <- data.frame() - for(i in 1:21){ + for (i in 1:21) { setwd(dir.index[i]) nc <- nc_open(nc.files[[i]][1]) sec <- nc$dim$time$vals sec <- PEcAn.utils::ud_convert(sec, unlist(strsplit(nc$dim$time$units, " "))[1], "seconds") - dt <- mean(diff(sec), na.rm=TRUE) + dt <- mean(diff(sec), na.rm = TRUE) tstep <- round(86400 / dt) dt <- 86400 / tstep - - - Tair <-ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin + + + Tair <- ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin Tair_C <- PEcAn.utils::ud_convert(Tair, "K", "degC") - Qair <-ncdf4::ncvar_get(nc, "specific_humidity") #humidity (kg/kg) + Qair <- ncdf4::ncvar_get(nc, "specific_humidity") # humidity (kg/kg) ws <- try(ncdf4::ncvar_get(nc, "wind_speed")) if (!is.numeric(ws)) { U <- ncdf4::ncvar_get(nc, "eastward_wind") V <- 
ncdf4::ncvar_get(nc, "northward_wind") - ws <- sqrt(U ^ 2 + V ^ 2) + ws <- sqrt(U^2 + V^2) PEcAn.logger::logger.info("wind_speed absent; calculated from eastward_wind and northward_wind") } - - + + Rain <- ncdf4::ncvar_get(nc, "precipitation_flux") - pres <- ncdf4::ncvar_get(nc,'air_pressure') ## in pascal - SW <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") ## in W/m2 + pres <- ncdf4::ncvar_get(nc, "air_pressure") ## in pascal + SW <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") ## in W/m2 LW <- ncdf4::ncvar_get(nc, "surface_downwelling_longwave_flux_in_air") RH <- PEcAn.data.atmosphere::qair2rh(Qair, Tair_C, press = 950) - + file.name <- nc.files[[i]][1] - + hour <- strsplit(strsplit(index, split = "T")[[1]][2], split = ".20")[[1]][1] - - start_date <- as.POSIXct(paste0(strsplit(strsplit(nc$dim$time$units, " ")[[1]][3], split = "T")[[1]][1]," ", hour), format = "%Y-%m-%d %H:%M") + + start_date <- as.POSIXct(paste0(strsplit(strsplit(nc$dim$time$units, " ")[[1]][3], split = "T")[[1]][1], " ", hour), format = "%Y-%m-%d %H:%M") sec <- nc$dim$time$vals - - + + timestamp <- seq(from = start_date + lubridate::hours(6), by = "6 hour", length.out = length(sec)) ensemble <- rep(i, times = length(timestamp)) tmp <- as.data.frame(cbind( ensemble, - Tair_C, + Tair_C, Qair, - RH, + RH, Rain = Rain * dt, - ws, - SW, + ws, + SW, LW )) tmp$timestamp <- timestamp nc_close(nc) forecasted_data <- rbind(forecasted_data, tmp) - } - - forecasted_data$ensemble = as.factor(forecasted_data$ensemble) - - - - - #Save Rdata for shiny app - save(list = ls(), file = paste0("/srv/shiny-server/Flux_Dashboard/data/", site.abv, ".SDA.RData")) - save(list = ls(), file = paste0("/fs/data3/kzarada/NEFI/", site.abv, ".SDA.RData")) - - - print(ls()) -} + } + forecasted_data$ensemble <- as.factor(forecasted_data$ensemble) -sda.tower.graphs(WCR.num.SDA, - WCR.abv.SDA, - WCR.outdir.SDA, - WCR.db.num.SDA) + # Save Rdata for shiny app + save(list = ls(), file = paste0("/srv/shiny-server/Flux_Dashboard/data/", site.abv, ".SDA.RData")) + save(list = ls(), file = paste0("/fs/data3/kzarada/NEFI/", site.abv, ".SDA.RData")) + + + print(ls()) +} + +sda.tower.graphs( + WCR.num.SDA, + WCR.abv.SDA, + WCR.outdir.SDA, + WCR.db.num.SDA +) diff --git a/modules/assim.sequential/inst/NEFI/graph_fluxtowers.R b/modules/assim.sequential/inst/NEFI/graph_fluxtowers.R index 775fc0bd56d..819fc79fa4b 100644 --- a/modules/assim.sequential/inst/NEFI/graph_fluxtowers.R +++ b/modules/assim.sequential/inst/NEFI/graph_fluxtowers.R @@ -1,362 +1,371 @@ - -#setwd('/fs/data3/kzarada/NEFI/Willow_Creek') +# setwd('/fs/data3/kzarada/NEFI/Willow_Creek') library("ggplot2") library("plotly") library("gganimate") library("tidyverse") -library('PEcAn.all') +library("PEcAn.all") library("RCurl") source("/fs/data3/kzarada/NEFI/Willow_Creek/forecast.graphs.R") -#source("/fs/data3/kzarada/NEFI/Willow_Creek/download_WCr_met.R") - -#WLEF -WLEF.num = 678 -WLEF.abv = "WLEF" -WLEF.outdir = '/fs/data3/kzarada/NEFI/US_WLEF/output/' -WLEF.db.num = "0-678" -#WCR -WCR.num = 676 -WCR.abv = "WCr" -WCR.outdir = '/fs/data3/kzarada/output/' -WCR.db.num = "0-676" -#Potato -Potato.num = 1000026756 -Potato.abv = 'Potato' -Potato.outdir = '/fs/data3/kzarada/NEFI/US_Potato/output/' -Potato.db.num = "1-26756" -#Syv -Syv.num = 622 -Syv.abv = "Syv" -Syv.outdir = '/fs/data3/kzarada/NEFI/US_Syv/output/' -Syv.db.num = "0-622" - -#Los -Los.num = 679 -Los.abv = "Los" -Los.outdir = '/fs/data3/kzarada/NEFI/US_Los/output/' -Los.db.num = "0-679" - -#Harvard 
-Harvard.num = 646 -Harvard.abv = "Harvard" -Harvard.outdir = '/fs/data3/kzarada/NEFI/US_Harvard/output/' -Harvard.db.num = '0-646' - -tower.graphs <- function(site.num, site.abv, outdir, db.num){ - - -### Site numbers ### -# WCr = 676 -# Syv = 622 -# Wlef = 678 -# Los = 679 -frame_end = Sys.Date() + lubridate::days(16) -frame_start = Sys.Date() - lubridate::days(10) - -ftime = seq(as.Date(frame_start), as.Date(frame_end), by="days") -ctime = seq(as.Date(frame_start), Sys.Date(), by = "days") -vars = c("NEE", "LE", "soil") - -for(j in 1:length(vars)){ - - - for(i in 1:length(ctime)){ - - args = c(as.character(ctime[i]), vars[j], site.num, outdir) - - assign(paste0(ctime[i], "_", vars[j]), forecast.graphs(args)) - +# source("/fs/data3/kzarada/NEFI/Willow_Creek/download_WCr_met.R") + +# WLEF +WLEF.num <- 678 +WLEF.abv <- "WLEF" +WLEF.outdir <- "/fs/data3/kzarada/NEFI/US_WLEF/output/" +WLEF.db.num <- "0-678" +# WCR +WCR.num <- 676 +WCR.abv <- "WCr" +WCR.outdir <- "/fs/data3/kzarada/output/" +WCR.db.num <- "0-676" +# Potato +Potato.num <- 1000026756 +Potato.abv <- "Potato" +Potato.outdir <- "/fs/data3/kzarada/NEFI/US_Potato/output/" +Potato.db.num <- "1-26756" +# Syv +Syv.num <- 622 +Syv.abv <- "Syv" +Syv.outdir <- "/fs/data3/kzarada/NEFI/US_Syv/output/" +Syv.db.num <- "0-622" + +# Los +Los.num <- 679 +Los.abv <- "Los" +Los.outdir <- "/fs/data3/kzarada/NEFI/US_Los/output/" +Los.db.num <- "0-679" + +# Harvard +Harvard.num <- 646 +Harvard.abv <- "Harvard" +Harvard.outdir <- "/fs/data3/kzarada/NEFI/US_Harvard/output/" +Harvard.db.num <- "0-646" + +tower.graphs <- function(site.num, site.abv, outdir, db.num) { + ### Site numbers ### + # WCr = 676 + # Syv = 622 + # Wlef = 678 + # Los = 679 + frame_end <- Sys.Date() + lubridate::days(16) + frame_start <- Sys.Date() - lubridate::days(10) + + ftime <- seq(as.Date(frame_start), as.Date(frame_end), by = "days") + ctime <- seq(as.Date(frame_start), Sys.Date(), by = "days") + vars <- c("NEE", "LE", "soil") + + for (j in 1:length(vars)) { + for (i in 1:length(ctime)) { + args <- c(as.character(ctime[i]), vars[j], site.num, outdir) + + assign(paste0(ctime[i], "_", vars[j]), forecast.graphs(args)) + } } -} -NEE.index <- ls(pattern = paste0("_NEE"), envir=environment()) -LE.index <- ls(pattern = paste0("_LE"), envir=environment()) -soil.index <- ls(pattern = paste0("_soil"), envir=environment()) + NEE.index <- ls(pattern = paste0("_NEE"), envir = environment()) + LE.index <- ls(pattern = paste0("_LE"), envir = environment()) + soil.index <- ls(pattern = paste0("_soil"), envir = environment()) -nee.data = get(NEE.index[1]) -for(i in 2:length(NEE.index)){ - - nee.data = rbind(nee.data, get(NEE.index[i])) -} + nee.data <- get(NEE.index[1]) + for (i in 2:length(NEE.index)) { + nee.data <- rbind(nee.data, get(NEE.index[i])) + } -le.data = get(LE.index[1]) -for(i in 2:length(LE.index)){ - - le.data = rbind(le.data, get(LE.index[i])) -} + le.data <- get(LE.index[1]) + for (i in 2:length(LE.index)) { + le.data <- rbind(le.data, get(LE.index[i])) + } -soil.data = get(soil.index[1]) -for(i in 2:length(LE.index)){ - - soil.data = rbind(soil.data, get(soil.index[i])) -} + soil.data <- get(soil.index[1]) + for (i in 2:length(LE.index)) { + soil.data <- rbind(soil.data, get(soil.index[i])) + } -nee.data$Time <- as.POSIXct(paste(nee.data$date, nee.data$Time, sep = " "), format = "%Y-%m-%d %H") -nee.data$Time <- lubridate::force_tz(nee.data$Time, "UTC") -nee.data$start_date <- as.factor(nee.data$start_date) - -le.data$Time <- as.POSIXct(paste(le.data$date, le.data$Time, sep 
= " "), format = "%Y-%m-%d %H") -le.data$Time <- lubridate::force_tz(le.data$Time, "UTC") -le.data$start_date <- as.factor(le.data$start_date) - -soil.data$Time <- as.POSIXct(paste(soil.data$date, soil.data$Time, sep = " "), format = "%Y-%m-%d %H") -soil.data$Time <- lubridate::force_tz(soil.data$Time, "UTC") -soil.data$start_date <- as.factor(soil.data$start_date) - - -#Download observed data -source(paste0('/fs/data3/kzarada/NEFI/US_', site.abv,"/download_", site.abv, ".R")) -real_data <- do.call(paste0("download_US_", site.abv), list(frame_start, frame_end)) -real_data$Time = lubridate::with_tz(as.POSIXct(real_data$Time, format = "%Y-%m-%d %H:%M:%S", tz = "UTC"), "UTC") - - - -#Time1 <- lubridate::with_tz(seq(from = as.POSIXct(frame_start, tz = "UTC"), to = as.POSIXct(frame_end, tz = "UTC"), by ="hour"), "UTC") -#Time1 <- Time1[-1] #first time isn't included - -#combine observed with predicted data -real_data_nee <- as_tibble(real_data %>% dplyr::select(Time, NEE)) -real_data_le <- as_tibble(real_data %>% dplyr::select(Time, LE)) - -nee.data <- left_join(as_tibble(nee.data), real_data_nee, by = c("Time"), suffix = c("nee", "real")) -le.data <- left_join(as_tibble(le.data), real_data_le, by = c("Time"), suffix = c("le", "real")) - -# if(file.exists(paste0('/fs/data3/kzarada/NEFI/US_', site.abv, '/download_soilmoist_', site.abv, '.R'))){ -# source(paste0('/fs/data3/kzarada/NEFI/US_', site.abv, '/download_soilmoist_', site.abv, '.R')) -# real_soil <- do.call(paste0("download_soilmoist_", site.abv), list(frame_start, frame_end)) -# soil.data <- left_join(as_tibble(soil.data), real_soil, by = c("Time"), suffic = c("soil", "real")) -# soil.data$avgsoil = soil.data$avgsoil/100 -# -# ftime = lubridate::with_tz(as.POSIXct(ftime), tz = "UTC") -# x.breaks <- match(ftime, nee.data$Time) -# x.breaks <- x.breaks[!is.na(x.breaks)] -# -# s <-ggplot(soil.data, aes(group = start_date, ids = start_date, frame = start_date)) + #, label= LE - Predicted -# geom_ribbon(aes(x = Time, ymin=Lower, ymax=Upper, fill="95% Confidence Interval"), alpha = 0.4) + -# geom_line(aes(x = Time, y = avgsoil, color = "Observed Data"), size = 1) + -# geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + -# ggtitle(paste0("Soil Moisture for ", frame_start, " to ", frame_end,", at ", site.abv)) + -# scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values=c("Predicted Mean" = "skyblue1", "Observed Data" = "firebrick4")) + -# scale_fill_manual(labels = c("95% Confidence Interval"), values=c("95% Confidence Interval" = "blue1")) + -# scale_y_continuous(name="Soil Moisture (%)") + #, limits = c(qle_lower, qle_upper)) + -# #scale_x_discrete(name = "", breaks = x.breaks, labels = format(ftime[-length(ftime)], "%m-%d")) + -# theme_minimal() + -# theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) -# -# -# -# ggplot.soil<-ggplotly(s, tooltip = 'all', layerData = 2) %>% -# animation_opts(frame = 1200, easing = 'linear-in', transition = 0, redraw = F, mode = "next") %>% -# animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = 'black'))) %>% -# animation_button(x = 0, xanchor = "left", y = 1.5, yanchor= "top") %>% -# layout(legend = list(orientation = "h", x = 0.25, y = 1.1)) %>% -# layout(showlegend = T, margin = c(30,50,30,50)) 
-# -# ggplot.soil$x$data[[1]]$name <-"95% Confidence Interval" -# ggplot.soil$x$data[[2]]$name <- "Observed Data" -# ggplot.soil$x$data[[3]]$name <- "Predicted Mean" -# -# soil.data$error = soil.data$avgsoil - soil.data$Predicted -# -# } else(s = "NA") - - -ftime = lubridate::with_tz(as.POSIXct(ftime), tz = "UTC") -x.breaks <- match(ftime, nee.data$Time) -x.breaks <- x.breaks[!is.na(x.breaks)] - - -# These variables control the start and end dates of the y axis -nee_upper = max(nee.data %>% dplyr::select(Upper, Lower, Predicted, NEE), na.rm = TRUE) -nee_lower = min(nee.data %>% dplyr::select(Upper, Lower, Predicted, NEE), na.rm = TRUE) - -qle_upper = max(le.data %>% dplyr::select(Upper, Predicted, LE) %>% drop_na()) -qle_lower = min(le.data %>% dplyr::select(Lower, Predicted, LE) %>% drop_na()) - -p <-ggplot(nee.data, aes(group = start_date, ids = start_date, frame = start_date)) + - geom_ribbon(aes(x = Time, ymin=Lower, ymax=Upper, fill="95% Confidence Interval"), alpha = 0.4) + - geom_line(aes(x = Time, y = NEE, color = "Observed Data"), size = 1) + - geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + - ggtitle(paste0("Net Ecosystem Exchange for ", frame_start, " to ", frame_end, ", at ", site.abv)) + - scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values=c("Predicted Mean" = "skyblue1", "Observed Data" = "firebrick4")) + - scale_fill_manual(labels = c("95% Confidence Interval"), values=c("95% Confidence Interval" = "blue1")) + - scale_y_continuous(name="NEE (kg C m-2 s-1)", limits = c(nee_lower, nee_upper)) + - #scale_x_discrete(name = "", breaks = x.breaks, labels = format(ftime[-length(ftime)], "%m-%d")) + - theme_minimal() + - theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) - -q <- ggplot(le.data, aes(group = start_date, ids = start_date, frame = start_date)) + #, label= LE - Predicted - geom_ribbon(aes(x = Time, ymin=Lower, ymax=Upper, fill="95% Confidence Interval"), alpha = 0.4) + - geom_line(aes(x = Time, y = LE, color = "Observed Data"), size = 1) + - geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + - ggtitle(paste0("Latent Energy for ", frame_start, " to ", frame_end, ", at ", site.abv)) + - scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values=c("Predicted Mean" = "skyblue1", "Observed Data" = "firebrick4")) + - scale_fill_manual(labels = c("95% Confidence Interval"), values=c("95% Confidence Interval" = "blue1")) + - scale_y_continuous(name="LE (W m-2 s-1)") + #, limits = c(qle_lower, qle_upper)) + - #scale_x_discrete(name = "", breaks = x.breaks, labels = format(ftime[-length(ftime)], "%m-%d")) + - theme_minimal() + - theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) - - - -ggplot.nee<-ggplotly(p, tooltip = 'all') %>% - animation_opts(frame = 1200, easing = 'linear-in', transition = 0, redraw = F, mode = "next") %>% - animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = 'black'))) %>% - animation_button(x = 0, xanchor = "left", y = 1.5, yanchor= "top") %>% - layout(legend = list(orientation = "h", 
x = 0.25, y = 1.1)) %>% - layout(showlegend = T, margin = c(30,50,30,50)) - -ggplot.nee$x$data[[1]]$name <-"95% Confidence Interval" -ggplot.nee$x$data[[2]]$name <- "Observed Data" -ggplot.nee$x$data[[3]]$name <- "Predicted Mean" - - -ggplot.le<-ggplotly(q, tooltip = 'all', layerData = 2) %>% - animation_opts(frame = 1200, easing = 'linear-in', transition = 0, redraw = F, mode = "next") %>% - animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = 'black'))) %>% - animation_button(x = 0, xanchor = "left", y = 1.5, yanchor= "top") %>% - layout(legend = list(orientation = "h", x = 0.25, y = 1.1)) %>% - layout(showlegend = T, margin = c(30,50,30,50)) - -ggplot.le$x$data[[1]]$name <-"95% Confidence Interval" -ggplot.le$x$data[[2]]$name <- "Observed Data" -ggplot.le$x$data[[3]]$name <- "Predicted Mean" - - - -#for shiny app -if(file.exists(paste0("/fs/data3/kzarada/NEFI/US_", site.abv, "/download_", site.abv,"_met.R"))){ -source(paste0("/fs/data3/kzarada/NEFI/US_", site.abv, "/download_", site.abv,"_met.R")) -met = do.call(paste0("download_US_", site.abv,"_met"), list(frame_start, Sys.Date())) - -if("Tsoil" %in% names(met)){ -met <- as_tibble(met) %>% dplyr::mutate(Time = as.POSIXct(date)) %>% dplyr::select(Time, Tair,Tsoil, rH) -}else{met <- as_tibble(met) %>% dplyr::mutate(Time = as.POSIXct(date)) %>% dplyr::select(Time, Tair, rH)} - -nee.met <- nee.data %>% dplyr::inner_join(met,nee.data, by = c("Time")) - -#Calculate Error -nee.met$error <- (nee.met$NEE - nee.met$Predicted) -} + nee.data$Time <- as.POSIXct(paste(nee.data$date, nee.data$Time, sep = " "), format = "%Y-%m-%d %H") + nee.data$Time <- lubridate::force_tz(nee.data$Time, "UTC") + nee.data$start_date <- as.factor(nee.data$start_date) + + le.data$Time <- as.POSIXct(paste(le.data$date, le.data$Time, sep = " "), format = "%Y-%m-%d %H") + le.data$Time <- lubridate::force_tz(le.data$Time, "UTC") + le.data$start_date <- as.factor(le.data$start_date) + + soil.data$Time <- as.POSIXct(paste(soil.data$date, soil.data$Time, sep = " "), format = "%Y-%m-%d %H") + soil.data$Time <- lubridate::force_tz(soil.data$Time, "UTC") + soil.data$start_date <- as.factor(soil.data$start_date) + + + # Download observed data + source(paste0("/fs/data3/kzarada/NEFI/US_", site.abv, "/download_", site.abv, ".R")) + real_data <- do.call(paste0("download_US_", site.abv), list(frame_start, frame_end)) + real_data$Time <- lubridate::with_tz(as.POSIXct(real_data$Time, format = "%Y-%m-%d %H:%M:%S", tz = "UTC"), "UTC") + + + + # Time1 <- lubridate::with_tz(seq(from = as.POSIXct(frame_start, tz = "UTC"), to = as.POSIXct(frame_end, tz = "UTC"), by ="hour"), "UTC") + # Time1 <- Time1[-1] #first time isn't included + + # combine observed with predicted data + real_data_nee <- as_tibble(real_data %>% dplyr::select(Time, NEE)) + real_data_le <- as_tibble(real_data %>% dplyr::select(Time, LE)) + + nee.data <- left_join(as_tibble(nee.data), real_data_nee, by = c("Time"), suffix = c("nee", "real")) + le.data <- left_join(as_tibble(le.data), real_data_le, by = c("Time"), suffix = c("le", "real")) + + # if(file.exists(paste0('/fs/data3/kzarada/NEFI/US_', site.abv, '/download_soilmoist_', site.abv, '.R'))){ + # source(paste0('/fs/data3/kzarada/NEFI/US_', site.abv, '/download_soilmoist_', site.abv, '.R')) + # real_soil <- do.call(paste0("download_soilmoist_", site.abv), list(frame_start, frame_end)) + # soil.data <- left_join(as_tibble(soil.data), real_soil, by = c("Time"), suffic = c("soil", "real")) + # soil.data$avgsoil = 
soil.data$avgsoil/100 + # + # ftime = lubridate::with_tz(as.POSIXct(ftime), tz = "UTC") + # x.breaks <- match(ftime, nee.data$Time) + # x.breaks <- x.breaks[!is.na(x.breaks)] + # + # s <-ggplot(soil.data, aes(group = start_date, ids = start_date, frame = start_date)) + #, label= LE - Predicted + # geom_ribbon(aes(x = Time, ymin=Lower, ymax=Upper, fill="95% Confidence Interval"), alpha = 0.4) + + # geom_line(aes(x = Time, y = avgsoil, color = "Observed Data"), size = 1) + + # geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + + # ggtitle(paste0("Soil Moisture for ", frame_start, " to ", frame_end,", at ", site.abv)) + + # scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values=c("Predicted Mean" = "skyblue1", "Observed Data" = "firebrick4")) + + # scale_fill_manual(labels = c("95% Confidence Interval"), values=c("95% Confidence Interval" = "blue1")) + + # scale_y_continuous(name="Soil Moisture (%)") + #, limits = c(qle_lower, qle_upper)) + + # #scale_x_discrete(name = "", breaks = x.breaks, labels = format(ftime[-length(ftime)], "%m-%d")) + + # theme_minimal() + + # theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) + # + # + # + # ggplot.soil<-ggplotly(s, tooltip = 'all', layerData = 2) %>% + # animation_opts(frame = 1200, easing = 'linear-in', transition = 0, redraw = F, mode = "next") %>% + # animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = 'black'))) %>% + # animation_button(x = 0, xanchor = "left", y = 1.5, yanchor= "top") %>% + # layout(legend = list(orientation = "h", x = 0.25, y = 1.1)) %>% + # layout(showlegend = T, margin = c(30,50,30,50)) + # + # ggplot.soil$x$data[[1]]$name <-"95% Confidence Interval" + # ggplot.soil$x$data[[2]]$name <- "Observed Data" + # ggplot.soil$x$data[[3]]$name <- "Predicted Mean" + # + # soil.data$error = soil.data$avgsoil - soil.data$Predicted + # + # } else(s = "NA") + + + ftime <- lubridate::with_tz(as.POSIXct(ftime), tz = "UTC") + x.breaks <- match(ftime, nee.data$Time) + x.breaks <- x.breaks[!is.na(x.breaks)] + + + # These variables control the start and end dates of the y axis + nee_upper <- max(nee.data %>% dplyr::select(Upper, Lower, Predicted, NEE), na.rm = TRUE) + nee_lower <- min(nee.data %>% dplyr::select(Upper, Lower, Predicted, NEE), na.rm = TRUE) + + qle_upper <- max(le.data %>% dplyr::select(Upper, Predicted, LE) %>% drop_na()) + qle_lower <- min(le.data %>% dplyr::select(Lower, Predicted, LE) %>% drop_na()) + + p <- ggplot(nee.data, aes(group = start_date, ids = start_date, frame = start_date)) + + geom_ribbon(aes(x = Time, ymin = Lower, ymax = Upper, fill = "95% Confidence Interval"), alpha = 0.4) + + geom_line(aes(x = Time, y = NEE, color = "Observed Data"), size = 1) + + geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + + ggtitle(paste0("Net Ecosystem Exchange for ", frame_start, " to ", frame_end, ", at ", site.abv)) + + scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values = c("Predicted Mean" = "skyblue1", "Observed Data" = "firebrick4")) + + scale_fill_manual(labels = c("95% Confidence Interval"), values = c("95% Confidence Interval" = "blue1")) + + scale_y_continuous(name = "NEE (kg C m-2 s-1)", limits = c(nee_lower, nee_upper)) + + # scale_x_discrete(name = 
"", breaks = x.breaks, labels = format(ftime[-length(ftime)], "%m-%d")) + + theme_minimal() + + theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) + + q <- ggplot(le.data, aes(group = start_date, ids = start_date, frame = start_date)) + # , label= LE - Predicted + geom_ribbon(aes(x = Time, ymin = Lower, ymax = Upper, fill = "95% Confidence Interval"), alpha = 0.4) + + geom_line(aes(x = Time, y = LE, color = "Observed Data"), size = 1) + + geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + + ggtitle(paste0("Latent Energy for ", frame_start, " to ", frame_end, ", at ", site.abv)) + + scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values = c("Predicted Mean" = "skyblue1", "Observed Data" = "firebrick4")) + + scale_fill_manual(labels = c("95% Confidence Interval"), values = c("95% Confidence Interval" = "blue1")) + + scale_y_continuous(name = "LE (W m-2 s-1)") + # , limits = c(qle_lower, qle_upper)) + + # scale_x_discrete(name = "", breaks = x.breaks, labels = format(ftime[-length(ftime)], "%m-%d")) + + theme_minimal() + + theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) + + + + ggplot.nee <- ggplotly(p, tooltip = "all") %>% + animation_opts(frame = 1200, easing = "linear-in", transition = 0, redraw = F, mode = "next") %>% + animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = "black"))) %>% + animation_button(x = 0, xanchor = "left", y = 1.5, yanchor = "top") %>% + layout(legend = list(orientation = "h", x = 0.25, y = 1.1)) %>% + layout(showlegend = T, margin = c(30, 50, 30, 50)) + + ggplot.nee$x$data[[1]]$name <- "95% Confidence Interval" + ggplot.nee$x$data[[2]]$name <- "Observed Data" + ggplot.nee$x$data[[3]]$name <- "Predicted Mean" + + + ggplot.le <- ggplotly(q, tooltip = "all", layerData = 2) %>% + animation_opts(frame = 1200, easing = "linear-in", transition = 0, redraw = F, mode = "next") %>% + animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = "black"))) %>% + animation_button(x = 0, xanchor = "left", y = 1.5, yanchor = "top") %>% + layout(legend = list(orientation = "h", x = 0.25, y = 1.1)) %>% + layout(showlegend = T, margin = c(30, 50, 30, 50)) + + ggplot.le$x$data[[1]]$name <- "95% Confidence Interval" + ggplot.le$x$data[[2]]$name <- "Observed Data" + ggplot.le$x$data[[3]]$name <- "Predicted Mean" + + + + # for shiny app + if (file.exists(paste0("/fs/data3/kzarada/NEFI/US_", site.abv, "/download_", site.abv, "_met.R"))) { + source(paste0("/fs/data3/kzarada/NEFI/US_", site.abv, "/download_", site.abv, "_met.R")) + met <- do.call(paste0("download_US_", site.abv, "_met"), list(frame_start, Sys.Date())) + + if ("Tsoil" %in% names(met)) { + met <- as_tibble(met) %>% + dplyr::mutate(Time = as.POSIXct(date)) %>% + dplyr::select(Time, Tair, Tsoil, rH) + } else { + met <- as_tibble(met) %>% + dplyr::mutate(Time = as.POSIXct(date)) %>% + dplyr::select(Time, Tair, rH) + } + + nee.met <- nee.data %>% dplyr::inner_join(met, nee.data, by = c("Time")) + + # Calculate Error + nee.met$error <- (nee.met$NEE - 
nee.met$Predicted) + } + + nee.data$error <- nee.data$NEE - nee.data$Predicted + le.data$error <- le.data$LE - le.data$Predicted -nee.data$error = nee.data$NEE - nee.data$Predicted -le.data$error = le.data$LE - le.data$Predicted + # for met comparison -#for met comparison + library(ncdf4) -library(ncdf4) + if (dir.exists(paste0("/fs/data3/kzarada/pecan.data/dbfiles/NOAA_GEFS_downscale_site_", db.num, "/"))) { + forecast.path <- paste0("/fs/data3/kzarada/pecan.data/dbfiles/NOAA_GEFS_downscale_site_", db.num, "/") + } else { + (forecast.path <- paste0("/fs/data3/kzarada/pecan.data/dbfiles/NOAA_GEFS_site_", db.num, "/")) + } + + forecasted_data <- data.frame() + #### stopping here-- need to make sure that it goes through each dir index and saves and then moves on + dirs <- list.dirs(path = forecast.path) + dir.1 <- dirs[grepl(paste0(".21.", Sys.Date(), "T*"), dirs)] + nc.files <- list() + index <- list() + dir.index <- list() + + index <- strsplit(dir.1[1], split = ".21.20")[[1]][2] + dir.index <- dirs[grepl(index[1], dirs)] -if(dir.exists(paste0("/fs/data3/kzarada/pecan.data/dbfiles/NOAA_GEFS_downscale_site_", db.num, "/"))){ - forecast.path <-paste0("/fs/data3/kzarada/pecan.data/dbfiles/NOAA_GEFS_downscale_site_", db.num, "/") - }else(forecast.path <- paste0("/fs/data3/kzarada/pecan.data/dbfiles/NOAA_GEFS_site_", db.num, "/")) -forecasted_data <- data.frame() -#### stopping here-- need to make sure that it goes through each dir index and saves and then moves on -dirs <- list.dirs(path = forecast.path) -dir.1 <- dirs[grepl(paste0(".21.", Sys.Date(), "T*"), dirs)] -nc.files = list() -index = list() -dir.index = list() -index= strsplit(dir.1[1], split = ".21.20")[[1]][2] -dir.index= dirs[grepl(index[1], dirs)] + for (k in 1:21) { + nc.files[k] <- list.files(path = dir.index[k], pattern = "*.nc") + } + + forecasted_data <- data.frame() + for (i in 1:21) { + setwd(dir.index[i]) + nc <- nc_open(nc.files[[i]][1]) + sec <- nc$dim$time$vals + sec <- PEcAn.utils::ud_convert(sec, unlist(strsplit(nc$dim$time$units, " "))[1], "seconds") + dt <- mean(diff(sec), na.rm = TRUE) + tstep <- round(86400 / dt) + dt <- 86400 / tstep + + + Tair <- ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin + Tair_C <- PEcAn.utils::ud_convert(Tair, "K", "degC") + Qair <- ncdf4::ncvar_get(nc, "specific_humidity") # humidity (kg/kg) + ws <- try(ncdf4::ncvar_get(nc, "wind_speed")) + if (!is.numeric(ws)) { + U <- ncdf4::ncvar_get(nc, "eastward_wind") + V <- ncdf4::ncvar_get(nc, "northward_wind") + ws <- sqrt(U^2 + V^2) + PEcAn.logger::logger.info("wind_speed absent; calculated from eastward_wind and northward_wind") + } + + + Rain <- ncdf4::ncvar_get(nc, "precipitation_flux") + pres <- ncdf4::ncvar_get(nc, "air_pressure") ## in pascal + SW <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") ## in W/m2 + LW <- ncdf4::ncvar_get(nc, "surface_downwelling_longwave_flux_in_air") + RH <- PEcAn.data.atmosphere::qair2rh(Qair, Tair_C, press = 950) + + file.name <- nc.files[[i]][1] + + hour <- strsplit(strsplit(index, split = "T")[[1]][2], split = ".20")[[1]][1] + + start_date <- as.POSIXct(paste0(strsplit(strsplit(nc$dim$time$units, " ")[[1]][3], split = "T")[[1]][1], " ", hour), format = "%Y-%m-%d %H:%M") + sec <- nc$dim$time$vals + + + timestamp <- seq(from = start_date + lubridate::hours(6), by = "1 hour", length.out = length(sec)) + ensemble <- rep(i, times = length(timestamp)) + tmp <- as.data.frame(cbind( + ensemble, + Tair_C, + Qair, + RH, + Rain = Rain * dt, + ws, + SW, + LW + )) + tmp$timestamp <- timestamp + 
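# Illustrative sketch (values are made up, not from the patch): the ensemble loop
# above falls back to wind components when "wind_speed" is absent and converts
# specific humidity to relative humidity. Assuming PEcAn.data.atmosphere::qair2rh()
# takes specific humidity (kg/kg), air temperature (degC), and pressure (mb), the
# two conversions look like this on their own:
U <- 2.1 # hypothetical eastward_wind component (m/s)
V <- -1.3 # hypothetical northward_wind component (m/s)
ws <- sqrt(U^2 + V^2) # wind speed recovered from the components
Qair <- 0.008 # hypothetical specific humidity (kg/kg)
Tair_C <- 22 # hypothetical air temperature (degC)
RH <- PEcAn.data.atmosphere::qair2rh(Qair, Tair_C, press = 950) # relative humidity (0-1)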
nc_close(nc) + forecasted_data <- rbind(forecasted_data, tmp) + } + forecasted_data$ensemble <- as.factor(forecasted_data$ensemble) -for(k in 1:21){ - nc.files[k]<- list.files(path = dir.index[k], pattern = '*.nc' ) + + + # Save Rdata for shiny app + save(list = ls(), file = paste0("/srv/shiny-server/Flux_Dashboard/data/", site.abv, ".RData")) + save(list = ls(), file = paste0("/fs/data3/kzarada/NEFI/", site.abv, ".RData")) + + + print(ls()) } -forecasted_data <- data.frame() -for(i in 1:21){ - setwd(dir.index[i]) - nc <- nc_open(nc.files[[i]][1]) - sec <- nc$dim$time$vals - sec <- PEcAn.utils::ud_convert(sec, unlist(strsplit(nc$dim$time$units, " "))[1], "seconds") - dt <- mean(diff(sec), na.rm=TRUE) - tstep <- round(86400 / dt) - dt <- 86400 / tstep - - - Tair <-ncdf4::ncvar_get(nc, "air_temperature") ## in Kelvin - Tair_C <- PEcAn.utils::ud_convert(Tair, "K", "degC") - Qair <-ncdf4::ncvar_get(nc, "specific_humidity") #humidity (kg/kg) - ws <- try(ncdf4::ncvar_get(nc, "wind_speed")) - if (!is.numeric(ws)) { - U <- ncdf4::ncvar_get(nc, "eastward_wind") - V <- ncdf4::ncvar_get(nc, "northward_wind") - ws <- sqrt(U ^ 2 + V ^ 2) - PEcAn.logger::logger.info("wind_speed absent; calculated from eastward_wind and northward_wind") - } - - - Rain <- ncdf4::ncvar_get(nc, "precipitation_flux") - pres <- ncdf4::ncvar_get(nc,'air_pressure') ## in pascal - SW <- ncdf4::ncvar_get(nc, "surface_downwelling_shortwave_flux_in_air") ## in W/m2 - LW <- ncdf4::ncvar_get(nc, "surface_downwelling_longwave_flux_in_air") - RH <- PEcAn.data.atmosphere::qair2rh(Qair, Tair_C, press = 950) - - file.name <- nc.files[[i]][1] - - hour <- strsplit(strsplit(index, split = "T")[[1]][2], split = ".20")[[1]][1] - - start_date <- as.POSIXct(paste0(strsplit(strsplit(nc$dim$time$units, " ")[[1]][3], split = "T")[[1]][1]," ", hour), format = "%Y-%m-%d %H:%M") - sec <- nc$dim$time$vals - - - timestamp <- seq(from = start_date + lubridate::hours(6), by = "1 hour", length.out = length(sec)) - ensemble <- rep(i, times = length(timestamp)) - tmp <- as.data.frame(cbind( - ensemble, - Tair_C, - Qair, - RH, - Rain = Rain * dt, - ws, - SW, - LW - )) - tmp$timestamp <- timestamp - nc_close(nc) - forecasted_data <- rbind(forecasted_data, tmp) -} - -forecasted_data$ensemble = as.factor(forecasted_data$ensemble) - - - - -#Save Rdata for shiny app -save(list = ls(), file = paste0("/srv/shiny-server/Flux_Dashboard/data/", site.abv, ".RData")) -save(list = ls(), file = paste0("/fs/data3/kzarada/NEFI/", site.abv, ".RData")) - - -print(ls()) -} - - -try(tower.graphs(WLEF.num, - WLEF.abv, - WLEF.outdir, - WLEF.db.num)) -try(tower.graphs(WCR.num, - WCR.abv, - WCR.outdir, - WCR.db.num)) -try(tower.graphs(Potato.num, - Potato.abv, - Potato.outdir, - Potato.db.num)) -try(tower.graphs(Syv.num, - Syv.abv, - Syv.outdir, - Syv.db.num)) -try(tower.graphs(Los.num, - Los.abv, - Los.outdir, - Los.db.num)) -try(tower.graphs(Harvard.num, - Harvard.abv, - Harvard.outdir, - Harvard.db.num)) +try(tower.graphs( + WLEF.num, + WLEF.abv, + WLEF.outdir, + WLEF.db.num +)) +try(tower.graphs( + WCR.num, + WCR.abv, + WCR.outdir, + WCR.db.num +)) +try(tower.graphs( + Potato.num, + Potato.abv, + Potato.outdir, + Potato.db.num +)) +try(tower.graphs( + Syv.num, + Syv.abv, + Syv.outdir, + Syv.db.num +)) +try(tower.graphs( + Los.num, + Los.abv, + Los.outdir, + Los.db.num +)) +try(tower.graphs( + Harvard.num, + Harvard.abv, + Harvard.outdir, + Harvard.db.num +)) diff --git a/modules/assim.sequential/inst/RemoteLauncher/Remote_sync.R 
b/modules/assim.sequential/inst/RemoteLauncher/Remote_sync.R index 0e11a1f94d2..4803b1cc138 100644 --- a/modules/assim.sequential/inst/RemoteLauncher/Remote_sync.R +++ b/modules/assim.sequential/inst/RemoteLauncher/Remote_sync.R @@ -11,28 +11,28 @@ args <- commandArgs(trailingOnly = TRUE) #---------------------------------------------------------------- # Copying over the luncher and sending the command #--------------------------------------------------------------- -#Settings -if (is.na(args[1])){ +# Settings +if (is.na(args[1])) { PEcAn.logger::logger.severe("No path to active xml setting run is defined.") } else { - settings.file = args[1] + settings.file <- args[1] settings <- PEcAn.settings::read.settings(settings.file) my_host <- list(name = settings$host$name, tunnel = settings$host$tunnel, user = settings$host$user) } -#Settings -if (is.na(args[2])){ +# Settings +if (is.na(args[2])) { PEcAn.logger::logger.severe("No path to active remote SDA run is defined.") } else { - remote.path = args[2] - sda.path <- paste0("dir.exists(\'",remote.path,"/SDA\')") + remote.path <- args[2] + sda.path <- paste0("dir.exists(\'", remote.path, "/SDA\')") } -#Settings -if (is.na(args[3])){ +# Settings +if (is.na(args[3])) { PEcAn.logger::logger.severe("No PID to active remote SDA run is defined.") } else { - PID = args[3] + PID <- args[3] } #---------------------------------------------------------------- # Copying over the luncher and sending the command @@ -40,34 +40,37 @@ if (is.na(args[3])){ # Let's see if the PID is still running is.active <- qsub_run_finished(run = PID, host = my_host, qstat = settings$host$qstat) -is.active <- ifelse (nchar(is.active) > 1, TRUE, FALSE) +is.active <- ifelse(nchar(is.active) > 1, TRUE, FALSE) #---------------------------------------------------------------- -# looping +# looping #--------------------------------------------------------------- -while(is.active){ +while (is.active) { is.active <- qsub_run_finished(run = PID, host = my_host, qstat = settings$host$qstat) - is.active <- ifelse (nchar(is.active) > 1, TRUE, FALSE) - - remote.copy.from(my_host, - paste0(remote.path,"/SDA"), - paste0(settings$outdir) + is.active <- ifelse(nchar(is.active) > 1, TRUE, FALSE) + + remote.copy.from( + my_host, + paste0(remote.path, "/SDA"), + paste0(settings$outdir) ) PEcAn.logger::logger.info(paste0("SDA folder was synced at ------------------- ", Sys.time())) Sys.sleep(3000) } #---------------------------------------------------------------- -# Final Copying +# Final Copying #--------------------------------------------------------------- sda.dir.exists <- remote.execute.R(sda.path, - my_host, - user = my_host$user, - scratchdir = ".") + my_host, + user = my_host$user, + scratchdir = "." 
+) -if (sda.dir.exists){ - remote.copy.from(my_host, - paste0(remote.path,"/SDA"), - paste0(settings$outdir) - ) +if (sda.dir.exists) { + remote.copy.from( + my_host, + paste0(remote.path, "/SDA"), + paste0(settings$outdir) + ) } PEcAn.logger::logger.info("------------------- Finished Syncing -------------------") diff --git a/modules/assim.sequential/inst/RemoteLauncher/SDA_launcher.R b/modules/assim.sequential/inst/RemoteLauncher/SDA_launcher.R index c82a5aeeda7..94c1c4b3b5c 100644 --- a/modules/assim.sequential/inst/RemoteLauncher/SDA_launcher.R +++ b/modules/assim.sequential/inst/RemoteLauncher/SDA_launcher.R @@ -17,19 +17,19 @@ plan(multiprocess) # Reading settings and paths #--------------------------------------------------------------- args <- commandArgs(trailingOnly = TRUE) -#Settings -if (is.na(args[1])){ +# Settings +if (is.na(args[1])) { settings <- read.settings("pecan.SDA.4sites.xml") } else { - settings.file = args[1] + settings.file <- args[1] settings <- PEcAn.settings::read.settings(settings.file) } -#Obs Path -if (is.na(args[2])){ +# Obs Path +if (is.na(args[2])) { PEcAn.logger::logger.severe("This file needs to be called from terminal and needs to recived to argument with it. First, path to the setting xml file and second is the path to the obs data. Seems like the second argument is missing.") } else { - obs.path = args[2] + obs.path <- args[2] } #---------------------------------------------------------------- # Setup @@ -50,35 +50,43 @@ setwd(settings$outdir) # PEcAn.logger::logger.info(paste0("I just deleted ", dir.delete, " folder !")) # } # }) -# +# # unlink(c('run', 'out', 'SDA'), recursive = TRUE) #---------------------------------------------------------------- # Find what sites we are running for #--------------------------------------------------------------- -if (inherits(settings, "MultiSettings")) site.ids <- settings %>% map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if (inherits(settings, "MultiSettings")) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} #---------------------------------------------------------------- # samples should be ready if not lets make it #--------------------------------------------------------------- if (!("samples.Rdata" %in% list.files())) { - #check to see if there are posterior.files tags under pft - - posterior.files.vec<-settings$pfts %>% - purrr::map(purrr::possibly('posterior.files', NA_character_)) %>% + # check to see if there are posterior.files tags under pft + + posterior.files.vec <- settings$pfts %>% + purrr::map(purrr::possibly("posterior.files", NA_character_)) %>% purrr::modify_depth(1, function(x) { ifelse(is.null(x), NA_character_, x) }) %>% unlist() - + get.parameter.samples(settings, - ens.sample.method = settings$ensemble$samplingspace$parameters$method, - posterior.files=posterior.files.vec) ## Aside: if method were set to unscented, would take minimal changes to do UnKF + ens.sample.method = settings$ensemble$samplingspace$parameters$method, + posterior.files = posterior.files.vec + ) ## Aside: if method were set to unscented, would take minimal changes to do UnKF } #---------------------------------------------------------------- # OBS data preparation #--------------------------------------------------------------- tryCatch( { - if (is.MultiSettings(settings)){ + if (is.MultiSettings(settings)) { obss <- PEcAnAssimSequential:::Obs.data.prepare.MultiSite(obs.path, site.ids) } else { obss <- 
load(obs.path) @@ -94,22 +102,20 @@ tryCatch( #---------------------------------------------------------------- # SDA #--------------------------------------------------------------- -if (is.MultiSettings(settings)){ +if (is.MultiSettings(settings)) { #---------------------------------------------------------------- # Preparing settings #--------------------------------------------------------------- new.settings <- PEcAn.settings::prepare.settings(settings) - #MultiSite SDA function + # MultiSite SDA function sda.enkf.multisite(new.settings, - obs.mean =obss$obs.mean , - obs.cov = obss$obs.cov) + obs.mean = obss$obs.mean, + obs.cov = obss$obs.cov + ) } else { - #Refactored SDA function + # Refactored SDA function sda.enkf(settings, - obs.mean =obss$obs.mean , - obs.cov = obss$obs.cov - ) + obs.mean = obss$obs.mean, + obs.cov = obss$obs.cov + ) } - - - diff --git a/modules/assim.sequential/inst/SDA_runner.R b/modules/assim.sequential/inst/SDA_runner.R index 13315bb2218..e7d0b812d26 100644 --- a/modules/assim.sequential/inst/SDA_runner.R +++ b/modules/assim.sequential/inst/SDA_runner.R @@ -22,32 +22,32 @@ library(Kendall) setwd("/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA") settings_dir <- "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/IC/pecan.xml" settings <- PEcAn.settings::read.settings(settings_dir) -#prepare samples +# prepare samples # PEcAn.uncertainty::get.parameter.samples(settings, ens.sample.method = settings$ensemble$samplingspace$parameters$method) -#prepare settings +# prepare settings settings <- PEcAn.settings::prepare.settings(settings) All_obs_prep <- settings$state.data.assimilation$Obs_Prep folder_prefix <- c("AGB", "LAI", "SMP", "SoilC") comb <- combn(4, 3) for (i in 1:4) { - settings$state.data.assimilation$Obs_Prep <- All_obs_prep[c(comb[,i], 5:8)] - folder_name <- paste0(folder_prefix[comb[,i]], collapse = '_') - #prep obs + settings$state.data.assimilation$Obs_Prep <- All_obs_prep[c(comb[, i], 5:8)] + folder_name <- paste0(folder_prefix[comb[, i]], collapse = "_") + # prep obs obs <- PEcAnAssimSequential::SDA_OBS_Assembler(settings = settings) - #prepare obs + # prepare obs load("/projectnb/dietzelab/dongchen/All_NEON_SDA/test_OBS/Rdata/obs.mean.Rdata") load("/projectnb/dietzelab/dongchen/All_NEON_SDA/test_OBS/Rdata/obs.cov.Rdata") - + for (i in 1:length(obs.mean)) { - if(is.null(obs.mean[[i]][[1]])){ + if (is.null(obs.mean[[i]][[1]])) { next } for (j in 1:length(obs.mean[[i]])) { - obs.mean[[i]][[j]][which(obs.mean[[i]][[j]]==0)] <- 0.01 - if(length(obs.cov[[i]][[j]]) > 1){ - diag(obs.cov[[i]][[j]])[which(diag(obs.cov[[i]][[j]]==0))] <- 1 - }else{ - if(obs.cov[[i]][[j]] == 0){ + obs.mean[[i]][[j]][which(obs.mean[[i]][[j]] == 0)] <- 0.01 + if (length(obs.cov[[i]][[j]]) > 1) { + diag(obs.cov[[i]][[j]])[which(diag(obs.cov[[i]][[j]] == 0))] <- 1 + } else { + if (obs.cov[[i]][[j]] == 0) { obs.cov[[i]][[j]] <- 1 } } @@ -55,39 +55,53 @@ for (i in 1:4) { } load("/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA_ALL/enkf.Rdata") PEcAn.utils::sendmail("zhangdc@bu.edu", "zhangdc@bu.edu", "SDA progress report", paste("Started working on", folder_name)) - if(is.character(try(PEcAnAssimSequential::sda.enkf.multisite(settings = settings, - obs.mean = obs.mean, - obs.cov = obs.cov, - Q = NULL, - restart = FALSE, - forceRun = TRUE, - keepNC = TRUE, - pre_enkf_params = enkf.params, - control=list(trace = TRUE, - FF = FALSE, - interactivePlot = FALSE, - TimeseriesPlot = F, - BiasPlot = FALSE, - plot.title = NULL, - facet.plots = FALSE, - debug = FALSE, - pause = 
FALSE, - Profiling = FALSE, - OutlierDetection=FALSE)), silent = T))){ + if (is.character(try(PEcAnAssimSequential::sda.enkf.multisite( + settings = settings, + obs.mean = obs.mean, + obs.cov = obs.cov, + Q = NULL, + restart = FALSE, + forceRun = TRUE, + keepNC = TRUE, + pre_enkf_params = enkf.params, + control = list( + trace = TRUE, + FF = FALSE, + interactivePlot = FALSE, + TimeseriesPlot = F, + BiasPlot = FALSE, + plot.title = NULL, + facet.plots = FALSE, + debug = FALSE, + pause = FALSE, + Profiling = FALSE, + OutlierDetection = FALSE + ) + ), silent = T))) { PEcAn.utils::sendmail("zhangdc@bu.edu", "zhangdc@bu.edu", "SDA running error", "Error") break - }else{ + } else { PEcAn.utils::sendmail("zhangdc@bu.edu", "zhangdc@bu.edu", "SDA progress report", paste0(folder_name, " has been completed!")) } - file.rename('/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA', - file.path('/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42', - paste0("SDA_", folder_name))) - dir.create('/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA') - dir.create('/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA/out') - dir.create('/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA/run') - file.copy(file.path('/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42', - paste0("SDA_", folder_name), - "samples.Rdata"), - file.path('/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA', - "samples.Rdata")) -} \ No newline at end of file + file.rename( + "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA", + file.path( + "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42", + paste0("SDA_", folder_name) + ) + ) + dir.create("/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA") + dir.create("/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA/out") + dir.create("/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA/run") + file.copy( + file.path( + "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42", + paste0("SDA_", folder_name), + "samples.Rdata" + ), + file.path( + "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA", + "samples.Rdata" + ) + ) +} diff --git a/modules/assim.sequential/inst/WillowCreek/NoDataWorkflow.R b/modules/assim.sequential/inst/WillowCreek/NoDataWorkflow.R index d4aa0ec654b..4ceb8ed0ab9 100644 --- a/modules/assim.sequential/inst/WillowCreek/NoDataWorkflow.R +++ b/modules/assim.sequential/inst/WillowCreek/NoDataWorkflow.R @@ -19,18 +19,19 @@ plan(multisession) outputPath <- "/projectnb/dietzelab/ahelgeso/SDA/Wcr_SDA_Output/NoData/" nodata <- TRUE restart <- FALSE -days.obs <- 1 #how many of observed data to include -- not including today +days.obs <- 1 # how many of observed data to include -- not including today setwd(outputPath) c( - 'Utils.R', - 'download_WCr.R', + "Utils.R", + "download_WCr.R", "gapfill_WCr.R", - 'prep.data.assim.R' -) %>% walk( ~ source( + "prep.data.assim.R" +) %>% walk(~ source( system.file("WillowCreek", - .x, - package = "PEcAnAssimSequential") + .x, + package = "PEcAnAssimSequential" + ) )) @@ -42,24 +43,24 @@ c( setwd("/projectnb/dietzelab/ahelgeso/SDA/Wcr_SDA_Output/NoData/") -#reading xml +# reading xml settings <- read.settings("/projectnb/dietzelab/ahelgeso/pecan/modules/assim.sequential/inst/WillowCreek/nodata.xml") -#connecting to DB -con <-try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) +# connecting to DB +con <- try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) # all.previous.sims <- list.dirs(outputPath, recursive = F) # if (length(all.previous.sims) > 0 & !inherits(con, "try-error")) { -# +# # tryCatch({ # # 
Looking through all the old simulations and find the most recent # all.previous.sims <- all.previous.sims %>% # map(~ list.files(path = file.path(.x, "SDA"))) %>% # setNames(all.previous.sims) %>% # discard( ~ !"sda.output.Rdata" %in% .x) # I'm throwing out the ones that they did not have a SDA output -# +# # last.sim <- # names(all.previous.sims) %>% # map_chr( ~ strsplit(.x, "_")[[1]][5]) %>% @@ -80,7 +81,7 @@ con <-try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) # sda.start <- Sys.Date() - 1 # PEcAn.logger::logger.warn(paste0("There was a problem with finding the last successfull SDA.",conditionMessage(e))) # }) -# +# # # if there was no older sims # if (is.na(sda.start)) # sda.start <- Sys.Date() - 9 @@ -97,7 +98,7 @@ met.start <- sda.start - lubridate::days(2) met.end <- met.start + lubridate::days(16) -#pad Observed Data to match met data +# pad Observed Data to match met data date <- seq( @@ -109,47 +110,51 @@ date <- pad.prep <- as.data.frame(cbind(Date = as.character(date), means = rep("NA", length(date)), covs = rep("NA", length(date)))) %>% dynutils::tibble_as_list() -names(pad.prep) <-date +names(pad.prep) <- date -prep.data = pad.prep +prep.data <- pad.prep obs.mean <- prep.data %>% - purrr::map('means') %>% + purrr::map("means") %>% + setNames(names(prep.data)) +obs.cov <- prep.data %>% + purrr::map("covs") %>% setNames(names(prep.data)) -obs.cov <- prep.data %>% purrr::map('covs') %>% setNames(names(prep.data)) if (nodata) { - obs.mean <- obs.mean %>% purrr::map(function(x) - return(NA)) - obs.cov <- obs.cov %>% purrr::map(function(x) - return(NA)) + obs.mean <- obs.mean %>% purrr::map(function(x) { + return(NA) + }) + obs.cov <- obs.cov %>% purrr::map(function(x) { + return(NA) + }) } #----------------------------------------------------------------------------------------------- #------------------------------------------ Fixing the settings -------------------------------- #----------------------------------------------------------------------------------------------- -#unlink existing IC files +# unlink existing IC files sapply(paste0("/projectnb/dietzelabe/pecan.data/dbfiles/IC_site_0-676_", 1:100, ".nc"), unlink) -#Using the found dates to run - this will help to download mets +# Using the found dates to run - this will help to download mets settings$run$start.date <- as.character(met.start) settings$run$end.date <- as.character(met.end) settings$run$site$met.start <- as.character(met.start) settings$run$site$met.end <- as.character(met.end) -#info +# info settings$info$date <- paste0(format(Sys.time(), "%Y/%m/%d %H:%M:%S"), " +0000") # -------------------------------------------------------------------------------------------------- #---------------------------------------------- PEcAn Workflow ------------------------------------- # -------------------------------------------------------------------------------------------------- -#Update/fix/check settings. Will only run the first time it's called, unless force=TRUE -settings <- PEcAn.settings::prepare.settings(settings, force=FALSE) +# Update/fix/check settings. 
Will only run the first time it's called, unless force=TRUE +settings <- PEcAn.settings::prepare.settings(settings, force = FALSE) setwd(settings$outdir) -#Write pecan.CHECKED.xml +# Write pecan.CHECKED.xml PEcAn.settings::write.settings(settings, outputfile = "pecan.CHECKED.xml") # start from scratch if no continue is passed in statusFile <- file.path(settings$outdir, "STATUS") @@ -163,11 +168,11 @@ settings <- PEcAn.workflow::do_conversions(settings, T, T, T) if (PEcAn.utils::status.check("TRAIT") == 0) { PEcAn.utils::status.start("TRAIT") settings <- PEcAn.workflow::runModule.get.trait.data(settings) - PEcAn.settings::write.settings(settings, outputfile = 'pecan.TRAIT.xml') + PEcAn.settings::write.settings(settings, outputfile = "pecan.TRAIT.xml") PEcAn.utils::status.end() -} else if (file.exists(file.path(settings$outdir, 'pecan.TRAIT.xml'))) { +} else if (file.exists(file.path(settings$outdir, "pecan.TRAIT.xml"))) { settings <- - PEcAn.settings::read.settings(file.path(settings$outdir, 'pecan.TRAIT.xml')) + PEcAn.settings::read.settings(file.path(settings$outdir, "pecan.TRAIT.xml")) } # Run the PEcAn meta.analysis if (!is.null(settings$meta.analysis)) { @@ -177,11 +182,11 @@ if (!is.null(settings$meta.analysis)) { PEcAn.utils::status.end() } } -#sample from parameters used for both sensitivity analysis and Ens +# sample from parameters used for both sensitivity analysis and Ens get.parameter.samples(settings, ens.sample.method = settings$ensemble$samplingspace$parameters$method) # Setting dates in assimilation tags - This will help with preprocess split in SDA code -settings$state.data.assimilation$start.date <-as.character(first(names(obs.mean))) -settings$state.data.assimilation$end.date <-as.character(last(names(obs.mean))) +settings$state.data.assimilation$start.date <- as.character(first(names(obs.mean))) +settings$state.data.assimilation$end.date <- as.character(last(names(obs.mean))) #- lubridate::hms("06:00:00") @@ -189,100 +194,101 @@ settings$state.data.assimilation$end.date <-as.character(last(names(obs.mean))) #--------------------------------- Restart ------------------------------------- # -------------------------------------------------------------------------------------------------- -if(restart == TRUE){ - if(!dir.exists("SDA")) dir.create("SDA",showWarnings = F) - - #Update the SDA Output to just have last time step - temp<- new.env() +if (restart == TRUE) { + if (!dir.exists("SDA")) dir.create("SDA", showWarnings = F) + + # Update the SDA Output to just have last time step + temp <- new.env() load(file.path(restart.path, "SDA", "sda.output.Rdata"), envir = temp) temp <- as.list(temp) - - #we want ANALYSIS, FORECAST, and enkf.parms to match up with how many days obs data we have - # +24 because it's hourly now and we want the next day as the start - if(length(temp$ANALYSIS) > 1){ - - for(i in 1:days.obs + 1){ + + # we want ANALYSIS, FORECAST, and enkf.parms to match up with how many days obs data we have + # +24 because it's hourly now and we want the next day as the start + if (length(temp$ANALYSIS) > 1) { + for (i in 1:days.obs + 1) { temp$ANALYSIS[[i]] <- temp$ANALYSIS[[i + 24]] } - for(i in rev((days.obs + 2):length(temp$ANALYSIS))){ + for (i in rev((days.obs + 2):length(temp$ANALYSIS))) { temp$ANALYSIS[[i]] <- NULL } - - for(i in 1:days.obs + 1){ + + for (i in 1:days.obs + 1) { temp$FORECAST[[i]] <- temp$FORECAST[[i + 24]] } - for(i in rev((days.obs + 2):length(temp$FORECAST))){ + for (i in rev((days.obs + 2):length(temp$FORECAST))) { temp$FORECAST[[i]] <- NULL 
} - - for(i in 1:days.obs + 1){ + + for (i in 1:days.obs + 1) { temp$enkf.params[[i]] <- temp$enkf.params[[i + 24]] } - for(i in rev((days.obs + 2):length(temp$enkf.params))){ + for (i in rev((days.obs + 2):length(temp$enkf.params))) { temp$enkf.params[[i]] <- NULL - } - + } } - temp$t = 1 - - #change inputs path to match sampling met paths - - for(i in 1: length(temp$inputs$ids)){ - + temp$t <- 1 + + # change inputs path to match sampling met paths + + for (i in 1:length(temp$inputs$ids)) { temp$inputs$samples[i] <- settings$run$inputs$met$path[temp$inputs$ids[i]] - } - - temp1<- new.env() + + temp1 <- new.env() list2env(temp, envir = temp1) - save(list = c("ANALYSIS", "enkf.params", "ensemble.id", "ensemble.samples", 'inputs', 'new.params', 'new.state', 'run.id', 'site.locs', 't', 'Viz.output', 'X'), - envir = temp1, - file = file.path(settings$outdir, "SDA", "sda.output.Rdata")) - - - + save( + list = c("ANALYSIS", "enkf.params", "ensemble.id", "ensemble.samples", "inputs", "new.params", "new.state", "run.id", "site.locs", "t", "Viz.output", "X"), + envir = temp1, + file = file.path(settings$outdir, "SDA", "sda.output.Rdata") + ) + + + temp.out <- new.env() - load(file.path(restart.path, "SDA", 'outconfig.Rdata'), envir = temp.out) + load(file.path(restart.path, "SDA", "outconfig.Rdata"), envir = temp.out) temp.out <- as.list(temp.out) temp.out$outconfig$samples <- NULL - + temp.out1 <- new.env() list2env(temp.out, envir = temp.out1) - save(list = c('outconfig'), - envir = temp.out1, - file = file.path(settings$outdir, "SDA", "outconfig.Rdata")) - - - - #copy over run and out folders - - if(!dir.exists("run")) dir.create("run",showWarnings = F) - + save( + list = c("outconfig"), + envir = temp.out1, + file = file.path(settings$outdir, "SDA", "outconfig.Rdata") + ) + + + + # copy over run and out folders + + if (!dir.exists("run")) dir.create("run", showWarnings = F) + files <- list.files(path = file.path(restart.path, "run/"), full.names = T, recursive = T, include.dirs = T, pattern = "sipnet.clim") readfiles <- list.files(path = file.path(restart.path, "run/"), full.names = T, recursive = T, include.dirs = T, pattern = "README.txt") - + newfiles <- gsub(pattern = restart.path, settings$outdir, files) readnewfiles <- gsub(pattern = restart.path, settings$outdir, readfiles) - + rundirs <- gsub(pattern = "/sipnet.clim", "", files) rundirs <- gsub(pattern = restart.path, settings$outdir, rundirs) - for(i in 1 : length(rundirs)){ - dir.create(rundirs[i]) + for (i in 1:length(rundirs)) { + dir.create(rundirs[i]) file.copy(from = files[i], to = newfiles[i]) - file.copy(from = readfiles[i], to = readnewfiles[i])} - file.copy(from = paste0(restart.path, '/run/runs.txt'), to = paste0(settings$outdir,'/run/runs.txt' )) - - if(!dir.exists("out")) dir.create("out",showWarnings = F) - + file.copy(from = readfiles[i], to = readnewfiles[i]) + } + file.copy(from = paste0(restart.path, "/run/runs.txt"), to = paste0(settings$outdir, "/run/runs.txt")) + + if (!dir.exists("out")) dir.create("out", showWarnings = F) + files <- list.files(path = file.path(restart.path, "out/"), full.names = T, recursive = T, include.dirs = T, pattern = "sipnet.out") newfiles <- gsub(pattern = restart.path, settings$outdir, files) outdirs <- gsub(pattern = "/sipnet.out", "", files) outdirs <- gsub(pattern = restart.path, settings$outdir, outdirs) - for(i in 1 : length(outdirs)){ - dir.create(outdirs[i]) - file.copy(from = files[i], to = newfiles[i])} - -} + for (i in 1:length(outdirs)) { + dir.create(outdirs[i]) + 
file.copy(from = files[i], to = newfiles[i]) + } +} # -------------------------------------------------------------------------------------------------- #--------------------------------- Run state data assimilation ------------------------------------- @@ -292,38 +298,36 @@ if(restart == TRUE){ settings$host$name <- "geo.bu.edu" settings$host$user <- "ahelgeso" settings$host$folder <- "/projectnb/dietzelab/ahelgeso/SDA/Wcr_SDA_Output/" -settings$host$job.sh <- "module load R/4.1.2" -settings$host$qsub <- 'qsub -l h_rt=24:00:00 -V -N @NAME@ -o @STDOUT@ -e @STDERR@' -settings$host$qsub.jobid <- 'Your job ([0-9]+) .*' -settings$host$qstat <- 'qstat -j @JOBID@ || echo DONE' +settings$host$job.sh <- "module load R/4.1.2" +settings$host$qsub <- "qsub -l h_rt=24:00:00 -V -N @NAME@ -o @STDOUT@ -e @STDERR@" +settings$host$qsub.jobid <- "Your job ([0-9]+) .*" +settings$host$qstat <- "qstat -j @JOBID@ || echo DONE" settings$host$tunnel <- "/projectnb/dietzelab/ahelgeso/tunnel" -settings$model$binary = "/usr2/postdoc/istfer/SIPNET/1023/sipnet" +settings$model$binary <- "/usr2/postdoc/istfer/SIPNET/1023/sipnet" -unlink(c('run','out'), recursive = T) +unlink(c("run", "out"), recursive = T) -#debugonce(PEcAnAssimSequential::sda.enkf) -if ('state.data.assimilation' %in% names(settings)) { +# debugonce(PEcAnAssimSequential::sda.enkf) +if ("state.data.assimilation" %in% names(settings)) { if (PEcAn.utils::status.check("SDA") == 0) { PEcAn.utils::status.start("SDA") PEcAnAssimSequential::sda.enkf( - settings, - restart=restart, - Q=0, + settings, + restart = restart, + Q = 0, obs.mean = obs.mean, obs.cov = obs.cov, control = list( trace = TRUE, - interactivePlot =FALSE, - TimeseriesPlot =TRUE, - BiasPlot =FALSE, + interactivePlot = FALSE, + TimeseriesPlot = TRUE, + BiasPlot = FALSE, debug = FALSE, - pause=FALSE + pause = FALSE ) ) - + PEcAn.utils::status.end() } } - - diff --git a/modules/assim.sequential/inst/WillowCreek/SDA_Workflow.R b/modules/assim.sequential/inst/WillowCreek/SDA_Workflow.R index 7bdf966c4be..4d192d3335d 100644 --- a/modules/assim.sequential/inst/WillowCreek/SDA_Workflow.R +++ b/modules/assim.sequential/inst/WillowCreek/SDA_Workflow.R @@ -11,7 +11,7 @@ library("tidyverse") library("furrr") library("R.utils") library("dynutils") -library('nimble') +library("nimble") plan(multisession) @@ -21,11 +21,11 @@ plan(multisession) forecastPath <- "/projectnb/dietzelab/ahelgeso/Site_Outputs/Harvard/FluxPaper/" outputPath <- "/projectnb/dietzelab/ahelgeso/SDA/HF_SDA_Output/" -nodata <- FALSE #use this to run SDA with no data +nodata <- FALSE # use this to run SDA with no data restart <- list() -days.obs <- 3 #how many of observed data *BY HOURS* to include -- not including today +days.obs <- 3 # how many of observed data *BY HOURS* to include -- not including today setwd(outputPath) -options(warn=-1) +options(warn = -1) #------------------------------------------------------------------------------------------------ @@ -33,27 +33,28 @@ options(warn=-1) #------------------------------------------------------------------------------------------------ c( - 'Utils.R', - 'download_WCr.R', + "Utils.R", + "download_WCr.R", "gapfill_WCr.R", - 'prep.data.assim.R' -) %>% walk( ~ source( + "prep.data.assim.R" +) %>% walk(~ source( system.file("WillowCreek", - .x, - package = "PEcAnAssimSequential") + .x, + package = "PEcAnAssimSequential" + ) )) #------------------------------------------------------------------------------------------------ #------------------------------------------ Preparing the pecan xml 
----------------------------- #------------------------------------------------------------------------------------------------ -#reading xml +# reading xml settings <- read.settings("/projectnb/dietzelab/ahelgeso/pecan/modules/assim.sequential/inst/WillowCreek/testingMulti_HF.xml") -#connecting to DB -con <-try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) +# connecting to DB +con <- try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) on.exit(db.close(con)) -#Find last SDA Run to get new start date +# Find last SDA Run to get new start date sda.start <- NA all.previous.sims <- list.dirs(outputPath, recursive = F) # if (length(all.previous.sims) > 0) { @@ -62,7 +63,7 @@ all.previous.sims <- list.dirs(outputPath, recursive = F) # map(~ list.files(path = file.path(all.previous.sims, "SDA"))) %>% # setNames(all.previous.sims) %>% # discard( ~ !"sda.output.Rdata" %in% all.previous.sims) # I'm throwing out the ones that they did not have a SDA output -# +# # last.sim <- # names(all.previous.sims) %>% # map_chr( ~ strsplit(names(all.previous.sims), "_")[[1]][5]) %>% @@ -77,18 +78,18 @@ all.previous.sims <- list.dirs(outputPath, recursive = F) # # pulling the date and the path to the last SDA # restart.path <-grep(last.sim$ID, names(all.previous.sims), value = T) # sda.start <- last.sim$start_date+ lubridate::days(3) -# +# # error = function(e) { # restart.path <- NULL # sda.start <- Sys.Date() - 9 # PEcAn.logger::logger.warn(paste0("There was a problem with finding the last successfull SDA.",conditionMessage(e))) # } -# +# # # if there was no older sims # if (is.na(sda.start)) # sda.start <- Sys.Date() - 9 # } -#to manually change start date +# to manually change start date sda.start <- as.Date("2021-07-28") sda.end <- sda.start + lubridate::days(1) @@ -100,70 +101,71 @@ met.end <- met.start + lubridate::days(35) #----------------------------------------------------------------------------------------------- #------------------------------------------ Download flux ------------------------------ #----------------------------------------------------------------------------------------------- -if(settings$run$site$id == 676){ +if (settings$run$site$id == 676) { site_info <- list( site_id = 676, site_name = "Willow Creek", lat = 45.805925, lon = -90.07961, - time_zone = "UTC") -#Fluxes -prep.data <- prep.data.assim( - sda.start - lubridate::days(90),# it needs at least 90 days for gap filling - sda.end, - numvals = 100, - vars = c("NEE", "LE"), - data.len = days.obs, - sda.start) - -Axobs.raw <-prep.data$rawobs -prep.data<-prep.data$obs - -# if there is infinte value then take it out - here we want to remove any that just have one NA in the observed data -prep.data <- prep.data %>% - map(function(day.data){ - #cheking the mean - nan.mean <- which(is.infinite(day.data$means) | is.nan(day.data$means) | is.na(day.data$means)) - if ( length(nan.mean)>0 ) { - - day.data$means <- day.data$means[-nan.mean] - day.data$covs <- day.data$covs[-nan.mean, -nan.mean] %>% - as.matrix() %>% - `colnames <-`(c(colnames(day.data$covs)[-nan.mean])) - } - day.data - }) - - -# Changing LE to Qle which is what SIPNET expects -prep.data <- prep.data %>% - map(function(day.data) { - names(day.data$means)[names(day.data$means) == "LE"] <- "Qle" - dimnames(day.data$covs) <- dimnames(day.data$covs) %>% - map(function(name) { - name[name == "LE"] <- "Qle" - name - }) - - day.data - }) - -###### Pad Observed Data to forecast ############# -date <- - seq( - from = 
lubridate::force_tz(as.POSIXct(last(names(prep.data)), format = "%Y-%m-%d %H:%M:%S"), tz = "UTC") + lubridate::hours(1),
- to = lubridate::with_tz(as.POSIXct(first(sda.end) + lubridate::days(1), format = "%Y-%m-%d %H:%M:%S"), tz = "UTC"),
- by = "1 hour"
+ time_zone = "UTC"
+ )
+ # Fluxes
+ prep.data <- prep.data.assim(
+ sda.start - lubridate::days(90), # it needs at least 90 days for gap filling
+ sda.end,
+ numvals = 100,
+ vars = c("NEE", "LE"),
+ data.len = days.obs,
+ sda.start
+ )
+
+ Axobs.raw <- prep.data$rawobs
+ prep.data <- prep.data$obs
+
+ # if there is an infinite value then take it out - here we want to remove any that just have one NA in the observed data
+ prep.data <- prep.data %>%
+ map(function(day.data) {
+ # checking the mean
+ nan.mean <- which(is.infinite(day.data$means) | is.nan(day.data$means) | is.na(day.data$means))
+ if (length(nan.mean) > 0) {
+ day.data$means <- day.data$means[-nan.mean]
+ day.data$covs <- day.data$covs[-nan.mean, -nan.mean] %>%
+ as.matrix() %>%
+ `colnames <-`(c(colnames(day.data$covs)[-nan.mean]))
+ }
+ day.data
+ })
+
+
+ # Changing LE to Qle which is what SIPNET expects
+ prep.data <- prep.data %>%
+ map(function(day.data) {
+ names(day.data$means)[names(day.data$means) == "LE"] <- "Qle"
+ dimnames(day.data$covs) <- dimnames(day.data$covs) %>%
+ map(function(name) {
+ name[name == "LE"] <- "Qle"
+ name
+ })
+
+ day.data
+ })
+
+ ###### Pad Observed Data to forecast #############
+ date <-
+ seq(
+ from = lubridate::force_tz(as.POSIXct(last(names(prep.data)), format = "%Y-%m-%d %H:%M:%S"), tz = "UTC") + lubridate::hours(1),
+ to = lubridate::with_tz(as.POSIXct(first(sda.end) + lubridate::days(1), format = "%Y-%m-%d %H:%M:%S"), tz = "UTC"),
+ by = "1 hour"
+ )
-pad.prep <- Axobs.raw %>%
- tidyr::complete(Date = date) %>%
- filter(Date %in% date) %>%
- mutate(means = NA, covs = NA) %>%
- dplyr::select(Date, means, covs) %>%
- dynutils::tibble_as_list()
+ pad.prep <- Axobs.raw %>%
+ tidyr::complete(Date = date) %>%
+ filter(Date %in% date) %>%
+ mutate(means = NA, covs = NA) %>%
+ dplyr::select(Date, means, covs) %>%
+ dynutils::tibble_as_list()
-names(pad.prep) <-date
+ names(pad.prep) <- date
}
# --------------------------------------------------------------------------------------------------
#---------------------------------------------- LAI DATA -------------------------------------
@@ -173,100 +175,106 @@ site_info <- list(
 site_name = settings$run$site$name,
 lat = settings$run$site$lat,
 lon = settings$run$site$lon,
- time_zone = "UTC")
-
- lai <- call_MODIS(outdir = NULL,
- var = 'lai',
- site_info = site_info,
- product_dates = c(paste0(lubridate::year(met.start), strftime(met.start, format = "%j")),paste0(lubridate::year(met.end), strftime(met.end, format = "%j"))),
- run_parallel = TRUE,
- ncores = NULL,
- product =
"MOD15A2H", - band = "LaiStdDev_500m", - package_method = "MODISTools", - QC_filter = TRUE, - progress = TRUE) - -#filter for good resolution data - lai_sd <- lai_sd %>% filter(qc == "000") -#filter for lai.sd that matches sda.start - lai_sd <- lai_sd %>% filter(calendar_date == sda.start) - - if(dim(lai_sd)[1] < 1){ - lai_sd = NA - PEcAn.logger::logger.warn(paste0("MODIS standard deviation Data not available for these dates, initialzing NA")) - } - -if(settings$run$site$id == 676){ -#Add in LAI info -if(is.na(lai)){ - index <- rep(FALSE, length(names(prep.data)))}else{ - index <- as.Date(names(prep.data)) %in% as.Date(lai$calendar_date) +lai <- call_MODIS( + outdir = NULL, + var = "lai", + site_info = site_info, + product_dates = c(paste0(lubridate::year(met.start), strftime(met.start, format = "%j")), paste0(lubridate::year(met.end), strftime(met.end, format = "%j"))), + run_parallel = TRUE, + ncores = NULL, + product = "MOD15A2H", + band = "Lai_500m", + package_method = "MODISTools", + QC_filter = TRUE, + progress = TRUE +) + +# filter for good resolution data +lai <- lai %>% filter(qc == "000") +# filter for lai that matches sda.start +lai <- lai %>% filter(calendar_date == sda.start) + +if (dim(lai)[1] < 1) { + lai <- NA + PEcAn.logger::logger.warn(paste0("MODIS mean Data not available for these dates, initialzing NA")) } -for(i in 1:length(index)){ - - if(index[i]){ - lai.date <- which(as.Date(lai$calendar_date) %in% as.Date(names(prep.data))) - LAI <- c(0,0) - prep.data[[i]]$means <- c(prep.data[[i]]$means, lai$data[lai.date]) - prep.data[[i]]$covs <- rbind(cbind(prep.data[[i]]$covs, c(0, 0)), c(0,0, lai_sd$data)) - - names(prep.data[[i]]$means) <- c("NEE", "Qle", "LAI") - rownames(prep.data[[i]]$covs) <- c("NEE", "Qle", "LAI") - colnames(prep.data[[i]]$covs) <- c("NEE", "Qle", "LAI") - - } +lai_sd <- call_MODIS( + outdir = NULL, + var = "lai", + site_info = site_info, + product_dates = c(paste0(lubridate::year(met.start), strftime(met.start, format = "%j")), paste0(lubridate::year(met.end), strftime(met.end, format = "%j"))), + run_parallel = TRUE, + ncores = NULL, + product = "MOD15A2H", + band = "LaiStdDev_500m", + package_method = "MODISTools", + QC_filter = TRUE, + progress = TRUE +) + +# filter for good resolution data +lai_sd <- lai_sd %>% filter(qc == "000") +# filter for lai.sd that matches sda.start +lai_sd <- lai_sd %>% filter(calendar_date == sda.start) + +if (dim(lai_sd)[1] < 1) { + lai_sd <- NA + PEcAn.logger::logger.warn(paste0("MODIS standard deviation Data not available for these dates, initialzing NA")) } -#add forecast pad to the obs data -prep.data = c(prep.data, pad.prep) +if (settings$run$site$id == 676) { + # Add in LAI info + if (is.na(lai)) { + index <- rep(FALSE, length(names(prep.data))) + } else { + index <- as.Date(names(prep.data)) %in% as.Date(lai$calendar_date) + } -#split into means and covs -obs.mean <- prep.data %>% - map('means') %>% - setNames(names(prep.data)) -obs.cov <- prep.data %>% map('covs') %>% setNames(names(prep.data)) -}else{ -#build obs mean/cov matrix for LAI + for (i in 1:length(index)) { + if (index[i]) { + lai.date <- which(as.Date(lai$calendar_date) %in% as.Date(names(prep.data))) + LAI <- c(0, 0) + prep.data[[i]]$means <- c(prep.data[[i]]$means, lai$data[lai.date]) + prep.data[[i]]$covs <- rbind(cbind(prep.data[[i]]$covs, c(0, 0)), c(0, 0, lai_sd$data)) + + names(prep.data[[i]]$means) <- c("NEE", "Qle", "LAI") + rownames(prep.data[[i]]$covs) <- c("NEE", "Qle", "LAI") + colnames(prep.data[[i]]$covs) <- c("NEE", "Qle", "LAI") + } 
+ } + + # add forecast pad to the obs data + prep.data <- c(prep.data, pad.prep) + + # split into means and covs + obs.mean <- prep.data %>% + map("means") %>% + setNames(names(prep.data)) + obs.cov <- prep.data %>% + map("covs") %>% + setNames(names(prep.data)) +} else { + # build obs mean/cov matrix for LAI obs.mean <- data.frame(date = lai$calendar_date, site_id = lai$site_id, lai = lai$data) - obs.mean$date = as.character(obs.mean$date, stringsAsFactors = FALSE) + obs.mean$date <- as.character(obs.mean$date, stringsAsFactors = FALSE) obs.mean <- split(obs.mean, obs.mean$date) - + obs.cov <- data.frame(date = lai_sd$calendar_date, site_id = lai_sd$site_id, lai = lai_sd$data) - obs.cov$date = as.character(obs.cov$date, stringsAsFactors = FALSE) + obs.cov$date <- as.character(obs.cov$date, stringsAsFactors = FALSE) obs.cov <- split(obs.cov, obs.cov$date) } #----------------------------------------------------------------------------------------------- #------------------------------------------ Fixing the settings -------------------------------- #----------------------------------------------------------------------------------------------- -#Using the found dates to run - this will help to download mets +# Using the found dates to run - this will help to download mets settings$run$site$met.start <- as.character(met.start) settings$run$site$met.end <- as.character(met.end) -#info +# info settings$info$date <- paste0(format(Sys.time(), "%Y/%m/%d %H:%M:%S"), " +0000") @@ -274,8 +282,8 @@ settings$info$date <- paste0(format(Sys.time(), "%Y/%m/%d %H:%M:%S"), " +0000") # -------------------------------------------------------------------------------------------------- #---------------------------------------------- PEcAn Workflow ------------------------------------- # -------------------------------------------------------------------------------------------------- -#Update/fix/check settings. Will only run the first time it's called, unless force=TRUE -settings <- PEcAn.settings::prepare.settings(settings, force=TRUE) +# Update/fix/check settings. 
Will only run the first time it's called, unless force=TRUE +settings <- PEcAn.settings::prepare.settings(settings, force = TRUE) setwd(settings$outdir) # ggsave( # file.path(settings$outdir, "Obs_plot.pdf"), @@ -284,7 +292,7 @@ setwd(settings$outdir) # height = 9 # ) -#Write pecan.CHECKED.xml +# Write pecan.CHECKED.xml PEcAn.settings::write.settings(settings, outputfile = "pecan.CHECKED.xml") # start from scratch if no continue is passed in statusFile <- file.path(settings$outdir, "STATUS") @@ -293,67 +301,67 @@ if (length(which(commandArgs() == "--continue")) == 0 && file.exists(statusFile) } -#manually add in clim files -con <-try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) +# manually add in clim files +con <- try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) input_check <- PEcAn.DB::dbfile.input.check( - siteid= site_info$site_id %>% as.character(), - startdate = met.start %>% as.Date, + siteid = site_info$site_id %>% as.character(), + startdate = met.start %>% as.Date(), enddate = NULL, parentid = NA, - mimetype="text/csv", - formatname="Sipnet.climna", + mimetype = "text/csv", + formatname = "Sipnet.climna", con = con, hostname = PEcAn.remote::fqdn(), - pattern = NULL, + pattern = NULL, exact.dates = TRUE, - return.all=TRUE + return.all = TRUE ) -#If INPUTS already exists, add id and met path to settings file +# If INPUTS already exists, add id and met path to settings file -if(length(input_check$id) > 0){ - #met paths - clim_check = list() - for(i in 1:length(input_check$file_path)){ - +if (length(input_check$id) > 0) { + # met paths + clim_check <- list() + for (i in 1:length(input_check$file_path)) { clim_check[[i]] <- file.path(input_check$file_path[i], input_check$file_name[i]) - }#end i loop for creating file paths - #ids - index_id = list() - index_path = list() - for(i in 1:length(input_check$id)){ - index_id[[i]] = as.character(input_check$id[i])#get ids as list - - }#end i loop for making lists - names(index_id) = sprintf("id%s",seq(1:length(input_check$id))) #rename list - names(clim_check) = sprintf("path%s",seq(1:length(input_check$id))) - - settings$run$inputs$met$id = index_id - settings$run$inputs$met$path = clim_check -}else{PEcAn.utils::logger.error("No met file found")} -#settings <- PEcAn.workflow::do_conversions(settings, T, T, T) - -if(is_empty(settings$run$inputs$met$path) & length(clim_check)>0){ - settings$run$inputs$met$id = index_id - settings$run$inputs$met$path = clim_check + } # end i loop for creating file paths + # ids + index_id <- list() + index_path <- list() + for (i in 1:length(input_check$id)) { + index_id[[i]] <- as.character(input_check$id[i]) # get ids as list + } # end i loop for making lists + names(index_id) <- sprintf("id%s", seq(1:length(input_check$id))) # rename list + names(clim_check) <- sprintf("path%s", seq(1:length(input_check$id))) + + settings$run$inputs$met$id <- index_id + settings$run$inputs$met$path <- clim_check +} else { + PEcAn.utils::logger.error("No met file found") +} +# settings <- PEcAn.workflow::do_conversions(settings, T, T, T) + +if (is_empty(settings$run$inputs$met$path) & length(clim_check) > 0) { + settings$run$inputs$met$id <- index_id + settings$run$inputs$met$path <- clim_check } -#query database for previous forecast run (i.e. t=0) +# query database for previous forecast run (i.e. 
t=0) query.run <- paste0("SELECT * FROM runs WHERE site_id =", site_info$site_id) run <- PEcAn.DB::db.query(query.run, con) -#filter for sda.start +# filter for sda.start run <- dplyr::filter(run, start_time == sda.start) daydiff <- difftime(Sys.time(), run$created_at, units = "days") runday <- which(min(daydiff) == daydiff) startday <- run$created_at[runday] run <- dplyr::filter(run, as.Date(created_at) == as.Date(startday)) -#add filter for model +# add filter for model query.ens <- paste0("SELECT * FROM ensembles WHERE id =", run$ensemble_id) ens <- PEcAn.DB::db.query(query.ens, con) -#now that we have the workflow id for forecast run we can close connection to BETY +# now that we have the workflow id for forecast run we can close connection to BETY PEcAn.DB::db.close(con) -#list files in output folder +# list files in output folder restart$filepath <- paste0(forecastPath, "PEcAn_", ens$workflow_id, "/") restart$start.cut <- lubridate::as_datetime(obs.mean$`2021-06-02`$date) restart$start.cut <- format(restart$start.cut, "%Y-%m-%d %H:%M:%S", tz = "EST") @@ -361,11 +369,11 @@ restart$runids <- run$id # t0Files <- list.files(t0Path, full.names = TRUE, pattern = "out") # t0FilesEns <- list.files(t0Files) -#use default param template plus model output to build new param files to pass to SDA along with met +# use default param template plus model output to build new param files to pass to SDA along with met -#still want to run this to get the IC files -#settings <- PEcAn.workflow::do_conversions(settings) #end if loop for existing inputs +# still want to run this to get the IC files +# settings <- PEcAn.workflow::do_conversions(settings) #end if loop for existing inputs # if(is_empty(settings$run$inputs$met$path) & length(clim_check)>0){ # settings$run$inputs$met$id = index_id @@ -393,13 +401,13 @@ restart$runids <- run$id # PEcAn.utils::status.end() # } # } -# +# # #sample from parameters used for both sensitivity analysis and Ens # get.parameter.samples(settings, ens.sample.method = settings$ensemble$samplingspace$parameters$method) # Setting dates in assimilation tags - This will help with preprocess split in SDA code -settings$state.data.assimilation$start.date <-as.character(sda.start) -settings$state.data.assimilation$end.date <-as.character(sda.end) +settings$state.data.assimilation$start.date <- as.character(sda.start) +settings$state.data.assimilation$end.date <- as.character(sda.end) # if (nodata) { # obs.mean <- obs.mean %>% map(function(x) @@ -414,132 +422,127 @@ settings$state.data.assimilation$end.date <-as.character(sda.end) # if(restart == TRUE){ # if(!dir.exists("SDA")) dir.create("SDA",showWarnings = F) -# -# #Update the SDA Output to just have last time step +# +# #Update the SDA Output to just have last time step # temp<- new.env() # load(file.path(restart.path, "SDA", "sda.output.Rdata"), envir = temp) # temp <- as.list(temp) -# +# # #we want ANALYSIS, FORECAST, and enkf.parms to match up with how many days obs data we have -# # +24 because it's hourly now and we want the next day as the start +# # +24 because it's hourly now and we want the next day as the start # if(length(temp$ANALYSIS) > 1){ -# -# for(i in 1:days.obs + 1){ +# +# for(i in 1:days.obs + 1){ # temp$ANALYSIS[[i]] <- temp$ANALYSIS[[i + 24]] # } -# for(i in rev((days.obs + 2):length(temp$ANALYSIS))){ +# for(i in rev((days.obs + 2):length(temp$ANALYSIS))){ # temp$ANALYSIS[[i]] <- NULL # } -# -# -# for(i in 1:days.obs + 1){ +# +# +# for(i in 1:days.obs + 1){ # temp$FORECAST[[i]] <- temp$FORECAST[[i + 24]] # } 
-# for(i in rev((days.obs + 2):length(temp$FORECAST))){ +# for(i in rev((days.obs + 2):length(temp$FORECAST))){ # temp$FORECAST[[i]] <- NULL # } -# -# for(i in 1:days.obs + 1){ +# +# for(i in 1:days.obs + 1){ # temp$enkf.params[[i]] <- temp$enkf.params[[i + 24]] # } -# for(i in rev((days.obs + 2):length(temp$enkf.params))){ +# for(i in rev((days.obs + 2):length(temp$enkf.params))){ # temp$enkf.params[[i]] <- NULL -# } -# +# } +# # } -# temp$t = 1 -# -# #change inputs path to match sampling met paths -# +# temp$t = 1 +# +# #change inputs path to match sampling met paths +# # for(i in 1: length(temp$inputs$ids)){ -# +# # temp$inputs$samples[i] <- settings$run$inputs$met$path[temp$inputs$ids[i]] -# +# # } -# +# # temp1<- new.env() # list2env(temp, envir = temp1) # save(list = c("ANALYSIS", 'FORECAST', "enkf.params", "ensemble.id", "ensemble.samples", 'inputs', 'new.params', 'new.state', 'run.id', 'site.locs', 't', 'Viz.output', 'X'), -# envir = temp1, -# file = file.path(settings$outdir, "SDA", "sda.output.Rdata")) -# -# -# +# envir = temp1, +# file = file.path(settings$outdir, "SDA", "sda.output.Rdata")) +# +# +# # temp.out <- new.env() # load(file.path(restart.path, "SDA", 'outconfig.Rdata'), envir = temp.out) # temp.out <- as.list(temp.out) # temp.out$outconfig$samples <- NULL -# +# # temp.out1 <- new.env() # list2env(temp.out, envir = temp.out1) -# save(list = c('outconfig'), -# envir = temp.out1, +# save(list = c('outconfig'), +# envir = temp.out1, # file = file.path(settings$outdir, "SDA", "outconfig.Rdata")) -# -# -# -# #copy over run and out folders -# +# +# +# +# #copy over run and out folders +# # if(!dir.exists("run")) dir.create("run",showWarnings = F) -# +# # files <- list.files(path = file.path(restart.path, "run/"), full.names = T, recursive = T, include.dirs = T, pattern = "sipnet.clim") # readfiles <- list.files(path = file.path(restart.path, "run/"), full.names = T, recursive = T, include.dirs = T, pattern = "README.txt") -# +# # newfiles <- gsub(pattern = restart.path, settings$outdir, files) # readnewfiles <- gsub(pattern = restart.path, settings$outdir, readfiles) -# +# # rundirs <- gsub(pattern = "/sipnet.clim", "", files) # rundirs <- gsub(pattern = restart.path, settings$outdir, rundirs) # for(i in 1 : length(rundirs)){ -# dir.create(rundirs[i]) +# dir.create(rundirs[i]) # file.copy(from = files[i], to = newfiles[i]) -# file.copy(from = readfiles[i], to = readnewfiles[i])} +# file.copy(from = readfiles[i], to = readnewfiles[i])} # file.copy(from = paste0(restart.path, '/run/runs.txt'), to = paste0(settings$outdir,'/run/runs.txt' )) -# +# # if(!dir.exists("out")) dir.create("out",showWarnings = F) -# +# # files <- list.files(path = file.path(restart.path, "out/"), full.names = T, recursive = T, include.dirs = T, pattern = "sipnet.out") # newfiles <- gsub(pattern = restart.path, settings$outdir, files) # outdirs <- gsub(pattern = "/sipnet.out", "", files) # outdirs <- gsub(pattern = restart.path, settings$outdir, outdirs) # for(i in 1 : length(outdirs)){ -# dir.create(outdirs[i]) -# file.copy(from = files[i], to = newfiles[i])} -# -# } +# dir.create(outdirs[i]) +# file.copy(from = files[i], to = newfiles[i])} +# +# } # -------------------------------------------------------------------------------------------------- #--------------------------------- Run state data assimilation ------------------------------------- # -------------------------------------------------------------------------------------------------- 
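# Illustrative sketch (hypothetical numbers, not from the patch): the sda.enkf()
# call below receives obs.mean and obs.cov as lists keyed by timestamp, where each
# element holds a named mean vector and a matching covariance matrix for the
# assimilated variables built above (NEE, Qle, LAI). A minimal one-timestep example:
vars <- c("NEE", "Qle", "LAI")
obs.mean.example <- list(
  "2021-07-28 06:00:00" = stats::setNames(c(-1.2e-08, 150, 3.1), vars) # hypothetical means
)
obs.cov.example <- list(
  "2021-07-28 06:00:00" = diag(c(1e-17, 400, 0.25)) # hypothetical variances on the diagonal
)
dimnames(obs.cov.example[[1]]) <- list(vars, vars)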
-#source('/projectnb/dietzelab/ahelgeso/pecan/modules/assim.sequential/R/Nimble_codes.R') +# source('/projectnb/dietzelab/ahelgeso/pecan/modules/assim.sequential/R/Nimble_codes.R') -if(restart == FALSE) unlink(c('run','out','SDA'), recursive = T) +if (restart == FALSE) unlink(c("run", "out", "SDA"), recursive = T) debugonce(PEcAnAssimSequential::sda.enkf) -if ('state.data.assimilation' %in% names(settings)) { +if ("state.data.assimilation" %in% names(settings)) { if (PEcAn.utils::status.check("SDA") == 0) { PEcAn.utils::status.start("SDA") PEcAnAssimSequential::sda.enkf( - settings, - restart=restart, - Q=0, + settings, + restart = restart, + Q = 0, obs.mean = obs.mean, obs.cov = obs.cov, control = list( trace = TRUE, - interactivePlot =FALSE, - TimeseriesPlot =TRUE, - BiasPlot =FALSE, + interactivePlot = FALSE, + TimeseriesPlot = TRUE, + BiasPlot = FALSE, debug = FALSE, - pause=FALSE + pause = FALSE ) ) - + PEcAn.utils::status.end() } } - - - - - diff --git a/modules/assim.sequential/inst/WillowCreek/Utils.R b/modules/assim.sequential/inst/WillowCreek/Utils.R index 4719792f74c..6d093022c20 100644 --- a/modules/assim.sequential/inst/WillowCreek/Utils.R +++ b/modules/assim.sequential/inst/WillowCreek/Utils.R @@ -1,15 +1,15 @@ ##' ##' Rounds a date to the previous 6 hours (0:00, 6:00, 12:00, or 18:00). -##' +##' ##' @author Luke Dramko round.to.six.hours <- function(date = Sys.time() - lubridate::hours(2)) { if (is.character(date)) { date <- as.POSIXct(date, tz = "UTC") } - forecast_hour = (lubridate::hour(date) %/% 6) * 6 #Integer division by 6 followed by re-multiplication acts like a "floor function" for multiples of 6 - forecast_hour = sprintf("%04d", forecast_hour * 100) - date = as.POSIXct( + forecast_hour <- (lubridate::hour(date) %/% 6) * 6 # Integer division by 6 followed by re-multiplication acts like a "floor function" for multiples of 6 + forecast_hour <- sprintf("%04d", forecast_hour * 100) + date <- as.POSIXct( paste0( lubridate::year(date), "-", @@ -22,47 +22,51 @@ round.to.six.hours <- ), tz = "UTC" ) - + return(date) } ploting_fluxes <- function(obs.raw) { obs.plot <- obs.raw %>% - tidyr::gather(Param, Value,-c(Date)) %>% - filter(!( - Param %in% c( - "FjDay", - "U", - "Day", - "DoY", - "FC", - "FjFay", - "Hour", - "Month", - "SC", - "Ustar", - "Year", - "H", - "Flag" - ) - ), - Value != -999) %>% - #filter((Date %>% as.Date) %in% (names(prep.data) %>% as.Date())) %>% + tidyr::gather(Param, Value, -c(Date)) %>% + filter( + !( + Param %in% c( + "FjDay", + "U", + "Day", + "DoY", + "FC", + "FjFay", + "Hour", + "Month", + "SC", + "Ustar", + "Year", + "H", + "Flag" + ) + ), + Value != -999 + ) %>% + # filter((Date %>% as.Date) %in% (names(prep.data) %>% as.Date())) %>% ggplot(aes(Date, Value)) + geom_line(aes(color = Param), lwd = 1) + geom_point(aes(color = Param), size = 3) + - facet_wrap(~ Param, scales = "free", ncol = 1) + + facet_wrap(~Param, scales = "free", ncol = 1) + scale_x_datetime( breaks = scales::date_breaks("12 hour"), labels = scales::date_format("%m/%d-%H:00") ) + scale_color_brewer(palette = "Set1") + theme_minimal(base_size = 15) + - geom_hline(yintercept = 0)+ + geom_hline(yintercept = 0) + labs(y = "") + - theme(legend.position = "none", - axis.text.x = element_text(angle = 30, hjust = 1)) - + theme( + legend.position = "none", + axis.text.x = element_text(angle = 30, hjust = 1) + ) + return(obs.plot) } diff --git a/modules/assim.sequential/inst/WillowCreek/download_WCr.R b/modules/assim.sequential/inst/WillowCreek/download_WCr.R index 
d35f6bc4f1b..386ffe9a64e 100644 --- a/modules/assim.sequential/inst/WillowCreek/download_WCr.R +++ b/modules/assim.sequential/inst/WillowCreek/download_WCr.R @@ -1,9 +1,9 @@ download_US_WCr_met <- function(start_date, end_date) { base_url <- "http://co2.aos.wisc.edu/data/cheas/wcreek/flux/prelim/wcreek" - + start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + # Reading in the data raw.data <- start_year:end_year %>% purrr::map_df(function(syear) { @@ -26,33 +26,34 @@ download_US_WCr_met <- function(start_date, end_date) { ) }) %>% mutate_all(funs(as.numeric)) - - #Constructing the date based on the columns we have - #Converting the WCR data from CST to UTC - raw.data$date <-lubridate::with_tz(as.POSIXct(paste0(raw.data$V1,"/",raw.data$V2,"/",raw.data$V3," ", raw.data$V4 %>% as.integer(), ":",(raw.data$V4-as.integer(raw.data$V4))*60), - format="%Y/%m/%d %H:%M", tz="US/Central"), tz = "UTC") - - - + + # Constructing the date based on the columns we have + # Converting the WCR data from CST to UTC + raw.data$date <- lubridate::with_tz(as.POSIXct(paste0(raw.data$V1, "/", raw.data$V2, "/", raw.data$V3, " ", raw.data$V4 %>% as.integer(), ":", (raw.data$V4 - as.integer(raw.data$V4)) * 60), + format = "%Y/%m/%d %H:%M", tz = "US/Central" + ), tz = "UTC") + + + start_date <- as.POSIXct(start_date, format = "%Y-%m-%d", tz = "UTC") end_date <- as.POSIXct(end_date, format = "%Y-%m-%d", tz = "UTC") - # Some cleaning and filtering - raw.data <- raw.data %>% - dplyr::select(V1,V2,V3,V4,V5, V6, V26, V35, V40, V59, date) %>% - filter(date >= start_date & date <=end_date) - - #Colnames changed + # Some cleaning and filtering + raw.data <- raw.data %>% + dplyr::select(V1, V2, V3, V4, V5, V6, V26, V35, V40, V59, date) %>% + filter(date >= start_date & date <= end_date) + + # Colnames changed colnames(raw.data) <- c("Year", "Month", "Day", "Hour", "DoY", "FjDay", "Tair", "rH", "Tsoil", "Rg", "date") - + return(raw.data) } download_US_WCr_flux <- function(start_date, end_date) { base_url <- "http://co2.aos.wisc.edu/data/cheas/wcreek/flux/prelim/wcreek" - + start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + # Reading in the data raw.data <- start_year:end_year %>% purrr::map_df(function(syear) { @@ -75,20 +76,21 @@ download_US_WCr_flux <- function(start_date, end_date) { ) }) %>% mutate_all(funs(as.numeric)) - - #Constructing the date based on the columns we have - raw.data$date <-as.POSIXct(paste0(raw.data$V1,"/",raw.data$V2,"/",raw.data$V3," ", raw.data$V4 %>% as.integer(), ":",(raw.data$V4-as.integer(raw.data$V4))*60), - format="%Y/%m/%d %H:%M", tz="UTC") - + + # Constructing the date based on the columns we have + raw.data$date <- as.POSIXct(paste0(raw.data$V1, "/", raw.data$V2, "/", raw.data$V3, " ", raw.data$V4 %>% as.integer(), ":", (raw.data$V4 - as.integer(raw.data$V4)) * 60), + format = "%Y/%m/%d %H:%M", tz = "UTC" + ) + start_date <- as.POSIXct(start_date, format = "%Y-%m-%d", tz = "UTC") end_date <- as.POSIXct(end_date, format = "%Y-%m-%d", tz = "UTC") - - # Some cleaning and filtering - raw.data <- raw.data %>% + + # Some cleaning and filtering + raw.data <- raw.data %>% # select(-V5, -V6) %>% - dplyr::filter(date >= start_date & date <=end_date) - #Colnames changed + dplyr::filter(date >= start_date & date <= end_date) + # Colnames changed colnames(raw.data) <- c("Year", "Month", "Day", "Hour", "DoY", "FjDay", "SC", "FC", "NEE", "LE", "H", "Ustar", "Flag", "date") - + return(raw.data) } diff --git 
a/modules/assim.sequential/inst/WillowCreek/download_soilmoist_WCr.R b/modules/assim.sequential/inst/WillowCreek/download_soilmoist_WCr.R index 1d4063160cb..ec8ce62a04e 100644 --- a/modules/assim.sequential/inst/WillowCreek/download_soilmoist_WCr.R +++ b/modules/assim.sequential/inst/WillowCreek/download_soilmoist_WCr.R @@ -2,14 +2,14 @@ download_soilmoist_WCr <- function(start_date, end_date) { base_url <- "http://co2.aos.wisc.edu/data/cheas/wcreek/flux/prelim/clean/ameriflux/US-WCr_HH_" start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + # Reading in the data raw.data <- start_year:end_year %>% purrr::map_df(function(syear) { influx <- tryCatch( read.table( - paste0(base_url, syear, "01010000_", syear+1, "01010000.csv"), + paste0(base_url, syear, "01010000_", syear + 1, "01010000.csv"), sep = ",", header = TRUE, stringsAsFactors = F ) %>% @@ -25,21 +25,24 @@ download_soilmoist_WCr <- function(start_date, end_date) { ) }) %>% mutate_all(funs(as.numeric)) - - #Constructing the date based on the columns we have - if(dim(raw.data)[1] > 0 & dim(raw.data)[2] > 0){ - raw.data$Time <-as.POSIXct(as.character(raw.data$TIMESTAMP_START), - format="%Y%m%d%H%M", tz="UTC") - # Some cleaning and filtering - raw.data <- raw.data %>% + + # Constructing the date based on the columns we have + if (dim(raw.data)[1] > 0 & dim(raw.data)[2] > 0) { + raw.data$Time <- as.POSIXct(as.character(raw.data$TIMESTAMP_START), + format = "%Y%m%d%H%M", tz = "UTC" + ) + # Some cleaning and filtering + raw.data <- raw.data %>% dplyr::select(SWC_1_1_1, SWC_1_2_1, SWC_1_3_1, SWC_1_4_1, SWC_1_5_1, Time) %>% - na_if(-9999) %>% - filter(Time >= start_date & Time <=end_date) - - #get average soil moisture - - raw.data$avgsoil <- raw.data$SWC_1_2_1*0.12 + raw.data$SWC_1_3_1*0.16 + raw.data$SWC_1_4_1*0.32 + raw.data$SWC_1_5_1*0.4 + na_if(-9999) %>% + filter(Time >= start_date & Time <= end_date) + + # get average soil moisture + + raw.data$avgsoil <- raw.data$SWC_1_2_1 * 0.12 + raw.data$SWC_1_3_1 * 0.16 + raw.data$SWC_1_4_1 * 0.32 + raw.data$SWC_1_5_1 * 0.4 raw.data <- raw.data %>% dplyr::select(Time, avgsoil) - }else(raw.data <- NULL) + } else { + (raw.data <- NULL) + } return(raw.data) -} \ No newline at end of file +} diff --git a/modules/assim.sequential/inst/WillowCreek/gapfill_WCr.R b/modules/assim.sequential/inst/WillowCreek/gapfill_WCr.R index 6d1ecca1358..e1c840ae538 100644 --- a/modules/assim.sequential/inst/WillowCreek/gapfill_WCr.R +++ b/modules/assim.sequential/inst/WillowCreek/gapfill_WCr.R @@ -1,58 +1,57 @@ -##'@title gapfill_WCr -##'@section purpose: -##'This function uses the REddyProc package to gap fill half-hourly data -##'from the Willow Creek Ameriflux tower +##' @title gapfill_WCr +##' @section purpose: +##' This function uses the REddyProc package to gap fill half-hourly data +##' from the Willow Creek Ameriflux tower ##' -##'@param start_date First date of data download -##'@param end_date End date of data download -##'@param var variable (NEE or LE) that you want to do data assimilation on -##'@return data frame of gapfilled data -##'@export -##'@author Luke Dramko and K. Zarada +##' @param start_date First date of data download +##' @param end_date End date of data download +##' @param var variable (NEE or LE) that you want to do data assimilation on +##' @return data frame of gapfilled data +##' @export +##' @author Luke Dramko and K. 
Zarada gapfill_WCr <- function(start_date, end_date, - var, nsample=10, - FUN.met=download_US_WCr_met, - FUN.flux=download_US_WCr_flux){ - - + var, nsample = 10, + FUN.met = download_US_WCr_met, + FUN.flux = download_US_WCr_flux) { start_date <- as.Date(start_date) end_date <- as.Date(end_date) - - #download WCr flux and met date - flux <- FUN.flux(start_date, end_date) - met <- FUN.met(start_date, end_date) - # converting NEE and LE - #change -999 to NA's - flux[flux == -999] <- NA #want both NEE and LE to be larger numbers - #flux$NEE<-PEcAn.utils::misc.convert(flux$NEE, "umol C m-2 s-1", "kg C m-2 s-1") - #flux$LE<-flux$LE*1e-6 - #join met and flux data by date (which includes time and day) + + # download WCr flux and met date + flux <- FUN.flux(start_date, end_date) + met <- FUN.met(start_date, end_date) + # converting NEE and LE + # change -999 to NA's + flux[flux == -999] <- NA # want both NEE and LE to be larger numbers + # flux$NEE<-PEcAn.utils::misc.convert(flux$NEE, "umol C m-2 s-1", "kg C m-2 s-1") + # flux$LE<-flux$LE*1e-6 + # join met and flux data by date (which includes time and day) met <- met %>% dplyr::select(date, Tair, Rg, Tsoil) flux <- left_join(flux, met, by = "date") %>% - dplyr::select(-FjDay, -SC, -FC) %>% + dplyr::select(-FjDay, -SC, -FC) %>% distinct(date, .keep_all = TRUE) - #print(str(flux)) - - - #Start REddyProc gapfilling + # print(str(flux)) + + + # Start REddyProc gapfilling suppressWarnings({ - EddyDataWithPosix.F <- fConvertTimeToPosix(flux, - 'YDH', - Year.s = 'Year' - , - Day.s = 'DoY', - Hour.s = 'Hour') %>% - dplyr::select(-date,-Month,-Day) %>% + "YDH", + Year.s = "Year", + Day.s = "DoY", + Hour.s = "Hour" + ) %>% + dplyr::select(-date, -Month, -Day) %>% distinct(DateTime, .keep_all = TRUE) }) - - - EddyProc.C <- sEddyProc$new('WCr', EddyDataWithPosix.F, - c(var,'Rg','Tair', 'Ustar')) - + + + EddyProc.C <- sEddyProc$new( + "WCr", EddyDataWithPosix.F, + c(var, "Rg", "Tair", "Ustar") + ) + tryCatch( { EddyProc.C$sMDSGapFill(var) @@ -61,10 +60,10 @@ gapfill_WCr <- function(start_date, end_date, PEcAn.logger::logger.warn(e) } ) - - #Merging the output + + # Merging the output FilledEddyData.F <- EddyProc.C$sExportResults() CombinedData.F <- cbind(flux, FilledEddyData.F) - + return(CombinedData.F) } diff --git a/modules/assim.sequential/inst/WillowCreek/prep.data.assim.R b/modules/assim.sequential/inst/WillowCreek/prep.data.assim.R index a6765c284f6..0d409b5406f 100644 --- a/modules/assim.sequential/inst/WillowCreek/prep.data.assim.R +++ b/modules/assim.sequential/inst/WillowCreek/prep.data.assim.R @@ -1,90 +1,93 @@ -##'@title prep.data.assim -##'@section purpose: -##'This function finds flux uncertainty and finds mean and cov -##'for a call to PEcAnAssimSequential::sda.enkf() +##' @title prep.data.assim +##' @section purpose: +##' This function finds flux uncertainty and finds mean and cov +##' for a call to PEcAnAssimSequential::sda.enkf() ##' -##'@param settings the PEcAn settings object (a collection of nested lists) -##'@param numvals number of simulated data points for each time point -##'@param vars variable (NEE or LE) that you want to do data assimilation on -##'@param data.len how many hours for the output (default is 48 hours) -##'@return None -##'@export -##'@author Luke Dramko and K. 
Zarada and Hamze Dokoohaki +##' @param settings the PEcAn settings object (a collection of nested lists) +##' @param numvals number of simulated data points for each time point +##' @param vars variable (NEE or LE) that you want to do data assimilation on +##' @param data.len how many hours for the output (default is 48 hours) +##' @return None +##' @export +##' @author Luke Dramko and K. Zarada and Hamze Dokoohaki prep.data.assim <- function(start_date, end_date, numvals, vars, data.len = 3, sda.start) { - - Date.vec <-NULL - + Date.vec <- NULL + gapfilled.vars <- vars %>% purrr::map_dfc(function(var) { - field_data <- gapfill_WCr(start_date, end_date, var) - + PEcAn.logger::logger.info(paste(var, " is done")) - #I'm sending the date out to use it later on + # I'm sending the date out to use it later on return(field_data) }) - - #gapfilled.vars$NEE_f = PEcAn.utils::misc.convert(gapfilled.vars$NEE_f, "kg C m-2 s-1", "umol C m-2 s-1") - - #Reading the columns we need + + # gapfilled.vars$NEE_f = PEcAn.utils::misc.convert(gapfilled.vars$NEE_f, "kg C m-2 s-1", "umol C m-2 s-1") + + # Reading the columns we need cols <- grep(paste0("_*_f$"), colnames(gapfilled.vars), value = TRUE) - gapfilled.vars <- gapfilled.vars %>% dplyr::select(Date=date...11, Flag = Flag...10,cols) - - #Creating NEE and LE filled output - gapfilled.vars.out <- gapfilled.vars %>% dplyr::select(-Flag) %>% + gapfilled.vars <- gapfilled.vars %>% dplyr::select(Date = date...11, Flag = Flag...10, cols) + + # Creating NEE and LE filled output + gapfilled.vars.out <- gapfilled.vars %>% + dplyr::select(-Flag) %>% filter(Date >= (sda.start - lubridate::days(data.len)) & Date < sda.start) - - #Pecan Flux Uncertainty - processed.flux <- 3:(3+length(vars)-1) %>% + + # Pecan Flux Uncertainty + processed.flux <- 3:(3 + length(vars) - 1) %>% purrr::map(function(col.num) { - field_data <- gapfilled.vars[,c(1,2,col.num)] - + field_data <- gapfilled.vars[, c(1, 2, col.num)] + uncertainty_vals <- list() # Creates a proxy row for rbinding sums <- list() # One vector holds the mean for each variable. 
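
# A minimal sketch (toy data; column names here are hypothetical) of why the
# select() call above refers to date...11 and Flag...10: purrr::map_dfc() binds
# one gap-filled data frame per variable column-wise, and duplicated names such
# as date and Flag get repaired with positional ...<n> suffixes.
library(purrr)
library(tibble)
one_var <- function(v) tibble(date = Sys.Date() + 0:2, Flag = 0L, value_f = rnorm(3))
bound <- map_dfc(c("NEE", "LE"), one_var)
names(bound)
# "date...1" "Flag...2" "value_f...3" "date...4" "Flag...5" "value_f...6"
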
obs.mean <- NULL - # for each of Gap filling uncertainty bands + # for each of Gap filling uncertainty bands # The first will be always Date, second Flag and the third is the flux - AMF.params <- PEcAn.uncertainty::flux.uncertainty(field_data[,3], QC = field_data$Flag) - + AMF.params <- PEcAn.uncertainty::flux.uncertainty(field_data[, 3], QC = field_data$Flag) + # Create proxy row for rbinding - random_mat = NULL - new_col = rep(0, dim(field_data)[1]) - + random_mat <- NULL + new_col <- rep(0, dim(field_data)[1]) + # Create a new column # i: the particular variable being worked with; j: the column number; k: the row number for (j in 1:numvals) { # number of random numbers obs <- field_data[, 3][!is.na(field_data[, 3])] pos <- obs >= 0 - + res <- obs - res[pos] <- rexp(length(obs[pos]), - 1 / (AMF.params$intercept[[1]] + (AMF.params$slopeP[[1]] * obs[pos]))) - - res[!pos] <- rexp(length(obs[!pos]), - 1 / (AMF.params$intercept[[1]] + (AMF.params$slopeN[[1]] * obs[!pos]))) - + res[pos] <- rexp( + length(obs[pos]), + 1 / (AMF.params$intercept[[1]] + (AMF.params$slopeP[[1]] * obs[pos])) + ) + + res[!pos] <- rexp( + length(obs[!pos]), + 1 / (AMF.params$intercept[[1]] + (AMF.params$slopeN[[1]] * obs[!pos])) + ) + random_multiplier <- sample(c(-1, 1), length(res), replace = TRUE) simulated <- obs + (random_multiplier * res) - - random_mat = cbind(random_mat, simulated) + + random_mat <- cbind(random_mat, simulated) } # end j - + obs.mean <- c(obs.mean, mean(field_data[, 3], na.rm = TRUE)) # this keeps the mean of each day for the whole time series and all variables - sums = c(sums, list(random_mat)) - - data.frame(Date=field_data$Date[!is.na(field_data[, 3])],sums) + sums <- c(sums, list(random_mat)) + + data.frame(Date = field_data$Date[!is.na(field_data[, 3])], sums) }) # end of map - - - #I'm sending mixing up simulations of vars to aggregate them first and then estimate their var/cov - outlist<-processed.flux %>% + + + # I'm sending mixing up simulations of vars to aggregate them first and then estimate their var/cov + outlist <- processed.flux %>% map2_dfc(vars, function(x, xnames) { names(x)[2:numvals] <- paste0(names(x)[2:numvals], xnames) - + x %>% filter(Date >= (sda.start - lubridate::hours(data.len)) & Date < sda.start) %>% mutate(Interval = lubridate::round_date(Date, "1 hour")) %>% @@ -92,29 +95,31 @@ prep.data.assim <- function(start_date, end_date, numvals, vars, data.len = 3, s }) %>% split(.$Interval...202) %>% map(function(row) { - - #fidning the interval cols / taking them out + # fidning the interval cols / taking them out colsDates <- grep(paste0("Interval"), colnames(row), value = FALSE) Date1 <- row[, colsDates[1]] row <- row[, -c(colsDates)] # finding the order of columns in dataframe - var.order <- split(1:ncol(row), - ceiling(seq_along(1:ncol(row))/(ncol(row)/length(vars)))) - - #combine all the numbers for this time interval - alldata <- var.order %>% - map_dfc(~row[,.x] %>% unlist %>% as.numeric) %>% - setNames(vars) - # mean and the cov between all the state variables is estimated here + var.order <- split( + 1:ncol(row), + ceiling(seq_along(1:ncol(row)) / (ncol(row) / length(vars))) + ) + + # combine all the numbers for this time interval + alldata <- var.order %>% + map_dfc(~ row[, .x] %>% + unlist() %>% + as.numeric()) %>% + setNames(vars) + # mean and the cov between all the state variables is estimated here return(list( Date = Date1 %>% unique(), covs = cov(alldata), means = apply(alldata, 2, mean) )) }) - - outlist <- list(obs=outlist, rawobs=gapfilled.vars.out ) 
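
# A minimal standalone sketch of the residual-resampling loop above, with
# hypothetical intercept/slope values standing in for the ones returned by
# PEcAn.uncertainty::flux.uncertainty(): residual magnitudes are drawn from an
# exponential whose mean grows with the flux magnitude, and a random +/- 1
# multiplier makes the perturbation symmetric around the observation.
set.seed(1)
obs       <- rnorm(48, mean = 2, sd = 3) # toy half-hourly flux series
intercept <- 0.5  # hypothetical flux.uncertainty()$intercept
slopeP    <- 0.1  # hypothetical slope for positive fluxes
slopeN    <- -0.2 # hypothetical slope for negative fluxes (negative, so the rate stays positive)
simulate_once <- function(obs) {
  pos <- obs >= 0
  res <- numeric(length(obs))
  res[pos]  <- rexp(sum(pos), 1 / (intercept + slopeP * obs[pos]))
  res[!pos] <- rexp(sum(!pos), 1 / (intercept + slopeN * obs[!pos]))
  obs + sample(c(-1, 1), length(obs), replace = TRUE) * res
}
random_mat <- replicate(100, simulate_once(obs)) # 48 x 100 simulated replicates
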
- + + outlist <- list(obs = outlist, rawobs = gapfilled.vars.out) + return(outlist) - } # prep.data.assim diff --git a/modules/assim.sequential/inst/WillowCreek/workflow.template.R b/modules/assim.sequential/inst/WillowCreek/workflow.template.R index c277d88296c..761debabbb4 100755 --- a/modules/assim.sequential/inst/WillowCreek/workflow.template.R +++ b/modules/assim.sequential/inst/WillowCreek/workflow.template.R @@ -14,36 +14,36 @@ plan(multiprocess) # ---------------------------------------------------------------------------------------------- #------------------------------------------ That's all we need xml path and the out folder ----- # ---------------------------------------------------------------------------------------------- -args = c("/fs/data3/kzarada/ouput", FALSE, "gefs.sipnet.template.xml", TRUE, 3) +args <- c("/fs/data3/kzarada/ouput", FALSE, "gefs.sipnet.template.xml", TRUE, 3) -if (is.na(args[1])){ +if (is.na(args[1])) { outputPath <- "/fs/data3/kzarada/ouput" } else { outputPath <- args[1] } -if (is.na(args[2])){ +if (is.na(args[2])) { nodata <- FALSE } else { - nodata <-as.logical(args[2]) + nodata <- as.logical(args[2]) } -if (is.na(args[3])){ - xmlTempName <-"gefs.sipnet.template.xml" +if (is.na(args[3])) { + xmlTempName <- "gefs.sipnet.template.xml" } else { xmlTempName <- args[3] } -if (is.na(args[4])){ - restart <-TRUE +if (is.na(args[4])) { + restart <- TRUE } else { restart <- args[4] } -if (is.na(args[5])){ - days.obs <- 3 #how many of observed data to include -- not including today +if (is.na(args[5])) { + days.obs <- 3 # how many of observed data to include -- not including today } else { - days.obs <- as.numeric(args[5]) + days.obs <- as.numeric(args[5]) } setwd(outputPath) @@ -51,20 +51,21 @@ setwd(outputPath) #------------------------------------------ sourcing the required tools ------------------------- #------------------------------------------------------------------------------------------------ c( - 'Utils.R', - 'download_WCr.R', + "Utils.R", + "download_WCr.R", "gapfill_WCr.R", - 'prep.data.assim.R' -) %>% walk( ~ source( + "prep.data.assim.R" +) %>% walk(~ source( system.file("WillowCreek", - .x, - package = "PEcAnAssimSequential") + .x, + package = "PEcAnAssimSequential" + ) )) -#reading xml +# reading xml settings <- read.settings("/fs/data3/kzarada/pecan/modules/assim.sequential/inst/WillowCreek/gefs.sipnet.template.xml") -#connecting to DB -con <-try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) +# connecting to DB +con <- try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) #------------------------------------------------------------------------------------------------ #------------------------------------------ Preparing the pecan xml ----------------------------- @@ -72,64 +73,66 @@ con <-try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) #--------------------------- Finding old sims all.previous.sims <- list.dirs(outputPath, recursive = F) if (length(all.previous.sims) > 0 & !inherits(con, "try-error")) { - - tryCatch({ - # Looking through all the old simulations and find the most recent - all.previous.sims <- all.previous.sims %>% - map(~ list.files(path = file.path(.x, "SDA"))) %>% - setNames(all.previous.sims) %>% - discard( ~ !"sda.output.Rdata" %in% .x) # I'm throwing out the ones that they did not have a SDA output - - last.sim <- - names(all.previous.sims) %>% - map_chr( ~ strsplit(.x, "_")[[1]][2]) %>% - map_dfr(~ db.query( - query = paste("SELECT started_at FROM workflows WHERE id =", .x), - con 
= con - ) %>% - mutate(ID=.x)) %>% - mutate(started_at = as.Date(started_at)) %>% - arrange(desc(started_at), desc(ID)) %>% - head(1) - # pulling the date and the path to the last SDA - restart.path <-grep(last.sim$ID, names(all.previous.sims), value = T) - sda.start <- last.sim$started_at - }, - error = function(e) { - restart.path <- NULL - sda.start <- Sys.Date() - 12 - PEcAn.logger::logger.warn(paste0("There was a problem with finding the last successfull SDA.",conditionMessage(e))) - }) - + tryCatch( + { + # Looking through all the old simulations and find the most recent + all.previous.sims <- all.previous.sims %>% + map(~ list.files(path = file.path(.x, "SDA"))) %>% + setNames(all.previous.sims) %>% + discard(~ !"sda.output.Rdata" %in% .x) # I'm throwing out the ones that they did not have a SDA output + + last.sim <- + names(all.previous.sims) %>% + map_chr(~ strsplit(.x, "_")[[1]][2]) %>% + map_dfr(~ db.query( + query = paste("SELECT started_at FROM workflows WHERE id =", .x), + con = con + ) %>% + mutate(ID = .x)) %>% + mutate(started_at = as.Date(started_at)) %>% + arrange(desc(started_at), desc(ID)) %>% + head(1) + # pulling the date and the path to the last SDA + restart.path <- grep(last.sim$ID, names(all.previous.sims), value = T) + sda.start <- last.sim$started_at + }, + error = function(e) { + restart.path <- NULL + sda.start <- Sys.Date() - 12 + PEcAn.logger::logger.warn(paste0("There was a problem with finding the last successfull SDA.", conditionMessage(e))) + } + ) + # if there was no older sims - if (is.na(sda.start)) + if (is.na(sda.start)) { sda.start <- Sys.Date() - 12 + } } sda.end <- Sys.Date() #----------------------------------------------------------------------------------------------- #------------------------------------------ Download met and flux ------------------------------ #----------------------------------------------------------------------------------------------- -#Fluxes -if(!exists('prep.data')) +# Fluxes +if (!exists("prep.data")) { prep.data <- prep.data.assim( - sda.start - 90,# it needs at least 90 days for gap filling + sda.start - 90, # it needs at least 90 days for gap filling sda.end, numvals = 100, vars = c("NEE", "LE"), - data.len = days.obs * 24 - ) -obs.raw <-prep.data$rawobs -prep.data<-prep.data$obs + data.len = days.obs * 24 + ) +} +obs.raw <- prep.data$rawobs +prep.data <- prep.data$obs -# if there is infinte value then take it out - here we want to remove any that just have one NA in the observed data -prep.data <- prep.data %>% - map(function(day.data){ - #cheking the mean +# if there is infinte value then take it out - here we want to remove any that just have one NA in the observed data +prep.data <- prep.data %>% + map(function(day.data) { + # cheking the mean nan.mean <- which(is.infinite(day.data$means) | is.nan(day.data$means) | is.na(day.data$means)) - if ( length(nan.mean)>0 ) { - + if (length(nan.mean) > 0) { day.data$means <- day.data$means[-nan.mean] day.data$covs <- day.data$covs[-nan.mean, -nan.mean] %>% as.matrix() %>% @@ -148,17 +151,19 @@ prep.data <- prep.data %>% name[name == "LE"] <- "Qle" name }) - + day.data }) # Finding the right end and start date -met.start <- obs.raw$Date%>% head(1) %>% lubridate::floor_date(unit = "day") +met.start <- obs.raw$Date %>% + head(1) %>% + lubridate::floor_date(unit = "day") met.end <- met.start + lubridate::days(16) -#pad Observed Data to match met data +# pad Observed Data to match met data date <- seq( @@ -176,35 +181,39 @@ pad.prep <- obs.raw %>% dplyr::select(Date, 
means, covs) %>% dynutils::tibble_as_list() -names(pad.prep) <-date +names(pad.prep) <- date -#create the data type to match the other data +# create the data type to match the other data pad.cov <- matrix(data = c(rep(NA, 4)), nrow = 2, ncol = 2, dimnames = list(c("NEE", "Qle"), c("NEE", "Qle"))) -pad.means = c(NA, NA) +pad.means <- c(NA, NA) names(pad.means) <- c("NEE", "Qle") -#cycle through and populate the list +# cycle through and populate the list -pad <- pad.prep %>% - map(function(day.data){ - day.data$means <- pad.means - day.data$covs <- pad.cov - day.data - }) +pad <- pad.prep %>% + map(function(day.data) { + day.data$means <- pad.means + day.data$covs <- pad.cov + day.data + }) -#add onto end of prep.data list +# add onto end of prep.data list -prep.data = c(prep.data, pad) +prep.data <- c(prep.data, pad) # This line is what makes the SDA to run daily ***** IMPORTANT CODE OVER HERE -prep.data<-prep.data %>% - discard(~lubridate::hour(.x$Date)!=0) +prep.data <- prep.data %>% + discard(~ lubridate::hour(.x$Date) != 0) -obs.mean <- prep.data %>% map('means') %>% setNames(names(prep.data)) -obs.cov <- prep.data %>% map('covs') %>% setNames(names(prep.data)) +obs.mean <- prep.data %>% + map("means") %>% + setNames(names(prep.data)) +obs.cov <- prep.data %>% + map("covs") %>% + setNames(names(prep.data)) @@ -217,27 +226,27 @@ obs.cov <- prep.data %>% map('covs') %>% setNames(names(prep.data)) #----------------------------------------------------------------------------------------------- #------------------------------------------ Fixing the settings -------------------------------- #----------------------------------------------------------------------------------------------- -#Using the found dates to run - this will help to download mets +# Using the found dates to run - this will help to download mets settings$run$start.date <- as.character(met.start) settings$run$end.date <- as.character(last(date)) settings$run$site$met.start <- as.character(met.start) settings$run$site$met.end <- as.character(met.end) -#info +# info settings$info$date <- paste0(format(Sys.time(), "%Y/%m/%d %H:%M:%S"), " +0000") # -------------------------------------------------------------------------------------------------- #---------------------------------------------- PEcAn Workflow ------------------------------------- # -------------------------------------------------------------------------------------------------- -#Update/fix/check settings. Will only run the first time it's called, unless force=TRUE -settings <- PEcAn.settings::prepare.settings(settings, force=FALSE) +# Update/fix/check settings. 
Will only run the first time it's called, unless force=TRUE +settings <- PEcAn.settings::prepare.settings(settings, force = FALSE) setwd(settings$outdir) ggsave( file.path(settings$outdir, "Obs_plot.pdf"), - ploting_fluxes(obs.raw) , + ploting_fluxes(obs.raw), width = 16, height = 9 ) -#Write pecan.CHECKED.xml +# Write pecan.CHECKED.xml PEcAn.settings::write.settings(settings, outputfile = "pecan.CHECKED.xml") # start from scratch if no continue is passed in statusFile <- file.path(settings$outdir, "STATUS") @@ -251,11 +260,11 @@ settings <- PEcAn.workflow::do_conversions(settings, T, T, T) if (PEcAn.utils::status.check("TRAIT") == 0) { PEcAn.utils::status.start("TRAIT") settings <- PEcAn.workflow::runModule.get.trait.data(settings) - PEcAn.settings::write.settings(settings, outputfile = 'pecan.TRAIT.xml') + PEcAn.settings::write.settings(settings, outputfile = "pecan.TRAIT.xml") PEcAn.utils::status.end() -} else if (file.exists(file.path(settings$outdir, 'pecan.TRAIT.xml'))) { +} else if (file.exists(file.path(settings$outdir, "pecan.TRAIT.xml"))) { settings <- - PEcAn.settings::read.settings(file.path(settings$outdir, 'pecan.TRAIT.xml')) + PEcAn.settings::read.settings(file.path(settings$outdir, "pecan.TRAIT.xml")) } # Run the PEcAn meta.analysis if (!is.null(settings$meta.analysis)) { @@ -265,116 +274,122 @@ if (!is.null(settings$meta.analysis)) { PEcAn.utils::status.end() } } -#sample from parameters used for both sensitivity analysis and Ens +# sample from parameters used for both sensitivity analysis and Ens get.parameter.samples(settings, ens.sample.method = settings$ensemble$samplingspace$parameters$method) # Setting dates in assimilation tags - This will help with preprocess split in SDA code -settings$state.data.assimilation$start.date <-as.character(met.start) -settings$state.data.assimilation$end.date <-as.character(met.end - lubridate::hms("06:00:00")) +settings$state.data.assimilation$start.date <- as.character(met.start) +settings$state.data.assimilation$end.date <- as.character(met.end - lubridate::hms("06:00:00")) if (nodata) { - obs.mean <- obs.mean %>% map(function(x) - return(NA)) - obs.cov <- obs.cov %>% map(function(x) - return(NA)) + obs.mean <- obs.mean %>% map(function(x) { + return(NA) + }) + obs.cov <- obs.cov %>% map(function(x) { + return(NA) + }) } # -------------------------------------------------------------------------------------------------- #--------------------------------- Restart ------------------------------------- # -------------------------------------------------------------------------------------------------- -#@Hamze - should we add a if statement here for the times that we don't want to copy the path? -# @Hamze: Yes if restart == TRUE -if(restart == TRUE){ - if(!dir.exists("SDA")) dir.create("SDA",showWarnings = F) +# @Hamze - should we add a if statement here for the times that we don't want to copy the path? +# @Hamze: Yes if restart == TRUE +if (restart == TRUE) { + if (!dir.exists("SDA")) dir.create("SDA", showWarnings = F) - #Update the SDA Output to just have last time step - temp<- new.env() + # Update the SDA Output to just have last time step + temp <- new.env() load(file.path(restart.path, "SDA", "sda.output.Rdata"), envir = temp) temp <- as.list(temp) - - #we want ANALYSIS, FORECAST, and enkf.parms to match up with how many days obs data we have - # +2 for days.obs since today is not included in the number. 
So we want to keep today and any other obs data - if(length(temp$ANALYSIS) > 1){ - for(i in rev((days.obs + 2):length(temp$ANALYSIS))){ - temp$ANALYSIS[[i]] <- NULL - } - - for(i in rev((days.obs + 2):length(temp$FORECAST))){ - temp$FORECAST[[i]] <- NULL - } - - - for(i in rev((days.obs + 2):length(temp$enkf.params))){ - temp$enkf.params[[i]] <- NULL - } + + # we want ANALYSIS, FORECAST, and enkf.parms to match up with how many days obs data we have + # +2 for days.obs since today is not included in the number. So we want to keep today and any other obs data + if (length(temp$ANALYSIS) > 1) { + for (i in rev((days.obs + 2):length(temp$ANALYSIS))) { + temp$ANALYSIS[[i]] <- NULL + } + + for (i in rev((days.obs + 2):length(temp$FORECAST))) { + temp$FORECAST[[i]] <- NULL + } + + + for (i in rev((days.obs + 2):length(temp$enkf.params))) { + temp$enkf.params[[i]] <- NULL + } } - temp$t = 1 - - #change inputs path to match sampling met paths - - for(i in 1: length(temp$inputs$ids)){ - + temp$t <- 1 + + # change inputs path to match sampling met paths + + for (i in 1:length(temp$inputs$ids)) { temp$inputs$samples[i] <- settings$run$inputs$met$path[temp$inputs$ids[i]] - } - - temp1<- new.env() + + temp1 <- new.env() list2env(temp, envir = temp1) - save(list = c("ANALYSIS", 'FORECAST', "enkf.params", "ensemble.id", "ensemble.samples", 'inputs', 'new.params', 'new.state', 'run.id', 'site.locs', 't', 'Viz.output', 'X'), - envir = temp1, - file = file.path(settings$outdir, "SDA", "sda.output.Rdata")) + save( + list = c("ANALYSIS", "FORECAST", "enkf.params", "ensemble.id", "ensemble.samples", "inputs", "new.params", "new.state", "run.id", "site.locs", "t", "Viz.output", "X"), + envir = temp1, + file = file.path(settings$outdir, "SDA", "sda.output.Rdata") + ) + + - - temp.out <- new.env() - load(file.path(restart.path, "SDA", 'outconfig.Rdata'), envir = temp.out) + load(file.path(restart.path, "SDA", "outconfig.Rdata"), envir = temp.out) temp.out <- as.list(temp.out) temp.out$outconfig$samples <- NULL - + temp.out1 <- new.env() list2env(temp.out, envir = temp.out1) - save(list = c('outconfig'), - envir = temp.out1, - file = file.path(settings$outdir, "SDA", "outconfig.Rdata")) - - - -#copy over run and out folders - - if(!dir.exists("run")) dir.create("run",showWarnings = F) - copyDirectory(from = file.path(restart.path, "run/"), - to = file.path(settings$outdir, "run/")) - if(!dir.exists("out")) dir.create("out",showWarnings = F) - copyDirectory(from = file.path(restart.path, "out/"), - to = file.path(settings$outdir, "out/")) -} #restart == TRUE - # -------------------------------------------------------------------------------------------------- + save( + list = c("outconfig"), + envir = temp.out1, + file = file.path(settings$outdir, "SDA", "outconfig.Rdata") + ) + + + + # copy over run and out folders + + if (!dir.exists("run")) dir.create("run", showWarnings = F) + copyDirectory( + from = file.path(restart.path, "run/"), + to = file.path(settings$outdir, "run/") + ) + if (!dir.exists("out")) dir.create("out", showWarnings = F) + copyDirectory( + from = file.path(restart.path, "out/"), + to = file.path(settings$outdir, "out/") + ) +} # restart == TRUE +# -------------------------------------------------------------------------------------------------- #--------------------------------- Run state data assimilation ------------------------------------- # -------------------------------------------------------------------------------------------------- -if(restart == FALSE) unlink(c('run','out','SDA'), 
recursive = T) +if (restart == FALSE) unlink(c("run", "out", "SDA"), recursive = T) -if ('state.data.assimilation' %in% names(settings)) { +if ("state.data.assimilation" %in% names(settings)) { if (PEcAn.utils::status.check("SDA") == 0) { PEcAn.utils::status.start("SDA") PEcAnAssimSequential::sda.enkf( - settings, - restart=restart, - Q=0, + settings, + restart = restart, + Q = 0, obs.mean = obs.mean, obs.cov = obs.cov, control = list( trace = TRUE, - interactivePlot =FALSE, - TimeseriesPlot =TRUE, - BiasPlot =FALSE, - debug =FALSE, - pause=FALSE + interactivePlot = FALSE, + TimeseriesPlot = TRUE, + BiasPlot = FALSE, + debug = FALSE, + pause = FALSE ) ) PEcAn.utils::status.end() } } - - \ No newline at end of file diff --git a/modules/assim.sequential/inst/alr_test.R b/modules/assim.sequential/inst/alr_test.R index 8dffcb2e65c..e8f91d8f86e 100644 --- a/modules/assim.sequential/inst/alr_test.R +++ b/modules/assim.sequential/inst/alr_test.R @@ -5,93 +5,98 @@ sampler_toggle <<- nimbleFunction( contains = sampler_BASE, setup = function(model, mvSaved, target, control) { type <- control$type - nested_sampler_name <- paste0('sampler_', type) - control_new <- nimbleOptions('MCMCcontrolDefaultList') + nested_sampler_name <- paste0("sampler_", type) + control_new <- nimbleOptions("MCMCcontrolDefaultList") control_new[[names(control)]] <- control nested_sampler_list <- nimbleFunctionList(sampler_BASE) nested_sampler_list[[1]] <- do.call(nested_sampler_name, list(model, mvSaved, target, control_new)) toggle <- 1 }, run = function() { - if(toggle == 1) + if (toggle == 1) { nested_sampler_list[[1]]$run() + } }, methods = list( - reset = function() + reset = function() { nested_sampler_list[[1]]$reset() + } ) ) -alr <<- nimbleFunction( +alr <<- nimbleFunction( run = function(y = double(1)) { returnType(double(1)) - - y[y<0] <- .000000001 - y_alr <- log(y[1:(length(y)-1)] / y[length(y)]) - + + y[y < 0] <- .000000001 + y_alr <- log(y[1:(length(y) - 1)] / y[length(y)]) + return(y_alr) - }) + } +) -inv.alr <<- nimbleFunction( +inv.alr <<- nimbleFunction( run = function(alr = double(1)) { returnType(double(1)) - - y = exp(c(alr, 0)) / sum(exp(c(alr, 0))) - + + y <- exp(c(alr, 0)) / sum(exp(c(alr, 0))) + return(y) - }) - -tobit.model <<- nimbleCode({ - - q[1:N,1:N] ~ dwish(R = aq[1:N,1:N], df = bq) ## aq and bq are estimated over time - Q[1:N,1:N] <- inverse(q[1:N,1:N]) - X.mod[1:N] ~ dmnorm(muf[1:N], prec = pf[1:N,1:N]) ## Model Forecast ##muf and pf are assigned from ensembles - + } +) + +tobit.model <<- nimbleCode({ + q[1:N, 1:N] ~ dwish(R = aq[1:N, 1:N], df = bq) ## aq and bq are estimated over time + Q[1:N, 1:N] <- inverse(q[1:N, 1:N]) + X.mod[1:N] ~ dmnorm(muf[1:N], prec = pf[1:N, 1:N]) ## Model Forecast ##muf and pf are assigned from ensembles + ## add process error - X[1:N] ~ dmnorm(X.mod[1:N], prec = q[1:N,1:N]) + X[1:N] ~ dmnorm(X.mod[1:N], prec = q[1:N, 1:N]) y_star[X_fcomp_start:X_fcomp_end] <- alr(X[X_fcomp_model_start:X_fcomp_model_end]) - + y.censored[X_fcomp_start:X_fcomp_end] ~ dmnorm(y_star[X_fcomp_start:X_fcomp_end], - prec = r[X_fcomp_start:X_fcomp_end, - X_fcomp_start:X_fcomp_end]) - for(i in 1:YN){ + prec = r[ + X_fcomp_start:X_fcomp_end, + X_fcomp_start:X_fcomp_end + ] + ) + for (i in 1:YN) { y.ind[i] ~ dinterval(y.censored[i], 0) } - }) library(nimble) n.state <- 4 nens <- 100000 -sig <- diag(n.state) * c(500,600,50,200) -#sig[1,2] <- sig[2,1] <- 420 -#sig[1,3] <- sig[3,1] <- 100 -#sig[1,4] <- sig[4,1] <- 240 +sig <- diag(n.state) * c(500, 600, 50, 200) +# sig[1,2] <- sig[2,1] <- 420 
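
# A minimal base-R sketch (outside nimble) of the additive log-ratio transform
# defined above and its inverse, showing that inv.alr(alr(p)) recovers a
# composition that sums to one; the last component is the implicit reference.
alr_r <- function(y) {
  y[y < 0] <- 1e-9
  log(y[-length(y)] / y[length(y)])
}
inv_alr_r <- function(a) {
  e <- exp(c(a, 0))
  e / sum(e)
}
p <- c(0.2, 0.1, 0.4, 0.3)
a <- alr_r(p)  # length 3: log(p[1:3] / p[4])
inv_alr_r(a)   # 0.2 0.1 0.4 0.3
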
+# sig[1,3] <- sig[3,1] <- 100 +# sig[1,4] <- sig[4,1] <- 240 -#sig[2,4] <- sig[4,2] <- 170 -#sig[3,4] <- sig[4,3] <- 710 +# sig[2,4] <- sig[4,2] <- 170 +# sig[3,4] <- sig[4,3] <- 710 mu_X <- c(4000, 6000, 3000, 7000) mu_y <- c(0.2, 0.1, 0.4, 0.3) -X <- rmvnorm(n = nens,rnorm(4,mu_X,1000),sigma = sig) -if(any(X<0)) stop() +X <- rmvnorm(n = nens, rnorm(4, mu_X, 1000), sigma = sig) +if (any(X < 0)) stop() mu.f <- colMeans(X) -Pf <- cov(X) +Pf <- cov(X) set.seed(0) -take <- sample(x = 1:nens,size = 100) -x.prop.table <- prop.table(X,margin = 1) +take <- sample(x = 1:nens, size = 100) +x.prop.table <- prop.table(X, margin = 1) -alr.x <- t(apply(x.prop.table,1,alr)) +alr.x <- t(apply(x.prop.table, 1, alr)) # y.censored <- colMeans(alr.x[take,]) y.censored <- alr(mu_y) -y.ind <- rep(1,n.state-1) +y.ind <- rep(1, n.state - 1) library(MCMCpack) -#R <- rwish(20, diag(3)*100) -R <- solve(cov(alr.x[take,])) +# R <- rwish(20, diag(3)*100) +R <- solve(cov(alr.x[take, ])) X_direct_start <- X_direct_end <- X_pft2total_start <- X_pft2total_end <- X_pft2total_model <- 0 X_fcomp_start <- 1 @@ -103,46 +108,56 @@ alr_last <- 4 direct_TRUE <- pft2total_TRUE <- FALSE fcomp_TRUE <- TRUE -constants.tobit <<- list(N = ncol(X), YN = length(y.ind), - X_fcomp_start = X_fcomp_start, X_fcomp_end = X_fcomp_end, - X_fcomp_model_start=X_fcomp_model[1], - X_fcomp_model_end=X_fcomp_model[length(X_fcomp_model)], - alr_last = alr_last) - -dimensions.tobit = list(X = length(mu.f), X.mod = ncol(X), - Q = c(length(mu.f),length(mu.f)), - y_star = length(y.censored)) - -data.tobit = list(muf = as.vector(mu.f), - pf = solve(Pf), - aq = diag(n.state+1)*(n.state+1), - bq = (n.state+1), - y.ind = rep(1,n.state), - r = (R), - y.censored = y.censored) #precision - -inits.pred = list(q = diag(length(mu.f))*(n.state + 1), - X.mod = rnorm(length(mu.f),mu.f,100), - X = rnorm(length(mu.f),mu.f,100), - y_star = rnorm(n.state-2,0,1)) # - -save(constants.tobit,dimensions.tobit,data.tobit,inits.pred, file='use_this.Rdata') - -if(!exists('Cmcmc')){ - model_pred <- nimbleModel(tobit.model, data = data.tobit, - dimensions = dimensions.tobit, - constants = constants.tobit, - inits = inits.pred, - name = 'pred') +constants.tobit <<- list( + N = ncol(X), YN = length(y.ind), + X_fcomp_start = X_fcomp_start, X_fcomp_end = X_fcomp_end, + X_fcomp_model_start = X_fcomp_model[1], + X_fcomp_model_end = X_fcomp_model[length(X_fcomp_model)], + alr_last = alr_last +) + +dimensions.tobit <- list( + X = length(mu.f), X.mod = ncol(X), + Q = c(length(mu.f), length(mu.f)), + y_star = length(y.censored) +) + +data.tobit <- list( + muf = as.vector(mu.f), + pf = solve(Pf), + aq = diag(n.state + 1) * (n.state + 1), + bq = (n.state + 1), + y.ind = rep(1, n.state), + r = (R), + y.censored = y.censored +) # precision + +inits.pred <- list( + q = diag(length(mu.f)) * (n.state + 1), + X.mod = rnorm(length(mu.f), mu.f, 100), + X = rnorm(length(mu.f), mu.f, 100), + y_star = rnorm(n.state - 2, 0, 1) +) # + +save(constants.tobit, dimensions.tobit, data.tobit, inits.pred, file = "use_this.Rdata") + +if (!exists("Cmcmc")) { + model_pred <- nimbleModel(tobit.model, + data = data.tobit, + dimensions = dimensions.tobit, + constants = constants.tobit, + inits = inits.pred, + name = "pred" + ) ## Adding X.mod,q,r as data for building model. 
- conf <- configureMCMC(model_pred, print=TRUE) - - conf$addMonitors(c("X","q","Q", "y_star","y.censored")) + conf <- configureMCMC(model_pred, print = TRUE) + + conf$addMonitors(c("X", "q", "Q", "y_star", "y.censored")) ## [1] conjugate_dmnorm_dmnorm sampler: X[1:5] ## important! ## this is needed for correct indexing later - x.char <- paste0('X[1:',10,']') - + x.char <- paste0("X[1:", 10, "]") + #### setting custom proposal distribution # conf$removeSamplers(x.char) # propCov.means <- signif(diag(solve(Pf)),1)#mean(unlist(lapply(obs.cov,FUN = function(x){diag(x)})))[choose]#c(rep(max(diag(Pf)),ncol(X)))# @@ -150,108 +165,110 @@ if(!exists('Cmcmc')){ # conf$addSampler(target =c(x.char), # control <- list(propCov = diag(ncol(X))*propCov.means), # type='RW_block') - + samplerNumberOffset <<- length(conf$getSamplers()) - + ## sampler needs to be disabled - for(i in 1:length(y.ind)) { - node <- paste0('y.censored[',i,']') - conf$addSampler(node, 'toggle', control=list(type='RW')) + for (i in 1:length(y.ind)) { + node <- paste0("y.censored[", i, "]") + conf$addSampler(node, "toggle", control = list(type = "RW")) ## could instead use slice samplers, or any combination thereof, e.g.: - ##conf$addSampler(node, 'toggle', control=list(type='slice')) + ## conf$addSampler(node, 'toggle', control=list(type='slice')) } - + conf$printSamplers() - + ## can monitor y.censored, if you wish, to verify correct behaviour - #conf$addMonitors('y.censored') - + # conf$addMonitors('y.censored') + Rmcmc <<- buildMCMC(conf) - + Cmodel <<- compileNimble(model_pred) Cmcmc <<- compileNimble(Rmcmc, project = model_pred) - - for(i in 1:length(y.ind)) { + + for (i in 1:length(y.ind)) { # ironically, here we have to "toggle" the value of y.ind[i] # this specifies that when y.ind[i] = 1, # indicator variable is set to 0, which specifies *not* to sample - valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset+i]], 'toggle', 1-y.ind[i]) + valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset + i]], "toggle", 1 - y.ind[i]) } } -diff.comp <- c(900,100,10,1) +diff.comp <- c(900, 100, 10, 1) y.censored <- Cmodel$y.censored -Cmodel$y.censored <- y.censored#alr(diff.comp)#test different composition data +Cmodel$y.censored <- y.censored # alr(diff.comp)#test different composition data Cmodel$muf <- mu.f Cmodel$pf <- solve(Pf) -Cmodel$r <- rwish(20, diag(8)*1000) #precision - -inits.pred = list(q = diag(length(mu.f))*(length(mu.f)+1), - X.mod = rnorm(length(mu.f),rep(100, 4),100), - X = rnorm(length(mu.f),mu.f,100), - y_star = rnorm(n.state-2,0,1)) # +Cmodel$r <- rwish(20, diag(8) * 1000) # precision + +inits.pred <- list( + q = diag(length(mu.f)) * (length(mu.f) + 1), + X.mod = rnorm(length(mu.f), rep(100, 4), 100), + X = rnorm(length(mu.f), mu.f, 100), + y_star = rnorm(n.state - 2, 0, 1) +) # # X.mod = rnorm(length(mu.f),mu.f,100), # X = rnorm(length(mu.f),mu.f,100)) # Cmodel$setInits(inits.pred) -for(i in 1:length(y.ind)) { +for (i in 1:length(y.ind)) { # ironically, here we have to "toggle" the value of y.ind[i] # this specifies that when y.ind[i] = 1, # indicator variable is set to 0, which specifies *not* to sample - valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset+i]], 'toggle', 1-y.ind[i]) + valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset + i]], "toggle", 1 - y.ind[i]) } -dat <- runMCMC(Cmcmc, niter = 100000, nburnin=20000) #need to make sure you run for awhile to avoid autocorrelation problems +dat <- runMCMC(Cmcmc, niter = 100000, 
nburnin = 20000) # need to make sure you run for awhile to avoid autocorrelation problems -plot(dat[,grep('y.censored',colnames(dat))[3]]) #should be a line +plot(dat[, grep("y.censored", colnames(dat))[3]]) # should be a line -burnin <- .2*nrow(dat) +burnin <- .2 * nrow(dat) ## update parameters iystar <- grep("y_star", colnames(dat), fixed = TRUE) -iq <- grep("q", colnames(dat)) -iQ <- grep("Q", colnames(dat)) -iX <- grep("X[", colnames(dat), fixed = TRUE) -iX.mod <- grep("X.mod", colnames(dat), fixed = TRUE) +iq <- grep("q", colnames(dat)) +iQ <- grep("Q", colnames(dat)) +iX <- grep("X[", colnames(dat), fixed = TRUE) +iX.mod <- grep("X.mod", colnames(dat), fixed = TRUE) X.a <- colMeans(dat[, iX]) Xmod.a <- colMeans(dat[, iX.mod]) q.a <- matrix(colMeans(dat[, iq]), 4, 4) mu.a <- colMeans(dat[burnin:nrow(dat), iX]) ystar.a <- colMeans(dat[, iystar]) -Pa <- cov(dat[, iX]) +Pa <- cov(dat[, iX]) Pa[is.na(Pa)] <- 0 -rbind(mu.a/sum(mu.a[1:9]),mu.f/sum(mu.f[1:9])) +rbind(mu.a / sum(mu.a[1:9]), mu.f / sum(mu.f[1:9])) layout(matrix(1:4, 2, 2)) for (i in 1:9) { - plot(dat[burnin:nrow(dat),iX[i]], type = 'l') + plot(dat[burnin:nrow(dat), iX[i]], type = "l") } -rbind(ystar.a,y.censored) +rbind(ystar.a, y.censored) layout(matrix(1:4, 2, 2)) for (i in 1:3) { - plot(dat[,iystar[i]], type = 'l') - abline(h=alr(mu.a)[i],col='red') + plot(dat[, iystar[i]], type = "l") + abline(h = alr(mu.a)[i], col = "red") } layout(matrix(1:4, 2, 2)) for (i in 1:4) { - plot(dat[,iX.mod[i]], type = 'l') - abline(h=mu.f[i],col='red') + plot(dat[, iX.mod[i]], type = "l") + abline(h = mu.f[i], col = "red") } # layout(matrix(1:16, 4, 4)) # for (i in 1:16) { # plot(dat[,iq[i]], type = 'l') # } -# -# +# +# # layout(matrix(1:16, 4, 4)) # for (i in 1:16) { # plot(dat[,iQ[i]], type = 'l') diff --git a/modules/assim.sequential/inst/covariates.R b/modules/assim.sequential/inst/covariates.R index 99ea5ab585b..e4fb26479e9 100644 --- a/modules/assim.sequential/inst/covariates.R +++ b/modules/assim.sequential/inst/covariates.R @@ -1,9 +1,9 @@ ##' @title Covariate Data Prep ##' @author Joshua Ploshay ##' -##' @description This script lists the code needed to download, resample, and +##' @description This script lists the code needed to download, resample, and ##' stack the covariate data used in the downscale_function.R sript. 
-##' +##' ##' @return A spatraster object consisting of a stack of maps with labeled layers #### WorldClim #### @@ -14,44 +14,52 @@ # 2023 REU project used 10 minute spatial resolution ## Solar Radiation (kJ m-2 day-1) -srad <- terra::rast(list.files(path = "/projectnb/dietzelab/jploshay/pecan_copy/jploshay/10m_srad", - pattern='.tif$', - all.files= T, - full.names= T)) +srad <- terra::rast(list.files( + path = "/projectnb/dietzelab/jploshay/pecan_copy/jploshay/10m_srad", + pattern = ".tif$", + all.files = T, + full.names = T +)) srad <- terra::app(srad, mean) ## Vapor Pressure (kPa) -vapr <- terra::rast(list.files(path = "/projectnb/dietzelab/jploshay/pecan_copy/jploshay/10m_vapr", - pattern='.tif$', - all.files= T, - full.names= T)) +vapr <- terra::rast(list.files( + path = "/projectnb/dietzelab/jploshay/pecan_copy/jploshay/10m_vapr", + pattern = ".tif$", + all.files = T, + full.names = T +)) vapr <- terra::app(vapr, mean) ## Average Temperature (*C) -tavg <- terra::rast(list.files(path = "/projectnb/dietzelab/jploshay/pecan_copy/jploshay/avg_temp_prep/WorldClim", - pattern='.tif$', - all.files= T, - full.names= T)) +tavg <- terra::rast(list.files( + path = "/projectnb/dietzelab/jploshay/pecan_copy/jploshay/avg_temp_prep/WorldClim", + pattern = ".tif$", + all.files = T, + full.names = T +)) tavg <- terra::app(tavg, mean) ## Total Precipitation (mm) -prec <- terra::rast(list.files(path = "/projectnb/dietzelab/jploshay/pecan_copy/jploshay/total_prec", - pattern='.tif$', - all.files= T, - full.names= T)) +prec <- terra::rast(list.files( + path = "/projectnb/dietzelab/jploshay/pecan_copy/jploshay/total_prec", + pattern = ".tif$", + all.files = T, + full.names = T +)) prec <- terra::app(prec, mean) #### SoilGrids #### # geodata::soil_world() pulls data from the SoilGRIDS database -# More information on pulling different soil data can be found in the geodata +# More information on pulling different soil data can be found in the geodata # manual https://cran.r-project.org/web/packages/geodata/geodata.pdf ## Soil pH -phh2o <- geodata::soil_world(var = "phh2o", depth = 5, stat = "mean", path = tempdir()) +phh2o <- geodata::soil_world(var = "phh2o", depth = 5, stat = "mean", path = tempdir()) ## Soil Nitrogen (g kg-1) nitrogen <- geodata::soil_world(var = "nitrogen", depth = 5, stat = "mean", path = tempdir()) @@ -65,14 +73,16 @@ sand <- geodata::soil_world(var = "sand", depth = 5, stat = "mean", path = tempd #### Land Cover #### GLanCE_extract <- function(pattern, path) { - files <- list.files(path = "/projectnb/dietzelab/dietze/glance2012/e4ftl01.cr.usgs.gov/MEASURES/GLanCE30.001/2012.07.01", #make this path default - all.files = T, - full.names = T, - pattern) + files <- list.files( + path = "/projectnb/dietzelab/dietze/glance2012/e4ftl01.cr.usgs.gov/MEASURES/GLanCE30.001/2012.07.01", # make this path default + all.files = T, + full.names = T, + pattern + ) # empty .vrt file path - vrtfile <- paste0(tempfile(), ".vrt") + vrtfile <- paste0(tempfile(), ".vrt") # "connects" tiles together that returns SpatRaster - GLanCE_product <- terra::vrt(files, vrtfile, overwrite = T) + GLanCE_product <- terra::vrt(files, vrtfile, overwrite = T) return(GLanCE_product) } @@ -84,14 +94,14 @@ land_cover <- GLanCE_extract(pattern = "NA_LC.tif$") # Define the extent to crop the covariates to North America NA_extent <- terra::ext(-178.19453125, -10, 7.22006835937502, 83.5996093750001) -# Stack WorldClim maps +# Stack WorldClim maps WorldClim <- c(tavg, srad, prec, vapr) # Crop WolrdClim stack to North America using 
North America extent NA_WorldClim <- terra::crop(WorldClim, NA_extent) names(NA_WorldClim) <- c("tavg", "srad", "prec", "vapr") -# Stack SoilGrids maps +# Stack SoilGrids maps SoilGrids <- c(phh2o, nitrogen, soc, sand) # Crop SoilGrids stack to North America using North America extent @@ -99,11 +109,11 @@ NA_SoilGrids <- terra::crop(SoilGrids, NA_extent) names(NA_SoilGrids) <- c("phh2o", "nitrogen", "soc", "sand") # Resample SoilGrids to match WorldClim maps -NA_SoilGrids <- terra::resample(NA_SoilGrids, NA_WorldClim, method = 'bilinear') +NA_SoilGrids <- terra::resample(NA_SoilGrids, NA_WorldClim, method = "bilinear") # Resample land cover to match WorldClim maps (~25 min) -land_cover <- terra::resample(land_cover, NA_WorldClim, method = 'near') +land_cover <- terra::resample(land_cover, NA_WorldClim, method = "near") names(land_cover) <- "land_cover" -# Stack all maps -covariates <- c(NA_WorldClim, NA_SoilGrids, land_cover) \ No newline at end of file +# Stack all maps +covariates <- c(NA_WorldClim, NA_SoilGrids, land_cover) diff --git a/modules/assim.sequential/inst/ensemble_adj_visualization.R b/modules/assim.sequential/inst/ensemble_adj_visualization.R index be2882e38c2..7279c188857 100644 --- a/modules/assim.sequential/inst/ensemble_adj_visualization.R +++ b/modules/assim.sequential/inst/ensemble_adj_visualization.R @@ -1,137 +1,151 @@ -setwd('/fs/data2/output//PEcAn_1000008683/') -load('/fs/data2/output//PEcAn_1000008683/sda.output.Rdata') -load('/fs/data2/output//PEcAn_1000008683/out/sda.initial.runs.Rdata') +setwd("/fs/data2/output//PEcAn_1000008683/") +load("/fs/data2/output//PEcAn_1000008683/sda.output.Rdata") +load("/fs/data2/output//PEcAn_1000008683/out/sda.initial.runs.Rdata") library(nimble) -time_step<-seq(950,1950,100) -ntrees.save <- agb.pft.save <- array(NA,dim=c(9,length(run.id), - length(1:1950))) - -for(t in 1:(length(time_step)-1)){ - for(i in 1:length(run.id)){ - load(paste0('/fs/data2/output//PEcAn_1000008588/out/',run.id[[i]],'/',time_step[t],'-12-31 23:59:59','linkages.out.Rdata')) - ntrees.save[,i,time_step[t]:(time_step[t+1]-1)] <- ntrees.birth - agb.pft.save[,i,time_step[t]:(time_step[t+1]-1)] <- agb.pft - #dbh +time_step <- seq(950, 1950, 100) +ntrees.save <- agb.pft.save <- array(NA, dim = c( + 9, length(run.id), + length(1:1950) +)) + +for (t in 1:(length(time_step) - 1)) { + for (i in 1:length(run.id)) { + load(paste0("/fs/data2/output//PEcAn_1000008588/out/", run.id[[i]], "/", time_step[t], "-12-31 23:59:59", "linkages.out.Rdata")) + ntrees.save[, i, time_step[t]:(time_step[t + 1] - 1)] <- ntrees.birth + agb.pft.save[, i, time_step[t]:(time_step[t + 1] - 1)] <- agb.pft + # dbh } } library(colorspace) -matplot(950:1949,apply(ntrees.save[,,950:1949]/(1/12),1,colMeans,na.rm=T),typ='l',lwd=3,col=rainbow(9),ylab='Stem Density (trees/ha)') -matplot(950:1949,apply(agb.pft.save[,,950:1949],1,colMeans,na.rm=T),typ='l',lwd=3,col=rainbow(9),ylab='PFT Biomass (kgC/m^2)') +matplot(950:1949, apply(ntrees.save[, , 950:1949] / (1 / 12), 1, colMeans, na.rm = T), typ = "l", lwd = 3, col = rainbow(9), ylab = "Stem Density (trees/ha)") +matplot(950:1949, apply(agb.pft.save[, , 950:1949], 1, colMeans, na.rm = T), typ = "l", lwd = 3, col = rainbow(9), ylab = "PFT Biomass (kgC/m^2)") -sd.df <- apply(ntrees.save[,,950:1949]/(1/12),1,colMeans,na.rm=T) -ag.df <- apply(agb.pft.save[,,950:1949],1,colMeans,na.rm=T) +sd.df <- apply(ntrees.save[, , 950:1949] / (1 / 12), 1, colMeans, na.rm = T) +ag.df <- apply(agb.pft.save[, , 950:1949], 1, colMeans, na.rm = T) 
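
# A minimal sketch (toy dimensions) of the apply(..., 1, colMeans) idiom used
# just above: for an array laid out as [pft, ensemble member, year], applying
# colMeans over the first margin averages across ensemble members and returns a
# year x pft matrix that matplot() can draw directly; the same pattern with
# quantile() gives the credible bands computed next.
arr <- array(rnorm(9 * 50 * 20), dim = c(9, 50, 20)) # 9 PFTs, 50 members, 20 years
ens_mean <- apply(arr, 1, colMeans) # 20 x 9: ensemble mean per year and PFT
dim(ens_mean)
matplot(ens_mean, type = "l", xlab = "Year", ylab = "Ensemble mean")
ens_ci <- apply(arr[1, , ], 2, quantile, c(.025, .5, .975)) # 3 x 20 band for PFT 1
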
-quant.keep<-quant.keep.a<-list() -for(i in 1:9){ - quant.keep[[i]]<-apply(ntrees.save[i,,950:1949]/(1/12),2,quantile,c(.025,.5,.975),na.rm=T) - quant.keep.a[[i]]<-apply(agb.pft.save[i,,950:1949],2,quantile,c(.025,.5,.975),na.rm=T) - } +quant.keep <- quant.keep.a <- list() +for (i in 1:9) { + quant.keep[[i]] <- apply(ntrees.save[i, , 950:1949] / (1 / 12), 2, quantile, c(.025, .5, .975), na.rm = T) + quant.keep.a[[i]] <- apply(agb.pft.save[i, , 950:1949], 2, quantile, c(.025, .5, .975), na.rm = T) +} load("/fs/data2/output/PEcAn_1000008588/run/1001823086/linkages.input.Rdata") -par(mfrow=c(3,2),mar=c(rep(3.8,4))) -for(i in c(9,8,4)){ - plot(950:1949,quant.keep[[i]][2,],typ='l',col=rainbow_hcl(9)[i], - ylab='Stem Density (trees/ha)',xlab='Year', - main=NA,lwd=2,ylim=c(0,max(quant.keep[[i]]))) - ciEnvelope(x=950:1949,ylo=quant.keep[[i]][1,],yhi=quant.keep[[i]][3,],col=rainbow_hcl(9,alpha = .75)[i]) - lines(950:1949,quant.keep[[i]][2,],col=rainbow_hcl(9)[i],lwd=4) - - plot(950:1949,quant.keep.a[[i]][2,],typ='l',col=rainbow_hcl(9)[i], - ylab='Spp. Biomass (kgC/m^2)',xlab='Year', - main=NA,lwd=2,ylim=c(0,max(quant.keep.a[[i]]))) - ciEnvelope(x=950:1949,ylo=quant.keep.a[[i]][1,],yhi=quant.keep.a[[i]][3,],col=rainbow_hcl(9,alpha = .75)[i]) - lines(950:1949,quant.keep.a[[i]][2,],col=rainbow_hcl(9)[i],lwd=4) +par(mfrow = c(3, 2), mar = c(rep(3.8, 4))) +for (i in c(9, 8, 4)) { + plot(950:1949, quant.keep[[i]][2, ], + typ = "l", col = rainbow_hcl(9)[i], + ylab = "Stem Density (trees/ha)", xlab = "Year", + main = NA, lwd = 2, ylim = c(0, max(quant.keep[[i]])) + ) + ciEnvelope(x = 950:1949, ylo = quant.keep[[i]][1, ], yhi = quant.keep[[i]][3, ], col = rainbow_hcl(9, alpha = .75)[i]) + lines(950:1949, quant.keep[[i]][2, ], col = rainbow_hcl(9)[i], lwd = 4) + + plot(950:1949, quant.keep.a[[i]][2, ], + typ = "l", col = rainbow_hcl(9)[i], + ylab = "Spp. 
Biomass (kgC/m^2)", xlab = "Year", + main = NA, lwd = 2, ylim = c(0, max(quant.keep.a[[i]])) + ) + ciEnvelope(x = 950:1949, ylo = quant.keep.a[[i]][1, ], yhi = quant.keep.a[[i]][3, ], col = rainbow_hcl(9, alpha = .75)[i]) + lines(950:1949, quant.keep.a[[i]][2, ], col = rainbow_hcl(9)[i], lwd = 4) } pf <- enkf.params[[10]]$Pf q.bar <- solve(enkf.params[[10]]$q.bar) -rownames(pf) <- rownames(q.bar) <- colnames(pf) <- colnames(q.bar) <- c('Maple','Birch','Hickory', - 'Chestnut','Beech','Spruce', - 'Pine','Oak','Hemlock','SoilCarbon') - -par(mfrow=c(1,1)) -corrplot(cov2cor(pf), type = "upper", tl.srt = 25, - tl.cex = .8,col=c('#ca0020','#0571b0'),diag=FALSE, - order='original') -corrplot(cov2cor(pf+q.bar), type = "upper", tl.srt = 25, - tl.cex = .8,col=c('#ca0020','#0571b0'),diag=FALSE) - -df <- round(apply(ntrees.save[,,950:1949]/(1/12),1,colMeans,na.rm=T)) -colnames(df) <- c('a','b','c','d','e','f','g','h','i') +rownames(pf) <- rownames(q.bar) <- colnames(pf) <- colnames(q.bar) <- c( + "Maple", "Birch", "Hickory", + "Chestnut", "Beech", "Spruce", + "Pine", "Oak", "Hemlock", "SoilCarbon" +) + +par(mfrow = c(1, 1)) +corrplot(cov2cor(pf), + type = "upper", tl.srt = 25, + tl.cex = .8, col = c("#ca0020", "#0571b0"), diag = FALSE, + order = "original" +) +corrplot(cov2cor(pf + q.bar), + type = "upper", tl.srt = 25, + tl.cex = .8, col = c("#ca0020", "#0571b0"), diag = FALSE +) + +df <- round(apply(ntrees.save[, , 950:1949] / (1 / 12), 1, colMeans, na.rm = T)) +colnames(df) <- c("a", "b", "c", "d", "e", "f", "g", "h", "i") rownames(df) <- stringi::stri_rand_strings(1000, 5) barplot(t(as.data.frame(df))) list.trees <- list() -for(i in 1:9){ - list.trees[[i]] <- apply(ntrees.save[i,,950:1949],2,quantile,c(.025,.5,.975),na.rm=T) +for (i in 1:9) { + list.trees[[i]] <- apply(ntrees.save[i, , 950:1949], 2, quantile, c(.025, .5, .975), na.rm = T) } -plot(list.trees[[1]][2,],ylim=c(0,35),typ='l') -for(i in 1:9){ - lines(list.trees[[i]][2,]) +plot(list.trees[[1]][2, ], ylim = c(0, 35), typ = "l") +for (i in 1:9) { + lines(list.trees[[i]][2, ]) } -par(mfrow=c(1,1)) -matplot(t(apply(agb.pft.save[,,,10],2,rowMeans,na.rm=TRUE)),typ='l') +par(mfrow = c(1, 1)) +matplot(t(apply(agb.pft.save[, , , 10], 2, rowMeans, na.rm = TRUE)), typ = "l") sum.list <- ntrees.list <- list() -for(i in 1:10){ - sum.list[[i]] <- t(apply(agb.pft.save[,,,i],2,rowMeans,na.rm=TRUE)) - ntrees.list[[i]] <- t(apply(ntrees.save[,,,i],2,rowMeans,na.rm=TRUE)) +for (i in 1:10) { + sum.list[[i]] <- t(apply(agb.pft.save[, , , i], 2, rowMeans, na.rm = TRUE)) + ntrees.list[[i]] <- t(apply(ntrees.save[, , , i], 2, rowMeans, na.rm = TRUE)) } -sum.all <- do.call(rbind,sum.list) -ntrees.all <- do.call(rbind,ntrees.list) -par(mfrow=c(1,1)) +sum.all <- do.call(rbind, sum.list) +ntrees.all <- do.call(rbind, ntrees.list) +par(mfrow = c(1, 1)) matplot(sum.all) matplot(ntrees.all) library(corrplot) -par(mfrow=c(1,1)) +par(mfrow = c(1, 1)) corrplot(cov2cor(enkf.params[[9]]$R)) -###-------------------------------------------------------------------### +### -------------------------------------------------------------------### ### ensemble adjustment plots ### -###-------------------------------------------------------------------### +### -------------------------------------------------------------------### -#function for plotting matplot with ensemble number as label -mattext = function(data, data_names, colors, ylab, xlab, type='b', na.fix = FALSE){ - if(na.fix == TRUE){ +# function for plotting matplot with ensemble number as label +mattext <- function(data, 
data_names, colors, ylab, xlab, type = "b", na.fix = FALSE) { + if (na.fix == TRUE) { data[is.na(data)] <- 0 } - - matplot(data, pch=NA, type=type, col=colors, ylab = ylab, xlab = xlab) - for (i in 1:ncol(data)){ - text(x=1:nrow(data), y=data[,i], lab=data_names[i], col=colors[i]) + + matplot(data, pch = NA, type = type, col = colors, ylab = ylab, xlab = xlab) + for (i in 1:ncol(data)) { + text(x = 1:nrow(data), y = data[, i], lab = data_names[i], col = colors[i]) } } -#calculate the likelihood of the ensemble members given mu.a and Pa +# calculate the likelihood of the ensemble members given mu.a and Pa nens <- nrow(FORECAST[[1]]) nt <- length(FORECAST) -wt.mat <- matrix(NA,nrow=nens,ncol=nt) -for(t in seq_len(nt-1)){ - for(i in seq_len(nens)){ - wt.mat[i,t]<-dmnorm_chol(FORECAST[[t]][i,],enkf.params[[t]]$mu.a,solve(enkf.params[[t]]$Pa)) +wt.mat <- matrix(NA, nrow = nens, ncol = nt) +for (t in seq_len(nt - 1)) { + for (i in seq_len(nens)) { + wt.mat[i, t] <- dmnorm_chol(FORECAST[[t]][i, ], enkf.params[[t]]$mu.a, solve(enkf.params[[t]]$Pa)) } } -#put into weights table -wt.props <- t(prop.table(wt.mat,2)) - -pdf(file.path(settings$outdir,'ensemble.weights.time-series.pdf')) -par(mfrow=c(1,1)) -mattext(data = wt.props,data_names = as.character(1:nens),colors=rainbow(nens), - ylab = c('Ensemble Weight'), xlab = c('Time')) +# put into weights table +wt.props <- t(prop.table(wt.mat, 2)) + +pdf(file.path(settings$outdir, "ensemble.weights.time-series.pdf")) +par(mfrow = c(1, 1)) +mattext( + data = wt.props, data_names = as.character(1:nens), colors = rainbow(nens), + ylab = c("Ensemble Weight"), xlab = c("Time") +) dev.off() library(Hmisc) @@ -140,131 +154,129 @@ settings <- read.settings("pecan.SDA.xml") pft.names <- as.character(lapply(settings$pfts, function(x) x[["name"]])) param.names <- names(params[[1]][[1]]) -param.hist <- array(NA,dim=c(length(param.names),length(pft.names),nens)) -wt.df <- array(NA, dim = c(length(param.names),length(pft.names),nt,4)) -diff.mean.mat <- matrix(NA,19,9) - -pdf('weighted.param.time-series.pdf') -par(mfrow=c(4,3)) -for(p in 1:19){ - for(s in 1:4){ +param.hist <- array(NA, dim = c(length(param.names), length(pft.names), nens)) +wt.df <- array(NA, dim = c(length(param.names), length(pft.names), nt, 4)) +diff.mean.mat <- matrix(NA, 19, 9) + +pdf("weighted.param.time-series.pdf") +par(mfrow = c(4, 3)) +for (p in 1:19) { + for (s in 1:4) { pft <- pft.names[s] param.plot <- param.names[p] - - param.check <- unlist(lapply(lapply(params,'[[',pft),'[[',param.plot)) - - if(!is.null(param.check)){ - param.hist[p,s,] <- param.check + + param.check <- unlist(lapply(lapply(params, "[[", pft), "[[", param.plot)) + + if (!is.null(param.check)) { + param.hist[p, s, ] <- param.check wt.mean <- wt.var <- numeric(nt) - - for(t in 2:(nt-1)){ - wt.mean[t] <- wtd.mean(x=param.hist[p,s,], w = wt.props[t,]) - wt.var[t] <- wtd.var(x=param.hist[p,s,], w = wt.props[t,]) + + for (t in 2:(nt - 1)) { + wt.mean[t] <- wtd.mean(x = param.hist[p, s, ], w = wt.props[t, ]) + wt.var[t] <- wtd.var(x = param.hist[p, s, ], w = wt.props[t, ]) } - - wt.df[p,s,,1] <- wt.mean - wt.df[p,s,,2] <- wt.mean - mean(param.hist[p,s,]) - wt.df[p,s,,3] <- wt.var - wt.df[p,s,,4] <- wt.var - stats::var(param.hist[p,s,]) - - #plot weighted mean - plot(wt.mean[2:9],type='l',ylab='Weighted Mean',xlab='Time') - points(wt.mean[2:9], pch=19,cex=.4) - abline(h=mean(param.hist[p,s,])) - abline(h = param.hist[p,s,which.min(colMeans(wt.props,na.rm = TRUE))],col='red') - abline(h = 
param.hist[p,s,which.max(colMeans(wt.props,na.rm = TRUE))],col='green') - title(main = list(paste(pft,'\n',param.plot), cex = .5)) - - #coloring by the difference in the mean relative to the scale of the parameter - diff.mean <- diff.mean.mat[p,s] <- abs(mean(wt.mean,na.rm=T) - mean(param.hist[p,s,],na.rm=T)) - if(diff.mean > abs(.00001*mean(param.hist[p,s,]))){ - mtext(text = paste(signif(diff.mean,digits = 3)), side = 3,col = 'red') - }else{ - mtext(text = paste(signif(diff.mean,digits = 3)), side = 3) + + wt.df[p, s, , 1] <- wt.mean + wt.df[p, s, , 2] <- wt.mean - mean(param.hist[p, s, ]) + wt.df[p, s, , 3] <- wt.var + wt.df[p, s, , 4] <- wt.var - stats::var(param.hist[p, s, ]) + + # plot weighted mean + plot(wt.mean[2:9], type = "l", ylab = "Weighted Mean", xlab = "Time") + points(wt.mean[2:9], pch = 19, cex = .4) + abline(h = mean(param.hist[p, s, ])) + abline(h = param.hist[p, s, which.min(colMeans(wt.props, na.rm = TRUE))], col = "red") + abline(h = param.hist[p, s, which.max(colMeans(wt.props, na.rm = TRUE))], col = "green") + title(main = list(paste(pft, "\n", param.plot), cex = .5)) + + # coloring by the difference in the mean relative to the scale of the parameter + diff.mean <- diff.mean.mat[p, s] <- abs(mean(wt.mean, na.rm = T) - mean(param.hist[p, s, ], na.rm = T)) + if (diff.mean > abs(.00001 * mean(param.hist[p, s, ]))) { + mtext(text = paste(signif(diff.mean, digits = 3)), side = 3, col = "red") + } else { + mtext(text = paste(signif(diff.mean, digits = 3)), side = 3) } - - #Plot weighted variance - plot(wt.var,type='l',ylab='Weighted Variance',xlab='Time') - points(wt.var, pch=19,cex=.5) - abline(h= stats::var(param.hist[p,s,])) - title(main = list(paste(pft,'\n',param.plot), cex = .5)) - - hist(param.hist[p,s,], freq = FALSE, col= 'lightgrey', main = paste(pft,'\n',param.plot)) - for(t in 2:(nt-1)){ - lines(density(param.hist[p,s,], weights = wt.props[t,], na.rm = TRUE), - lwd = 2, col=cm.colors(10)[t]) + + # Plot weighted variance + plot(wt.var, type = "l", ylab = "Weighted Variance", xlab = "Time") + points(wt.var, pch = 19, cex = .5) + abline(h = stats::var(param.hist[p, s, ])) + title(main = list(paste(pft, "\n", param.plot), cex = .5)) + + hist(param.hist[p, s, ], freq = FALSE, col = "lightgrey", main = paste(pft, "\n", param.plot)) + for (t in 2:(nt - 1)) { + lines(density(param.hist[p, s, ], weights = wt.props[t, ], na.rm = TRUE), + lwd = 2, col = cm.colors(10)[t] + ) } - - }else{ + } else { plot.new() } - } } dev.off() -pdf('weighted.hists.pdf') -par(mfrow = c(4,4)) +pdf("weighted.hists.pdf") +par(mfrow = c(4, 4)) plot.new() -legend('center',c('Weighted Means','Prior Means'),pch = 19,col=c('lightgrey','black')) -for(p in 1:length(param.names)){ - hist(wt.df[p,,,1], main=param.names[p], freq = FALSE, col = 'lightgrey', xlab = 'Param Value') - lines(density(rowMeans(param.hist[p,,]),na.rm = TRUE), lwd = 2) - +legend("center", c("Weighted Means", "Prior Means"), pch = 19, col = c("lightgrey", "black")) +for (p in 1:length(param.names)) { + hist(wt.df[p, , , 1], main = param.names[p], freq = FALSE, col = "lightgrey", xlab = "Param Value") + lines(density(rowMeans(param.hist[p, , ]), na.rm = TRUE), lwd = 2) } dev.off() -plot(param.hist[1,,],param.hist[5,,]) -par(mfrow=c(1,1)) -for(s in c(1,7,8,9)){ - plot(param.hist[2,s,],param.hist[5,s,],col='black',pch=19,main=pft.names[s],xlab='MPLANT',ylab='AGEMX') - for(t in 1:nt){ - points(param.hist[2,s,],param.hist[5,s,],col=terrain.colors(nt)[t],cex=wt.props[t,]*75) - +plot(param.hist[1, , ], param.hist[5, , ]) +par(mfrow = 
c(1, 1)) +for (s in c(1, 7, 8, 9)) { + plot(param.hist[2, s, ], param.hist[5, s, ], col = "black", pch = 19, main = pft.names[s], xlab = "MPLANT", ylab = "AGEMX") + for (t in 1:nt) { + points(param.hist[2, s, ], param.hist[5, s, ], col = terrain.colors(nt)[t], cex = wt.props[t, ] * 75) } - points(param.hist[2,s,order(colMeans(wt.props,na.rm=TRUE))],param.hist[5,s,order(colMeans(wt.props,na.rm=TRUE))],col=grey(seq(0,1,length.out = 50)),pch=19,cex=1) - #points(param.hist[2,s,which.min(colMeans(wt.props,na.rm = TRUE))],param.hist[5,s,which.min(colMeans(wt.props,na.rm = TRUE))],col='red',cex=1) -} + points(param.hist[2, s, order(colMeans(wt.props, na.rm = TRUE))], param.hist[5, s, order(colMeans(wt.props, na.rm = TRUE))], col = grey(seq(0, 1, length.out = 50)), pch = 19, cex = 1) + # points(param.hist[2,s,which.min(colMeans(wt.props,na.rm = TRUE))],param.hist[5,s,which.min(colMeans(wt.props,na.rm = TRUE))],col='red',cex=1) +} -which.min(colMeans(wt.props,na.rm = TRUE)) -which.max(colMeans(wt.props,na.rm = TRUE)) +which.min(colMeans(wt.props, na.rm = TRUE)) +which.max(colMeans(wt.props, na.rm = TRUE)) -par(mfrow=c(1,1)) -mattext(param.hist[1,,], data_names = as.character(1:nens), colors=rainbow(nens), - ylab = c('Parameter Value'), xlab = c('PFT'), type='p', na.fix = TRUE) +par(mfrow = c(1, 1)) +mattext(param.hist[1, , ], + data_names = as.character(1:nens), colors = rainbow(nens), + ylab = c("Parameter Value"), xlab = c("PFT"), type = "p", na.fix = TRUE +) library(weights) -par(mfrow=c(1,2)) -weighted.hist(x = param.hist, w = wt.props[nt,],col = 'lightgrey') -hist(param.hist,col = 'lightgrey',xlim = range(dd$x)) +par(mfrow = c(1, 2)) +weighted.hist(x = param.hist, w = wt.props[nt, ], col = "lightgrey") +hist(param.hist, col = "lightgrey", xlim = range(dd$x)) plot(density(param.hist)) -plot(density(param.hist*wt.props[nt,]*10)) +plot(density(param.hist * wt.props[nt, ] * 10)) ## weighted quantile -wtd.quantile <- function(x,wt,q){ +wtd.quantile <- function(x, wt, q) { ord <- order(x) - wstar <- cumsum(wt[ord])/sum(wt) - qi <- findInterval(q,wstar); qi[qi<1]=1;qi[qi>length(x)]=length(x) + wstar <- cumsum(wt[ord]) / sum(wt) + qi <- findInterval(q, wstar) + qi[qi < 1] <- 1 + qi[qi > length(x)] <- length(x) return(x[ord[qi]]) } param.quant <- matrix(NA, 3, nt) -for(t in seq_len(nt)){ - param.quant[,t] <- wtd.quantile(x = param.hist, wt=wt.mat[,t],q=c(.025,.5,.975)) +for (t in seq_len(nt)) { + param.quant[, t] <- wtd.quantile(x = param.hist, wt = wt.mat[, t], q = c(.025, .5, .975)) } -plot(param.quant[2,], ylim = range(param.quant,na.rm = TRUE)) -ciEnvelope(x = 1:nt, ylo = param.quant[1,1:nt], yhi = param.quant[3,1:nt], col = 'lightblue') -points(param.quant[2,], pch = 19, cex = 1) - - - +plot(param.quant[2, ], ylim = range(param.quant, na.rm = TRUE)) +ciEnvelope(x = 1:nt, ylo = param.quant[1, 1:nt], yhi = param.quant[3, 1:nt], col = "lightblue") +points(param.quant[2, ], pch = 19, cex = 1) diff --git a/modules/assim.sequential/inst/fluxnet_sandbox/ApplyingWeights_sps_v4.R b/modules/assim.sequential/inst/fluxnet_sandbox/ApplyingWeights_sps_v4.R index 221de4f630f..2911776e5eb 100644 --- a/modules/assim.sequential/inst/fluxnet_sandbox/ApplyingWeights_sps_v4.R +++ b/modules/assim.sequential/inst/fluxnet_sandbox/ApplyingWeights_sps_v4.R @@ -11,9 +11,9 @@ #--------------------------------------------------------------------------------------------------# # Close all devices and delete all variables. 
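# Minimal sketch of the wtd.quantile() helper defined above in ensemble_adj_visualization.R:
# it orders x, accumulates the normalized weights, and returns the order statistic at
# which each requested probability is first reached. With equal weights on this toy
# vector it returns c(1, 3, 7).
x_toy <- c(5, 1, 9, 3, 7)
w_toy <- rep(1, length(x_toy))
wtd.quantile(x_toy, wt = w_toy, q = c(.025, .5, .975))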
-rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -28,8 +28,8 @@ library(lubridate) library(osfr) library(dplyr) library(PEcAn.utils) -library(bigleaf) # for unit conversions -#umolCO2.to.gC(-9.374368e-09, constants = bigleaf.constants()) # gC m-2 d-1 +library(bigleaf) # for unit conversions +# umolCO2.to.gC(-9.374368e-09, constants = bigleaf.constants()) # gC m-2 d-1 library(RColorBrewer) # display.brewer.all() @@ -39,17 +39,19 @@ osfr::osf_auth("PUT_YOUR_OSF_KEY_HERE") ## options Var <- "GPP" ensembles <- 20 -year_select <- seq(2002,2014,1) -Site <- "2000002567" #"2000002575", 2000002574, 2000002570, 2000002583, 2000002567 -Site_name <- "US-Me2" #US-Syv, US-PFa, US-NR1, US-MMS, US-Me2 +year_select <- seq(2002, 2014, 1) +Site <- "2000002567" # "2000002575", 2000002574, 2000002570, 2000002583, 2000002567 +Site_name <- "US-Me2" # US-Syv, US-PFa, US-NR1, US-MMS, US-Me2 #--------------------------------------------------------------------------------------------------# #--------------------------------------------------------------------------------------------------# ### Output figure directory -sda_plots_dir <- file.path(paste0('~/Data/Dropbox/MANUSCRIPTS/BNL_TEST/Dokoohaki_etal_NASA_CMS_SDA_part1/Figures/', - Site_name)) -if (! file.exists(sda_plots_dir)) dir.create(file.path(sda_plots_dir),recursive=TRUE, showWarnings = FALSE) +sda_plots_dir <- file.path(paste0( + "~/Data/Dropbox/MANUSCRIPTS/BNL_TEST/Dokoohaki_etal_NASA_CMS_SDA_part1/Figures/", + Site_name +)) +if (!file.exists(sda_plots_dir)) dir.create(file.path(sda_plots_dir), recursive = TRUE, showWarnings = FALSE) #--------------------------------------------------------------------------------------------------# @@ -64,49 +66,61 @@ fluxnet_dir fluxnet_dirs <- osf_ls_files(fluxnet_dir, n_max = Inf) fluxnet_dirs -fluxnet_site_data_num <- grep(Site_name, fluxnet_dirs$name) -fluxnet_site_data <- fluxnet_dirs[fluxnet_site_data_num,] +fluxnet_site_data_num <- grep(Site_name, fluxnet_dirs$name) +fluxnet_site_data <- fluxnet_dirs[fluxnet_site_data_num, ] # INFO: https://fluxnet.fluxdata.org/data/fluxnet2015-dataset/subset-data-product/ -fluxnet_data_dir <- file.path('~/Data/Dropbox/MANUSCRIPTS/BNL_TEST/Dokoohaki_etal_NASA_CMS_SDA_part1/Data/FLUXNET') -if (! 
file.exists(fluxnet_data_dir)) dir.create(file.path(fluxnet_data_dir),recursive=TRUE, showWarnings = FALSE) +fluxnet_data_dir <- file.path("~/Data/Dropbox/MANUSCRIPTS/BNL_TEST/Dokoohaki_etal_NASA_CMS_SDA_part1/Data/FLUXNET") +if (!file.exists(fluxnet_data_dir)) dir.create(file.path(fluxnet_data_dir), recursive = TRUE, showWarnings = FALSE) # $links$download # fluxnet_site_data$meta[[1]]$links$download # to get download link -fluxnet_local_file <- osf_retrieve_file(fluxnet_site_data$meta[[1]]$links$download) %>% - osf_download(conflicts = "skip", path = file.path(fluxnet_data_dir), - verbose = TRUE, progress = TRUE) -fluxnet_data_dir_uncomp <- file.path(fluxnet_data_dir, - gsub(".zip", "", basename(fluxnet_local_file$local_path))) +fluxnet_local_file <- osf_retrieve_file(fluxnet_site_data$meta[[1]]$links$download) %>% + osf_download( + conflicts = "skip", path = file.path(fluxnet_data_dir), + verbose = TRUE, progress = TRUE + ) +fluxnet_data_dir_uncomp <- file.path( + fluxnet_data_dir, + gsub(".zip", "", basename(fluxnet_local_file$local_path)) +) unzip(fluxnet_local_file$local_path, exdir = fluxnet_data_dir_uncomp, overwrite = FALSE) local_flux_files <- list.files(gsub(pattern = ".zip", replacement = "", fluxnet_local_file$local_path)) # weekly data -fluxnet_ww_data <- read.csv(file = file.path(fluxnet_data_dir_uncomp, - local_flux_files[grep("WW", local_flux_files)]), header = T) +fluxnet_ww_data <- read.csv(file = file.path( + fluxnet_data_dir_uncomp, + local_flux_files[grep("WW", local_flux_files)] +), header = T) head(fluxnet_ww_data) fluxnet_ww_data[fluxnet_ww_data == -9999.00000] <- NA -range(fluxnet_ww_data$NEE_VUT_REF, na.rm=T) -weeks <- as.Date(as.character(fluxnet_ww_data$TIMESTAMP_END),format="%Y%m%d") -png(filename = file.path(fluxnet_data_dir,paste0("FLX_",Site_name,"_FLUXNET2015_SUBSET_WW_1999-2014_1-4.png")), - width = 2800, height = 1200, res = 200) -par(mfrow=c(1,1), mar=c(4.5,4.7,0.3,0.4), oma=c(0.3,0.9,0.3,0.1)) # B, L, T, R -plot(weeks,fluxnet_ww_data$NEE_VUT_REF, type="l", ylab="NEE (gC m-2 d-1)", lwd=2) -box(lwd=2.2) +range(fluxnet_ww_data$NEE_VUT_REF, na.rm = T) +weeks <- as.Date(as.character(fluxnet_ww_data$TIMESTAMP_END), format = "%Y%m%d") +png( + filename = file.path(fluxnet_data_dir, paste0("FLX_", Site_name, "_FLUXNET2015_SUBSET_WW_1999-2014_1-4.png")), + width = 2800, height = 1200, res = 200 +) +par(mfrow = c(1, 1), mar = c(4.5, 4.7, 0.3, 0.4), oma = c(0.3, 0.9, 0.3, 0.1)) # B, L, T, R +plot(weeks, fluxnet_ww_data$NEE_VUT_REF, type = "l", ylab = "NEE (gC m-2 d-1)", lwd = 2) +box(lwd = 2.2) dev.off() # monthly data -fluxnet_mm_data <- read.csv(file = file.path(fluxnet_data_dir_uncomp, - local_flux_files[grep("_MM_", local_flux_files)]), header = T) +fluxnet_mm_data <- read.csv(file = file.path( + fluxnet_data_dir_uncomp, + local_flux_files[grep("_MM_", local_flux_files)] +), header = T) head(fluxnet_mm_data) fluxnet_mm_data[fluxnet_mm_data == -9999.00000] <- NA -range(fluxnet_mm_data$NEE_VUT_REF, na.rm=T) -months <- as.Date(paste0(as.character(fluxnet_mm_data$TIMESTAMP), '01'), format = '%Y%m%d') -png(filename = file.path(fluxnet_data_dir,paste0("FLX_",Site_name,"_FLUXNET2015_SUBSET_MM_1999-2014_1-4.png")), - width = 2800, height = 1200, res = 200) -par(mfrow=c(1,1), mar=c(4.5,4.7,0.3,0.4), oma=c(0.3,0.9,0.3,0.1)) # B, L, T, R -plot(months,fluxnet_mm_data$NEE_VUT_REF, type="l", ylab="NEE (gC m-2 d-1)", lwd=2) -box(lwd=2.2) +range(fluxnet_mm_data$NEE_VUT_REF, na.rm = T) +months <- as.Date(paste0(as.character(fluxnet_mm_data$TIMESTAMP), "01"), format = "%Y%m%d") 
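# The two idioms above assume the FLUXNET convention of YYYYMM timestamps and -9999
# for missing data; a minimal sketch with made-up values:
ts_toy <- c(200201, 200202)
as.Date(paste0(as.character(ts_toy), "01"), format = "%Y%m%d") # "2002-01-01" "2002-02-01"
nee_toy <- data.frame(NEE_VUT_REF = c(1.2, -9999))
nee_toy[nee_toy == -9999] <- NA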
+png( + filename = file.path(fluxnet_data_dir, paste0("FLX_", Site_name, "_FLUXNET2015_SUBSET_MM_1999-2014_1-4.png")), + width = 2800, height = 1200, res = 200 +) +par(mfrow = c(1, 1), mar = c(4.5, 4.7, 0.3, 0.4), oma = c(0.3, 0.9, 0.3, 0.1)) # B, L, T, R +plot(months, fluxnet_mm_data$NEE_VUT_REF, type = "l", ylab = "NEE (gC m-2 d-1)", lwd = 2) +box(lwd = 2.2) dev.off() #--------------------------------------------------------------------------------------------------# @@ -118,9 +132,9 @@ nasa_cms nasa_cms_dirs <- osf_ls_files(nasa_cms) nasa_cms_dirs -sda_results_dir <- file.path('~/Data/Dropbox/MANUSCRIPTS/BNL_TEST/Dokoohaki_etal_NASA_CMS_SDA_part1/Data/SDA_results') -if (! file.exists(sda_results_dir)) dir.create(file.path(sda_results_dir),recursive=TRUE, showWarnings = FALSE) -site_level_w <- osf_retrieve_file("https://osf.io/wyd9a/") %>% +sda_results_dir <- file.path("~/Data/Dropbox/MANUSCRIPTS/BNL_TEST/Dokoohaki_etal_NASA_CMS_SDA_part1/Data/SDA_results") +if (!file.exists(sda_results_dir)) dir.create(file.path(sda_results_dir), recursive = TRUE, showWarnings = FALSE) +site_level_w <- osf_retrieve_file("https://osf.io/wyd9a/") %>% osf_download(conflicts = "skip", path = file.path(sda_results_dir)) site_level_w <- readRDS(site_level_w$local_path) head(site_level_w) @@ -131,90 +145,99 @@ head(site_level_w) ### Get SDA ncfiles from OSF ncdf_data <- osf_ls_files(nasa_cms, path = nasa_cms_dirs$name[5]) ncdf_data -ncdf_file_num <- grep(Var, ncdf_data$name) -ncdf_file <- ncdf_data[ncdf_file_num,] +ncdf_file_num <- grep(Var, ncdf_data$name) +ncdf_file <- ncdf_data[ncdf_file_num, ] -ncdf_sda_local_file <- osf_retrieve_file(ncdf_file$meta[[1]]$links$download) %>% - osf_download(conflicts = "skip", path = file.path(sda_results_dir), - verbose = TRUE, progress = TRUE) +ncdf_sda_local_file <- osf_retrieve_file(ncdf_file$meta[[1]]$links$download) %>% + osf_download( + conflicts = "skip", path = file.path(sda_results_dir), + verbose = TRUE, progress = TRUE + ) ncdf_sda_ncin <- nc_open(ncdf_sda_local_file$local_path) ncdf_sda_ncin #--------------------------------------------------------------------------------------------------# #--------------------------------------------------------------------------------------------------# -rw <- ncvar_get(ncdf_sda_ncin, 'weights_rrel') -sites <- ncvar_get(ncdf_sda_ncin, 'site') %>% as.character() -Year <- ncvar_get(ncdf_sda_ncin, 'Year') -time <- ncvar_get(ncdf_sda_ncin, 'time') +rw <- ncvar_get(ncdf_sda_ncin, "weights_rrel") +sites <- ncvar_get(ncdf_sda_ncin, "site") %>% as.character() +Year <- ncvar_get(ncdf_sda_ncin, "Year") +time <- ncvar_get(ncdf_sda_ncin, "time") tunits <- ncatt_get(ncdf_sda_ncin, "time", "units") var.nc <- ncvar_get(ncdf_sda_ncin, Var) # keep the focal site keep <- which(sites %in% Site) -#convert time -data_posix_time <- seq.POSIXt(as.POSIXct("1986-01-01 00:00:00", tz="US/Eastern"), - as.POSIXct("2018-12-30 21:00:00", tz="US/Eastern"), - length.out = length(time)) - -png(filename = file.path(sda_plots_dir,paste0(Site_name,'_raw_SDA_',Var,'_kgC_m2_s1.png')), - width = 2800, height = 1200, res = 200) -par(mfrow=c(1,1), mar=c(4.5,4.7,0.3,0.4), oma=c(0.3,0.9,0.3,0.1)) # B, L, T, R -plot(data_posix_time,var.nc[,keep,1], type="l", ylab=paste0(Var," (kgC m-2 s-1)"), lwd=2) -box(lwd=2.2) +# convert time +data_posix_time <- seq.POSIXt(as.POSIXct("1986-01-01 00:00:00", tz = "US/Eastern"), + as.POSIXct("2018-12-30 21:00:00", tz = "US/Eastern"), + length.out = length(time) +) + +png( + filename = file.path(sda_plots_dir, paste0(Site_name, 
"_raw_SDA_", Var, "_kgC_m2_s1.png")), + width = 2800, height = 1200, res = 200 +) +par(mfrow = c(1, 1), mar = c(4.5, 4.7, 0.3, 0.4), oma = c(0.3, 0.9, 0.3, 0.1)) # B, L, T, R +plot(data_posix_time, var.nc[, keep, 1], type = "l", ylab = paste0(Var, " (kgC m-2 s-1)"), lwd = 2) +box(lwd = 2.2) dev.off() ### -- Reformating the nc from matrix to data.frame --- DRAFT! # which site to extract? -flux.data <- keep %>% - map_dfr(function(site){ +flux.data <- keep %>% + map_dfr(function(site) { seq_len(ensembles) %>% # foreach ensemble - #seq_len(1) %>% # foreach ensemble - map_dfr(~{ + # seq_len(1) %>% # foreach ensemble + map_dfr(~ { var.nc[, keep, .x] %>% as.data.frame() %>% - mutate(Site=sites[keep], - ensemble=.x + mutate( + Site = sites[keep], + ensemble = .x ) %>% - `colnames<-`(c(Var, 'Site', 'ensemble')) %>% - mutate(posix_time=data_posix_time) + `colnames<-`(c(Var, "Site", "ensemble")) %>% + mutate(posix_time = data_posix_time) }) }) head(flux.data) # convert to umols C m-2 s-1 from kgC m-2 s-1 -temp_convert <- misc.convert(flux.data[,Var], "kg C m-2 s-1", "umol C m-2 s-1") -temp_convert.2 <- umolCO2.to.gC(temp_convert) # gC m-2 3h-1 -flux.data[,Var] <- temp_convert.2 +temp_convert <- misc.convert(flux.data[, Var], "kg C m-2 s-1", "umol C m-2 s-1") +temp_convert.2 <- umolCO2.to.gC(temp_convert) # gC m-2 3h-1 +flux.data[, Var] <- temp_convert.2 head(flux.data) rm(temp_convert, temp_convert.2) -#hist(flux.data[,Var]) +# hist(flux.data[,Var]) #--------------------------------------------------------------------------------------------------# #--------------------------------------------------------------------------------------------------# keep <- which(site_level_w$Site %in% Site) -temp <- site_level_w[keep,] +temp <- site_level_w[keep, ] head(temp) site_level_w_1site <- temp new.p <- flux.data %>% - mutate(Year=lubridate::year(posix_time)) %>% - left_join(site_level_w_1site , - by=c('Site'='Site', - 'ensemble'='ens', - 'Year'='Year')) + mutate(Year = lubridate::year(posix_time)) %>% + left_join(site_level_w_1site, + by = c( + "Site" = "Site", + "ensemble" = "ens", + "Year" = "Year" + ) + ) head(new.p) ## currently SDA has 53 weeks and FLUXNET 52? 
Seems we need to fix date def for SDA unique(lubridate::week(new.p$posix_time)) flux.data.df <- new.p %>% - mutate(Week=lubridate::week(posix_time)) %>% + mutate(Week = lubridate::week(posix_time)) %>% group_by(Site, Year, Week) flux.data.df[flux.data.df$Week == 53, "Week"] <- 52 # make SDA match FLUXNET definition of weeks (total of 52) -flux.data.df$floor_date <- lubridate::floor_date(as.Date(flux.data.df$posix_time), unit="week") -flux.data.df$ceiling_date <- lubridate::ceiling_date(flux.data.df$posix_time, unit="week") +flux.data.df$floor_date <- lubridate::floor_date(as.Date(flux.data.df$posix_time), unit = "week") +flux.data.df$ceiling_date <- lubridate::ceiling_date(flux.data.df$posix_time, unit = "week") unique(flux.data.df$Week) head(flux.data.df) @@ -223,30 +246,30 @@ flux.data.df <- flux.data.df %>% summarise( Forecast = mean(get(Var)), ForecastVar = var(get(Var)), - Analysis= Hmisc::wtd.mean(get(Var), weights = Relative_weight), - AnalysisVar= Hmisc::wtd.var(get(Var), weights = Relative_weight, normwt=TRUE, method='unbiased'), + Analysis = Hmisc::wtd.mean(get(Var), weights = Relative_weight), + AnalysisVar = Hmisc::wtd.var(get(Var), weights = Relative_weight, normwt = TRUE, method = "unbiased"), ) head(flux.data.df) flux.data.df <- flux.data.df %>% - mutate(Date=as.Date( - paste(Year, Week, 1), + mutate(Date = as.Date( + paste(Year, Week, 1), format = "%Y %U %u", tz = "US/Eastern" - )) + )) head(flux.data.df) unique(flux.data.df$Week) flux.data.df <- flux.data.df %>% mutate( - ForecastSD=sqrt(ForecastVar), - AnalysisSD=sqrt(AnalysisVar) + ForecastSD = sqrt(ForecastVar), + AnalysisSD = sqrt(AnalysisVar) ) %>% mutate( - FLL =Forecast + ForecastSD, # Change this to what level of uncertainty you wanna plot - FUL =Forecast - ForecastSD, - ALL =Analysis + AnalysisSD, - AUL =Analysis - AnalysisSD + FLL = Forecast + ForecastSD, # Change this to what level of uncertainty you wanna plot + FUL = Forecast - ForecastSD, + ALL = Analysis + AnalysisSD, + AUL = Analysis - AnalysisSD ) names(flux.data.df) head(flux.data.df) @@ -261,7 +284,7 @@ head(flux.data.df_sub) names(fluxnet_ww_data) fluxnet_ww_data_sub <- fluxnet_ww_data %>% - mutate(Date=as.Date(paste0(as.character(TIMESTAMP_START), '01'), format='%Y%m%d')) %>% + mutate(Date = as.Date(paste0(as.character(TIMESTAMP_START), "01"), format = "%Y%m%d")) %>% filter(lubridate::year(Date) %in% year_select) %>% mutate(Week = 1:n()) head(fluxnet_ww_data_sub) @@ -274,22 +297,26 @@ head(fluxnet_ww_data_sub) # quick analysis/forecast fig, based on Hamze's example flux.data.df %>% split(.$Site) %>% - {.[1:1]} %>% # plot just the first site + { + .[1:1] + } %>% # plot just the first site map(~ { .x %>% - mutate(Date=as.POSIXct(Date))%>% - ggplot(aes(x=Date))+ - geom_pointrange(aes(y= Forecast, ymin=FLL, ymax=FUL, color="Forecast"), alpha=0.75)+ - geom_pointrange(aes(y= Analysis, ymin=ALL, ymax=AUL, color="Analysis"), alpha=0.75)+ - labs(title = .x$Site, y=Var)+ - scale_color_manual(values=c("#ff7f00",'#4daf4a','#984ea3'), name="")+ - theme_minimal(base_size = 15)+ - scale_x_datetime(date_breaks = "2 year")+ - theme(axis.text.x = element_text(angle = 90, hjust = 1))+ + mutate(Date = as.POSIXct(Date)) %>% + ggplot(aes(x = Date)) + + geom_pointrange(aes(y = Forecast, ymin = FLL, ymax = FUL, color = "Forecast"), alpha = 0.75) + + geom_pointrange(aes(y = Analysis, ymin = ALL, ymax = AUL, color = "Analysis"), alpha = 0.75) + + labs(title = .x$Site, y = Var) + + scale_color_manual(values = c("#ff7f00", "#4daf4a", "#984ea3"), name = "") + + 
theme_minimal(base_size = 15) + + scale_x_datetime(date_breaks = "2 year") + + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + theme(legend.position = "top") - ggsave(file=file.path(sda_plots_dir,paste0("timeseries_",.x$Site %>% unique(),".png")), - width=14, height=7) - #browser() + ggsave( + file = file.path(sda_plots_dir, paste0("timeseries_", .x$Site %>% unique(), ".png")), + width = 14, height = 7 + ) + # browser() }) @@ -301,8 +328,8 @@ flux.data.df %>% # mutate(Week = 1:n()) # flux.data.df_sub_yr <- flux.data.df_sub %>% # filter(Year %in% year_select[i]) -# -# png(filename = file.path(sda_plots_dir,paste0(Site_name,'_SDA_',year_select[i],'_NEE_weeklty_comparison_v1.png')), +# +# png(filename = file.path(sda_plots_dir,paste0(Site_name,'_SDA_',year_select[i],'_NEE_weeklty_comparison_v1.png')), # width = 2800, height = 1200, res = 200) # par(mfrow=c(1,1), mar=c(4.5,4.7,0.3,0.4), oma=c(0.3,0.9,0.3,0.1)) # B, L, T, R # plot(fluxnet_ww_data_sub_yr$Week,fluxnet_ww_data_sub_yr$NEE_VUT_REF, type="l", ylab="NEE (gC m-2 d-1)", @@ -318,15 +345,15 @@ flux.data.df %>% # Site Year Week Forecast ForecastVar Analysis AnalysisVar Date ForecastSD AnalysisSD FLL FUL ALL AUL head(fluxnet_ww_data_sub) -#fluxnet_ww_data_sub[,paste0(Var,"_NT_VUT_REF")] -if (Var %in% c("GPP","RECO")) { - varef <- paste0(Var,"_NT_VUT_REF") - ymin_var <- paste0(Var,"_NT_VUT_25") - ymax_var <- paste0(Var,"_NT_VUT_75") +# fluxnet_ww_data_sub[,paste0(Var,"_NT_VUT_REF")] +if (Var %in% c("GPP", "RECO")) { + varef <- paste0(Var, "_NT_VUT_REF") + ymin_var <- paste0(Var, "_NT_VUT_25") + ymax_var <- paste0(Var, "_NT_VUT_75") } else { - varef <- paste0(Var,"_VUT_REF") - ymin_var <- paste0(Var,"_VUT_25") - ymax_var <- paste0(Var,"_VUT_75") + varef <- paste0(Var, "_VUT_REF") + ymin_var <- paste0(Var, "_VUT_25") + ymax_var <- paste0(Var, "_VUT_75") } # brewer.pal(5, "Dark2") @@ -337,30 +364,53 @@ for (i in seq_along(year_select)) { mutate(Week = 1:n()) flux.data.df_sub_yr <- flux.data.df_sub %>% filter(Year %in% year_select[i]) - - p <- ggplot() + - geom_ribbon(data=flux.data.df_sub_yr, aes(x=Week, y=Forecast, ymin=FLL, ymax=FUL), - alpha=0.3, colour="black", fill = "#1B9E77") + theme_bw(base_size = 15) + - geom_point(data=flux.data.df_sub_yr, aes(x=Week, y=Forecast), colour="#1B9E77", - alpha=0.85, size = 5, shape=18) + - geom_line(data=flux.data.df_sub_yr, aes(x=Week, y=Forecast), - colour="#1B9E77") + - geom_ribbon(data=flux.data.df_sub_yr, aes(x=Week, y=Analysis, ymin=ALL, ymax=AUL), - alpha=0.3, colour="black", fill = "#D95F02") + theme_bw(base_size = 15) + - geom_point(data=flux.data.df_sub_yr, aes(x=Week, y=Analysis), colour="#D95F02", - alpha=0.75, size = 5) + geom_line(data=flux.data.df_sub_yr, aes(x=Week, y=Analysis), - colour="#D95F02") + - geom_errorbar(data=fluxnet_ww_data_sub_yr, - aes(x=Week, ymin=get(ymin_var), ymax=get(ymax_var))) + - geom_line(data=fluxnet_ww_data_sub_yr,aes(x=Week,y=get(varef))) + - ylab(bquote(.(Var) ~ (gC~m^{-2}~d^{-1}))) + - xlab(paste0("Week ",year_select[i])) - ggsave(p,file=file.path(sda_plots_dir,paste0(Site_name,"_SDA_",year_select[i],"_",Var,"_weekly_comparison_v2.png")), - width=14, height=7) - rm(p,fluxnet_ww_data_sub_yr,flux.data.df_sub_yr) + + p <- ggplot() + + geom_ribbon( + data = flux.data.df_sub_yr, aes(x = Week, y = Forecast, ymin = FLL, ymax = FUL), + alpha = 0.3, colour = "black", fill = "#1B9E77" + ) + + theme_bw(base_size = 15) + + geom_point( + data = flux.data.df_sub_yr, aes(x = Week, y = Forecast), colour = "#1B9E77", + alpha = 0.85, size = 5, shape = 18 + ) + + 
geom_line( + data = flux.data.df_sub_yr, aes(x = Week, y = Forecast), + colour = "#1B9E77" + ) + + geom_ribbon( + data = flux.data.df_sub_yr, aes(x = Week, y = Analysis, ymin = ALL, ymax = AUL), + alpha = 0.3, colour = "black", fill = "#D95F02" + ) + + theme_bw(base_size = 15) + + geom_point( + data = flux.data.df_sub_yr, aes(x = Week, y = Analysis), colour = "#D95F02", + alpha = 0.75, size = 5 + ) + + geom_line( + data = flux.data.df_sub_yr, aes(x = Week, y = Analysis), + colour = "#D95F02" + ) + + geom_errorbar( + data = fluxnet_ww_data_sub_yr, + aes(x = Week, ymin = get(ymin_var), ymax = get(ymax_var)) + ) + + geom_line(data = fluxnet_ww_data_sub_yr, aes(x = Week, y = get(varef))) + + ylab(bquote(.(Var) ~ (gC ~ m^{ + -2 + } ~ d^{ + -1 + }))) + + xlab(paste0("Week ", year_select[i])) + ggsave(p, + file = file.path(sda_plots_dir, paste0(Site_name, "_SDA_", year_select[i], "_", Var, "_weekly_comparison_v2.png")), + width = 14, height = 7 + ) + rm(p, fluxnet_ww_data_sub_yr, flux.data.df_sub_yr) } #--------------------------------------------------------------------------------------------------# #--------------------------------------------------------------------------------------------------# -### EOF \ No newline at end of file +### EOF diff --git a/modules/assim.sequential/inst/fluxnet_sandbox/FLUX_plotting_Hamze.R b/modules/assim.sequential/inst/fluxnet_sandbox/FLUX_plotting_Hamze.R index 76f328e98ad..bf7bfb9d7f4 100644 --- a/modules/assim.sequential/inst/fluxnet_sandbox/FLUX_plotting_Hamze.R +++ b/modules/assim.sequential/inst/fluxnet_sandbox/FLUX_plotting_Hamze.R @@ -5,81 +5,90 @@ library(scales) library(lubridate) setwd("/projectnb/dietzelab/hamzed/SDA/ProductionRun/500Sites/Outs/NC") site_level_w <- readRDS("/projectnb/dietzelab/hamzed/SDA/ProductionRun/500Sites/Weights/site_level_w.RDS") -# The main difference between this and other flux comparison files is that this +# The main difference between this and other flux comparison files is that this # would use the nc files not the SQL database #----------------------------------------------------------------------------------- -Var <- 'NEE' -ncin <- nc_open(paste0(Var, '.nc')) -rw <- ncvar_get(ncin, 'weights_rrel') -sites <- ncvar_get(ncin, 'site') %>% as.character() -Year <- ncvar_get(ncin, 'Year') -time <- ncvar_get(ncin, 'time') +Var <- "NEE" +ncin <- nc_open(paste0(Var, ".nc")) +rw <- ncvar_get(ncin, "weights_rrel") +sites <- ncvar_get(ncin, "site") %>% as.character() +Year <- ncvar_get(ncin, "Year") +time <- ncvar_get(ncin, "time") tunits <- ncatt_get(ncin, "time", "units") var.nc <- ncvar_get(ncin, Var) -#convert time -posix<- seq.POSIXt(as.POSIXct("1986-01-01 00:00:00", tz="EDT"), - as.POSIXct("2018-12-30 21:00:00", tz="EDT"), - length.out = length(time)) +# convert time +posix <- seq.POSIXt(as.POSIXct("1986-01-01 00:00:00", tz = "EDT"), + as.POSIXct("2018-12-30 21:00:00", tz = "EDT"), + length.out = length(time) +) #-- Reformating the nc from matrix to data.frame -flux.data <-seq_len(length(sites)) %>% - map_dfr(function(site){ +flux.data <- seq_len(length(sites)) %>% + map_dfr(function(site) { seq_len(20) %>% # foreach ensemble - map_dfr(~{ + map_dfr(~ { var.nc[, site, .x] %>% as.data.frame() %>% - mutate(Site=sites[site], - ensemble=.x + mutate( + Site = sites[site], + ensemble = .x ) %>% - `colnames<-`(c(Var, 'Site', 'ensemble')) %>% - mutate(posix=posix) + `colnames<-`(c(Var, "Site", "ensemble")) %>% + mutate(posix = posix) }) }) #--------------------------------------------------------------------------------- new.p <- 
flux.data %>% - mutate(Year=lubridate::year(posix)) %>% - left_join(site_level_w , - by=c('Site'='Site', - 'ensemble'='ens', - 'Year'='Year')) + mutate(Year = lubridate::year(posix)) %>% + left_join(site_level_w, + by = c( + "Site" = "Site", + "ensemble" = "ens", + "Year" = "Year" + ) + ) flux.data.df <- new.p %>% - mutate(Week=week(posix)) %>% + mutate(Week = week(posix)) %>% group_by(Site, Year, Week) %>% summarise( Forecast = mean(NEE), - ForecastVar=var(NEE), - Analysis= Hmisc::wtd.mean(NEE, weights = Relative_weight), - AnalysisVar= Hmisc::wtd.var(NEE, weights = Relative_weight, normwt=TRUE, method='unbiased'), + ForecastVar = var(NEE), + Analysis = Hmisc::wtd.mean(NEE, weights = Relative_weight), + AnalysisVar = Hmisc::wtd.var(NEE, weights = Relative_weight, normwt = TRUE, method = "unbiased"), ) %>% - mutate(Date=as.Date( - paste(Year, Week, 1), + mutate(Date = as.Date( + paste(Year, Week, 1), format = "%Y %U %u" )) %>% mutate( - ForecastSD=sqrt(ForecastVar), - AnalysisSD=sqrt(AnalysisVar) + ForecastSD = sqrt(ForecastVar), + AnalysisSD = sqrt(AnalysisVar) ) %>% mutate( - FLL =Forecast + ForecastSD, # Change this to what level of uncertainty you wanna plot - FUL =Forecast - ForecastSD, - ALL =Analysis + AnalysisSD, - AUL =Analysis - AnalysisSD + FLL = Forecast + ForecastSD, # Change this to what level of uncertainty you wanna plot + FUL = Forecast - ForecastSD, + ALL = Analysis + AnalysisSD, + AUL = Analysis - AnalysisSD ) flux.data.df %>% split(.$Site) %>% - {.[1:5]} %>% # plot just the first site + { + .[1:5] + } %>% # plot just the first site map(~ { .x %>% - mutate(Date=as.POSIXct(Date))%>% - ggplot(aes(x=Date))+ - geom_pointrange(aes(y= Forecast, ymin=FLL, ymax=FUL, color="Forecast"), alpha=0.75)+ - geom_pointrange(aes(y= Analysis, ymin=ALL, ymax=AUL, color="Analysis"), alpha=0.75)+ - labs(title = .x$Site, y=Var)+ - scale_color_manual(values=c("#ff7f00",'#4daf4a','#984ea3'), name="")+ - theme_minimal(base_size = 15)+ - scale_x_datetime(date_breaks = "2 year")+ - theme(axis.text.x = element_text(angle = 90, hjust = 1))+ + mutate(Date = as.POSIXct(Date)) %>% + ggplot(aes(x = Date)) + + geom_pointrange(aes(y = Forecast, ymin = FLL, ymax = FUL, color = "Forecast"), alpha = 0.75) + + geom_pointrange(aes(y = Analysis, ymin = ALL, ymax = AUL, color = "Analysis"), alpha = 0.75) + + labs(title = .x$Site, y = Var) + + scale_color_manual(values = c("#ff7f00", "#4daf4a", "#984ea3"), name = "") + + theme_minimal(base_size = 15) + + scale_x_datetime(date_breaks = "2 year") + + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + theme(legend.position = "top") - ggsave(file=paste0("timeseries_",.x$Site %>% unique(),".png"), - width=14, height=7) - #browser() - }) \ No newline at end of file + ggsave( + file = paste0("timeseries_", .x$Site %>% unique(), ".png"), + width = 14, height = 7 + ) + # browser() + }) diff --git a/modules/assim.sequential/inst/fluxnet_sandbox/plot_sda_results_by_site.R b/modules/assim.sequential/inst/fluxnet_sandbox/plot_sda_results_by_site.R index 7aa24ea643d..7d3f12b64bf 100644 --- a/modules/assim.sequential/inst/fluxnet_sandbox/plot_sda_results_by_site.R +++ b/modules/assim.sequential/inst/fluxnet_sandbox/plot_sda_results_by_site.R @@ -11,9 +11,9 @@ #--------------------------------------------------------------------------------------------------# # Close all devices and delete all variables. 
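# The Analysis columns built above are weighted ensemble moments; a minimal sketch for
# a single site/week, with made-up members and weights:
library(Hmisc)
nee_ens <- c(-1.1, -0.8, -1.4) # toy ensemble members
w_rel <- c(0.5, 0.3, 0.2) # toy relative weights from the SDA
Hmisc::wtd.mean(nee_ens, weights = w_rel)
Hmisc::wtd.var(nee_ens, weights = w_rel, normwt = TRUE, method = "unbiased")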
-rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# diff --git a/modules/assim.sequential/inst/hf_landscape/04_ForwardOnlyForecast.R b/modules/assim.sequential/inst/hf_landscape/04_ForwardOnlyForecast.R index 965d488d1bb..ea6035d04e4 100644 --- a/modules/assim.sequential/inst/hf_landscape/04_ForwardOnlyForecast.R +++ b/modules/assim.sequential/inst/hf_landscape/04_ForwardOnlyForecast.R @@ -2,67 +2,72 @@ library("PEcAn.all") ## parse start date -option_list = list(optparse::make_option("--start.date", - default = Sys.Date(), - type="character")) +option_list <- list(optparse::make_option("--start.date", + default = Sys.Date(), + type = "character" +)) args <- optparse::parse_args(optparse::OptionParser(option_list = option_list)) -#args$start.date = "2022-05-17" -start.date = lubridate::as_date(args$start.date) - +# args$start.date = "2022-05-17" +start.date <- lubridate::as_date(args$start.date) + #### grab & update default settings #### -set = readRDS("/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/pecan.RDS") -##start.date -for(s in seq_along(set)){ - set[[s]]$run$start.date = start.date - set[[s]]$run$end.date = start.date + lubridate::days(35) +set <- readRDS("/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/pecan.RDS") +## start.date +for (s in seq_along(set)) { + set[[s]]$run$start.date <- start.date + set[[s]]$run$end.date <- start.date + lubridate::days(35) } ## Find GEFS -con <-PEcAn.DB::db.open(set$database$bety) +con <- PEcAn.DB::db.open(set$database$bety) input_check <- PEcAn.DB::dbfile.input.check( - siteid = 646, #NEON 1000004945, #EMS 758, + siteid = 646, # NEON 1000004945, #EMS 758, startdate = as.character(start.date), enddate = NULL, parentid = NA, - mimetype="text/csv", - formatname="Sipnet.climna", + mimetype = "text/csv", + formatname = "Sipnet.climna", con = con, hostname = PEcAn.remote::fqdn(), - pattern = NULL, + pattern = NULL, exact.dates = TRUE, - return.all=TRUE + return.all = TRUE ) -metList = as.list(file.path(input_check$file_path,input_check$file_name)) -names(metList) = rep("path",length(metList)) +metList <- as.list(file.path(input_check$file_path, input_check$file_name)) +names(metList) <- rep("path", length(metList)) ## met path -for(s in seq_along(set)){ - set[[s]]$run$inputs$met$source = "GEFS" - set[[s]]$run$inputs$met$path = metList +for (s in seq_along(set)) { + set[[s]]$run$inputs$met$source <- "GEFS" + set[[s]]$run$inputs$met$path <- metList } ## outdirs -set$outdir = file.path(set$outdir,paste0("FOF",start.date)) -set$rundir = file.path(set$outdir,"run") -set$modeloutdir = file.path(set$outdir,"out") -set$pfts$pft$outdir = file.path(set$outdir,"pft") +set$outdir <- file.path(set$outdir, paste0("FOF", start.date)) +set$rundir <- file.path(set$outdir, "run") +set$modeloutdir <- file.path(set$outdir, "out") +set$pfts$pft$outdir <- file.path(set$outdir, "pft") dir.create(set$outdir) dir.create(set$rundir) dir.create(set$modeloutdir) dir.create(set$pfts$pft$outdir) ## job.sh -set$model$jobtemplate = "/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/template.job" +set$model$jobtemplate <- "/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/template.job" set <- PEcAn.settings::prepare.settings(set, force = 
FALSE) ## add soil pft to all sites -soil.pft = grep(pattern = "soil",x=unlist(sapply(set$pfts,function(x){x$name}))) -for(i in seq_along(set)){ - set$run[[i]]$site$site.pft[[2]] = set$pfts[[soil.pft]]$name - names(set$run[[i]]$site$site.pft)[2] = "pft.name" +soil.pft <- grep(pattern = "soil", x = unlist(sapply(set$pfts, function(x) { + x$name +}))) +for (i in seq_along(set)) { + set$run[[i]]$site$site.pft[[2]] <- set$pfts[[soil.pft]]$name + names(set$run[[i]]$site$site.pft)[2] <- "pft.name" } ## check PFTs -pft.names = unlist(sapply(set$pfts,function(x){x$name})) -set$pfts[[which(is.na(pft.names))]] = NULL +pft.names <- unlist(sapply(set$pfts, function(x) { + x$name +})) +set$pfts[[which(is.na(pft.names))]] <- NULL ## run workflow set <- PEcAn.workflow::runModule.run.write.configs(set) @@ -71,4 +76,4 @@ PEcAn.workflow::runModule_start_model_runs(set, stop.on.error = FALSE) ## future work ## * Integrate in Phyllis's changes to be able to save and reuse previous ensemble draws ## * restart from previous forecast -## not sure why need to delete last line from met to get things to run. \ No newline at end of file +## not sure why need to delete last line from met to get things to run. diff --git a/modules/assim.sequential/inst/hf_landscape/05B_SDA_Workflow_NA.reanalysis.R b/modules/assim.sequential/inst/hf_landscape/05B_SDA_Workflow_NA.reanalysis.R index 66ea9181fda..4c173ad1344 100644 --- a/modules/assim.sequential/inst/hf_landscape/05B_SDA_Workflow_NA.reanalysis.R +++ b/modules/assim.sequential/inst/hf_landscape/05B_SDA_Workflow_NA.reanalysis.R @@ -1,44 +1,46 @@ ## Reanalysis helper script around 05_SDA_Workflow_NA ## original start date was 2022-05-17 -runDays <- seq(as.Date("2022-05-20"), as.Date("2023-05-11"), by="days") -FORCE = FALSE ## should we overwrite previously completed runs +runDays <- seq(as.Date("2022-05-20"), as.Date("2023-05-11"), by = "days") +FORCE <- FALSE ## should we overwrite previously completed runs for (s in seq_along(runDays)) { ## did we do this run already? - now = paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/FNA",runDays[s]) + now <- paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/FNA", runDays[s]) print(now) - this.out = dir(file.path(now,"out"),full.names = TRUE) - if(length(this.out) > 0 & !FORCE) break - + this.out <- dir(file.path(now, "out"), full.names = TRUE) + if (length(this.out) > 0 & !FORCE) break + ## find previous run - yesterday = runDays[s] - lubridate::days(1) - NoMet = read.csv("/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/NO_MET",header=FALSE)[,1] - while(as.character(yesterday) %in% NoMet & yesterday - runDays[s] < lubridate::days(35) ){ - yesterday = yesterday - lubridate::days(1) + yesterday <- runDays[s] - lubridate::days(1) + NoMet <- read.csv("/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/NO_MET", header = FALSE)[, 1] + while (as.character(yesterday) %in% NoMet & yesterday - runDays[s] < lubridate::days(35)) { + yesterday <- yesterday - lubridate::days(1) } - prev = paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/FNA",yesterday,"/") - if(dir.exists(prev)){ + prev <- paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/FNA", yesterday, "/") + if (dir.exists(prev)) { ## is there output there? 
- prev.out = dir(file.path(prev,"out"),full.names = TRUE) - if(length(prev.out)>0){ - prev.files = sapply(as.list(prev.out),function(x){length(dir(x,pattern = "*.nc"))}) - if(min(prev.files)>0){ - + prev.out <- dir(file.path(prev, "out"), full.names = TRUE) + if (length(prev.out) > 0) { + prev.files <- sapply(as.list(prev.out), function(x) { + length(dir(x, pattern = "*.nc")) + }) + if (min(prev.files) > 0) { ######### RUN FORECAST ######## - msg = system2("/home/dietze/pecan/modules/assim.sequential/inst/hf_landscape/05_SDA_Workflow_NA.R", - paste("--start.date",runDays[s],"--prev",prev), - wait=TRUE, - stdout="stdout.log", - stderr="stderr.log") + msg <- system2("/home/dietze/pecan/modules/assim.sequential/inst/hf_landscape/05_SDA_Workflow_NA.R", + paste("--start.date", runDays[s], "--prev", prev), + wait = TRUE, + stdout = "stdout.log", + stderr = "stderr.log" + ) print(msg) - } - } else { break } + } else { + break + } } else { ## previous run didn't occur break } - } ########################################## @@ -50,8 +52,8 @@ for (s in seq_along(runDays)) { source("~/pecan/modules/assim.sequential/inst/hf_landscape/PEcAn2EFI.R") source("~/pecan/modules/assim.sequential/inst/hf_landscape/minio_secrets.R") ## minio_secrets contains the following: -#minio_key <- Sys.getenv("MINIO_ACCESS_KEY", "username") -#minio_secret <- Sys.getenv("MINIO_SECRET_KEY", "password") +# minio_key <- Sys.getenv("MINIO_ACCESS_KEY", "username") +# minio_secret <- Sys.getenv("MINIO_SECRET_KEY", "password") minio_host <- Sys.getenv("MINIO_HOST", "test-pecan.bu.edu") minio_port <- Sys.getenv("MINIO_PORT", "9000") minio_arrow_bucket <- Sys.getenv("MINIO_ARROW_BUCKET", "hf-landscape-none") @@ -66,52 +68,64 @@ minio_uri_public <- function(...) { sprintf(template, minio_path(...), minio_host, ":", minio_port) } -runDays <- seq(as.Date("2022-05-18"), as.Date("2023-05-11"), by="days") +runDays <- seq(as.Date("2022-05-18"), as.Date("2023-05-11"), by = "days") ## loop over dates -FORCE = FALSE +FORCE <- FALSE for (s in seq_along(runDays)) { ## did we do this run already? - now = paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/FNA",runDays[s]) + now <- paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/FNA", runDays[s]) print(now) - this.out = dir(file.path(now,"out"),full.names = TRUE) - if(length(this.out) == 0){ + this.out <- dir(file.path(now, "out"), full.names = TRUE) + if (length(this.out) == 0) { print("no output") next - } - + } + ## did we write this run to minio already? 
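# The minio check and upload below follow arrow's partitioned-parquet pattern:
# write_dataset() on a data frame grouped by reference_datetime writes one partition
# per forecast date, and open_dataset() reads it back lazily so filter()/distinct()
# run before collect(). A minimal local sketch (directory and columns are made up):
library(arrow)
library(dplyr)
demo_dir <- file.path(tempdir(), "efi_demo")
demo <- data.frame(
  reference_datetime = rep(c("2022-05-20", "2022-05-21"), each = 2),
  parameter = c(1, 2, 1, 2),
  prediction = rnorm(4)
)
demo %>%
  dplyr::group_by(reference_datetime) %>%
  arrow::write_dataset(demo_dir, format = "parquet")
arrow::open_dataset(demo_dir, format = "parquet") %>%
  dplyr::filter(reference_datetime == "2022-05-20") %>%
  dplyr::distinct(parameter) %>%
  dplyr::collect()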
- if(!FORCE){ ## if not overwriting - ens = arrow::open_dataset(minio_uri_public(), format = "parquet" ) %>% - dplyr::filter(lubridate::as_datetime(reference_datetime) == runDays[s]) %>% - dplyr::distinct(parameter) %>% dplyr::collect() - if(length(ens$parameter)>0) { ## already have output - print(paste("skipping",length(ens$parameter))) + if (!FORCE) { ## if not overwriting + ens <- arrow::open_dataset(minio_uri_public(), format = "parquet") %>% + dplyr::filter(lubridate::as_datetime(reference_datetime) == runDays[s]) %>% + dplyr::distinct(parameter) %>% + dplyr::collect() + if (length(ens$parameter) > 0) { ## already have output + print(paste("skipping", length(ens$parameter))) next } } - + ## identify runs in the output folder - runs = sapply(strsplit(this.out,"/"),function(x){x[grep("ENS",x)]}) - site_ids = unique(sapply(strsplit(runs ,"-"),function(x){as.numeric(x[3])})) - ens_ids = unique(sapply(strsplit(runs ,"-"),function(x){as.numeric(x[2])})) - + runs <- sapply(strsplit(this.out, "/"), function(x) { + x[grep("ENS", x)] + }) + site_ids <- unique(sapply(strsplit(runs, "-"), function(x) { + as.numeric(x[3]) + })) + ens_ids <- unique(sapply(strsplit(runs, "-"), function(x) { + as.numeric(x[2]) + })) + ## read output, convert to EFI standard - out = list() - for(i in seq_along(runs)){ - out[[runs[i]]] = PEcAn2EFI.ens(outdir = file.path(now,"out"), - run.id = runs[i], - start_date = runDays[s]) + out <- list() + for (i in seq_along(runs)) { + out[[runs[i]]] <- PEcAn2EFI.ens( + outdir = file.path(now, "out"), + run.id = runs[i], + start_date = runDays[s] + ) } - out = dplyr::bind_rows(out) - if(!is.numeric(nrow(out)) | nrow(out) == 0) next ## don't insert empty days into minio - out = out %>% relocate(parameter) %>% + out <- dplyr::bind_rows(out) + if (!is.numeric(nrow(out)) | nrow(out) == 0) next ## don't insert empty days into minio + out <- out %>% + relocate(parameter) %>% relocate(site_id) %>% - relocate(time_bounds) %>% rename(datetime=time_bounds) %>% + relocate(time_bounds) %>% + rename(datetime = time_bounds) %>% relocate(reference_datetime) - out = tidyr::pivot_longer(out,5:ncol(out),names_to = "variable",values_to = "prediction") + out <- tidyr::pivot_longer(out, 5:ncol(out), names_to = "variable", values_to = "prediction") ## push to container in parquet format - out %>% dplyr::group_by(reference_datetime) %>% arrow::write_dataset(minio_uri(),format="parquet") - + out %>% + dplyr::group_by(reference_datetime) %>% + arrow::write_dataset(minio_uri(), format = "parquet") } diff --git a/modules/assim.sequential/inst/hf_landscape/05C_SDA_Workflow_NA.forecast.R b/modules/assim.sequential/inst/hf_landscape/05C_SDA_Workflow_NA.forecast.R index dd6e15b88b8..c4d5c9897de 100644 --- a/modules/assim.sequential/inst/hf_landscape/05C_SDA_Workflow_NA.forecast.R +++ b/modules/assim.sequential/inst/hf_landscape/05C_SDA_Workflow_NA.forecast.R @@ -1,58 +1,61 @@ ## Forecast helper script around 05_SDA_Workflow_NA -runDays = Sys.Date() -FORCE = FALSE ## should we overwrite previously completed runs +runDays <- Sys.Date() +FORCE <- FALSE ## should we overwrite previously completed runs ## check for missed days -start_date = runDays -success = FALSE -NoMet = read.csv("/projectnb/dietzelab/dietze/hf_landscape_SDA/test01/NO_MET",header=FALSE)[,1] -while(!success & runDays - start_date < lubridate::days(35) ){ - this.out = dir(file.path(paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test01/FNA",start_date),"out"),full.names = TRUE) - if(length(this.out) > 0 & !FORCE) { ## this day ran 
successfully - success = TRUE +start_date <- runDays +success <- FALSE +NoMet <- read.csv("/projectnb/dietzelab/dietze/hf_landscape_SDA/test01/NO_MET", header = FALSE)[, 1] +while (!success & runDays - start_date < lubridate::days(35)) { + this.out <- dir(file.path(paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test01/FNA", start_date), "out"), full.names = TRUE) + if (length(this.out) > 0 & !FORCE) { ## this day ran successfully + success <- TRUE break } - start_date = start_date - lubridate::days(1) + start_date <- start_date - lubridate::days(1) } -runDays = seq(from=start_date,to=runDays,by="1 day") +runDays <- seq(from = start_date, to = runDays, by = "1 day") ## run forecast for (s in seq_along(runDays)) { ## did we do this run already? - now = paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test01/FNA",runDays[s]) + now <- paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test01/FNA", runDays[s]) print(now) - this.out = dir(file.path(now,"out"),full.names = TRUE) - if(length(this.out) > 0 & !FORCE) break - + this.out <- dir(file.path(now, "out"), full.names = TRUE) + if (length(this.out) > 0 & !FORCE) break + ## find previous run - yesterday = runDays[s] - lubridate::days(1) - NoMet = read.csv("/projectnb/dietzelab/dietze/hf_landscape_SDA/test01/NO_MET",header=FALSE)[,1] - while(as.character(yesterday) %in% NoMet & yesterday - runDays[s] < lubridate::days(35) ){ - yesterday = yesterday - lubridate::days(1) + yesterday <- runDays[s] - lubridate::days(1) + NoMet <- read.csv("/projectnb/dietzelab/dietze/hf_landscape_SDA/test01/NO_MET", header = FALSE)[, 1] + while (as.character(yesterday) %in% NoMet & yesterday - runDays[s] < lubridate::days(35)) { + yesterday <- yesterday - lubridate::days(1) } - prev = paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test01/FNA",yesterday) - if(dir.exists(prev)){ + prev <- paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test01/FNA", yesterday) + if (dir.exists(prev)) { ## is there output there? 
- prev.out = dir(file.path(prev,"out"),full.names = TRUE) - if(length(prev.out)>0){ - prev.files = sapply(as.list(prev.out),function(x){length(dir(x,pattern = "*.nc"))}) - if(min(prev.files)>0){ - + prev.out <- dir(file.path(prev, "out"), full.names = TRUE) + if (length(prev.out) > 0) { + prev.files <- sapply(as.list(prev.out), function(x) { + length(dir(x, pattern = "*.nc")) + }) + if (min(prev.files) > 0) { ######### RUN FORECAST ######## - msg = system2("/home/dietze/pecan/modules/assim.sequential/inst/hf_landscape/05_SDA_Workflow_NA.R", - paste("--start.date",runDays[s],"--prev",prev), - wait=TRUE, - stdout="stdout.log", - stderr="stderr.log") + msg <- system2("/home/dietze/pecan/modules/assim.sequential/inst/hf_landscape/05_SDA_Workflow_NA.R", + paste("--start.date", runDays[s], "--prev", prev), + wait = TRUE, + stdout = "stdout.log", + stderr = "stderr.log" + ) print(msg) - } - } else { break } - } else {mc("ls minio") + } else { + break + } + } else { + mc("ls minio") ## previous run didn't occur break } - } ########################################## @@ -63,8 +66,8 @@ for (s in seq_along(runDays)) { source("~/pecan/modules/assim.sequential/inst/hf_landscape/PEcAn2EFI.R") source("~/pecan/modules/assim.sequential/inst/hf_landscape/minio_secrets.R") ## minio_secrets contains the following: -#minio_key <- Sys.getenv("MINIO_ACCESS_KEY", "username") -#minio_secret <- Sys.getenv("MINIO_SECRET_KEY", "password") +# minio_key <- Sys.getenv("MINIO_ACCESS_KEY", "username") +# minio_secret <- Sys.getenv("MINIO_SECRET_KEY", "password") minio_host <- Sys.getenv("MINIO_HOST", "test-pecan.bu.edu") minio_port <- Sys.getenv("MINIO_PORT", "9000") minio_arrow_bucket <- Sys.getenv("MINIO_ARROW_BUCKET", "hf-landscape-none") @@ -82,40 +85,48 @@ minio_uri_public <- function(...) { ## loop over dates for (s in seq_along(runDays)) { ## did we do this run already? - now = paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test01/FNA",runDays[s]) + now <- paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test01/FNA", runDays[s]) print(now) - this.out = dir(file.path(now,"out"),full.names = TRUE) - if(length(this.out) == 0){ + this.out <- dir(file.path(now, "out"), full.names = TRUE) + if (length(this.out) == 0) { print("no output") next - } - + } + ## did we write this run to minio already? 
- ens = arrow::open_dataset(minio_uri_public(), format = "parquet" ) %>% - dplyr::filter(lubridate::as_datetime(reference_datetime) == runDays[s]) %>% - dplyr::distinct(parameter) %>% dplyr::collect() - if(length(ens$parameter)>0) { - print(paste("skipping",length(ens$parameter))) + ens <- arrow::open_dataset(minio_uri_public(), format = "parquet") %>% + dplyr::filter(lubridate::as_datetime(reference_datetime) == runDays[s]) %>% + dplyr::distinct(parameter) %>% + dplyr::collect() + if (length(ens$parameter) > 0) { + print(paste("skipping", length(ens$parameter))) next } - + ## identify runs in the output folder - runs = sapply(strsplit(this.out,"/"),function(x){x[grep("ENS",x)]}) - site_ids = unique(sapply(strsplit(runs ,"-"),function(x){as.numeric(x[3])})) - ens_ids = unique(sapply(strsplit(runs ,"-"),function(x){as.numeric(x[2])})) - + runs <- sapply(strsplit(this.out, "/"), function(x) { + x[grep("ENS", x)] + }) + site_ids <- unique(sapply(strsplit(runs, "-"), function(x) { + as.numeric(x[3]) + })) + ens_ids <- unique(sapply(strsplit(runs, "-"), function(x) { + as.numeric(x[2]) + })) + ## read output, convert to EFI standard - out = list() - for(i in seq_along(runs)){ - out[[runs[i]]] = PEcAn2EFI.ens(outdir = file.path(now,"out"), - run.id = runs[i], - start_date = runDays[s]) + out <- list() + for (i in seq_along(runs)) { + out[[runs[i]]] <- PEcAn2EFI.ens( + outdir = file.path(now, "out"), + run.id = runs[i], + start_date = runDays[s] + ) } - out = dplyr::bind_rows(out) - + out <- dplyr::bind_rows(out) + ## push to container in parquet format - out %>% dplyr::group_by(reference_datetime) %>% arrow::write_dataset(minio_uri(),format="parquet") - + out %>% + dplyr::group_by(reference_datetime) %>% + arrow::write_dataset(minio_uri(), format = "parquet") } - - diff --git a/modules/assim.sequential/inst/hf_landscape/05_SDA_Workflow_NA.R b/modules/assim.sequential/inst/hf_landscape/05_SDA_Workflow_NA.R index 6c73d37cdb2..ce378d691b8 100755 --- a/modules/assim.sequential/inst/hf_landscape/05_SDA_Workflow_NA.R +++ b/modules/assim.sequential/inst/hf_landscape/05_SDA_Workflow_NA.R @@ -13,11 +13,11 @@ library("tidyverse") library("furrr") library("R.utils") library("dynutils") -library('nimble') +library("nimble") library("sp") library("sf") library("lubridate") -#plan(multisession) +# plan(multisession) stop_quietly <- function() { opt <- options(show.error.messages = FALSE) on.exit(options(opt)) @@ -28,70 +28,81 @@ stop_quietly <- function() { #------------------------------------------Prepared SDA Settings ----- # ---------------------------------------------------------------------------------------------- ## parse start date -option_list = list(optparse::make_option("--start.date", - default = Sys.Date(), - type="character"), - optparse::make_option("--prev", - default = paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/FNA",Sys.Date()-lubridate::days(1)), - type="character") - ) +option_list <- list( + optparse::make_option("--start.date", + default = Sys.Date(), + type = "character" + ), + optparse::make_option("--prev", + default = paste0("/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/FNA", Sys.Date() - lubridate::days(1)), + type = "character" + ) +) args <- optparse::parse_args(optparse::OptionParser(option_list = option_list)) -#args$start.date = "2022-05-18 00:00:00" -#args$prev = "/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/FOF2022-05-17/" -start.date = lubridate::as_date(args$start.date) +# args$start.date = "2022-05-18 00:00:00" +# args$prev = 
"/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/FOF2022-05-17/" +start.date <- lubridate::as_date(args$start.date) #------------------------------------------------------------------------------------------------ #------------------------------------------ Preparing the pecan xml ----------------------------- #------------------------------------------------------------------------------------------------ restart <- list() restart$filepath <- args$prev -set = readRDS("/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/pecan.RDS") +set <- readRDS("/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/pecan.RDS") -#set met.start & met.end +# set met.start & met.end end.date <- start.date + lubridate::days(35) -sda.start = start.date + lubridate::days(1) +sda.start <- start.date + lubridate::days(1) # -------------------------------------------------------------------------------------------------- #---------------------------------------------- NA DATA ------------------------------------- # -------------------------------------------------------------------------------------------------- -#initialize obs.mean/cov NAs +# initialize obs.mean/cov NAs ## TODO: Alexis's version had two dates, need to take a closer list at what dates these should be set to -site.ids <- papply(set,function(x)(x$run$site$id)) %>% unlist() %>% as.character() -nsite = length(site.ids) - -NAdata = data.frame(date = c(rep(start.date,nsite),rep(sda.start,nsite)), - site_id = rep(site.ids,times=2), - data = rep(NA,nsite*2)) +site.ids <- papply(set, function(x) (x$run$site$id)) %>% + unlist() %>% + as.character() +nsite <- length(site.ids) + +NAdata <- data.frame( + date = c(rep(start.date, nsite), rep(sda.start, nsite)), + site_id = rep(site.ids, times = 2), + data = rep(NA, nsite * 2) +) obs.mean <- obs.cov <- split(NAdata, NAdata$date) date.obs <- names(obs.mean) obs.mean <- purrr::map( names(obs.mean), - function(namesl){ + function(namesl) { split( obs.mean[[namesl]], - obs.mean[[namesl]]$site_id) %>% + obs.mean[[namesl]]$site_id + ) %>% purrr::map( - ~.x[3] %>% + ~ .x[3] %>% stats::setNames(c("LAI")) %>% - `row.names<-`(NULL)) + `row.names<-`(NULL) + ) } ) %>% stats::setNames(date.obs) obs.cov <- purrr::map( names(obs.cov), - function(namesl){ + function(namesl) { purrr::map( split( obs.cov[[namesl]], - obs.cov[[namesl]]$site_id), - ~.x[3]^2 %>% - unlist %>% - diag(nrow = 1, ncol = 1)) + obs.cov[[namesl]]$site_id + ), + ~ .x[3]^2 %>% + unlist() %>% + diag(nrow = 1, ncol = 1) + ) } ) %>% stats::setNames(date.obs) -#add start.cut to restart list +# add start.cut to restart list restart$start.cut <- start.date restart$start.cut <- format(restart$start.cut, "%Y-%m-%d %H:%M:%S", tz = "GMT") @@ -99,33 +110,33 @@ restart$start.cut <- format(restart$start.cut, "%Y-%m-%d %H:%M:%S", tz = "GMT") #----------------------------------------------------------------------------------------------- #------------------------------------------ Fixing the settings -------------------------------- #----------------------------------------------------------------------------------------------- -#Using the found dates to run - this will help to download mets -for(s in seq_along(set)){ - set[[s]]$run$start.date = start.date - set[[s]]$run$end.date = end.date - set[[s]]$run$site$met.start = start.date - set[[s]]$run$site$met.end = end.date +# Using the found dates to run - this will help to download mets +for (s in seq_along(set)) { + set[[s]]$run$start.date <- start.date + set[[s]]$run$end.date <- end.date + set[[s]]$run$site$met.start 
<- start.date + set[[s]]$run$site$met.end <- end.date } # Setting dates in assimilation tags - This will help with preprocess split in SDA code -set$state.data.assimilation$start.date <-as.character(start.date) -set$state.data.assimilation$end.date <-as.character(max(names(obs.mean))) +set$state.data.assimilation$start.date <- as.character(start.date) +set$state.data.assimilation$end.date <- as.character(max(names(obs.mean))) # -------------------------------------------------------------------------------------------------- #---------------------------------------------- PEcAn Workflow ------------------------------------- # -------------------------------------------------------------------------------------------------- -#info +# info set$info$date <- paste0(format(Sys.time(), "%Y/%m/%d %H:%M:%S")) next.oldir <- paste0(format(Sys.time(), "%Y-%m-%d-%H-%M")) -#Update/fix/check settings. Will only run the first time it's called, unless force=TRUE -#set <- PEcAn.settings::prepare.settings(set, force = TRUE) +# Update/fix/check settings. Will only run the first time it's called, unless force=TRUE +# set <- PEcAn.settings::prepare.settings(set, force = TRUE) ## TODO: make sure settings are prepared; for remote, make sure to set host directories ## outdirs -set$outdir = file.path(set$outdir,paste0("FNA",start.date,"/")) -set$rundir = file.path(set$outdir,"run") -set$modeloutdir = file.path(set$outdir,"out") -set$pfts$pft$outdir = file.path(set$outdir,"pft") +set$outdir <- file.path(set$outdir, paste0("FNA", start.date, "/")) +set$rundir <- file.path(set$outdir, "run") +set$modeloutdir <- file.path(set$outdir, "out") +set$pfts$pft$outdir <- file.path(set$outdir, "pft") set$host$rundir <- set$rundir set$host$outdir <- set$modeloutdir set$host$folder <- set$modeloutdir @@ -134,58 +145,60 @@ dir.create(set$rundir) dir.create(set$modeloutdir) dir.create(set$pfts$pft$outdir) -#manually add in clim files +# manually add in clim files met_paths <- list.files(path = file.path("/projectnb/dietzelab/ahelgeso/NOAA_met_data_CH1/noaa_clim/HARV", start.date), full.names = TRUE, pattern = ".clim") -if(is_empty(met_paths)){ - print(paste("SKIPPING: NO MET FOR",start.date)) - cat(as.character(start.date),sep="\n",file=file.path(dirname(set$outdir),"NO_MET"),append=TRUE) ## add to list of dates missing met +if (is_empty(met_paths)) { + print(paste("SKIPPING: NO MET FOR", start.date)) + cat(as.character(start.date), sep = "\n", file = file.path(dirname(set$outdir), "NO_MET"), append = TRUE) ## add to list of dates missing met stop_quietly() } -met_paths = as.list(met_paths) -names(met_paths) = rep("path",nsite) -for(s in seq_along(set)){ - set[[s]]$run$inputs$met$source = "GEFS" - set[[s]]$run$inputs$met$path = met_paths +met_paths <- as.list(met_paths) +names(met_paths) <- rep("path", nsite) +for (s in seq_along(set)) { + set[[s]]$run$inputs$met$source <- "GEFS" + set[[s]]$run$inputs$met$path <- met_paths } -#add run ids from previous sda to settings object to be passed to build X -prev_run_ids = list.files(file.path(restart$filepath, "out")) -run_id = as.data.frame(strsplit(prev_run_ids,"-")) %>% t() -colnames(run_id) <- c("pre","ens","site") +# add run ids from previous sda to settings object to be passed to build X +prev_run_ids <- list.files(file.path(restart$filepath, "out")) +run_id <- as.data.frame(strsplit(prev_run_ids, "-")) %>% t() +colnames(run_id) <- c("pre", "ens", "site") rownames(run_id) <- NULL -run_id = as.data.frame(run_id) %>% mutate(folder=prev_run_ids,id = paste0("id",.data$ens)) %>% 
group_by(site) -###settings$runs$id = run_id -for(s in seq_along(set)){ - site_run_id = run_id %>% filter(site == set[[s]]$run$site$id) - set[[s]]$run$id = as.list(site_run_id$folder) - names(set[[s]]$run$id) = site_run_id$id +run_id <- as.data.frame(run_id) %>% + mutate(folder = prev_run_ids, id = paste0("id", .data$ens)) %>% + group_by(site) +### settings$runs$id = run_id +for (s in seq_along(set)) { + site_run_id <- run_id %>% filter(site == set[[s]]$run$site$id) + set[[s]]$run$id <- as.list(site_run_id$folder) + names(set[[s]]$run$id) <- site_run_id$id } ## job.sh -set$model$jobtemplate = "/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/template.job" +set$model$jobtemplate <- "/projectnb/dietzelab/dietze/hf_landscape_SDA/test02/template.job" -#save restart object +# save restart object save(restart, next.oldir, args, file = file.path(set$outdir, "restart.Rdata")) -#run sda function -sda.enkf.multisite(settings = set, - obs.mean = obs.mean, - obs.cov = obs.cov, - Q = NULL, - restart = restart, - forceRun = TRUE, - keepNC = TRUE, - control = list(trace = TRUE, - FF = FALSE, - interactivePlot = FALSE, - TimeseriesPlot = FALSE, - BiasPlot = FALSE, - plot.title = NULL, - facet.plots = FALSE, - debug = FALSE, - pause = FALSE, - Profiling = FALSE, - OutlierDetection=FALSE)) - - - - +# run sda function +sda.enkf.multisite( + settings = set, + obs.mean = obs.mean, + obs.cov = obs.cov, + Q = NULL, + restart = restart, + forceRun = TRUE, + keepNC = TRUE, + control = list( + trace = TRUE, + FF = FALSE, + interactivePlot = FALSE, + TimeseriesPlot = FALSE, + BiasPlot = FALSE, + plot.title = NULL, + facet.plots = FALSE, + debug = FALSE, + pause = FALSE, + Profiling = FALSE, + OutlierDetection = FALSE + ) +) diff --git a/modules/assim.sequential/inst/hf_landscape/07_SMAP.R b/modules/assim.sequential/inst/hf_landscape/07_SMAP.R index 4cd9c3c2841..a8f3717f810 100644 --- a/modules/assim.sequential/inst/hf_landscape/07_SMAP.R +++ b/modules/assim.sequential/inst/hf_landscape/07_SMAP.R @@ -1,70 +1,72 @@ ## SMAP data prep cron script #### Libraries and configs -#BiocManager::install("rhdf5") +# BiocManager::install("rhdf5") library(smapr) library(raster) library(rhdf5) library(magrittr) -### Run once: smapr::set_smap_credentials(username,password) +### Run once: smapr::set_smap_credentials(username,password) ### using login for https://urs.earthdata.nasa.gov/ -smap_dir = "/projectnb/dietzelab/dietze/hf_landscape_SDA/SMAP_data" -smap_file = file.path(smap_dir, "SMAP.csv") -smap_download_dir = file.path(smap_dir,"raw") +smap_dir <- "/projectnb/dietzelab/dietze/hf_landscape_SDA/SMAP_data" +smap_file <- file.path(smap_dir, "SMAP.csv") +smap_download_dir <- file.path(smap_dir, "raw") ## sites from pecan.xml -set = readRDS("/projectnb/dietzelab/dietze/hf_landscape_SDA/test01/pecan.RDS") -sites <- set$run %>% purrr::map('site') %>% - purrr::map_dfr(~c(.x[['id']],.x[['lon']],.x[['lat']]) %>%as.numeric)%>% - t %>% `colnames<-`(c("id","lon","lat")) %>% as.data.frame() -site.id = 758 ## NEON HARV -lat = mean(sites$lat) -lon = mean(sites$lon) +set <- readRDS("/projectnb/dietzelab/dietze/hf_landscape_SDA/test01/pecan.RDS") +sites <- set$run %>% + purrr::map("site") %>% + purrr::map_dfr(~ c(.x[["id"]], .x[["lon"]], .x[["lat"]]) %>% as.numeric()) %>% + t() %>% + `colnames<-`(c("id", "lon", "lat")) %>% + as.data.frame() +site.id <- 758 ## NEON HARV +lat <- mean(sites$lat) +lon <- mean(sites$lon) ### DEFINE HELPER FUNCTIONS -extract_SMAP <- function(file, name = 
c("Soil_Moisture_Retrieval_Data_AM","Soil_Moisture_Retrieval_Data_PM"), in_memory = FALSE,x=NULL,y=NULL){ +extract_SMAP <- function(file, name = c("Soil_Moisture_Retrieval_Data_AM", "Soil_Moisture_Retrieval_Data_PM"), in_memory = FALSE, x = NULL, y = NULL) { ## debugging version of smapr::extract_smap -# smapr:::validate_data(data) -# file <- smapr:::local_h5_paths(data) + # smapr:::validate_data(data) + # file <- smapr:::local_h5_paths(data) n_files <- length(file) - if(n_files > 1) { + if (n_files > 1) { print("currently not supporting multiple time points") return() } rasters <- vector("list", length = 2) for (i in 1:2) { - print(paste(file,name[i])) + print(paste(file, name[i])) h5_in <- rhdf5::h5read(file, name[i]) - if(length(h5_in) == 0){ - print(paste("EMPTY FILE:",file,name[i])) + if (length(h5_in) == 0) { + print(paste("EMPTY FILE:", file, name[i])) next } - if(is.null(x)){ + if (is.null(x)) { ## original design, return a raster - if (smapr:::is_cube(h5_in)) { - rasters[[i]] <- smapr:::rasterize_cube(h5_in, file, name) - } else { -# r <- smapr:::rasterize_matrix(h5_in, file, name) - rasters[[i]] <- rasterize_list(h5_in, file, name[i]) - } + if (smapr:::is_cube(h5_in)) { + rasters[[i]] <- smapr:::rasterize_cube(h5_in, file, name) + } else { + # r <- smapr:::rasterize_matrix(h5_in, file, name) + rasters[[i]] <- rasterize_list(h5_in, file, name[i]) + } } else { ## new version, return extracted data - rasters[[i]] = extract_SMAP_h5(h5_in = h5_in,name=name[i],x=x,y=y) + rasters[[i]] <- extract_SMAP_h5(h5_in = h5_in, name = name[i], x = x, y = y) } } -# output <- smapr:::bundle_rasters(rasters, data, in_memory) -# output - rasters + # output <- smapr:::bundle_rasters(rasters, data, in_memory) + # output + rasters } -rasterize_list <- function (h5_in, file, name) -{ +rasterize_list <- function(h5_in, file, name) { fill_value <- smapr:::find_fill_value(file, name) - if(name == "Soil_Moisture_Retrieval_Data_PM"){ - matrix <- h5_in[["soil_moisture_pm"]] + if (name == "Soil_Moisture_Retrieval_Data_PM") { + matrix <- h5_in[["soil_moisture_pm"]] } else { matrix <- h5_in[["soil_moisture"]] } @@ -74,139 +76,146 @@ rasterize_list <- function (h5_in, file, name) raster_layer } -extract_SMAP_h5 <- function(h5_in,name,x,y){ +extract_SMAP_h5 <- function(h5_in, name, x, y) { fill_value <- smapr:::find_fill_value(file, name) - if(name == "Soil_Moisture_Retrieval_Data_PM"){ - matrix <- h5_in[["soil_moisture_pm"]] - xmap = h5_in[["longitude_centroid_pm"]] - ymap = h5_in[["latitude_centroid_pm"]] + if (name == "Soil_Moisture_Retrieval_Data_PM") { + matrix <- h5_in[["soil_moisture_pm"]] + xmap <- h5_in[["longitude_centroid_pm"]] + ymap <- h5_in[["latitude_centroid_pm"]] } else { matrix <- h5_in[["soil_moisture"]] - xmap = h5_in[["longitude_centroid"]] - ymap = h5_in[["latitude_centroid"]] + xmap <- h5_in[["longitude_centroid"]] + ymap <- h5_in[["latitude_centroid"]] } - if(length(xmap) ==0) { + if (length(xmap) == 0) { print("missing coordinates") print(names(h5_in)) } matrix[matrix == fill_value] <- NA - xmap[xmap==fill_value] = NA - ymap[ymap==fill_value] = NA - Longlat_matrix = as.matrix(data.frame(lon = as.vector(xmap),lat=as.vector(ymap))) - #print(dim(Longlat_matrix)) - valid = apply(Longlat_matrix,1,function(x){all(!is.na(x))}) - smp = rep(NA,length(x)) - for(i in seq_along(x)){ - #print(dim(Longlat_matrix[valid,])) - Distance <- sp::spDistsN1(Longlat_matrix[valid,], - as.matrix(data.frame(lon=as.numeric(x[i]),lat=as.numeric(y[i]))), longlat = TRUE)[-1] - minD = which.min(Distance) + xmap[xmap == 
fill_value] <- NA + ymap[ymap == fill_value] <- NA + Longlat_matrix <- as.matrix(data.frame(lon = as.vector(xmap), lat = as.vector(ymap))) + # print(dim(Longlat_matrix)) + valid <- apply(Longlat_matrix, 1, function(x) { + all(!is.na(x)) + }) + smp <- rep(NA, length(x)) + for (i in seq_along(x)) { + # print(dim(Longlat_matrix[valid,])) + Distance <- sp::spDistsN1(Longlat_matrix[valid, ], + as.matrix(data.frame(lon = as.numeric(x[i]), lat = as.numeric(y[i]))), + longlat = TRUE + )[-1] + minD <- which.min(Distance) dist <- Distance[minD] - print(paste("dist = ",dist)) - if(dist < 10) { ## threshold distance for being in the gridcell, km + print(paste("dist = ", dist)) + if (dist < 10) { ## threshold distance for being in the gridcell, km distloc <- which(valid)[minD] - smp[i] = matrix[distloc] + smp[i] <- matrix[distloc] } } smp } -ave_smp <- function(sm_raster){ - am=sm_raster[[1]] - pm=sm_raster[[2]] - a = apply(cbind(am,pm),1,mean,na.rm=TRUE) - a[is.nan(a)] = NA +ave_smp <- function(sm_raster) { + am <- sm_raster[[1]] + pm <- sm_raster[[2]] + a <- apply(cbind(am, pm), 1, mean, na.rm = TRUE) + a[is.nan(a)] <- NA a } -### Extraction workflow +### Extraction workflow ## Determine dates to be processed -smap_dates = Sys.Date() -smap_dates = as.Date(seq(Sys.Date(),Sys.Date()-lubridate::days(5),by="-1 day")) +smap_dates <- Sys.Date() +smap_dates <- as.Date(seq(Sys.Date(), Sys.Date() - lubridate::days(5), by = "-1 day")) ## NOTE: to grab data from the past, just set smap_dates to the date range you need ## Determine dates already downloaded -prev_download = dir(smap_download_dir,"h5$") -prev_dates = as.Date(substr(prev_download,14,21),format = "%Y%m%d") +prev_download <- dir(smap_download_dir, "h5$") +prev_dates <- as.Date(substr(prev_download, 14, 21), format = "%Y%m%d") ### Download raw h5 files ### -status = data.frame(date = smap_dates, - file = rep(NA,length(smap_dates)), - avail = rep(NA,length(smap_dates))) -for(t in seq_along(smap_dates)){ +status <- data.frame( + date = smap_dates, + file = rep(NA, length(smap_dates)), + avail = rep(NA, length(smap_dates)) +) +for (t in seq_along(smap_dates)) { print(smap_dates[t]) ## check if we've downloaded the raw data for this date already - if(status$date[t] %in% prev_dates){ - - #add to list of filenames - status$file[t] = prev_download[prev_dates == status$date[t]] - status$avail[t] = TRUE - + if (status$date[t] %in% prev_dates) { + # add to list of filenames + status$file[t] <- prev_download[prev_dates == status$date[t]] + status$avail[t] <- TRUE } else { - ## check if new data is available + ## check if new data is available available_data <- find_smap(id = "SPL3SMP", date = smap_dates[t], version = 8) ### if multiple file versions, assume we want the last (latest?) 
one - available_data = available_data[nrow(available_data),] - if(is.na(available_data$dir)){ - status$avail[t] = FALSE + available_data <- available_data[nrow(available_data), ] + if (is.na(available_data$dir)) { + status$avail[t] <- FALSE next } - - downloads <- download_smap(available_data,directory = smap_download_dir) - status$file[t] = paste0(downloads$name,".h5") - status$avail[t] = TRUE - + + downloads <- download_smap(available_data, directory = smap_download_dir) + status$file[t] <- paste0(downloads$name, ".h5") + status$avail[t] <- TRUE } } ## eliminate dates without data -status = status %>% dplyr::filter(avail == TRUE) +status <- status %>% dplyr::filter(avail == TRUE) ## set up storage -if(file.exists(smap_file)){ - SMAP_CSV = read.csv(smap_file) - SMAP_CSV$date = as.Date(SMAP_CSV$date) +if (file.exists(smap_file)) { + SMAP_CSV <- read.csv(smap_file) + SMAP_CSV$date <- as.Date(SMAP_CSV$date) } else { - SMAP_CSV <- matrix(NA, 0, 6) %>% `colnames<-`(c("date", "site_id", "lat", "lon", "smp", "sd")) %>% as.data.frame() + SMAP_CSV <- matrix(NA, 0, 6) %>% + `colnames<-`(c("date", "site_id", "lat", "lon", "smp", "sd")) %>% + as.data.frame() } #### EXTRACT DATA #### -for(t in seq_along(status$date)){ +for (t in seq_along(status$date)) { print(status$date[t]) - if(!status$avail[t]) next - + if (!status$avail[t]) next + ## determine what sites have already been processed - SMAP_done = SMAP_CSV[SMAP_CSV$site_id %in% site.id & SMAP_CSV$date == status$date[t],] - if(nrow(SMAP_done) > 0){ - site.todo = !(site.id %in% SMAP_done$site_id) - if(all(!site.todo)){ next } ## done, skip to the next year + SMAP_done <- SMAP_CSV[SMAP_CSV$site_id %in% site.id & SMAP_CSV$date == status$date[t], ] + if (nrow(SMAP_done) > 0) { + site.todo <- !(site.id %in% SMAP_done$site_id) + if (all(!site.todo)) { + next + } ## done, skip to the next year } else { ## no sites already processed - site.todo = rep(TRUE,length(site.id)) + site.todo <- rep(TRUE, length(site.id)) } - - + + ## load data as a raster, returns a list of length 2 for AM and PM passes - sm_raster <- extract_SMAP(file = file.path(smap_download_dir,status$file[t]), - x=lon[site.todo],y=lat[site.todo]) + sm_raster <- extract_SMAP( + file = file.path(smap_download_dir, status$file[t]), + x = lon[site.todo], y = lat[site.todo] + ) ## extract missing site days -# am = raster::extract(sm_raster[[1]],y = data.frame(lat=lat[site.todo],lon=lon[site.todo]),cellnumbers=TRUE)[,"layer"] -# pm = raster::extract(sm_raster[[2]],y = data.frame(lat=lat[site.todo],lon=lon[site.todo]),cellnumbers=TRUE)[,"layer"] - smp = ave_smp(sm_raster) + # am = raster::extract(sm_raster[[1]],y = data.frame(lat=lat[site.todo],lon=lon[site.todo]),cellnumbers=TRUE)[,"layer"] + # pm = raster::extract(sm_raster[[2]],y = data.frame(lat=lat[site.todo],lon=lon[site.todo]),cellnumbers=TRUE)[,"layer"] + smp <- ave_smp(sm_raster) ## a more sophisticated way of doing this would be to use the cellnumbers to detect duplicate extractions - sd = rep(0.04,length(smp)) ## using same default as Dongchen, which comes from Dan G. - SMAP_CSV <- rbind(SMAP_CSV, tibble::tibble(date=as.Date(status$date[t]), site_id = site.id[site.todo], lat=lat[site.todo], lon=lon[site.todo], smp, sd)) + sd <- rep(0.04, length(smp)) ## using same default as Dongchen, which comes from Dan G. 
+ SMAP_CSV <- rbind(SMAP_CSV, tibble::tibble(date = as.Date(status$date[t]), site_id = site.id[site.todo], lat = lat[site.todo], lon = lon[site.todo], smp, sd)) - ## save - utils::write.csv(SMAP_CSV, file = file.path(smap_dir, "SMAP.csv"), row.names = F) + ## save + utils::write.csv(SMAP_CSV, file = file.path(smap_dir, "SMAP.csv"), row.names = F) } - - diff --git a/modules/assim.sequential/inst/hf_landscape/PEcAn2EFI.R b/modules/assim.sequential/inst/hf_landscape/PEcAn2EFI.R index 304e2ecd245..40a1e121a41 100644 --- a/modules/assim.sequential/inst/hf_landscape/PEcAn2EFI.R +++ b/modules/assim.sequential/inst/hf_landscape/PEcAn2EFI.R @@ -8,29 +8,29 @@ runModule.PEcAn2EFI <- function(settings) { } } -PEcAn2EFI <- function(outdir = NULL, ne = NULL, site_id = NULL, start_date=NULL,end_date = NULL,s=settings){ - #s = settings[[1]] ## testing - if(is.null(outdir)) outdir <- s$modeloutdir - if(is.null(ne)) ne = s$ensemble$size - if(is.null(site_id)) site_id = s$run$site$id - if(is.null(start_date)) start_date = s$run$start.date - if(is.null(end_date)) end_date = start_date + lubridate::days(35) - start.year = lubridate::year(start_date) - end.year = lubridate::year(end_date) - +PEcAn2EFI <- function(outdir = NULL, ne = NULL, site_id = NULL, start_date = NULL, end_date = NULL, s = settings) { + # s = settings[[1]] ## testing + if (is.null(outdir)) outdir <- s$modeloutdir + if (is.null(ne)) ne <- s$ensemble$size + if (is.null(site_id)) site_id <- s$run$site$id + if (is.null(start_date)) start_date <- s$run$start.date + if (is.null(end_date)) end_date <- start_date + lubridate::days(35) + start.year <- lubridate::year(start_date) + end.year <- lubridate::year(end_date) + # ## load ensemble objects, used code from get.results # if (!is.null(ens.ensemble.id)) { - # fname <- ensemble.filename(s, "ensemble.samples", "Rdata", - # ensemble.id = ens.ensemble.id, + # fname <- ensemble.filename(s, "ensemble.samples", "Rdata", + # ensemble.id = ens.ensemble.id, # all.var.yr = TRUE) # } else if (!is.null(s$ensemble$ensemble.id)) { # ens.ensemble.id <- s$ensemble$ensemble.id # fname <- ensemble.filename(s, "ensemble.samples", "Rdata", - # ensemble.id = ens.ensemble.id, + # ensemble.id = ens.ensemble.id, # all.var.yr = TRUE) # } else { # fname <- file.path(outdir, "samples.Rdata") - # } + # } # if (!file.exists(fname)) { # PEcAn.logger::logger.severe("No ensemble samples file found!") # } @@ -38,67 +38,69 @@ PEcAn2EFI <- function(outdir = NULL, ne = NULL, site_id = NULL, start_date=NULL, # if (!exists("ens.run.ids")) { # ens.run.ids <- runs.samples$ens # } - ens.run.ids = paste("ENS", - formatC(1:ne, width = 5, format = "d", flag = "0"), - site_id, - sep="-" + ens.run.ids <- paste("ENS", + formatC(1:ne, width = 5, format = "d", flag = "0"), + site_id, + sep = "-" ) - + ensemble.output <- list() - for (row in seq_along(ens.run.ids)) { #rownames(ens.run.ids) + for (row in seq_along(ens.run.ids)) { # rownames(ens.run.ids) ## read output - #run.id <- ens.run.ids[row, "id"] + # run.id <- ens.run.ids[row, "id"] run.id <- ens.run.ids[row] PEcAn.logger::logger.info("reading ensemble output from run id: ", format(run.id, scientific = FALSE)) - ensemble.output[[row]] <- PEcAn.utils::read.output(run.id, file.path(outdir, run.id), start.year, end.year, variables=NULL) - + ensemble.output[[row]] <- PEcAn.utils::read.output(run.id, file.path(outdir, run.id), start.year, end.year, variables = NULL) + ## reprocess time_bounds - doy = as.vector(ensemble.output[[row]]$time_bounds[1,]) ## day of year - years = c(which(doy < 
0.00001),length(doy)+1) ## get breaks between years - if(years[1] != 1) years = c(1,years) ## add current year if not start of year - years = rep(lubridate::year(start_date):lubridate::year(end_date),times=diff(years)) ## convert to years - tod = lubridate::as_datetime(lubridate::seconds_to_period(floor((doy - floor(doy)) * 60 *60))) ## time of day - lubridate::year(tod) <- years - lubridate::yday(tod) <- floor(doy) - ensemble.output[[row]]$time_bounds = tod - + doy <- as.vector(ensemble.output[[row]]$time_bounds[1, ]) ## day of year + years <- c(which(doy < 0.00001), length(doy) + 1) ## get breaks between years + if (years[1] != 1) years <- c(1, years) ## add current year if not start of year + years <- rep(lubridate::year(start_date):lubridate::year(end_date), times = diff(years)) ## convert to years + tod <- lubridate::as_datetime(lubridate::seconds_to_period(floor((doy - floor(doy)) * 60 * 60))) ## time of day + lubridate::year(tod) <- years + lubridate::yday(tod) <- floor(doy) + ensemble.output[[row]]$time_bounds <- tod + ## convert to data frame - ensemble.output[[row]] = as.data.frame(ensemble.output[[row]]) %>% + ensemble.output[[row]] <- as.data.frame(ensemble.output[[row]]) %>% dplyr::mutate( parameter = row, reference_datetime = lubridate::as_datetime(start_date), site_id = site_id ) } - names(ensemble.output) = ens.run.ids - + names(ensemble.output) <- ens.run.ids + return(dplyr::bind_rows(ensemble.output)) } -PEcAn2EFI.ens <- function(outdir,run.id,start_date,end_date = NULL){ - if(is.null(end_date)) end_date = start_date + lubridate::days(35) - start.year = lubridate::year(start_date) - end.year = lubridate::year(end_date) - ens.id = as.numeric(strsplit(run.id,"-")[[1]][2]) - site_id = as.numeric(strsplit(run.id,"-")[[1]][3]) - +PEcAn2EFI.ens <- function(outdir, run.id, start_date, end_date = NULL) { + if (is.null(end_date)) end_date <- start_date + lubridate::days(35) + start.year <- lubridate::year(start_date) + end.year <- lubridate::year(end_date) + ens.id <- as.numeric(strsplit(run.id, "-")[[1]][2]) + site_id <- as.numeric(strsplit(run.id, "-")[[1]][3]) + PEcAn.logger::logger.info("reading ensemble output from run id: ", format(run.id, scientific = FALSE)) - ensemble.output <- PEcAn.utils::read.output(run.id, file.path(outdir, run.id), start.year, end.year, variables=NULL) - - if(!is.numeric(nrow(ensemble.output))) return(NULL) - + ensemble.output <- PEcAn.utils::read.output(run.id, file.path(outdir, run.id), start.year, end.year, variables = NULL) + + if (!is.numeric(nrow(ensemble.output))) { + return(NULL) + } + ## reprocess time_bounds - doy = as.vector(ensemble.output$time_bounds[1,]) ## day of year - years = c(which(doy < 0.00001),length(doy)+1) ## get breaks between years - if(years[1] != 1) years = c(1,years) ## add current year if not start of year - years = rep(lubridate::year(start_date):lubridate::year(end_date),times=diff(years)) ## convert to years - tod = lubridate::as_datetime(lubridate::seconds_to_period(floor((doy - floor(doy)) * 60 *60*24))) ## time of day - lubridate::year(tod) <- years - lubridate::yday(tod) <- floor(doy) - ensemble.output$time_bounds = tod - + doy <- as.vector(ensemble.output$time_bounds[1, ]) ## day of year + years <- c(which(doy < 0.00001), length(doy) + 1) ## get breaks between years + if (years[1] != 1) years <- c(1, years) ## add current year if not start of year + years <- rep(lubridate::year(start_date):lubridate::year(end_date), times = diff(years)) ## convert to years + tod <- 
lubridate::as_datetime(lubridate::seconds_to_period(floor((doy - floor(doy)) * 60 * 60 * 24))) ## time of day + lubridate::year(tod) <- years + lubridate::yday(tod) <- floor(doy) + ensemble.output$time_bounds <- tod + ## convert to data frame - ensemble.output = as.data.frame(ensemble.output) %>% + ensemble.output <- as.data.frame(ensemble.output) %>% dplyr::mutate( parameter = ens.id, reference_datetime = lubridate::as_datetime(start_date), diff --git a/modules/assim.sequential/inst/metfiltering.exploration.R b/modules/assim.sequential/inst/metfiltering.exploration.R index 6e7f7b7c60b..8ddfd87045e 100644 --- a/modules/assim.sequential/inst/metfiltering.exploration.R +++ b/modules/assim.sequential/inst/metfiltering.exploration.R @@ -1,42 +1,44 @@ library(PEcAn.all) library(nimble) -# SIPNET @ Harvard Forest, ens size 1000 +# SIPNET @ Harvard Forest, ens size 1000 setwd("/fs/data2/output//PEcAn_1000008768") load("/fs/data2/output/PEcAn_1000008768/sda.all.runs.Rdata") -#function for plotting matplot with ensemble number as label -mattext = function(data, data_names, colors, ylab, xlab, type='b', na.fix = FALSE){ - if(na.fix == TRUE){ +# function for plotting matplot with ensemble number as label +mattext <- function(data, data_names, colors, ylab, xlab, type = "b", na.fix = FALSE) { + if (na.fix == TRUE) { data[is.na(data)] <- 0 } - - matplot(data, pch=NA, type=type, col=colors, ylab = ylab, xlab = xlab) - for (i in 1:ncol(data)){ - text(x=1:nrow(data), y=data[,i], lab=data_names[i], col=colors[i]) + + matplot(data, pch = NA, type = type, col = colors, ylab = ylab, xlab = xlab) + for (i in 1:ncol(data)) { + text(x = 1:nrow(data), y = data[, i], lab = data_names[i], col = colors[i]) } } -#calculate the likelihood of the ensemble members given mu.a and Pa +# calculate the likelihood of the ensemble members given mu.a and Pa nens <- nrow(FORECAST[[1]]) nt <- length(FORECAST) -wt.mat <- matrix(NA,nrow=nens,ncol=nt) -for(t in seq_len(nt)){ - for(i in seq_len(nens)){ - wt.mat[i,t]<-dmnorm_chol(FORECAST[[t]][i,],enkf.params[[t]]$mu.a,solve(enkf.params[[t]]$Pa)) +wt.mat <- matrix(NA, nrow = nens, ncol = nt) +for (t in seq_len(nt)) { + for (i in seq_len(nens)) { + wt.mat[i, t] <- dmnorm_chol(FORECAST[[t]][i, ], enkf.params[[t]]$mu.a, solve(enkf.params[[t]]$Pa)) } } -#put into weights table -wt.props <- t(prop.table(wt.mat,2)) +# put into weights table +wt.props <- t(prop.table(wt.mat, 2)) -pdf(file.path(settings$outdir,'ensemble.weights.time-series.pdf')) -par(mfrow=c(1,1)) -mattext(data = wt.props,data_names = as.character(1:nens),colors=rainbow(nens), - ylab = c('Ensemble Weight'), xlab = c('Time')) +pdf(file.path(settings$outdir, "ensemble.weights.time-series.pdf")) +par(mfrow = c(1, 1)) +mattext( + data = wt.props, data_names = as.character(1:nens), colors = rainbow(nens), + ylab = c("Ensemble Weight"), xlab = c("Time") +) dev.off() library(Hmisc) @@ -54,11 +56,11 @@ ens.names <- gsub(".1960.*", "", ens.names) # load orig ensembles to associate names with values load("/fs/data3/istfer/AGU17/met_ensembles/agg_ensembles.Rdata") -# match values -ens.hist <- array(NA,dim=c(nens, nt)) -for(me in seq_len(nt)){ +# match values +ens.hist <- array(NA, dim = c(nens, nt)) +for (me in seq_len(nt)) { inds <- sapply(ens.names, function(x) which(rownames(pre.mat) == x)) - ens.hist[, me] <- pre.mat[inds, (me+1)] + ens.hist[, me] <- pre.mat[inds, (me + 1)] } wt.mean <- wt.var <- numeric(nt) @@ -72,65 +74,72 @@ pre.dfm$year <- as.factor(pre.dfm$year) pre.dfm$variable <- as.factor(pre.dfm$variable) pre.dfm$value <- 
as.numeric(pre.dfm$value) -fontsize = list(title = 18, axis = 14) -theme_set(theme_classic() + theme(axis.text.x = element_text(angle = 90, hjust = 1, size = fontsize$axis), - panel.grid.minor = element_blank(), - panel.border = element_blank(), - axis.text.y = element_text(size = fontsize$axis))) +fontsize <- list(title = 18, axis = 14) +theme_set(theme_classic() + theme( + axis.text.x = element_text(angle = 90, hjust = 1, size = fontsize$axis), + panel.grid.minor = element_blank(), + panel.border = element_blank(), + axis.text.y = element_text(size = fontsize$axis) +)) -ggplot(pre.dfm, aes(x = value, y = year)) + - geom_density_ridges(fill = "dodgerblue1") + theme(legend.position="none")+ coord_flip() +ggplot(pre.dfm, aes(x = value, y = year)) + + geom_density_ridges(fill = "dodgerblue1") + + theme(legend.position = "none") + + coord_flip() pre.dfm$variable <- "prior" pre.dfm$variable <- as.factor(pre.dfm$variable) -for(t in 1:(nt)){ - wt.mean[t] <- wtd.mean(x=ens.hist[,t], weights = wt.props[t,]) - - if(all(is.nan(wt.props[t,]))){ - wt.props[t,] <- 1 +for (t in 1:(nt)) { + wt.mean[t] <- wtd.mean(x = ens.hist[, t], weights = wt.props[t, ]) + + if (all(is.nan(wt.props[t, ]))) { + wt.props[t, ] <- 1 } - - tempw <- wt.props[t,] - tempw[tempw < 0 ] <- 0 - wt.var[t] <- wtd.var(x=ens.hist[,t], weights = tempw) - - tmpbw <- density(ens.hist[,t]) - - - dens <- density(ens.hist[,t], weights = tempw, na.rm = TRUE) - bu <- 1000*(dens$y * dens$bw) - bu[bu<1] <- 1 + + tempw <- wt.props[t, ] + tempw[tempw < 0] <- 0 + wt.var[t] <- wtd.var(x = ens.hist[, t], weights = tempw) + + tmpbw <- density(ens.hist[, t]) + + + dens <- density(ens.hist[, t], weights = tempw, na.rm = TRUE) + bu <- 1000 * (dens$y * dens$bw) + bu[bu < 1] <- 1 bo <- rep(dens$x, times = bu) - - bo.df <- data.frame(year= rep(years[t], length(bo)), - variable = rep("posterior", length(bo)), - value = bo) - pre.dfm <- rbind(pre.dfm,bo.df) + + bo.df <- data.frame( + year = rep(years[t], length(bo)), + variable = rep("posterior", length(bo)), + value = bo + ) + pre.dfm <- rbind(pre.dfm, bo.df) } pre.dfm$variable <- as.factor(pre.dfm$variable) -ggplot(pre.dfm, aes(x = value, y = year, fill = variable)) + - geom_density_ridges() + theme(legend.position="none")+ coord_flip()+ stat_density_ridges(bandwidth = 0.0735)+ +ggplot(pre.dfm, aes(x = value, y = year, fill = variable)) + + geom_density_ridges() + + theme(legend.position = "none") + + coord_flip() + + stat_density_ridges(bandwidth = 0.0735) + scale_fill_manual(values = c("dodgerblue1", "lightcoral")) # do same for tmp # tmp.dfm$variable <- "prior" # tmp.dfm$variable <- as.factor(tmp.dfm$variable) -# -# +# +# # tmp.df <- data.frame(t(tmp.mat)) # tmp.df$year <- 1960:2010 # tmp.dfm <- reshape::melt(tmp.df, id = c("year")) # tmp.dfm$year <- as.factor(tmp.dfm$year) # tmp.dfm$variable <- as.factor(tmp.dfm$variable) # tmp.dfm$value <- as.numeric(tmp.dfm$value) -# -# ggplot(tmp.dfm, aes(x = value, y = year)) + -# geom_density_ridges(fill = "dodgerblue1") + theme(legend.position="none")+ coord_flip() - - +# +# ggplot(tmp.dfm, aes(x = value, y = year)) + +# geom_density_ridges(fill = "dodgerblue1") + theme(legend.position="none")+ coord_flip() diff --git a/modules/assim.sequential/inst/paleon_sda.R b/modules/assim.sequential/inst/paleon_sda.R index e2857a06579..73ba6687114 100644 --- a/modules/assim.sequential/inst/paleon_sda.R +++ b/modules/assim.sequential/inst/paleon_sda.R @@ -1,4 +1,3 @@ - library(PEcAn.all) library(PEcAn.SIPNET) library(PEcAn.LINKAGES) @@ -8,80 +7,80 @@ 
library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential -####TENSION ZONE 1000009964 #/fs/data2/output//PEcAn_1000009979#/fs/data2/output//PEcAn_1000009985 -#LINKAGES #AGB.pft #Harvard Forest -#setwd('/fs/data2/output//PEcAn_1000003314/') -#setwd('/fs/data2/output//PEcAn_1000007999/') #full run 50 nens -#setwd('/fs/data2/output//PEcAn_1000008008/') -#setwd('/fs/data2/output//PEcAn_1000009667/') -setwd('/fs/data2/output//PEcAn_1000009225/') +#### TENSION ZONE 1000009964 #/fs/data2/output//PEcAn_1000009979#/fs/data2/output//PEcAn_1000009985 +# LINKAGES #AGB.pft #Harvard Forest +# setwd('/fs/data2/output//PEcAn_1000003314/') +# setwd('/fs/data2/output//PEcAn_1000007999/') #full run 50 nens +# setwd('/fs/data2/output//PEcAn_1000008008/') +# setwd('/fs/data2/output//PEcAn_1000009667/') +setwd("/fs/data2/output//PEcAn_1000009225/") -file.copy('/fs/data2/output//PEcAn_1000007999/sda.obs.Rdata',getwd()) -#TO DO: Having problem with running proc.var == TRUE because nimble isn't keeping the toggle sampler in the function environment. +file.copy("/fs/data2/output//PEcAn_1000007999/sda.obs.Rdata", getwd()) +# TO DO: Having problem with running proc.var == TRUE because nimble isn't keeping the toggle sampler in the function environment. ## linkages fcomp -setwd('/fs/data2/output//PEcAn_1000008588/') #with variance inflation -adjustment=TRUE +setwd("/fs/data2/output//PEcAn_1000008588/") # with variance inflation +adjustment <- TRUE -setwd('/fs/data2/output//PEcAn_1000008683/') #without variance inflation -adjustment=TRUE +setwd("/fs/data2/output//PEcAn_1000008683/") # without variance inflation +adjustment <- TRUE load("out/sda.initial.runs.Rdata") -#run inter part +# run inter part load("sda.output.Rdata") nt <- length(obs.list$obs.mean) -aqq <- array(NA,dim=c(nt,10,10)) -t <- length(FORECAST)+1 -aqq[t,,]<- solve(enkf.params[[t-1]]$q.bar)*enkf.params[[t-1]]$n -bqq<-numeric(nt) -bqq[t]<-enkf.params[[t-1]]$n +aqq <- array(NA, dim = c(nt, 10, 10)) +t <- length(FORECAST) + 1 +aqq[t, , ] <- solve(enkf.params[[t - 1]]$q.bar) * enkf.params[[t - 1]]$n +bqq <- numeric(nt) +bqq[t] <- enkf.params[[t - 1]]$n -#SIPNET -#setwd('/fs/data2/output//PEcAn_1000003356') -#setwd('/fs/data2/output//PEcAn_1000007732') -#TO DO: Skip ensemble members that fail or are missing in read.restart -#See talk with with Mike on 6/21/17 -#covariance for NPP is really weird #need to revisit +# SIPNET +# setwd('/fs/data2/output//PEcAn_1000003356') +# setwd('/fs/data2/output//PEcAn_1000007732') +# TO DO: Skip ensemble members that fail or are missing in read.restart +# See talk with with Mike on 6/21/17 +# covariance for NPP is really weird #need to revisit #---------------- Load PEcAn settings file. --------------------------------# # Open and read in settings file for PEcAn run. settings <- read.settings("pecan.SDA.xml") -obs.list <- load_data_paleon_sda(settings = settings) #add a way to get the correct time step in this function? +obs.list <- load_data_paleon_sda(settings = settings) # add a way to get the correct time step in this function? 
IC <- NULL status.start("IC") ne <- as.numeric(settings$state.data.assimilation$n.ensemble) -state <- as.data.frame(rmvnorm(ne,as.numeric(obs.list$obs.mean[[1]]),(obs.list$obs.cov[[1]]), method = "svd")) -colnames(state)<-c('AbvGrndWood','GWBI') +state <- as.data.frame(rmvnorm(ne, as.numeric(obs.list$obs.mean[[1]]), (obs.list$obs.cov[[1]]), method = "svd")) +colnames(state) <- c("AbvGrndWood", "GWBI") IC <- sample.IC.SIPNET(ne, state = state) status.end() -#develop/debug -if(FALSE){ - obs.mean = obs.list$obs.mean - obs.cov = obs.list$obs.cov - Q = NULL - adjustment = TRUE - restart=NULL +# develop/debug +if (FALSE) { + obs.mean <- obs.list$obs.mean + obs.cov <- obs.list$obs.cov + Q <- NULL + adjustment <- TRUE + restart <- NULL } PEcAnAssimSequential::sda.enkf(settings, obs.mean = obs.list$obs.mean, obs.cov = obs.list$obs.cov, IC = IC) obmn <- obvn <- list() -times.keep <- seq(1,1100,100) +times.keep <- seq(1, 1100, 100) -for(i in 1:length(times.keep)){ +for (i in 1:length(times.keep)) { obmn[[i]] <- obs.mean[[times.keep[i]]] obvn[[i]] <- obs.cov[[times.keep[i]]] } @@ -91,13 +90,15 @@ names(obmn) <- names(obvn) <- names(obs.mean)[times.keep] obs.mean <- obmn obs.cov <- obvn -##### +##### ##### Plot Data ##### -##### +##### t1 <- 1 t <- length(obs.list$obs.mean) -names.y <- unique(unlist(lapply(obs.list$obs.mean[t1:t], function(x) { names(x) }))) +names.y <- unique(unlist(lapply(obs.list$obs.mean[t1:t], function(x) { + names(x) +}))) Ybar <- t(sapply(obs.list$obs.mean[t1:t], function(x) { tmp <- rep(NA, length(names.y)) names(tmp) <- names.y @@ -107,7 +108,7 @@ Ybar <- t(sapply(obs.list$obs.mean[t1:t], function(x) { })) YCI <- t(as.matrix(sapply(obs.list$obs.cov[t1:t], function(x) { - if (length(x)<2) { + if (length(x) < 2) { rep(NA, length(names.y)) } sqrt(diag(x)) @@ -115,24 +116,26 @@ YCI <- t(as.matrix(sapply(obs.list$obs.cov[t1:t], function(x) { obs.dates <- rownames(Ybar) -green <- col2rgb("green") +green <- col2rgb("green") alphagreen <- rgb(green[1], green[2], green[3], 75, max = 255) -for(i in 1:length(obs.list$obs.mean[[1]])){ - plot(as.Date(obs.dates), - as.numeric(Ybar[, i]), - type = "l", - col = "darkgreen", - lwd = 2,ylim=c(0,1),main=colnames(Ybar)[i]) +for (i in 1:length(obs.list$obs.mean[[1]])) { + plot(as.Date(obs.dates), + as.numeric(Ybar[, i]), + type = "l", + col = "darkgreen", + lwd = 2, ylim = c(0, 1), main = colnames(Ybar)[i] + ) ciEnvelope(as.Date(obs.dates), - as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, - as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, - col = alphagreen) - lines(as.Date(obs.dates), - as.numeric(Ybar[, i]), - type = "l", - col = "darkgreen", - lwd = 2) + as.numeric(Ybar[, i]) - as.numeric(YCI[, i]) * 1.96, + as.numeric(Ybar[, i]) + as.numeric(YCI[, i]) * 1.96, + col = alphagreen + ) + lines(as.Date(obs.dates), + as.numeric(Ybar[, i]), + type = "l", + col = "darkgreen", + lwd = 2 + ) } - diff --git a/modules/assim.sequential/inst/restart_SDAworkflow_scripts/SDA_Workflow_LAI.R b/modules/assim.sequential/inst/restart_SDAworkflow_scripts/SDA_Workflow_LAI.R index 9c16ac72362..f1815ba41fc 100644 --- a/modules/assim.sequential/inst/restart_SDAworkflow_scripts/SDA_Workflow_LAI.R +++ b/modules/assim.sequential/inst/restart_SDAworkflow_scripts/SDA_Workflow_LAI.R @@ -10,11 +10,11 @@ library("tidyverse") library("furrr") library("R.utils") library("dynutils") -library('nimble') +library("nimble") library("sp") library("sf") library("lubridate") -#plan(multisession) +# plan(multisession) # 
---------------------------------------------------------------------------------------------- @@ -24,159 +24,166 @@ library("lubridate") # if(length(tmp) < 6){ # logger.severe("Missing required arguments") # } -#forecastPath points to the folder where unconstrained forecast runs can be found -#forecastPath <- "/projectnb/dietzelab/ahelgeso/Site_Outputs/Harvard/Fixed_PAR/" -#SDApath points to the folder where SDA forecast runs can be found +# forecastPath points to the folder where unconstrained forecast runs can be found +# forecastPath <- "/projectnb/dietzelab/ahelgeso/Site_Outputs/Harvard/Fixed_PAR/" +# SDApath points to the folder where SDA forecast runs can be found SDApath <- "/projectnb/dietzelab/ahelgeso/SDA/HF_SDA_Output/Fixed_PAR" -#SDApath <- tmp[1] -#manually set to previous run settings$info$date it creates the filepath to previous run -#when you run with write to BETY = FALSE the system uses the system date/time as the unique folder name for runs +# SDApath <- tmp[1] +# manually set to previous run settings$info$date it creates the filepath to previous run +# when you run with write to BETY = FALSE the system uses the system date/time as the unique folder name for runs next.oldir <- "2022-09-23-11-49" -#next.oldir <- tmp[2] -#outputPath points to location where you would like to save SDA output note this path could match SDApath but does not need to +# next.oldir <- tmp[2] +# outputPath points to location where you would like to save SDA output note this path could match SDApath but does not need to outputPath <- "/projectnb/dietzelab/ahelgeso/SDA/HF_SDA_Output/Fixed_PAR" -#outputPath <- tmp[3] -#settingsPath points to location where multisite xml can be found +# outputPath <- tmp[3] +# settingsPath points to location where multisite xml can be found settingsPath <- "/projectnb/dietzelab/ahelgeso/pecan/modules/assim.sequential/inst/Site_XMLS/testingMulti_HF.xml" -#settingsPath <- tmp[4] -#to manually change start date -runDays <- seq(as.Date("2021-07-28"), as.Date("2021-07-29"), by="days") -#runDays <- seq(as.Date(tmp[5]), as.Date(tmp[6]), by="days") +# settingsPath <- tmp[4] +# to manually change start date +runDays <- seq(as.Date("2021-07-28"), as.Date("2021-07-29"), by = "days") +# runDays <- seq(as.Date(tmp[5]), as.Date(tmp[6]), by="days") #------------------------------------------------------------------------------------------------ #------------------------------------------ Preparing the pecan xml ----------------------------- #------------------------------------------------------------------------------------------------ for (s in 1:length(runDays)) { -#restart list will store the full filepath to previous runs and when to start SDA cut -restart <- list() -setwd(outputPath) -#set sda.start -sda.start <- as.Date(runDays[s]) -#sda.start <- as.Date("2021-07-15") - -#reading xml -settings <- read.settings(settingsPath) - -#grab site info -site_info <- list( - site_id = settings$run$site$id, - site_name = settings$run$site$name, - lat = settings$run$site$lat, - lon = settings$run$site$lon, - time_zone = "UTC") - -#grab old.dir filepath from previous SDA run -sda.runs <- list.files(SDApath, full.names = TRUE, pattern = paste0("PEcAn_", next.oldir)) -#add filpath to restart list -restart$filepath <- sda.runs - -#connecting to DB -con <-try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) -on.exit(db.close(con)) - -# #query database for previous forecast run (i.e. 
t=0) -# query.run <- paste0("SELECT * FROM runs WHERE site_id =", site_info$site_id) -# run <- PEcAn.DB::db.query(query.run, con) -# #filter for sda.start -# run <- dplyr::filter(run, start_time == as.Date(sda.start -1)) -# daydiff <- difftime(Sys.time(), run$created_at, units = "days") -# runday <- which(min(daydiff) == daydiff) -# startday <- run$created_at[runday] -# run <- dplyr::filter(run, as.Date(created_at) == as.Date(startday)) -# run <- dplyr::filter(run, !is.na(finished_at)) -# #add filter for model -# query.ens <- paste0("SELECT * FROM ensembles WHERE id =", run$ensemble_id) -# ens <- PEcAn.DB::db.query(query.ens, con) -# #now that we have the workflow id for forecast run we can close connection to BETY -# PEcAn.DB::db.close(con) -# #add filepath to restart object, this is where SDA will look for runs for t=1 -# restart$filepath <- paste0(forecastPath, "PEcAn_", ens$workflow_id, "/") -# #restart$filepath <- "/projectnb/dietzelab/ahelgeso/Site_Outputs/Harvard/FluxPaper/PEcAn_1000022323/" -# #check if all ensemble members are present -# ensPresent <- list() -# for(k in 1:length(run$ensemble_id)){ -# ensPresent[[k]] <- file.exists(paste0(restart$filepath, "out/", run$id[k], "/2021.nc")) -# } -# if(FALSE %in% ensPresent){ -# next -# } + # restart list will store the full filepath to previous runs and when to start SDA cut + restart <- list() + setwd(outputPath) + # set sda.start + sda.start <- as.Date(runDays[s]) + # sda.start <- as.Date("2021-07-15") + + # reading xml + settings <- read.settings(settingsPath) + + # grab site info + site_info <- list( + site_id = settings$run$site$id, + site_name = settings$run$site$name, + lat = settings$run$site$lat, + lon = settings$run$site$lon, + time_zone = "UTC" + ) + + # grab old.dir filepath from previous SDA run + sda.runs <- list.files(SDApath, full.names = TRUE, pattern = paste0("PEcAn_", next.oldir)) + # add filpath to restart list + restart$filepath <- sda.runs + + # connecting to DB + con <- try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) + on.exit(db.close(con)) + + # #query database for previous forecast run (i.e. 
t=0) + # query.run <- paste0("SELECT * FROM runs WHERE site_id =", site_info$site_id) + # run <- PEcAn.DB::db.query(query.run, con) + # #filter for sda.start + # run <- dplyr::filter(run, start_time == as.Date(sda.start -1)) + # daydiff <- difftime(Sys.time(), run$created_at, units = "days") + # runday <- which(min(daydiff) == daydiff) + # startday <- run$created_at[runday] + # run <- dplyr::filter(run, as.Date(created_at) == as.Date(startday)) + # run <- dplyr::filter(run, !is.na(finished_at)) + # #add filter for model + # query.ens <- paste0("SELECT * FROM ensembles WHERE id =", run$ensemble_id) + # ens <- PEcAn.DB::db.query(query.ens, con) + # #now that we have the workflow id for forecast run we can close connection to BETY + # PEcAn.DB::db.close(con) + # #add filepath to restart object, this is where SDA will look for runs for t=1 + # restart$filepath <- paste0(forecastPath, "PEcAn_", ens$workflow_id, "/") + # #restart$filepath <- "/projectnb/dietzelab/ahelgeso/Site_Outputs/Harvard/FluxPaper/PEcAn_1000022323/" + # #check if all ensemble members are present + # ensPresent <- list() + # for(k in 1:length(run$ensemble_id)){ + # ensPresent[[k]] <- file.exists(paste0(restart$filepath, "out/", run$id[k], "/2021.nc")) + # } + # if(FALSE %in% ensPresent){ + # next + # } -#set met.start & met.end -met.start <- sda.start - 1 -met.end <- met.start + lubridate::days(35) - -# -------------------------------------------------------------------------------------------------- -#---------------------------------------------- LAI DATA ------------------------------------- -# -------------------------------------------------------------------------------------------------- - - - lai <- call_MODIS(outdir = NULL, - var = 'lai', - site_info = site_info, - product_dates = c(paste0(lubridate::year(met.start), strftime(met.start, format = "%j")),paste0(lubridate::year(met.end), strftime(met.end, format = "%j"))), - run_parallel = TRUE, - ncores = NULL, - product = "MOD15A2H", - band = "Lai_500m", - package_method = "MODISTools", - QC_filter = TRUE, - progress = TRUE) -#filter for good resolution data - lai <- lai %>% filter(qc == "000") -#filter for lai that matches sda.start + # set met.start & met.end + met.start <- sda.start - 1 + met.end <- met.start + lubridate::days(35) + + # -------------------------------------------------------------------------------------------------- + #---------------------------------------------- LAI DATA ------------------------------------- + # -------------------------------------------------------------------------------------------------- + + + lai <- call_MODIS( + outdir = NULL, + var = "lai", + site_info = site_info, + product_dates = c(paste0(lubridate::year(met.start), strftime(met.start, format = "%j")), paste0(lubridate::year(met.end), strftime(met.end, format = "%j"))), + run_parallel = TRUE, + ncores = NULL, + product = "MOD15A2H", + band = "Lai_500m", + package_method = "MODISTools", + QC_filter = TRUE, + progress = TRUE + ) + + # filter for good resolution data + lai <- lai %>% filter(qc == "000") + # filter for lai that matches sda.start lai <- lai %>% filter(calendar_date == sda.start) - if(dim(lai)[1] < 1){ - lai = data.frame(calendar_date = sda.start, site_id = site_info$site_id, data = NA) + if (dim(lai)[1] < 1) { + lai <- data.frame(calendar_date = sda.start, site_id = site_info$site_id, data = NA) PEcAn.logger::logger.warn(paste0("MODIS mean Data not available for these dates, initializing NA")) } - lai_sd <- call_MODIS(outdir = NULL, - var = 'lai', -
site_info = site_info, - product_dates = c(paste0(lubridate::year(met.start), strftime(met.start, format = "%j")),paste0(lubridate::year(met.end), strftime(met.end, format = "%j"))), - run_parallel = TRUE, - ncores = NULL, - product = "MOD15A2H", - band = "LaiStdDev_500m", - package_method = "MODISTools", - QC_filter = TRUE, - progress = TRUE) - -#filter for good resolution data + lai_sd <- call_MODIS( + outdir = NULL, + var = "lai", + site_info = site_info, + product_dates = c(paste0(lubridate::year(met.start), strftime(met.start, format = "%j")), paste0(lubridate::year(met.end), strftime(met.end, format = "%j"))), + run_parallel = TRUE, + ncores = NULL, + product = "MOD15A2H", + band = "LaiStdDev_500m", + package_method = "MODISTools", + QC_filter = TRUE, + progress = TRUE + ) + + # filter for good resolution data lai_sd <- lai_sd %>% filter(qc == "000") -#filter for lai.sd that matches sda.start + # filter for lai.sd that matches sda.start lai_sd <- lai_sd %>% filter(calendar_date == sda.start) - - if(dim(lai_sd)[1] < 1){ - lai_sd = data.frame(calendar_date = sda.start, site_id = site_info$site_id, data = NA) + + if (dim(lai_sd)[1] < 1) { + lai_sd <- data.frame(calendar_date = sda.start, site_id = site_info$site_id, data = NA) PEcAn.logger::logger.warn(paste0("MODIS standard deviation Data not available for these dates, initializing NA")) } -#build obs mean/cov matrix for LAI - #add NA obs for 1 day after LAI obs available + # build obs mean/cov matrix for LAI + # add NA obs for 1 day after LAI obs available na.date <- as.Date(sda.start + 1) na.date <- as.character(na.date) obs.mean <- data.frame(date = c(lai$calendar_date, na.date), site_id = c(lai$site_id, lai$site_id), lai = c(lai$data, NA)) - obs.mean$date = as.character(obs.mean$date, stringsAsFactors = FALSE) + obs.mean$date <- as.character(obs.mean$date, stringsAsFactors = FALSE) obs.mean <- split(obs.mean, obs.mean$date) - + date.obs <- names(obs.mean) - + obs.mean <- purrr::map( names(obs.mean), - function(namesl){ + function(namesl) { split( obs.mean[[namesl]], - obs.mean[[namesl]]$site_id) %>% + obs.mean[[namesl]]$site_id + ) %>% purrr::map( - ~.x[3] %>% + ~ .x[3] %>% stats::setNames(c("LAI")) %>% - `row.names<-`(NULL)) + `row.names<-`(NULL) + ) } ) %>% stats::setNames(date.obs) - + # #remove NA data as this will crash the SDA.
Removes row numbers (may not be necessary) # names = date.obs # for (name in names){ @@ -191,25 +198,27 @@ met.end <- met.start + lubridate::days(35) # } # } # } - + obs.cov <- data.frame(date = c(lai_sd$calendar_date, na.date), site_id = c(lai_sd$site_id, lai_sd$site_id), lai = c(lai_sd$data, NA)) - obs.cov$date = as.character(obs.cov$date, stringsAsFactors = FALSE) + obs.cov$date <- as.character(obs.cov$date, stringsAsFactors = FALSE) obs.cov <- split(obs.cov, obs.cov$date) - + obs.cov <- purrr::map( names(obs.cov), - function(namesl){ + function(namesl) { purrr::map( split( obs.cov[[namesl]], - obs.cov[[namesl]]$site_id), - ~.x[3]^2 %>% - unlist %>% - diag(nrow = 2, ncol = 2)) + obs.cov[[namesl]]$site_id + ), + ~ .x[3]^2 %>% + unlist() %>% + diag(nrow = 2, ncol = 2) + ) } ) %>% stats::setNames(date.obs) - - + + # names = date.obs # for (name in names){ # for (site in names(obs.cov[[name]])){ @@ -230,131 +239,130 @@ met.end <- met.start + lubridate::days(35) # } # } # } - #add start.cut to restart list + # add start.cut to restart list restart$start.cut <- lubridate::as_datetime(min(lai$calendar_date)) restart$start.cut <- format(restart$start.cut, "%Y-%m-%d %H:%M:%S", tz = "EST") - - -#----------------------------------------------------------------------------------------------- -#------------------------------------------ Fixing the settings -------------------------------- -#----------------------------------------------------------------------------------------------- -#Using the found dates to run - this will help to download mets -settings$run$site$met.start <- as.character(met.start) -settings$run$site$met.end <- as.character(met.end) - -# Setting dates in assimilation tags - This will help with preprocess split in SDA code -settings$state.data.assimilation$start.date <-as.character(sda.start) -sda.end <- max(names(obs.mean)) -settings$state.data.assimilation$end.date <-as.character(sda.end) - -# -------------------------------------------------------------------------------------------------- -#---------------------------------------------- PEcAn Workflow ------------------------------------- -# -------------------------------------------------------------------------------------------------- -#info -settings$info$date <- paste0(format(Sys.time(), "%Y/%m/%d %H:%M:%S")) -next.oldir <- paste0(format(Sys.time(), "%Y-%m-%d-%H-%M")) -#Update/fix/check settings.
Will only run the first time it's called, unless force=TRUE -settings <- PEcAn.settings::prepare.settings(settings, force = TRUE) -settings$host$rundir <- settings$rundir -settings$host$outdir <- settings$modeloutdir -settings$host$folder <- settings$modeloutdir -setwd(settings$outdir) -#Write pecan.CHECKED.xml -PEcAn.settings::write.settings(settings, outputfile = "pecan.CHECKED.xml") -# start from scratch if no continue is passed in -statusFile <- file.path(settings$outdir, "STATUS") -if (length(which(commandArgs() == "--continue")) == 0 && file.exists(statusFile)) { - file.remove(statusFile) -} - -#manually add in clim files -con <-PEcAn.DB::db.open(settings$database$bety) -on.exit(db.close(con), add = TRUE) - -input_check <- PEcAn.DB::dbfile.input.check( - siteid= site_info$site_id %>% as.character(), - startdate = met.start %>% as.Date, - enddate = NULL, - parentid = NA, - mimetype="text/csv", - formatname="Sipnet.climna", - con = con, - hostname = PEcAn.remote::fqdn(), - pattern = NULL, - exact.dates = TRUE, - return.all=TRUE -) - -#If INPUTS already exists, add id and met path to settings file - -if(length(input_check$id) > 0){ - #met paths - clim_check = list() - for(i in 1:length(input_check$file_path)){ - - clim_check[[i]] <- file.path(input_check$file_path[i], input_check$file_name[i]) - }#end i loop for creating file paths - #ids - index_id = list() - index_path = list() - for(i in 1:length(input_check$id)){ - index_id[[i]] = as.character(input_check$id[i])#get ids as list - - }#end i loop for making lists - names(index_id) = sprintf("id%s",seq(1:length(input_check$id))) #rename list - names(clim_check) = sprintf("path%s",seq(1:length(input_check$id))) - - settings$run$inputs$met$id = index_id - settings$run$inputs$met$path = clim_check -}else{PEcAn.utils::logger.error("No met file found")} -#settings <- PEcAn.workflow::do_conversions(settings, T, T, T) - -if(is_empty(settings$run$inputs$met$path) & length(clim_check)>0){ - settings$run$inputs$met$id = index_id - settings$run$inputs$met$path = clim_check -} -# #add runs ids from previous forecast to settings object to be passed to build X -# run_id <- list() -# for (k in 1:length(run$id)) { -# run_id[[k]] = as.character(run$id[k]) -# } -# names(run_id) = sprintf("id%s",seq(1:length(run$id))) #rename list -# settings$runs$id = run_id - -# #add run ids from previous sda to settings object to be passed to build X -# run_id <- list() -# for (k in 1:length(previous.ens)) { -# run_id[[k]] = as.character(previous.ens[k]) -# } -# names(run_id) = sprintf("id%s",seq(1:length(previous.ens))) #rename list -# settings$runs$id = run_id - -#save restart object -save(restart, next.oldir, obs.mean, obs.cov, file = file.path(settings$outdir, "restart.Rdata")) -#run sda function -sda.enkf.multisite(settings = settings, - obs.mean = obs.mean, - obs.cov = obs.cov, - Q = NULL, - restart = restart, - forceRun = TRUE, - keepNC = TRUE, - control = list(trace = TRUE, - FF = FALSE, - interactivePlot = FALSE, - TimeseriesPlot = FALSE, - BiasPlot = FALSE, - plot.title = NULL, - facet.plots = FALSE, - debug = FALSE, - pause = FALSE, - Profiling = FALSE, - OutlierDetection=FALSE)) + #----------------------------------------------------------------------------------------------- + #------------------------------------------ Fixing the settings -------------------------------- + #----------------------------------------------------------------------------------------------- + # Using the found dates to run - this will help to download mets + 
settings$run$site$met.start <- as.character(met.start) + settings$run$site$met.end <- as.character(met.end) + + # Setting dates in assimilation tags - This will help with preprocess split in SDA code + settings$state.data.assimilation$start.date <- as.character(sda.start) + sda.end <- max(names(obs.mean)) + settings$state.data.assimilation$end.date <- as.character(sda.end) + + # -------------------------------------------------------------------------------------------------- + #---------------------------------------------- PEcAn Workflow ------------------------------------- + # -------------------------------------------------------------------------------------------------- + # info + settings$info$date <- paste0(format(Sys.time(), "%Y/%m/%d %H:%M:%S")) + next.oldir <- paste0(format(Sys.time(), "%Y-%m-%d-%H-%M")) + # Update/fix/check settings. Will only run the first time it's called, unless force=TRUE + settings <- PEcAn.settings::prepare.settings(settings, force = TRUE) + settings$host$rundir <- settings$rundir + settings$host$outdir <- settings$modeloutdir + settings$host$folder <- settings$modeloutdir + setwd(settings$outdir) + # Write pecan.CHECKED.xml + PEcAn.settings::write.settings(settings, outputfile = "pecan.CHECKED.xml") + # start from scratch if no continue is passed in + statusFile <- file.path(settings$outdir, "STATUS") + if (length(which(commandArgs() == "--continue")) == 0 && file.exists(statusFile)) { + file.remove(statusFile) + } + # manually add in clim files + con <- PEcAn.DB::db.open(settings$database$bety) + on.exit(db.close(con), add = TRUE) + + input_check <- PEcAn.DB::dbfile.input.check( + siteid = site_info$site_id %>% as.character(), + startdate = met.start %>% as.Date(), + enddate = NULL, + parentid = NA, + mimetype = "text/csv", + formatname = "Sipnet.climna", + con = con, + hostname = PEcAn.remote::fqdn(), + pattern = NULL, + exact.dates = TRUE, + return.all = TRUE + ) + + # If INPUTS already exists, add id and met path to settings file + + if (length(input_check$id) > 0) { + # met paths + clim_check <- list() + for (i in 1:length(input_check$file_path)) { + clim_check[[i]] <- file.path(input_check$file_path[i], input_check$file_name[i]) + } # end i loop for creating file paths + # ids + index_id <- list() + index_path <- list() + for (i in 1:length(input_check$id)) { + index_id[[i]] <- as.character(input_check$id[i]) # get ids as list + } # end i loop for making lists + names(index_id) <- sprintf("id%s", seq(1:length(input_check$id))) # rename list + names(clim_check) <- sprintf("path%s", seq(1:length(input_check$id))) + + settings$run$inputs$met$id <- index_id + settings$run$inputs$met$path <- clim_check + } else { + PEcAn.utils::logger.error("No met file found") + } + # settings <- PEcAn.workflow::do_conversions(settings, T, T, T) + if (is_empty(settings$run$inputs$met$path) & length(clim_check) > 0) { + settings$run$inputs$met$id <- index_id + settings$run$inputs$met$path <- clim_check + } + # #add runs ids from previous forecast to settings object to be passed to build X + # run_id <- list() + # for (k in 1:length(run$id)) { + # run_id[[k]] = as.character(run$id[k]) + # } + # names(run_id) = sprintf("id%s",seq(1:length(run$id))) #rename list + # settings$runs$id = run_id + # #add run ids from previous sda to settings object to be passed to build X + # run_id <- list() + # for (k in 1:length(previous.ens)) { + # run_id[[k]] = as.character(previous.ens[k]) + # } + # names(run_id) = sprintf("id%s",seq(1:length(previous.ens))) #rename list + # 
settings$runs$id = run_id + + # save restart object + save(restart, next.oldir, obs.mean, obs.cov, file = file.path(settings$outdir, "restart.Rdata")) + # run sda function + sda.enkf.multisite( + settings = settings, + obs.mean = obs.mean, + obs.cov = obs.cov, + Q = NULL, + restart = restart, + forceRun = TRUE, + keepNC = TRUE, + control = list( + trace = TRUE, + FF = FALSE, + interactivePlot = FALSE, + TimeseriesPlot = FALSE, + BiasPlot = FALSE, + plot.title = NULL, + facet.plots = FALSE, + debug = FALSE, + pause = FALSE, + Profiling = FALSE, + OutlierDetection = FALSE + ) + ) } diff --git a/modules/assim.sequential/inst/restart_SDAworkflow_scripts/SDA_Workflow_NA.R b/modules/assim.sequential/inst/restart_SDAworkflow_scripts/SDA_Workflow_NA.R index d1f3b5d9c13..935d43822d9 100644 --- a/modules/assim.sequential/inst/restart_SDAworkflow_scripts/SDA_Workflow_NA.R +++ b/modules/assim.sequential/inst/restart_SDAworkflow_scripts/SDA_Workflow_NA.R @@ -10,22 +10,22 @@ library("tidyverse") library("furrr") library("R.utils") library("dynutils") -library('nimble') +library("nimble") library("sp") library("sf") library("lubridate") -#plan(multisession) +# plan(multisession) # ---------------------------------------------------------------------------------------------- #------------------------------------------Prepared SDA Settings ----- # ---------------------------------------------------------------------------------------------- -#forecastPath <- "/projectnb/dietzelab/ahelgeso/Site_Outputs/Harvard/FluxPaper/" +# forecastPath <- "/projectnb/dietzelab/ahelgeso/Site_Outputs/Harvard/FluxPaper/" SDApath <- "/projectnb/dietzelab/ahelgeso/SDA/HF_SDA_Output/ESA/" -#manually set to previous run settings$info$date it creates the filepath to previous run +# manually set to previous run settings$info$date it creates the filepath to previous run next.oldir <- "2022-07-21-08-38-57" -#to manually change start date -runDays <- seq(as.Date("2021-08-22"), as.Date("2021-08-31"), by="days") +# to manually change start date +runDays <- seq(as.Date("2021-08-22"), as.Date("2021-08-31"), by = "days") #------------------------------------------------------------------------------------------------ #------------------------------------------ Preparing the pecan xml ----------------------------- @@ -34,31 +34,32 @@ for (s in 1:length(runDays)) { restart <- list() outputPath <- "/projectnb/dietzelab/ahelgeso/SDA/HF_SDA_Output/ESA" setwd(outputPath) - #set sda.start + # set sda.start sda.start <- as.Date(runDays[s]) - #sda.start <- as.Date("2021-07-15") - - #reading xml + # sda.start <- as.Date("2021-07-15") + + # reading xml settings <- read.settings("/projectnb/dietzelab/ahelgeso/pecan/modules/assim.sequential/inst/Site_XMLS/testingMulti_HF.xml") - - #grab site info + + # grab site info site_info <- list( site_id = settings$run$site$id, site_name = settings$run$site$name, lat = settings$run$site$lat, lon = settings$run$site$lon, - time_zone = "UTC") - - #grab old.dir filepath from previous SDA run + time_zone = "UTC" + ) + + # grab old.dir filepath from previous SDA run sda.runs <- list.files(SDApath, full.names = TRUE, pattern = paste0("PEcAn_", next.oldir)) # previous <- sda.runs[2] restart$filepath <- sda.runs # previous.ens <- list.files(paste0(previous, "/out")) - - #connecting to DB - con <-try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) + + # connecting to DB + con <- try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) on.exit(db.close(con)) - + # #query database for previous forecast run 
(i.e. t=0) # query.run <- paste0("SELECT * FROM runs WHERE site_id =", site_info$site_id) # run <- PEcAn.DB::db.query(query.run, con) @@ -85,145 +86,149 @@ for (s in 1:length(runDays)) { # if(FALSE %in% ensPresent){ # next # } - - #set met.start & met.end + + # set met.start & met.end met.start <- sda.start - 1 met.end <- met.start + lubridate::days(35) - + # -------------------------------------------------------------------------------------------------- #---------------------------------------------- NA DATA ------------------------------------- # -------------------------------------------------------------------------------------------------- - - #initialize obs.mean/cov NAs - NAdata = data.frame(calendar_date = sda.start, site_id = site_info$site_id, data = NA) + + # initialize obs.mean/cov NAs + NAdata <- data.frame(calendar_date = sda.start, site_id = site_info$site_id, data = NA) PEcAn.logger::logger.warn(paste0("Observation mean Data not available for these dates, initialzing NA")) - - NAdata.sd = data.frame(calendar_date = sda.start, site_id = site_info$site_id, data = NA) + + NAdata.sd <- data.frame(calendar_date = sda.start, site_id = site_info$site_id, data = NA) PEcAn.logger::logger.warn(paste0("Observation standard deviation Data not available for these dates, initialzing NA")) - - #add NA obs for 1 day after sda.start + + # add NA obs for 1 day after sda.start na.date <- as.Date(sda.start + 1) na.date <- as.character(na.date) obs.mean <- data.frame(date = c(NAdata$calendar_date, na.date), site_id = c(NAdata$site_id, NAdata$site_id), lai = c(NAdata$data, NA)) - obs.mean$date = as.character(obs.mean$date, stringsAsFactors = FALSE) + obs.mean$date <- as.character(obs.mean$date, stringsAsFactors = FALSE) obs.mean <- split(obs.mean, obs.mean$date) - + date.obs <- names(obs.mean) - + obs.mean <- purrr::map( names(obs.mean), - function(namesl){ + function(namesl) { split( obs.mean[[namesl]], - obs.mean[[namesl]]$site_id) %>% + obs.mean[[namesl]]$site_id + ) %>% purrr::map( - ~.x[3] %>% + ~ .x[3] %>% stats::setNames(c("LAI")) %>% - `row.names<-`(NULL)) + `row.names<-`(NULL) + ) } ) %>% stats::setNames(date.obs) - + obs.cov <- data.frame(date = c(NAdata.sd$calendar_date, na.date), site_id = c(NAdata.sd$site_id, lai_sd$site_id), lai = c(NAdata.sd$data, NA)) - obs.cov$date = as.character(obs.cov$date, stringsAsFactors = FALSE) + obs.cov$date <- as.character(obs.cov$date, stringsAsFactors = FALSE) obs.cov <- split(obs.cov, obs.cov$date) - + obs.cov <- purrr::map( names(obs.cov), - function(namesl){ + function(namesl) { purrr::map( split( obs.cov[[namesl]], - obs.cov[[namesl]]$site_id), - ~.x[3]^2 %>% - unlist %>% - diag(nrow = 2, ncol = 2)) + obs.cov[[namesl]]$site_id + ), + ~ .x[3]^2 %>% + unlist() %>% + diag(nrow = 2, ncol = 2) + ) } ) %>% stats::setNames(date.obs) - - #add start.cut to restart list + + # add start.cut to restart list restart$start.cut <- lubridate::as_datetime(sda.start) restart$start.cut <- format(restart$start.cut, "%Y-%m-%d %H:%M:%S", tz = "EST") - - + + #----------------------------------------------------------------------------------------------- #------------------------------------------ Fixing the settings -------------------------------- #----------------------------------------------------------------------------------------------- - #Using the found dates to run - this will help to download mets + # Using the found dates to run - this will help to download mets settings$run$site$met.start <- as.character(met.start) settings$run$site$met.end <- 
as.character(met.end) - + # Setting dates in assimilation tags - This will help with preprocess split in SDA code - settings$state.data.assimilation$start.date <-as.character(sda.start) + settings$state.data.assimilation$start.date <- as.character(sda.start) sda.end <- max(names(obs.mean)) - settings$state.data.assimilation$end.date <-as.character(sda.end) - + settings$state.data.assimilation$end.date <- as.character(sda.end) + # -------------------------------------------------------------------------------------------------- #---------------------------------------------- PEcAn Workflow ------------------------------------- # -------------------------------------------------------------------------------------------------- - #info + # info settings$info$date <- paste0(format(Sys.time(), "%Y/%m/%d %H:%M:%S")) next.oldir <- paste0(format(Sys.time(), "%Y-%m-%d-%H-%M")) - #Update/fix/check settings. Will only run the first time it's called, unless force=TRUE + # Update/fix/check settings. Will only run the first time it's called, unless force=TRUE settings <- PEcAn.settings::prepare.settings(settings, force = TRUE) - #settings$host$rundir <- settings$rundir - #settings$host$outdir <- settings$modeloutdir - #settings$host$folder <- settings$modeloutdir + # settings$host$rundir <- settings$rundir + # settings$host$outdir <- settings$modeloutdir + # settings$host$folder <- settings$modeloutdir setwd(settings$outdir) - #Write pecan.CHECKED.xml + # Write pecan.CHECKED.xml PEcAn.settings::write.settings(settings, outputfile = "pecan.CHECKED.xml") # start from scratch if no continue is passed in statusFile <- file.path(settings$outdir, "STATUS") if (length(which(commandArgs() == "--continue")) == 0 && file.exists(statusFile)) { file.remove(statusFile) } - - #manually add in clim files - con <-PEcAn.DB::db.open(settings$database$bety) - + + # manually add in clim files + con <- PEcAn.DB::db.open(settings$database$bety) + input_check <- PEcAn.DB::dbfile.input.check( - siteid= site_info$site_id %>% as.character(), - startdate = met.start %>% as.Date, + siteid = site_info$site_id %>% as.character(), + startdate = met.start %>% as.Date(), enddate = NULL, parentid = NA, - mimetype="text/csv", - formatname="Sipnet.climna", + mimetype = "text/csv", + formatname = "Sipnet.climna", con = con, hostname = PEcAn.remote::fqdn(), - pattern = NULL, + pattern = NULL, exact.dates = TRUE, - return.all=TRUE + return.all = TRUE ) - - #If INPUTS already exists, add id and met path to settings file - - if(length(input_check$id) > 0){ - #met paths - clim_check = list() - for(i in 1:length(input_check$file_path)){ - + + # If INPUTS already exists, add id and met path to settings file + + if (length(input_check$id) > 0) { + # met paths + clim_check <- list() + for (i in 1:length(input_check$file_path)) { clim_check[[i]] <- file.path(input_check$file_path[i], input_check$file_name[i]) - }#end i loop for creating file paths - #ids - index_id = list() - index_path = list() - for(i in 1:length(input_check$id)){ - index_id[[i]] = as.character(input_check$id[i])#get ids as list - - }#end i loop for making lists - names(index_id) = sprintf("id%s",seq(1:length(input_check$id))) #rename list - names(clim_check) = sprintf("path%s",seq(1:length(input_check$id))) - - settings$run$inputs$met$id = index_id - settings$run$inputs$met$path = clim_check - }else{PEcAn.utils::logger.error("No met file found")} - #settings <- PEcAn.workflow::do_conversions(settings, T, T, T) - - if(is_empty(settings$run$inputs$met$path) & length(clim_check)>0){ 
- settings$run$inputs$met$id = index_id - settings$run$inputs$met$path = clim_check + } # end i loop for creating file paths + # ids + index_id <- list() + index_path <- list() + for (i in 1:length(input_check$id)) { + index_id[[i]] <- as.character(input_check$id[i]) # get ids as list + } # end i loop for making lists + names(index_id) <- sprintf("id%s", seq(1:length(input_check$id))) # rename list + names(clim_check) <- sprintf("path%s", seq(1:length(input_check$id))) + + settings$run$inputs$met$id <- index_id + settings$run$inputs$met$path <- clim_check + } else { + PEcAn.utils::logger.error("No met file found") + } + # settings <- PEcAn.workflow::do_conversions(settings, T, T, T) + + if (is_empty(settings$run$inputs$met$path) & length(clim_check) > 0) { + settings$run$inputs$met$id <- index_id + settings$run$inputs$met$path <- clim_check } - - + + # #add runs ids from previous forecast to settings object to be passed to build X # run_id <- list() # for (k in 1:length(run$id)) { @@ -231,7 +236,7 @@ for (s in 1:length(runDays)) { # } # names(run_id) = sprintf("id%s",seq(1:length(run$id))) #rename list # settings$runs$id = run_id - + # #add run ids from previous sda to settings object to be passed to build X # run_id <- list() # for (k in 1:length(previous.ens)) { @@ -239,31 +244,30 @@ for (s in 1:length(runDays)) { # } # names(run_id) = sprintf("id%s",seq(1:length(previous.ens))) #rename list # settings$runs$id = run_id - - #save restart object + + # save restart object save(restart, next.oldir, file = file.path(settings$outdir, "restart.Rdata")) - #run sda function - sda.enkf.multisite(settings = settings, - obs.mean = obs.mean, - obs.cov = obs.cov, - Q = NULL, - restart = restart, - forceRun = TRUE, - keepNC = TRUE, - control = list(trace = TRUE, - FF = FALSE, - interactivePlot = FALSE, - TimeseriesPlot = FALSE, - BiasPlot = FALSE, - plot.title = NULL, - facet.plots = FALSE, - debug = FALSE, - pause = FALSE, - Profiling = FALSE, - OutlierDetection=FALSE)) - - - - - -} \ No newline at end of file + # run sda function + sda.enkf.multisite( + settings = settings, + obs.mean = obs.mean, + obs.cov = obs.cov, + Q = NULL, + restart = restart, + forceRun = TRUE, + keepNC = TRUE, + control = list( + trace = TRUE, + FF = FALSE, + interactivePlot = FALSE, + TimeseriesPlot = FALSE, + BiasPlot = FALSE, + plot.title = NULL, + facet.plots = FALSE, + debug = FALSE, + pause = FALSE, + Profiling = FALSE, + OutlierDetection = FALSE + ) + ) +} diff --git a/modules/assim.sequential/inst/sda.particle.R b/modules/assim.sequential/inst/sda.particle.R index 6d2b3574ad1..b478f53f2c8 100644 --- a/modules/assim.sequential/inst/sda.particle.R +++ b/modules/assim.sequential/inst/sda.particle.R @@ -1,48 +1,48 @@ ### State Variable Data Assimilation: -### Particle Filter +### Particle Filter ### Michael Dietze ### Prerequisite:assumes that you have run an ensemble run sda.particle <- function(model, settings) { - sda.demo <- FALSE ## debugging flag - unit.conv <- 0.001 * 2 # kgC/ha/yr to Mg/ha/yr - + sda.demo <- FALSE ## debugging flag + unit.conv <- 0.001 * 2 # kgC/ha/yr to Mg/ha/yr + ## internal functions library(Hmisc) weighted quantile wtd.quantile <- function(x, wt, q) { ord <- order(x) - wstar <- cumsum(wt[ord])/sum(wt) + wstar <- cumsum(wt[ord]) / sum(wt) qi <- findInterval(q, wstar) return(x[ord[qi]]) } # wtd.quantile - + ## extract time from one ensemble member Year <- PEcAn.utils::read.output("ENS00001", settings$outdir, variables = "Year", model = model)$Year time <- as.numeric(names(table(Year))) - + ### 
Load Ensemble ensp.all <- PEcAn.uncertainty::read.ensemble.ts(model) ensp <- ensp.all[[1]] ensp <- t(apply(ensp, 1, tapply, Year, mean)) * unit.conv - np <- nrow(ensp) ## number of particles - nt <- ncol(ensp) ## number of time steps - w <- matrix(1, np, nt) ## matrix of weights - + np <- nrow(ensp) ## number of particles + nt <- ncol(ensp) ## number of time steps + w <- matrix(1, np, nt) ## matrix of weights + ### Load Data if (sda.demo) { ## use one of the ensemble members as the true data - sd <- apply(ensp, 2, sd) * 0.3 ## pseudo data uncertainty - ref <- sample(1:np, 1) ## choose an ensemble member - y <- stats::rnorm(nt, ensp[ref, ], sd) ## add noise + sd <- apply(ensp, 2, sd) * 0.3 ## pseudo data uncertainty + ref <- sample(1:np, 1) ## choose an ensemble member + y <- stats::rnorm(nt, ensp[ref, ], sd) ## add noise } else { load(file.path(settings$outdir, "plot2AGB.Rdata")) mch <- which(yrvec %in% time) - y <- mNPP[1, mch] ## data mean - sd <- sNPP[1, mch] ## data uncertainty + y <- mNPP[1, mch] ## data mean + sd <- sNPP[1, mch] ## data uncertainty } - + ## diagnostic figures, not printed as part of normal operation if (FALSE) { ## plot ensemble @@ -59,40 +59,40 @@ sda.particle <- function(model, settings) { graphics::lines(yrvec[-1], low) } } - + ### analysis: generate log-weights for (t in seq_len(nt)) { if (!is.na(y[t])) { for (j in seq_len(np)) { - w[j, t] <- sum(stats::dnorm(y[t], ensp[j, t], sd[t], log = TRUE)) # loglik + w[j, t] <- sum(stats::dnorm(y[t], ensp[j, t], sd[t], log = TRUE)) # loglik } } } - + ### calculate weighted mean and CI of the ensemble - wc <- t(exp(apply(w, 1, cumsum))) - wbar <- apply(wc, 2, mean) - Xap <- 0 - Pap <- 0 + wc <- t(exp(apply(w, 1, cumsum))) + wbar <- apply(wc, 2, mean) + Xap <- 0 + Pap <- 0 XapCI <- matrix(NA, 2, nt) for (i in seq_len(nt)) { - Xap[i] <- stats::weighted.mean(ensp[, i], wc[, i]) + Xap[i] <- stats::weighted.mean(ensp[, i], wc[, i]) XapCI[, i] <- wtd.quantile(ensp[, i], wc[, i] / wbar[i], c(0.025, 0.975)) # Pap[i] <- wtd.var(ensp[,i],wc[,i]/wbar[i]) } Pap[Pap < 0] <- 0 - Xbar <- apply(ensp, 2, mean) - Xci <- apply(ensp, 2, stats::quantile, c(0.025, 0.975)) - + Xbar <- apply(ensp, 2, mean) + Xci <- apply(ensp, 2, stats::quantile, c(0.025, 0.975)) + ### Diagnostic graphs grDevices::pdf(file.path(outfolder, "ParticleFilter.pdf")) - + ## plot ensemble, filter, and data mean's and CI's graphics::par(mfrow = c(1, 1)) plot(time, y, ylim = range(c(y + 1.96 * sd, y - 1.96 * sd)), type = "b", xlab = "time", ylab = "Mg/ha/yr") graphics::lines(time, y + 1.96 * sd, col = 2) graphics::lines(time, y - 1.96 * sd, col = 2) - + plot(time, y, ylim = range(Xci), type = "n", xlab = "time", ylab = "Mg/ha/yr") ## ensemble graphics::lines(time, Xbar[1:nt], col = 6) @@ -103,34 +103,36 @@ sda.particle <- function(model, settings) { graphics::lines(time, y, col = 1, lwd = 3, pch = 1, type = "b") graphics::lines(time, y + 1.96 * sd, col = 1, lwd = 2, lty = 2) graphics::lines(time, y - 1.96 * sd, col = 1, lwd = 2, lty = 2) - - if (sda.demo) + + if (sda.demo) { graphics::lines(time, ensp[ref, ], col = 2, lwd = 2) - # for(i in 1:min(500,np)){ - # lines(t2,ensp[,i],lty=3,col='grey') + } + # for(i in 1:min(500,np)){ + # lines(t2,ensp[,i],lty=3,col='grey') # } # lines(t,x,type='b') # points(tvec,yvec,col=2,pch=19) - + # legend('topleft',c('True','Data','ens','ensmean'),col=c(1:2,'grey',6),lty=c(1,0,3,1),pch=c(1,19,1,1),cex=1.5) - + ## assimilation graphics::lines(time, Xap, col = 3, type = "b", lwd = 3, pch = 2) graphics::lines(time, XapCI[1, ], col = 3, lty = 2, 
lwd = 2) graphics::lines(time, XapCI[2, ], col = 3, lty = 2, lwd = 2) # lines(t,Xap+1.96*sqrt(Pap),col=3,lty=2) lines(t,Xap-1.96*sqrt(Pap),col=3,lty=2) - graphics::legend("bottomleft", - c("True", "Data", "PF", "ens", "ensmean"), - col = c(1:3, "grey", 6), - lty = c(1, 0, 1, 3, 1), - pch = c(1, 19, 1, 1, 0), - cex = 1.5) - - ### Plots demonstrating how the constraint of your target variable impacts + graphics::legend("bottomleft", + c("True", "Data", "PF", "ens", "ensmean"), + col = c(1:3, "grey", 6), + lty = c(1, 0, 1, 3, 1), + pch = c(1, 19, 1, 1, 0), + cex = 1.5 + ) + + ### Plots demonstrating how the constraint of your target variable impacts ### the other model pools and fluxes - + ## Calculate long-term means for all ensemble extracted variables - unit <- rep(unit.conv, 5) # unit[3] = 1 + unit <- rep(unit.conv, 5) # unit[3] = 1 ensp.conv <- list() for (i in seq_along(ensp.all)) { ensp.conv[[i]] <- t(apply(ensp.all[[i]], 1, tapply, Year, mean)) * unit[i] @@ -141,52 +143,54 @@ sda.particle <- function(model, settings) { plot(ensp.conv[[1]][, nt], ensp.conv[[i]][, nt], xlab = names(ensp.all)[1], ylab = names(ensp.all)[i]) } graphics::abline(0, 1, col = 2) - + ## unweighted distributions for (i in c(2, 3)) { graphics::hist(ensp.conv[[i]][, nt], main = names(ensp.all)[i], probability = TRUE) } - + ## Weighted distributions for (i in c(2, 3)) { - plotrix::weighted.hist(ensp.conv[[i]][, nt], wc[, nt]/sum(wc[, nt]), main = names(ensp.all)[i]) + plotrix::weighted.hist(ensp.conv[[i]][, nt], wc[, nt] / sum(wc[, nt]), main = names(ensp.all)[i]) } - + # for(i in c(1,2,4,5)){ for (i in c(2, 3)) { if (i == 5) { - plotrix::weighted.hist(ensp.conv[[i]][, nt], - wc[, nt]/sum(wc[, nt]), - main = names(ensp.all)[i], - col = 2) + plotrix::weighted.hist(ensp.conv[[i]][, nt], + wc[, nt] / sum(wc[, nt]), + main = names(ensp.all)[i], + col = 2 + ) } else { plotrix::weighted.hist(ensp.conv[[i]][, nt], - wc[, nt]/sum(wc[, nt]), - main = names(ensp.all)[i], - xlim = range(ensp.conv[[i]][, nt]) * c(0.9, 1.1), - col = 2) + wc[, nt] / sum(wc[, nt]), + main = names(ensp.all)[i], + xlim = range(ensp.conv[[i]][, nt]) * c(0.9, 1.1), + col = 2 + ) } graphics::hist(ensp.conv[[i]][, nt], main = names(ensp.all)[i], probability = TRUE, add = TRUE) } - + for (i in c(2, 3)) { h <- graphics::hist(ensp.conv[[i]][, nt], plot = FALSE) w <- plotrix::weighted.hist(ensp.conv[[i]][, nt], wc[, nt] / sum(wc[, nt]), plot = FALSE, breaks = h$breaks) dx <- diff(h$breaks)[1] - plot(w$mids - dx/2, - w$density/dx, - main = names(ensp.all)[i], - xlim = range(ensp.conv[[i]][, nt]) * c(0.9, 1.1), - col = 2, - type = "s", - lwd = 2) + plot(w$mids - dx / 2, + w$density / dx, + main = names(ensp.all)[i], + xlim = range(ensp.conv[[i]][, nt]) * c(0.9, 1.1), + col = 2, + type = "s", + lwd = 2 + ) graphics::lines(h$mids - dx / 2, h$density, col = 1, type = "s", lwd = 2) # hist(ensp.conv[[i]][,nt],main=names(ensp.all)[i],probability=TRUE,add=TRUE) } - + grDevices::dev.off() - + ## save all outputs save.image(paste(settings$outdir, "sda.particle.Rdata")) - } # sda.particle diff --git a/modules/assim.sequential/inst/sda.rewind.R b/modules/assim.sequential/inst/sda.rewind.R index ff4f4d94ea2..b212fd4b0df 100644 --- a/modules/assim.sequential/inst/sda.rewind.R +++ b/modules/assim.sequential/inst/sda.rewind.R @@ -1,115 +1,134 @@ ##' @title sda.rewind ##' @name sda.rewind ##' @author Ann Raiho \email{araiho@nd.edu} -##' +##' ##' @param settings SDA settings object ##' @param run.id list of current run.ids ##' @param time_to_rewind year that should be 
deleted last -##' +##' ##' @description Helper function for deleting SDA files to be able to run from a specified point -##' +##' ##' @return NA ##' @export -##' +##' -sda_rewind <- function(settings,run.id,time_to_rewind){ - if(nchar(time_to_rewind) == 4){ - for(i in 1:length(run.id)){ - +sda_rewind <- function(settings, run.id, time_to_rewind) { + if (nchar(time_to_rewind) == 4) { + for (i in 1:length(run.id)) { ### Create file to keep old output in. - dir_put <- file.path(settings$outdir,'out',run.id[[i]],as.numeric(time_to_rewind)-1) - if(!dir.exists(dir_put)) dir.create(dir_put) - - if(file.exists(file.path(settings$outdir,'out',run.id[[i]],'linkages.out.Rdata'))){ - file.copy(from = file.path(settings$outdir,'out',run.id[[i]],'linkages.out.Rdata'),to = dir_put) #going to be a lot of memory. might want to just take model specific files - }else{ - print(paste('No linkages.out.Rdata for', run.id[[i]])) + dir_put <- file.path(settings$outdir, "out", run.id[[i]], as.numeric(time_to_rewind) - 1) + if (!dir.exists(dir_put)) dir.create(dir_put) + + if (file.exists(file.path(settings$outdir, "out", run.id[[i]], "linkages.out.Rdata"))) { + file.copy(from = file.path(settings$outdir, "out", run.id[[i]], "linkages.out.Rdata"), to = dir_put) # going to be a lot of memory. might want to just take model specific files + } else { + print(paste("No linkages.out.Rdata for", run.id[[i]])) } - + ### Rename linkages specific files - if(file.exists(paste0(settings$outdir, - '/out/',run.id[[i]], - '/',time_to_rewind,'-12-31 23:59:59linkages.out.Rdata'))){ - - file.rename(paste0(settings$outdir, - '/out/',run.id[[i]], - '/',time_to_rewind,'-12-31 23:59:59linkages.out.Rdata'), - paste0(settings$outdir, - '/out/',run.id[[i]], - '/linkages.out.Rdata')) - + if (file.exists(paste0( + settings$outdir, + "/out/", run.id[[i]], + "/", time_to_rewind, "-12-31 23:59:59linkages.out.Rdata" + ))) { + file.rename( + paste0( + settings$outdir, + "/out/", run.id[[i]], + "/", time_to_rewind, "-12-31 23:59:59linkages.out.Rdata" + ), + paste0( + settings$outdir, + "/out/", run.id[[i]], + "/linkages.out.Rdata" + ) + ) } - - + + ### Remove netcdfs - for(t in (as.numeric(time_to_rewind)+1):year(settings$state.data.assimilation$end.date)){ - - file.remove(paste0(settings$outdir, - '/out/',run.id[[i]], - '/',t,'.nc')) - - file.remove(paste0(settings$outdir, - '/out/',run.id[[i]], - '/',t,'.nc.var')) + for (t in (as.numeric(time_to_rewind) + 1):year(settings$state.data.assimilation$end.date)) { + file.remove(paste0( + settings$outdir, + "/out/", run.id[[i]], + "/", t, ".nc" + )) + + file.remove(paste0( + settings$outdir, + "/out/", run.id[[i]], + "/", t, ".nc.var" + )) } - print(paste('editing runid',run.id[[i]])) + print(paste("editing runid", run.id[[i]])) } } - + #--- Updating the nt and etc - if(!dir.exists(file.path(settings$outdir,"SDA",as.numeric(time_to_rewind)-1))) dir.create(file.path(settings$outdir,"SDA",as.numeric(time_to_rewind)-1)) - + if (!dir.exists(file.path(settings$outdir, "SDA", as.numeric(time_to_rewind) - 1))) dir.create(file.path(settings$outdir, "SDA", as.numeric(time_to_rewind) - 1)) + # finding/moving files to it's end year dir - files.last.sda <- list.files.nodir(file.path(settings$outdir,"SDA")) - - #copying - if(file.exists(file.path(settings$outdir,"SDA"))){ - file.copy(file.path(file.path(settings$outdir,"SDA"),files.last.sda), - file.path(file.path(settings$outdir,"SDA"),paste0(as.numeric(time_to_rewind)-1,"/",files.last.sda))) - - load(file.path(settings$outdir,"SDA",'sda.output.Rdata')) - + 
files.last.sda <- list.files.nodir(file.path(settings$outdir, "SDA")) + + # copying + if (file.exists(file.path(settings$outdir, "SDA"))) { + file.copy( + file.path(file.path(settings$outdir, "SDA"), files.last.sda), + file.path(file.path(settings$outdir, "SDA"), paste0(as.numeric(time_to_rewind) - 1, "/", files.last.sda)) + ) + + load(file.path(settings$outdir, "SDA", "sda.output.Rdata")) + X <- FORECAST[[t]] FORECAST[t] <- NULL ANALYSIS[t] <- NULL enkf.params[t] <- NULL - - for(i in 1:length(new.state)) new.state[[i]] <- ANALYSIS[[t-1]][,i] #not sure if this should be t or t-1 - - t = t-1 - + + for (i in 1:length(new.state)) new.state[[i]] <- ANALYSIS[[t - 1]][, i] # not sure if this should be t or t-1 + + t <- t - 1 + save(site.locs, t, X, FORECAST, ANALYSIS, enkf.params, new.state, new.params, run.id, - ensemble.id, ensemble.samples, inputs, Viz.output, file = file.path(settings$outdir,"SDA", "sda.output.Rdata")) + ensemble.id, ensemble.samples, inputs, Viz.output, + file = file.path(settings$outdir, "SDA", "sda.output.Rdata") + ) } - + ### Paleon specific with leading zero dates - if(nchar(time_to_rewind) == 3){ - for(i in 1:length(run.id)){ - file.rename(paste0(settings$outdir, - '/out/',run.id[[i]], - '/',time_to_rewind,'-12-31 23:59:59linkages.out.Rdata'), - paste0(settings$outdir, - '/out/',run.id[[i]], - '/linkages.out.Rdata')) - for(t in time_to_rewind:year(settings$state.data.assimilation$end.date)){ - file.remove(paste0(settings$outdir, - '/out/',run.id[[i]], - '/','0',t,'.nc')) - file.remove(paste0(settings$outdir, - '/out/',run.id[[i]], - '/','0',t,'.nc.var')) + if (nchar(time_to_rewind) == 3) { + for (i in 1:length(run.id)) { + file.rename( + paste0( + settings$outdir, + "/out/", run.id[[i]], + "/", time_to_rewind, "-12-31 23:59:59linkages.out.Rdata" + ), + paste0( + settings$outdir, + "/out/", run.id[[i]], + "/linkages.out.Rdata" + ) + ) + for (t in time_to_rewind:year(settings$state.data.assimilation$end.date)) { + file.remove(paste0( + settings$outdir, + "/out/", run.id[[i]], + "/", "0", t, ".nc" + )) + file.remove(paste0( + settings$outdir, + "/out/", run.id[[i]], + "/", "0", t, ".nc.var" + )) } } } - } -#sda_rewind(settings, run.id, time_to_rewind = as.character(951)) +# sda_rewind(settings, run.id, time_to_rewind = as.character(951)) -# for example if you want to restart your sda run -# where t=1 and obs.times = 950 then you want time_to_rewind -# to be 951 because that is the last year of model +# for example if you want to restart your sda run +# where t=1 and obs.times = 950 then you want time_to_rewind +# to be 951 because that is the last year of model # run data you don't want - - diff --git a/modules/assim.sequential/inst/sda.tobit.simulation.R b/modules/assim.sequential/inst/sda.tobit.simulation.R index 16c5d05e7c9..6e38dba5055 100644 --- a/modules/assim.sequential/inst/sda.tobit.simulation.R +++ b/modules/assim.sequential/inst/sda.tobit.simulation.R @@ -16,73 +16,75 @@ sampler_toggle <- nimbleFunction( contains = sampler_BASE, setup = function(model, mvSaved, target, control) { type <- control$type - nested_sampler_name <- paste0('sampler_', type) - control_new <- nimbleOptions('MCMCcontrolDefaultList') + nested_sampler_name <- paste0("sampler_", type) + control_new <- nimbleOptions("MCMCcontrolDefaultList") control_new[[names(control)]] <- control nested_sampler_list <- nimbleFunctionList(sampler_BASE) nested_sampler_list[[1]] <- do.call(nested_sampler_name, list(model, mvSaved, target, control_new)) toggle <- 1 }, run = function() { - if(toggle == 1) + if 
(toggle == 1) { nested_sampler_list[[1]]$run() + } }, methods = list( - reset = function() + reset = function() { nested_sampler_list[[1]]$reset() + } ) ) # `row[i] ~ dcat(weights)` and then loop over the data as `X[row[j],]` -tobit.model <- nimbleCode({ - - q[1:N,1:N] ~ dwish(R = aq[1:N,1:N], df = bq) ## aq and bq are estimated over time - Q[1:N,1:N] <- inverse(q[1:N,1:N]) - X.mod[1:N] ~ dmnorm(muf[1:N], prec = pf[1:N,1:N]) ## Model Forecast ##muf and pf are assigned from ensembles - +tobit.model <- nimbleCode({ + q[1:N, 1:N] ~ dwish(R = aq[1:N, 1:N], df = bq) ## aq and bq are estimated over time + Q[1:N, 1:N] <- inverse(q[1:N, 1:N]) + X.mod[1:N] ~ dmnorm(muf[1:N], prec = pf[1:N, 1:N]) ## Model Forecast ##muf and pf are assigned from ensembles + ## add process error - X[1:N] ~ dmnorm(X.mod[1:N], prec = q[1:N,1:N]) - - #observation operator - y_star[1:YN] <- X[1:N]#y_star_create(X[1:YN]) - + X[1:N] ~ dmnorm(X.mod[1:N], prec = q[1:N, 1:N]) + + # observation operator + y_star[1:YN] <- X[1:N] # y_star_create(X[1:YN]) + ## Analysis - y.censored[1:YN] ~ dmnorm(y_star[1:YN], prec = r[1:YN,1:YN]) #is it an okay assumpution to just have X and Y in the same order? + y.censored[1:YN] ~ dmnorm(y_star[1:YN], prec = r[1:YN, 1:YN]) # is it an okay assumpution to just have X and Y in the same order? + - - #don't flag y.censored as data, y.censored in inits - #remove y.censored samplers and only assign univariate samplers on NAs - - for(i in 1:YN){ + # don't flag y.censored as data, y.censored in inits + # remove y.censored samplers and only assign univariate samplers on NAs + + for (i in 1:YN) { y.ind[i] ~ dinterval(y.censored[i], 0) } - }) library(mvtnorm) -ciEnvelope <- function(x,ylo,yhi,...){ - polygon(cbind(c(x, rev(x), x[1]), c(ylo, rev(yhi), - ylo[1])), border = NA,...) +ciEnvelope <- function(x, ylo, yhi, ...) { + polygon(cbind(c(x, rev(x), x[1]), c( + ylo, rev(yhi), + ylo[1] + )), border = NA, ...) 
} -wish.df <- function(Om,X,i,j,col){ - n = (Om[i,j]^2 + Om[i,i]*Om[j,j])/stats::var(X[,col]) +wish.df <- function(Om, X, i, j, col) { + n <- (Om[i, j]^2 + Om[i, i] * Om[j, j]) / stats::var(X[, col]) return(n) } ## Simulate Forecast Data nens <- 50 -X <- matrix(0,nens,2) -X[,1] <- rnorm(nens,mean = 3, 1) -X[,2]<- rnorm(nens,mean = 5, 1) +X <- matrix(0, nens, 2) +X[, 1] <- rnorm(nens, mean = 3, 1) +X[, 2] <- rnorm(nens, mean = 5, 1) -#X[1:49,2] <- 0 -intervalX <- matrix(c(0,0,10000,10000),2,2) +# X[1:49,2] <- 0 +intervalX <- matrix(c(0, 0, 10000, 10000), 2, 2) -x.ind <- x.censored <- matrix(NA, ncol=ncol(X), nrow=nrow(X)) +x.ind <- x.censored <- matrix(NA, ncol = ncol(X), nrow = nrow(X)) mu.f <- colMeans(X) Pf <- cov(X) @@ -90,10 +92,10 @@ Pf <- cov(X) J <- ncol(X) N <- nrow(X) -for(j in 1:J){ - for(n in 1:N){ - x.ind[n,j] <- as.numeric(X[n,j] > 0) - x.censored[n,j] <- as.numeric(ifelse(X[n,j] > intervalX[j,2], 0, X[n,j])) +for (j in 1:J) { + for (n in 1:N) { + x.ind[n, j] <- as.numeric(X[n, j] > 0) + x.censored[n, j] <- as.numeric(ifelse(X[n, j] > intervalX[j, 2], 0, X[n, j])) } } @@ -101,16 +103,15 @@ for(j in 1:J){ # to manually remove a burn-in period # set up the "d" function for the distribution ddirchmulti <- nimbleFunction( - run = function(x = double(1), alpha = double(1), size = double(0), log = integer(0)){ + run = function(x = double(1), alpha = double(1), size = double(0), log = integer(0)) { returnType(double(0)) logProb <- lgamma(sum(alpha)) - sum(lgamma(alpha)) + sum(lgamma(alpha + x)) - lgamma(sum(alpha) + size) - - if(log) { + + if (log) { return(logProb) } else { return(exp(logProb)) } - } ) @@ -118,18 +119,21 @@ ddirchmulti <- nimbleFunction( rdirchmulti <- nimbleFunction( run = function(n = integer(0), alpha = double(1), size = double(0)) { returnType(double(1)) - if(n != 1) nimPrint("rdirchmulti only allows n = 1; using n = 1.") + if (n != 1) nimPrint("rdirchmulti only allows n = 1; using n = 1.") p <- rdirch(1, alpha) return(rmulti(1, size = size, prob = p)) - }) + } +) # tell NIMBLE about the newly available distribution -registerDistributions(list(ddirchmulti = list(BUGSdist = "ddirchmulti(alpha, size)", - types = c('value = double(1)', 'alpha = double(1)')))) +registerDistributions(list(ddirchmulti = list( + BUGSdist = "ddirchmulti(alpha, size)", + types = c("value = double(1)", "alpha = double(1)") +))) dwtmnorm <- nimbleFunction( - run = function(x = double(1), mean = double(1), - cov = double(2), wt = double(0), log = integer(0)){ + run = function(x = double(1), mean = double(1), + cov = double(2), wt = double(0), log = integer(0)) { returnType(double(0)) Prob <- dmnorm_chol(x, mean, chol(cov), prec_param = FALSE) * wt return(Prob) @@ -142,9 +146,9 @@ dwtmnorm <- nimbleFunction( rwtmnorm <- nimbleFunction( run = function(n = integer(0), mean = double(1), - cov = double(2), wt = double(0)){ + cov = double(2), wt = double(0)) { returnType(double(1)) - if(n != 1) nimPrint("rdirchmulti only allows n = 1; using n = 1.") + if (n != 1) nimPrint("rdirchmulti only allows n = 1; using n = 1.") Prob <- rmnorm_chol(n, mean, chol(cov), prec_param = FALSE) * wt return(Prob) } @@ -152,271 +156,299 @@ rwtmnorm <- nimbleFunction( dwtmnorm <- nimbleFunction( run = function(x = double(1), mean = double(1), cov = double(2), - wt = double(0), log = integer(0)){ + wt = double(0), log = integer(0)) { returnType(double(0)) - - logProb <- dmnorm_chol(x = x, mean = mean, cholesky = chol(cov), prec_param = FALSE,log = log) * wt - - if(log){return((logProb))} else {return((exp(logProb)))} 
+ + logProb <- dmnorm_chol(x = x, mean = mean, cholesky = chol(cov), prec_param = FALSE, log = log) * wt + + if (log) { + return((logProb)) + } else { + return((exp(logProb))) + } } ) -registerDistributions(list(dwtmnorm = list(BUGSdist = "dwtmnorm(mean, cov, wt)", - types = c('value = double(1)','mean = double(1)', 'cov = double(2)', 'wt = double(0)')))) -#more likely -(dwtmnorm(x=c(10,10),mean = c(20,10), cov = diag(2), wt = 1,log=1)) -#less likely -(dwtmnorm(x=c(100,100),mean = c(20,10), cov = diag(2), wt = 1,log=1)) +registerDistributions(list(dwtmnorm = list( + BUGSdist = "dwtmnorm(mean, cov, wt)", + types = c("value = double(1)", "mean = double(1)", "cov = double(2)", "wt = double(0)") +))) +# more likely +(dwtmnorm(x = c(10, 10), mean = c(20, 10), cov = diag(2), wt = 1, log = 1)) +# less likely +(dwtmnorm(x = c(100, 100), mean = c(20, 10), cov = diag(2), wt = 1, log = 1)) dw_test <- nimbleCode({ - - y[1:2] ~ dwtmnorm(mean = muf[1:2], cov = pf[1:2,1:2], wt = 1) - + y[1:2] ~ dwtmnorm(mean = muf[1:2], cov = pf[1:2, 1:2], wt = 1) }) -dw_test_pred <- nimbleModel(dw_test, - data = list(muf = c(10,20), - pf=diag(2)*5), - name = 'dw') -conf_dw_test <- configureMCMC(dw_test_pred, print=TRUE) +dw_test_pred <- nimbleModel(dw_test, + data = list( + muf = c(10, 20), + pf = diag(2) * 5 + ), + name = "dw" +) +conf_dw_test <- configureMCMC(dw_test_pred, print = TRUE) Rmcmc_dw_test <- buildMCMC(conf_dw_test) -Cmodel_dw_test <- compileNimble(dw_test_pred,showCompilerOutput = TRUE) +Cmodel_dw_test <- compileNimble(dw_test_pred, showCompilerOutput = TRUE) tobit2space.model <- nimbleCode({ - - for(i in 1:N){ - #row[i] ~ dcat(wts[1:N]) - #s[i] ~ dcat(wts[1:N]) - #d[i] <- myCalculation(grid[1:N], s[i]) - y.censored[i,1:J] ~ dwtmnorm(mean = muf[1:J], cov = pf[1:J,1:J], wt = wts[i]) - for(j in 1:J){ - y.ind[i,j] ~ dinterval(y.censored[i,j],0) + for (i in 1:N) { + # row[i] ~ dcat(wts[1:N]) + # s[i] ~ dcat(wts[1:N]) + # d[i] <- myCalculation(grid[1:N], s[i]) + y.censored[i, 1:J] ~ dwtmnorm(mean = muf[1:J], cov = pf[1:J, 1:J], wt = wts[i]) + for (j in 1:J) { + y.ind[i, j] ~ dinterval(y.censored[i, j], 0) } } - - #weighting the likelihood - #row[i] ~ dcat(weights) - #X[row[j],] - - muf[1:J] ~ dmnorm(mean = mu_0[1:J], cov = pf[1:J,1:J]) - - Sigma[1:J,1:J] <- lambda_0[1:J,1:J]/nu_0 - pf[1:J,1:J] ~ dinvwish(S = Sigma[1:J,1:J], df = J) - + + # weighting the likelihood + # row[i] ~ dcat(weights) + # X[row[j],] + + muf[1:J] ~ dmnorm(mean = mu_0[1:J], cov = pf[1:J, 1:J]) + + Sigma[1:J, 1:J] <- lambda_0[1:J, 1:J] / nu_0 + pf[1:J, 1:J] ~ dinvwish(S = Sigma[1:J, 1:J], df = J) }) -x.ind <- x.censored <- matrix(NA, ncol=ncol(X), nrow=nrow(X)) -for(j in seq_along(mu.f)){ - for(n in seq_len(nrow(X))){ - x.ind[n,j] <- as.numeric(X[n,j] > 0) - x.censored[n,j] <- as.numeric(ifelse(X[n,j] > intervalX[j,2], 0, X[n,j])) # +x.ind <- x.censored <- matrix(NA, ncol = ncol(X), nrow = nrow(X)) +for (j in seq_along(mu.f)) { + for (n in seq_len(nrow(X))) { + x.ind[n, j] <- as.numeric(X[n, j] > 0) + x.censored[n, j] <- as.numeric(ifelse(X[n, j] > intervalX[j, 2], 0, X[n, j])) # } } -#all equal -wts <- rep(1,nens) - -constants.tobit2space = list(N = nrow(X), - J = length(mu.f)) +# all equal +wts <- rep(1, nens) -data.tobit2space = list(y.ind = x.ind, - y.censored = x.censored, - mu_0 = rep(0,length(mu.f)), - lambda_0 = diag(10,length(mu.f)), - nu_0 = 3, - wts = wts)#some measure of prior obs +constants.tobit2space <- list( + N = nrow(X), + J = length(mu.f) +) -inits.tobit2space = list(pf = Pf, muf = colMeans(X)) -#set.seed(0) -#ptm <- 
proc.time() -tobit2space_pred <- nimbleModel(tobit2space.model, data = data.tobit2space, - constants = constants.tobit2space, inits = inits.tobit2space, - name = 'space') +data.tobit2space <- list( + y.ind = x.ind, + y.censored = x.censored, + mu_0 = rep(0, length(mu.f)), + lambda_0 = diag(10, length(mu.f)), + nu_0 = 3, + wts = wts +) # some measure of prior obs + +inits.tobit2space <- list(pf = Pf, muf = colMeans(X)) +# set.seed(0) +# ptm <- proc.time() +tobit2space_pred <- nimbleModel(tobit2space.model, + data = data.tobit2space, + constants = constants.tobit2space, inits = inits.tobit2space, + name = "space" +) ## Adding X.mod,q,r as data for building model. -conf_tobit2space <- configureMCMC(tobit2space_pred, thin = 10, print=TRUE) -conf_tobit2space$addMonitors(c("pf", "muf","y.censored")) +conf_tobit2space <- configureMCMC(tobit2space_pred, thin = 10, print = TRUE) +conf_tobit2space$addMonitors(c("pf", "muf", "y.censored")) ## [1] conjugate_dmnorm_dmnorm sampler: X[1:5] ## important! ## this is needed for correct indexing later samplerNumberOffset_tobit2space <- length(conf_tobit2space$getSamplers()) -for(j in seq_along(mu.f)){ - for(n in seq_len(nrow(X))){ - node <- paste0('y.censored[',n,',',j,']') - conf_tobit2space$addSampler(node, 'toggle', control=list(type='RW')) +for (j in seq_along(mu.f)) { + for (n in seq_len(nrow(X))) { + node <- paste0("y.censored[", n, ",", j, "]") + conf_tobit2space$addSampler(node, "toggle", control = list(type = "RW")) ## could instead use slice samplers, or any combination thereof, e.g.: - ##conf$addSampler(node, 'toggle', control=list(type='slice')) + ## conf$addSampler(node, 'toggle', control=list(type='slice')) } } -#conf_tobit2space$printSamplers() +# conf_tobit2space$printSamplers() Rmcmc_tobit2space <- buildMCMC(conf_tobit2space) -Cmodel_tobit2space <- compileNimble(tobit2space_pred,showCompilerOutput = TRUE) +Cmodel_tobit2space <- compileNimble(tobit2space_pred, showCompilerOutput = TRUE) Cmcmc_tobit2space <- compileNimble(Rmcmc_tobit2space, project = tobit2space_pred) -#somewhat sampled +# somewhat sampled set.seed(0) -wts_samps <- sample(size = nens,x=1:nens,replace = F) -wts <- rdirch(n=1,alpha = wts_samps)*50 +wts_samps <- sample(size = nens, x = 1:nens, replace = F) +wts <- rdirch(n = 1, alpha = wts_samps) * 50 -#really sampled +# really sampled set.seed(0) -wts_samps <- sample(size = nens,x=c(rep(1,49),50),replace = F) -wts <- rdirch(n=1,alpha = wts_samps)*50 +wts_samps <- sample(size = nens, x = c(rep(1, 49), 50), replace = F) +wts <- rdirch(n = 1, alpha = wts_samps) * 50 -#equal weights -wts <- rep(1,nens) +# equal weights +wts <- rep(1, nens) Cmodel_tobit2space$wts <- wts -for(i in seq_along(X)) { +for (i in seq_along(X)) { ## ironically, here we have to "toggle" the value of y.ind[i] ## this specifies that when y.ind[i] = 1, ## indicator variable is set to 0, which specifies *not* to sample - valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space+i]], 'toggle', 1-x.ind[i]) + valueInCompiledNimbleFunction(Cmcmc_tobit2space$samplerFunctions[[samplerNumberOffset_tobit2space + i]], "toggle", 1 - x.ind[i]) } set.seed(0) -dat.tobit2space <- runMCMC(Cmcmc_tobit2space, niter = 50000, progressBar=TRUE) +dat.tobit2space <- runMCMC(Cmcmc_tobit2space, niter = 50000, progressBar = TRUE) -paste('1) posteior mean =',mean(dat.tobit2space[,1]),'weighted mean = ', stats::weighted.mean(X[,1],wts),'unweighted mean = ',mu.f[1]) -paste('2) posteior mean =',mean(dat.tobit2space[,2]),'weighted mean = ', 
stats::weighted.mean(X[,2],wts),'unweighted mean = ',mu.f[2]) +paste("1) posteior mean =", mean(dat.tobit2space[, 1]), "weighted mean = ", stats::weighted.mean(X[, 1], wts), "unweighted mean = ", mu.f[1]) +paste("2) posteior mean =", mean(dat.tobit2space[, 2]), "weighted mean = ", stats::weighted.mean(X[, 2], wts), "unweighted mean = ", mu.f[2]) -#pdf(file.path(outdir,paste0('assessParams',t,'.pdf'))) -assessParams(dat = dat.tobit2space[1000:5000,], Xt = X) -#dev.off() +# pdf(file.path(outdir,paste0('assessParams',t,'.pdf'))) +assessParams(dat = dat.tobit2space[1000:5000, ], Xt = X) +# dev.off() ## update parameters -dat.tobit2space <- dat.tobit2space[1000:5000, ] -imuf <- grep("muf", colnames(dat.tobit2space)) +dat.tobit2space <- dat.tobit2space[1000:5000, ] +imuf <- grep("muf", colnames(dat.tobit2space)) mu.f <- colMeans(dat.tobit2space[, imuf]) -iPf <- grep("pf", colnames(dat.tobit2space)) -Pf <- matrix(colMeans(dat.tobit2space[, iPf]),ncol(X),ncol(X)) +iPf <- grep("pf", colnames(dat.tobit2space)) +Pf <- matrix(colMeans(dat.tobit2space[, iPf]), ncol(X), ncol(X)) -iycens <- grep("y.censored",colnames(dat.tobit2space)) +iycens <- grep("y.censored", colnames(dat.tobit2space)) # Why does cov(X.new) != Pf ? -X.new <- matrix(colMeans(dat.tobit2space[,iycens]),nrow(X),ncol(X)) -#Pf <- cov(X.new) - -nt = 50 -m = c(1.03,.9) -model = matrix(0,nt,2) ; Y.dat = model ; y.ind = model ; y.censored = model -model[1,] = c(0,10) -q = diag(2) #process variance -R = r = diag(2)*2 #observation error #the lower you make this the better the convergence on the covariance matrix - - -for(t in 2:nt){ - model[t,] = rmvnorm(1,m*model[t-1,],q) +X.new <- matrix(colMeans(dat.tobit2space[, iycens]), nrow(X), ncol(X)) +# Pf <- cov(X.new) + +nt <- 50 +m <- c(1.03, .9) +model <- matrix(0, nt, 2) +Y.dat <- model +y.ind <- model +y.censored <- model +model[1, ] <- c(0, 10) +q <- diag(2) # process variance +R <- r <- diag(2) * 2 # observation error #the lower you make this the better the convergence on the covariance matrix + + +for (t in 2:nt) { + model[t, ] <- rmvnorm(1, m * model[t - 1, ], q) } -for(t in 1:nt){ - Y.dat[t,] = rmvnorm(1,model[t,],r) - y.ind[t,] <- as.numeric(Y.dat[t,]>0) - y.censored[t,] <- as.numeric(ifelse(Y.dat[t,]>=0, Y.dat[t,], 0)) +for (t in 1:nt) { + Y.dat[t, ] <- rmvnorm(1, model[t, ], r) + y.ind[t, ] <- as.numeric(Y.dat[t, ] > 0) + y.censored[t, ] <- as.numeric(ifelse(Y.dat[t, ] >= 0, Y.dat[t, ], 0)) } #### Plot data -plot(Y.dat[,1],ylim=range(Y.dat),pch=19) -lines(model[,1],lwd=2) -points(Y.dat[,2],col="blue",pch=18) -lines(model[,2],col="blue",lwd=2) +plot(Y.dat[, 1], ylim = range(Y.dat), pch = 19) +lines(model[, 1], lwd = 2) +points(Y.dat[, 2], col = "blue", pch = 18) +lines(model[, 2], col = "blue", lwd = 2) #### Storage arrays -aqq = array(0,dim=c(2,2,nt+1)); Sbar.save = aqq; Pf.save = aqq; q.bar.save = aqq; Pa.save = aqq -bqq = numeric(nt+1); -Sbar.CI = array(0,dim=c(3,4,nt)); q.bar.CI = Sbar.CI -dat.save = array(0,dim=c(501,8,nt)) -CI.X1 <- matrix(0,3,nt) ; CI.X2 = CI.X1 +aqq <- array(0, dim = c(2, 2, nt + 1)) +Sbar.save <- aqq +Pf.save <- aqq +q.bar.save <- aqq +Pa.save <- aqq +bqq <- numeric(nt + 1) +Sbar.CI <- array(0, dim = c(3, 4, nt)) +q.bar.CI <- Sbar.CI +dat.save <- array(0, dim = c(501, 8, nt)) +CI.X1 <- matrix(0, 3, nt) +CI.X2 <- CI.X1 #### initial conditions -bqq[1] <- length(mu.f) -if(is.null(aqq)){ - aqq <- array(0, dim = c(nt,ncol(X),ncol(X))) -}else{ - if(ncol(X)!=dim(aqq)[2]|ncol(X)!=dim(aqq)[3]){ - print('error: X has changed dimensions') +bqq[1] <- length(mu.f) +if 
(is.null(aqq)) { + aqq <- array(0, dim = c(nt, ncol(X), ncol(X))) +} else { + if (ncol(X) != dim(aqq)[2] | ncol(X) != dim(aqq)[3]) { + print("error: X has changed dimensions") } } -aqq[1, , ] <- diag(length(mu.f)) * bqq[1] #Q - -t = 1 - -constants.tobit = list(N = ncol(X), YN = length(y.ind[t,])) #doing y.ind[t,] because we only want one obs at a time. obs at t. -dimensions.tobit = list(X = length(mu.f), X.mod = ncol(X), - Q = c(length(mu.f),length(mu.f))) - -data.tobit = list(muf = as.vector(mu.f), - pf = solve(Pf), - aq = aqq[t,,], bq = bqq[t], - y.ind = y.ind[t,], - y.censored = y.censored[t,], - r = solve(R)) -inits.pred = list(q = diag(length(mu.f)), X.mod = as.vector(mu.f), - X = rnorm(length(mu.f),0,1)) - -#set.seed(0) -#ptm <- proc.time() -model_pred <- nimbleModel(tobit.model, data = data.tobit, dimensions = dimensions.tobit, - constants = constants.tobit, inits = inits.pred) +aqq[1, , ] <- diag(length(mu.f)) * bqq[1] # Q + +t <- 1 + +constants.tobit <- list(N = ncol(X), YN = length(y.ind[t, ])) # doing y.ind[t,] because we only want one obs at a time. obs at t. +dimensions.tobit <- list( + X = length(mu.f), X.mod = ncol(X), + Q = c(length(mu.f), length(mu.f)) +) + +data.tobit <- list( + muf = as.vector(mu.f), + pf = solve(Pf), + aq = aqq[t, , ], bq = bqq[t], + y.ind = y.ind[t, ], + y.censored = y.censored[t, ], + r = solve(R) +) +inits.pred <- list( + q = diag(length(mu.f)), X.mod = as.vector(mu.f), + X = rnorm(length(mu.f), 0, 1) +) + +# set.seed(0) +# ptm <- proc.time() +model_pred <- nimbleModel(tobit.model, + data = data.tobit, dimensions = dimensions.tobit, + constants = constants.tobit, inits = inits.pred +) ## Adding X.mod,q,r as data for building model. -conf <- configureMCMC(model_pred, print=TRUE) -conf$addMonitors(c("X","q","Q")) +conf <- configureMCMC(model_pred, print = TRUE) +conf$addMonitors(c("X", "q", "Q")) ## [1] conjugate_dmnorm_dmnorm sampler: X[1:5] ## important! 
## this is needed for correct indexing later samplerNumberOffset <- length(conf$getSamplers()) -for(i in 1:length(y.ind[t,])) { - node <- paste0('y.censored[',i,']') - conf$addSampler(node, 'toggle', control=list(type='RW')) +for (i in 1:length(y.ind[t, ])) { + node <- paste0("y.censored[", i, "]") + conf$addSampler(node, "toggle", control = list(type = "RW")) ## could instead use slice samplers, or any combination thereof, e.g.: - ##conf$addSampler(node, 'toggle', control=list(type='slice')) + ## conf$addSampler(node, 'toggle', control=list(type='slice')) } conf$printSamplers() ## can monitor y.censored, if you wish, to verify correct behaviour -conf$addMonitors('y.censored') +conf$addMonitors("y.censored") Rmcmc <- buildMCMC(conf) Cmodel <- compileNimble(model_pred) Cmcmc <- compileNimble(Rmcmc, project = model_pred) -for(i in 1:length(y.ind[t,])) { +for (i in 1:length(y.ind[t, ])) { ## ironically, here we have to "toggle" the value of y.ind[i] ## this specifies that when y.ind[i] = 1, ## indicator variable is set to 0, which specifies *not* to sample - valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset+i]], 'toggle', 1-y.ind[i]) + valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset + i]], "toggle", 1 - y.ind[i]) } set.seed(0) dat <- runMCMC(Cmcmc, niter = 50000) ## update parameters -dat <- dat[10000:50000, ] -iq <- grep("q", colnames(dat)) -iX <- grep("X[", colnames(dat), fixed = TRUE) +dat <- dat[10000:50000, ] +iq <- grep("q", colnames(dat)) +iX <- grep("X[", colnames(dat), fixed = TRUE) mu.a <- colMeans(dat[, iX]) -Pa <- cov(dat[, iX]) +Pa <- cov(dat[, iX]) Pa[is.na(Pa)] <- 0 CI.X1[, t] <- quantile(dat[, iX[1]], c(0.025, 0.5, 0.975)) CI.X2[, t] <- quantile(dat[, iX[2]], c(0.025, 0.5, 0.975)) -mq <- dat[, iq] # Omega, Precision -q.bar <- matrix(apply(mq, 2, mean), length(mu.f), length(mu.f)) # Mean Omega, Precision +mq <- dat[, iq] # Omega, Precision +q.bar <- matrix(apply(mq, 2, mean), length(mu.f), length(mu.f)) # Mean Omega, Precision -col <- matrix(1:length(mu.f) ^ 2, length(mu.f), length(mu.f)) -WV <- matrix(0, length(mu.f), length(mu.f)) +col <- matrix(1:length(mu.f)^2, length(mu.f), length(mu.f)) +WV <- matrix(0, length(mu.f), length(mu.f)) for (i in seq_along(mu.f)) { for (j in seq_along(mu.f)) { WV[i, j] <- wish.df(q.bar, X = mq, i = i, j = j, col = col[i, j]) @@ -429,118 +461,117 @@ if (n < length(mu.f)) { } V <- solve(q.bar) * n -aqq[t + 1, , ] <- V -bqq[t + 1] <- n +aqq[t + 1, , ] <- V +bqq[t + 1] <- n -#ptm <- proc.time() -for(t in 1:nt){ - - Cmodel$y.ind <- y.ind[t,] - Cmodel$y.censored <- y.censored[t,] - Cmodel$aq <- aqq[,,t] +# ptm <- proc.time() +for (t in 1:nt) { + Cmodel$y.ind <- y.ind[t, ] + Cmodel$y.censored <- y.censored[t, ] + Cmodel$aq <- aqq[, , t] Cmodel$bq <- bqq[t] Cmodel$muf <- mu.f Cmodel$pf <- Pf - - for(i in 1:2) { + + for (i in 1:2) { ## ironically, here we have to "toggle" the value of y.ind[i] ## this specifies that when y.ind[i] = 1, ## indicator variable is set to 0, which specifies *not* to sample - valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset+i]], 'toggle', 1-y.ind[t,i]) + valueInCompiledNimbleFunction(Cmcmc$samplerFunctions[[samplerNumberOffset + i]], "toggle", 1 - y.ind[t, i]) } - + set.seed(0) - dat <- runMCMC(Cmcmc, niter = 10000, progressBar=FALSE) - - dat = dat[500:1000,] - #dat.save[,,t] = dat - mu.a = colMeans(dat[,5:6]) #X - Pa = cov(dat[,5:6]) #cov(X) - Pa.save[,,t] = Pa - Pa[is.na(Pa)]<- 0 - - CI.X1[,t] = quantile(dat[,5],c(0.025,0.5,0.975))#X[1] - CI.X2[,t] = 
quantile(dat[,6],c(0.025,0.5,0.975))#X[2] - - mq = dat[,1:4] #Sigma, Variance #Q - mq1 = dat[,9:12] #Omega, Precision #q - - Sbar = matrix(apply(mq,2,mean),2,2) #Mean Sigma, Variance - q.bar = matrix(apply(mq1,2,mean),2,2) #Mean Omega, Precision - - col = matrix(1:4,2,2) - WV = matrix(0,2,2) - for(i in 1:2){ - for(j in 1:2){ - WV[i,j] <- wish.df(q.bar, X = mq1, i=i, j=j, col=col[i,j]) + dat <- runMCMC(Cmcmc, niter = 10000, progressBar = FALSE) + + dat <- dat[500:1000, ] + # dat.save[,,t] = dat + mu.a <- colMeans(dat[, 5:6]) # X + Pa <- cov(dat[, 5:6]) # cov(X) + Pa.save[, , t] <- Pa + Pa[is.na(Pa)] <- 0 + + CI.X1[, t] <- quantile(dat[, 5], c(0.025, 0.5, 0.975)) # X[1] + CI.X2[, t] <- quantile(dat[, 6], c(0.025, 0.5, 0.975)) # X[2] + + mq <- dat[, 1:4] # Sigma, Variance #Q + mq1 <- dat[, 9:12] # Omega, Precision #q + + Sbar <- matrix(apply(mq, 2, mean), 2, 2) # Mean Sigma, Variance + q.bar <- matrix(apply(mq1, 2, mean), 2, 2) # Mean Omega, Precision + + col <- matrix(1:4, 2, 2) + WV <- matrix(0, 2, 2) + for (i in 1:2) { + for (j in 1:2) { + WV[i, j] <- wish.df(q.bar, X = mq1, i = i, j = j, col = col[i, j]) } } - - n = mean(WV) #n + 1 - if(n < 2) n = 2 - V = solve(q.bar) * n - - aqq[,,t+1] = V - bqq[t+1] = n - - plot(bqq[1:t+1],pch=19) - - q.bar.save[,,t] = q.bar - q.bar.CI[,,t] = apply(mq1,2,quantile,c(0.025,0.5,0.975)) - Sbar.save[,,t] = Sbar - Sbar.CI[,,t] = apply(mq,2,quantile,c(0.025,0.5,0.975)) - + + n <- mean(WV) # n + 1 + if (n < 2) n <- 2 + V <- solve(q.bar) * n + + aqq[, , t + 1] <- V + bqq[t + 1] <- n + + plot(bqq[1:t + 1], pch = 19) + + q.bar.save[, , t] <- q.bar + q.bar.CI[, , t] <- apply(mq1, 2, quantile, c(0.025, 0.5, 0.975)) + Sbar.save[, , t] <- Sbar + Sbar.CI[, , t] <- apply(mq, 2, quantile, c(0.025, 0.5, 0.975)) + ## Ensemble forward simulation - Xf = rmvnorm(1000,m*mu.a,Pa) - mu.f = t(colMeans(Xf)) - Pf = solve(cov(Xf)) - Pf.save[,,t] = Pf + Xf <- rmvnorm(1000, m * mu.a, Pa) + mu.f <- t(colMeans(Xf)) + Pf <- solve(cov(Xf)) + Pf.save[, , t] <- Pf } -#proc.time() - ptm +# proc.time() - ptm ### degrees of freedom over time -> should be increasing because we are always getting more data -plot(bqq,xlab="Time",ylab="Degrees of Freedom of Wishart",pch=16) +plot(bqq, xlab = "Time", ylab = "Degrees of Freedom of Wishart", pch = 16) ### Data assimilation time series -plot(Y.dat[,1],ylim=range(Y.dat)+c(0,20),pch=19,xlab="Time",ylab="Xs") -lines(model[,1],lwd=2) -col=col2rgb("darkgrey") -col1=rgb(col[1],col[2],col[3],0.4*256,maxColorValue=256) -ciEnvelope(1:nt,CI.X1[1,],CI.X1[3,],col=col1) - -points(Y.dat[,2],col="blue",pch=18) -lines(model[,2],col="blue",lwd=2) -col=col2rgb("lightblue") -col1=rgb(col[1],col[2],col[3],0.4*256,maxColorValue=256) -ciEnvelope(1:nt,CI.X2[1,],CI.X2[3,],col=col1) +plot(Y.dat[, 1], ylim = range(Y.dat) + c(0, 20), pch = 19, xlab = "Time", ylab = "Xs") +lines(model[, 1], lwd = 2) +col <- col2rgb("darkgrey") +col1 <- rgb(col[1], col[2], col[3], 0.4 * 256, maxColorValue = 256) +ciEnvelope(1:nt, CI.X1[1, ], CI.X1[3, ], col = col1) + +points(Y.dat[, 2], col = "blue", pch = 18) +lines(model[, 2], col = "blue", lwd = 2) +col <- col2rgb("lightblue") +col1 <- rgb(col[1], col[2], col[3], 0.4 * 256, maxColorValue = 256) +ciEnvelope(1:nt, CI.X2[1, ], CI.X2[3, ], col = col1) ## how well are we estimating process error (Q) ---> should be diag(2) -par(mfrow=c(2,2)) -plot(Sbar.save[1,1,],ylim=range(Sbar.CI[,1,])) -abline(h=q[1,1]) -ciEnvelope(1:nt,Sbar.CI[1,1,],Sbar.CI[3,1,],col=col1) +par(mfrow = c(2, 2)) +plot(Sbar.save[1, 1, ], ylim = range(Sbar.CI[, 1, ])) +abline(h = 
q[1, 1]) +ciEnvelope(1:nt, Sbar.CI[1, 1, ], Sbar.CI[3, 1, ], col = col1) -plot(Sbar.save[1,2,],ylim=range(Sbar.CI[,2,])) -abline(h=q[1,2]) -ciEnvelope(1:nt,Sbar.CI[1,2,],Sbar.CI[3,2,],col=col1) +plot(Sbar.save[1, 2, ], ylim = range(Sbar.CI[, 2, ])) +abline(h = q[1, 2]) +ciEnvelope(1:nt, Sbar.CI[1, 2, ], Sbar.CI[3, 2, ], col = col1) -plot(Sbar.save[2,2,],ylim=range(Sbar.CI[,4,])) -abline(h=q[2,2]) -ciEnvelope(1:nt,Sbar.CI[1,4,],Sbar.CI[3,4,],col=col1) +plot(Sbar.save[2, 2, ], ylim = range(Sbar.CI[, 4, ])) +abline(h = q[2, 2]) +ciEnvelope(1:nt, Sbar.CI[1, 4, ], Sbar.CI[3, 4, ], col = col1) -plot(Sbar.save[2,1,],ylim=range(Sbar.CI[,3,])) -abline(h=q[2,1]) -ciEnvelope(1:nt,Sbar.CI[1,3,],Sbar.CI[3,3,],col=col1) +plot(Sbar.save[2, 1, ], ylim = range(Sbar.CI[, 3, ])) +abline(h = q[2, 1]) +ciEnvelope(1:nt, Sbar.CI[1, 3, ], Sbar.CI[3, 3, ], col = col1) #### Looking for autocorrelation between process covariance and forecast covariance -par(mfrow=c(2,2)) -plot(Pa.save[1,1,seq(2,50,2)],Sbar.save[1,1,seq(2,50,2)],pch=16,xlab="Pa",ylab="Sbar",main="Element [1,1]",ylim=c(-1,9),xlim=c(0,1.5)) -points(Pa.save[1,1,seq(1,50,2)],Sbar.save[1,1,seq(1,50,2)],col="blue",pch=16) -abline(h=1) -abline(0,1) -plot(Pa.save[1,2,],Sbar.save[1,2,],pch=16,xlab="Pa",ylab="Sbar",main="Element [1,2]") -abline(h=0) -plot(Pa.save[2,1,],Sbar.save[2,1,],pch=16,xlab="Pa",ylab="Sbar",main="Element [2,1]") -abline(h=0) -plot(Pa.save[2,2,],Sbar.save[2,2,],pch=16,xlab="Pa",ylab="Sbar",main="Element [2,2]") -abline(h=1) +par(mfrow = c(2, 2)) +plot(Pa.save[1, 1, seq(2, 50, 2)], Sbar.save[1, 1, seq(2, 50, 2)], pch = 16, xlab = "Pa", ylab = "Sbar", main = "Element [1,1]", ylim = c(-1, 9), xlim = c(0, 1.5)) +points(Pa.save[1, 1, seq(1, 50, 2)], Sbar.save[1, 1, seq(1, 50, 2)], col = "blue", pch = 16) +abline(h = 1) +abline(0, 1) +plot(Pa.save[1, 2, ], Sbar.save[1, 2, ], pch = 16, xlab = "Pa", ylab = "Sbar", main = "Element [1,2]") +abline(h = 0) +plot(Pa.save[2, 1, ], Sbar.save[2, 1, ], pch = 16, xlab = "Pa", ylab = "Sbar", main = "Element [2,1]") +abline(h = 0) +plot(Pa.save[2, 2, ], Sbar.save[2, 2, ], pch = 16, xlab = "Pa", ylab = "Sbar", main = "Element [2,2]") +abline(h = 1) diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/Multi_Site_Constructors.R b/modules/assim.sequential/inst/sda_backup/bmorrison/Multi_Site_Constructors.R index 545cdd3c331..0534dba657f 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/Multi_Site_Constructors.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/Multi_Site_Constructors.R @@ -1,125 +1,125 @@ ##' @title Contruct.Pf ##' @name Contruct.Pf ##' @author Hamze Dokoohaki -##' -##' @param site.ids a vector name of site ids. +##' +##' @param site.ids a vector name of site ids. ##' @param var.names vector names of state variable names. ##' @param X a matrix of state variables. In this matrix rows represent ensembles, while columns show the variables for different sites. ##' @param localization.FUN This is the function that performs the localization of the Pf matrix and it returns a localized matrix with the same dimensions. ##' @description The argument X needs to have an attribute pointing the state variables to their corresponding site. This attribute needs to be called `Site`. ##' At the moment, the cov between state variables at blocks defining the cov between two sites are assumed zero. ##' @return It returns the var-cov matrix of state variables at multiple sites. 
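# --- Editor's note (illustration only, not part of the patch) -------------------------------
# A minimal usage sketch for Contruct.Pf(), defined just below: the ensemble matrix X must
# carry a "Site" attribute naming, for every column, the site its state variable belongs to.
# The site ids, variable names, and ensemble size here are hypothetical; dplyr is assumed to
# be attached because the function body uses %>% and filter().
library(dplyr)
site.ids  <- c("1000000048", "796")
var.names <- c("LAI", "AbvGrndWood")
X <- matrix(rnorm(100 * 4), nrow = 100)                 # 100 ensemble members, 2 sites x 2 variables
attr(X, "Site") <- rep(site.ids, each = length(var.names))
Pf <- Contruct.Pf(site.ids, var.names, X)               # 4 x 4 covariance; cross-site blocks keep only their diagonal
# ---------------------------------------------------------------------------------------------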
-##' @export +##' @export -Contruct.Pf <- function(site.ids, var.names, X, localization.FUN=NULL, t=1, blocked.dis=NULL, ...) { - #setup +Contruct.Pf <- function(site.ids, var.names, X, localization.FUN = NULL, t = 1, blocked.dis = NULL, ...) { + # setup nsite <- length(site.ids) nvariable <- length(var.names) # I will make a big cov matrix and then I will populate it with the cov of each site - pf.matrix <-matrix(0,(nsite*nvariable),(nsite*nvariable)) - + pf.matrix <- matrix(0, (nsite * nvariable), (nsite * nvariable)) + ## This makes the diagonal of our big matrix - first filters out each site, estimates the cov and puts it where it needs to go. - for (site in site.ids){ - #let's find out where this cov (for the current site needs to go in the main cov matrix) - pos.in.matrix <- which(attr(X,"Site") %in% site) - #foreach site let's get the Xs - pf.matrix [pos.in.matrix, pos.in.matrix] <- cov( X [, pos.in.matrix] ,use="complete.obs") + for (site in site.ids) { + # let's find out where this cov (for the current site needs to go in the main cov matrix) + pos.in.matrix <- which(attr(X, "Site") %in% site) + # foreach site let's get the Xs + pf.matrix[pos.in.matrix, pos.in.matrix] <- cov(X[, pos.in.matrix], use = "complete.obs") } - + # This is where we estimate the cov between state variables of different sites - #I put this into a sperate loop so we can have more control over it - site.cov.orders <- expand.grid(site.ids,site.ids) %>% - filter( Var1 != Var2) + # I put this into a sperate loop so we can have more control over it + site.cov.orders <- expand.grid(site.ids, site.ids) %>% + filter(Var1 != Var2) - for (i in 1:nrow(site.cov.orders)){ + for (i in 1:nrow(site.cov.orders)) { # first we need to find out where to put it in the big matrix - rows.in.matrix <- which(attr(X,"Site") %in% site.cov.orders[i,1]) - cols.in.matrix <- which(attr(X,"Site") %in% site.cov.orders[i,2]) - #estimated between these two sites - two.site.cov <- cov( X [, c(rows.in.matrix, cols.in.matrix)],use="complete.obs" )[(nvariable+1):(2*nvariable),1:nvariable] - # I'm setting the off diag to zero - two.site.cov [which(lower.tri(two.site.cov, diag = FALSE),TRUE) %>% rbind (which(upper.tri(two.site.cov,FALSE),TRUE))] <- 0 - #putting it back to the main matrix - pf.matrix [rows.in.matrix, cols.in.matrix] <- two.site.cov + rows.in.matrix <- which(attr(X, "Site") %in% site.cov.orders[i, 1]) + cols.in.matrix <- which(attr(X, "Site") %in% site.cov.orders[i, 2]) + # estimated between these two sites + two.site.cov <- cov(X[, c(rows.in.matrix, cols.in.matrix)], use = "complete.obs")[(nvariable + 1):(2 * nvariable), 1:nvariable] + # I'm setting the off diag to zero + two.site.cov[which(lower.tri(two.site.cov, diag = FALSE), TRUE) %>% rbind(which(upper.tri(two.site.cov, FALSE), TRUE))] <- 0 + # putting it back to the main matrix + pf.matrix[rows.in.matrix, cols.in.matrix] <- two.site.cov } - + # if I see that there is a localization function passed to this - I run it by the function. if (!is.null(localization.FUN)) { - pf.matrix.out <- localization.FUN (pf.matrix, blocked.dis, ...) - } else{ + pf.matrix.out <- localization.FUN(pf.matrix, blocked.dis, ...) 
+ } else { pf.matrix.out <- pf.matrix } - + # adding labels to rownames and colnames - labelss <- paste0(rep(var.names, length(site.ids)) %>% as.character(),"(", - rep(site.ids, each=length(var.names)),")") - - colnames(pf.matrix.out ) <-labelss - rownames(pf.matrix.out ) <-labelss - - return(pf.matrix.out) + labelss <- paste0( + rep(var.names, length(site.ids)) %>% as.character(), "(", + rep(site.ids, each = length(var.names)), ")" + ) + colnames(pf.matrix.out) <- labelss + rownames(pf.matrix.out) <- labelss + + return(pf.matrix.out) } ##' @title Construct.R ##' @name Construct.R ##' @author Hamze Dokoohaki -##' -##' @param site.ids a vector name of site ids +##' +##' @param site.ids a vector name of site ids ##' @param var.names vector names of state variable names ##' @param obs.t.mean list of vector of means for the time t for different sites. ##' @param obs.t.cov list of list of cov for the time for different sites. -##’ -##' +## ’ +##' ##' @description Make sure that both lists are named with siteids. -##' +##' ##' @return This function returns a list with Y and R ready to be sent to the analysis functions. ##' @export -Construct.R<-function(site.ids, var.names, obs.t.mean, obs.t.cov){ - +Construct.R <- function(site.ids, var.names, obs.t.mean, obs.t.cov) { # keeps Hs of sites - site.specific.Rs <-list() + site.specific.Rs <- list() # nsite <- length(site.ids) # nvariable <- length(var.names) - Y<-c() - - for (site in site.ids){ - choose <- sapply(var.names, agrep, x=names(obs.t.mean[[site]]), max=1, USE.NAMES = FALSE) %>% unlist + Y <- c() + + for (site in site.ids) { + choose <- sapply(var.names, agrep, x = names(obs.t.mean[[site]]), max = 1, USE.NAMES = FALSE) %>% unlist() # if there is no obs for this site - if(length(choose)==0){ - next; - }else{ + if (length(choose) == 0) { + next + } else { Y <- c(Y, unlist(obs.t.mean[[site]][choose])) # collecting them - site.specific.Rs <- c(site.specific.Rs, list(as.matrix(obs.t.cov[[site]][choose,choose])) ) - } - #make block matrix out of our collection - R <- Matrix::bdiag(site.specific.Rs) %>% as.matrix() + site.specific.Rs <- c(site.specific.Rs, list(as.matrix(obs.t.cov[[site]][choose, choose]))) } + # make block matrix out of our collection + R <- Matrix::bdiag(site.specific.Rs) %>% as.matrix() + } - return(list(Y=Y, R=R)) + return(list(Y = Y, R = R)) } ##' @title block_matrix ##' @name block_matrix ##' @author Guy J. Abel -##' +##' ##' @param x Vector of numbers to identify each block. ##' @param b Numeric value for the size of the blocks within the matrix ordered depending on byrow ##' @param byrow logical value. If FALSE (the default) the blocks are filled by columns, otherwise the blocks in the matrix are filled by rows. ##' @param dimnames Character string of name attribute for the basis of the block matrix. If NULL a vector of the same length of b provides the basis of row and column names.#'. -##’ -##' +## ’ +##' ##' @description This function is adopted from migest package. -##' +##' ##' @return Returns a matrix with block sizes determined by the b argument. Each block is filled with the same value taken from x. 
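# --- Editor's note (illustration only, not part of the patch) -------------------------------
# Tiny example of the behaviour described above for block_matrix(): each element of x is
# expanded into a constant block whose size comes from b. Values here are made up.
M <- block_matrix(x = 1:4, b = c(2, 3))
dim(M)        # 5 x 5: block sizes 2 and 3 along both rows and columns
M[1:2, 1:2]   # the top-left 2 x 2 block is filled entirely with x[1]
# ---------------------------------------------------------------------------------------------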
##' @export -block_matrix <- function (x = NULL, b = NULL, byrow = FALSE, dimnames = NULL) { +block_matrix <- function(x = NULL, b = NULL, byrow = FALSE, dimnames = NULL) { n <- length(b) bb <- rep(1:n, times = b) dn <- NULL @@ -156,91 +156,90 @@ block_matrix <- function (x = NULL, b = NULL, byrow = FALSE, dimnames = NULL) { ##' @title Construct.H.multisite ##' @name Construct.H.multisite ##' @author Hamze -##' -##' @param site.ids a vector name of site ids +##' +##' @param site.ids a vector name of site ids ##' @param var.names vector names of state variable names -##' @param obs.t.mean list of vector of means for the time t for different sites. -##' +##' @param obs.t.mean list of vector of means for the time t for different sites. +##' ##' @description This function is makes the blocked mapping function. -##' +##' ##' @return Returns a matrix with block sizes determined by the b argument. Each block is filled with the same value taken from x. ##' @export -Construct.H.multisite <- function(site.ids, var.names, obs.t.mean){ - +Construct.H.multisite <- function(site.ids, var.names, obs.t.mean) { site.ids.with.data <- names(obs.t.mean) site.specific.Hs <- list() - - + + nsite <- length(site.ids) # number of sites - nsite.ids.with.data <-length(site.ids.with.data) # number of sites with data + nsite.ids.with.data <- length(site.ids.with.data) # number of sites with data nvariable <- length(var.names) - #This is used inside the loop below for moving between the sites when populating the big H matrix - nobs <- obs.t.mean %>% map_dbl(~length(.x)) %>% max # this gives me the max number of obs at sites - nobstotal<-obs.t.mean %>% purrr::flatten() %>% length() # this gives me the total number of obs - - #Having the total number of obs as the row number - H <- matrix(0, nobstotal, (nvariable*nsite)) - j<-1 - - for(i in seq_along(site.ids)) - { + # This is used inside the loop below for moving between the sites when populating the big H matrix + nobs <- obs.t.mean %>% + map_dbl(~ length(.x)) %>% + max() # this gives me the max number of obs at sites + nobstotal <- obs.t.mean %>% + purrr::flatten() %>% + length() # this gives me the total number of obs + + # Having the total number of obs as the row number + H <- matrix(0, nobstotal, (nvariable * nsite)) + j <- 1 + + for (i in seq_along(site.ids)) + { site <- site.ids[i] obs.names <- names(obs.t.mean[[site]]) - - if(is.null(obs.names)) next; - - if (length(obs.names) == 1) - { - + + if (is.null(obs.names)) next + + if (length(obs.names) == 1) { # choose <- sapply(var.names, agrep, x = names(obs.t.mean[[site]]), # max = 1, USE.NAMES = FALSE) %>% unlist - choose.col <- sapply(obs.names, agrep, x = var.names, max = 1, USE.NAMES = FALSE) %>% unlist - choose.row <- sapply(var.names, agrep, x = obs.names, max = 1, USE.NAMES = FALSE) %>% unlist - + choose.col <- sapply(obs.names, agrep, x = var.names, max = 1, USE.NAMES = FALSE) %>% unlist() + choose.row <- sapply(var.names, agrep, x = obs.names, max = 1, USE.NAMES = FALSE) %>% unlist() + # empty matrix for this site H.this.site <- matrix(0, nrow(H), nvariable) # fill in the ones based on choose - H.this.site [choose.row, choose.col] <- 1 + H.this.site[choose.row, choose.col] <- 1 } - - if (length(obs.names) > 1) - { + + if (length(obs.names) > 1) { # empty matrix for this site H.this.site <- matrix(0, nobs, nvariable) - + for (n in seq_along(obs.names)) { - choose.col <- sapply(obs.names[n], agrep, x = var.names, max = 1, USE.NAMES = FALSE) %>% unlist - H.this.site[n, choose.col] = 1 - - } - H.this.site = 
do.call(rbind, replicate(length(obs.names), H.this.site, simplify = FALSE)) + choose.col <- sapply(obs.names[n], agrep, x = var.names, max = 1, USE.NAMES = FALSE) %>% unlist() + H.this.site[n, choose.col] <- 1 } + H.this.site <- do.call(rbind, replicate(length(obs.names), H.this.site, simplify = FALSE)) + } # for (n in seq_along(obs.names)) # { # choose.col <- sapply(obs.names[n], agrep, x = var.names, max = 1, USE.NAMES = FALSE) %>% unlist # H.this.obs[n, choose.col] = 1 - # + # # } # H.this.site = data.frame() # for (x in seq_along(obs.names)) # { # test = do.call(rbind, replicate(length(obs.names), H.this.obs[x,], simplify = FALSE)) # H.this.site = rbind(H.this.site, test) - # + # # } # H.this.site = as.matrix(H.this.site) # } - # - pos.row = 1:nobstotal - #pos.row<- ((nobs*j)-(nobs-1)):(nobs*j) - pos.col<- ((nvariable*i)-(nvariable-1)):(nvariable*i) - - H[pos.row,pos.col] <-H.this.site - - j <- j +1 + # + pos.row <- 1:nobstotal + # pos.row<- ((nobs*j)-(nobs-1)):(nobs*j) + pos.col <- ((nvariable * i) - (nvariable - 1)):(nvariable * i) + + H[pos.row, pos.col] <- H.this.site + + j <- j + 1 } - + return(H) } diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey.R b/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey.R index 2472f88ffbc..3ed2118e80e 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey.R @@ -8,9 +8,9 @@ #---------------- Close all devices and delete all variables. -------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -23,7 +23,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -33,7 +33,7 @@ library(dplyr) # temporary step until we get this code integrated into pecan # library(RCurl) -# script <- getURL("https://raw.githubusercontent.com/serbinsh/pecan/download_osu_agb/modules/data.remote/R/LandTrendr.AGB.R", +# script <- getURL("https://raw.githubusercontent.com/serbinsh/pecan/download_osu_agb/modules/data.remote/R/LandTrendr.AGB.R", # ssl.verifypeer = FALSE) # eval(parse(text = script)) #--------------------------------------------------------------------------------------------------# @@ -42,16 +42,16 @@ library(dplyr) #--------------------------------------------------------------------------------------------------# ## set run options, some of these should be tweaked or removed as requirements work_dir <- "/data/bmorrison/sda/lai" -setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions +setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions -# Deifine observation - use existing or generate new? +# Deifine observation - use existing or generate new? # set to a specific file, use that. 
-#observation <- "" -#observation = c("1000000048", "796") -#observation = c("1000000048", "796", "1100", "71", "954", "39") +# observation <- "" +# observation = c("1000000048", "796") +# observation = c("1000000048", "796", "1100", "71", "954", "39") # delete an old run -unlink(c('run','out','SDA'),recursive = T) +unlink(c("run", "out", "SDA"), recursive = T) # grab multi-site XML file settings <- read.settings("pecan_MultiSite_SDA.xml") @@ -64,15 +64,22 @@ settings <- read.settings("pecan_MultiSite_SDA.xml") # observation <- c(observation,obs) # } -observation = "1000000048" +observation <- "1000000048" # what is this step for???? is this to get the site locations for the map?? -if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% - map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} # sample from parameters used for both sensitivity analysis and Ens -get.parameter.samples(settings, - ens.sample.method = settings$ensemble$samplingspace$parameters$method) +get.parameter.samples(settings, + ens.sample.method = settings$ensemble$samplingspace$parameters$method +) ## Aside: if method were set to unscented, would take minimal changes to do UnKF #--------------------------------------------------------------------------------------------------# @@ -82,49 +89,60 @@ get.parameter.samples(settings, #---------AGB----------# PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- observation -suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, - ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) +suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, + ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", + ids = site_ID, .con = con +)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) data_dir <- "/data2/RS_GIS_Data/LandTrendr/LandTrendr_AGB_data" -med_agb_data <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean", - data_dir, product_dates=NULL, file.path(work_dir,"Obs")) -sdev_agb_data <- extract.LandTrendr.AGB(site_info, "stdv", buffer = NULL, fun = "mean", - data_dir, product_dates=NULL, file.path(work_dir,"Obs")) +med_agb_data <- extract.LandTrendr.AGB(site_info, "median", + buffer = NULL, fun = "mean", + data_dir, product_dates = NULL, file.path(work_dir, "Obs") +) +sdev_agb_data <- extract.LandTrendr.AGB(site_info, "stdv", + buffer = NULL, fun = "mean", + data_dir, product_dates = NULL, file.path(work_dir, "Obs") +) med_agb_data_sda <- med_agb_data[[1]] %>% 
filter(Site_ID %in% site.ids) sdev_agb_data_sda <- sdev_agb_data[[1]] %>% filter(Site_ID %in% site.ids) -site.order <- sapply(site.ids,function(x) which(med_agb_data_sda$Site_ID %in% x)) %>% - as.numeric() %>% na.omit() -med_agb_data_sda <- med_agb_data_sda[site.order,] -sdev_agb_data_sda <- sdev_agb_data_sda[site.order,] +site.order <- sapply(site.ids, function(x) which(med_agb_data_sda$Site_ID %in% x)) %>% + as.numeric() %>% + na.omit() +med_agb_data_sda <- med_agb_data_sda[site.order, ] +sdev_agb_data_sda <- sdev_agb_data_sda[site.order, ] #----------LAI----------# -#set directory to output MODIS data too +# set directory to output MODIS data too data_dir <- "/data/bmorrison/sda/lai/modis_lai_data" # get the site location information to grab the correct lat/lons for site + add info to the point_list # ################ Not working on interactive job on MODEX -bety <- list(user='bety', password='bety', host='localhost',dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list(user = "bety", password = "bety", host = "localhost", dbname = "bety", driver = "PostgreSQL", write = TRUE) con <- PEcAn.DB::db.open(bety) bety$con <- con site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = observation, .con = con) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) + ids = observation, .con = con +) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) # site_info = data.frame() # for (i in seq_along(1:length(settings$run))) { @@ -137,108 +155,106 @@ suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) site_IDs <- qry_results$id site_names <- qry_results$sitename site_coords <- data.frame(cbind(qry_results$lon, qry_results$lat)) -site_info = as.data.frame(cbind(site_IDs, site_names, site_coords)) -names(site_info) = c("IDs", "Names", "Longitude", "Latitude") -site_info$Longitude = as.numeric(site_info$Longitude) -site_info$Latitude = as.numeric(site_info$Latitude) +site_info <- as.data.frame(cbind(site_IDs, site_names, site_coords)) +names(site_info) <- c("IDs", "Names", "Longitude", "Latitude") +site_info$Longitude <- as.numeric(site_info$Longitude) +site_info$Latitude <- as.numeric(site_info$Latitude) library(doParallel) -cl <- parallel::makeCluster(5, outfile="") +cl <- parallel::makeCluster(5, outfile = "") doParallel::registerDoParallel(cl) -start = Sys.time() -data = foreach(i=1:nrow(site_info)) %dopar% PEcAn.data.remote::call_MODIS(start_date = "2000/01/01", end_date = "2010/12/31", band = "Lai_500m", product = "MOD15A2H", lat = site_info$Latitude[i], lon = site_info$Longitude[i], size = 0, band_qc = "FparLai_QC", band_sd = "LaiStdDev_500m", package_method = "MODISTools", QC_filter = T) -end = Sys.time() -difference = end-start +start <- Sys.time() +data <- foreach(i = 1:nrow(site_info)) %dopar% PEcAn.data.remote::call_MODIS(start_date = "2000/01/01", end_date = "2010/12/31", band = "Lai_500m", product = "MOD15A2H", lat = site_info$Latitude[i], lon = site_info$Longitude[i], size = 0, band_qc = "FparLai_QC", band_sd = "LaiStdDev_500m", package_method = "MODISTools", QC_filter = T) +end <- Sys.time() +difference <- end - start stopCluster(cl) -output = as.data.frame(data) +output <- as.data.frame(data) # LAI is every 7 days --> calculate the peak LAI for a year for each site -load('/data/bmorrison/sda/lai/modis_lai_data/modis_lai_output_5site.RData') 
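# --- Editor's note (illustration only, not part of the patch) -------------------------------
# A compact sketch of what the peak-LAI loop below computes: for every site ("tile") and
# year, keep the row with the largest LAI value. Assumes dplyr >= 1.0 (for slice_max) in
# addition to the lubridate already attached by this script.
peak_lai_sketch <- output %>%
  dplyr::mutate(year = lubridate::year(as.Date(calendar_date, "%Y-%m-%d"))) %>%
  dplyr::group_by(tile, year) %>%
  dplyr::slice_max(data, n = 1, with_ties = FALSE) %>%
  dplyr::ungroup()
# ---------------------------------------------------------------------------------------------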
+load("/data/bmorrison/sda/lai/modis_lai_data/modis_lai_output_5site.RData") for (i in 1:nrow(site_info)) { - name = as.character(site_info$Names[i], stringsAsFactor = F) - g = which(round(output$lat, digits = 3) == round(site_info$Latitude[i], digits = 3)) - output$tile[g] = name + name <- as.character(site_info$Names[i], stringsAsFactor = F) + g <- which(round(output$lat, digits = 3) == round(site_info$Latitude[i], digits = 3)) + output$tile[g] <- name } -data = output -peak_lai = data.frame() -years = unique(year(as.Date(data$calendar_date, "%Y-%m-%d"))) +data <- output +peak_lai <- data.frame() +years <- unique(year(as.Date(data$calendar_date, "%Y-%m-%d"))) for (i in 1:length(years)) { - year = years[i] - g = grep(data$calendar_date, pattern = year) - d = data[g,] - sites = unique(data$tile) + year <- years[i] + g <- grep(data$calendar_date, pattern = year) + d <- data[g, ] + sites <- unique(data$tile) for (j in 1:length(sites)) { - info = site_info[which(site_info$Names == sites[j]),] - index = which(round(d$lat, digits = 3) == round(info$Latitude, digits = 3) & round(d$lon, digits = 3) == round(info$Longitude, digits = 3)) - - if (length(index) > 0) - { - site = d[index,] - site$band = info$ID - max = which(site$data == max(site$data, na.rm = T)) - peak = site[max[1],] - #peak$data = max - #peak$sd = mean - peak$calendar_date = paste("Year", year, sep = "_") - peak$tile = sites[j] - peak_lai = rbind(peak_lai, peak) + info <- site_info[which(site_info$Names == sites[j]), ] + index <- which(round(d$lat, digits = 3) == round(info$Latitude, digits = 3) & round(d$lon, digits = 3) == round(info$Longitude, digits = 3)) + + if (length(index) > 0) { + site <- d[index, ] + site$band <- info$ID + max <- which(site$data == max(site$data, na.rm = T)) + peak <- site[max[1], ] + # peak$data = max + # peak$sd = mean + peak$calendar_date <- paste("Year", year, sep = "_") + peak$tile <- sites[j] + peak_lai <- rbind(peak_lai, peak) } - } - } # sort the data by site so the correct values are placed into the resized data frames below. 
-peak_lai = peak_lai[order(peak_lai$tile), ] +peak_lai <- peak_lai[order(peak_lai$tile), ] # # separate data into hotdog style dataframes with row == site and columns = info/data for each site -med_lai_data = cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$data)), byrow = T, length(unique(peak_lai$tile)), length(years)))) -colnames(med_lai_data) = c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) +med_lai_data <- cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$data)), byrow = T, length(unique(peak_lai$tile)), length(years)))) +colnames(med_lai_data) <- c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) -sdev_lai_data = cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$sd)), byrow = T, length(unique(peak_lai$tile)), length(years)))) -colnames(sdev_lai_data) = c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) +sdev_lai_data <- cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$sd)), byrow = T, length(unique(peak_lai$tile)), length(years)))) +colnames(sdev_lai_data) <- c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) point_list$med_lai_data <- point_list$med_lai_data[[1]] %>% filter(Site_ID %in% site.ids) point_list$stdv_lai <- point_list$stdv_lai[[1]] %>% filter(Site_ID %in% site.ids) -site.order <- sapply(site.ids,function(x) which(point_list$median_lai$Site_ID %in% x)) %>% - as.numeric() %>% na.omit() -point_list$median_lai <- point_list$median_lai[site.order,] -point_list$stdv_lai <- point_list$stdv_lai[site.order,] - -peak_lai_data_sda = point_list$median_lai -sdev_lai_data_sda = point_list$stdv_lai -# -# -# +site.order <- sapply(site.ids, function(x) which(point_list$median_lai$Site_ID %in% x)) %>% + as.numeric() %>% + na.omit() +point_list$median_lai <- point_list$median_lai[site.order, ] +point_list$stdv_lai <- point_list$stdv_lai[site.order, ] + +peak_lai_data_sda <- point_list$median_lai +sdev_lai_data_sda <- point_list$stdv_lai +# +# +# # # make sure agb and lai only use same dates (for now just to test sda, will fix later) # date_agb = colnames(med_agb_data_sda) # date_lai = colnames(peak_lai_data_sda) -# +# # if (length(date_agb) > length(date_lai)) # { # index = which(!(date_agb %in% date_lai)) # med_agb_data_sda = med_agb_data_sda[,-index] # sdev_agb_data_sda = sdev_agb_data_sda[,-index] -# } +# } # if (length(date_lai) > length(date_agb)) # { # index = which(!(date_lai %in% date_agb)) # peak_lai_data_sda = peak_lai_data_sda[,-index] # sdev_lai_data_sda = sdev_lai_data_sda[,-index] # } -# +# # # combine agb and lai datasets # med_data_sda = list() # med_data_da @@ -247,41 +263,48 @@ sdev_lai_data_sda = point_list$stdv_lai # # point_list$agb$stdv_agb = sdev_agb_data_sda # # point_list$lai$peak_lai = peak_lai_data_sda # # point_list$lai$stdv_lai = sdev_lai_data_sda -# -# # +# +# # # #point_list$agb$median_agb = as.character(point_list$agb$median_agb[[1]]) %>% filter(site_ID %in% site.ids) -# +# -point_list = list() -point_list$median_lai = med_lai_data -point_list$sdev_lai = sdev_lai_data +point_list <- list() +point_list$median_lai <- med_lai_data +point_list$sdev_lai <- sdev_lai_data point_list$median_lai <- point_list$median_lai[[1]] %>% filter(site_ID %in% site.ids) point_list$stdv_lai <- point_list$stdv_lai[[1]] %>% filter(Site_ID %in% site.ids) -site.order <- sapply(site.ids,function(x) which(point_list$median_lai$Site_ID %in% x)) %>% - as.numeric() %>% na.omit() 
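# --- Editor's note (illustration only, not part of the patch) -------------------------------
# Shape of the observation objects assembled a few lines below and then handed to
# sda.enkf.multisite(): obs.mean is a list keyed by date, then by site id, holding a
# one-column data.frame per site; obs.cov has the same nesting and holds the observation
# error variance as a matrix. Values here are invented.
obs.mean_example <- list("2001/12/31" = list("1000000048" = data.frame(LAI = 3.2)))
obs.cov_example  <- list("2001/12/31" = list("1000000048" = as.matrix(0.4^2)))
# ---------------------------------------------------------------------------------------------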
-point_list$median_lai <- point_list$median_lai[site.order,] -point_list$stdv_lai <- point_list$stdv_lai[site.order,] +site.order <- sapply(site.ids, function(x) which(point_list$median_lai$Site_ID %in% x)) %>% + as.numeric() %>% + na.omit() +point_list$median_lai <- point_list$median_lai[site.order, ] +point_list$stdv_lai <- point_list$stdv_lai[site.order, ] -med_lai_data_sda = point_list$median_lai -sdev_lai_data_sda = point_list$sdev_lai +med_lai_data_sda <- point_list$median_lai +sdev_lai_data_sda <- point_list$sdev_lai # truning lists to dfs for both mean and cov -date.obs <- strsplit(names(med_lai_data_sda),"_")[3:length(med_lai_data_sda)] %>% map_chr(~.x[2]) %>% paste0(.,"/12/31") +date.obs <- strsplit(names(med_lai_data_sda), "_")[3:length(med_lai_data_sda)] %>% + map_chr(~ .x[2]) %>% + paste0(., "/12/31") obs.mean <- names(med_lai_data_sda)[3:length(med_lai_data_sda)] %>% - map(function(namesl){ + map(function(namesl) { ((med_lai_data_sda)[[namesl]] %>% - map(~.x %>% as.data.frame %>% `colnames<-`(c('LAI'))) %>% - setNames(site.ids[1:length(.)])) - }) %>% setNames(date.obs) - -obs.cov <-names(sdev_lai_data_sda)[3:length(sdev_lai_data_sda)] %>% + map(~ .x %>% + as.data.frame() %>% + `colnames<-`(c("LAI"))) %>% + setNames(site.ids[1:length(.)])) + }) %>% + setNames(date.obs) + +obs.cov <- names(sdev_lai_data_sda)[3:length(sdev_lai_data_sda)] %>% map(function(namesl) { ((sdev_lai_data_sda)[[namesl]] %>% - map( ~ (.x) ^ 2%>% as.matrix()) %>% - setNames(site.ids[1:length(.)])) - }) %>% setNames(date.obs) + map(~ (.x)^2 %>% as.matrix()) %>% + setNames(site.ids[1:length(.)])) + }) %>% + setNames(date.obs) #--------------------------------------------------------------------------------------------------# @@ -291,7 +314,7 @@ obs.cov <-names(sdev_lai_data_sda)[3:length(sdev_lai_data_sda)] %>% new.settings <- PEcAn.settings::prepare.settings(settings) #--------------------------------------------------------------------------------------------------# -#Construct.R(site.ids, "LAI", obs.mean[[1]], obs.cov[[1]]) +# Construct.R(site.ids, "LAI", obs.mean[[1]], obs.cov[[1]]) @@ -310,18 +333,22 @@ new.settings <- PEcAn.settings::prepare.settings(settings) # debug=F, # pause=F)) -#unlink(c('run','out','SDA'),recursive = T) +# unlink(c('run','out','SDA'),recursive = T) -sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, - control=list(trace=T, - FF=F, - interactivePlot=F, - TimeseriesPlot=T, - BiasPlot=F, - plot.title="Sobol sampling - 5sites/15 Ensemble - LAI", - facet.plots=T, - debug=T, - pause=F)) +sda.enkf.multisite(new.settings, + obs.mean = obs.mean, obs.cov = obs.cov, + control = list( + trace = T, + FF = F, + interactivePlot = F, + TimeseriesPlot = T, + BiasPlot = F, + plot.title = "Sobol sampling - 5sites/15 Ensemble - LAI", + facet.plots = T, + debug = T, + pause = F + ) +) #--------------------------------------------------------------------------------------------------# @@ -331,8 +358,10 @@ sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, ## Wrap up # Send email if configured if (!is.null(settings$email) && !is.null(settings$email$to) && (settings$email$to != "")) { - sendmail(settings$email$from, settings$email$to, - paste0("SDA workflow has finished executing at ", base::date())) + sendmail( + settings$email$from, settings$email$to, + paste0("SDA workflow has finished executing at ", base::date()) + ) } #--------------------------------------------------------------------------------------------------# diff --git 
a/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_AGB_LAI.R b/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_AGB_LAI.R index b2ba9da9070..4352f74cb67 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_AGB_LAI.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_AGB_LAI.R @@ -8,9 +8,9 @@ #---------------- Close all devices and delete all variables. -------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -23,7 +23,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -33,7 +33,7 @@ library(dplyr) # temporary step until we get this code integrated into pecan # library(RCurl) -# script <- getURL("https://raw.githubusercontent.com/serbinsh/pecan/download_osu_agb/modules/data.remote/R/LandTrendr.AGB.R", +# script <- getURL("https://raw.githubusercontent.com/serbinsh/pecan/download_osu_agb/modules/data.remote/R/LandTrendr.AGB.R", # ssl.verifypeer = FALSE) # eval(parse(text = script)) #--------------------------------------------------------------------------------------------------# @@ -42,16 +42,16 @@ library(dplyr) #--------------------------------------------------------------------------------------------------# ## set run options, some of these should be tweaked or removed as requirements work_dir <- "/data/bmorrison/sda/lai" -setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions +setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions -# Deifine observation - use existing or generate new? +# Deifine observation - use existing or generate new? # set to a specific file, use that. -#observation <- "" -#observation = c("1000000048", "796") -#observation = c("1000000048", "796", "1100", "71", "954", "39") +# observation <- "" +# observation = c("1000000048", "796") +# observation = c("1000000048", "796", "1100", "71", "954", "39") # delete an old run -unlink(c('run','out','SDA'),recursive = T) +unlink(c("run", "out", "SDA"), recursive = T) # grab multi-site XML file settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB.xml") @@ -59,20 +59,27 @@ settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB.xml") # doesn't work for one site observation <- c() for (i in seq_along(1:length(settings$run))) { - command <- paste0("settings$run$settings.",i,"$site$id") - obs <- eval(parse(text=command)) - observation <- c(observation,obs) + command <- paste0("settings$run$settings.", i, "$site$id") + obs <- eval(parse(text = command)) + observation <- c(observation, obs) } -#observation = "1000000048" +# observation = "1000000048" # what is this step for???? is this to get the site locations for the map?? 
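# --- Editor's note (illustration only, not part of the patch) -------------------------------
# Looking ahead: later in this script the LAI and AGB observations are merged, so each
# site/date entry in obs.mean becomes a one-row, two-column data.frame and each entry in
# obs.cov a 2 x 2 error covariance whose off-diagonal placeholders (h1, h2) are zero.
# Values below are invented.
obs.mean_entry <- data.frame(LAI = 3.2, AbvGrndWood = 85)
obs.cov_entry  <- diag(c(0.4, 9.0)^2)   # var(LAI), var(AbvGrndWood)
# ---------------------------------------------------------------------------------------------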
-if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% - map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} # sample from parameters used for both sensitivity analysis and Ens -get.parameter.samples(settings, - ens.sample.method = settings$ensemble$samplingspace$parameters$method) +get.parameter.samples(settings, + ens.sample.method = settings$ensemble$samplingspace$parameters$method +) ## Aside: if method were set to unscented, would take minimal changes to do UnKF #--------------------------------------------------------------------------------------------------# @@ -87,39 +94,44 @@ data_dir <- "/data/bmorrison/sda/lai/modis_lai_data" # ################ Not working on interactive job on MODEX PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- observation suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) + ids = site_ID, .con = con +)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) # data_dir <- "/data2/RS_GIS_Data/LandTrendr/LandTrendr_AGB_data" # med_agb_data <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean", # data_dir, product_dates=NULL, file.path(work_dir,"Obs")) # sdev_agb_data <- extract.LandTrendr.AGB(site_info, "stdv", buffer = NULL, fun = "mean", # data_dir, product_dates=NULL, file.path(work_dir,"Obs")) -# -# -# +# +# +# # med_agb_data_sda <- med_agb_data[[1]] %>% filter(Site_ID %in% site.ids) # sdev_agb_data_sda <- sdev_agb_data[[1]] %>% filter(Site_ID %in% site.ids) # site.order <- sapply(site.ids,function(x) which(med_agb_data_sda$Site_ID %in% x)) %>% # as.numeric() %>% na.omit() # med_agb_data_sda <- med_agb_data_sda[site.order,] # sdev_agb_data_sda <- sdev_agb_data_sda[site.order,] -# +# # save(med_agb_data_sda, file = '/data/bmorrison/sda/lai/modis_lai_data/med_agb_data_5sites.Rdata') # save(sdev_agb_data_sda, file = '/data/bmorrison/sda/lai/modis_lai_data/sdev_agb_data_5sites.Rdata') -load('/data/bmorrison/sda/lai/modis_lai_data/med_agb_data_5sites.Rdata') -load( '/data/bmorrison/sda/lai/modis_lai_data/sdev_agb_data_5sites.Rdata') +load("/data/bmorrison/sda/lai/modis_lai_data/med_agb_data_5sites.Rdata") +load("/data/bmorrison/sda/lai/modis_lai_data/sdev_agb_data_5sites.Rdata") # med_agb_data_sda = med_agb_data_sda[1,] # sdev_agb_data_sda = sdev_agb_data_sda[1,] @@ -128,163 +140,162 @@ load( '/data/bmorrison/sda/lai/modis_lai_data/sdev_agb_data_5sites.Rdata') # library(doParallel) # cl <- parallel::makeCluster(5, outfile="") 
# doParallel::registerDoParallel(cl) -# +# # start = Sys.time() # data = foreach(i=1:length(site_info$site_id), .combine = rbind) %dopar% PEcAn.data.remote::call_MODIS(start_date = "2000/01/01", end_date = "2017/12/31", band = "Lai_500m", product = "MOD15A2H", lat = site_info$lat[i], lon = site_info$lon[i], size = 0, band_qc = "FparLai_QC", band_sd = "LaiStdDev_500m", package_method = "MODISTools", QC_filter = T, progress = T) # end = Sys.time() # difference = end-start # stopCluster(cl) -#for 1 site -#output2 = PEcAn.data.remote::call_MODIS(start_date = "2000/01/01", end_date = "2010/12/31", band = "Lai_500m", product = "MOD15A2H", lat = site_info$lat[i], lon = site_info$lon[i], size = 0, band_qc = "FparLai_QC", band_sd = "LaiStdDev_500m", package_method = "MODISTools", QC_filter = T) +# for 1 site +# output2 = PEcAn.data.remote::call_MODIS(start_date = "2000/01/01", end_date = "2010/12/31", band = "Lai_500m", product = "MOD15A2H", lat = site_info$lat[i], lon = site_info$lon[i], size = 0, band_qc = "FparLai_QC", band_sd = "LaiStdDev_500m", package_method = "MODISTools", QC_filter = T) # output = as.data.frame(data) # save(output, file = '/data/bmorrison/sda/lai/modis_lai_data/modis_lai_output_5sites.Rdata') -load('/data/bmorrison/sda/lai/modis_lai_data/modis_lai_output_5sites.Rdata') +load("/data/bmorrison/sda/lai/modis_lai_data/modis_lai_output_5sites.Rdata") -#rename tiles by actual site name +# rename tiles by actual site name for (i in 1:length(site_info$site_name)) { - name = as.character(site_info$site_name[i], stringsAsFactor = F) - g = which(round(output$lat, digits = 3) == round(site_info$lat[i], digits = 3)) - output$tile[g] = name + name <- as.character(site_info$site_name[i], stringsAsFactor = F) + g <- which(round(output$lat, digits = 3) == round(site_info$lat[i], digits = 3)) + output$tile[g] <- name } # compute peak lai per year -data = output -peak_lai = data.frame() -years = unique(year(as.Date(data$calendar_date, "%Y-%m-%d"))) +data <- output +peak_lai <- data.frame() +years <- unique(year(as.Date(data$calendar_date, "%Y-%m-%d"))) for (i in 1:length(years)) { - year = years[i] - g = grep(data$calendar_date, pattern = year) - d = data[g,] - sites = unique(data$tile) - for (j in 1:length(sites)) - { - index = which(round(d$lat, digits = 3) == round(site_info$lat[j], digits = 3) & round(d$lon, digits = 3) == round(site_info$lon[j], digits = 3)) - #info = site_info[which(site_info$site_name == sites[j]),] - #index = which(round(d$lat, digits = 3) == round(site_info$lat, digits = 3) & round(d$lon, digits = 3) == round(site_info$lon, digits = 3)) - - if (length(index) > 0) - { - site = d[index,] - site$band = site_info$site_id[j] - max = which(site$data == max(site$data, na.rm = T)) - peak = site[max[1],] - #peak$data = max - #peak$sd = mean - peak$calendar_date = paste("Year", year, sep = "_") - peak$tile = sites[j] - peak_lai = rbind(peak_lai, peak) - } - + year <- years[i] + g <- grep(data$calendar_date, pattern = year) + d <- data[g, ] + sites <- unique(data$tile) + for (j in 1:length(sites)) + { + index <- which(round(d$lat, digits = 3) == round(site_info$lat[j], digits = 3) & round(d$lon, digits = 3) == round(site_info$lon[j], digits = 3)) + # info = site_info[which(site_info$site_name == sites[j]),] + # index = which(round(d$lat, digits = 3) == round(site_info$lat, digits = 3) & round(d$lon, digits = 3) == round(site_info$lon, digits = 3)) + + if (length(index) > 0) { + site <- d[index, ] + site$band <- site_info$site_id[j] + max <- which(site$data == max(site$data, 
na.rm = T)) + peak <- site[max[1], ] + # peak$data = max + # peak$sd = mean + peak$calendar_date <- paste("Year", year, sep = "_") + peak$tile <- sites[j] + peak_lai <- rbind(peak_lai, peak) } - + } } # sort the data by site so the correct values are placed into the resized data frames below. -peak_lai = peak_lai[order(peak_lai$tile), ] +peak_lai <- peak_lai[order(peak_lai$tile), ] # # separate data into hotdog style dataframes with row == site and columns = info/data for each site -med_lai_data = cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$data)), byrow = T, length(unique(peak_lai$tile)), length(years)))) -colnames(med_lai_data) = c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) -med_lai_data$Site_ID = as.character(med_lai_data$Site_ID) -med_lai_data = list(med_lai_data) +med_lai_data <- cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$data)), byrow = T, length(unique(peak_lai$tile)), length(years)))) +colnames(med_lai_data) <- c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) +med_lai_data$Site_ID <- as.character(med_lai_data$Site_ID) +med_lai_data <- list(med_lai_data) -sdev_lai_data = cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$sd)), byrow = T, length(unique(peak_lai$tile)), length(years)))) -colnames(sdev_lai_data) = c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) -sdev_lai_data$Site_ID = as.character(sdev_lai_data$Site_ID) -sdev_lai_data = list(sdev_lai_data) +sdev_lai_data <- cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$sd)), byrow = T, length(unique(peak_lai$tile)), length(years)))) +colnames(sdev_lai_data) <- c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) +sdev_lai_data$Site_ID <- as.character(sdev_lai_data$Site_ID) +sdev_lai_data <- list(sdev_lai_data) -#med_lai_data = list(med_lai_data) +# med_lai_data = list(med_lai_data) med_lai_data_sda <- med_lai_data[[1]] %>% filter(Site_ID %in% site.ids) sdev_lai_data_sda <- sdev_lai_data[[1]] %>% filter(Site_ID %in% site.ids) -site.order <- sapply(site.ids,function(x) which(med_lai_data_sda$Site_ID %in% x)) %>% - as.numeric() %>% na.omit() -med_lai_data_sda <- med_lai_data_sda[site.order,] -sdev_lai_data_sda <- sdev_lai_data_sda[site.order,] +site.order <- sapply(site.ids, function(x) which(med_lai_data_sda$Site_ID %in% x)) %>% + as.numeric() %>% + na.omit() +med_lai_data_sda <- med_lai_data_sda[site.order, ] +sdev_lai_data_sda <- sdev_lai_data_sda[site.order, ] -#make sure agb and lai only use same dates (for now just to test sda, will fix later) -date_agb = colnames(med_agb_data_sda) -date_lai = colnames(med_lai_data_sda) +# make sure agb and lai only use same dates (for now just to test sda, will fix later) +date_agb <- colnames(med_agb_data_sda) +date_lai <- colnames(med_lai_data_sda) -if (length(date_agb) > length(date_lai)) -{ - index = which(!(date_agb %in% date_lai)) - med_agb_data_sda = med_agb_data_sda[,-index] - sdev_agb_data_sda = sdev_agb_data_sda[,-index] +if (length(date_agb) > length(date_lai)) { + index <- which(!(date_agb %in% date_lai)) + med_agb_data_sda <- med_agb_data_sda[, -index] + sdev_agb_data_sda <- sdev_agb_data_sda[, -index] +} +if (length(date_lai) > length(date_agb)) { + index <- which(!(date_lai %in% date_agb)) + med_lai_data_sda <- med_lai_data_sda[, -index] + sdev_lai_data_sda <- sdev_lai_data_sda[, -index] } -if (length(date_lai) > length(date_agb)) -{ - index = which(!(date_lai %in% 
date_agb)) - med_lai_data_sda = med_lai_data_sda[,-index] - sdev_lai_data_sda = sdev_lai_data_sda[,-index] -} ### REFORMAT ALL DATA BY YEAR INSTEAD OF SITE HOTDOG STYLE. COMBINE AGB + LAI INTO 1 MED + 1 SDEV LIST(S). -med_data = as.data.frame(cbind(colnames(med_lai_data_sda[,3:ncol(med_lai_data_sda)]), med_lai_data_sda$Site_ID, unlist(med_lai_data_sda[,3:ncol(med_lai_data_sda)]), unlist(med_agb_data_sda[,3:ncol(med_agb_data_sda)])), row.names = F, stringsAsFactors = F) -names(med_data) = c("date", "site_id", "med_lai", "med_agb") -med_data = med_data[order(med_data$date),] -med_data$date = as.character(med_data$date) -med_data$site_id = as.character(med_data$site_id, stringsAsFactors = F) -med_data$med_lai = as.numeric(med_data$med_lai, stringsAsFactors = F) -med_data$med_agb = as.numeric(med_data$med_agb, stringsAsFactors = F) -med_data = med_data %>% +med_data <- as.data.frame(cbind(colnames(med_lai_data_sda[, 3:ncol(med_lai_data_sda)]), med_lai_data_sda$Site_ID, unlist(med_lai_data_sda[, 3:ncol(med_lai_data_sda)]), unlist(med_agb_data_sda[, 3:ncol(med_agb_data_sda)])), row.names = F, stringsAsFactors = F) +names(med_data) <- c("date", "site_id", "med_lai", "med_agb") +med_data <- med_data[order(med_data$date), ] +med_data$date <- as.character(med_data$date) +med_data$site_id <- as.character(med_data$site_id, stringsAsFactors = F) +med_data$med_lai <- as.numeric(med_data$med_lai, stringsAsFactors = F) +med_data$med_agb <- as.numeric(med_data$med_agb, stringsAsFactors = F) +med_data <- med_data %>% split(.$date) date.obs <- strsplit(names(med_data), "_") %>% - map_chr(~.x[2]) %>% paste0(.,"/07/15") + map_chr(~ .x[2]) %>% + paste0(., "/07/15") -med_data = names(med_data) %>% - map(function(namesl){ +med_data <- names(med_data) %>% + map(function(namesl) { med_data[[namesl]] %>% split(.$site_id) %>% - map(~.x[3:4] %>% setNames(c("LAI", "AbvGrndWood"))) %>% + map(~ .x[3:4] %>% setNames(c("LAI", "AbvGrndWood"))) %>% setNames(site.ids) - }) %>% setNames(date.obs) + }) %>% + setNames(date.obs) -names = names(med_data) +names <- names(med_data) for (i in 1:length(names)) { for (j in 1:length(names(med_data[[names[1]]]))) { - rownames(med_data[[i]][[j]]) = NULL + rownames(med_data[[i]][[j]]) <- NULL } } -sdev_data = as.data.frame(cbind(colnames(sdev_lai_data_sda[,3:ncol(sdev_lai_data_sda)]), sdev_lai_data_sda$Site_ID, unlist(sdev_lai_data_sda[,3:ncol(sdev_lai_data_sda)]), rep(0, nrow(sdev_lai_data_sda)), rep(0, nrow(sdev_lai_data_sda)),unlist(sdev_agb_data_sda[,3:ncol(sdev_agb_data_sda)])), row.names = F, stringsAsFactors =F) -names(sdev_data) = c("date", "site_id", "sdev_lai", "h1", "h2", "sdev_agb") -sdev_data = sdev_data[order(sdev_data$date),] -sdev_data$date = as.character(sdev_data$date, stringsAsFactors = F) -sdev_data$site_id = as.character(sdev_data$site_id, stringsAsFactors = F) -sdev_data$sdev_lai = as.numeric(sdev_data$sdev_lai, stringsAsFactors = F) -sdev_data$sdev_agb = as.numeric(sdev_data$sdev_agb, stringsAsFactors = F) -sdev_data$h1 = as.numeric(sdev_data$h1) -sdev_data$h2 = as.numeric(sdev_data$h2) -sdev_data = sdev_data %>% - split(.$date) - +sdev_data <- as.data.frame(cbind(colnames(sdev_lai_data_sda[, 3:ncol(sdev_lai_data_sda)]), sdev_lai_data_sda$Site_ID, unlist(sdev_lai_data_sda[, 3:ncol(sdev_lai_data_sda)]), rep(0, nrow(sdev_lai_data_sda)), rep(0, nrow(sdev_lai_data_sda)), unlist(sdev_agb_data_sda[, 3:ncol(sdev_agb_data_sda)])), row.names = F, stringsAsFactors = F) +names(sdev_data) <- c("date", "site_id", "sdev_lai", "h1", "h2", "sdev_agb") +sdev_data <- 
sdev_data[order(sdev_data$date), ] +sdev_data$date <- as.character(sdev_data$date, stringsAsFactors = F) +sdev_data$site_id <- as.character(sdev_data$site_id, stringsAsFactors = F) +sdev_data$sdev_lai <- as.numeric(sdev_data$sdev_lai, stringsAsFactors = F) +sdev_data$sdev_agb <- as.numeric(sdev_data$sdev_agb, stringsAsFactors = F) +sdev_data$h1 <- as.numeric(sdev_data$h1) +sdev_data$h2 <- as.numeric(sdev_data$h2) +sdev_data <- sdev_data %>% + split(.$date) + -sdev_data = names(sdev_data) %>% - map(function(namesl){ +sdev_data <- names(sdev_data) %>% + map(function(namesl) { sdev_data[[namesl]] %>% split(.$site_id) %>% - map(~matrix(data = .x[3:6]^2, nrow = 2, ncol = 2)) %>% + map(~ matrix(data = .x[3:6]^2, nrow = 2, ncol = 2)) %>% setNames(site.ids) - }) %>% setNames(date.obs) + }) %>% + setNames(date.obs) + - -obs.mean = med_data +obs.mean <- med_data -obs.cov = sdev_data +obs.cov <- sdev_data #--------------------------------------------------------------------------------------------------# @@ -293,7 +304,7 @@ obs.cov = sdev_data new.settings <- PEcAn.settings::prepare.settings(settings) #--------------------------------------------------------------------------------------------------# -#Construct.R(site.ids, "LAI", obs.mean[[1]], obs.cov[[1]]) +# Construct.R(site.ids, "LAI", obs.mean[[1]], obs.cov[[1]]) @@ -301,31 +312,37 @@ new.settings <- PEcAn.settings::prepare.settings(settings) ## Run SDA -#unlink(c('run','out','SDA'),recursive = T) +# unlink(c('run','out','SDA'),recursive = T) -sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, - control=list(trace=TRUE, - FF=FALSE, - interactivePlot=FALSE, - TimeseriesPlot=FALSE, - BiasPlot=FALSE, - plot.title=NULL, - facet.plots=4, - debug=F, - pause=FALSE)) +sda.enkf.multisite(new.settings, + obs.mean = obs.mean, obs.cov = obs.cov, + control = list( + trace = TRUE, + FF = FALSE, + interactivePlot = FALSE, + TimeseriesPlot = FALSE, + BiasPlot = FALSE, + plot.title = NULL, + facet.plots = 4, + debug = F, + pause = FALSE + ) +) #-----------------------------------------------------------------------------------------------# -load('/data/bmorrison/sda/lai/SDA/sda.output.Rdata', verbose = T) +load("/data/bmorrison/sda/lai/SDA/sda.output.Rdata", verbose = T) obs.times <- names(obs.mean) -post.analysis.multisite.ggplot(settings, t, obs.times, obs.mean, obs.cov, FORECAST, ANALYSIS, plot.title=NULL, facetg=4, readsFF=NULL) +post.analysis.multisite.ggplot(settings, t, obs.times, obs.mean, obs.cov, FORECAST, ANALYSIS, plot.title = NULL, facetg = 4, readsFF = NULL) #--------------------------------------------------------------------------------------------------# ## Wrap up # Send email if configured if (!is.null(settings$email) && !is.null(settings$email$to) && (settings$email$to != "")) { - sendmail(settings$email$from, settings$email$to, - paste0("SDA workflow has finished executing at ", base::date())) + sendmail( + settings$email$from, settings$email$to, + paste0("SDA workflow has finished executing at ", base::date()) + ) } #--------------------------------------------------------------------------------------------------# diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_AGB_LAI_2_sites.R b/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_AGB_LAI_2_sites.R index 29375d12a3a..68bc4b390cf 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_AGB_LAI_2_sites.R +++ 
b/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_AGB_LAI_2_sites.R @@ -1,8 +1,7 @@ - #---------------- Close all devices and delete all variables. -------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -15,7 +14,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -25,7 +24,7 @@ library(dplyr) #--------------------------------------------------------------------------------------------------# # delete an old run -unlink(c('run','out','SDA'),recursive = T) +unlink(c("run", "out", "SDA"), recursive = T) # grab multi-site XML file settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB_2_Sites.xml") @@ -33,20 +32,27 @@ settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB_2_Sites.xml") # doesn't work for one site observation <- c() for (i in seq_along(1:length(settings$run))) { - command <- paste0("settings$run$settings.",i,"$site$id") - obs <- eval(parse(text=command)) - observation <- c(observation,obs) + command <- paste0("settings$run$settings.", i, "$site$id") + obs <- eval(parse(text = command)) + observation <- c(observation, obs) } -#observation = "1000000048" +# observation = "1000000048" # what is this step for???? is this to get the site locations for the map?? 
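The loop above and the map() chain just below both do the same thing: they collect the site IDs declared in each settings$run block and return them as a character vector (site.ids), which is later used to name and order the observation lists. A minimal sketch of what the chain produces, using a hand-built toy list in place of the real MultiSettings object (the IDs are examples only):

library(purrr)    # map()
library(magrittr) # %>%

toy_settings <- list(
  list(run = list(site = list(id = "1000000048"))),
  list(run = list(site = list(id = "796")))
)

site.ids <- toy_settings %>%
  map(~ .x[["run"]]) %>%
  map("site") %>%
  map("id") %>%
  unlist() %>%
  as.character()
# site.ids is now c("1000000048", "796")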
-if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% - map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} # sample from parameters used for both sensitivity analysis and Ens -get.parameter.samples(settings, - ens.sample.method = settings$ensemble$samplingspace$parameters$method) +get.parameter.samples(settings, + ens.sample.method = settings$ensemble$samplingspace$parameters$method +) ## Aside: if method were set to unscented, would take minimal changes to do UnKF #--------------------------------------------------------------------------------------------------# @@ -55,179 +61,185 @@ get.parameter.samples(settings, # ################ Not working on interactive job on MODEX PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- observation suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) + ids = site_ID, .con = con +)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) -load('/data/bmorrison/sda/lai/modis_lai_data/med_agb_data.Rdata') -load( '/data/bmorrison/sda/lai/modis_lai_data/sdev_agb_data.Rdata') -load('/data/bmorrison/sda/lai/modis_lai_data/modis_lai_output_2_site.Rdata') +load("/data/bmorrison/sda/lai/modis_lai_data/med_agb_data.Rdata") +load("/data/bmorrison/sda/lai/modis_lai_data/sdev_agb_data.Rdata") +load("/data/bmorrison/sda/lai/modis_lai_data/modis_lai_output_2_site.Rdata") -#rename tiles by actual site name +# rename tiles by actual site name for (i in 1:length(site_info$site_name)) { - name = as.character(site_info$site_name[i], stringsAsFactor = F) - g = which(round(output$lat, digits = 3) == round(site_info$lat[i], digits = 3)) - output$tile[g] = name + name <- as.character(site_info$site_name[i], stringsAsFactor = F) + g <- which(round(output$lat, digits = 3) == round(site_info$lat[i], digits = 3)) + output$tile[g] <- name } # compute peak lai per year -data = output -peak_lai = data.frame() -years = unique(year(as.Date(data$calendar_date, "%Y-%m-%d"))) +data <- output +peak_lai <- data.frame() +years <- unique(year(as.Date(data$calendar_date, "%Y-%m-%d"))) for (i in 1:length(years)) { - year = years[i] - g = grep(data$calendar_date, pattern = year) - d = data[g,] - sites = unique(data$tile) + year <- years[i] + g <- grep(data$calendar_date, pattern = year) + d <- data[g, ] + sites <- unique(data$tile) for (j in 1:length(sites)) { - #info = site_info[which(site_info$site_name == sites[j]),] - index = which(round(d$lat, digits = 3) == round(site_info$lat, 
digits = 3) & round(d$lon, digits = 3) == round(site_info$lon, digits = 3)) - - if (length(index) > 0) - { - site = d[index,] - site$band = site_info$site_id - max = which(site$data == max(site$data, na.rm = T)) - peak = site[max[1],] - #peak$data = max - #peak$sd = mean - peak$calendar_date = paste("Year", year, sep = "_") - peak$tile = sites[j] - peak_lai = rbind(peak_lai, peak) + # info = site_info[which(site_info$site_name == sites[j]),] + index <- which(round(d$lat, digits = 3) == round(site_info$lat, digits = 3) & round(d$lon, digits = 3) == round(site_info$lon, digits = 3)) + + if (length(index) > 0) { + site <- d[index, ] + site$band <- site_info$site_id + max <- which(site$data == max(site$data, na.rm = T)) + peak <- site[max[1], ] + # peak$data = max + # peak$sd = mean + peak$calendar_date <- paste("Year", year, sep = "_") + peak$tile <- sites[j] + peak_lai <- rbind(peak_lai, peak) } - } - } # sort the data by site so the correct values are placed into the resized data frames below. -peak_lai = peak_lai[order(peak_lai$tile), ] +peak_lai <- peak_lai[order(peak_lai$tile), ] # # separate data into hotdog style dataframes with row == site and columns = info/data for each site -med_lai_data = cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$data)), byrow = T, length(unique(peak_lai$tile)), length(years)))) -colnames(med_lai_data) = c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) -med_lai_data$Site_ID = as.character(med_lai_data$Site_ID) -med_lai_data = list(med_lai_data) +med_lai_data <- cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$data)), byrow = T, length(unique(peak_lai$tile)), length(years)))) +colnames(med_lai_data) <- c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) +med_lai_data$Site_ID <- as.character(med_lai_data$Site_ID) +med_lai_data <- list(med_lai_data) -sdev_lai_data = cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$sd)), byrow = T, length(unique(peak_lai$tile)), length(years)))) -colnames(sdev_lai_data) = c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) -sdev_lai_data$Site_ID = as.character(sdev_lai_data$Site_ID) -sdev_lai_data = list(sdev_lai_data) +sdev_lai_data <- cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$sd)), byrow = T, length(unique(peak_lai$tile)), length(years)))) +colnames(sdev_lai_data) <- c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) +sdev_lai_data$Site_ID <- as.character(sdev_lai_data$Site_ID) +sdev_lai_data <- list(sdev_lai_data) -#med_lai_data = list(med_lai_data) +# med_lai_data = list(med_lai_data) med_lai_data_sda <- med_lai_data[[1]] %>% filter(Site_ID %in% site.ids) sdev_lai_data_sda <- sdev_lai_data[[1]] %>% filter(Site_ID %in% site.ids) -site.order <- sapply(site.ids,function(x) which(med_lai_data_sda$Site_ID %in% x)) %>% - as.numeric() %>% na.omit() -med_lai_data_sda <- med_lai_data_sda[site.order,] -sdev_lai_data_sda <- sdev_lai_data_sda[site.order,] +site.order <- sapply(site.ids, function(x) which(med_lai_data_sda$Site_ID %in% x)) %>% + as.numeric() %>% + na.omit() +med_lai_data_sda <- med_lai_data_sda[site.order, ] +sdev_lai_data_sda <- sdev_lai_data_sda[site.order, ] -#make sure agb and lai only use same dates (for now just to test sda, will fix later) -date_agb = colnames(med_agb_data_sda) -date_lai = colnames(med_lai_data_sda) +# make sure agb and lai only use same dates (for now just to test sda, will fix later) +date_agb <- 
colnames(med_agb_data_sda) +date_lai <- colnames(med_lai_data_sda) -if (length(date_agb) > length(date_lai)) -{ - index = which(!(date_agb %in% date_lai)) - med_agb_data_sda = med_agb_data_sda[,-index] - sdev_agb_data_sda = sdev_agb_data_sda[,-index] +if (length(date_agb) > length(date_lai)) { + index <- which(!(date_agb %in% date_lai)) + med_agb_data_sda <- med_agb_data_sda[, -index] + sdev_agb_data_sda <- sdev_agb_data_sda[, -index] +} +if (length(date_lai) > length(date_agb)) { + index <- which(!(date_lai %in% date_agb)) + med_lai_data_sda <- med_lai_data_sda[, -index] + sdev_lai_data_sda <- sdev_lai_data_sda[, -index] } -if (length(date_lai) > length(date_agb)) -{ - index = which(!(date_lai %in% date_agb)) - med_lai_data_sda = med_lai_data_sda[,-index] - sdev_lai_data_sda = sdev_lai_data_sda[,-index] -} ### REFORMAT ALL DATA BY YEAR INSTEAD OF SITE HOTDOG STYLE. COMBINE AGB + LAI INTO 1 MED + 1 SDEV LIST(S). -med_data = as.data.frame(cbind(colnames(med_lai_data_sda[,3:ncol(med_lai_data_sda)]), med_lai_data_sda$Site_ID, unlist(med_agb_data_sda[,3:ncol(med_agb_data_sda)]), unlist(med_lai_data_sda[,3:ncol(med_lai_data_sda)])), row.names = F, stringsAsFactors = F) -names(med_data) = c("date", "site_id", "med_agb", "med_lai") -med_data = med_data[order(med_data$date),] -med_data$date = as.character(med_data$date) -med_data$site_id = as.character(med_data$site_id, stringsAsFactors = F) -med_data$med_lai = as.numeric(med_data$med_lai, stringsAsFactors = F) -med_data$med_agb = as.numeric(med_data$med_agb, stringsAsFactors = F) -med_data = med_data %>% +med_data <- as.data.frame(cbind(colnames(med_lai_data_sda[, 3:ncol(med_lai_data_sda)]), med_lai_data_sda$Site_ID, unlist(med_agb_data_sda[, 3:ncol(med_agb_data_sda)]), unlist(med_lai_data_sda[, 3:ncol(med_lai_data_sda)])), row.names = F, stringsAsFactors = F) +names(med_data) <- c("date", "site_id", "med_agb", "med_lai") +med_data <- med_data[order(med_data$date), ] +med_data$date <- as.character(med_data$date) +med_data$site_id <- as.character(med_data$site_id, stringsAsFactors = F) +med_data$med_lai <- as.numeric(med_data$med_lai, stringsAsFactors = F) +med_data$med_agb <- as.numeric(med_data$med_agb, stringsAsFactors = F) +med_data <- med_data %>% split(.$date) date.obs <- strsplit(names(med_data), "_") %>% - map_chr(~.x[2]) %>% paste0(.,"/07/15") + map_chr(~ .x[2]) %>% + paste0(., "/07/15") -med_data = names(med_data) %>% - map(function(namesl){ +med_data <- names(med_data) %>% + map(function(namesl) { med_data[[namesl]] %>% split(.$site_id) %>% - map(~.x[3:4] %>% setNames(c("AbvGrndWood", "LAI"))) %>% + map(~ .x[3:4] %>% setNames(c("AbvGrndWood", "LAI"))) %>% setNames(site.ids) - }) %>% setNames(date.obs) + }) %>% + setNames(date.obs) -names = names(med_data) +names <- names(med_data) for (i in 1:length(names)) { for (j in 1:length(names(med_data[[names[1]]]))) { - rownames(med_data[[i]][[j]]) = NULL + rownames(med_data[[i]][[j]]) <- NULL } } -sdev_data = as.data.frame(cbind(colnames(sdev_lai_data_sda[,3:ncol(sdev_lai_data_sda)]), sdev_lai_data_sda$Site_ID, unlist(sdev_agb_data_sda[,3:ncol(sdev_agb_data_sda)]), rep(0, nrow(sdev_lai_data_sda)), rep(0, nrow(sdev_lai_data_sda)),unlist(sdev_lai_data_sda[,3:ncol(sdev_lai_data_sda)])), row.names = F, stringsAsFactors =F) -names(sdev_data) = c("date", "site_id", "sdev_agb", "h1", "h2", "sdev_lai") -sdev_data = sdev_data[order(sdev_data$date),] -sdev_data$date = as.character(sdev_data$date, stringsAsFactors = F) -sdev_data$site_id = as.character(sdev_data$site_id, stringsAsFactors = F) 
-sdev_data$sdev_lai = as.numeric(sdev_data$sdev_lai, stringsAsFactors = F) -sdev_data$sdev_agb = as.numeric(sdev_data$sdev_agb, stringsAsFactors = F) -sdev_data$h1 = as.numeric(sdev_data$h1) -sdev_data$h2 = as.numeric(sdev_data$h2) -sdev_data = sdev_data %>% - split(.$date) - -sdev_data = names(sdev_data) %>% - map(function(namesl){ +sdev_data <- as.data.frame(cbind(colnames(sdev_lai_data_sda[, 3:ncol(sdev_lai_data_sda)]), sdev_lai_data_sda$Site_ID, unlist(sdev_agb_data_sda[, 3:ncol(sdev_agb_data_sda)]), rep(0, nrow(sdev_lai_data_sda)), rep(0, nrow(sdev_lai_data_sda)), unlist(sdev_lai_data_sda[, 3:ncol(sdev_lai_data_sda)])), row.names = F, stringsAsFactors = F) +names(sdev_data) <- c("date", "site_id", "sdev_agb", "h1", "h2", "sdev_lai") +sdev_data <- sdev_data[order(sdev_data$date), ] +sdev_data$date <- as.character(sdev_data$date, stringsAsFactors = F) +sdev_data$site_id <- as.character(sdev_data$site_id, stringsAsFactors = F) +sdev_data$sdev_lai <- as.numeric(sdev_data$sdev_lai, stringsAsFactors = F) +sdev_data$sdev_agb <- as.numeric(sdev_data$sdev_agb, stringsAsFactors = F) +sdev_data$h1 <- as.numeric(sdev_data$h1) +sdev_data$h2 <- as.numeric(sdev_data$h2) +sdev_data <- sdev_data %>% + split(.$date) + +sdev_data <- names(sdev_data) %>% + map(function(namesl) { sdev_data[[namesl]] %>% split(.$site_id) %>% - map(~matrix(data = .x[3:6]^2, nrow = 2, ncol = 2)) %>% + map(~ matrix(data = .x[3:6]^2, nrow = 2, ncol = 2)) %>% setNames(site.ids) - }) %>% setNames(date.obs) + }) %>% + setNames(date.obs) -obs.mean = med_data +obs.mean <- med_data -obs.cov = sdev_data +obs.cov <- sdev_data new.settings <- PEcAn.settings::prepare.settings(settings) -#unlink(c('run','out','SDA'),recursive = T) - -sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, - control=list(trace=TRUE, - FF=FALSE, - interactivePlot=FALSE, - TimeseriesPlot=FALSE, - BiasPlot=FALSE, - plot.title=NULL, - facet.plots=4, - debug=F, - pause=FALSE)) - - +# unlink(c('run','out','SDA'),recursive = T) + +sda.enkf.multisite(new.settings, + obs.mean = obs.mean, obs.cov = obs.cov, + control = list( + trace = TRUE, + FF = FALSE, + interactivePlot = FALSE, + TimeseriesPlot = FALSE, + BiasPlot = FALSE, + plot.title = NULL, + facet.plots = 4, + debug = F, + pause = FALSE + ) +) diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_AGB_LAI_2_sites_with_NA.R b/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_AGB_LAI_2_sites_with_NA.R index b533bfb432f..d050fb42dbf 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_AGB_LAI_2_sites_with_NA.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_AGB_LAI_2_sites_with_NA.R @@ -1,8 +1,7 @@ - #---------------- Close all devices and delete all variables. 
-------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -15,7 +14,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -25,7 +24,7 @@ library(dplyr) #--------------------------------------------------------------------------------------------------# # delete an old run -#unlink(c('run','out','SDA'),recursive = T) +# unlink(c('run','out','SDA'),recursive = T) # grab multi-site XML file settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB_2_Sites.xml") @@ -33,20 +32,27 @@ settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB_2_Sites.xml") # doesn't work for one site observation <- c() for (i in seq_along(1:length(settings$run))) { - command <- paste0("settings$run$settings.",i,"$site$id") - obs <- eval(parse(text=command)) - observation <- c(observation,obs) + command <- paste0("settings$run$settings.", i, "$site$id") + obs <- eval(parse(text = command)) + observation <- c(observation, obs) } -#observation = "1000000048" +# observation = "1000000048" # what is this step for???? is this to get the site locations for the map?? -if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% - map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} # sample from parameters used for both sensitivity analysis and Ens -get.parameter.samples(settings, - ens.sample.method = settings$ensemble$samplingspace$parameters$method) +get.parameter.samples(settings, + ens.sample.method = settings$ensemble$samplingspace$parameters$method +) ## Aside: if method were set to unscented, would take minimal changes to do UnKF #--------------------------------------------------------------------------------------------------# @@ -55,95 +61,98 @@ get.parameter.samples(settings, # ################ Not working on interactive job on MODEX PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- observation suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) + ids = site_ID, .con = con +)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +site_info <- list( + site_id = qry_results$id, site_name = 
qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) -load('/data/bmorrison/sda/lai/modis_lai_data/med_agb_data.Rdata') -load( '/data/bmorrison/sda/lai/modis_lai_data/sdev_agb_data.Rdata') -load('/data/bmorrison/sda/lai/modis_lai_data/modis_lai_output_2_site.Rdata') +load("/data/bmorrison/sda/lai/modis_lai_data/med_agb_data.Rdata") +load("/data/bmorrison/sda/lai/modis_lai_data/sdev_agb_data.Rdata") +load("/data/bmorrison/sda/lai/modis_lai_data/modis_lai_output_2_site.Rdata") -#rename tiles by actual site name +# rename tiles by actual site name for (i in 1:length(site_info$site_name)) { - name = as.character(site_info$site_name[i], stringsAsFactor = F) - g = which(round(output$lat, digits = 3) == round(site_info$lat[i], digits = 3)) - output$tile[g] = name + name <- as.character(site_info$site_name[i], stringsAsFactor = F) + g <- which(round(output$lat, digits = 3) == round(site_info$lat[i], digits = 3)) + output$tile[g] <- name } # compute peak lai per year -data = output -peak_lai = data.frame() -years = unique(year(as.Date(data$calendar_date, "%Y-%m-%d"))) +data <- output +peak_lai <- data.frame() +years <- unique(year(as.Date(data$calendar_date, "%Y-%m-%d"))) for (i in 1:length(years)) { - year = years[i] - g = grep(data$calendar_date, pattern = year) - d = data[g,] - sites = unique(data$tile) + year <- years[i] + g <- grep(data$calendar_date, pattern = year) + d <- data[g, ] + sites <- unique(data$tile) for (j in 1:length(sites)) { - #info = site_info[which(site_info$site_name == sites[j]),] - index = which(round(d$lat, digits = 3) == round(site_info$lat[j], digits = 3) & round(d$lon, digits = 3) == round(site_info$lon[j], digits = 3)) - - if (length(index) > 0) - { - site = d[index,] - site$band = site_info$site_id[j] - max = which(site$data == max(site$data[which(site$data <= quantile(site$data, probs = 0.95))], na.rm = T))#max(site$data, na.rm = T)) - peak = site[max[1],] - - peak$calendar_date = paste("Year", year, sep = "_") - peak$tile = sites[j] - peak_lai = rbind(peak_lai, peak) + # info = site_info[which(site_info$site_name == sites[j]),] + index <- which(round(d$lat, digits = 3) == round(site_info$lat[j], digits = 3) & round(d$lon, digits = 3) == round(site_info$lon[j], digits = 3)) + + if (length(index) > 0) { + site <- d[index, ] + site$band <- site_info$site_id[j] + max <- which(site$data == max(site$data[which(site$data <= quantile(site$data, probs = 0.95))], na.rm = T)) # max(site$data, na.rm = T)) + peak <- site[max[1], ] + + peak$calendar_date <- paste("Year", year, sep = "_") + peak$tile <- sites[j] + peak_lai <- rbind(peak_lai, peak) } - } - } # sort the data by site so the correct values are placed into the resized data frames below. 
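For orientation, the resized ("hotdog style") data frames assembled below hold one row per site and one column per year of peak LAI; the sdev_* version has the same shape but holds standard deviations. A hand-built mock-up with invented values (the real rows come from peak_lai):

# illustrative only -- example site IDs, placeholder names, made-up peak LAI values
med_lai_data_example <- data.frame(
  Site_ID   = c("1000000048", "796"),
  Site_Name = c("site_A", "site_B"),
  Year_2001 = c(3.1, 4.2),
  Year_2002 = c(2.9, 4.5),
  stringsAsFactors = FALSE
)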
-peak_lai = peak_lai[order(peak_lai$tile), ] +peak_lai <- peak_lai[order(peak_lai$tile), ] # following the methods of Viskari et al 2015 for LAI sd values -peak_lai$sd[peak_lai$sd <0.66] = 0.66 +peak_lai$sd[peak_lai$sd < 0.66] <- 0.66 # # separate data into hotdog style dataframes with row == site and columns = info/data for each site -med_lai_data = cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$data)), byrow = T, length(unique(peak_lai$tile)), length(years)))) -colnames(med_lai_data) = c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) -med_lai_data$Site_ID = as.character(med_lai_data$Site_ID) -med_lai_data = list(med_lai_data) +med_lai_data <- cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$data)), byrow = T, length(unique(peak_lai$tile)), length(years)))) +colnames(med_lai_data) <- c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) +med_lai_data$Site_ID <- as.character(med_lai_data$Site_ID) +med_lai_data <- list(med_lai_data) -sdev_lai_data = cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$sd)), byrow = T, length(unique(peak_lai$tile)), length(years)))) -colnames(sdev_lai_data) = c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) -sdev_lai_data$Site_ID = as.character(sdev_lai_data$Site_ID) -sdev_lai_data = list(sdev_lai_data) +sdev_lai_data <- cbind(unique(peak_lai$band), unique(peak_lai$tile), as.data.frame(matrix(unlist(t(peak_lai$sd)), byrow = T, length(unique(peak_lai$tile)), length(years)))) +colnames(sdev_lai_data) <- c("Site_ID", "Site_Name", unique(peak_lai$calendar_date)) +sdev_lai_data$Site_ID <- as.character(sdev_lai_data$Site_ID) +sdev_lai_data <- list(sdev_lai_data) -#med_lai_data = list(med_lai_data) +# med_lai_data = list(med_lai_data) med_lai_data_sda <- med_lai_data[[1]] %>% filter(Site_ID %in% site.ids) sdev_lai_data_sda <- sdev_lai_data[[1]] %>% filter(Site_ID %in% site.ids) -site.order <- sapply(site.ids,function(x) which(med_lai_data_sda$Site_ID %in% x)) %>% - as.numeric() %>% na.omit() -med_lai_data_sda <- med_lai_data_sda[site.order,] -sdev_lai_data_sda <- sdev_lai_data_sda[site.order,] +site.order <- sapply(site.ids, function(x) which(med_lai_data_sda$Site_ID %in% x)) %>% + as.numeric() %>% + na.omit() +med_lai_data_sda <- med_lai_data_sda[site.order, ] +sdev_lai_data_sda <- sdev_lai_data_sda[site.order, ] -#make sure agb and lai only use same dates (for now just to test sda, will fix later) -date_agb = colnames(med_agb_data_sda) -date_lai = colnames(med_lai_data_sda) +# make sure agb and lai only use same dates (for now just to test sda, will fix later) +date_agb <- colnames(med_agb_data_sda) +date_lai <- colnames(med_lai_data_sda) # if (length(date_agb) > length(date_lai)) # { @@ -156,164 +165,167 @@ date_lai = colnames(med_lai_data_sda) # index = which(!(date_lai %in% date_agb)) # med_lai_data_sda = med_lai_data_sda[,-index] # sdev_lai_data_sda = sdev_lai_data_sda[,-index] -# } +# } # fix missing data to feed into SDA -colnames = sort(unique(c(date_agb, date_lai))) +colnames <- sort(unique(c(date_agb, date_lai))) -blank = as.data.frame(matrix(NA, nrow = 2, ncol = length(colnames))) -colnames(blank) = colnames +blank <- as.data.frame(matrix(NA, nrow = 2, ncol = length(colnames))) +colnames(blank) <- colnames -lai_same = which(colnames(blank) %in% colnames(med_lai_data_sda))[-(1:2)] -agb_same = which(colnames(blank) %in% colnames(med_agb_data_sda))[-(1:2)] +lai_same <- which(colnames(blank) %in% 
colnames(med_lai_data_sda))[-(1:2)] +agb_same <- which(colnames(blank) %in% colnames(med_agb_data_sda))[-(1:2)] -if (length(agb_same) < length(colnames(blank)[-(1:2)])) -{ - agb_med= blank - agb_sdev = blank - agb_med[,1:2] = med_agb_data_sda[,1:2] - agb_sdev[,1:2] = sdev_agb_data_sda[,1:2] - agb_med[,agb_missing] = med_agb_data_sda[-agb_missing] - agb_sdev[,agb_missing] = sdev_agb_data_sda[-agb_missing] +if (length(agb_same) < length(colnames(blank)[-(1:2)])) { + agb_med <- blank + agb_sdev <- blank + agb_med[, 1:2] <- med_agb_data_sda[, 1:2] + agb_sdev[, 1:2] <- sdev_agb_data_sda[, 1:2] + agb_med[, agb_missing] <- med_agb_data_sda[-agb_missing] + agb_sdev[, agb_missing] <- sdev_agb_data_sda[-agb_missing] } else { - agb_med = med_agb_data_sda - agb_sdev = sdev_agb_data_sda + agb_med <- med_agb_data_sda + agb_sdev <- sdev_agb_data_sda } -if (length(lai_same) < length(colnames(blank)[-(1:2)])) -{ - lai_med = blank - lai_sdev = blank - lai_med[,1:2] = med_lai_data_sda[,1:2] - lai_sdev[,1:2] = sdev_lai_data_sda[,1:2] - lai_med[ ,lai_same] = med_lai_data_sda[,3:ncol(med_lai_data_sda)] - lai_sdev[ ,lai_same] = sdev_lai_data_sda[,3:ncol(sdev_lai_data_sda)] +if (length(lai_same) < length(colnames(blank)[-(1:2)])) { + lai_med <- blank + lai_sdev <- blank + lai_med[, 1:2] <- med_lai_data_sda[, 1:2] + lai_sdev[, 1:2] <- sdev_lai_data_sda[, 1:2] + lai_med[, lai_same] <- med_lai_data_sda[, 3:ncol(med_lai_data_sda)] + lai_sdev[, lai_same] <- sdev_lai_data_sda[, 3:ncol(sdev_lai_data_sda)] } else { - lai_med = med_lai_data_sda - lai_sdev = sdev_lai_data_sda + lai_med <- med_lai_data_sda + lai_sdev <- sdev_lai_data_sda } -med_lai_data_sda = lai_med -med_agb_data_sda = agb_med -sdev_lai_data_sda = lai_sdev -sdev_agb_data_sda = agb_sdev +med_lai_data_sda <- lai_med +med_agb_data_sda <- agb_med +sdev_lai_data_sda <- lai_sdev +sdev_agb_data_sda <- agb_sdev ### REFORMAT ALL DATA BY YEAR INSTEAD OF SITE HOTDOG STYLE. COMBINE AGB + LAI INTO 1 MED + 1 SDEV LIST(S). 
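The reformatting below turns those wide tables into the per-date nested list that sda.enkf.multisite expects: one element per observation date, each holding one element per site with the state-variable means (obs.mean) or their covariance matrix (obs.cov); in this script, sites with missing values later have the NA entries dropped and the covariance matrix reduced accordingly. A minimal hand-built illustration with made-up numbers, using the date and site-ID formats already in use here:

# illustrative only: one date, two example sites, two state variables
obs.mean.example <- list(
  "2001/07/15" = list(
    "1000000048" = data.frame(AbvGrndWood = 95.0, LAI = 3.1),
    "796"        = data.frame(AbvGrndWood = 60.2, LAI = 4.2)
  )
)
obs.cov.example <- list(
  "2001/07/15" = list(
    "1000000048" = diag(c(10.5, 0.66)^2),  # AGB and LAI standard deviations, squared, on the diagonal
    "796"        = diag(c(8.3, 0.70)^2)
  )
)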
-med_data = as.data.frame(cbind(sort(rep(colnames(med_lai_data_sda[,3:ncol(med_lai_data_sda)]), 2)), med_lai_data_sda$Site_ID, unlist(c(med_agb_data_sda[,3:ncol(med_agb_data_sda)]), use.names = F), unlist(c(med_lai_data_sda[,3:ncol(med_lai_data_sda)]), use.names = F))) -names(med_data) = c("date", "site_id", "med_agb", "med_lai") -#med_data = med_data[order(med_data$date),] -med_data$date = as.character(med_data$date) -med_data$site_id = as.character(med_data$site_id, stringsAsFactors = F) -med_data$med_lai = as.numeric(as.character(med_data$med_lai, stringsAsFactors = F))#as.numeric(levels(med_data$med_lai), stringsAsFactors = F)) -med_data$med_agb = as.numeric(as.character(med_data$med_agb, stringsAsFactors = F))#as.numeric(levels(med_data$med_agb), stringsAsFactors = F) -med_data = med_data %>% +med_data <- as.data.frame(cbind(sort(rep(colnames(med_lai_data_sda[, 3:ncol(med_lai_data_sda)]), 2)), med_lai_data_sda$Site_ID, unlist(c(med_agb_data_sda[, 3:ncol(med_agb_data_sda)]), use.names = F), unlist(c(med_lai_data_sda[, 3:ncol(med_lai_data_sda)]), use.names = F))) +names(med_data) <- c("date", "site_id", "med_agb", "med_lai") +# med_data = med_data[order(med_data$date),] +med_data$date <- as.character(med_data$date) +med_data$site_id <- as.character(med_data$site_id, stringsAsFactors = F) +med_data$med_lai <- as.numeric(as.character(med_data$med_lai, stringsAsFactors = F)) # as.numeric(levels(med_data$med_lai), stringsAsFactors = F)) +med_data$med_agb <- as.numeric(as.character(med_data$med_agb, stringsAsFactors = F)) # as.numeric(levels(med_data$med_agb), stringsAsFactors = F) +med_data <- med_data %>% split(.$date) date.obs <- strsplit(names(med_data), "_") %>% - map_chr(~.x[2]) %>% paste0(.,"/07/15") + map_chr(~ .x[2]) %>% + paste0(., "/07/15") -med_data = names(med_data) %>% - map(function(namesl){ +med_data <- names(med_data) %>% + map(function(namesl) { med_data[[namesl]] %>% split(.$site_id) %>% - map(~.x[3:4] %>% setNames(c("AbvGrndWood", "LAI"))) %>% + map(~ .x[3:4] %>% setNames(c("AbvGrndWood", "LAI"))) %>% setNames(site.ids) - }) %>% setNames(date.obs) + }) %>% + setNames(date.obs) -names = names(med_data) +names <- names(med_data) for (i in 1:length(names)) { for (j in 1:length(names(med_data[[names[i]]]))) { - d = med_data[[i]][[j]] + d <- med_data[[i]][[j]] - if (length(which(is.na(d)))>=1) - { - d = d[-which(is.na(d))] + if (length(which(is.na(d))) >= 1) { + d <- d[-which(is.na(d))] } - med_data[[i]][[j]] = d - rownames(med_data[[i]][[j]]) = NULL + med_data[[i]][[j]] <- d + rownames(med_data[[i]][[j]]) <- NULL } } -sdev_data = as.data.frame(cbind(sort(rep(colnames(sdev_lai_data_sda[,3:ncol(sdev_lai_data_sda)]), 2)), sdev_lai_data_sda$Site_ID, unlist(c(sdev_agb_data_sda[,3:ncol(sdev_agb_data_sda)]), use.names = F), rep(0, nrow(sdev_lai_data_sda)), rep(0, nrow(sdev_lai_data_sda)), unlist(c(sdev_lai_data_sda[,3:ncol(sdev_lai_data_sda)]), use.names = F))) # -names(sdev_data) = c("date", "site_id", "sdev_agb","h1", "h2", "sdev_lai") #c("date", "site_id", "sdev_agb", "h1", "h2", "sdev_lai") -sdev_data = sdev_data[order(sdev_data$date),] -sdev_data$date = as.character(sdev_data$date, stringsAsFactors = F) -sdev_data$site_id = as.character(sdev_data$site_id, stringsAsFactors = F) -sdev_data$sdev_lai = as.numeric(as.character(sdev_data$sdev_lai, stringsAsFactors = F)) #as.numeric(sdev_data$sdev_lai, stringsAsFactors = F) -sdev_data$sdev_agb = as.numeric(as.character(sdev_data$sdev_agb, stringsAsFactors = F))#as.numeric(sdev_data$sdev_agb, stringsAsFactors = F) -sdev_data$h1 = 
as.numeric(as.character(sdev_data$h1, stringsAsFactors = F)) -sdev_data$h2 = as.numeric(as.character(sdev_data$h2, stringsAsFactors = F)) +sdev_data <- as.data.frame(cbind(sort(rep(colnames(sdev_lai_data_sda[, 3:ncol(sdev_lai_data_sda)]), 2)), sdev_lai_data_sda$Site_ID, unlist(c(sdev_agb_data_sda[, 3:ncol(sdev_agb_data_sda)]), use.names = F), rep(0, nrow(sdev_lai_data_sda)), rep(0, nrow(sdev_lai_data_sda)), unlist(c(sdev_lai_data_sda[, 3:ncol(sdev_lai_data_sda)]), use.names = F))) # +names(sdev_data) <- c("date", "site_id", "sdev_agb", "h1", "h2", "sdev_lai") # c("date", "site_id", "sdev_agb", "h1", "h2", "sdev_lai") +sdev_data <- sdev_data[order(sdev_data$date), ] +sdev_data$date <- as.character(sdev_data$date, stringsAsFactors = F) +sdev_data$site_id <- as.character(sdev_data$site_id, stringsAsFactors = F) +sdev_data$sdev_lai <- as.numeric(as.character(sdev_data$sdev_lai, stringsAsFactors = F)) # as.numeric(sdev_data$sdev_lai, stringsAsFactors = F) +sdev_data$sdev_agb <- as.numeric(as.character(sdev_data$sdev_agb, stringsAsFactors = F)) # as.numeric(sdev_data$sdev_agb, stringsAsFactors = F) +sdev_data$h1 <- as.numeric(as.character(sdev_data$h1, stringsAsFactors = F)) +sdev_data$h2 <- as.numeric(as.character(sdev_data$h2, stringsAsFactors = F)) -#sdev_data[is.na(sdev_data$sdev_lai), 4:5] = NA +# sdev_data[is.na(sdev_data$sdev_lai), 4:5] = NA -sdev_data = sdev_data %>% - split(.$date) +sdev_data <- sdev_data %>% + split(.$date) -sdev_data = names(sdev_data) %>% - map(function(namesl){ +sdev_data <- names(sdev_data) %>% + map(function(namesl) { sdev_data[[namesl]] %>% split(.$site_id) %>% - map(~matrix(data = .x[3:6]^2, nrow = 2, ncol = 2)) %>% - setNames(site.ids)}) %>% + map(~ matrix(data = .x[3:6]^2, nrow = 2, ncol = 2)) %>% + setNames(site.ids) + }) %>% setNames(date.obs) -names = names(sdev_data) +names <- names(sdev_data) for (i in 1:length(names)) { for (j in 1:length(names(sdev_data[[names[i]]]))) { - d = matrix(unlist(sdev_data[[i]][[j]]), nrow = 2, ncol = 2) - - if (length(which(is.na(d)))>=1) - { - index = which(is.na(d)) - d = matrix(d[-index], nrow = 1, ncol = 1) + d <- matrix(unlist(sdev_data[[i]][[j]]), nrow = 2, ncol = 2) + + if (length(which(is.na(d))) >= 1) { + index <- which(is.na(d)) + d <- matrix(d[-index], nrow = 1, ncol = 1) } - sdev_data[[i]][[j]] = d - # rownames(sdev_data[[i]][[j]]) = NULL + sdev_data[[i]][[j]] <- d + # rownames(sdev_data[[i]][[j]]) = NULL } } -obs.mean = med_data +obs.mean <- med_data -obs.cov = sdev_data +obs.cov <- sdev_data new.settings <- PEcAn.settings::prepare.settings(settings) # unlink(c('run','out','SDA'),recursive = T) -sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, - control=list(trace=TRUE, - FF=FALSE, - interactivePlot=FALSE, - TimeseriesPlot=FALSE, - BiasPlot=FALSE, - plot.title=NULL, - facet.plots=4, - debug=FALSE, - pause=FALSE)) +sda.enkf.multisite(new.settings, + obs.mean = obs.mean, obs.cov = obs.cov, + control = list( + trace = TRUE, + FF = FALSE, + interactivePlot = FALSE, + TimeseriesPlot = FALSE, + BiasPlot = FALSE, + plot.title = NULL, + facet.plots = 4, + debug = FALSE, + pause = FALSE + ) +) ### FOR PLOTTING ONLY # load('/data/bmorrison/sda/lai/SDA/sda.output.Rdata') # plot.title=NULL # facetg=4 # readsFF=NULL -# +# # settings = new.settings -# +# # obs.mean = Viz.output[[2]] # obs.cov = Viz.output[[3]] # obs.times = names(obs.mean) # PEcAnAssimSequential::post.analysis.multisite.ggplot(settings = new.settings, t, obs.times, obs.mean, obs.cov, FORECAST, ANALYSIS, plot.title=NULL, facetg=4, 
readsFF=NULL, observed_vars = c("AbvGrndWood", "LAI")) -# -# +# +# # observed_vars = c("AbvGrndWood", "LAI") # ## fix values in obs.mean/obs.cov to include NAs so there are the same number of columns for plotting purposes only # for (name in names(obs.mean)) @@ -332,7 +344,7 @@ sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, # missing_mean = as.data.frame(NA) # colnames(missing_mean) = observed_vars[missing] # d_mean = cbind(d_mean, missing_mean) -# +# # missing_cov = matrix(0, nrow = length(observed_vars), ncol = length(observed_vars)) # diag(missing_cov) = c(diag(d_cov), NA) # d_cov = missing_cov @@ -344,4 +356,4 @@ sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, # obs.cov[name] = data_cov # } -obs.times = names(obs.mean) +obs.times <- names(obs.mean) diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_LAI_8_days.R b/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_LAI_8_days.R index 04b6ae3d401..931454a4063 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_LAI_8_days.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Bailey_LAI_8_days.R @@ -1,8 +1,7 @@ - #---------------- Close all devices and delete all variables. -------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -15,7 +14,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -29,7 +28,7 @@ library(tictoc) work_dir <- "/data/bmorrison/sda/lai" # delete an old run -#unlink(c('run','out','SDA'),recursive = T) +# unlink(c('run','out','SDA'),recursive = T) # grab multi-site XML file settings <- read.settings("pecan_MultiSite_SDA_LAI_4_sites_8_days.xml") @@ -38,20 +37,27 @@ settings <- read.settings("pecan_MultiSite_SDA_LAI_4_sites_8_days.xml") # doesn't work for one site observation <- c() for (i in seq_along(1:length(settings$run))) { - command <- paste0("settings$run$settings.",i,"$site$id") - obs <- eval(parse(text=command)) - observation <- c(observation,obs) + command <- paste0("settings$run$settings.", i, "$site$id") + obs <- eval(parse(text = command)) + observation <- c(observation, obs) } -#observation = "1000000048" +# observation = "1000000048" # what is this step for???? is this to get the site locations for the map?? 
-if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% - map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} # sample from parameters used for both sensitivity analysis and Ens -get.parameter.samples(settings, - ens.sample.method = settings$ensemble$samplingspace$parameters$method) +get.parameter.samples(settings, + ens.sample.method = settings$ensemble$samplingspace$parameters$method +) ## Aside: if method were set to unscented, would take minimal changes to do UnKF #--------------------------------------------------------------------------------------------------# @@ -60,70 +66,78 @@ get.parameter.samples(settings, ################ Not working on interactive job on MODEX PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- observation suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) + ids = site_ID, .con = con +)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) ################################ START THE SDA ######################################## -load('/data/bmorrison/sda/lai/obs_mean_4_sites_8_days.Rdata') -load('/data/bmorrison/sda/lai/obs_cov_4_sites_8_days.Rdata') -date.obs = names(obs.mean) +load("/data/bmorrison/sda/lai/obs_mean_4_sites_8_days.Rdata") +load("/data/bmorrison/sda/lai/obs_cov_4_sites_8_days.Rdata") +date.obs <- names(obs.mean) -outfolder = "/data/bmorrison/sda/lai/4_sites_8_days" -unlink(c('run','out', outfolder),recursive = T) +outfolder <- "/data/bmorrison/sda/lai/4_sites_8_days" +unlink(c("run", "out", outfolder), recursive = T) new.settings <- PEcAn.settings::prepare.settings(settings) -settings = new.settings -Q = NULL -restart = F -keepNC = T -forceRun = T -daily = TRUE -#unlink(c('run','out','SDA'),recursive = T) - -sda.enkf.multisite(outfolder = outfolder, - settings = new.settings, - obs.mean = obs.mean, - obs.cov = obs.cov, - keepNC = TRUE, - forceRun = TRUE, - daily = TRUE, - control=list(trace=TRUE, - FF=FALSE, - interactivePlot=FALSE, - TimeseriesPlot=FALSE, - BiasPlot=FALSE, - plot.title=NULL, - facet.plots=2, - debug=FALSE, - pause=FALSE, - Profiling = FALSE, - OutlierDetection=FALSE)) +settings <- new.settings +Q <- NULL +restart <- F +keepNC <- T +forceRun <- T +daily <- TRUE +# unlink(c('run','out','SDA'),recursive = T) + +sda.enkf.multisite( + outfolder = outfolder, + settings = new.settings, + obs.mean = obs.mean, + obs.cov = obs.cov, + keepNC = TRUE, + forceRun = TRUE, + daily = TRUE, + control = list( + trace = TRUE, + FF = 
FALSE, + interactivePlot = FALSE, + TimeseriesPlot = FALSE, + BiasPlot = FALSE, + plot.title = NULL, + facet.plots = 2, + debug = FALSE, + pause = FALSE, + Profiling = FALSE, + OutlierDetection = FALSE + ) +) ### FOR PLOTTING after analysis if TimeseriesPlot == FALSE) -load('/data/bmorrison/sda/lai/4_sites_8_days/sda.output.Rdata') -facetg=2 -readsFF=NULL -settings= new.settings -settings$outfolder = outfolder -obs.mean = Viz.output[[2]] -obs.cov = Viz.output[[3]] -obs.times = names(obs.mean) -PEcAnAssimSequential::post.analysis.multisite.ggplot(settings = settings, t, obs.times, obs.mean, obs.cov, FORECAST, ANALYSIS, plot.title=NULL, facetg=2, readsFF=NULL) - +load("/data/bmorrison/sda/lai/4_sites_8_days/sda.output.Rdata") +facetg <- 2 +readsFF <- NULL +settings <- new.settings +settings$outfolder <- outfolder +obs.mean <- Viz.output[[2]] +obs.cov <- Viz.output[[3]] +obs.times <- names(obs.mean) +PEcAnAssimSequential::post.analysis.multisite.ggplot(settings = settings, t, obs.times, obs.mean, obs.cov, FORECAST, ANALYSIS, plot.title = NULL, facetg = 2, readsFF = NULL) diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Shawn.R b/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Shawn.R index beb53b4560d..15528e13321 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Shawn.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/Multisite_SDA_Shawn.R @@ -8,9 +8,9 @@ #---------------- Close all devices and delete all variables. -------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -23,7 +23,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -33,8 +33,9 @@ library(dplyr) # temporary step until we get this code integrated into pecan library(RCurl) -script <- getURL("https://raw.githubusercontent.com/serbinsh/pecan/download_osu_agb/modules/data.remote/R/LandTrendr.AGB.R", - ssl.verifypeer = FALSE) +script <- getURL("https://raw.githubusercontent.com/serbinsh/pecan/download_osu_agb/modules/data.remote/R/LandTrendr.AGB.R", + ssl.verifypeer = FALSE +) eval(parse(text = script)) #--------------------------------------------------------------------------------------------------# @@ -42,34 +43,41 @@ eval(parse(text = script)) #--------------------------------------------------------------------------------------------------# ## set run options, some of these should be tweaked or removed as requirements work_dir <- "/data/bmorrison/sda/lai" -setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions +setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions -# Deifine observation - use existing or generate new? +# Deifine observation - use existing or generate new? # set to a specific file, use that. 
-#observation <- "" -#observation <- c("1000025731","1000000048","796", "772", "763", "1000000146") -#observation <- c("1000025731","1000000048","763","796","772","764","765","1000000024","678","1000000146") +# observation <- "" +# observation <- c("1000025731","1000000048","796", "772", "763", "1000000146") +# observation <- c("1000025731","1000000048","763","796","772","764","765","1000000024","678","1000000146") # delete an old run -unlink(c('run','out','SDA'),recursive = T) +unlink(c("run", "out", "SDA"), recursive = T) # grab multi-site XML file settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB.xml") observation <- c() for (i in seq_along(1:length(settings$run))) { - command <- paste0("settings$run$settings.",i,"$site$id") - obs <- eval(parse(text=command)) - observation <- c(observation,obs) + command <- paste0("settings$run$settings.", i, "$site$id") + obs <- eval(parse(text = command)) + observation <- c(observation, obs) } # what is this step for???? is this to get the site locations for the map?? -if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% - map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} # sample from parameters used for both sensitivity analysis and Ens -get.parameter.samples(settings, - ens.sample.method = settings$ensemble$samplingspace$parameters$method) +get.parameter.samples(settings, + ens.sample.method = settings$ensemble$samplingspace$parameters$method +) ## Aside: if method were set to unscented, would take minimal changes to do UnKF #--------------------------------------------------------------------------------------------------# @@ -77,79 +85,97 @@ get.parameter.samples(settings, #--------------------------------------------------------------------------------------------------# ## Prepare observational data - still very hacky here PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- observation -suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, - ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) +suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, + ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", + ids = site_ID, .con = con +)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) data_dir <- "/data2/RS_GIS_Data/LandTrendr/LandTrendr_AGB_data" -med_agb_data <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean", - data_dir, product_dates=NULL, file.path(work_dir,"Obs")) -sdev_agb_data <- 
extract.LandTrendr.AGB(site_info, "stdv", buffer = NULL, fun = "mean", - data_dir, product_dates=NULL, file.path(work_dir,"Obs")) +med_agb_data <- extract.LandTrendr.AGB(site_info, "median", + buffer = NULL, fun = "mean", + data_dir, product_dates = NULL, file.path(work_dir, "Obs") +) +sdev_agb_data <- extract.LandTrendr.AGB(site_info, "stdv", + buffer = NULL, fun = "mean", + data_dir, product_dates = NULL, file.path(work_dir, "Obs") +) PEcAn.logger::logger.info("**** Preparing data for SDA ****") -#for multi site both mean and cov needs to be a list like this +# for multi site both mean and cov needs to be a list like this # +date # +siteid # c(state variables)/matrix(cov state variables) -# -#reorder sites in obs +# +# reorder sites in obs med_agb_data_sda <- med_agb_data[[1]] %>% filter(Site_ID %in% site.ids) sdev_agb_data_sda <- sdev_agb_data[[1]] %>% filter(Site_ID %in% site.ids) -site.order <- sapply(site.ids,function(x) which(med_agb_data_sda$Site_ID %in% x)) %>% - as.numeric() %>% na.omit() -med_agb_data_sda <- med_agb_data_sda[site.order,] -sdev_agb_data_sda <- sdev_agb_data_sda[site.order,] +site.order <- sapply(site.ids, function(x) which(med_agb_data_sda$Site_ID %in% x)) %>% + as.numeric() %>% + na.omit() +med_agb_data_sda <- med_agb_data_sda[site.order, ] +sdev_agb_data_sda <- sdev_agb_data_sda[site.order, ] # truning lists to dfs for both mean and cov -date.obs <- strsplit(names(med_agb_data_sda),"_")[3:length(med_agb_data_sda)] %>% - map_chr(~.x[2]) %>% paste0(.,"/12/31") +date.obs <- strsplit(names(med_agb_data_sda), "_")[3:length(med_agb_data_sda)] %>% + map_chr(~ .x[2]) %>% + paste0(., "/12/31") obs.mean <- names(med_agb_data_sda)[3:length(med_agb_data_sda)] %>% - map(function(namesl){ - ((med_agb_data_sda)[[namesl]] %>% - map(~.x %>% as.data.frame %>% `colnames<-`(c('AbvGrndWood'))) %>% - setNames(site.ids[1:length(.)])) - }) %>% setNames(date.obs) - -obs.cov <-names(sdev_agb_data_sda)[3:length(sdev_agb_data_sda)] %>% + map(function(namesl) { + ((med_agb_data_sda)[[namesl]] %>% + map(~ .x %>% + as.data.frame() %>% + `colnames<-`(c("AbvGrndWood"))) %>% + setNames(site.ids[1:length(.)])) + }) %>% + setNames(date.obs) + +obs.cov <- names(sdev_agb_data_sda)[3:length(sdev_agb_data_sda)] %>% map(function(namesl) { ((sdev_agb_data_sda)[[namesl]] %>% - map( ~ (.x) ^ 2%>% as.matrix()) %>% - setNames(site.ids[1:length(.)])) - }) %>% setNames(date.obs) + map(~ (.x)^2 %>% as.matrix()) %>% + setNames(site.ids[1:length(.)])) + }) %>% + setNames(date.obs) #--------------------------------------------------------------------------------------------------# #--------------------------------------------------------------------------------------------------# ## generate new settings object new.settings <- PEcAn.settings::prepare.settings(settings) -#new.settings = settings +# new.settings = settings #--------------------------------------------------------------------------------------------------# #--------------------------------------------------------------------------------------------------# ## Run SDA -sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, - control=list(trace=T, - FF=F, - plot = T, - interactivePlot=F, - TimeseriesPlot=T, - BiasPlot=F, - plot.title="Sobol sampling - 2 sites - AGB", - facet.plots=T, - debug=F, - pause=F) +sda.enkf.multisite(new.settings, + obs.mean = obs.mean, obs.cov = obs.cov, + control = list( + trace = T, + FF = F, + plot = T, + interactivePlot = F, + TimeseriesPlot = T, + BiasPlot = F, + plot.title = "Sobol sampling - 2 sites - 
AGB", + facet.plots = T, + debug = F, + pause = F + ) ) #--------------------------------------------------------------------------------------------------# @@ -158,8 +184,10 @@ sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, ## Wrap up # Send email if configured if (!is.null(settings$email) && !is.null(settings$email$to) && (settings$email$to != "")) { - sendmail(settings$email$from, settings$email$to, - paste0("SDA workflow has finished executing at ", base::date())) + sendmail( + settings$email$from, settings$email$to, + paste0("SDA workflow has finished executing at ", base::date()) + ) } #--------------------------------------------------------------------------------------------------# diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/ecoregion_lai_CONUS.R b/modules/assim.sequential/inst/sda_backup/bmorrison/ecoregion_lai_CONUS.R index 2e4f0849a87..47b5b4f438c 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/ecoregion_lai_CONUS.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/ecoregion_lai_CONUS.R @@ -15,24 +15,24 @@ library(doParallel) library(PEcAn.utils) set.seed(1) -#eco = st_read(dsn = '/data/bmorrison/sda/ecoregion_site_analysis/shapefiles', layer = 'eco_conus_rename') -setwd('/data/bmorrison/sda/ecoregion_site_analysis/modis_data/CONUS') -states = st_read(dsn = "/data/bmorrison/sda/ecoregion_site_analysis/shapefiles/states_21basic/states.shp") -states = as(states, "Spatial") +# eco = st_read(dsn = '/data/bmorrison/sda/ecoregion_site_analysis/shapefiles', layer = 'eco_conus_rename') +setwd("/data/bmorrison/sda/ecoregion_site_analysis/modis_data/CONUS") +states <- st_read(dsn = "/data/bmorrison/sda/ecoregion_site_analysis/shapefiles/states_21basic/states.shp") +states <- as(states, "Spatial") ### testing on 1 ecoregion -region = states -region = st_read(dsn = "/data/bmorrison/sda/ecoregion_site_analysis/shapefiles/states_21basic/states.shp") -region = region[-c(1,28,51),] -#region = eco[eco$name == eco$name[11],] -region = st_union(region) -region = as(region, "Spatial") -region = spTransform(region, CRS = "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ") -region_ll = spTransform(region, CRS = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ") +region <- states +region <- st_read(dsn = "/data/bmorrison/sda/ecoregion_site_analysis/shapefiles/states_21basic/states.shp") +region <- region[-c(1, 28, 51), ] +# region = eco[eco$name == eco$name[11],] +region <- st_union(region) +region <- as(region, "Spatial") +region <- spTransform(region, CRS = "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ") +region_ll <- spTransform(region, CRS = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ") # hexagonal tesellation random sampling -# must be in meters +# must be in meters make_grid <- function(x, cell_diameter, cell_area, clip = FALSE) { if (missing(cell_diameter)) { if (missing(cell_area)) { @@ -44,8 +44,10 @@ make_grid <- function(x, cell_diameter, cell_area, clip = FALSE) { ext <- as(extent(x) + cell_diameter, "SpatialPolygons") projection(ext) <- projection(x) # generate array of hexagon centers - g <- spsample(ext, type = "hexagonal", cellsize = cell_diameter, - offset = c(0.5, 0.5)) + g <- spsample(ext, + type = "hexagonal", cellsize = cell_diameter, + offset = c(0.5, 0.5) + ) # convert center points to hexagons g <- HexPoints2SpatialPolygons(g, dx = cell_diameter) # clip to boundary 
of study area @@ -62,12 +64,12 @@ make_grid <- function(x, cell_diameter, cell_area, clip = FALSE) { # trick to figure out how many polygons I want vs. cell area of hexagons n <- 1000 -area_of_region = raster::area(region) -cell_area= area_of_region/n +area_of_region <- raster::area(region) +cell_area <- area_of_region / n # make hexagonal tesselation grid hex_grid <- make_grid(region, cell_area = cell_area, clip = FALSE) -#hex_grid <- make_grid(region, cell_diameter = 37894.1, clip = FALSE) +# hex_grid <- make_grid(region, cell_diameter = 37894.1, clip = FALSE) plot(region, col = "grey50", bg = "light blue") plot(hex_grid, border = "orange", add = T) @@ -77,16 +79,16 @@ save(hex_grid, file = paste("/data/bmorrison/sda/ecoregion_site_analysis/hex_gri load(paste("/data/bmorrison/sda/ecoregion_site_analysis/hex_grid_CONUS.Rdata", sep = "")) # randomly select one point from each hexagon (random) -samples = data.frame() +samples <- data.frame() for (i in 1:length(names(hex_grid))) { - hex = hex_grid[i,] - sample = as.data.frame(spsample(hex, n = 1, type = 'random')) - names(sample) = c("x", "y") - samples = rbind(samples, sample) + hex <- hex_grid[i, ] + sample <- as.data.frame(spsample(hex, n = 1, type = "random")) + names(sample) <- c("x", "y") + samples <- rbind(samples, sample) } -coordinates(samples) = ~x+y -projection(samples) = crs(region) +coordinates(samples) <- ~ x + y +projection(samples) <- crs(region) # clip out points outside of ecoregion area samples <- gIntersection(samples, region, byid = TRUE) @@ -94,136 +96,130 @@ samples <- gIntersection(samples, region, byid = TRUE) plot(region, col = "grey50", bg = "light blue", axes = TRUE) plot(hex_grid, border = "orange", add = T) plot(samples, pch = 20, add = T) -samples = spTransform(samples, CRS = crs(states)) -region = spTransform(region, CRS = crs(states)) +samples <- spTransform(samples, CRS = crs(states)) +region <- spTransform(region, CRS = crs(states)) -xy = as.data.frame(samples) -names(xy) = c("lon", "lat") -save(xy, file = paste('/data/bmorrison/sda/ecoregion_site_analysis/random_sites_CONUS.Rdata', sep = "")) +xy <- as.data.frame(samples) +names(xy) <- c("lon", "lat") +save(xy, file = paste("/data/bmorrison/sda/ecoregion_site_analysis/random_sites_CONUS.Rdata", sep = "")) # extract MODIS data for location load("/data/bmorrison/sda/ecoregion_site_analysis/random_sites_CONUS.Rdata") -product = "MOD15A2H" +product <- "MOD15A2H" -dates = PEcAn.utils::retry.func(MODISTools::mt_dates(product, lat = xy$lat[1], lon = xy$lon[1]), maxError = 10, sleep = 2) +dates <- PEcAn.utils::retry.func(MODISTools::mt_dates(product, lat = xy$lat[1], lon = xy$lon[1]), maxError = 10, sleep = 2) -starting_dates = dates$calendar_date[grep(dates$calendar_date, pattern = "2001-01")] -start_count = as.data.frame(table(starting_dates), stringsAsFactors = F) -start_date = gsub("-", "/", start_count$starting_dates[1]) +starting_dates <- dates$calendar_date[grep(dates$calendar_date, pattern = "2001-01")] +start_count <- as.data.frame(table(starting_dates), stringsAsFactors = F) +start_date <- gsub("-", "/", start_count$starting_dates[1]) -ending_dates = dates$calendar_date[grep(dates$calendar_date, pattern = "2018-12")] -end_count = as.data.frame(table(ending_dates), stringsAsFactors = F) -end_date = gsub("-", "/", end_count$ending_dates[nrow(end_count)] ) +ending_dates <- dates$calendar_date[grep(dates$calendar_date, pattern = "2018-12")] +end_count <- as.data.frame(table(ending_dates), stringsAsFactors = F) +end_date <- gsub("-", "/", 
end_count$ending_dates[nrow(end_count)]) # 10 cpu limit because THREADDS has 10 download limit -#xy = xy[1:nrow(xy),] +# xy = xy[1:nrow(xy),] -cl <- parallel::makeCluster(10) #, outfile= "") +cl <- parallel::makeCluster(10) # , outfile= "") doParallel::registerDoParallel(cl) -output = data.frame() -for (j in 1:ceiling(nrow(xy)/10)) +output <- data.frame() +for (j in 1:ceiling(nrow(xy) / 10)) { - if (j == ceiling(nrow(xy)/10)) - { - coords = xy[((j*10)-9):nrow(xy),] - working = print(paste("working on : ", ((j*10)-9), "-", nrow(xy), sep = "")) - + if (j == ceiling(nrow(xy) / 10)) { + coords <- xy[((j * 10) - 9):nrow(xy), ] + working <- print(paste("working on : ", ((j * 10) - 9), "-", nrow(xy), sep = "")) } else { - coords = xy[((j*10)-9):(j*10),] - working = print(paste("working on : ", ((j*10)-9), "-", (j*10), sep = "")) - + coords <- xy[((j * 10) - 9):(j * 10), ] + working <- print(paste("working on : ", ((j * 10) - 9), "-", (j * 10), sep = "")) } - #siteID = paste(round(coords[i,], digits = 2), collapse = "_") - start = Sys.time() - data = PEcAn.utils::retry.func(foreach(i=1:nrow(coords), .combine = rbind) %dopar% PEcAn.data.remote::call_MODIS(outfolder = getwd(), iter = ((j*10-10)+i), product = "MOD15A2H",band = "Lai_500m", start_date = start_date, end_date = end_date, lat = coords$lat[i], lon = coords$lon[i],size = 0, band_qc = "FparLai_QC", band_sd = "", package_method = "MODISTools", QC_filter = T), maxError = 10, sleep = 2) - end = Sys.time() - difference = end-start - time = print(difference) - output = rbind(output, data) + # siteID = paste(round(coords[i,], digits = 2), collapse = "_") + start <- Sys.time() + data <- PEcAn.utils::retry.func(foreach(i = 1:nrow(coords), .combine = rbind) %dopar% PEcAn.data.remote::call_MODIS(outfolder = getwd(), iter = ((j * 10 - 10) + i), product = "MOD15A2H", band = "Lai_500m", start_date = start_date, end_date = end_date, lat = coords$lat[i], lon = coords$lon[i], size = 0, band_qc = "FparLai_QC", band_sd = "", package_method = "MODISTools", QC_filter = T), maxError = 10, sleep = 2) + end <- Sys.time() + difference <- end - start + time <- print(difference) + output <- rbind(output, data) } stopCluster(cl) -save(output, file = paste('/data/bmorrison/sda/ecoregion_site_analysis/modis_data_output_', nrow(xy), '.Rdata', sep = "")) -# +save(output, file = paste("/data/bmorrison/sda/ecoregion_site_analysis/modis_data_output_", nrow(xy), ".Rdata", sep = "")) +# # load(paste('/data/bmorrison/sda/ecoregion_site_analysis/modis_data_output_', nrow(output), '.Rdata', sep = "")) # output = as.data.frame(output, row.names = NULL) # for large datasets to group together -files = list.files(path = '/data/bmorrison/sda/ecoregion_site_analysis/modis_data/CONUS', pattern = '.csv', include.dirs = T, full.names = T) -xy = data.frame() +files <- list.files(path = "/data/bmorrison/sda/ecoregion_site_analysis/modis_data/CONUS", pattern = ".csv", include.dirs = T, full.names = T) +xy <- data.frame() for (i in 1:length(files)) { - f = read.csv(files[i]) - xy = rbind(xy, f) + f <- read.csv(files[i]) + xy <- rbind(xy, f) } -output = xy +output <- xy # summarize into anual peak lai from 2001-2018 -years = lubridate::year(start_date):lubridate::year(end_date) +years <- lubridate::year(start_date):lubridate::year(end_date) -data = output -sites = output -coordinates(sites) = ~lon+lat -projection(sites) = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " -sites = as.data.frame(unique(coordinates(sites))) -coordinates(sites) = ~lon+lat -projection(sites) = "+proj=longlat 
+ellps=WGS84 +datum=WGS84 +no_defs " +data <- output +sites <- output +coordinates(sites) <- ~ lon + lat +projection(sites) <- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " +sites <- as.data.frame(unique(coordinates(sites))) +coordinates(sites) <- ~ lon + lat +projection(sites) <- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " -compute_annual_lai = function(data, sites) -{ - index = which((round(data$lon, digits = 3)== round(sites$lon, digits = 3)) & (round(data$lat, digits = 3) == round(sites$lat, digits = 3))) - if (length(index) > 0) - { - site = data[index,] - years = unique(lubridate::year(site$calendar_date)) - - summary = data.frame() +compute_annual_lai <- function(data, sites) { + index <- which((round(data$lon, digits = 3) == round(sites$lon, digits = 3)) & (round(data$lat, digits = 3) == round(sites$lat, digits = 3))) + if (length(index) > 0) { + site <- data[index, ] + years <- unique(lubridate::year(site$calendar_date)) + + summary <- data.frame() for (j in 1:length(years)) { - g = grep(site$calendar_date, pattern = years[j]) - if (length(g) > 0) - { - d = site[g,] - percentile = which(d$data <= quantile(d$data, probs = 0.95, na.rm = T)[1]) - peak = max(d$data[percentile], na.rm = T) - - info = d[1,] - info$data = peak - info$calendar_date = years[j] - - summary = rbind(summary, info) + g <- grep(site$calendar_date, pattern = years[j]) + if (length(g) > 0) { + d <- site[g, ] + percentile <- which(d$data <= quantile(d$data, probs = 0.95, na.rm = T)[1]) + peak <- max(d$data[percentile], na.rm = T) + + info <- d[1, ] + info$data <- peak + info$calendar_date <- years[j] + + summary <- rbind(summary, info) } } - peak_lai = summary[1,] - peak_lai$data = max(summary$data[which(summary$data <= quantile(summary$data, probs = 0.95))], na.rm = T) + peak_lai <- summary[1, ] + peak_lai$data <- max(summary$data[which(summary$data <= quantile(summary$data, probs = 0.95))], na.rm = T) return(peak_lai) } } -cl <- parallel::makeCluster(10) #, outfile= "") +cl <- parallel::makeCluster(10) # , outfile= "") doParallel::registerDoParallel(cl) -test = foreach(i=1:nrow(sites), .combine = rbind) %dopar% compute_annual_lai(data = data, sites = sites[i,]) +test <- foreach(i = 1:nrow(sites), .combine = rbind) %dopar% compute_annual_lai(data = data, sites = sites[i, ]) stopCluster(cl) -test = data.frame() +test <- data.frame() for (i in 1:nrow(coordinates(sites))) { - t = compute_annual_lai(data = data, sites = sites[i,]) - test = rbind(test, t) + t <- compute_annual_lai(data = data, sites = sites[i, ]) + test <- rbind(test, t) } -# -# -# +# +# +# # summary =data.frame() # for (i in 1:nrow(xy)) # { @@ -239,60 +235,60 @@ for (i in 1:nrow(coordinates(sites))) # d = site[g,] # percentile = which(d$data <= quantile(d$data, probs = 0.95, na.rm = T)[1]) # peak = max(d$data[percentile], na.rm = T) -# +# # info = d[1,] # info$data = peak # info$calendar_date = years[j] -# +# # summary = rbind(summary, info) # } # } # } # } -# +# # peak_lai = data.frame() # for (i in 1:nrow(xy)) # { # index = which(round(summary$lat, digits = 3) == round(xy$lat[i], digits = 3) & round(summary$lon, digits = 3) == round(xy$lon[i], digits = 3)) -# +# # if (length(index) >0) # { # site = summary[index,] -# +# # peak = mean(site$data, na.rm = T) # info = site[1,] # info$data = peak # peak_lai = rbind(peak_lai, info) # } # } -# +# # peak_lai = as.data.frame(peak_lai, row.names = NULL) # semivariogram analysis -#1. 
reproject spatial data into aea so distances are in meteres -coordinates(test) = ~lon+lat -projection(test) = crs(sites) -test = spTransform(test, CRS = "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ") +# 1. reproject spatial data into aea so distances are in meteres +coordinates(test) <- ~ lon + lat +projection(test) <- crs(sites) +test <- spTransform(test, CRS = "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ") -library(gstat) +library(gstat) # 1. check that data is normally distributed, if not, transform. hist(test$data) library(MASS) -norm = fitdistr(x = test$data, densfun = "normal") -test$trans= rnorm(test$data, mean = norm$estimate[1], sd = norm$estimate[2]) +norm <- fitdistr(x = test$data, densfun = "normal") +test$trans <- rnorm(test$data, mean = norm$estimate[1], sd = norm$estimate[2]) -v = variogram(trans~1, data = test) -v.data = v[order(v$dist),] +v <- variogram(trans ~ 1, data = test) +v.data <- v[order(v$dist), ] plot(v) -v.vgm = vgm( psill = NA, range = NA, model = "Sph", nugget = 1) -v.fit = fit.variogram(v, v.vgm, fit.sills = T, fit.ranges = T, fit.kappa = T) +v.vgm <- vgm(psill = NA, range = NA, model = "Sph", nugget = 1) +v.fit <- fit.variogram(v, v.vgm, fit.sills = T, fit.ranges = T, fit.kappa = T) plot(v, model = v.fit) -cell_area= 37894.1 +cell_area <- 37894.1 # make hexagonal tesselation grid hex_grid <- make_grid(region, cell_area = cell_area, clip = FALSE) @@ -301,17 +297,16 @@ plot(region, col = "grey50", bg = "light blue") plot(hex_grid, border = "orange", add = T) # clip to ecogreion area -samples = data.frame() +samples <- data.frame() for (i in 1:length(names(hex_grid))) { - hex = hex_grid[i,] - sample = as.data.frame(spsample(hex, n = 1, type = 'random')) - names(sample) = c("x", "y") - samples = rbind(samples, sample) + hex <- hex_grid[i, ] + sample <- as.data.frame(spsample(hex, n = 1, type = "random")) + names(sample) <- c("x", "y") + samples <- rbind(samples, sample) } -coordinates(samples) = ~x+y -projection(samples) = crs(region) +coordinates(samples) <- ~ x + y +projection(samples) <- crs(region) # clip out points outside of ecoregion area samples <- gIntersection(samples, region, byid = TRUE) - diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/ecoregion_lai_agb_trends.R b/modules/assim.sequential/inst/sda_backup/bmorrison/ecoregion_lai_agb_trends.R index 98885da3863..30eb1c8efc2 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/ecoregion_lai_agb_trends.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/ecoregion_lai_agb_trends.R @@ -15,19 +15,19 @@ library(doParallel) library(PEcAn.utils) set.seed(1) -eco = st_read(dsn = '/data/bmorrison/sda/ecoregion_site_analysis/shapefiles', layer = 'eco_conus_rename') -setwd('/data/bmorrison/sda/ecoregion_site_analysis/modis_data') +eco <- st_read(dsn = "/data/bmorrison/sda/ecoregion_site_analysis/shapefiles", layer = "eco_conus_rename") +setwd("/data/bmorrison/sda/ecoregion_site_analysis/modis_data") ### testing on 1 ecoregion -region = eco[eco$name == eco$name[11],] -region = st_union(region) -region = as(region, "Spatial") -region = spTransform(region, CRS = "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ") +region <- eco[eco$name == eco$name[11], ] +region <- st_union(region) +region <- as(region, "Spatial") +region <- spTransform(region, CRS = "+proj=aea +lat_1=29.5 
+lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ") # hexagonal tesellation random sampling -# must be in meters +# must be in meters make_grid <- function(x, cell_diameter, cell_area, clip = FALSE) { if (missing(cell_diameter)) { if (missing(cell_area)) { @@ -39,8 +39,10 @@ make_grid <- function(x, cell_diameter, cell_area, clip = FALSE) { ext <- as(extent(x) + cell_diameter, "SpatialPolygons") projection(ext) <- projection(x) # generate array of hexagon centers - g <- spsample(ext, type = "hexagonal", cellsize = cell_diameter, - offset = c(0.5, 0.5)) + g <- spsample(ext, + type = "hexagonal", cellsize = cell_diameter, + offset = c(0.5, 0.5) + ) # convert center points to hexagons g <- HexPoints2SpatialPolygons(g, dx = cell_diameter) # clip to boundary of study area @@ -57,8 +59,8 @@ make_grid <- function(x, cell_diameter, cell_area, clip = FALSE) { # trick to figure out how many polygons I want vs. cell area of hexagons n <- 1000 -area_of_region = raster::area(region) -cell_area= area_of_region/n +area_of_region <- raster::area(region) +cell_area <- area_of_region / n # make hexagonal tesselation grid hex_grid <- make_grid(region, cell_area = cell_area, clip = FALSE) @@ -72,64 +74,64 @@ save(hex_grid, file = paste("/data/bmorrison/sda/ecoregion_site_analysis/hex_gri load(paste("/data/bmorrison/sda/ecoregion_site_analysis/hex_grid_1164.Rdata", sep = "")) # randomly select one point from each hexagon (random) -samples = data.frame() +samples <- data.frame() for (i in 1:length(names(hex_grid))) { - hex = hex_grid[i,] - sample = as.data.frame(spsample(hex, n = 1, type = 'random')) - names(sample) = c("x", "y") - samples = rbind(samples, sample) + hex <- hex_grid[i, ] + sample <- as.data.frame(spsample(hex, n = 1, type = "random")) + names(sample) <- c("x", "y") + samples <- rbind(samples, sample) } -coordinates(samples) = ~x+y -projection(samples) = crs(region) +coordinates(samples) <- ~ x + y +projection(samples) <- crs(region) # clip out points outside of ecoregion area samples <- gIntersection(samples, region, byid = TRUE) plot(region, col = "grey50", bg = "light blue", axes = TRUE) plot(hex_grid, border = "orange", add = T) -csamples = spTransform(samples, CRS = crs(eco)) -region = spTransform(region, CRS = crs(eco)) +csamples <- spTransform(samples, CRS = crs(eco)) +region <- spTransform(region, CRS = crs(eco)) -xy = as.data.frame(samples) -names(xy) = c("lon", "lat") -save(xy, file = paste('/data/bmorrison/sda/ecoregion_site_analysis/random_sites_', nrow(xy), '.Rdata', sep = "")) +xy <- as.data.frame(samples) +names(xy) <- c("lon", "lat") +save(xy, file = paste("/data/bmorrison/sda/ecoregion_site_analysis/random_sites_", nrow(xy), ".Rdata", sep = "")) # extract MODIS data for location load("/data/bmorrison/sda/ecoregion_site_analysis/random_sites_989.Rdata") -product = "MOD15A2H" +product <- "MOD15A2H" -dates = PEcAn.utils::retry.func(MODISTools::mt_dates(product, lat = xy$lat[1], lon = xy$lon[1]), maxError = 10, sleep = 2) +dates <- PEcAn.utils::retry.func(MODISTools::mt_dates(product, lat = xy$lat[1], lon = xy$lon[1]), maxError = 10, sleep = 2) -starting_dates = dates$calendar_date[grep(dates$calendar_date, pattern = "2001-01")] -start_count = as.data.frame(table(starting_dates), stringsAsFactors = F) -start_date = gsub("-", "/", start_count$starting_dates[1]) +starting_dates <- dates$calendar_date[grep(dates$calendar_date, pattern = "2001-01")] +start_count <- as.data.frame(table(starting_dates), stringsAsFactors = F) +start_date <- 
gsub("-", "/", start_count$starting_dates[1]) + +ending_dates <- dates$calendar_date[grep(dates$calendar_date, pattern = "2018-12")] +end_count <- as.data.frame(table(ending_dates), stringsAsFactors = F) +end_date <- gsub("-", "/", end_count$ending_dates[nrow(end_count)]) -ending_dates = dates$calendar_date[grep(dates$calendar_date, pattern = "2018-12")] -end_count = as.data.frame(table(ending_dates), stringsAsFactors = F) -end_date = gsub("-", "/", end_count$ending_dates[nrow(end_count)] ) - # 10 cpu limit because THREADDS has 10 download limit # xy = xy[601:nrow(xy),] -# +# # cl <- parallel::makeCluster(10) #, outfile= "") # doParallel::registerDoParallel(cl) -# +# # output = data.frame() # for (j in 1:ceiling(nrow(xy)/10)) # { # if (j == ceiling(nrow(xy)/10)) # { -# coords = xy[((j*10)-9):nrow(xy),] +# coords = xy[((j*10)-9):nrow(xy),] # working = print(paste("working on : ", ((j*10)-9+600), "-", nrow(xy)+600, sep = "")) -# +# # } else { # coords = xy[((j*10)-9):(j*10),] # working = print(paste("working on : ", ((j*10)-9+600), "-", (j*10+600), sep = "")) -# +# # } # # siteID = paste(round(coords[i,], digits = 2), collapse = "_") # start = Sys.time() @@ -139,100 +141,98 @@ end_date = gsub("-", "/", end_count$ending_dates[nrow(end_count)] ) # time = print(difference) # output = rbind(output, data) # } -# +# # # end = Sys.time() # # difference = end-start # # difference # stopCluster(cl) -# -# +# +# # save(output, file = paste('/data/bmorrison/sda/ecoregion_site_analysis/modis_data_output_', nrow(xy), '.Rdata', sep = "")) -# +# # load(paste('/data/bmorrison/sda/ecoregion_site_analysis/modis_data_output_', nrow(output), '.Rdata', sep = "")) # output = as.data.frame(output, row.names = NULL) # extract AGB data -start_date = "2001/01/01" -end_date = "2018/01/01" +start_date <- "2001/01/01" +end_date <- "2018/01/01" library(RCurl) -script <- getURL("https://raw.githubusercontent.com/serbinsh/pecan/download_osu_agb/modules/data.remote/R/LandTrendr.AGB.R", - ssl.verifypeer = FALSE) +script <- getURL("https://raw.githubusercontent.com/serbinsh/pecan/download_osu_agb/modules/data.remote/R/LandTrendr.AGB.R", + ssl.verifypeer = FALSE +) eval(parse(text = script)) # for large datasets to group together -files = list.files(path = '/data/bmorrison/sda/ecoregion_site_analysis/modis_data', pattern = '.csv', include.dirs = T, full.names = T) -xy = data.frame() +files <- list.files(path = "/data/bmorrison/sda/ecoregion_site_analysis/modis_data", pattern = ".csv", include.dirs = T, full.names = T) +xy <- data.frame() for (i in 1:length(files)) { - f = read.csv(files[i]) - xy = rbind(xy, f) + f <- read.csv(files[i]) + xy <- rbind(xy, f) } -output = xy +output <- xy # summarize into anual peak lai from 2001-2018 -years = lubridate::year(start_date):lubridate::year(end_date) +years <- lubridate::year(start_date):lubridate::year(end_date) -data = output +data <- output load("/data/bmorrison/sda/ecoregion_site_analysis/random_sites_989.Rdata") # sites = xy # coordinates(sites) = ~lon+lat # projection(sites) = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " # sites = as.data.frame(unique(coordinates(sites))) -sites = SpatialPointsDataFrame(data = xy, coords = xy, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ")) -compute_annual_lai = function(data, sites) -{ - index = which((round(data$lon, digits = 3)== round(sites$lon, digits = 3)) & (round(data$lat, digits = 3) == round(sites$lat, digits = 3))) - if (length(index) > 0) - { - site = data[index,] - years = 
unique(lubridate::year(site$calendar_date)) - - summary = data.frame() +sites <- SpatialPointsDataFrame(data = xy, coords = xy, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ")) +compute_annual_lai <- function(data, sites) { + index <- which((round(data$lon, digits = 3) == round(sites$lon, digits = 3)) & (round(data$lat, digits = 3) == round(sites$lat, digits = 3))) + if (length(index) > 0) { + site <- data[index, ] + years <- unique(lubridate::year(site$calendar_date)) + + summary <- data.frame() for (j in 1:length(years)) { - g = grep(site$calendar_date, pattern = years[j]) - if (length(g) > 0) - { - d = site[g,] - percentile = which(d$data <= quantile(d$data, probs = 0.95, na.rm = T)[1]) - peak = max(d$data[percentile], na.rm = T) - - info = d[1,] - info$data = peak - info$calendar_date = years[j] - - summary = rbind(summary, info) + g <- grep(site$calendar_date, pattern = years[j]) + if (length(g) > 0) { + d <- site[g, ] + percentile <- which(d$data <= quantile(d$data, probs = 0.95, na.rm = T)[1]) + peak <- max(d$data[percentile], na.rm = T) + + info <- d[1, ] + info$data <- peak + info$calendar_date <- years[j] + + summary <- rbind(summary, info) } } - peak_lai = summary[1,] - peak_lai$data = max(summary$data[which(summary$data <= quantile(summary$data, probs = 0.95))], na.rm = T) + peak_lai <- summary[1, ] + peak_lai$data <- max(summary$data[which(summary$data <= quantile(summary$data, probs = 0.95))], na.rm = T) return(peak_lai) } } -test = data.frame() +test <- data.frame() for (i in 1:nrow(sites)) { - working = print(i) - site = sites[i,] - t = compute_annual_lai(data = data, sites = site) - test = rbind(test,t) + working <- print(i) + site <- sites[i, ] + t <- compute_annual_lai(data = data, sites = site) + test <- rbind(test, t) } # cl <- parallel::makeCluster(10) #, outfile= "") # doParallel::registerDoParallel(cl) -# +# # test = foreach(i=1:nrow(sites), .combine = rbind) %dopar% compute_annual_lai(data = data, sites = sites[i,]) -# +# # stopCluster(cl) -# -# -# +# +# +# # summary =data.frame() # for (i in 1:nrow(xy)) # { @@ -248,60 +248,60 @@ for (i in 1:nrow(sites)) # d = site[g,] # percentile = which(d$data <= quantile(d$data, probs = 0.95, na.rm = T)[1]) # peak = max(d$data[percentile], na.rm = T) -# +# # info = d[1,] # info$data = peak # info$calendar_date = years[j] -# +# # summary = rbind(summary, info) # } # } # } # } -# +# # peak_lai = data.frame() # for (i in 1:nrow(xy)) # { # index = which(round(summary$lat, digits = 3) == round(xy$lat[i], digits = 3) & round(summary$lon, digits = 3) == round(xy$lon[i], digits = 3)) -# +# # if (length(index) >0) # { # site = summary[index,] -# +# # peak = mean(site$data, na.rm = T) # info = site[1,] # info$data = peak # peak_lai = rbind(peak_lai, info) # } # } -# +# # peak_lai = as.data.frame(peak_lai, row.names = NULL) # semivariogram analysis -#1. reproject spatial data into aea so distances are in meteres -coordinates(test) = ~lon+lat -projection(test) = crs(eco) -test = spTransform(test, CRS = "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ") +# 1. reproject spatial data into aea so distances are in meteres +coordinates(test) <- ~ lon + lat +projection(test) <- crs(eco) +test <- spTransform(test, CRS = "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ") -library(gstat) +library(gstat) # 1. check that data is normally distributed, if not, transform. 
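# Illustrative sketch only (not the approach used below): an explicit normality
# check could be done with a Shapiro-Wilk test plus a log1p() fallback, e.g.
# if (shapiro.test(test$data)$p.value < 0.05) {
#   test$trans <- log1p(test$data) # log(1 + x) keeps zero LAI values finite
# } else {
#   test$trans <- test$data
# }
# The code below instead fits a normal distribution to the data and draws
# simulated values with rnorm().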
hist(test$data) library(MASS) -norm = fitdistr(x = test$data, densfun = "normal") -test$trans= rnorm(test$data, mean = norm$estimate[1], sd = norm$estimate[2]) +norm <- fitdistr(x = test$data, densfun = "normal") +test$trans <- rnorm(test$data, mean = norm$estimate[1], sd = norm$estimate[2]) -v = variogram(trans~1, data = test) -v.data = v[order(v$dist),] +v <- variogram(trans ~ 1, data = test) +v.data <- v[order(v$dist), ] plot(v) -v.vgm = vgm( psill = NA, range = NA, model = "Sph", nugget = 0.9) -v.fit = fit.variogram(v, v.vgm, fit.sills = T, fit.ranges = T, fit.kappa = T) +v.vgm <- vgm(psill = NA, range = NA, model = "Sph", nugget = 0.9) +v.fit <- fit.variogram(v, v.vgm, fit.sills = T, fit.ranges = T, fit.kappa = T) plot(v, model = v.fit) -cell_area= 37894.1 +cell_area <- 37894.1 # make hexagonal tesselation grid hex_grid <- make_grid(region, cell_area = cell_area, clip = FALSE) @@ -310,17 +310,16 @@ plot(region, col = "grey50", bg = "light blue") plot(hex_grid, border = "orange", add = T) # clip to ecogreion area -samples = data.frame() +samples <- data.frame() for (i in 1:length(names(hex_grid))) { - hex = hex_grid[i,] - sample = as.data.frame(spsample(hex, n = 1, type = 'random')) - names(sample) = c("x", "y") - samples = rbind(samples, sample) + hex <- hex_grid[i, ] + sample <- as.data.frame(spsample(hex, n = 1, type = "random")) + names(sample) <- c("x", "y") + samples <- rbind(samples, sample) } -coordinates(samples) = ~x+y -projection(samples) = crs(region) +coordinates(samples) <- ~ x + y +projection(samples) <- crs(region) # clip out points outside of ecoregion area samples <- gIntersection(samples, region, byid = TRUE) - diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/extract_500_site_data.R b/modules/assim.sequential/inst/sda_backup/bmorrison/extract_500_site_data.R index 0bd5fe478fd..70782634a82 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/extract_500_site_data.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/extract_500_site_data.R @@ -1,6 +1,6 @@ -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -13,7 +13,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -25,19 +25,21 @@ library(tictoc) work_dir <- "/data/bmorrison/sda/lai" # delete an old run -#unlink(c('run','out','SDA'),recursive = T) +# unlink(c('run','out','SDA'),recursive = T) # grab multi-site XML file settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB_sitegroup.xml") -if ("sitegroup" %in% names(settings)){ - if (is.null(settings$sitegroup$nSite)){ +if ("sitegroup" %in% names(settings)) { + if (is.null(settings$sitegroup$nSite)) { settings <- PEcAn.settings::createSitegroupMultiSettings(settings, - sitegroupId = settings$sitegroup$id) + sitegroupId = settings$sitegroup$id + ) } else { settings <- PEcAn.settings::createSitegroupMultiSettings(settings, - sitegroupId = settings$sitegroup$id, - nSite = settings$sitegroup$nSite) + sitegroupId = settings$sitegroup$id, + nSite = settings$sitegroup$nSite + ) } 
settings$sitegroup <- NULL ## zero out so don't expand a second time if re-reading } @@ -48,196 +50,214 @@ if ("sitegroup" %in% names(settings)){ # doesn't work for one site observation <- c() for (i in seq_along(1:length(settings$run))) { - command <- paste0("settings$run$settings.",i,"$site$id") - obs <- eval(parse(text=command)) - observation <- c(observation,obs) + command <- paste0("settings$run$settings.", i, "$site$id") + obs <- eval(parse(text = command)) + observation <- c(observation, obs) } # what is this step for???? is this to get the site locations for the map?? -if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% - map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} # sample from parameters used for both sensitivity analysis and Ens -# get.parameter.samples(settings, -# ens.sample.method = settings$ensemble$samplingspace$parameters$method) +# get.parameter.samples(settings, +# ens.sample.method = settings$ensemble$samplingspace$parameters$method) # ## Aside: if method were set to unscented, would take minimal changes to do UnKF # #--------------------------------------------------------------------------------------------------# ############################ EXTRACT SITE INFORMATION FROM XML TO DOWNLOAD DATA + RUN SDA ########################### ################ Not working on interactive job on MODEX -observations = observation -lai_data = data.frame() +observations <- observation +lai_data <- data.frame() for (i in 1:5) { - start = (1+((i-1)*10)) - end = start+9 - - obs = observations[start:end] - - working = print(paste("working on: ", i)) - sites = print(obs) + start <- (1 + ((i - 1) * 10)) + end <- start + 9 + + obs <- observations[start:end] + + working <- print(paste("working on: ", i)) + sites <- print(obs) PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") - bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) + bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE + ) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- obs suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) - - suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) + ids = site_ID, .con = con + )) + + suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) - site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) - - - lai = call_MODIS(outdir = NULL, var = "LAI", site_info = site_info, product_dates = c("1980001", "2018365"), - run_parallel = TRUE, ncores = 10, product = "MOD15A2H", band = "Lai_500m", - package_method = "MODISTools", QC_filter = TRUE, progress = FALSE) - - lai_data = rbind(lai_data, lai) - + site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone + ) + + + lai <- call_MODIS( + outdir = NULL, var = "LAI", site_info = site_info, product_dates = c("1980001", "2018365"), + run_parallel = TRUE, ncores = 10, 
product = "MOD15A2H", band = "Lai_500m", + package_method = "MODISTools", QC_filter = TRUE, progress = FALSE + ) + + lai_data <- rbind(lai_data, lai) } -lai_sd = lai_data -save(lai_data, file = '/data/bmorrison/sda/lai/50_site_run/lai_data_sites.Rdata') +lai_sd <- lai_data +save(lai_data, file = "/data/bmorrison/sda/lai/50_site_run/lai_data_sites.Rdata") -observation = observations +observation <- observations PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- observation suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) + ids = site_ID, .con = con +)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) # # output folder for the data # data_dir <- "/data2/RS_GIS_Data/LandTrendr/LandTrendr_AGB_data" -# +# # # # extract the data # med_agb_data <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean", # data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]] -# +# # sdev_agb_data <- extract.LandTrendr.AGB(site_info, "stdv", buffer = NULL, fun = "mean", # data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]] -# -# +# +# # ndates = colnames(med_agb_data)[-c(1:2)] -# +# # med_agb_data$Site_Name = as.character(med_agb_data$Site_Name, stringsAsFactors = FALSE) # med_agb_data = reshape2::melt(med_agb_data, id.vars = "Site_ID", measure.vars = colnames(med_agb_data)[-c(1:2)]) -# +# # sdev_agb_data$Site_Name = as.character(sdev_agb_data$Site_Name, stringsAsFactors = FALSE) # sdev_agb_data = reshape2::melt(sdev_agb_data, id.vars = "Site_ID", measure.vars = colnames(sdev_agb_data)[-c(1:2)]) -# +# # agb_data = as.data.frame(cbind(med_agb_data, sdev_agb_data$value)) # names(agb_data) = c("Site_ID", "Date", "Median", "SD") # agb_data$Date = as.character(agb_data$Date, stringsAsFactors = FALSE) # # save AGB data into long style -#save(agb_data, file = '/data/bmorrison/sda/lai/50_site_run/agb_data_sites.Rdata') +# save(agb_data, file = '/data/bmorrison/sda/lai/50_site_run/agb_data_sites.Rdata') ######### calculate peak_lai # already in long format style for dataframe -names(lai_sd) = c("modis_date", "calendar_date", "band", "tile", "site_id", "lat", "lon", "pixels", "sd", "qc") -output = cbind(lai_data, lai_sd$sd) -names(output) = c(names(lai_data), "sd") -#output = as.data.frame(data) -save(output, file = '/data/bmorrison/sda/lai/50_site_run/all_lai_data.Rdata') +names(lai_sd) <- c("modis_date", "calendar_date", "band", "tile", "site_id", "lat", "lon", "pixels", "sd", "qc") +output <- cbind(lai_data, lai_sd$sd) +names(output) <- c(names(lai_data), "sd") +# output = as.data.frame(data) +save(output, file = "/data/bmorrison/sda/lai/50_site_run/all_lai_data.Rdata") # 
change tile names to the site name h # remove extra data -output = output[,c(5, 2, 9, 11)] -colnames(output) = names(agb_data) +output <- output[, c(5, 2, 9, 11)] +colnames(output) <- names(agb_data) # compute peak lai per year -data = output -peak_lai = data.frame() -years = unique(year(as.Date(data$Date, "%Y-%m-%d"))) +data <- output +peak_lai <- data.frame() +years <- unique(year(as.Date(data$Date, "%Y-%m-%d"))) for (i in seq_along(years)) { - d = data[grep(data$Date, pattern = years[i]),] - sites = unique(d$Site_ID) + d <- data[grep(data$Date, pattern = years[i]), ] + sites <- unique(d$Site_ID) for (j in seq_along(sites)) { - index = which(d$Site_ID == site_info$site_id[j]) #which(round(d$lat, digits = 3) == round(site_info$lat[j], digits = 3) & round(d$lon, digits = 3) == round(site_info$lon[j], digits = 3)) - site = d[index,] - if (length(index) > 0) - { + index <- which(d$Site_ID == site_info$site_id[j]) # which(round(d$lat, digits = 3) == round(site_info$lat[j], digits = 3) & round(d$lon, digits = 3) == round(site_info$lon[j], digits = 3)) + site <- d[index, ] + if (length(index) > 0) { # peak lai is the max value that is the value <95th quantile to remove potential outlier values - max = site[which(site$Median == max(site$Median[which(site$Median <= quantile(site$Median, probs = 0.95))], na.rm = T))[1],] #which(d$Median == max(d$Median[index], na.rm = T))[1] - peak = data.frame(max$Site_ID, Date = paste("Year", years[i], sep = "_"), Median = max$Median, SD = max$SD) - peak_lai = rbind(peak_lai, peak) - + max <- site[which(site$Median == max(site$Median[which(site$Median <= quantile(site$Median, probs = 0.95))], na.rm = T))[1], ] # which(d$Median == max(d$Median[index], na.rm = T))[1] + peak <- data.frame(max$Site_ID, Date = paste("Year", years[i], sep = "_"), Median = max$Median, SD = max$SD) + peak_lai <- rbind(peak_lai, peak) } } } # a fix for low SD values because of an issue with MODIS LAI error calculations. Reference: VISKARI et al 2014. 
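# The per-year peak rule used above (take the maximum of values at or below the
# 95th percentile, to drop outlier retrievals) can be summarized as a small
# helper; sketch only, names are illustrative rather than part of the script:
# peak_below_q95 <- function(x, probs = 0.95) {
#   max(x[x <= quantile(x, probs = probs, na.rm = TRUE)], na.rm = TRUE)
# }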
-peak_lai$SD[peak_lai$SD < 0.66] = 0.66 +peak_lai$SD[peak_lai$SD < 0.66] <- 0.66 -#output data -names(peak_lai) = c("Site_ID", "Date", "Median", "SD") -save(peak_lai, file = '/data/bmorrison/sda/lai/50_site_run/peak_lai_data.Rdata') +# output data +names(peak_lai) <- c("Site_ID", "Date", "Median", "SD") +save(peak_lai, file = "/data/bmorrison/sda/lai/50_site_run/peak_lai_data.Rdata") # ######################### TIME TO FIX UP THE OBSERVED DATASETS INTO A FORMAT THAT WORKS TO MAKE OBS.MEAN and OBS.COV FOR SDA ######################## -peak_lai$Site_ID = as.numeric(as.character(peak_lai$Site_ID, stringsAsFactors = F)) -peak_lai$Date = as.character(peak_lai$Date, stringsAsFactors = F) +peak_lai$Site_ID <- as.numeric(as.character(peak_lai$Site_ID, stringsAsFactors = F)) +peak_lai$Date <- as.character(peak_lai$Date, stringsAsFactors = F) -observed_vars = c("AbvGrndWood", "LAI") +observed_vars <- c("AbvGrndWood", "LAI") # merge agb and lai dataframes and places NA values where data is missing between the 2 datasets -observed_data = merge(agb_data, peak_lai, by = c("Site_ID", "Date"), all = T) -names(observed_data) = c("Site_ID", "Date", "med_agb", "sdev_agb", "med_lai", "sdev_lai") +observed_data <- merge(agb_data, peak_lai, by = c("Site_ID", "Date"), all = T) +names(observed_data) <- c("Site_ID", "Date", "med_agb", "sdev_agb", "med_lai", "sdev_lai") # order by year -observed_data = observed_data[order(observed_data$Date),] +observed_data <- observed_data[order(observed_data$Date), ] -#sort by date -dates = sort(unique(observed_data$Date)) +# sort by date +dates <- sort(unique(observed_data$Date)) # create the obs.mean list --> this needs to be adjusted to work with load.data in the future (via hackathon) -obs.mean = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, med_agb = observed_data$med_agb, med_lai = observed_data$med_lai) -obs.mean$date = as.character(obs.mean$date, stringsAsFactors = FALSE) +obs.mean <- data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, med_agb = observed_data$med_agb, med_lai = observed_data$med_lai) +obs.mean$date <- as.character(obs.mean$date, stringsAsFactors = FALSE) -obs.mean = obs.mean %>% +obs.mean <- obs.mean %>% split(.$date) # change the dates to be middle of the year date.obs <- strsplit(names(obs.mean), "_") %>% - map_chr(~.x[2]) %>% paste0(.,"/07/15") + map_chr(~ .x[2]) %>% + paste0(., "/07/15") -obs.mean = names(obs.mean) %>% - map(function(namesl){ +obs.mean <- names(obs.mean) %>% + map(function(namesl) { obs.mean[[namesl]] %>% split(.$site_id) %>% - map(~.x[3:4] %>% setNames(c("AbvGrndWood", "LAI")) %>% `row.names<-`(NULL)) - #setNames(site.ids) - }) %>% setNames(date.obs) - -#remove NA data as this will crash the SDA. Removes rown numbers (may not be nessesary) -names = date.obs + map(~ .x[3:4] %>% + setNames(c("AbvGrndWood", "LAI")) %>% + `row.names<-`(NULL)) + # setNames(site.ids) + }) %>% + setNames(date.obs) + +# remove NA data as this will crash the SDA. 
Removes rown numbers (may not be nessesary) +names <- date.obs for (name in names) { for (site in names(obs.mean[[name]])) { - na_index = which(!(is.na(obs.mean[[ name]][[site]]))) - colnames = names(obs.mean[[name]][[site]]) - if (length(na_index) > 0) - { - obs.mean[[name]][[site]] = obs.mean[[name]][[site]][na_index] + na_index <- which(!(is.na(obs.mean[[name]][[site]]))) + colnames <- names(obs.mean[[name]][[site]]) + if (length(na_index) > 0) { + obs.mean[[name]][[site]] <- obs.mean[[name]][[site]][na_index] } } } @@ -247,42 +267,40 @@ for (name in names) # names(filler_0) = paste0("h", seq_len(length(observed_vars))) # create obs.cov dataframe -->list by date -obs.cov = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, sdev_agb = observed_data$sdev_agb, sdev_lai = observed_data$sdev_lai)#, filler_0) -obs.cov$date = as.character(obs.cov$date, stringsAsFactors = F) +obs.cov <- data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, sdev_agb = observed_data$sdev_agb, sdev_lai = observed_data$sdev_lai) # , filler_0) +obs.cov$date <- as.character(obs.cov$date, stringsAsFactors = F) -obs.cov = obs.cov %>% +obs.cov <- obs.cov %>% split(.$date) -obs.cov = names(obs.cov) %>% - map(function(namesl){ +obs.cov <- names(obs.cov) %>% + map(function(namesl) { obs.cov[[namesl]] %>% split(.$site_id) %>% - map(~.x[3:4]^2 %>% unlist %>% diag(nrow = 2, ncol = 2) ) - }) %>% setNames(date.obs) + map(~ .x[3:4]^2 %>% + unlist() %>% + diag(nrow = 2, ncol = 2)) + }) %>% + setNames(date.obs) -names = date.obs +names <- date.obs for (name in names) { for (site in names(obs.cov[[name]])) { - bad = which(apply(obs.cov[[name]][[site]], 2, function(x) any(is.na(x))) == TRUE) - if (length(bad) > 0) - { - obs.cov[[name]][[site]] = obs.cov[[name]][[site]][,-bad] - if (is.null(dim(obs.cov[[name]][[site]]))) - { - obs.cov[[name]][[site]] = obs.cov[[name]][[site]][-bad] + bad <- which(apply(obs.cov[[name]][[site]], 2, function(x) any(is.na(x))) == TRUE) + if (length(bad) > 0) { + obs.cov[[name]][[site]] <- obs.cov[[name]][[site]][, -bad] + if (is.null(dim(obs.cov[[name]][[site]]))) { + obs.cov[[name]][[site]] <- obs.cov[[name]][[site]][-bad] } else { - obs.cov[[name]][[site]] = obs.cov[[name]][[site]][-bad,] + obs.cov[[name]][[site]] <- obs.cov[[name]][[site]][-bad, ] } } } } -save(obs.mean, file = '/data/bmorrison/sda/lai/50_site_run/obs_mean_50.Rdata') -save(obs.cov, file = '/data/bmorrison/sda/lai/50_site_run/obs_cov_50.Rdata') - - - +save(obs.mean, file = "/data/bmorrison/sda/lai/50_site_run/obs_mean_50.Rdata") +save(obs.cov, file = "/data/bmorrison/sda/lai/50_site_run/obs_cov_50.Rdata") diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/extract_50_sitegroup_data.R b/modules/assim.sequential/inst/sda_backup/bmorrison/extract_50_sitegroup_data.R index cfdb69ee0b5..70782634a82 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/extract_50_sitegroup_data.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/extract_50_sitegroup_data.R @@ -1,6 +1,6 @@ -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -13,7 +13,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) 
-#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -25,19 +25,21 @@ library(tictoc) work_dir <- "/data/bmorrison/sda/lai" # delete an old run -#unlink(c('run','out','SDA'),recursive = T) +# unlink(c('run','out','SDA'),recursive = T) # grab multi-site XML file settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB_sitegroup.xml") -if ("sitegroup" %in% names(settings)){ - if (is.null(settings$sitegroup$nSite)){ +if ("sitegroup" %in% names(settings)) { + if (is.null(settings$sitegroup$nSite)) { settings <- PEcAn.settings::createSitegroupMultiSettings(settings, - sitegroupId = settings$sitegroup$id) + sitegroupId = settings$sitegroup$id + ) } else { settings <- PEcAn.settings::createSitegroupMultiSettings(settings, - sitegroupId = settings$sitegroup$id, - nSite = settings$sitegroup$nSite) + sitegroupId = settings$sitegroup$id, + nSite = settings$sitegroup$nSite + ) } settings$sitegroup <- NULL ## zero out so don't expand a second time if re-reading } @@ -48,196 +50,214 @@ if ("sitegroup" %in% names(settings)){ # doesn't work for one site observation <- c() for (i in seq_along(1:length(settings$run))) { - command <- paste0("settings$run$settings.",i,"$site$id") - obs <- eval(parse(text=command)) - observation <- c(observation,obs) + command <- paste0("settings$run$settings.", i, "$site$id") + obs <- eval(parse(text = command)) + observation <- c(observation, obs) } # what is this step for???? is this to get the site locations for the map?? -if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% - map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} # sample from parameters used for both sensitivity analysis and Ens -# get.parameter.samples(settings, -# ens.sample.method = settings$ensemble$samplingspace$parameters$method) +# get.parameter.samples(settings, +# ens.sample.method = settings$ensemble$samplingspace$parameters$method) # ## Aside: if method were set to unscented, would take minimal changes to do UnKF # #--------------------------------------------------------------------------------------------------# ############################ EXTRACT SITE INFORMATION FROM XML TO DOWNLOAD DATA + RUN SDA ########################### ################ Not working on interactive job on MODEX -observations = observation -lai_data = data.frame() +observations <- observation +lai_data <- data.frame() for (i in 1:5) { - start = (1+((i-1)*10)) - end = start+9 - - obs = observations[start:end] - - working = print(paste("working on: ", i)) - sites = print(obs) + start <- (1 + ((i - 1) * 10)) + end <- start + 9 + + obs <- observations[start:end] + + working <- print(paste("working on: ", i)) + sites <- print(obs) PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") - bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) + bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE + ) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- obs suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) + ids = 
site_ID, .con = con + )) - suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) + suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) - site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) - + site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone + ) - lai = call_MODIS(outdir = NULL, var = "LAI", site_info = site_info, product_dates = c("1980001", "2018365"), - run_parallel = TRUE, ncores = 10, product = "MOD15A2H", band = "Lai_500m", - package_method = "MODISTools", QC_filter = TRUE, progress = FALSE) - lai_data = rbind(lai_data, lai) + lai <- call_MODIS( + outdir = NULL, var = "LAI", site_info = site_info, product_dates = c("1980001", "2018365"), + run_parallel = TRUE, ncores = 10, product = "MOD15A2H", band = "Lai_500m", + package_method = "MODISTools", QC_filter = TRUE, progress = FALSE + ) + lai_data <- rbind(lai_data, lai) } -lai_sd = lai_data -save(lai_data, file = '/data/bmorrison/sda/lai/50_site_run/lai_data_sites.Rdata') +lai_sd <- lai_data +save(lai_data, file = "/data/bmorrison/sda/lai/50_site_run/lai_data_sites.Rdata") -observation = observations +observation <- observations PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- observation suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) + ids = site_ID, .con = con +)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) # # output folder for the data # data_dir <- "/data2/RS_GIS_Data/LandTrendr/LandTrendr_AGB_data" -# +# # # # extract the data # med_agb_data <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean", # data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]] -# +# # sdev_agb_data <- extract.LandTrendr.AGB(site_info, "stdv", buffer = NULL, fun = "mean", # data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]] -# -# +# +# # ndates = colnames(med_agb_data)[-c(1:2)] -# +# # med_agb_data$Site_Name = as.character(med_agb_data$Site_Name, stringsAsFactors = FALSE) # med_agb_data = reshape2::melt(med_agb_data, id.vars = "Site_ID", measure.vars = colnames(med_agb_data)[-c(1:2)]) -# +# # sdev_agb_data$Site_Name = as.character(sdev_agb_data$Site_Name, stringsAsFactors = FALSE) # sdev_agb_data = reshape2::melt(sdev_agb_data, id.vars = "Site_ID", measure.vars = colnames(sdev_agb_data)[-c(1:2)]) -# +# # agb_data = as.data.frame(cbind(med_agb_data, sdev_agb_data$value)) # names(agb_data) = c("Site_ID", 
"Date", "Median", "SD") # agb_data$Date = as.character(agb_data$Date, stringsAsFactors = FALSE) # # save AGB data into long style -#save(agb_data, file = '/data/bmorrison/sda/lai/50_site_run/agb_data_sites.Rdata') +# save(agb_data, file = '/data/bmorrison/sda/lai/50_site_run/agb_data_sites.Rdata') ######### calculate peak_lai # already in long format style for dataframe -names(lai_sd) = c("modis_date", "calendar_date", "band", "tile", "site_id", "lat", "lon", "pixels", "sd", "qc") -output = cbind(lai_data, lai_sd$sd) -names(output) = c(names(lai_data), "sd") -#output = as.data.frame(data) -save(output, file = '/data/bmorrison/sda/lai/50_site_run/all_lai_data.Rdata') +names(lai_sd) <- c("modis_date", "calendar_date", "band", "tile", "site_id", "lat", "lon", "pixels", "sd", "qc") +output <- cbind(lai_data, lai_sd$sd) +names(output) <- c(names(lai_data), "sd") +# output = as.data.frame(data) +save(output, file = "/data/bmorrison/sda/lai/50_site_run/all_lai_data.Rdata") # change tile names to the site name h # remove extra data -output = output[,c(5, 2, 9, 11)] -colnames(output) = names(agb_data) +output <- output[, c(5, 2, 9, 11)] +colnames(output) <- names(agb_data) # compute peak lai per year -data = output -peak_lai = data.frame() -years = unique(year(as.Date(data$Date, "%Y-%m-%d"))) +data <- output +peak_lai <- data.frame() +years <- unique(year(as.Date(data$Date, "%Y-%m-%d"))) for (i in seq_along(years)) { - d = data[grep(data$Date, pattern = years[i]),] - sites = unique(d$Site_ID) + d <- data[grep(data$Date, pattern = years[i]), ] + sites <- unique(d$Site_ID) for (j in seq_along(sites)) { - index = which(d$Site_ID == site_info$site_id[j]) #which(round(d$lat, digits = 3) == round(site_info$lat[j], digits = 3) & round(d$lon, digits = 3) == round(site_info$lon[j], digits = 3)) - site = d[index,] - if (length(index) > 0) - { + index <- which(d$Site_ID == site_info$site_id[j]) # which(round(d$lat, digits = 3) == round(site_info$lat[j], digits = 3) & round(d$lon, digits = 3) == round(site_info$lon[j], digits = 3)) + site <- d[index, ] + if (length(index) > 0) { # peak lai is the max value that is the value <95th quantile to remove potential outlier values - max = site[which(site$Median == max(site$Median[which(site$Median <= quantile(site$Median, probs = 0.95))], na.rm = T))[1],] #which(d$Median == max(d$Median[index], na.rm = T))[1] - peak = data.frame(max$Site_ID, Date = paste("Year", years[i], sep = "_"), Median = max$Median, SD = max$SD) - peak_lai = rbind(peak_lai, peak) - + max <- site[which(site$Median == max(site$Median[which(site$Median <= quantile(site$Median, probs = 0.95))], na.rm = T))[1], ] # which(d$Median == max(d$Median[index], na.rm = T))[1] + peak <- data.frame(max$Site_ID, Date = paste("Year", years[i], sep = "_"), Median = max$Median, SD = max$SD) + peak_lai <- rbind(peak_lai, peak) } } } # a fix for low SD values because of an issue with MODIS LAI error calculations. Reference: VISKARI et al 2014. 
-peak_lai$SD[peak_lai$SD < 0.66] = 0.66 +peak_lai$SD[peak_lai$SD < 0.66] <- 0.66 -#output data -names(peak_lai) = c("Site_ID", "Date", "Median", "SD") -save(peak_lai, file = '/data/bmorrison/sda/lai/50_site_run/peak_lai_data.Rdata') +# output data +names(peak_lai) <- c("Site_ID", "Date", "Median", "SD") +save(peak_lai, file = "/data/bmorrison/sda/lai/50_site_run/peak_lai_data.Rdata") # ######################### TIME TO FIX UP THE OBSERVED DATASETS INTO A FORMAT THAT WORKS TO MAKE OBS.MEAN and OBS.COV FOR SDA ######################## -peak_lai$Site_ID = as.numeric(as.character(peak_lai$Site_ID, stringsAsFactors = F)) -peak_lai$Date = as.character(peak_lai$Date, stringsAsFactors = F) +peak_lai$Site_ID <- as.numeric(as.character(peak_lai$Site_ID, stringsAsFactors = F)) +peak_lai$Date <- as.character(peak_lai$Date, stringsAsFactors = F) -observed_vars = c("AbvGrndWood", "LAI") +observed_vars <- c("AbvGrndWood", "LAI") # merge agb and lai dataframes and places NA values where data is missing between the 2 datasets -observed_data = merge(agb_data, peak_lai, by = c("Site_ID", "Date"), all = T) -names(observed_data) = c("Site_ID", "Date", "med_agb", "sdev_agb", "med_lai", "sdev_lai") +observed_data <- merge(agb_data, peak_lai, by = c("Site_ID", "Date"), all = T) +names(observed_data) <- c("Site_ID", "Date", "med_agb", "sdev_agb", "med_lai", "sdev_lai") # order by year -observed_data = observed_data[order(observed_data$Date),] +observed_data <- observed_data[order(observed_data$Date), ] -#sort by date -dates = sort(unique(observed_data$Date)) +# sort by date +dates <- sort(unique(observed_data$Date)) # create the obs.mean list --> this needs to be adjusted to work with load.data in the future (via hackathon) -obs.mean = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, med_agb = observed_data$med_agb, med_lai = observed_data$med_lai) -obs.mean$date = as.character(obs.mean$date, stringsAsFactors = FALSE) +obs.mean <- data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, med_agb = observed_data$med_agb, med_lai = observed_data$med_lai) +obs.mean$date <- as.character(obs.mean$date, stringsAsFactors = FALSE) -obs.mean = obs.mean %>% +obs.mean <- obs.mean %>% split(.$date) # change the dates to be middle of the year date.obs <- strsplit(names(obs.mean), "_") %>% - map_chr(~.x[2]) %>% paste0(.,"/07/15") + map_chr(~ .x[2]) %>% + paste0(., "/07/15") -obs.mean = names(obs.mean) %>% - map(function(namesl){ +obs.mean <- names(obs.mean) %>% + map(function(namesl) { obs.mean[[namesl]] %>% split(.$site_id) %>% - map(~.x[3:4] %>% setNames(c("AbvGrndWood", "LAI")) %>% `row.names<-`(NULL)) - #setNames(site.ids) - }) %>% setNames(date.obs) - -#remove NA data as this will crash the SDA. Removes rown numbers (may not be nessesary) -names = date.obs + map(~ .x[3:4] %>% + setNames(c("AbvGrndWood", "LAI")) %>% + `row.names<-`(NULL)) + # setNames(site.ids) + }) %>% + setNames(date.obs) + +# remove NA data as this will crash the SDA. 
Removes rown numbers (may not be nessesary) +names <- date.obs for (name in names) { for (site in names(obs.mean[[name]])) { - na_index = which(!(is.na(obs.mean[[ name]][[site]]))) - colnames = names(obs.mean[[name]][[site]]) - if (length(na_index) > 0) - { - obs.mean[[name]][[site]] = obs.mean[[name]][[site]][na_index] + na_index <- which(!(is.na(obs.mean[[name]][[site]]))) + colnames <- names(obs.mean[[name]][[site]]) + if (length(na_index) > 0) { + obs.mean[[name]][[site]] <- obs.mean[[name]][[site]][na_index] } } } @@ -247,43 +267,40 @@ for (name in names) # names(filler_0) = paste0("h", seq_len(length(observed_vars))) # create obs.cov dataframe -->list by date -obs.cov = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, sdev_agb = observed_data$sdev_agb, sdev_lai = observed_data$sdev_lai)#, filler_0) -obs.cov$date = as.character(obs.cov$date, stringsAsFactors = F) +obs.cov <- data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, sdev_agb = observed_data$sdev_agb, sdev_lai = observed_data$sdev_lai) # , filler_0) +obs.cov$date <- as.character(obs.cov$date, stringsAsFactors = F) -obs.cov = obs.cov %>% +obs.cov <- obs.cov %>% split(.$date) -obs.cov = names(obs.cov) %>% - map(function(namesl){ +obs.cov <- names(obs.cov) %>% + map(function(namesl) { obs.cov[[namesl]] %>% split(.$site_id) %>% - map(~.x[3:4]^2 %>% unlist %>% diag(nrow = 2, ncol = 2) ) - }) %>% setNames(date.obs) + map(~ .x[3:4]^2 %>% + unlist() %>% + diag(nrow = 2, ncol = 2)) + }) %>% + setNames(date.obs) -names = date.obs +names <- date.obs for (name in names) { for (site in names(obs.cov[[name]])) { - bad = which(apply(obs.cov[[name]][[site]], 2, function(x) any(is.na(x))) == TRUE) - if (length(bad) > 0) - { - obs.cov[[name]][[site]] = obs.cov[[name]][[site]][,-bad] - if (is.null(dim(obs.cov[[name]][[site]]))) - { - obs.cov[[name]][[site]] = obs.cov[[name]][[site]][-bad] + bad <- which(apply(obs.cov[[name]][[site]], 2, function(x) any(is.na(x))) == TRUE) + if (length(bad) > 0) { + obs.cov[[name]][[site]] <- obs.cov[[name]][[site]][, -bad] + if (is.null(dim(obs.cov[[name]][[site]]))) { + obs.cov[[name]][[site]] <- obs.cov[[name]][[site]][-bad] } else { - obs.cov[[name]][[site]] = obs.cov[[name]][[site]][-bad,] + obs.cov[[name]][[site]] <- obs.cov[[name]][[site]][-bad, ] } } } } -save(obs.mean, file = '/data/bmorrison/sda/lai/50_site_run/obs_mean_50.Rdata') -save(obs.cov, file = '/data/bmorrison/sda/lai/50_site_run/obs_cov_50.Rdata') - - - - \ No newline at end of file +save(obs.mean, file = "/data/bmorrison/sda/lai/50_site_run/obs_mean_50.Rdata") +save(obs.cov, file = "/data/bmorrison/sda/lai/50_site_run/obs_cov_50.Rdata") diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/extract_50_sitegroup_data_2.R b/modules/assim.sequential/inst/sda_backup/bmorrison/extract_50_sitegroup_data_2.R index cfdb69ee0b5..70782634a82 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/extract_50_sitegroup_data_2.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/extract_50_sitegroup_data_2.R @@ -1,6 +1,6 @@ -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -13,7 +13,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) 
library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -25,19 +25,21 @@ library(tictoc) work_dir <- "/data/bmorrison/sda/lai" # delete an old run -#unlink(c('run','out','SDA'),recursive = T) +# unlink(c('run','out','SDA'),recursive = T) # grab multi-site XML file settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB_sitegroup.xml") -if ("sitegroup" %in% names(settings)){ - if (is.null(settings$sitegroup$nSite)){ +if ("sitegroup" %in% names(settings)) { + if (is.null(settings$sitegroup$nSite)) { settings <- PEcAn.settings::createSitegroupMultiSettings(settings, - sitegroupId = settings$sitegroup$id) + sitegroupId = settings$sitegroup$id + ) } else { settings <- PEcAn.settings::createSitegroupMultiSettings(settings, - sitegroupId = settings$sitegroup$id, - nSite = settings$sitegroup$nSite) + sitegroupId = settings$sitegroup$id, + nSite = settings$sitegroup$nSite + ) } settings$sitegroup <- NULL ## zero out so don't expand a second time if re-reading } @@ -48,196 +50,214 @@ if ("sitegroup" %in% names(settings)){ # doesn't work for one site observation <- c() for (i in seq_along(1:length(settings$run))) { - command <- paste0("settings$run$settings.",i,"$site$id") - obs <- eval(parse(text=command)) - observation <- c(observation,obs) + command <- paste0("settings$run$settings.", i, "$site$id") + obs <- eval(parse(text = command)) + observation <- c(observation, obs) } # what is this step for???? is this to get the site locations for the map?? -if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% - map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} # sample from parameters used for both sensitivity analysis and Ens -# get.parameter.samples(settings, -# ens.sample.method = settings$ensemble$samplingspace$parameters$method) +# get.parameter.samples(settings, +# ens.sample.method = settings$ensemble$samplingspace$parameters$method) # ## Aside: if method were set to unscented, would take minimal changes to do UnKF # #--------------------------------------------------------------------------------------------------# ############################ EXTRACT SITE INFORMATION FROM XML TO DOWNLOAD DATA + RUN SDA ########################### ################ Not working on interactive job on MODEX -observations = observation -lai_data = data.frame() +observations <- observation +lai_data <- data.frame() for (i in 1:5) { - start = (1+((i-1)*10)) - end = start+9 - - obs = observations[start:end] - - working = print(paste("working on: ", i)) - sites = print(obs) + start <- (1 + ((i - 1) * 10)) + end <- start + 9 + + obs <- observations[start:end] + + working <- print(paste("working on: ", i)) + sites <- print(obs) PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") - bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) + bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE + ) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- obs suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = 
site_ID, .con = con)) + ids = site_ID, .con = con + )) - suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) + suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) - site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) - + site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone + ) - lai = call_MODIS(outdir = NULL, var = "LAI", site_info = site_info, product_dates = c("1980001", "2018365"), - run_parallel = TRUE, ncores = 10, product = "MOD15A2H", band = "Lai_500m", - package_method = "MODISTools", QC_filter = TRUE, progress = FALSE) - lai_data = rbind(lai_data, lai) + lai <- call_MODIS( + outdir = NULL, var = "LAI", site_info = site_info, product_dates = c("1980001", "2018365"), + run_parallel = TRUE, ncores = 10, product = "MOD15A2H", band = "Lai_500m", + package_method = "MODISTools", QC_filter = TRUE, progress = FALSE + ) + lai_data <- rbind(lai_data, lai) } -lai_sd = lai_data -save(lai_data, file = '/data/bmorrison/sda/lai/50_site_run/lai_data_sites.Rdata') +lai_sd <- lai_data +save(lai_data, file = "/data/bmorrison/sda/lai/50_site_run/lai_data_sites.Rdata") -observation = observations +observation <- observations PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- observation suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) + ids = site_ID, .con = con +)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) # # output folder for the data # data_dir <- "/data2/RS_GIS_Data/LandTrendr/LandTrendr_AGB_data" -# +# # # # extract the data # med_agb_data <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean", # data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]] -# +# # sdev_agb_data <- extract.LandTrendr.AGB(site_info, "stdv", buffer = NULL, fun = "mean", # data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]] -# -# +# +# # ndates = colnames(med_agb_data)[-c(1:2)] -# +# # med_agb_data$Site_Name = as.character(med_agb_data$Site_Name, stringsAsFactors = FALSE) # med_agb_data = reshape2::melt(med_agb_data, id.vars = "Site_ID", measure.vars = colnames(med_agb_data)[-c(1:2)]) -# +# # sdev_agb_data$Site_Name = as.character(sdev_agb_data$Site_Name, stringsAsFactors = FALSE) # sdev_agb_data = reshape2::melt(sdev_agb_data, id.vars = "Site_ID", measure.vars = colnames(sdev_agb_data)[-c(1:2)]) -# +# # agb_data = as.data.frame(cbind(med_agb_data, sdev_agb_data$value)) # 
names(agb_data) = c("Site_ID", "Date", "Median", "SD") # agb_data$Date = as.character(agb_data$Date, stringsAsFactors = FALSE) # # save AGB data into long style -#save(agb_data, file = '/data/bmorrison/sda/lai/50_site_run/agb_data_sites.Rdata') +# save(agb_data, file = '/data/bmorrison/sda/lai/50_site_run/agb_data_sites.Rdata') ######### calculate peak_lai # already in long format style for dataframe -names(lai_sd) = c("modis_date", "calendar_date", "band", "tile", "site_id", "lat", "lon", "pixels", "sd", "qc") -output = cbind(lai_data, lai_sd$sd) -names(output) = c(names(lai_data), "sd") -#output = as.data.frame(data) -save(output, file = '/data/bmorrison/sda/lai/50_site_run/all_lai_data.Rdata') +names(lai_sd) <- c("modis_date", "calendar_date", "band", "tile", "site_id", "lat", "lon", "pixels", "sd", "qc") +output <- cbind(lai_data, lai_sd$sd) +names(output) <- c(names(lai_data), "sd") +# output = as.data.frame(data) +save(output, file = "/data/bmorrison/sda/lai/50_site_run/all_lai_data.Rdata") # change tile names to the site name h # remove extra data -output = output[,c(5, 2, 9, 11)] -colnames(output) = names(agb_data) +output <- output[, c(5, 2, 9, 11)] +colnames(output) <- names(agb_data) # compute peak lai per year -data = output -peak_lai = data.frame() -years = unique(year(as.Date(data$Date, "%Y-%m-%d"))) +data <- output +peak_lai <- data.frame() +years <- unique(year(as.Date(data$Date, "%Y-%m-%d"))) for (i in seq_along(years)) { - d = data[grep(data$Date, pattern = years[i]),] - sites = unique(d$Site_ID) + d <- data[grep(data$Date, pattern = years[i]), ] + sites <- unique(d$Site_ID) for (j in seq_along(sites)) { - index = which(d$Site_ID == site_info$site_id[j]) #which(round(d$lat, digits = 3) == round(site_info$lat[j], digits = 3) & round(d$lon, digits = 3) == round(site_info$lon[j], digits = 3)) - site = d[index,] - if (length(index) > 0) - { + index <- which(d$Site_ID == site_info$site_id[j]) # which(round(d$lat, digits = 3) == round(site_info$lat[j], digits = 3) & round(d$lon, digits = 3) == round(site_info$lon[j], digits = 3)) + site <- d[index, ] + if (length(index) > 0) { # peak lai is the max value that is the value <95th quantile to remove potential outlier values - max = site[which(site$Median == max(site$Median[which(site$Median <= quantile(site$Median, probs = 0.95))], na.rm = T))[1],] #which(d$Median == max(d$Median[index], na.rm = T))[1] - peak = data.frame(max$Site_ID, Date = paste("Year", years[i], sep = "_"), Median = max$Median, SD = max$SD) - peak_lai = rbind(peak_lai, peak) - + max <- site[which(site$Median == max(site$Median[which(site$Median <= quantile(site$Median, probs = 0.95))], na.rm = T))[1], ] # which(d$Median == max(d$Median[index], na.rm = T))[1] + peak <- data.frame(max$Site_ID, Date = paste("Year", years[i], sep = "_"), Median = max$Median, SD = max$SD) + peak_lai <- rbind(peak_lai, peak) } } } # a fix for low SD values because of an issue with MODIS LAI error calculations. Reference: VISKARI et al 2014. 
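# Illustrative sketch, not applied by this patch: the peak-LAI-per-year step above,
# together with the 0.66 SD floor applied just below, could equivalently be written as
# a dplyr pipeline. Column names Site_ID/Date/Median/SD follow the script;
# `peak_lai_sketch` is a hypothetical name and it reuses the same `output` data frame.
peak_lai_sketch <- output %>%
  dplyr::mutate(year = lubridate::year(as.Date(Date))) %>%
  dplyr::group_by(Site_ID, year) %>%
  dplyr::filter(Median <= stats::quantile(Median, probs = 0.95, na.rm = TRUE)) %>%
  dplyr::slice_max(Median, n = 1, with_ties = FALSE) %>%
  dplyr::ungroup() %>%
  dplyr::transmute(
    Site_ID,
    Date = paste("Year", year, sep = "_"),
    Median,
    SD = pmax(SD, 0.66) # floor low MODIS LAI SD values, per the note below
  )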
-peak_lai$SD[peak_lai$SD < 0.66] = 0.66 +peak_lai$SD[peak_lai$SD < 0.66] <- 0.66 -#output data -names(peak_lai) = c("Site_ID", "Date", "Median", "SD") -save(peak_lai, file = '/data/bmorrison/sda/lai/50_site_run/peak_lai_data.Rdata') +# output data +names(peak_lai) <- c("Site_ID", "Date", "Median", "SD") +save(peak_lai, file = "/data/bmorrison/sda/lai/50_site_run/peak_lai_data.Rdata") # ######################### TIME TO FIX UP THE OBSERVED DATASETS INTO A FORMAT THAT WORKS TO MAKE OBS.MEAN and OBS.COV FOR SDA ######################## -peak_lai$Site_ID = as.numeric(as.character(peak_lai$Site_ID, stringsAsFactors = F)) -peak_lai$Date = as.character(peak_lai$Date, stringsAsFactors = F) +peak_lai$Site_ID <- as.numeric(as.character(peak_lai$Site_ID, stringsAsFactors = F)) +peak_lai$Date <- as.character(peak_lai$Date, stringsAsFactors = F) -observed_vars = c("AbvGrndWood", "LAI") +observed_vars <- c("AbvGrndWood", "LAI") # merge agb and lai dataframes and places NA values where data is missing between the 2 datasets -observed_data = merge(agb_data, peak_lai, by = c("Site_ID", "Date"), all = T) -names(observed_data) = c("Site_ID", "Date", "med_agb", "sdev_agb", "med_lai", "sdev_lai") +observed_data <- merge(agb_data, peak_lai, by = c("Site_ID", "Date"), all = T) +names(observed_data) <- c("Site_ID", "Date", "med_agb", "sdev_agb", "med_lai", "sdev_lai") # order by year -observed_data = observed_data[order(observed_data$Date),] +observed_data <- observed_data[order(observed_data$Date), ] -#sort by date -dates = sort(unique(observed_data$Date)) +# sort by date +dates <- sort(unique(observed_data$Date)) # create the obs.mean list --> this needs to be adjusted to work with load.data in the future (via hackathon) -obs.mean = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, med_agb = observed_data$med_agb, med_lai = observed_data$med_lai) -obs.mean$date = as.character(obs.mean$date, stringsAsFactors = FALSE) +obs.mean <- data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, med_agb = observed_data$med_agb, med_lai = observed_data$med_lai) +obs.mean$date <- as.character(obs.mean$date, stringsAsFactors = FALSE) -obs.mean = obs.mean %>% +obs.mean <- obs.mean %>% split(.$date) # change the dates to be middle of the year date.obs <- strsplit(names(obs.mean), "_") %>% - map_chr(~.x[2]) %>% paste0(.,"/07/15") + map_chr(~ .x[2]) %>% + paste0(., "/07/15") -obs.mean = names(obs.mean) %>% - map(function(namesl){ +obs.mean <- names(obs.mean) %>% + map(function(namesl) { obs.mean[[namesl]] %>% split(.$site_id) %>% - map(~.x[3:4] %>% setNames(c("AbvGrndWood", "LAI")) %>% `row.names<-`(NULL)) - #setNames(site.ids) - }) %>% setNames(date.obs) - -#remove NA data as this will crash the SDA. Removes rown numbers (may not be nessesary) -names = date.obs + map(~ .x[3:4] %>% + setNames(c("AbvGrndWood", "LAI")) %>% + `row.names<-`(NULL)) + # setNames(site.ids) + }) %>% + setNames(date.obs) + +# remove NA data as this will crash the SDA. 
Removes rown numbers (may not be nessesary) +names <- date.obs for (name in names) { for (site in names(obs.mean[[name]])) { - na_index = which(!(is.na(obs.mean[[ name]][[site]]))) - colnames = names(obs.mean[[name]][[site]]) - if (length(na_index) > 0) - { - obs.mean[[name]][[site]] = obs.mean[[name]][[site]][na_index] + na_index <- which(!(is.na(obs.mean[[name]][[site]]))) + colnames <- names(obs.mean[[name]][[site]]) + if (length(na_index) > 0) { + obs.mean[[name]][[site]] <- obs.mean[[name]][[site]][na_index] } } } @@ -247,43 +267,40 @@ for (name in names) # names(filler_0) = paste0("h", seq_len(length(observed_vars))) # create obs.cov dataframe -->list by date -obs.cov = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, sdev_agb = observed_data$sdev_agb, sdev_lai = observed_data$sdev_lai)#, filler_0) -obs.cov$date = as.character(obs.cov$date, stringsAsFactors = F) +obs.cov <- data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, sdev_agb = observed_data$sdev_agb, sdev_lai = observed_data$sdev_lai) # , filler_0) +obs.cov$date <- as.character(obs.cov$date, stringsAsFactors = F) -obs.cov = obs.cov %>% +obs.cov <- obs.cov %>% split(.$date) -obs.cov = names(obs.cov) %>% - map(function(namesl){ +obs.cov <- names(obs.cov) %>% + map(function(namesl) { obs.cov[[namesl]] %>% split(.$site_id) %>% - map(~.x[3:4]^2 %>% unlist %>% diag(nrow = 2, ncol = 2) ) - }) %>% setNames(date.obs) + map(~ .x[3:4]^2 %>% + unlist() %>% + diag(nrow = 2, ncol = 2)) + }) %>% + setNames(date.obs) -names = date.obs +names <- date.obs for (name in names) { for (site in names(obs.cov[[name]])) { - bad = which(apply(obs.cov[[name]][[site]], 2, function(x) any(is.na(x))) == TRUE) - if (length(bad) > 0) - { - obs.cov[[name]][[site]] = obs.cov[[name]][[site]][,-bad] - if (is.null(dim(obs.cov[[name]][[site]]))) - { - obs.cov[[name]][[site]] = obs.cov[[name]][[site]][-bad] + bad <- which(apply(obs.cov[[name]][[site]], 2, function(x) any(is.na(x))) == TRUE) + if (length(bad) > 0) { + obs.cov[[name]][[site]] <- obs.cov[[name]][[site]][, -bad] + if (is.null(dim(obs.cov[[name]][[site]]))) { + obs.cov[[name]][[site]] <- obs.cov[[name]][[site]][-bad] } else { - obs.cov[[name]][[site]] = obs.cov[[name]][[site]][-bad,] + obs.cov[[name]][[site]] <- obs.cov[[name]][[site]][-bad, ] } } } } -save(obs.mean, file = '/data/bmorrison/sda/lai/50_site_run/obs_mean_50.Rdata') -save(obs.cov, file = '/data/bmorrison/sda/lai/50_site_run/obs_cov_50.Rdata') - - - - \ No newline at end of file +save(obs.mean, file = "/data/bmorrison/sda/lai/50_site_run/obs_mean_50.Rdata") +save(obs.cov, file = "/data/bmorrison/sda/lai/50_site_run/obs_cov_50.Rdata") diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/general_sda_setup.R b/modules/assim.sequential/inst/sda_backup/bmorrison/general_sda_setup.R index 6e48bc71922..a3287acbd29 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/general_sda_setup.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/general_sda_setup.R @@ -1,8 +1,7 @@ - #---------------- Close all devices and delete all variables. 
-------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -15,7 +14,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -28,7 +27,7 @@ library(tictoc) work_dir <- "/data/bmorrison/sda/lai" # delete an old run -unlink(c('run','out','SDA'),recursive = T) +unlink(c("run", "out", "SDA"), recursive = T) # grab multi-site XML file settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB_8_Sites_2009.xml") @@ -36,20 +35,27 @@ settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB_8_Sites_2009.xml") # doesn't work for one site observation <- c() for (i in seq_along(1:length(settings$run))) { - command <- paste0("settings$run$settings.",i,"$site$id") - obs <- eval(parse(text=command)) - observation <- c(observation,obs) + command <- paste0("settings$run$settings.", i, "$site$id") + obs <- eval(parse(text = command)) + observation <- c(observation, obs) } -#observation = "1000000048" +# observation = "1000000048" # what is this step for???? is this to get the site locations for the map?? -if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% - map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} # sample from parameters used for both sensitivity analysis and Ens -get.parameter.samples(settings, - ens.sample.method = settings$ensemble$samplingspace$parameters$method) +get.parameter.samples(settings, + ens.sample.method = settings$ensemble$samplingspace$parameters$method +) ## Aside: if method were set to unscented, would take minimal changes to do UnKF #--------------------------------------------------------------------------------------------------# @@ -58,68 +64,73 @@ get.parameter.samples(settings, ################ Not working on interactive job on MODEX PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- observation suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) + ids = site_ID, .con = con +)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = 
qry_results$lon, time_zone = qry_results$time_zone +) -# +# # ###################### EXTRACT AGB DATA + REFORMAT LONG VS. WIDE STYLE ##################################### # ### this is for LandTrendr data ### -# +# # # output folder for the data # data_dir <- "/data2/RS_GIS_Data/LandTrendr/LandTrendr_AGB_data" -# +# # # extract the data # med_agb_data <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean", # data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]] -# +# # sdev_agb_data <- extract.LandTrendr.AGB(site_info, "stdv", buffer = NULL, fun = "mean", # data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]] -# -# +# +# # ### temporary fix to make agb data long vs. wide format to match modis data. ### # ndates = colnames(med_agb_data)[-c(1:2)] -# +# # med_agb_data$Site_Name = as.character(med_agb_data$Site_Name, stringsAsFactors = FALSE) # med_agb_data = reshape2::melt(med_agb_data, id.vars = "Site_ID", measure.vars = colnames(med_agb_data)[-c(1:2)]) -# +# # sdev_agb_data$Site_Name = as.character(sdev_agb_data$Site_Name, stringsAsFactors = FALSE) # sdev_agb_data = reshape2::melt(sdev_agb_data, id.vars = "Site_ID", measure.vars = colnames(sdev_agb_data)[-c(1:2)]) -# +# # agb_data = as.data.frame(cbind(med_agb_data, sdev_agb_data$value)) # names(agb_data) = c("Site_ID", "Date", "Median", "SD") # agb_data$Date = as.character(agb_data$Date, stringsAsFactors = FALSE) -# +# # # save AGB data into long style # save(agb_data, file = '/data/bmorrison/sda/lai/modis_lai_data/agb_data_update_sites.Rdata') -# -# +# +# # # ####################### Extract MODISTools LAI data ############################## -# +# # library(doParallel) # cl <- parallel::makeCluster(10, outfile="") # doParallel::registerDoParallel(cl) -# +# # start = Sys.time() -# # keep QC_filter on for this because bad LAI values crash the SDA. Progress can be turned off if it annoys you. +# # keep QC_filter on for this because bad LAI values crash the SDA. Progress can be turned off if it annoys you. # data = foreach(i=1:length(site_info$site_id), .combine = rbind) %dopar% PEcAn.data.remote::call_MODIS(start_date = "2000/01/01", end_date = "2017/12/31", band = "Lai_500m", product = "MOD15A2H", lat = site_info$lat[i], lon = site_info$lon[i], size = 0, band_qc = "FparLai_QC", band_sd = "LaiStdDev_500m", package_method = "MODISTools", QC_filter = T, progress = T) # end = Sys.time() # difference = end-start # stopCluster(cl) -# +# # # already in long format style for dataframe # output = as.data.frame(data) # save(output, file = '/data/bmorrison/sda/lai/modis_lai_data/modis_lai_output_update_sites.Rdata') -# +# # # change tile names to the site name # for (i in 1:length(site_info$site_name)) # { @@ -130,7 +141,7 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr # # remove extra data # output = output[,c(4,2,8,10)] # colnames(output) = names(agb_data) -# +# # # compute peak lai per year # data = output # peak_lai = data.frame() @@ -149,19 +160,19 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr # max = site[which(site$Median == max(site$Median[which(site$Median <= quantile(site$Median, probs = 0.95))], na.rm = T))[1],] #which(d$Median == max(d$Median[index], na.rm = T))[1] # peak = data.frame(max$Site_ID, Date = paste("Year", years[i], sep = "_"), Median = max$Median, SD = max$SD) # peak_lai = rbind(peak_lai, peak) -# +# # } # } # } -# +# # # a fix for low SD values because of an issue with MODIS LAI error calculations. 
Reference: VISKARI et al 2014. # peak_lai$SD[peak_lai$SD < 0.66] = 0.66 -# +# # #output data # names(peak_lai) = c("Site_ID", "Date", "Median", "SD") # save(peak_lai, file = '/data/bmorrison/sda/lai/modis_lai_data/peak_lai_output_update_sites.Rdata') -# -# +# +# # ######################### TIME TO FIX UP THE OBSERVED DATASETS INTO A FORMAT THAT WORKS TO MAKE OBS.MEAN and OBS.COV FOR SDA ######################## # ################# # load('/data/bmorrison/sda/lai/modis_lai_data/agb_data_update_sites.Rdata') @@ -169,31 +180,31 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr # # output likes to make factors ..... :/... so this unfactors them # peak_lai$Site_ID = as.numeric(as.character(peak_lai$Site_ID, stringsAsFactors = F)) # peak_lai$Date = as.character(peak_lai$Date, stringsAsFactors = F) -# +# # observed_vars = c("AbvGrndWood", "LAI") -# -# +# +# # # merge agb and lai dataframes and places NA values where data is missing between the 2 datasets # observed_data = merge(agb_data, peak_lai, by = c("Site_ID", "Date"), all = T) # names(observed_data) = c("Site_ID", "Date", "med_agb", "sdev_agb", "med_lai", "sdev_lai") -# +# # # order by year # observed_data = observed_data[order(observed_data$Date),] -# +# # #sort by date # dates = sort(unique(observed_data$Date)) -# +# # # create the obs.mean list --> this needs to be adjusted to work with load.data in the future (via hackathon) # obs.mean = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, med_agb = observed_data$med_agb, med_lai = observed_data$med_lai) # obs.mean$date = as.character(obs.mean$date, stringsAsFactors = FALSE) -# +# # obs.mean = obs.mean %>% # split(.$date) -# +# # # change the dates to be middle of the year # date.obs <- strsplit(names(obs.mean), "_") %>% # map_chr(~.x[2]) %>% paste0(.,"/07/15") -# +# # obs.mean = names(obs.mean) %>% # map(function(namesl){ # obs.mean[[namesl]] %>% @@ -201,7 +212,7 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr # map(~.x[3:4] %>% setNames(c("AbvGrndWood", "LAI"))) %>% # setNames(site.ids) # }) %>% setNames(date.obs) -# +# # # remove NA data as this will crash the SDA. Removes rown numbers (may not be nessesary) # names = date.obs # for (name in names) @@ -217,18 +228,18 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr # } # } # } -# +# # # fillers are 0's for the covariance matrix. This will need to change for differing size matrixes when more variables are added in. 
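# Illustrative sketch, not applied by this patch: the hard-coded 2x2 covariance (and
# the zero fillers mentioned in the comment above) generalize to any number of state
# variables by building the diagonal from a vector of standard deviations.
# `sdevs` and `cov_site` are hypothetical names and the values below are made up.
sdevs <- c(sdev_agb = 12.4, sdev_lai = 0.66)
cov_site <- diag(sdevs^2, nrow = length(sdevs), ncol = length(sdevs))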
# filler_0 = as.data.frame(matrix(0, ncol = length(observed_vars), nrow = nrow(observed_data))) # names(filler_0) = paste0("h", seq_len(length(observed_vars))) -# +# # # create obs.cov dataframe -->list by date # obs.cov = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, sdev_agb = observed_data$sdev_agb, sdev_lai = observed_data$sdev_lai, filler_0) # obs.cov$date = as.character(obs.cov$date, stringsAsFactors = F) -# +# # obs.cov = obs.cov %>% # split(.$date) -# +# # #sublist by date --> site # obs.cov = names(obs.cov) %>% # map(function(namesl){ @@ -237,7 +248,7 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr # map(~diag(.x[3:4]^2, nrow = 2, ncol = 2)) %>% # setNames(site.ids)}) %>% # setNames(date.obs) -# +# # # remove NA/missing observations from covariance matrix and removes NA values to restructure size of covar matrix # names = names(obs.cov) # for (name in names) @@ -252,7 +263,7 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr # } # } # } -# +# # # save these lists for future use. # save(obs.mean, file = '/data/bmorrison/sda/lai/obs_mean_update_sites.Rdata') # save(obs.cov, file = '/data/bmorrison/sda/lai/obs_cov_update_sites.Rdata') @@ -261,41 +272,43 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr ################################ START THE SDA ######################################## -load('/data/bmorrison/sda/lai/obs_mean_update_sites.Rdata') -load('/data/bmorrison/sda/lai/obs_cov_update_sites.Rdata') -date.obs = names(obs.mean) +load("/data/bmorrison/sda/lai/obs_mean_update_sites.Rdata") +load("/data/bmorrison/sda/lai/obs_cov_update_sites.Rdata") +date.obs <- names(obs.mean) new.settings <- PEcAn.settings::prepare.settings(settings) -#unlink(c('run','out','SDA'),recursive = T) +# unlink(c('run','out','SDA'),recursive = T) -sda.enkf.multisite(new.settings, - obs.mean =obs.mean, - obs.cov = obs.cov, - keepNC = TRUE, - forceRun = TRUE, - control=list(trace=TRUE, - FF=FALSE, - interactivePlot=FALSE, - TimeseriesPlot=TRUE, - BiasPlot=FALSE, - plot.title=NULL, - facet.plots=4, - debug=FALSE, - pause=FALSE, - Profiling = FALSE, - OutlierDetection=FALSE)) +sda.enkf.multisite(new.settings, + obs.mean = obs.mean, + obs.cov = obs.cov, + keepNC = TRUE, + forceRun = TRUE, + control = list( + trace = TRUE, + FF = FALSE, + interactivePlot = FALSE, + TimeseriesPlot = TRUE, + BiasPlot = FALSE, + plot.title = NULL, + facet.plots = 4, + debug = FALSE, + pause = FALSE, + Profiling = FALSE, + OutlierDetection = FALSE + ) +) ### FOR PLOTTING after analysis if TimeseriesPlot == FALSE) -load('/data/bmorrison/sda/lai/SDA/sda.output.Rdata') -facetg=4 -readsFF=NULL - -obs.mean = Viz.output[[2]] -obs.cov = Viz.output[[3]] -obs.times = names(obs.mean) -PEcAnAssimSequential::post.analysis.multisite.ggplot(settings = new.settings, t, obs.times, obs.mean, obs.cov, FORECAST, ANALYSIS, plot.title=NULL, facetg=facetg, readsFF=NULL) +load("/data/bmorrison/sda/lai/SDA/sda.output.Rdata") +facetg <- 4 +readsFF <- NULL +obs.mean <- Viz.output[[2]] +obs.cov <- Viz.output[[3]] +obs.times <- names(obs.mean) +PEcAnAssimSequential::post.analysis.multisite.ggplot(settings = new.settings, t, obs.times, obs.mean, obs.cov, FORECAST, ANALYSIS, plot.title = NULL, facetg = facetg, readsFF = NULL) diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/general_sda_setup_2.R b/modules/assim.sequential/inst/sda_backup/bmorrison/general_sda_setup_2.R index 6e48bc71922..a3287acbd29 100755 --- 
a/modules/assim.sequential/inst/sda_backup/bmorrison/general_sda_setup_2.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/general_sda_setup_2.R @@ -1,8 +1,7 @@ - #---------------- Close all devices and delete all variables. -------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -15,7 +14,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -28,7 +27,7 @@ library(tictoc) work_dir <- "/data/bmorrison/sda/lai" # delete an old run -unlink(c('run','out','SDA'),recursive = T) +unlink(c("run", "out", "SDA"), recursive = T) # grab multi-site XML file settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB_8_Sites_2009.xml") @@ -36,20 +35,27 @@ settings <- read.settings("pecan_MultiSite_SDA_LAI_AGB_8_Sites_2009.xml") # doesn't work for one site observation <- c() for (i in seq_along(1:length(settings$run))) { - command <- paste0("settings$run$settings.",i,"$site$id") - obs <- eval(parse(text=command)) - observation <- c(observation,obs) + command <- paste0("settings$run$settings.", i, "$site$id") + obs <- eval(parse(text = command)) + observation <- c(observation, obs) } -#observation = "1000000048" +# observation = "1000000048" # what is this step for???? is this to get the site locations for the map?? 
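# Illustrative sketch, not applied by this patch: the eval(parse(...)) loop above that
# builds `observation` can usually be replaced by a direct purrr extraction, assuming
# every element of settings$run carries a $site$id entry
# (`observation_sketch` is a hypothetical name).
observation_sketch <- purrr::map_chr(settings$run, ~ as.character(.x$site$id))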
-if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% - map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} # sample from parameters used for both sensitivity analysis and Ens -get.parameter.samples(settings, - ens.sample.method = settings$ensemble$samplingspace$parameters$method) +get.parameter.samples(settings, + ens.sample.method = settings$ensemble$samplingspace$parameters$method +) ## Aside: if method were set to unscented, would take minimal changes to do UnKF #--------------------------------------------------------------------------------------------------# @@ -58,68 +64,73 @@ get.parameter.samples(settings, ################ Not working on interactive job on MODEX PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- observation suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) + ids = site_ID, .con = con +)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) -# +# # ###################### EXTRACT AGB DATA + REFORMAT LONG VS. WIDE STYLE ##################################### # ### this is for LandTrendr data ### -# +# # # output folder for the data # data_dir <- "/data2/RS_GIS_Data/LandTrendr/LandTrendr_AGB_data" -# +# # # extract the data # med_agb_data <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean", # data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]] -# +# # sdev_agb_data <- extract.LandTrendr.AGB(site_info, "stdv", buffer = NULL, fun = "mean", # data_dir, product_dates=NULL, file.path(work_dir,"Obs"))[[1]] -# -# +# +# # ### temporary fix to make agb data long vs. wide format to match modis data. 
### # ndates = colnames(med_agb_data)[-c(1:2)] -# +# # med_agb_data$Site_Name = as.character(med_agb_data$Site_Name, stringsAsFactors = FALSE) # med_agb_data = reshape2::melt(med_agb_data, id.vars = "Site_ID", measure.vars = colnames(med_agb_data)[-c(1:2)]) -# +# # sdev_agb_data$Site_Name = as.character(sdev_agb_data$Site_Name, stringsAsFactors = FALSE) # sdev_agb_data = reshape2::melt(sdev_agb_data, id.vars = "Site_ID", measure.vars = colnames(sdev_agb_data)[-c(1:2)]) -# +# # agb_data = as.data.frame(cbind(med_agb_data, sdev_agb_data$value)) # names(agb_data) = c("Site_ID", "Date", "Median", "SD") # agb_data$Date = as.character(agb_data$Date, stringsAsFactors = FALSE) -# +# # # save AGB data into long style # save(agb_data, file = '/data/bmorrison/sda/lai/modis_lai_data/agb_data_update_sites.Rdata') -# -# +# +# # # ####################### Extract MODISTools LAI data ############################## -# +# # library(doParallel) # cl <- parallel::makeCluster(10, outfile="") # doParallel::registerDoParallel(cl) -# +# # start = Sys.time() -# # keep QC_filter on for this because bad LAI values crash the SDA. Progress can be turned off if it annoys you. +# # keep QC_filter on for this because bad LAI values crash the SDA. Progress can be turned off if it annoys you. # data = foreach(i=1:length(site_info$site_id), .combine = rbind) %dopar% PEcAn.data.remote::call_MODIS(start_date = "2000/01/01", end_date = "2017/12/31", band = "Lai_500m", product = "MOD15A2H", lat = site_info$lat[i], lon = site_info$lon[i], size = 0, band_qc = "FparLai_QC", band_sd = "LaiStdDev_500m", package_method = "MODISTools", QC_filter = T, progress = T) # end = Sys.time() # difference = end-start # stopCluster(cl) -# +# # # already in long format style for dataframe # output = as.data.frame(data) # save(output, file = '/data/bmorrison/sda/lai/modis_lai_data/modis_lai_output_update_sites.Rdata') -# +# # # change tile names to the site name # for (i in 1:length(site_info$site_name)) # { @@ -130,7 +141,7 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr # # remove extra data # output = output[,c(4,2,8,10)] # colnames(output) = names(agb_data) -# +# # # compute peak lai per year # data = output # peak_lai = data.frame() @@ -149,19 +160,19 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr # max = site[which(site$Median == max(site$Median[which(site$Median <= quantile(site$Median, probs = 0.95))], na.rm = T))[1],] #which(d$Median == max(d$Median[index], na.rm = T))[1] # peak = data.frame(max$Site_ID, Date = paste("Year", years[i], sep = "_"), Median = max$Median, SD = max$SD) # peak_lai = rbind(peak_lai, peak) -# +# # } # } # } -# +# # # a fix for low SD values because of an issue with MODIS LAI error calculations. Reference: VISKARI et al 2014. # peak_lai$SD[peak_lai$SD < 0.66] = 0.66 -# +# # #output data # names(peak_lai) = c("Site_ID", "Date", "Median", "SD") # save(peak_lai, file = '/data/bmorrison/sda/lai/modis_lai_data/peak_lai_output_update_sites.Rdata') -# -# +# +# # ######################### TIME TO FIX UP THE OBSERVED DATASETS INTO A FORMAT THAT WORKS TO MAKE OBS.MEAN and OBS.COV FOR SDA ######################## # ################# # load('/data/bmorrison/sda/lai/modis_lai_data/agb_data_update_sites.Rdata') @@ -169,31 +180,31 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr # # output likes to make factors ..... :/... 
so this unfactors them # peak_lai$Site_ID = as.numeric(as.character(peak_lai$Site_ID, stringsAsFactors = F)) # peak_lai$Date = as.character(peak_lai$Date, stringsAsFactors = F) -# +# # observed_vars = c("AbvGrndWood", "LAI") -# -# +# +# # # merge agb and lai dataframes and places NA values where data is missing between the 2 datasets # observed_data = merge(agb_data, peak_lai, by = c("Site_ID", "Date"), all = T) # names(observed_data) = c("Site_ID", "Date", "med_agb", "sdev_agb", "med_lai", "sdev_lai") -# +# # # order by year # observed_data = observed_data[order(observed_data$Date),] -# +# # #sort by date # dates = sort(unique(observed_data$Date)) -# +# # # create the obs.mean list --> this needs to be adjusted to work with load.data in the future (via hackathon) # obs.mean = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, med_agb = observed_data$med_agb, med_lai = observed_data$med_lai) # obs.mean$date = as.character(obs.mean$date, stringsAsFactors = FALSE) -# +# # obs.mean = obs.mean %>% # split(.$date) -# +# # # change the dates to be middle of the year # date.obs <- strsplit(names(obs.mean), "_") %>% # map_chr(~.x[2]) %>% paste0(.,"/07/15") -# +# # obs.mean = names(obs.mean) %>% # map(function(namesl){ # obs.mean[[namesl]] %>% @@ -201,7 +212,7 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr # map(~.x[3:4] %>% setNames(c("AbvGrndWood", "LAI"))) %>% # setNames(site.ids) # }) %>% setNames(date.obs) -# +# # # remove NA data as this will crash the SDA. Removes rown numbers (may not be nessesary) # names = date.obs # for (name in names) @@ -217,18 +228,18 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr # } # } # } -# +# # # fillers are 0's for the covariance matrix. This will need to change for differing size matrixes when more variables are added in. # filler_0 = as.data.frame(matrix(0, ncol = length(observed_vars), nrow = nrow(observed_data))) # names(filler_0) = paste0("h", seq_len(length(observed_vars))) -# +# # # create obs.cov dataframe -->list by date # obs.cov = data.frame(date = observed_data$Date, site_id = observed_data$Site_ID, sdev_agb = observed_data$sdev_agb, sdev_lai = observed_data$sdev_lai, filler_0) # obs.cov$date = as.character(obs.cov$date, stringsAsFactors = F) -# +# # obs.cov = obs.cov %>% # split(.$date) -# +# # #sublist by date --> site # obs.cov = names(obs.cov) %>% # map(function(namesl){ @@ -237,7 +248,7 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr # map(~diag(.x[3:4]^2, nrow = 2, ncol = 2)) %>% # setNames(site.ids)}) %>% # setNames(date.obs) -# +# # # remove NA/missing observations from covariance matrix and removes NA values to restructure size of covar matrix # names = names(obs.cov) # for (name in names) @@ -252,7 +263,7 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr # } # } # } -# +# # # save these lists for future use. 
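# Illustrative sketch, not applied by this patch: a minimal hand-built example of the
# nested structure obs.mean and obs.cov take when they are saved below and later fed to
# sda.enkf.multisite (dates on the outer level, site IDs below); the date, site ID, and
# numbers here are made up.
obs.mean_example <- list(
  "2010/07/15" = list(
    "1000000048" = data.frame(AbvGrndWood = 95.2, LAI = 3.1)
  )
)
obs.cov_example <- list(
  "2010/07/15" = list(
    "1000000048" = diag(c(12.4, 0.66)^2, nrow = 2, ncol = 2)
  )
)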
# save(obs.mean, file = '/data/bmorrison/sda/lai/obs_mean_update_sites.Rdata') # save(obs.cov, file = '/data/bmorrison/sda/lai/obs_cov_update_sites.Rdata') @@ -261,41 +272,43 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr ################################ START THE SDA ######################################## -load('/data/bmorrison/sda/lai/obs_mean_update_sites.Rdata') -load('/data/bmorrison/sda/lai/obs_cov_update_sites.Rdata') -date.obs = names(obs.mean) +load("/data/bmorrison/sda/lai/obs_mean_update_sites.Rdata") +load("/data/bmorrison/sda/lai/obs_cov_update_sites.Rdata") +date.obs <- names(obs.mean) new.settings <- PEcAn.settings::prepare.settings(settings) -#unlink(c('run','out','SDA'),recursive = T) +# unlink(c('run','out','SDA'),recursive = T) -sda.enkf.multisite(new.settings, - obs.mean =obs.mean, - obs.cov = obs.cov, - keepNC = TRUE, - forceRun = TRUE, - control=list(trace=TRUE, - FF=FALSE, - interactivePlot=FALSE, - TimeseriesPlot=TRUE, - BiasPlot=FALSE, - plot.title=NULL, - facet.plots=4, - debug=FALSE, - pause=FALSE, - Profiling = FALSE, - OutlierDetection=FALSE)) +sda.enkf.multisite(new.settings, + obs.mean = obs.mean, + obs.cov = obs.cov, + keepNC = TRUE, + forceRun = TRUE, + control = list( + trace = TRUE, + FF = FALSE, + interactivePlot = FALSE, + TimeseriesPlot = TRUE, + BiasPlot = FALSE, + plot.title = NULL, + facet.plots = 4, + debug = FALSE, + pause = FALSE, + Profiling = FALSE, + OutlierDetection = FALSE + ) +) ### FOR PLOTTING after analysis if TimeseriesPlot == FALSE) -load('/data/bmorrison/sda/lai/SDA/sda.output.Rdata') -facetg=4 -readsFF=NULL - -obs.mean = Viz.output[[2]] -obs.cov = Viz.output[[3]] -obs.times = names(obs.mean) -PEcAnAssimSequential::post.analysis.multisite.ggplot(settings = new.settings, t, obs.times, obs.mean, obs.cov, FORECAST, ANALYSIS, plot.title=NULL, facetg=facetg, readsFF=NULL) +load("/data/bmorrison/sda/lai/SDA/sda.output.Rdata") +facetg <- 4 +readsFF <- NULL +obs.mean <- Viz.output[[2]] +obs.cov <- Viz.output[[3]] +obs.times <- names(obs.mean) +PEcAnAssimSequential::post.analysis.multisite.ggplot(settings = new.settings, t, obs.times, obs.mean, obs.cov, FORECAST, ANALYSIS, plot.title = NULL, facetg = facetg, readsFF = NULL) diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/pft_selection.R b/modules/assim.sequential/inst/sda_backup/bmorrison/pft_selection.R index baf648cdf71..56fb3fdeec9 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/pft_selection.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/pft_selection.R @@ -2,27 +2,32 @@ library(raster) library(shapefiles) library(PEcAn.DB) -analysis = readRDS("/Volumes/data2/bmorrison/sda/500_site_run/output_folder/ANALYSIS.RDS") +analysis <- readRDS("/Volumes/data2/bmorrison/sda/500_site_run/output_folder/ANALYSIS.RDS") -dates = names(analysis) -sites = unique(attributes(analysis[[names(analysis)[1]]])$Site) -observations = sites +dates <- names(analysis) +sites <- unique(attributes(analysis[[names(analysis)[1]]])$Site) +observations <- sites -#working = print(paste("working on: ", i)) -sites = observations -bety <- list(user='bety', password='bety', host='modex.bnl.gov', - dbname='betydb', driver='PostgreSQL',write=TRUE) +# working = print(paste("working on: ", i)) +sites <- observations +bety <- list( + user = "bety", password = "bety", host = "modex.bnl.gov", + dbname = "betydb", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- sites suppressWarnings(site_qry <- 
glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) + ids = site_ID, .con = con +)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) # load('/Volumes/data/bmorrison/sda/500_site_run/all_lai_data_500.Rdata') # site_ids = unique(lai_data$site_id) @@ -33,158 +38,152 @@ site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qr # sites = rbind(sites, lai_data[index[1],]) # } # sites = sites[,c(5,6,7)] -sites = as.data.frame(cbind(site_info$site_id,site_info$lon, site_info$lat)) -names(sites) = c("id", "lon", "lat") -coordinates(sites) = ~lon+lat -projection(sites) = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " -cover = raster('/Volumes/data/bmorrison/sda/500_site_run/NLCD_2001_Land_Cover_L48_20190424.img') +sites <- as.data.frame(cbind(site_info$site_id, site_info$lon, site_info$lat)) +names(sites) <- c("id", "lon", "lat") +coordinates(sites) <- ~ lon + lat +projection(sites) <- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " +cover <- raster("/Volumes/data/bmorrison/sda/500_site_run/NLCD_2001_Land_Cover_L48_20190424.img") # sites = shapefile('/data/bmorrison/sda/500_site_run/shapefiles/500_site_selection.shp') # s = shapefile('/data/bmorrison/sda/500_site_run/shapefiles/500_site_selection.shp') -#sites = as.data.frame(sites) +# sites = as.data.frame(sites) # sites = sites[, 11:12] # names(sites) = c("x", "y") # coordinates(sites) = ~x+y # projection(sites) = crs(s) -sites = spTransform(sites, CRS = crs(cover)) +sites <- spTransform(sites, CRS = crs(cover)) # make sure projections match -data = extract(cover, sites) -sites$cover = data +data <- extract(cover, sites) +sites$cover <- data # bad = which(data == 11 | data == 12 | data == 31) # site_data = sites[-bad,] -site_data = sites - -ecoregion = shapefile('/Volumes/data2/bmorrison/sda/bailey_paper/data/ecoregions_shapefile/eco_aea_l1.shp') -ecoregion = spTransform(ecoregion, CRS = crs(cover)) -eco_data = extract(ecoregion, site_data) -site_data$region = eco_data$NA_L1CODE -site_data$name = eco_data$NA_L1NAME - -site_data = as.data.frame(site_data) -names(site_data) = c("ID", "cover", "ecoregion", "name", "lon", "lat") -site_data$pft = NA -site_data$cover = as.numeric(site_data$cover) -site_data$ecoregion = as.numeric(site_data$ecoregion) +site_data <- sites + +ecoregion <- shapefile("/Volumes/data2/bmorrison/sda/bailey_paper/data/ecoregions_shapefile/eco_aea_l1.shp") +ecoregion <- spTransform(ecoregion, CRS = crs(cover)) +eco_data <- extract(ecoregion, site_data) +site_data$region <- eco_data$NA_L1CODE +site_data$name <- eco_data$NA_L1NAME + +site_data <- as.data.frame(site_data) +names(site_data) <- c("ID", "cover", "ecoregion", "name", "lon", "lat") +site_data$pft <- NA +site_data$cover <- as.numeric(site_data$cover) +site_data$ecoregion <- as.numeric(site_data$ecoregion) # remove sites that are categorized as unclassified, water, ice/snow, barren -index = which(site_data$cover == 0 | site_data$cover == 11 | site_data$cover == 12 | site_data$cover 
== 31) -site_data$pft[index] = NA +index <- which(site_data$cover == 0 | site_data$cover == 11 | site_data$cover == 12 | site_data$cover == 31) +site_data$pft[index] <- NA # classify deciduous -index = which(site_data$cover == 41) -site_data$pft[index] = "deciduous" +index <- which(site_data$cover == 41) +site_data$pft[index] <- "deciduous" # classify evergreen/conifer -index = which(site_data$cover == 42) -site_data$pft[index] = "conifer" +index <- which(site_data$cover == 42) +site_data$pft[index] <- "conifer" # classify mixed forest -index = which(site_data$cover == 43) -site_data$pft[index] = "mixed forest" +index <- which(site_data$cover == 43) +site_data$pft[index] <- "mixed forest" # classify developed -index = which(site_data$cover == 21 | site_data$cover == 22 | site_data$cover == 23 | site_data$cover == 24) -site_data$pft[index] = "developed" +index <- which(site_data$cover == 21 | site_data$cover == 22 | site_data$cover == 23 | site_data$cover == 24) +site_data$pft[index] <- "developed" # classify shrub/scrub -index = which(site_data$cover == 52 & (site_data$ecoregion == 10 | site_data$ecoregion == 11 | site_data$ecoregion == 12 | site_data$ecoregion == 13 | site_data$ecoregion == 14)) -site_data$pft[index] = "arid grassland" +index <- which(site_data$cover == 52 & (site_data$ecoregion == 10 | site_data$ecoregion == 11 | site_data$ecoregion == 12 | site_data$ecoregion == 13 | site_data$ecoregion == 14)) +site_data$pft[index] <- "arid grassland" -index = which(site_data$cover == 52 & (site_data$ecoregion == 9 | site_data$ecoregion == 8 | site_data$ecoregion == 6 | site_data$ecoregion == 7)) -site_data$pft[index] = "mesic grassland" +index <- which(site_data$cover == 52 & (site_data$ecoregion == 9 | site_data$ecoregion == 8 | site_data$ecoregion == 6 | site_data$ecoregion == 7)) +site_data$pft[index] <- "mesic grassland" # classify herbaceous -index = which(site_data$cover == 71 & (site_data$ecoregion == 10 | site_data$ecoregion == 11 | site_data$ecoregion == 12 | site_data$ecoregion == 13 | site_data$ecoregion == 14)) -site_data$pft[index] = "arid grassland" +index <- which(site_data$cover == 71 & (site_data$ecoregion == 10 | site_data$ecoregion == 11 | site_data$ecoregion == 12 | site_data$ecoregion == 13 | site_data$ecoregion == 14)) +site_data$pft[index] <- "arid grassland" -index = which(site_data$cover == 71 & (site_data$ecoregion == 9 | site_data$ecoregion == 15 | site_data$ecoregion == 7 | site_data$ecoregion == 8 | site_data$ecoregion == 5 | site_data$ecoregion == 6)) -site_data$pft[index] = "mesic grassland" +index <- which(site_data$cover == 71 & (site_data$ecoregion == 9 | site_data$ecoregion == 15 | site_data$ecoregion == 7 | site_data$ecoregion == 8 | site_data$ecoregion == 5 | site_data$ecoregion == 6)) +site_data$pft[index] <- "mesic grassland" # classify hay/pasture crops -index = which((site_data$cover == 81 | site_data$cover == 82) & (site_data$ecoregion == 10 | site_data$ecoregion == 11 | site_data$ecoregion == 12 | site_data$ecoregion == 13 | site_data$ecoregion == 14)) -site_data$pft[index] = "arid grassland" +index <- which((site_data$cover == 81 | site_data$cover == 82) & (site_data$ecoregion == 10 | site_data$ecoregion == 11 | site_data$ecoregion == 12 | site_data$ecoregion == 13 | site_data$ecoregion == 14)) +site_data$pft[index] <- "arid grassland" -index = which((site_data$cover == 81 | site_data$cover == 82) & (site_data$ecoregion == 9 | site_data$ecoregion == 8 | site_data$ecoregion == 7)) -site_data$pft[index] = "mesic grassland" +index <- 
which((site_data$cover == 81 | site_data$cover == 82) & (site_data$ecoregion == 9 | site_data$ecoregion == 8 | site_data$ecoregion == 7)) +site_data$pft[index] <- "mesic grassland" # classify wetlands -index = which(site_data$cover == 95) -site_data$pft[index] = "mesic grassland" +index <- which(site_data$cover == 95) +site_data$pft[index] <- "mesic grassland" -index = which(site_data$cover == 90) -site_data$pft[index] = "woody wetland" +index <- which(site_data$cover == 90) +site_data$pft[index] <- "woody wetland" # LAI analysis for forests (mixed + woody wetland) -index = which(site_data$cover == 43 | site_data$cover == 90) -data = site_data[index,] -coordinates(data) = ~lon+lat -projection(data) = crs(sites) -data = spTransform(data, CRS = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ") -data = as.data.frame(data, stringsAsFactors = F) +index <- which(site_data$cover == 43 | site_data$cover == 90) +data <- site_data[index, ] +coordinates(data) <- ~ lon + lat +projection(data) <- crs(sites) +data <- spTransform(data, CRS = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ") +data <- as.data.frame(data, stringsAsFactors = F) library(PEcAn.data.remote) -site_id = data$ID -site_name = rep(NA, nrow(data)) -lat = data$lat -lon = data$lon -time_zone = rep("time", nrow(data)) -site_info = list(site_id, site_name, lat, lon, time_zone) -names(site_info) = c("site_id", "site_name", "lat", "lon", "time_zone") +site_id <- data$ID +site_name <- rep(NA, nrow(data)) +lat <- data$lat +lon <- data$lon +time_zone <- rep("time", nrow(data)) +site_info <- list(site_id, site_name, lat, lon, time_zone) +names(site_info) <- c("site_id", "site_name", "lat", "lon", "time_zone") -lai = call_MODIS(outdir = NULL, var = "lai", site_info = site_info, product_dates = c("2001001", "2001365"), run_parallel = T, ncores = 10, product ="MOD15A2H", band = "Lai_500m", package_method = "MODISTools", QC_filter = T, progress = F) -ndvi = call_MODIS(outdir = NULL, var = "NDVI", site_info = site_info, product_dates = c("2001001", "2001365"), run_parallel = T, ncores = 10, product ="MOD13Q1", band = "250m_16_days_NDVI", package_method = "MODISTools", QC_filter = F, progress = F) +lai <- call_MODIS(outdir = NULL, var = "lai", site_info = site_info, product_dates = c("2001001", "2001365"), run_parallel = T, ncores = 10, product = "MOD15A2H", band = "Lai_500m", package_method = "MODISTools", QC_filter = T, progress = F) +ndvi <- call_MODIS(outdir = NULL, var = "NDVI", site_info = site_info, product_dates = c("2001001", "2001365"), run_parallel = T, ncores = 10, product = "MOD13Q1", band = "250m_16_days_NDVI", package_method = "MODISTools", QC_filter = F, progress = F) library(lubridate) -par(mfrow = c(4,5)) -info = data.frame() -data = lai -sites = sort(unique(lai$site_id)) +par(mfrow = c(4, 5)) +info <- data.frame() +data <- lai +sites <- sort(unique(lai$site_id)) # xy = data.frame() # for (i in 1:length(sites)) # { # d = data[which(data$site_id == sites[i]),] # xy = rbind(xy, d[1,c(5,7,6)]) # } -#data$calendar_date = as.Date(data$calendar_date) +# data$calendar_date = as.Date(data$calendar_date) for (i in 21:40) { - site = sites[i] - d = data[which(data$site_id == site),] - d = d[,c(2,5,6,7,9)] - d = d[order(d$calendar_date),] - d$calendar_date = as.Date(d$calendar_date) - min = min(d$data, na.rm = T) - max = max(d$data, na.rm = T) - difference = max-min + site <- sites[i] + d <- data[which(data$site_id == site), ] + d <- d[, c(2, 5, 6, 7, 9)] + d <- d[order(d$calendar_date), ] + d$calendar_date <- as.Date(d$calendar_date) + 
min <- min(d$data, na.rm = T) + max <- max(d$data, na.rm = T) + difference <- max - min # winter = d %>% # select(calendar_date, site_id, lat, lon, data) %>% # filter((calendar_date >= month(ymd("2001-01-01")) & calendar_date <= month(ymd("2001-02-28"))) | (calendar_date >= month(ymd("2001-12-01")) & calendar_date <= month(ymd("2001-12-31")))) # min = mean(winter$data, na.rm = T) - + # summer = d %>% # select(calendar_date, site_id, lat, lon, data) %>% # filter(calendar_date >= month(ymd("2001-06-01")) & calendar_date <= month(ymd("2001-08-30"))) # max = mean(summer$data, na.rm = T) # difference = max - min - - info = rbind(info, as.data.frame(cbind(site, d$lon[1], d$lat[1], min, max, difference))) - plot(d$calendar_date, d$data, ylim = c(0, max(data$data)+2), main = site) -} - - - - - + info <- rbind(info, as.data.frame(cbind(site, d$lon[1], d$lat[1], min, max, difference))) + plot(d$calendar_date, d$data, ylim = c(0, max(data$data) + 2), main = site) +} diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/pft_site_locations.R b/modules/assim.sequential/inst/sda_backup/bmorrison/pft_site_locations.R index c55189624f0..de71113c544 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/pft_site_locations.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/pft_site_locations.R @@ -4,70 +4,72 @@ library(rgeos) library(rgeos) library(sp) -analysis = readRDS('/data2/bmorrison/sda/500_site_run/output_folder/ANALYSIS.RDS') -sites = unique(attributes(analysis[[names(analysis)[1]]])$Site) -obs = sites +analysis <- readRDS("/data2/bmorrison/sda/500_site_run/output_folder/ANALYSIS.RDS") +sites <- unique(attributes(analysis[[names(analysis)[1]]])$Site) +obs <- sites -load('/data2/bmorrison/sda/500_site_run/output_folder/500_site_run_SITE_INFO.Rdata') -sda_500 = site_info -#data -load('/data2/bmorrison/sda/500_site_run/output_folder/sites_with_lai_no_duplicates_SITE_INFO.Rdata') -sda_lai = data +load("/data2/bmorrison/sda/500_site_run/output_folder/500_site_run_SITE_INFO.Rdata") +sda_500 <- site_info +# data +load("/data2/bmorrison/sda/500_site_run/output_folder/sites_with_lai_no_duplicates_SITE_INFO.Rdata") +sda_lai <- data -#site_info -#load(file = '/data2/bmorrison/sda/500_site_run/output_folder/PFT_site_data.Rdata') -load('/data/bmorrison/sda/500_site_run/pft_site_types.RData') -pft_types = site_data +# site_info +# load(file = '/data2/bmorrison/sda/500_site_run/output_folder/PFT_site_data.Rdata') +load("/data/bmorrison/sda/500_site_run/pft_site_types.RData") +pft_types <- site_data -index = which(sda_500$site_id %in% pft_types$ID) +index <- which(sda_500$site_id %in% pft_types$ID) -sda_500 = sda_500[index,] -sda_500$pft = NA +sda_500 <- sda_500[index, ] +sda_500$pft <- NA for (i in 1:nrow(sda_500)) { - index = which(pft_types$ID == sda_500$site_id[i]) - sda_500$pft[i] = pft_types$pft[index] + index <- which(pft_types$ID == sda_500$site_id[i]) + sda_500$pft[i] <- pft_types$pft[index] } -bad = c(which(is.na(sda_500$pft)), which(sda_500$pft == "NA")) -sda_500 = sda_500[-bad,] +bad <- c(which(is.na(sda_500$pft)), which(sda_500$pft == "NA")) +sda_500 <- sda_500[-bad, ] -sda_500$class = sda_500$pft -sda_500$class[sda_500$pft == "mesic grassland"] = "blue" -sda_500$class[sda_500$pft == "arid grassland"] = "goldenrod2" -sda_500$class[sda_500$pft == "conifer"] = "chartreuse4" -sda_500$class[sda_500$pft == "deciduous"] = "green" -sda_500$class[sda_500$pft == "developed"] = "red" +sda_500$class <- sda_500$pft +sda_500$class[sda_500$pft == "mesic grassland"] <- "blue" 
+sda_500$class[sda_500$pft == "arid grassland"] <- "goldenrod2" +sda_500$class[sda_500$pft == "conifer"] <- "chartreuse4" +sda_500$class[sda_500$pft == "deciduous"] <- "green" +sda_500$class[sda_500$pft == "developed"] <- "red" -coordinates(sda_500) = ~lon+lat -projection(sda_500) = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " +coordinates(sda_500) <- ~ lon + lat +projection(sda_500) <- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " -us = shapefile('/data2/bmorrison/sda/bailey_paper/data/cb_2018_us_nation_20m/us_boarder_reproject.shp') -us = spTransform(us, CRS = crs(sda_500)) +us <- shapefile("/data2/bmorrison/sda/bailey_paper/data/cb_2018_us_nation_20m/us_boarder_reproject.shp") +us <- spTransform(us, CRS = crs(sda_500)) -pfts = unique(sda_500$pft) -names = toupper(pfts) -jpeg('/data2/bmorrison/sda/500_site_run/manuscript/figures/pft_site_locations.jpeg', width = 8.5, height = 4, res = 300, units ="in") -par(mfrow = c(2,3), mai = c(0.1, 0.1, .5, 0.1)) +pfts <- unique(sda_500$pft) +names <- toupper(pfts) +jpeg("/data2/bmorrison/sda/500_site_run/manuscript/figures/pft_site_locations.jpeg", width = 8.5, height = 4, res = 300, units = "in") +par(mfrow = c(2, 3), mai = c(0.1, 0.1, .5, 0.1)) for (i in 1:length(pfts)) { - index = which(sda_500$pft == pfts[i]) - d = sda_500[index,] + index <- which(sda_500$pft == pfts[i]) + d <- sda_500[index, ] plot(us, main = names[i]) plot(d, add = T, pch = 20, col = d$class) } dev.off() -jpeg('/data2/bmorrison/sda/500_site_run/manuscript/figures/All_pft_site_locations_1_plot.jpeg', width = 8.5, height = 6, res = 300, units ="in") +jpeg("/data2/bmorrison/sda/500_site_run/manuscript/figures/All_pft_site_locations_1_plot.jpeg", width = 8.5, height = 6, res = 300, units = "in") plot(us) plot(sda_500, col = sda_500$class, pch = 20, add = T) -legend("bottomleft", c("Mesic Grassland", "Arid Grassland", "Conifer", "Deciduous", "Developed"), col = c("blue", "goldenrod2", "chartreuse4", "green", "red"), - pch = 20, bty = 'n', cex = 1.1) +legend("bottomleft", c("Mesic Grassland", "Arid Grassland", "Conifer", "Deciduous", "Developed"), + col = c("blue", "goldenrod2", "chartreuse4", "green", "red"), + pch = 20, bty = "n", cex = 1.1 +) dev.off() -save(sda_500, file = '/data2/bmorrison/sda/500_site_run/manuscript/sda_500_pft_information.Rdata') +save(sda_500, file = "/data2/bmorrison/sda/500_site_run/manuscript/sda_500_pft_information.Rdata") @@ -75,50 +77,49 @@ save(sda_500, file = '/data2/bmorrison/sda/500_site_run/manuscript/sda_500_pft_i -site_info = site_info[complete.cases(site_info),] -bad = which(site_info$pft == "NA") -site_info = site_info[-bad,] -index = which(sda_500$site_id %in% site_info$site_id) -sda_500 = sda_500[index,] -sda_500$pft = NA +site_info <- site_info[complete.cases(site_info), ] +bad <- which(site_info$pft == "NA") +site_info <- site_info[-bad, ] +index <- which(sda_500$site_id %in% site_info$site_id) +sda_500 <- sda_500[index, ] +sda_500$pft <- NA for (i in 1:nrow(sda_500)) { - index = which(site_info$site_id == sda_500$site_id[i]) - sda_500$pft[i] = site_info$pft[index] + index <- which(site_info$site_id == sda_500$site_id[i]) + sda_500$pft[i] <- site_info$pft[index] } -sda_500$class = sda_500$pft -sda_500$class[sda_500$pft == "mesic grassland"] = "blue" -sda_500$class[sda_500$pft == "arid grassland"] = "goldenrod2" -sda_500$class[sda_500$pft == "conifer"] = "chartreuse4" -sda_500$class[sda_500$pft == "deciduous"] = "green" -sda_500$class[sda_500$pft == "developed"] = "red" +sda_500$class <- sda_500$pft +sda_500$class[sda_500$pft 
== "mesic grassland"] <- "blue" +sda_500$class[sda_500$pft == "arid grassland"] <- "goldenrod2" +sda_500$class[sda_500$pft == "conifer"] <- "chartreuse4" +sda_500$class[sda_500$pft == "deciduous"] <- "green" +sda_500$class[sda_500$pft == "developed"] <- "red" -coordinates(sda_500) = ~lon+lat -coordinates(sda_lai) = ~lon+lat -projection(sda_500) = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " -projection(sda_lai) = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " +coordinates(sda_500) <- ~ lon + lat +coordinates(sda_lai) <- ~ lon + lat +projection(sda_500) <- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " +projection(sda_lai) <- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " -us = shapefile('/data2/bmorrison/sda/bailey_paper/data/cb_2018_us_nation_20m/us_boarder_reproject.shp') -us = spTransform(us, CRS = crs(sda_500)) +us <- shapefile("/data2/bmorrison/sda/bailey_paper/data/cb_2018_us_nation_20m/us_boarder_reproject.shp") +us <- spTransform(us, CRS = crs(sda_500)) -pfts = toupper(unique(sda_500$pft)) -par(mfrow = c(2,3)) +pfts <- toupper(unique(sda_500$pft)) +par(mfrow = c(2, 3)) for (i in 1:length(pfts)) { - index = which(sda_500$pft == tolower(pfts[i])) - d = sda_500[index,] + index <- which(sda_500$pft == tolower(pfts[i])) + d <- sda_500[index, ] plot(us) plot(d, add = T, pch = 20, col = d$class, main = pfts[i]) } -jpeg('/data2/bmorrison/sda/500_site_run/manuscript/figures/pft_site_locations.jpeg', width = 8.5, height = 7, res = 300, units ="in") +jpeg("/data2/bmorrison/sda/500_site_run/manuscript/figures/pft_site_locations.jpeg", width = 8.5, height = 7, res = 300, units = "in") plot(us) plot(sda_500, col = sda_500$class, pch = 20, add = T) -legend("bottomleft", c("Mesic Grassland", "Arid Grassland", "Conifer", "Deciduous", "Developed"), col = c("blue", "goldenrod2", "chartreuse4", "green", "red"), - pch = 20, bty = 'n', cex = 1.5) +legend("bottomleft", c("Mesic Grassland", "Arid Grassland", "Conifer", "Deciduous", "Developed"), + col = c("blue", "goldenrod2", "chartreuse4", "green", "red"), + pch = 20, bty = "n", cex = 1.5 +) dev.off() - - - diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/register_site_group.R b/modules/assim.sequential/inst/sda_backup/bmorrison/register_site_group.R index 913c30d5555..fdd91d2bc0a 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/register_site_group.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/register_site_group.R @@ -9,7 +9,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -18,27 +18,34 @@ library(dplyr) library(furrr) library(tictoc) -data = shapefile('/data/bmorrison/sda/500_site_run/shapefiles/500_site_selection_final.shp') -data = as.data.frame(data) -names(data) = c("type", "lon", "lat") +data <- shapefile("/data/bmorrison/sda/500_site_run/shapefiles/500_site_selection_final.shp") +data <- as.data.frame(data) +names(data) <- c("type", "lon", "lat") -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con -#site_ID <- observation +# site_ID <- observation #-- register sites -site_id <-map2(data$lon, data$lat, function(lon, lat){ - 
pr<-paste0(round(lon,0), round(lat,0)) - out <-db.query(paste0("INSERT INTO sites (sitename, geometry) VALUES ('CMS_500_SDA_",pr,"', ", - "ST_SetSRID(ST_MakePoint(",lon,",", lat,", 1000), 4326) ) RETURNING id, sitename"), - con +site_id <- map2(data$lon, data$lat, function(lon, lat) { + pr <- paste0(round(lon, 0), round(lat, 0)) + out <- db.query( + paste0( + "INSERT INTO sites (sitename, geometry) VALUES ('CMS_500_SDA_", pr, "', ", + "ST_SetSRID(ST_MakePoint(", lon, ",", lat, ", 1000), 4326) ) RETURNING id, sitename" + ), + con ) out }) -#link to site group +# link to site group site_id %>% - map(~ db.query(paste0("INSERT INTO sitegroups_sites (sitegroup_id , site_id ) VALUES (2000000009, ", - .x[[1]],")"),con)) \ No newline at end of file + map(~ db.query(paste0( + "INSERT INTO sitegroups_sites (sitegroup_id , site_id ) VALUES (2000000009, ", + .x[[1]], ")" + ), con)) diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/site_selection/pick_sda_sites.R b/modules/assim.sequential/inst/sda_backup/bmorrison/site_selection/pick_sda_sites.R index d0706d1023e..dd5a2d97649 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/site_selection/pick_sda_sites.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/site_selection/pick_sda_sites.R @@ -1,75 +1,67 @@ library(raster) -r = raster('/data/bmorrison/sda/bailey_paper/data/landcover/NLCD_2016_Land_Cover_L48_20190424.img') +r <- raster("/data/bmorrison/sda/bailey_paper/data/landcover/NLCD_2016_Land_Cover_L48_20190424.img") ################################################################################################### #################### TILE AND REMOVE NON-NATURAL LOCATIONS FROM RASTER ############################ -ex = extent(r) -ncol = ncol(r) -nrow = nrow(r) -row_pix = 10000 -col_pix = 10000 -nc = floor(ncol/col_pix) -nr = floor(nrow/row_pix) +ex <- extent(r) +ncol <- ncol(r) +nrow <- nrow(r) +row_pix <- 10000 +col_pix <- 10000 +nc <- floor(ncol / col_pix) +nr <- floor(nrow / row_pix) # plot(r) -for (i in 1:(nc+1)) +for (i in 1:(nc + 1)) { - if (i == 1) - { - start_col = 1 - end_col = i * col_pix + if (i == 1) { + start_col <- 1 + end_col <- i * col_pix } - if (i == nc+1) - { - end_col = ncol - start_col = i * col_pix + if (i == nc + 1) { + end_col <- ncol + start_col <- i * col_pix } - - if (i > 1 & i < nc+1) - { - end_col = i*col_pix - start_col = end_col - col_pix + + if (i > 1 & i < nc + 1) { + end_col <- i * col_pix + start_col <- end_col - col_pix } - #print(paste(start_col, end_col)) + # print(paste(start_col, end_col)) - for (j in 1:(nr+1)) + for (j in 1:(nr + 1)) { - if (j == 1) - { - start_row = 1 - end_row = start_row + row_pix - 1 + if (j == 1) { + start_row <- 1 + end_row <- start_row + row_pix - 1 } - if (j == nr+1) - { - end_row = nrow - start_row = j * row_pix + if (j == nr + 1) { + end_row <- nrow + start_row <- j * row_pix } - - if (j > 1 & j < nr+1) - { - end_row = j*row_pix - start_row = end_row - row_pix + + if (j > 1 & j < nr + 1) { + end_row <- j * row_pix + start_row <- end_row - row_pix } - - #print(paste( start_row, end_row)) - - ext = extent(xFromCol(r, start_col), xFromCol(r, end_col), yFromRow(r, end_row), yFromRow(r, start_row)) + + # print(paste( start_row, end_row)) + + ext <- extent(xFromCol(r, start_col), xFromCol(r, end_col), yFromRow(r, end_row), yFromRow(r, start_row)) # points = rbind(c(ext[1], ext[3]), c(ext[1], ext[4]), c(ext[2], ext[3]), c(ext[2], ext[4])) # points(points, add = T, pch = 19, col = 'red') # text(x = ext[1], y = ext[3]+10000, paste(i, j, sep = "_"), cex = 
1.5, col = 'red', adj = 0.5, pos = 4) - lib1 = print("library(raster)") - rast = print("r = raster('/data/bmorrison/sda/bailey_paper/data/landcover/NLCD_2016_Land_Cover_L48_20190424.img')") - crop = print(paste("c = crop(r, extent(",ext[1], ",", ext[2], ",", ext[3], ",", ext[4], "))", sep = "")) + lib1 <- print("library(raster)") + rast <- print("r = raster('/data/bmorrison/sda/bailey_paper/data/landcover/NLCD_2016_Land_Cover_L48_20190424.img')") + crop <- print(paste("c = crop(r, extent(", ext[1], ",", ext[2], ",", ext[3], ",", ext[4], "))", sep = "")) - crop2 = print("c[c[] == 0] = NA") - crop3 = print("c[c[] <= 31 | c[] > 95 ] = NA") - write_raster = print(paste("writeRaster(c, file = '/data/bmorrison/sda/bailey_paper/data/landcover/tiles/NLCD_corrected_2016_", i, "_", j, ".tif', overwrite = T)", sep = "")) + crop2 <- print("c[c[] == 0] = NA") + crop3 <- print("c[c[] <= 31 | c[] > 95 ] = NA") + write_raster <- print(paste("writeRaster(c, file = '/data/bmorrison/sda/bailey_paper/data/landcover/tiles/NLCD_corrected_2016_", i, "_", j, ".tif', overwrite = T)", sep = "")) - out = c(lib1, rast, crop, crop2, crop3,write_raster) + out <- c(lib1, rast, crop, crop2, crop3, write_raster) write(out, file = paste("/data/bmorrison/sda/bailey_paper/data/landcover/tiles/qsub_files/r_", i, "_", j, ".R", sep = "")) - - } } @@ -77,76 +69,69 @@ for (i in 1:(nc+1)) # #################### REVERSE MASK FOR NON_NATURAL LOCATIONS ####################################### library(raster) -r = raster("/data/bmorrison/sda/bailey_paper/data/landcover/NLCD_2016_Land_Cover_L48_20190424.img") +r <- raster("/data/bmorrison/sda/bailey_paper/data/landcover/NLCD_2016_Land_Cover_L48_20190424.img") -ex = extent(r) -ncol = ncol(r) -nrow = nrow(r) -row_pix = 10000 -col_pix = 10000 -nc = floor(ncol/col_pix) -nr = floor(nrow/row_pix) +ex <- extent(r) +ncol <- ncol(r) +nrow <- nrow(r) +row_pix <- 10000 +col_pix <- 10000 +nc <- floor(ncol / col_pix) +nr <- floor(nrow / row_pix) # plot(r) -for (i in 1:(nc+1)) +for (i in 1:(nc + 1)) { - if (i == 1) - { - start_col = 1 - end_col = i * col_pix + if (i == 1) { + start_col <- 1 + end_col <- i * col_pix } - if (i == nc+1) - { - end_col = ncol - start_col = i * col_pix + if (i == nc + 1) { + end_col <- ncol + start_col <- i * col_pix } - - if (i > 1 & i < nc+1) - { - end_col = i*col_pix - start_col = end_col - col_pix + + if (i > 1 & i < nc + 1) { + end_col <- i * col_pix + start_col <- end_col - col_pix } - #print(paste(start_col, end_col)) - - for (j in 1:(nr+1)) + # print(paste(start_col, end_col)) + + for (j in 1:(nr + 1)) { - if (j == 1) - { - start_row = 1 - end_row = start_row + row_pix - 1 + if (j == 1) { + start_row <- 1 + end_row <- start_row + row_pix - 1 } - if (j == nr+1) - { - end_row = nrow - start_row = j * row_pix + if (j == nr + 1) { + end_row <- nrow + start_row <- j * row_pix } - - if (j > 1 & j < nr+1) - { - end_row = j*row_pix - start_row = end_row - row_pix + + if (j > 1 & j < nr + 1) { + end_row <- j * row_pix + start_row <- end_row - row_pix } - - #print(paste( start_row, end_row)) - - ext = extent(xFromCol(r, start_col), xFromCol(r, end_col), yFromRow(r, end_row), yFromRow(r, start_row)) + + # print(paste( start_row, end_row)) + + ext <- extent(xFromCol(r, start_col), xFromCol(r, end_col), yFromRow(r, end_row), yFromRow(r, start_row)) # points = rbind(c(ext[1], ext[3]), c(ext[1], ext[4]), c(ext[2], ext[3]), c(ext[2], ext[4])) # points(points, add = T, pch = 19, col = 'red') # text(x = ext[1], y = ext[3]+10000, paste(i, j, sep = "_"), cex = 1.5, col = 'red', adj = 
0.5, pos = 4) - - lib1 = print("library(raster)") - rast = print("r = raster('/data/bmorrison/sda/bailey_paper/data/landcover/NLCD_2016_Land_Cover_L48_20190424.img')") - crop = print(paste("c = crop(r, extent(",ext[1], ",", ext[2], ",", ext[3], ",", ext[4], "))", sep = "")) - crop2 = print("c[c[] == 0] = NA") - crop3 = print("c[c[] == 11 | c[] == 12 | c[] == 21 | c[] == 22 | c[] == 23 | c[] == 24 | c[] == 31] = 0") - crop4 = print("c[c[] == 41 | c[] == 42| c[] == 43 | c[] == 52 | c[] == 71 | c[] ==81 | c[] == 82 | c[] == 90 | c[] == 95] = 1") - write_raster = print(paste("writeRaster(c, file = '/data/bmorrison/sda/bailey_paper/data/landcover/tiles/NLCD_corrected_2016_mask_", i, "_", j, ".tif', overwrite = T)", sep = "")) + lib1 <- print("library(raster)") + rast <- print("r = raster('/data/bmorrison/sda/bailey_paper/data/landcover/NLCD_2016_Land_Cover_L48_20190424.img')") + crop <- print(paste("c = crop(r, extent(", ext[1], ",", ext[2], ",", ext[3], ",", ext[4], "))", sep = "")) + + crop2 <- print("c[c[] == 0] = NA") + crop3 <- print("c[c[] == 11 | c[] == 12 | c[] == 21 | c[] == 22 | c[] == 23 | c[] == 24 | c[] == 31] = 0") + crop4 <- print("c[c[] == 41 | c[] == 42| c[] == 43 | c[] == 52 | c[] == 71 | c[] ==81 | c[] == 82 | c[] == 90 | c[] == 95] = 1") + write_raster <- print(paste("writeRaster(c, file = '/data/bmorrison/sda/bailey_paper/data/landcover/tiles/NLCD_corrected_2016_mask_", i, "_", j, ".tif', overwrite = T)", sep = "")) - out = c(lib1, rast, crop, crop2, crop3,crop4, write_raster) + out <- c(lib1, rast, crop, crop2, crop3, crop4, write_raster) write(out, file = paste("/data/bmorrison/sda/bailey_paper/data/landcover/tiles/qsub_files/r_", i, "_", j, ".R", sep = "")) - } } @@ -160,100 +145,97 @@ library(tidyverse) library(gridExtra) library(scatterplot3d) library(rgl) -setwd('/data/bmorrison/sda/bailey_paper/data/climate/') -files1 = list.files(path = '/data/bmorrison/sda/bailey_paper/data/climate/', pattern = 'annual')[-4] -files2 = c("tmax_max.tif", "tmin_min.tif") -files = c(files1, files2) +setwd("/data/bmorrison/sda/bailey_paper/data/climate/") +files1 <- list.files(path = "/data/bmorrison/sda/bailey_paper/data/climate/", pattern = "annual")[-4] +files2 <- c("tmax_max.tif", "tmin_min.tif") +files <- c(files1, files2) # summer = stack(list.files(path = '/data/bmorrison/sda/bailey_paper/data/climate/', pattern = 'summer', include.dirs = T, full.names = T)) -#winter = stack(list.files(path = '/data/bmorrison/sda/bailey_paper/data/climate/', pattern = 'winter', include.dirs = T, full.names = T)) -landcover = raster('/data/bmorrison/sda/bailey_paper/data/landcover/landcover_2016_aligned.tif') -mask = landcover -mask[mask[]>0] = 1 -mask[mask[] != 1] = NA - -#mask = raster('/data/bmorrison/sda/bailey_paper/data/landcover/landcover_mask_reverse_aligned.tif') -#mask[mask[] == 0] = 1 - -#climate = stack(summer, winter, landcover) -climate = stack(files, landcover) -climate = climate*mask -names(climate) = c("aet", "def", "rad", "rain", "snow", "tmax", "tmin", "landcover") -#names(climate) = c("aet_s", "def_s", "rad_s", "rain_s", "snow_s", "tmax_s", "tmin_s", "aet_w", "def_w", "rad_w", "rain_w", "snow_w", "tmax_w", "tmin_w", "landcover") - -#climate = subset(climate, c(1,2,4,5,6, 7, 8, 9, 11, 12, 13, 14, 15)) -ecoregions = raster('/data/bmorrison/sda/bailey_paper/data/ecoregion_l3.tif') +# winter = stack(list.files(path = '/data/bmorrison/sda/bailey_paper/data/climate/', pattern = 'winter', include.dirs = T, full.names = T)) +landcover <- 
raster("/data/bmorrison/sda/bailey_paper/data/landcover/landcover_2016_aligned.tif") +mask <- landcover +mask[mask[] > 0] <- 1 +mask[mask[] != 1] <- NA + +# mask = raster('/data/bmorrison/sda/bailey_paper/data/landcover/landcover_mask_reverse_aligned.tif') +# mask[mask[] == 0] = 1 + +# climate = stack(summer, winter, landcover) +climate <- stack(files, landcover) +climate <- climate * mask +names(climate) <- c("aet", "def", "rad", "rain", "snow", "tmax", "tmin", "landcover") +# names(climate) = c("aet_s", "def_s", "rad_s", "rain_s", "snow_s", "tmax_s", "tmin_s", "aet_w", "def_w", "rad_w", "rain_w", "snow_w", "tmax_w", "tmin_w", "landcover") + +# climate = subset(climate, c(1,2,4,5,6, 7, 8, 9, 11, 12, 13, 14, 15)) +ecoregions <- raster("/data/bmorrison/sda/bailey_paper/data/ecoregion_l3.tif") # ecoregions = shapefile('/data/bmorrison/sda/bailey_paper/data/ecoregions_shapefile/eco_aea.shp') # ecoregions = spTransform(ecoregions, CRS = crs(climate)) -us = shapefile('/data/bmorrison/sda/bailey_paper/data/cb_2018_us_nation_20m/cb_2018_us_nation_20m.shp') -us = crop(spTransform(us, CRS = crs(climate)), extent(climate)) -us = rasterize(us, ecoregions, field = 1) -eco = ecoregions * us - -clusters = eco * NA - -regions_index = unique(ecoregion$CODE) -#regions_index = regions[-1] -regions = unique(ecoregion$NA_L3CODE) -regions_code = unique(ecoregion$NA_L3CODE) -regions = grep('^[1][0].[1]', regions) -regions = regions_index[regions] +us <- shapefile("/data/bmorrison/sda/bailey_paper/data/cb_2018_us_nation_20m/cb_2018_us_nation_20m.shp") +us <- crop(spTransform(us, CRS = crs(climate)), extent(climate)) +us <- rasterize(us, ecoregions, field = 1) +eco <- ecoregions * us + +clusters <- eco * NA + +regions_index <- unique(ecoregion$CODE) +# regions_index = regions[-1] +regions <- unique(ecoregion$NA_L3CODE) +regions_code <- unique(ecoregion$NA_L3CODE) +regions <- grep("^[1][0].[1]", regions) +regions <- regions_index[regions] for (i in 1:length(regions)) { - index = which(eco[] == regions[i]) - if (length(index)>0) - { - xy = xyFromCell(eco, cell = index) - data = as.data.frame(raster::extract(climate, index)) - data$index = index - #data = cbind(data, xy) - nas = which(is.na(data[,1])) - data = as.data.frame(data[complete.cases(data),]) - - data.scale = as.data.frame(scale(data[1:8])) - - data.scale = data.scale[complete.cases(data.scale),] + index <- which(eco[] == regions[i]) + if (length(index) > 0) { + xy <- xyFromCell(eco, cell = index) + data <- as.data.frame(raster::extract(climate, index)) + data$index <- index + # data = cbind(data, xy) + nas <- which(is.na(data[, 1])) + data <- as.data.frame(data[complete.cases(data), ]) + + data.scale <- as.data.frame(scale(data[1:8])) + + data.scale <- data.scale[complete.cases(data.scale), ] dim(data) dim(data.scale) print(regions_code[regions[i]]) } print(dim(data.scale)) - - k2 = hkmeans(data.scale, 2, hc.metric = "euclidean", iter.max = 10) - k3 = hkmeans(data.scale, 3, hc.metric = "euclidean", iter.max = 50) - k4 = hkmeans(data.scale, 4, hc.metric = "euclidean", iter.max = 50) - k5 = hkmeans(data.scale, 5, hc.metric = "euclidean", iter.max = 50) - k6 = hkmeans(data.scale, 6, hc.metric = "euclidean", iter.max = 50) + + k2 <- hkmeans(data.scale, 2, hc.metric = "euclidean", iter.max = 10) + k3 <- hkmeans(data.scale, 3, hc.metric = "euclidean", iter.max = 50) + k4 <- hkmeans(data.scale, 4, hc.metric = "euclidean", iter.max = 50) + k5 <- hkmeans(data.scale, 5, hc.metric = "euclidean", iter.max = 50) + k6 <- hkmeans(data.scale, 6, hc.metric = "euclidean", 
iter.max = 50) # k7 = hkmeans(data.scale, 7, hc.metric = "euclidean", iter.max = 50) # k8 = hkmeans(data.scale, 8, hc.metric = "euclidean", iter.max = 50) # k9 = hkmeans(data.scale, 9, hc.metric = "euclidean", iter.max = 50) # k10 = hkmeans(data.scale, 10, hc.metric = "euclidean", iter.max = 50) - - - p1 = fviz_cluster(k2, data = data.scale, labelsize = 0) - p2 = fviz_cluster(k3, data = data.scale, labelsize = 0) - p3 = fviz_cluster(k4, data = data.scale, labelsize = 0) - p4 = fviz_cluster(k5, data = data.scale, labelsize = 0) - p5 = fviz_cluster(k6, data = data.scale, labelsize = 0) + + + p1 <- fviz_cluster(k2, data = data.scale, labelsize = 0) + p2 <- fviz_cluster(k3, data = data.scale, labelsize = 0) + p3 <- fviz_cluster(k4, data = data.scale, labelsize = 0) + p4 <- fviz_cluster(k5, data = data.scale, labelsize = 0) + p5 <- fviz_cluster(k6, data = data.scale, labelsize = 0) # p6 = fviz_cluster(k7, data = data.scale, labelsize = 0) # p7 = fviz_cluster(k8, data = data.scale, labelsize = 0) # p8 = fviz_cluster(k9, data = data.scale, labelsize = 0) # p9 = fviz_cluster(k10, data = data.scale, labelsize = 0) - - - #grid.arrange( p9,p10, p11,p12,p13,nrow = 2) # ) - - grid.arrange(p1, p2, p3,p4, p5, nrow = 2) # ) - - pdf(file = '/data/bmorrison/sda/bailey_paper/data/clusters/snake_river_plain_1018_cluster_plots.pdf', width = 16, height = 8) - grid.arrange(p1, p2, p3,p4, p5,nrow = 2) #p7, p8, p9, nrow = 3) + + + # grid.arrange( p9,p10, p11,p12,p13,nrow = 2) # ) + + grid.arrange(p1, p2, p3, p4, p5, nrow = 2) # ) + + pdf(file = "/data/bmorrison/sda/bailey_paper/data/clusters/snake_river_plain_1018_cluster_plots.pdf", width = 16, height = 8) + grid.arrange(p1, p2, p3, p4, p5, nrow = 2) # p7, p8, p9, nrow = 3) dev.off() - - clusters[data$index] = k4$cluster+186 - + + clusters[data$index] <- k4$cluster + 186 + # plot3d(x = data.scale$tmax_s, y = data.scale$aet_s, z = data.scale$landcover, col = k2$cluster) # scatterplot3d(x = data.scale$tmax_s, y = data.scale$aet_s, z = data.scale$landcover, color = k2$cluster, pch = 19) - - } # library(ppcor) @@ -264,7 +246,7 @@ for (i in 1:length(regions)) # diag(pvals) = NA # colnames(pvals) = c(names(climate), "x", "y") # rownames(pvals) = c(names(climate), "x", "y") -# +# # bad = which(abs(pvals) > 0.05) # pairs(data) @@ -284,14 +266,14 @@ for (i in 1:length(regions)) # state = row.names(d)) %>% # ggplot(aes(x, y, color = factor(cluster), label = state)) + # geom_text() -# +# # # p2 <- d %>% # # as_tibble() %>% # # mutate(cluster = k1$cluster, # # state = row.names(d)) %>% # # ggplot(aes(aet_s, tmin_w, color = factor(cluster), label = state)) + # # geom_text() -# # +# # # # p3 <- d %>% # # as_tibble() %>% # # mutate(cluster = k1$cluster, @@ -310,22 +292,22 @@ for (i in 1:length(regions)) # state = row.names(d)) %>% # ggplot(aes(x, y, color = factor(cluster), label = state)) + # geom_text() -# +# # library(gridExtra) # grid.arrange(p1, p2, p3,nrow = 1) -# -# -# +# +# +# # Elbow method # fviz_nbclust(d, kmeans, k.max = 20, method = "wss") + # labs(subtitle = "Elbow method") -# +# # # Silhouette method # fviz_nbclust(d, kmeans, k.max = 20, method = "silhouette")+ # labs(subtitle = "Silhouette method") -# -# +# +# # set.seed(123) # fviz_nbclust(d, kmeans, nstart = 25, k.max = 100, method = "gap_stat", nboot = 50)+ # labs(subtitle = "Gap statistic method") diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/upscaling_sda_output/AGB_rf_model.R b/modules/assim.sequential/inst/sda_backup/bmorrison/upscaling_sda_output/AGB_rf_model.R index 
97d4fd1c4ac..78363ba326e 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/upscaling_sda_output/AGB_rf_model.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/upscaling_sda_output/AGB_rf_model.R @@ -10,7 +10,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -19,77 +19,86 @@ library(dplyr) library(furrr) library(tictoc) -analysis = readRDS('/data2/bmorrison/sda/500_site_run/output_folder/ANALYSIS.RDS') -forecast = readRDS('/data2/bmorrison/sda/500_site_run/output_folder/FORECAST.RDS') +analysis <- readRDS("/data2/bmorrison/sda/500_site_run/output_folder/ANALYSIS.RDS") +forecast <- readRDS("/data2/bmorrison/sda/500_site_run/output_folder/FORECAST.RDS") -dates = names(analysis) -sites = unique(attributes(analysis[[names(analysis)[1]]])$Site) -observations = sites +dates <- names(analysis) +sites <- unique(attributes(analysis[[names(analysis)[1]]])$Site) +observations <- sites -obs = observations -working = print(paste("working on: ", i)) -sites = print(obs) +obs <- observations +working <- print(paste("working on: ", i)) +sites <- print(obs) PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- obs suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) + ids = site_ID, .con = con +)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) # this is a super crude way to make the dataframe with median values + .5 and .95 quantiles for analysis and forecast. 
-analysis_data = data.frame() +analysis_data <- data.frame() for (i in 1:length(dates)) { - data = analysis[[dates[i]]] - agb_data = data[,which(colnames(data) == "AbvGrndWood")] - agb_a = as.vector(apply(agb_data, 2, FUN = median, na.rm = T)) - - lai_data = data[,which(colnames(data) == "LAI")] - lai_a = as.vector(apply(lai_data, 2, FUN = median)) - - soil_data = data[,which(colnames(data) == "TotSoilCarb")] - soil_a = as.vector(apply(soil_data, 2, FUN = median)) - - data = forecast[[dates[i]]] - agb_data = data[,which(colnames(data) == "AbvGrndWood")] - agb_f = as.vector(apply(agb_data, 2, FUN = median, na.rm = T)) - - lai_data = data[,which(colnames(data) == "LAI")] - lai_f = as.vector(apply(lai_data, 2, FUN = median)) - - soil_data = data[,which(colnames(data) == "TotSoilCarb")] - soil_f = as.vector(apply(soil_data, 2, FUN = median)) - - info = as.data.frame(cbind(site_info$site_id, dates[i], site_info$lon, site_info$lat, agb_a, lai_a, soil_a, - agb_f, lai_f, soil_f), stringsAsFactors = F) - names(info) = c("site_id", "date", "lon", "lat", "agb_a", "lai_a", "soil_a", - "agb_f", "lai_f", "soil_f") - analysis_data = rbind(analysis_data, info, stringsAsFactors = F) + data <- analysis[[dates[i]]] + agb_data <- data[, which(colnames(data) == "AbvGrndWood")] + agb_a <- as.vector(apply(agb_data, 2, FUN = median, na.rm = T)) + + lai_data <- data[, which(colnames(data) == "LAI")] + lai_a <- as.vector(apply(lai_data, 2, FUN = median)) + + soil_data <- data[, which(colnames(data) == "TotSoilCarb")] + soil_a <- as.vector(apply(soil_data, 2, FUN = median)) + + data <- forecast[[dates[i]]] + agb_data <- data[, which(colnames(data) == "AbvGrndWood")] + agb_f <- as.vector(apply(agb_data, 2, FUN = median, na.rm = T)) + + lai_data <- data[, which(colnames(data) == "LAI")] + lai_f <- as.vector(apply(lai_data, 2, FUN = median)) + + soil_data <- data[, which(colnames(data) == "TotSoilCarb")] + soil_f <- as.vector(apply(soil_data, 2, FUN = median)) + + info <- as.data.frame(cbind( + site_info$site_id, dates[i], site_info$lon, site_info$lat, agb_a, lai_a, soil_a, + agb_f, lai_f, soil_f + ), stringsAsFactors = F) + names(info) <- c( + "site_id", "date", "lon", "lat", "agb_a", "lai_a", "soil_a", + "agb_f", "lai_f", "soil_f" + ) + analysis_data <- rbind(analysis_data, info, stringsAsFactors = F) } -analysis_data$date = as.numeric(substr(analysis_data$date, 1, 4)) -analysis_data$lon = as.numeric(analysis_data$lon) -analysis_data$lat = as.numeric(analysis_data$lat) -analysis_data$agb_a = as.numeric(analysis_data$agb_a) +analysis_data$date <- as.numeric(substr(analysis_data$date, 1, 4)) +analysis_data$lon <- as.numeric(analysis_data$lon) +analysis_data$lat <- as.numeric(analysis_data$lat) +analysis_data$agb_a <- as.numeric(analysis_data$agb_a) -analysis_data$lai_a = as.numeric(analysis_data$lai_a) +analysis_data$lai_a <- as.numeric(analysis_data$lai_a) -analysis_data$soil_a = as.numeric(analysis_data$soil_a) +analysis_data$soil_a <- as.numeric(analysis_data$soil_a) -analysis_data$agb_f = as.numeric(analysis_data$agb_f) +analysis_data$agb_f <- as.numeric(analysis_data$agb_f) -analysis_data$lai_f = as.numeric(analysis_data$lai_f) +analysis_data$lai_f <- as.numeric(analysis_data$lai_f) -analysis_data$soil_f = as.numeric(analysis_data$soil_f) +analysis_data$soil_f <- as.numeric(analysis_data$soil_f) # lt = shapefile('/data/bmorrison/sda/500_site_run/Landtrendr_AGB_493_sites/lt_agb.shp') # lt = as.data.frame(lt) @@ -104,116 +113,114 @@ analysis_data$soil_f = as.numeric(analysis_data$soil_f) # } # } -pft = 
shapefile('/data/bmorrison/sda/500_site_run/SDA_500_Soil.shp') -pft = as.data.frame(pft) -analysis_data$pft = NA -sites = unique(analysis_data$site_id) +pft <- shapefile("/data/bmorrison/sda/500_site_run/SDA_500_Soil.shp") +pft <- as.data.frame(pft) +analysis_data$pft <- NA +sites <- unique(analysis_data$site_id) for (i in 1:length(sites)) { - index_sites = which(analysis_data$site_id == sites[i]) - index_data = which(analysis_data$lon[index_sites][1] == pft$coords.x1 | analysis_data$lat[index_sites][1] == pft$coords.x2) - analysis_data$pft[index_sites] = pft$pft[index_data][1] + index_sites <- which(analysis_data$site_id == sites[i]) + index_data <- which(analysis_data$lon[index_sites][1] == pft$coords.x1 | analysis_data$lat[index_sites][1] == pft$coords.x2) + analysis_data$pft[index_sites] <- pft$pft[index_data][1] } -eco = shapefile('/data2/bmorrison/sda/bailey_paper/data/ecoregions_shapefile/eco_aea_l1.shp') -test = extract(eco, cbind(analysis_data$lon, analysis_data$lat)) +eco <- shapefile("/data2/bmorrison/sda/bailey_paper/data/ecoregions_shapefile/eco_aea_l1.shp") +test <- extract(eco, cbind(analysis_data$lon, analysis_data$lat)) -analysis_data$pft = as.factor(analysis_data$pft) -analysis_data$eco = as.factor(test$NA_L1CODE) +analysis_data$pft <- as.factor(analysis_data$pft) +analysis_data$eco <- as.factor(test$NA_L1CODE) # add in climate data # files = list.files(path = '/data2/bmorrison/sda/bailey_paper/data/climate/', pattern = '.tif', include.dirs = T, full.names = T) # files = files[c(2,7,18,23,27, 32)] -clim = stack(c('/data2/bmorrison/prism_climate/annuals/aet_annual.tif', '/data2/bmorrison/prism_climate/annuals/def_annual.tif')) -names(clim) = c("aet", "def") +clim <- stack(c("/data2/bmorrison/prism_climate/annuals/aet_annual.tif", "/data2/bmorrison/prism_climate/annuals/def_annual.tif")) +names(clim) <- c("aet", "def") -t = as.data.frame(extract(clim, cbind(analysis_data$lon, analysis_data$lat))) +t <- as.data.frame(extract(clim, cbind(analysis_data$lon, analysis_data$lat))) -analysis_data$aet = t$aet -analysis_data$def = t$def +analysis_data$aet <- t$aet +analysis_data$def <- t$def # analysis_data$rain = t$rain # analysis_data$snow = t$snow # analysis_data$tmax = t$tmax # analysis_data$tmin = t$tmin -elev = raster('/data2/bmorrison/prism_climate/normals/elevation/PRISM_us_dem_4km_bil.bil') -analysis_data$elev = as.numeric(extract(elev, cbind(analysis_data$lon, analysis_data$lat))) -#outlier analysis -sd = sd(analysis_data$agb_a, na.rm = T) -z = abs(analysis_data$agb_a - mean(analysis_data$agb_a, na.rm = T))/sd -bad = which(z >= 3) +elev <- raster("/data2/bmorrison/prism_climate/normals/elevation/PRISM_us_dem_4km_bil.bil") +analysis_data$elev <- as.numeric(extract(elev, cbind(analysis_data$lon, analysis_data$lat))) +# outlier analysis +sd <- sd(analysis_data$agb_a, na.rm = T) +z <- abs(analysis_data$agb_a - mean(analysis_data$agb_a, na.rm = T)) / sd +bad <- which(z >= 3) -data = analysis_data[-bad,] +data <- analysis_data[-bad, ] # data = analysis_data # year = 2012 # index = which(data$date == year) # data = data[index,] -s = sample(1:nrow(data), round(.8*nrow(data))) -training = data[s,] -testing = data[-s,] +s <- sample(1:nrow(data), round(.8 * nrow(data))) +training <- data[s, ] +testing <- data[-s, ] # stratified sampling for the different agb values) -min = min(training$agb_a, na.rm = T) -max = max(training$agb_a, na.rm = T) -bin_width = (max-min)/20 -bins = seq(from = min, to = max, by = bin_width) -training_bins = cut(training$agb_a, breaks = bins, include.lowest = 
T) -training$bin = training_bins +min <- min(training$agb_a, na.rm = T) +max <- max(training$agb_a, na.rm = T) +bin_width <- (max - min) / 20 +bins <- seq(from = min, to = max, by = bin_width) +training_bins <- cut(training$agb_a, breaks = bins, include.lowest = T) +training$bin <- training_bins -bins = sort(unique(training$bin)) -nrows = 20000 -nsamples = round(nrows/length(unique(training$bin)), digits = 0) +bins <- sort(unique(training$bin)) +nrows <- 20000 +nsamples <- round(nrows / length(unique(training$bin)), digits = 0) -training_dataset = data.frame() +training_dataset <- data.frame() for (i in 1:length(bins)) { - index = which(training$bin == bins[i]) - s = sample(1:length(index), nsamples, replace = T) - d = training[index[s],] - training_dataset = rbind(training_dataset, d) + index <- which(training$bin == bins[i]) + s <- sample(1:length(index), nsamples, replace = T) + d <- training[index[s], ] + training_dataset <- rbind(training_dataset, d) } library(mgcv) # test = gam(agb_a ~ s(lon, lat) + s(elev) + s(aet) + s(def) , data = training_dataset, method = "REML") -# +# # pr = as.numeric(predict(test, newdata = testing, na.action = na.pass)) # cor(testing$agb_a, pr, use = "complete.obs") library(randomForest) -rf1 = randomForest(agb_a ~ aet+def+elev, data = training_dataset, na.action = na.omit) +rf1 <- randomForest(agb_a ~ aet + def + elev, data = training_dataset, na.action = na.omit) -pr = as.numeric(predict(rf1, newdata = testing, na.action = na.pass)) +pr <- as.numeric(predict(rf1, newdata = testing, na.action = na.pass)) cor(testing$agb_a, pr, use = "complete.obs") -max = max(pr, testing$agb_a, na.rm = T) -par(mfrow = c(1,2)) +max <- max(pr, testing$agb_a, na.rm = T) +par(mfrow = c(1, 2)) varImpPlot(rf1, main = as.character(rf1$call)[2]) -plot(testing$agb_a, pr, xlim = c(0,max), ylim = c(0,max), xlab = "SDA AGB", ylab = "Predicted AGB", main = "", col = 'black') -abline(0,1, col = 'red') +plot(testing$agb_a, pr, xlim = c(0, max), ylim = c(0, max), xlab = "SDA AGB", ylab = "Predicted AGB", main = "", col = "black") +abline(0, 1, col = "red") -save(rf1, file = '/data2/bmorrison/sda/500_site_run/output_folder/agb_all_dates_rf_model.RData') -test = analysis_data[analysis_data$date == 2012,] +save(rf1, file = "/data2/bmorrison/sda/500_site_run/output_folder/agb_all_dates_rf_model.RData") +test <- analysis_data[analysis_data$date == 2012, ] -pr2 = pr = as.numeric(predict(rf1, newdata = test, na.action = na.pass)) +pr2 <- pr <- as.numeric(predict(rf1, newdata = test, na.action = na.pass)) cor(test$agb_a, pr2, use = "complete.obs") -max = max(pr2, test$agb_a, na.rm = T) -par(mfrow = c(1,2)) +max <- max(pr2, test$agb_a, na.rm = T) +par(mfrow = c(1, 2)) varImpPlot(rf1, main = as.character(rf1$call)[2]) -plot(test$agb_a, pr2, xlim = c(0,max), ylim = c(0,max), xlab = "SDA AGB", ylab = "Predicted AGB", main = "2012", col = 'black') -abline(0,1, col = 'red') +plot(test$agb_a, pr2, xlim = c(0, max), ylim = c(0, max), xlab = "SDA AGB", ylab = "Predicted AGB", main = "2012", col = "black") +abline(0, 1, col = "red") library(Metrics) rmse(testing$agb_a, pr) # percent bias -sum(testing_agb_a-pr, na.rm = T)/length(pr) - - +sum(testing$agb_a - pr, na.rm = T) / length(pr) diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/upscaling_sda_output/check_site_id_mixup.R b/modules/assim.sequential/inst/sda_backup/bmorrison/upscaling_sda_output/check_site_id_mixup.R index acf22fc0f27..355d9056ef3 100755 --- 
a/modules/assim.sequential/inst/sda_backup/bmorrison/upscaling_sda_output/check_site_id_mixup.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/upscaling_sda_output/check_site_id_mixup.R @@ -3,65 +3,75 @@ library(PEcAn.settings) library(shapefiles) library(raster) ############################ SDA run information -analysis = readRDS('/Volumes/data2/bmorrison/sda/500_site_run/output_folder/ANALYSIS.RDS') -dates = names(analysis) -sites = unique(attributes(analysis[[names(analysis)[1]]])$Site) -sda_obs = sites +analysis <- readRDS("/Volumes/data2/bmorrison/sda/500_site_run/output_folder/ANALYSIS.RDS") +dates <- names(analysis) +sites <- unique(attributes(analysis[[names(analysis)[1]]])$Site) +sda_obs <- sites site_ID <- sda_obs suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) + ids = site_ID, .con = con +)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -sda_site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +sda_site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) ############################# BETY information settings <- read.settings("/Volumes/data/bmorrison/sda/500_site_run/pecan_MultiSite_SDA_LAI_AGB_sitegroup_500.xml") -bety <- list(user='bety', password='bety', host='modex.bnl.gov', - dbname='betydb', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "modex.bnl.gov", + dbname = "betydb", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con -if ("sitegroup" %in% names(settings)){ - if (is.null(settings$sitegroup$nSite)){ +if ("sitegroup" %in% names(settings)) { + if (is.null(settings$sitegroup$nSite)) { settings <- PEcAn.settings::createSitegroupMultiSettings(settings, - sitegroupId = settings$sitegroup$id, con = con) + sitegroupId = settings$sitegroup$id, con = con + ) } else { settings <- PEcAn.settings::createSitegroupMultiSettings(settings, - sitegroupId = settings$sitegroup$id, - nSite = settings$sitegroup$nSite) + sitegroupId = settings$sitegroup$id, + nSite = settings$sitegroup$nSite + ) } settings$sitegroup <- NULL ## zero out so don't expand a second time if re-reading } observation <- c() for (i in seq_along(1:length(settings$run))) { - command <- paste0("settings$run$settings.",i,"$site$id") - obs <- eval(parse(text=command)) - observation <- c(observation,obs) + command <- paste0("settings$run$settings.", i, "$site$id") + obs <- eval(parse(text = command)) + observation <- c(observation, obs) } -bety_sites = observation +bety_sites <- observation site_ID <- bety_sites suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) + ids = site_ID, .con = con +)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -bety_site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, 
time_zone=qry_results$time_zone) +bety_site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) ######################## COMPARE SITE ID INFORMATION ############################## -#bety sites will have the extra sites up to 517 because this group was made before the sites with bad modis data was removed. -bety_sites = bety_site_info$site_id -sda_sites = sda_site_info$site_id +# bety sites will have the extra sites up to 517 because this group was made before the sites with bad modis data was removed. +bety_sites <- bety_site_info$site_id +sda_sites <- sda_site_info$site_id ### lets make sure they have the correct number of sites matching by id length(which(sda_sites %in% bety_sites)) @@ -69,35 +79,34 @@ length(which(sda_sites %in% bety_sites)) # SITE IDS DO NOT MATCH THE OUTPUT OF THE SDA # # Do the coordinates match up at least? -sda_data = as.data.frame(cbind(sda_site_info$site_id, sda_site_info$lon, sda_site_info$lat)) -names(sda_data) = c("id", "lon", "lat") -bety_data = as.data.frame(cbind(bety_site_info$site_id, bety_site_info$lon, bety_site_info$lat)) -names(bety_data) = c("id", "lon", "lat") -coordinates(sda_data) = ~lon+lat -coordinates(bety_data) = ~lon+lat -projection(sda_data) = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " -projection(bety_data) = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " - -us = shapefile('/Volumes/data2/bmorrison/sda/500_site_run/output_folder/us_border_correct.shp') -us = spTransform(us, CRS = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ") - -par(mfrow = c(2,1)) +sda_data <- as.data.frame(cbind(sda_site_info$site_id, sda_site_info$lon, sda_site_info$lat)) +names(sda_data) <- c("id", "lon", "lat") +bety_data <- as.data.frame(cbind(bety_site_info$site_id, bety_site_info$lon, bety_site_info$lat)) +names(bety_data) <- c("id", "lon", "lat") +coordinates(sda_data) <- ~ lon + lat +coordinates(bety_data) <- ~ lon + lat +projection(sda_data) <- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " +projection(bety_data) <- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs " + +us <- shapefile("/Volumes/data2/bmorrison/sda/500_site_run/output_folder/us_border_correct.shp") +us <- spTransform(us, CRS = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ") + +par(mfrow = c(2, 1)) plot(us) -plot(sda_data, add = T, pch = 20, col = 'blue') +plot(sda_data, add = T, pch = 20, col = "blue") plot(us) -plot(bety_data, add = T, pch = 20, col = 'red') +plot(bety_data, add = T, pch = 20, col = "red") ## Coorindates appear to be the same locations. lets make for sure. 
-sda_data = as.data.frame(cbind(sda_site_info$site_id, sda_site_info$lon, sda_site_info$lat)) -names(sda_data) = c("id", "lon", "lat") -bety_data = as.data.frame(cbind(bety_site_info$site_id, bety_site_info$lon, bety_site_info$lat)) -names(bety_data) = c("id", "lon", "lat") -count = vector() +sda_data <- as.data.frame(cbind(sda_site_info$site_id, sda_site_info$lon, sda_site_info$lat)) +names(sda_data) <- c("id", "lon", "lat") +bety_data <- as.data.frame(cbind(bety_site_info$site_id, bety_site_info$lon, bety_site_info$lat)) +names(bety_data) <- c("id", "lon", "lat") +count <- vector() for (i in 1:nrow(sda_data)) { - index = which(bety_data$lon == sda_data$lon[i] & bety_data$lat == sda_data$lat[i]) - if (length(index) > 0) - { - count = c(count, i) + index <- which(bety_data$lon == sda_data$lon[i] & bety_data$lat == sda_data$lat[i]) + if (length(index) > 0) { + count <- c(count, i) } -} \ No newline at end of file +} diff --git a/modules/assim.sequential/inst/sda_backup/bmorrison/upscaling_sda_output/rf_agb_model_2021_CODE.R b/modules/assim.sequential/inst/sda_backup/bmorrison/upscaling_sda_output/rf_agb_model_2021_CODE.R index 93a141dd922..ea0bcb96fd6 100755 --- a/modules/assim.sequential/inst/sda_backup/bmorrison/upscaling_sda_output/rf_agb_model_2021_CODE.R +++ b/modules/assim.sequential/inst/sda_backup/bmorrison/upscaling_sda_output/rf_agb_model_2021_CODE.R @@ -3,148 +3,152 @@ library(randomForest) library(shapefiles) # get input data setup -ndvi = raster('/data2/bmorrison/sda/500_site_run/data/MODIS_Veg/NDVI_2012_aligned.tif') -evi = raster('/data2/bmorrison/sda/500_site_run/data/MODIS_Veg/EVI_2012_aligned.tif') +ndvi <- raster("/data2/bmorrison/sda/500_site_run/data/MODIS_Veg/NDVI_2012_aligned.tif") +evi <- raster("/data2/bmorrison/sda/500_site_run/data/MODIS_Veg/EVI_2012_aligned.tif") -clim_files = list.files(path = '/data2/bmorrison/sda/bailey_paper/data/climate/', pattern = "annual", include.dirs = T, full.names = T) -clim_files = clim_files[c(2,4,5, 9, 11)] -clim_files = c(clim_files, "/data2/bmorrison/sda/bailey_paper/data/climate/tmax_max.tif", "/data2/bmorrison/sda/bailey_paper/data/climate/tmin_min.tif", "/data2/bmorrison/prism_climate/annuals/elevation.tif") +clim_files <- list.files(path = "/data2/bmorrison/sda/bailey_paper/data/climate/", pattern = "annual", include.dirs = T, full.names = T) +clim_files <- clim_files[c(2, 4, 5, 9, 11)] +clim_files <- c(clim_files, "/data2/bmorrison/sda/bailey_paper/data/climate/tmax_max.tif", "/data2/bmorrison/sda/bailey_paper/data/climate/tmin_min.tif", "/data2/bmorrison/prism_climate/annuals/elevation.tif") -s = stack(c(ndvi, evi, clim_files)) -mask = raster("/data2/bmorrison/sda/500_site_run/data/MODIS_Veg/mask.tif") -s = s*mask -names(s) = c("ndvi", "evi", "aet", "def", "rad", "rain", "snow", "tmax", "tmin", "elevation") -projection(s) = crs(ndvi) +s <- stack(c(ndvi, evi, clim_files)) +mask <- raster("/data2/bmorrison/sda/500_site_run/data/MODIS_Veg/mask.tif") +s <- s * mask +names(s) <- c("ndvi", "evi", "aet", "def", "rad", "rain", "snow", "tmax", "tmin", "elevation") +projection(s) <- crs(ndvi) # pull in the SDA run AGB data -analysis = readRDS('/Volumes/data2/bmorrison/sda/500_site_run/output_folder/ANALYSIS.RDS') -sites = unique(attributes(analysis[[names(analysis)[1]]])$Site) -obs = sites -bety <- list(user='bety', password='bety', host='modex.bnl.gov', - dbname='betydb', driver='PostgreSQL',write=TRUE) +analysis <- readRDS("/Volumes/data2/bmorrison/sda/500_site_run/output_folder/ANALYSIS.RDS") +sites <- 
unique(attributes(analysis[[names(analysis)[1]]])$Site) +obs <- sites +bety <- list( + user = "bety", password = "bety", host = "modex.bnl.gov", + dbname = "betydb", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- obs suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) + ids = site_ID, .con = con +)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) # site_info = as.data.frame(cbind(site_info$site_id, site_info$lon, site_info$lat)) # names(site_info) = c("site_id", "lon", "lat") # save(site_info, file = '/Volumes/data2/bmorrison/sda/500_site_run/output_folder/500_site_run_SITE_INFO.Rdata') -load('/data2/bmorrison/sda/500_site_run/output_folder/500_site_run_SITE_INFO.Rdata') +load("/data2/bmorrison/sda/500_site_run/output_folder/500_site_run_SITE_INFO.Rdata") -date = 2012 -index = which(names(analysis) == "2012/07/15") -data = analysis[[index]] -data= data[,which(names(data) == "AbvGrndWood")] +date <- 2012 +index <- which(names(analysis) == "2012/07/15") +data <- analysis[[index]] +data <- data[, which(names(data) == "AbvGrndWood")] -biomass = apply(data, 2, FUN = median, na.rm = T) -biomass = as.data.frame(cbind(sites, biomass)) -row.names(biomass) = NULL -names(biomass) = c("site_id", "biomass") -biomass_data = biomass -biomass_data$lon = NA -biomass_data$lat = NA +biomass <- apply(data, 2, FUN = median, na.rm = T) +biomass <- as.data.frame(cbind(sites, biomass)) +row.names(biomass) <- NULL +names(biomass) <- c("site_id", "biomass") +biomass_data <- biomass +biomass_data$lon <- NA +biomass_data$lat <- NA for (i in 1:nrow(biomass_data)) { - id = biomass_data$site_id[i] - index = which(site_info$site_id == id) - if (length(index) > 0) - { - biomass_data$lon[i] = site_info$lon[index] - biomass_data$lat[i] = site_info$lat[index] + id <- biomass_data$site_id[i] + index <- which(site_info$site_id == id) + if (length(index) > 0) { + biomass_data$lon[i] <- site_info$lon[index] + biomass_data$lat[i] <- site_info$lat[index] } } -coordinates(biomass_data) = ~lon+lat -projection(biomass_data) = crs(s) +coordinates(biomass_data) <- ~ lon + lat +projection(biomass_data) <- crs(s) -par(mfrow = c(1,1)) +par(mfrow = c(1, 1)) plot(ndvi, col = gray.colors(10, start = 0.3, end = 0.9, gamma = 2.2, alpha = NULL)) plot(biomass_data, pch = 20, col = terrain.colors(biomass_data$biomass), add = T) # prepare training and testing datasets -input_data = as.data.frame(extract(s, biomass_data)) -input_data = cbind(input_data, biomass_data$biomass) -names(input_data) = c("ndvi", "evi", "aet", "def", "rad", "rain", "snow", "tmax", "tmin", "elevation", "biomass") -input_data$biomass = as.numeric(input_data$biomass) +input_data <- as.data.frame(extract(s, biomass_data)) +input_data <- cbind(input_data, biomass_data$biomass) +names(input_data) <- c("ndvi", "evi", "aet", "def", "rad", "rain", "snow", "tmax", "tmin", "elevation", "biomass") +input_data$biomass <- 
as.numeric(input_data$biomass) #### outlier analysis #### # sd = sd(input_data$biomass) # z = abs(input_data$biomass-mean(input_data$biomass))/sd # index = which(z > 2.5) # input_data = input_data[-index,] -samples = sample(1:nrow(input_data), .8*nrow(input_data)) -training = input_data[samples,] -testing = input_data[-samples,] +samples <- sample(1:nrow(input_data), .8 * nrow(input_data)) +training <- input_data[samples, ] +testing <- input_data[-samples, ] # make the model!!! -rf = randomForest(biomass~ndvi+evi+aet+def+rain+snow+tmax+elevation, - data = training, ntree = 1000, na.action = na.omit, keep.forest = T) - -pr = predict(rf, newdata = testing, na.action = na.pass) -max = max(pr, testing$biomass) -jpeg('/data2/bmorrison/sda/500_site_run/rf_model_diagnostics_2021.jpeg', height = 8, width = 8, units = "in", res = 300) -par(mfrow = c(2,2)) +rf <- randomForest(biomass ~ ndvi + evi + aet + def + rain + snow + tmax + elevation, + data = training, ntree = 1000, na.action = na.omit, keep.forest = T +) + +pr <- predict(rf, newdata = testing, na.action = na.pass) +max <- max(pr, testing$biomass) +jpeg("/data2/bmorrison/sda/500_site_run/rf_model_diagnostics_2021.jpeg", height = 8, width = 8, units = "in", res = 300) +par(mfrow = c(2, 2)) plot(rf, main = "Tree Error") varImpPlot(rf, main = "Variable Importance") plot(testing$biomass, pr, xlim = c(0, max), ylim = c(0, max), xlab = "SDA AGB", ylab = "Predicted", main = "Obs vs. Predicted") -abline(0, 1, col = 'red') +abline(0, 1, col = "red") -c = round(cor(pr, testing$biomass, use ="complete.obs"), digits = 1) -rmse = function(obs, pred) -{ - error = sqrt(sum((pred-obs)^2)/length(pred)) +c <- round(cor(pr, testing$biomass, use = "complete.obs"), digits = 1) +rmse <- function(obs, pred) { + error <- sqrt(sum((pred - obs)^2) / length(pred)) return(error) } -error = round(rmse(testing$biomass, pr), digits = 1) +error <- round(rmse(testing$biomass, pr), digits = 1) library(hydroGOF) -pb = pbias(testing$biomass, pr, na.rm = T) -text(x = 0, y = 200, labels = paste0("Cor=",c), pos = 4) +pb <- pbias(testing$biomass, pr, na.rm = T) +text(x = 0, y = 200, labels = paste0("Cor=", c), pos = 4) text(x = 0, y = 190, labels = paste0("RMSE=", error), pos = 4) -text(x = 0, y = 180, labels = paste0("%Bias=",pb), pos = 4) +text(x = 0, y = 180, labels = paste0("%Bias=", pb), pos = 4) -test = predict(object = s, model = rf, na.rm = T) +test <- predict(object = s, model = rf, na.rm = T) plot(test, main = "CONUS AGB Estimate") dev.off() ##### Make STDEV estimate from model -na_index = which(is.na(s[])) -d = as.data.frame(s) -d$cell = 1:nrow(d) -d = d[-na_index,] -test2 = predict(rf, newdata = d, na.action = na.omit, predict.all = T) -test2_data = test2$individual -test2_sd = apply(test2_data, 1, FUN = sd, na.rm = T) - -test2 = mask*NA -test2[d$cell] = test2_sd - -lt_agb = raster("/data2/bmorrison/sda/500_site_run/data/landtrendr_agb_2012_800m.tif") -lt_agb_se = raster("/data2/bmorrison/sda/500_site_run/data/landtrendr_agb_stdev_2012_800m.tif") -agb_diff = abs(lt_agb-test) -se_diff = abs(lt_agb_se-test2) +na_index <- which(is.na(s[])) +d <- as.data.frame(s) +d$cell <- 1:nrow(d) +d <- d[-na_index, ] +test2 <- predict(rf, newdata = d, na.action = na.omit, predict.all = T) +test2_data <- test2$individual +test2_sd <- apply(test2_data, 1, FUN = sd, na.rm = T) + +test2 <- mask * NA +test2[d$cell] <- test2_sd + +lt_agb <- raster("/data2/bmorrison/sda/500_site_run/data/landtrendr_agb_2012_800m.tif") +lt_agb_se <- 
raster("/data2/bmorrison/sda/500_site_run/data/landtrendr_agb_stdev_2012_800m.tif") +agb_diff <- abs(lt_agb - test) +se_diff <- abs(lt_agb_se - test2) # range_lt = range(r[], na.rm = T) # breaks_lt = seq(range_lt[1], range_lt[2], by = 31) -breaks_agb = c(0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600) -breaks_stdev = c(0, 25, 50, 75, 100, 125, 150) -breaks_diff = c() -jpeg('/data2/bmorrison/sda/500_site_run/rf_model_comparison_2021.jpeg', height = 10, width = 14, units = "in", res = 300) -par(mfrow = c(2,3)) -plot(lt_agb, col = rev(terrain.colors(length(breaks_agb)-1)), breaks = breaks_agb, main = "LandTrendr AGB 800m") -plot(lt_agb_se, col = rev(terrain.colors(length(breaks_stdev)-1)), breaks = breaks_stdev, main = "LandTrendr STDEV 800m") -plot(agb_diff, main = "Difference LT vs. RF AGB Estimates") -plot(test, col = rev(terrain.colors(length(breaks_agb)-1)), breaks = breaks_agb, main = "SDA RF AGB") -plot(test2, col = rev(terrain.colors(length(breaks_stdev)-1)), breaks = breaks_stdev, main = "SDA RF STDEV") -plot(se_diff, main = "Difference LT vs. RF STDEV Estimates") +breaks_agb <- c(0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600) +breaks_stdev <- c(0, 25, 50, 75, 100, 125, 150) +breaks_diff <- c() +jpeg("/data2/bmorrison/sda/500_site_run/rf_model_comparison_2021.jpeg", height = 10, width = 14, units = "in", res = 300) +par(mfrow = c(2, 3)) +plot(lt_agb, col = rev(terrain.colors(length(breaks_agb) - 1)), breaks = breaks_agb, main = "LandTrendr AGB 800m") +plot(lt_agb_se, col = rev(terrain.colors(length(breaks_stdev) - 1)), breaks = breaks_stdev, main = "LandTrendr STDEV 800m") +plot(agb_diff, main = "Difference LT vs. RF AGB Estimates") +plot(test, col = rev(terrain.colors(length(breaks_agb) - 1)), breaks = breaks_agb, main = "SDA RF AGB") +plot(test2, col = rev(terrain.colors(length(breaks_stdev) - 1)), breaks = breaks_stdev, main = "SDA RF STDEV") +plot(se_diff, main = "Difference LT vs. 
RF STDEV Estimates") dev.off() diff --git a/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite-3sites.R b/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite-3sites.R index d37c1062798..5a1f22c0bb8 100755 --- a/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite-3sites.R +++ b/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite-3sites.R @@ -6,7 +6,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -14,89 +14,106 @@ library(listviewer) library(dplyr) #------------------------------------------ Setup ------------------------------------- setwd("/data/sserbin/Modeling/sipnet/NASA_CMS") -unlink(c('run','out','SDA'),recursive = T) -rm(list=ls()) +unlink(c("run", "out", "SDA"), recursive = T) +rm(list = ls()) settings <- read.settings("pecan.SDA.3sites.xml") -if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() -#sample from parameters used for both sensitivity analysis and Ens -get.parameter.samples(settings, ens.sample.method = settings$ensemble$samplingspace$parameters$method) ## Aside: if method were set to unscented, would take minimal changes to do UnKF +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} +# sample from parameters used for both sensitivity analysis and Ens +get.parameter.samples(settings, ens.sample.method = settings$ensemble$samplingspace$parameters$method) ## Aside: if method were set to unscented, would take minimal changes to do UnKF #---------------------------------------------------------------- # OBS data preparation #--------------------------------------------------------------- load("Obs/LandTrendr_AGB_output-4sites.RData") site1 <- point_list site1$median_AGB[[1]] %>% - filter(Site_ID!='772') -> site1$median_AGB[[1]] + filter(Site_ID != "772") -> site1$median_AGB[[1]] site1$stdv_AGB[[1]] %>% - filter(Site_ID!='772') -> site1$stdv_AGB[[1]] + filter(Site_ID != "772") -> site1$stdv_AGB[[1]] load("Obs/LandTrendr_AGB_output_796-769.RData") site2 <- point_list -site2$median_AGB[[1]] %>% - filter(Site_ID!='1000000074') ->site2$median_AGB[[1]] +site2$median_AGB[[1]] %>% + filter(Site_ID != "1000000074") -> site2$median_AGB[[1]] -site2$stdv_AGB[[1]] %>% - filter(Site_ID!='1000000074') ->site2$stdv_AGB[[1]] -#listviewer::jsonedit(point_list) +site2$stdv_AGB[[1]] %>% + filter(Site_ID != "1000000074") -> site2$stdv_AGB[[1]] +# listviewer::jsonedit(point_list) #-------------------------------------------------------------------------------- -#for multi site both mean and cov needs to be a list like this +# for multi site both mean and cov needs to be a list like this # +date # +siteid # c(state variables)/matrix(cov state variables) -# -#reorder sites in obs -point_list$median_AGB <-rbind(site1$median_AGB[[1]], - site2$median_AGB[[1]]) %>% filter(Site_ID %in% site.ids) -point_list$stdv_AGB <-rbind(site1$stdv_AGB[[1]], - site2$stdv_AGB[[1]])%>% filter(Site_ID %in% site.ids) +# +# reorder sites in obs +point_list$median_AGB <- rbind( + site1$median_AGB[[1]], + site2$median_AGB[[1]] +) %>% filter(Site_ID %in% site.ids) +point_list$stdv_AGB <- rbind( + site1$stdv_AGB[[1]], + 
site2$stdv_AGB[[1]] +) %>% filter(Site_ID %in% site.ids) -site.order <- sapply(site.ids,function(x) which(point_list$median_AGB$Site_ID %in% x)) %>% - as.numeric() %>% na.omit() +site.order <- sapply(site.ids, function(x) which(point_list$median_AGB$Site_ID %in% x)) %>% + as.numeric() %>% + na.omit() -point_list$median_AGB <- point_list$median_AGB[site.order,] -point_list$stdv_AGB <- point_list$stdv_AGB[site.order,] +point_list$median_AGB <- point_list$median_AGB[site.order, ] +point_list$stdv_AGB <- point_list$stdv_AGB[site.order, ] # truning lists to dfs for both mean and cov -date.obs <- strsplit(names(site1$median_AGB[[1]]),"_")[3:length(site1$median_AGB[[1]])] %>% - map_chr(~.x[2]) %>% paste0(.,"/12/31") +date.obs <- strsplit(names(site1$median_AGB[[1]]), "_")[3:length(site1$median_AGB[[1]])] %>% + map_chr(~ .x[2]) %>% + paste0(., "/12/31") -obs.mean <-names(point_list$median_AGB)[3:length(point_list$median_AGB)] %>% - map(function(namesl){ - ((point_list$median_AGB)[[namesl]] %>% - map(~.x %>% as.data.frame %>% `colnames<-`(c('AbvGrndWood'))) %>% - setNames(site.ids[1:length(.)]) - ) - }) %>% setNames(date.obs) +obs.mean <- names(point_list$median_AGB)[3:length(point_list$median_AGB)] %>% + map(function(namesl) { + ((point_list$median_AGB)[[namesl]] %>% + map(~ .x %>% + as.data.frame() %>% + `colnames<-`(c("AbvGrndWood"))) %>% + setNames(site.ids[1:length(.)]) + ) + }) %>% + setNames(date.obs) -obs.cov <-names(point_list$stdv_AGB)[3:length(point_list$median_AGB)] %>% +obs.cov <- names(point_list$stdv_AGB)[3:length(point_list$median_AGB)] %>% map(function(namesl) { ((point_list$stdv_AGB)[[namesl]] %>% - map( ~ (.x) ^ 2%>% as.matrix()) %>% - setNames(site.ids[1:length(.)])) - - }) %>% setNames(date.obs) + map(~ (.x)^2 %>% as.matrix()) %>% + setNames(site.ids[1:length(.)])) + }) %>% + setNames(date.obs) #---------------------------------------------------------------- # end OBS data preparation #--------------------------------------------------------------- new.settings <- PEcAn.settings::prepare.settings(settings) -#jsonedit(new.settings) +# jsonedit(new.settings) #------------------------------------------ SDA ------------------------------------- -sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, - control=list(trace=T, - FF=F, - interactivePlot=F, - TimeseriesPlot=T, - BiasPlot=F, - plot.title="lhc sampling - 4sites - SF50 - ALL PFTs - small sample size", - facet.plots=T, - debug=F, - pause=F) - ) - - +sda.enkf.multisite(new.settings, + obs.mean = obs.mean, obs.cov = obs.cov, + control = list( + trace = T, + FF = F, + interactivePlot = F, + TimeseriesPlot = T, + BiasPlot = F, + plot.title = "lhc sampling - 4sites - SF50 - ALL PFTs - small sample size", + facet.plots = T, + debug = F, + pause = F + ) +) diff --git a/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite-4sites.R b/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite-4sites.R index 96bf135de10..ba976ec36d2 100755 --- a/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite-4sites.R +++ b/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite-4sites.R @@ -6,7 +6,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAn.assim.sequential:: +# PEcAn.assim.sequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -14,89 +14,106 @@ library(listviewer) library(dplyr) #------------------------------------------ Setup 
------------------------------------- setwd("/data/sserbin/Modeling/sipnet/NASA_CMS") -unlink(c('run','out','SDA'),recursive = T) -rm(list=ls()) +unlink(c("run", "out", "SDA"), recursive = T) +rm(list = ls()) settings <- read.settings("pecan.SDA.4sites.xml") -if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() -#sample from parameters used for both sensitivity analysis and Ens -get.parameter.samples(settings, ens.sample.method = settings$ensemble$samplingspace$parameters$method) ## Aside: if method were set to unscented, would take minimal changes to do UnKF +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} +# sample from parameters used for both sensitivity analysis and Ens +get.parameter.samples(settings, ens.sample.method = settings$ensemble$samplingspace$parameters$method) ## Aside: if method were set to unscented, would take minimal changes to do UnKF #---------------------------------------------------------------- # OBS data preparation #--------------------------------------------------------------- load("Obs/LandTrendr_AGB_output-4sites.RData") site1 <- point_list site1$median_AGB[[1]] %>% - filter(Site_ID!='772') -> site1$median_AGB[[1]] + filter(Site_ID != "772") -> site1$median_AGB[[1]] site1$stdv_AGB[[1]] %>% - filter(Site_ID!='772') -> site1$stdv_AGB[[1]] + filter(Site_ID != "772") -> site1$stdv_AGB[[1]] load("Obs/LandTrendr_AGB_output_796-769.RData") site2 <- point_list -site2$median_AGB[[1]] %>% - filter(Site_ID=='796') -> site2$median_AGB[[1]] +site2$median_AGB[[1]] %>% + filter(Site_ID == "796") -> site2$median_AGB[[1]] -site2$stdv_AGB[[1]] %>% - filter(Site_ID=='796') -> site2$stdv_AGB[[1]] -#listviewer::jsonedit(point_list) +site2$stdv_AGB[[1]] %>% + filter(Site_ID == "796") -> site2$stdv_AGB[[1]] +# listviewer::jsonedit(point_list) #-------------------------------------------------------------------------------- -#for multi site both mean and cov needs to be a list like this +# for multi site both mean and cov needs to be a list like this # +date # +siteid # c(state variables)/matrix(cov state variables) -# -#reorder sites in obs -point_list$median_AGB <-rbind(site1$median_AGB[[1]], - site2$median_AGB[[1]]) %>% filter(Site_ID %in% site.ids) -point_list$stdv_AGB <-rbind(site1$stdv_AGB[[1]], - site2$stdv_AGB[[1]])%>% filter(Site_ID %in% site.ids) +# +# reorder sites in obs +point_list$median_AGB <- rbind( + site1$median_AGB[[1]], + site2$median_AGB[[1]] +) %>% filter(Site_ID %in% site.ids) +point_list$stdv_AGB <- rbind( + site1$stdv_AGB[[1]], + site2$stdv_AGB[[1]] +) %>% filter(Site_ID %in% site.ids) -site.order <- sapply(site.ids,function(x) which(point_list$median_AGB$Site_ID %in% x)) %>% - as.numeric() %>% na.omit() +site.order <- sapply(site.ids, function(x) which(point_list$median_AGB$Site_ID %in% x)) %>% + as.numeric() %>% + na.omit() -point_list$median_AGB <- point_list$median_AGB[site.order,] -point_list$stdv_AGB <- point_list$stdv_AGB[site.order,] +point_list$median_AGB <- point_list$median_AGB[site.order, ] +point_list$stdv_AGB <- point_list$stdv_AGB[site.order, ] # truning lists to dfs for both mean and cov -date.obs <- strsplit(names(site1$median_AGB[[1]]),"_")[3:length(site1$median_AGB[[1]])] %>% - map_chr(~.x[2]) %>% paste0(.,"/12/31") +date.obs <- strsplit(names(site1$median_AGB[[1]]), "_")[3:length(site1$median_AGB[[1]])] %>% + map_chr(~ .x[2]) 
%>% + paste0(., "/12/31") -obs.mean <-names(point_list$median_AGB)[3:length(point_list$median_AGB)] %>% - map(function(namesl){ - ((point_list$median_AGB)[[namesl]] %>% - map(~.x %>% as.data.frame %>% `colnames<-`(c('AbvGrndWood'))) %>% - setNames(site.ids[1:length(.)]) - ) - }) %>% setNames(date.obs) +obs.mean <- names(point_list$median_AGB)[3:length(point_list$median_AGB)] %>% + map(function(namesl) { + ((point_list$median_AGB)[[namesl]] %>% + map(~ .x %>% + as.data.frame() %>% + `colnames<-`(c("AbvGrndWood"))) %>% + setNames(site.ids[1:length(.)]) + ) + }) %>% + setNames(date.obs) -obs.cov <-names(point_list$stdv_AGB)[3:length(point_list$median_AGB)] %>% +obs.cov <- names(point_list$stdv_AGB)[3:length(point_list$median_AGB)] %>% map(function(namesl) { ((point_list$stdv_AGB)[[namesl]] %>% - map( ~ (.x) ^ 2%>% as.matrix()) %>% - setNames(site.ids[1:length(.)])) - - }) %>% setNames(date.obs) + map(~ (.x)^2 %>% as.matrix()) %>% + setNames(site.ids[1:length(.)])) + }) %>% + setNames(date.obs) #---------------------------------------------------------------- # end OBS data preparation #--------------------------------------------------------------- new.settings <- PEcAn.settings::prepare.settings(settings) -#jsonedit(new.settings) +# jsonedit(new.settings) #------------------------------------------ SDA ------------------------------------- -sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, - control=list(trace=T, - FF=F, - interactivePlot=F, - TimeseriesPlot=T, - BiasPlot=F, - plot.title="lhc sampling - 4sites - SF50 - ALL PFTs - small sample size", - facet.plots=T, - debug=F, - pause=F) - ) - - +sda.enkf.multisite(new.settings, + obs.mean = obs.mean, obs.cov = obs.cov, + control = list( + trace = T, + FF = F, + interactivePlot = F, + TimeseriesPlot = T, + BiasPlot = F, + plot.title = "lhc sampling - 4sites - SF50 - ALL PFTs - small sample size", + facet.plots = T, + debug = F, + pause = F + ) +) diff --git a/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite_SDA_BNL.R b/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite_SDA_BNL.R index 35f6c643938..628a9ced2a2 100755 --- a/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite_SDA_BNL.R +++ b/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite_SDA_BNL.R @@ -8,9 +8,9 @@ #---------------- Close all devices and delete all variables. 
-------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -23,7 +23,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -33,8 +33,9 @@ library(dplyr) # temporary step until we get this code integrated into pecan library(RCurl) -script <- getURL("https://raw.githubusercontent.com/serbinsh/pecan/download_osu_agb/modules/data.remote/R/LandTrendr.AGB.R", - ssl.verifypeer = FALSE) +script <- getURL("https://raw.githubusercontent.com/serbinsh/pecan/download_osu_agb/modules/data.remote/R/LandTrendr.AGB.R", + ssl.verifypeer = FALSE +) eval(parse(text = script)) #--------------------------------------------------------------------------------------------------# @@ -42,28 +43,37 @@ eval(parse(text = script)) #--------------------------------------------------------------------------------------------------# ## set run options, some of these should be tweaked or removed as requirements work_dir <- "/data/sserbin/Modeling/sipnet/NASA_CMS" -setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions +setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions -# Deifine observation - use existing or generate new? +# Deifine observation - use existing or generate new? # set to a specific file, use that. -#observation <- "" -observation <- c("1000025731","1000000048","763","796","772","764","765","1000000024","678", - "1000000146") +# observation <- "" +observation <- c( + "1000025731", "1000000048", "763", "796", "772", "764", "765", "1000000024", "678", + "1000000146" +) # delete an old run -unlink(c('run','out','SDA'),recursive = T) +unlink(c("run", "out", "SDA"), recursive = T) # grab multi-site XML file settings <- read.settings("XMLs/pecan_MultiSite_SDA.xml") # what is this step for???? is this to get the site locations for the map?? 
-if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% - map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} # sample from parameters used for both sensitivity analysis and Ens -get.parameter.samples(settings, - ens.sample.method = settings$ensemble$samplingspace$parameters$method) +get.parameter.samples(settings, + ens.sample.method = settings$ensemble$samplingspace$parameters$method +) ## Aside: if method were set to unscented, would take minimal changes to do UnKF #--------------------------------------------------------------------------------------------------# @@ -71,7 +81,7 @@ get.parameter.samples(settings, #--------------------------------------------------------------------------------------------------# ## Prepare observational data - still very hacky here -# option 1: use existing observation file +# option 1: use existing observation file # if (observation!="new") { # load(observation) # site1 <- point_list @@ -83,52 +93,61 @@ get.parameter.samples(settings, # option 2: run extraction code to generate observation files PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- observation # BETYdb site IDs data_dir <- "/data2/RS_GIS_Data/LandTrendr/LandTrendr_AGB_data" -#results <- PEcAn.data.remote::extract.LandTrendr.AGB(coords=site_ID, -results <- extract.LandTrendr.AGB(coords=site_ID, - data_dir = data_dir, con = con, - output_file = file.path(work_dir,"Obs"), - plot_results = FALSE) +# results <- PEcAn.data.remote::extract.LandTrendr.AGB(coords=site_ID, +results <- extract.LandTrendr.AGB( + coords = site_ID, + data_dir = data_dir, con = con, + output_file = file.path(work_dir, "Obs"), + plot_results = FALSE +) load("Obs/LandTrendr_AGB_output.RData") -#for multi site both mean and cov needs to be a list like this +# for multi site both mean and cov needs to be a list like this # +date # +siteid # c(state variables)/matrix(cov state variables) -# -#reorder sites in obs +# +# reorder sites in obs point_list$median_AGB <- point_list$median_AGB[[1]] %>% filter(Site_ID %in% site.ids) point_list$stdv_AGB <- point_list$stdv_AGB[[1]] %>% filter(Site_ID %in% site.ids) -site.order <- sapply(site.ids,function(x) which(point_list$median_AGB$Site_ID %in% x)) %>% - as.numeric() %>% na.omit() -point_list$median_AGB <- point_list$median_AGB[site.order,] -point_list$stdv_AGB <- point_list$stdv_AGB[site.order,] +site.order <- sapply(site.ids, function(x) which(point_list$median_AGB$Site_ID %in% x)) %>% + as.numeric() %>% + na.omit() +point_list$median_AGB <- point_list$median_AGB[site.order, ] +point_list$stdv_AGB <- point_list$stdv_AGB[site.order, ] # truning lists to dfs for both mean and cov -date.obs <- strsplit(names(point_list$median_AGB),"_")[3:length(point_list$median_AGB)] %>% - map_chr(~.x[2]) %>% paste0(.,"/12/31") +date.obs <- strsplit(names(point_list$median_AGB), "_")[3:length(point_list$median_AGB)] %>% + map_chr(~ .x[2]) %>% + paste0(., "/12/31") obs.mean <- names(point_list$median_AGB)[3:length(point_list$median_AGB)] %>% - map(function(namesl){ - 
((point_list$median_AGB)[[namesl]] %>% - map(~.x %>% as.data.frame %>% `colnames<-`(c('AbvGrndWood'))) %>% - setNames(site.ids[1:length(.)]) + map(function(namesl) { + ((point_list$median_AGB)[[namesl]] %>% + map(~ .x %>% + as.data.frame() %>% + `colnames<-`(c("AbvGrndWood"))) %>% + setNames(site.ids[1:length(.)]) ) - }) %>% setNames(date.obs) + }) %>% + setNames(date.obs) -obs.cov <-names(point_list$stdv_AGB)[3:length(point_list$median_AGB)] %>% +obs.cov <- names(point_list$stdv_AGB)[3:length(point_list$median_AGB)] %>% map(function(namesl) { ((point_list$stdv_AGB)[[namesl]] %>% - map( ~ (.x) ^ 2%>% as.matrix()) %>% - setNames(site.ids[1:length(.)])) - - }) %>% setNames(date.obs) + map(~ (.x)^2 %>% as.matrix()) %>% + setNames(site.ids[1:length(.)])) + }) %>% + setNames(date.obs) #--------------------------------------------------------------------------------------------------# @@ -143,16 +162,19 @@ PEcAn.settings::write.settings(new.settings, outputfile = "pecan.CHECKED.xml") #--------------------------------------------------------------------------------------------------# ## Run SDA -sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, - control=list(trace=T, - FF=F, - interactivePlot=F, - TimeseriesPlot=T, - BiasPlot=F, - plot.title="Uniform sampling - 10 sites", - facet.plots=T, - debug=F, - pause=F) +sda.enkf.multisite(new.settings, + obs.mean = obs.mean, obs.cov = obs.cov, + control = list( + trace = T, + FF = F, + interactivePlot = F, + TimeseriesPlot = T, + BiasPlot = F, + plot.title = "Uniform sampling - 10 sites", + facet.plots = T, + debug = F, + pause = F + ) ) #--------------------------------------------------------------------------------------------------# @@ -160,10 +182,10 @@ sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, #--------------------------------------------------------------------------------------------------# ## Wrap up # Send email if configured -#if (!is.null(settings$email) && !is.null(settings$email$to) && (settings$email$to != "")) { +# if (!is.null(settings$email) && !is.null(settings$email$to) && (settings$email$to != "")) { # sendmail(settings$email$from, settings$email$to, # paste0("SDA workflow has finished executing at ", base::date())) -#} +# } #--------------------------------------------------------------------------------------------------# diff --git a/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite_SDA_BNL_updated.R b/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite_SDA_BNL_updated.R index 9a5a4e537b3..bb429eca025 100755 --- a/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite_SDA_BNL_updated.R +++ b/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/Multisite_SDA_BNL_updated.R @@ -8,9 +8,9 @@ #---------------- Close all devices and delete all variables. 
-------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -23,7 +23,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -33,8 +33,9 @@ library(dplyr) # temporary step until we get this code integrated into pecan library(RCurl) -script <- getURL("https://raw.githubusercontent.com/serbinsh/pecan/download_osu_agb/modules/data.remote/R/LandTrendr.AGB.R", - ssl.verifypeer = FALSE) +script <- getURL("https://raw.githubusercontent.com/serbinsh/pecan/download_osu_agb/modules/data.remote/R/LandTrendr.AGB.R", + ssl.verifypeer = FALSE +) eval(parse(text = script)) #--------------------------------------------------------------------------------------------------# @@ -42,7 +43,7 @@ eval(parse(text = script)) #--------------------------------------------------------------------------------------------------# ## set run options, some of these should be tweaked or removed as requirements work_dir <- "/data/sserbin/Modeling/sipnet/NASA_CMS" -setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions +setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions # grab multi-site XML file settings <- read.settings("XMLs/pecan_MultiSite_SDA.xml") @@ -50,21 +51,28 @@ settings <- read.settings("XMLs/pecan_MultiSite_SDA.xml") # grab observation IDs from settings file observation <- c() for (i in seq_along(1:length(settings$run))) { - command <- paste0("settings$run$settings.",i,"$site$id") - obs <- eval(parse(text=command)) - observation <- c(observation,obs) + command <- paste0("settings$run$settings.", i, "$site$id") + obs <- eval(parse(text = command)) + observation <- c(observation, obs) } # delete an old run -unlink(c('run','out','SDA'),recursive = T) +unlink(c("run", "out", "SDA"), recursive = T) # what is this step for???? is this to get the site locations for the map?? 
-if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% - map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} # sample from parameters used for both sensitivity analysis and Ens -get.parameter.samples(settings, - ens.sample.method = settings$ensemble$samplingspace$parameters$method) +get.parameter.samples(settings, + ens.sample.method = settings$ensemble$samplingspace$parameters$method +) ## Aside: if method were set to unscented, would take minimal changes to do UnKF #--------------------------------------------------------------------------------------------------# @@ -72,56 +80,71 @@ get.parameter.samples(settings, #--------------------------------------------------------------------------------------------------# ## Prepare observational data - still very hacky here PEcAn.logger::logger.info("**** Extracting LandTrendr AGB data for model sites ****") -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con site_ID <- observation -suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, -ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) +suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, +ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", + ids = site_ID, .con = con +)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) +site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone +) data_dir <- "/data2/RS_GIS_Data/LandTrendr/LandTrendr_AGB_data" -med_agb_data <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean", - data_dir, product_dates=NULL, file.path(work_dir,"Obs")) -sdev_agb_data <- extract.LandTrendr.AGB(site_info, "stdv", buffer = NULL, fun = "mean", - data_dir, product_dates=NULL, file.path(work_dir,"Obs")) +med_agb_data <- extract.LandTrendr.AGB(site_info, "median", + buffer = NULL, fun = "mean", + data_dir, product_dates = NULL, file.path(work_dir, "Obs") +) +sdev_agb_data <- extract.LandTrendr.AGB(site_info, "stdv", + buffer = NULL, fun = "mean", + data_dir, product_dates = NULL, file.path(work_dir, "Obs") +) PEcAn.logger::logger.info("**** Preparing data for SDA ****") -#for multi site both mean and cov needs to be a list like this +# for multi site both mean and cov needs to be a list like this # +date # +siteid # c(state variables)/matrix(cov state variables) -# -#reorder sites in obs +# +# reorder sites in obs med_agb_data_sda <- med_agb_data[[1]] %>% filter(Site_ID %in% site.ids) sdev_agb_data_sda <- sdev_agb_data[[1]] %>% filter(Site_ID %in% site.ids) -site.order <- sapply(site.ids,function(x) which(med_agb_data_sda$Site_ID %in% x)) %>% - as.numeric() %>% na.omit() -med_agb_data_sda <- 
med_agb_data_sda[site.order,] -sdev_agb_data_sda <- sdev_agb_data_sda[site.order,] +site.order <- sapply(site.ids, function(x) which(med_agb_data_sda$Site_ID %in% x)) %>% + as.numeric() %>% + na.omit() +med_agb_data_sda <- med_agb_data_sda[site.order, ] +sdev_agb_data_sda <- sdev_agb_data_sda[site.order, ] # truning lists to dfs for both mean and cov -date.obs <- strsplit(names(med_agb_data_sda),"_")[3:length(med_agb_data_sda)] %>% - map_chr(~.x[2]) %>% paste0(.,"/12/31") +date.obs <- strsplit(names(med_agb_data_sda), "_")[3:length(med_agb_data_sda)] %>% + map_chr(~ .x[2]) %>% + paste0(., "/12/31") obs.mean <- names(med_agb_data_sda)[3:length(med_agb_data_sda)] %>% - map(function(namesl){ - ((med_agb_data_sda)[[namesl]] %>% - map(~.x %>% as.data.frame %>% `colnames<-`(c('AbvGrndWood'))) %>% - setNames(site.ids[1:length(.)])) - }) %>% setNames(date.obs) - -obs.cov <-names(sdev_agb_data_sda)[3:length(sdev_agb_data_sda)] %>% + map(function(namesl) { + ((med_agb_data_sda)[[namesl]] %>% + map(~ .x %>% + as.data.frame() %>% + `colnames<-`(c("AbvGrndWood"))) %>% + setNames(site.ids[1:length(.)])) + }) %>% + setNames(date.obs) + +obs.cov <- names(sdev_agb_data_sda)[3:length(sdev_agb_data_sda)] %>% map(function(namesl) { ((sdev_agb_data_sda)[[namesl]] %>% - map( ~ (.x) ^ 2%>% as.matrix()) %>% - setNames(site.ids[1:length(.)])) - }) %>% setNames(date.obs) + map(~ (.x)^2 %>% as.matrix()) %>% + setNames(site.ids[1:length(.)])) + }) %>% + setNames(date.obs) #--------------------------------------------------------------------------------------------------# @@ -137,16 +160,19 @@ PEcAn.settings::write.settings(new.settings, outputfile = "pecan.CHECKED.xml") #--------------------------------------------------------------------------------------------------# PEcAn.logger::logger.info("**** Run SDA ****") ## Run SDA -sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, - control=list(trace=T, - FF=F, - interactivePlot=F, - TimeseriesPlot=T, - BiasPlot=F, - plot.title="Uniform sampling - 10 sites", - facet.plots=T, - debug=F, - pause=F) +sda.enkf.multisite(new.settings, + obs.mean = obs.mean, obs.cov = obs.cov, + control = list( + trace = T, + FF = F, + interactivePlot = F, + TimeseriesPlot = T, + BiasPlot = F, + plot.title = "Uniform sampling - 10 sites", + facet.plots = T, + debug = F, + pause = F + ) ) #--------------------------------------------------------------------------------------------------# @@ -154,10 +180,10 @@ sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, #--------------------------------------------------------------------------------------------------# ## Wrap up # Send email if configured -#if (!is.null(settings$email) && !is.null(settings$email$to) && (settings$email$to != "")) { +# if (!is.null(settings$email) && !is.null(settings$email$to) && (settings$email$to != "")) { # sendmail(settings$email$from, settings$email$to, # paste0("SDA workflow has finished executing at ", base::date())) -#} +# } #--------------------------------------------------------------------------------------------------# diff --git a/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/workflow_doconversions.R b/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/workflow_doconversions.R index b97c5f22e84..710b10eb0e2 100755 --- a/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/workflow_doconversions.R +++ b/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/workflow_doconversions.R @@ -2,7 +2,7 @@ 
#------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html @@ -16,8 +16,8 @@ library(PEcAn.utils) library(RCurl) # make sure always to call status.end -options(warn=1) -options(error=quote({ +options(warn = 1) +options(error = quote({ PEcAn.utils::status.end("ERROR") PEcAn.remote::kill.tunnel(settings) if (!interactive()) { @@ -25,32 +25,32 @@ options(error=quote({ } })) -#options(warning.expression=status.end("ERROR")) +# options(warning.expression=status.end("ERROR")) # ---------------------------------------------------------------------- # PEcAn Workflow # ---------------------------------------------------------------------- # Open and read in settings file for PEcAn run. -settings <- PEcAn.settings::read.settings("pecan_US-CZ3_CRUNCEP.xml") +settings <- PEcAn.settings::read.settings("pecan_US-CZ3_CRUNCEP.xml") # Check for additional modules that will require adding settings -if("benchmarking" %in% names(settings)){ +if ("benchmarking" %in% names(settings)) { library(PEcAn.benchmark) settings <- papply(settings, read_settings_BRR) } -if("sitegroup" %in% names(settings)){ - if(is.null(settings$sitegroup$nSite)){ +if ("sitegroup" %in% names(settings)) { + if (is.null(settings$sitegroup$nSite)) { settings <- PEcAn.settings::createSitegroupMultiSettings(settings, sitegroupId = settings$sitegroup$id) } else { - settings <- PEcAn.settings::createSitegroupMultiSettings(settings, sitegroupId = settings$sitegroup$id,nSite = settings$sitegroup$nSite) + settings <- PEcAn.settings::createSitegroupMultiSettings(settings, sitegroupId = settings$sitegroup$id, nSite = settings$sitegroup$nSite) } settings$sitegroup <- NULL ## zero out so don't expand a second time if re-reading } # Update/fix/check settings. Will only run the first time it's called, unless force=TRUE -settings <- PEcAn.settings::prepare.settings(settings, force=FALSE) +settings <- PEcAn.settings::prepare.settings(settings, force = FALSE) # Write pecan.CHECKED.xml PEcAn.settings::write.settings(settings, outputfile = "pecan.CHECKED.xml") @@ -62,7 +62,7 @@ if (length(which(commandArgs() == "--continue")) == 0 && file.exists(statusFile) } # Do conversions -settings <- PEcAn.workflow::do_conversions(settings, overwrite.met = list(download = TRUE, met2cf = TRUE, standardize = TRUE, met2model = TRUE)) +settings <- PEcAn.workflow::do_conversions(settings, overwrite.met = list(download = TRUE, met2cf = TRUE, standardize = TRUE, met2model = TRUE)) db.print.connections() print("---------- PEcAn Workflow Complete ----------") diff --git a/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/workflow_metprocess.R b/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/workflow_metprocess.R index 61d29b73414..d29f7e30d6a 100755 --- a/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/workflow_metprocess.R +++ b/modules/assim.sequential/inst/sda_backup/sserbin/R_scripts_2/workflow_metprocess.R @@ -2,7 +2,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html @@ -16,8 +16,8 @@ library(PEcAn.utils) library(RCurl) # make sure always to call status.end -options(warn=1) -options(error=quote({ +options(warn = 1) +options(error = quote({ PEcAn.utils::status.end("ERROR") PEcAn.remote::kill.tunnel(settings) if (!interactive()) { @@ -25,31 +25,31 @@ options(error=quote({ } })) -#options(warning.expression=status.end("ERROR")) +# options(warning.expression=status.end("ERROR")) # ---------------------------------------------------------------------- # PEcAn Workflow # ---------------------------------------------------------------------- -settings <- PEcAn.settings::read.settings("pecan_US-CZ3_CRUNCEP.xml") +settings <- PEcAn.settings::read.settings("pecan_US-CZ3_CRUNCEP.xml") # Check for additional modules that will require adding settings -if("benchmarking" %in% names(settings)){ +if ("benchmarking" %in% names(settings)) { library(PEcAn.benchmark) settings <- papply(settings, read_settings_BRR) } -if("sitegroup" %in% names(settings)){ - if(is.null(settings$sitegroup$nSite)){ +if ("sitegroup" %in% names(settings)) { + if (is.null(settings$sitegroup$nSite)) { settings <- PEcAn.settings::createSitegroupMultiSettings(settings, sitegroupId = settings$sitegroup$id) } else { - settings <- PEcAn.settings::createSitegroupMultiSettings(settings, sitegroupId = settings$sitegroup$id,nSite = settings$sitegroup$nSite) + settings <- PEcAn.settings::createSitegroupMultiSettings(settings, sitegroupId = settings$sitegroup$id, nSite = settings$sitegroup$nSite) } settings$sitegroup <- NULL ## zero out so don't expand a second time if re-reading } # Update/fix/check settings. Will only run the first time it's called, unless force=TRUE -settings <- PEcAn.settings::prepare.settings(settings, force=FALSE) +settings <- PEcAn.settings::prepare.settings(settings, force = FALSE) # Write pecan.CHECKED.xml PEcAn.settings::write.settings(settings, outputfile = "pecan.CHECKED.xml") @@ -62,14 +62,14 @@ if (length(which(commandArgs() == "--continue")) == 0 && file.exists(statusFile) # met process PEcAn.data.atmosphere::met.process( - site = settings$run$site, + site = settings$run$site, input_met = settings$run$inputs$met, start_date = settings$run$start.date, end_date = settings$run$end.date, model = settings$model$type, host = settings$host, - dbparms = settings$database$bety, + dbparms = settings$database$bety, dir = settings$database$dbfiles, spin = settings$spin, - overwrite = list(download = TRUE, met2cf = TRUE, standardize = TRUE, met2model = TRUE)) - + overwrite = list(download = TRUE, met2cf = TRUE, standardize = TRUE, met2model = TRUE) +) diff --git a/modules/assim.sequential/inst/sda_backup/sserbin/Rscripts/multi_site_LAI_SDA_BNL.R b/modules/assim.sequential/inst/sda_backup/sserbin/Rscripts/multi_site_LAI_SDA_BNL.R index fddca30f42c..9480c458e7e 100755 --- a/modules/assim.sequential/inst/sda_backup/sserbin/Rscripts/multi_site_LAI_SDA_BNL.R +++ b/modules/assim.sequential/inst/sda_backup/sserbin/Rscripts/multi_site_LAI_SDA_BNL.R @@ -8,9 +8,9 @@ #---------------- Close all devices and delete all variables. 
-------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -23,7 +23,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) @@ -31,8 +31,8 @@ library(listviewer) library(dplyr) library(doParallel) -extract_LAI <- TRUE #TRUE/FALSE -run_SDA <- TRUE #TRUE/FALSE +extract_LAI <- TRUE # TRUE/FALSE +run_SDA <- TRUE # TRUE/FALSE #--------------------------------------------------------------------------------------------------# @@ -40,7 +40,7 @@ run_SDA <- TRUE #TRUE/FALSE #--------------------------------------------------------------------------------------------------# ## set run options, some of these should be tweaked or removed as requirements work_dir <- "/data/sserbin/Modeling/sipnet/NASA_CMS_AGB_LAI" -setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions +setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions # grab multi-site XML file settings <- read.settings("XMLs/pecan_MultiSite_LAI_SDA.xml") @@ -48,22 +48,29 @@ settings <- read.settings("XMLs/pecan_MultiSite_LAI_SDA.xml") # grab observation IDs from settings file observation <- c() for (i in seq_along(1:length(settings$run))) { - command <- paste0("settings$run$settings.",i,"$site$id") - obs <- eval(parse(text=command)) - observation <- c(observation,obs) + command <- paste0("settings$run$settings.", i, "$site$id") + obs <- eval(parse(text = command)) + observation <- c(observation, obs) } # delete an old run -unlink(c('run','out','SDA'),recursive = T) +unlink(c("run", "out", "SDA"), recursive = T) # what is this step for???? is this to get the site locations for the map?? 
-if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% - map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} # sample from parameters used for both sensitivity analysis and Ens -get.parameter.samples(settings, - ens.sample.method = settings$ensemble$samplingspace$parameters$method) +get.parameter.samples(settings, + ens.sample.method = settings$ensemble$samplingspace$parameters$method +) ## Aside: if method were set to unscented, would take minimal changes to do UnKF #--------------------------------------------------------------------------------------------------# @@ -76,81 +83,86 @@ data_dir <- "/data/sserbin/Modeling/sipnet/NASA_CMS_AGB_LAI/modis_lai_data" parameters <- settings$run # get MODIS data -bety <- list(user=settings$database$bety$user, password=settings$database$bety$password, - host=settings$database$bety$host, - dbname='bety', driver='PostgreSQL',write=TRUE) +bety <- list( + user = settings$database$bety$user, password = settings$database$bety$password, + host = settings$database$bety$host, + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con -suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, - ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = observation, .con = con)) -suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) +suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, + ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", + ids = observation, .con = con +)) +suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) site_IDs <- qry_results$id site_names <- qry_results$sitename site_coords <- data.frame(cbind(qry_results$lon, qry_results$lat)) -names(site_coords) <- c("Longitude","Latitude") +names(site_coords) <- c("Longitude", "Latitude") -#extract lai using call_MODIS for the lat/lon per site and dates +# extract lai using call_MODIS for the lat/lon per site and dates if (extract_LAI) { modis_data <- data.frame() - cl <- parallel::makeCluster(5, outfile="") + cl <- parallel::makeCluster(5, outfile = "") registerDoParallel(cl) - modis_data <- foreach(i=1:nrow(site_coords)) %dopar% PEcAn.data.remote::call_MODIS(product = "MOD15A2H", - band = "Lai_500m", start_date = "2001001", - end_date = "2010365", lat = site_coords$Latitude[i], - lon = site_coords$Longitude[i], - size = 0, band_qc = "FparLai_QC", - band_sd = "LaiStdDev_500m", - package_method = "MODISTools") - + modis_data <- foreach(i = 1:nrow(site_coords)) %dopar% PEcAn.data.remote::call_MODIS( + product = "MOD15A2H", + band = "Lai_500m", start_date = "2001001", + end_date = "2010365", lat = site_coords$Latitude[i], + lon = site_coords$Longitude[i], + size = 0, band_qc = "FparLai_QC", + band_sd = "LaiStdDev_500m", + package_method = "MODISTools" + ) + stopCluster(cl) modis_data <- do.call(rbind.data.frame, modis_data) - + # modis_data <- data.frame() # for (i in 1:length(observation)) { # print(paste("extracting site: ", observation[i], sep = "")) - # data <- PEcAn.data.remote::call_MODIS(lat = site_coords[i,2], lon = site_coords[i,1], - # start_date = "2001001", end_date = "2010365", - # size = 0, product = "MOD15A2H", band = "Lai_500m", + # data <- 
PEcAn.data.remote::call_MODIS(lat = site_coords[i,2], lon = site_coords[i,1], + # start_date = "2001001", end_date = "2010365", + # size = 0, product = "MOD15A2H", band = "Lai_500m", # band_qc = "", band_sd = "LaiStdDev_500m", package_method = "MODISTools") # modis_data <- rbind(modis_data, data) # } # output resuls of call_MODIS - save(modis_data, file = file.path(data_dir,'modis_lai_output.RData')) + save(modis_data, file = file.path(data_dir, "modis_lai_output.RData")) } else { - load(file = file.path(data_dir,'modis_lai_output.RData')) + load(file = file.path(data_dir, "modis_lai_output.RData")) } # find peaks peak_lai <- data.frame() years <- unique(year(as.Date(modis_data$calendar_date, "%Y-%m-%d"))) -#site_ll <- data.frame(cbind(lon=unique(modis_data$lon),lat=unique(modis_data$lat))) -site_ll <- data.frame(cbind(lat=unique(modis_data$lat),lon=unique(modis_data$lon))) +# site_ll <- data.frame(cbind(lon=unique(modis_data$lon),lat=unique(modis_data$lat))) +site_ll <- data.frame(cbind(lat = unique(modis_data$lat), lon = unique(modis_data$lon))) for (i in 1:length(years)) { year <- years[i] g <- grep(modis_data$calendar_date, pattern = year) - d <- modis_data[g,] + d <- modis_data[g, ] for (j in 1:length(site_IDs)) { - pixel <- filter(d, lat == site_ll[j,1] & lon == site_ll[j,2]) - + pixel <- filter(d, lat == site_ll[j, 1] & lon == site_ll[j, 2]) + # using peak - peak <- pixel[which(pixel$data == max(pixel$data, na.rm = T)),][1,] - + peak <- pixel[which(pixel$data == max(pixel$data, na.rm = T)), ][1, ] + # using mean - #mn_data <- mean(pixel$data, na.rm = T) - #mn_sd <- mean(pixel$sd, na.rm = T) - #peak <- pixel[1,] - #peak$data <- mn_data - #peak$sd <- mn_sd - - - peak$calendar_date = paste("Year", year, sep = "_") + # mn_data <- mean(pixel$data, na.rm = T) + # mn_sd <- mean(pixel$sd, na.rm = T) + # peak <- pixel[1,] + # peak$data <- mn_data + # peak$sd <- mn_sd + + + peak$calendar_date <- paste("Year", year, sep = "_") peak$tile <- site_names[j] - #peak$tile <- site_IDs[j] - peak_lai <- rbind(peak_lai, peak) - } + # peak$tile <- site_IDs[j] + peak_lai <- rbind(peak_lai, peak) + } } # sort the data by site so the correct values are placed into the resized data frames below. 
@@ -172,37 +184,43 @@ point_list point_list$median_lai <- point_list$median_lai[[1]] point_list$stdv_lai <- point_list$stdv_lai[[1]] -#point_list$median_lai <- point_list$median_lai[[1]] %>% filter(Site_ID %in% site.ids) -#point_list$stdv_lai <- point_list$stdv_lai[[1]] %>% filter(Site_ID %in% site.ids) -#site.order <- sapply(site.ids,function(x) which(point_list$median_lai$Site_ID %in% x)) %>% +# point_list$median_lai <- point_list$median_lai[[1]] %>% filter(Site_ID %in% site.ids) +# point_list$stdv_lai <- point_list$stdv_lai[[1]] %>% filter(Site_ID %in% site.ids) +# site.order <- sapply(site.ids,function(x) which(point_list$median_lai$Site_ID %in% x)) %>% # as.numeric() %>% na.omit() -#point_list$median_lai <- point_list$median_lai[site.order,] -#point_list$stdv_lai <- point_list$stdv_lai[site.order,] -#point_list +# point_list$median_lai <- point_list$median_lai[site.order,] +# point_list$stdv_lai <- point_list$stdv_lai[site.order,] +# point_list -site.order <- sapply(site.ids,function(x) which(point_list$median_lai$Site_ID %in% x)) %>% - as.numeric() %>% na.omit() -point_list$median_lai <- point_list$median_lai[site.order,] -point_list$stdv_lai <- point_list$stdv_lai[site.order,] +site.order <- sapply(site.ids, function(x) which(point_list$median_lai$Site_ID %in% x)) %>% + as.numeric() %>% + na.omit() +point_list$median_lai <- point_list$median_lai[site.order, ] +point_list$stdv_lai <- point_list$stdv_lai[site.order, ] # truning lists to dfs for both mean and cov -date.obs <- strsplit(names(point_list$median_lai),"_")[3:length(point_list$median_lai)] %>% map_chr(~.x[2]) %>% paste0(.,"/07/15") +date.obs <- strsplit(names(point_list$median_lai), "_")[3:length(point_list$median_lai)] %>% + map_chr(~ .x[2]) %>% + paste0(., "/07/15") obs.mean <- names(point_list$median_lai)[3:length(point_list$median_lai)] %>% - map(function(namesl){ - ((point_list$median_lai)[[namesl]] %>% - map(~.x %>% as.data.frame %>% `colnames<-`(c('LAI'))) %>% - setNames(site.ids[1:length(.)]) + map(function(namesl) { + ((point_list$median_lai)[[namesl]] %>% + map(~ .x %>% + as.data.frame() %>% + `colnames<-`(c("LAI"))) %>% + setNames(site.ids[1:length(.)]) ) - }) %>% setNames(date.obs) + }) %>% + setNames(date.obs) -obs.cov <-names(point_list$stdv_lai)[3:length(point_list$median_lai)] %>% +obs.cov <- names(point_list$stdv_lai)[3:length(point_list$median_lai)] %>% map(function(namesl) { ((point_list$stdv_lai)[[namesl]] %>% - map( ~ (.x) ^ 2 %>% as.matrix()) %>% - setNames(site.ids[1:length(.)])) - - }) %>% setNames(date.obs) + map(~ (.x)^2 %>% as.matrix()) %>% + setNames(site.ids[1:length(.)])) + }) %>% + setNames(date.obs) # check input data - after creating list of lists PEcAnAssimSequential::Construct.R(site.ids, "LAI", obs.mean[[1]], obs.cov[[1]]) @@ -221,16 +239,20 @@ PEcAn.settings::write.settings(new.settings, outputfile = "pecan.CHECKED.xml") #--------------------------------------------------------------------------------------------------# ## Run SDA if (run_SDA) { - sda.enkf.multisite(new.settings, obs.mean = obs.mean ,obs.cov = obs.cov, - control=list(trace=T, - FF=F, - interactivePlot=T, - TimeseriesPlot=T, - BiasPlot=T, - plot.title="LAI SDA, uniform sampling", - facet.plots=T, - debug=T, - pause=F)) + sda.enkf.multisite(new.settings, + obs.mean = obs.mean, obs.cov = obs.cov, + control = list( + trace = T, + FF = F, + interactivePlot = T, + TimeseriesPlot = T, + BiasPlot = T, + plot.title = "LAI SDA, uniform sampling", + facet.plots = T, + debug = T, + pause = F + ) + ) } else { print("*** Not running 
SDA ***") } @@ -241,10 +263,10 @@ if (run_SDA) { #--------------------------------------------------------------------------------------------------# ## Wrap up # Send email if configured -#if (!is.null(settings$email) && !is.null(settings$email$to) && (settings$email$to != "")) { +# if (!is.null(settings$email) && !is.null(settings$email$to) && (settings$email$to != "")) { # sendmail(settings$email$from, settings$email$to, # paste0("SDA workflow has finished executing at ", base::date())) -#} +# } #--------------------------------------------------------------------------------------------------# diff --git a/modules/assim.sequential/inst/sda_backup/sserbin/Rscripts/single_site_SDA_BNL.R b/modules/assim.sequential/inst/sda_backup/sserbin/Rscripts/single_site_SDA_BNL.R index 5d7618e1ac8..0d153db8f58 100755 --- a/modules/assim.sequential/inst/sda_backup/sserbin/Rscripts/single_site_SDA_BNL.R +++ b/modules/assim.sequential/inst/sda_backup/sserbin/Rscripts/single_site_SDA_BNL.R @@ -8,9 +8,9 @@ #---------------- Close all devices and delete all variables. -------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# @@ -23,40 +23,47 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential library(purrr) library(listviewer) library(dplyr) -run_SDA <- TRUE #TRUE/FALSE +run_SDA <- TRUE # TRUE/FALSE #--------------------------------------------------------------------------------------------------# #--------------------------------------------------------------------------------------------------# ## set run options, some of these should be tweaked or removed as requirements work_dir <- "/data/sserbin/Modeling/sipnet/NASA_CMS_AGB_LAI" -setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions +setwd(work_dir) # best not to require setting wd and instead just providing full paths in functions -# Deifine observation - use existing or generate new? +# Deifine observation - use existing or generate new? # set to a specific file, use that. observation <- c("1000000048") # delete an old run -unlink(c('run','out','SDA'),recursive = T) +unlink(c("run", "out", "SDA"), recursive = T) # grab multi-site XML file settings <- read.settings("XMLs/pecan_US-CZ3_LAI_SDA.xml") # what is this step for???? is this to get the site locations for the map?? 
-if ("MultiSettings" %in% class(settings)) site.ids <- settings %>% - map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +if ("MultiSettings" %in% class(settings)) { + site.ids <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() +} # sample from parameters used for both sensitivity analysis and Ens -get.parameter.samples(settings, - ens.sample.method = settings$ensemble$samplingspace$parameters$method) +get.parameter.samples(settings, + ens.sample.method = settings$ensemble$samplingspace$parameters$method +) ## Aside: if method were set to unscented, would take minimal changes to do UnKF #--------------------------------------------------------------------------------------------------# @@ -64,7 +71,7 @@ get.parameter.samples(settings, #--------------------------------------------------------------------------------------------------# ## Prepare observational data - still very hacky here -# option 1: use existing observation file +# option 1: use existing observation file # if (observation!="new") { # load(observation) # site1 <- point_list @@ -79,29 +86,32 @@ data_dir <- "/data/sserbin/Modeling/sipnet/NASA_CMS_AGB_LAI/modis_lai_data" parameters <- settings$run # get MODIS data -#modis <- PEcAn.data.remote::call_MODIS(lat = as.numeric(parameters$site$lat), lon = as.numeric(parameters$site$lon), -# start_date = parameters$start.date, end_date = parameters$end.date, -# siteID = parameters$site$id, size = 0, product = "MOD15A2H", band = "Lai_500m", +# modis <- PEcAn.data.remote::call_MODIS(lat = as.numeric(parameters$site$lat), lon = as.numeric(parameters$site$lon), +# start_date = parameters$start.date, end_date = parameters$end.date, +# siteID = parameters$site$id, size = 0, product = "MOD15A2H", band = "Lai_500m", # band_qc = "", band_sd = "LaiStdDev_500m", package_method = "MODISTools") -#modis <- PEcAn.data.remote::call_MODIS(lat = as.numeric(parameters$site$lat), lon = as.numeric(parameters$site$lon), -# start_date = "2001/01/01", end_date = "2002/01/01", -# size = 0, product = "MOD15A2H", band = "Lai_500m", +# modis <- PEcAn.data.remote::call_MODIS(lat = as.numeric(parameters$site$lat), lon = as.numeric(parameters$site$lon), +# start_date = "2001/01/01", end_date = "2002/01/01", +# size = 0, product = "MOD15A2H", band = "Lai_500m", # band_qc = "", band_sd = "LaiStdDev_500m", package_method = "MODISTools") -if (!file.exists(file.path(data_dir,'modis_lai_output.RData'))) { - modis <- call_MODIS(product = "MOD15A2H", band = "Lai_500m", start_date = "2001001", end_date = "2010365", - lat = as.numeric(parameters$site$lat), lon = as.numeric(parameters$site$lon), size = 0, - band_qc = "FparLai_QC", band_sd = "LaiStdDev_500m", - package_method = "MODISTools") - save(modis, file = file.path(data_dir,'modis_lai_output.RData')) - +if (!file.exists(file.path(data_dir, "modis_lai_output.RData"))) { + modis <- call_MODIS( + product = "MOD15A2H", band = "Lai_500m", start_date = "2001001", end_date = "2010365", + lat = as.numeric(parameters$site$lat), lon = as.numeric(parameters$site$lon), size = 0, + band_qc = "FparLai_QC", band_sd = "LaiStdDev_500m", + package_method = "MODISTools" + ) + save(modis, file = file.path(data_dir, "modis_lai_output.RData")) } else { - load(file = file.path(data_dir,'modis_lai_output.RData')) + load(file = file.path(data_dir, "modis_lai_output.RData")) } -# -bety <- list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE) +# +bety <- list( + 
user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE +) con <- PEcAn.DB::db.open(bety) bety$con <- con @@ -110,59 +120,65 @@ Site_Info Site_ID <- Site_Info$id Site_Name <- Site_Info$sitename -#plot(lubridate::as_date(modis$calendar_date), modis$data, type="l") +# plot(lubridate::as_date(modis$calendar_date), modis$data, type="l") peak_lai <- vector() years <- unique(year(as.Date(modis$calendar_date, "%Y-%m-%d"))) for (i in seq_along(years)) { year <- years[i] g <- grep(modis$calendar_date, pattern = year) - d <- modis[g,] + d <- modis[g, ] max <- which(d$data == max(d$data, na.rm = T)) - peak <- d[max,][1,] - peak$calendar_date = paste("Year", year, sep = "_") + peak <- d[max, ][1, ] + peak$calendar_date <- paste("Year", year, sep = "_") peak_lai <- rbind(peak_lai, peak) } # transpose the data -median_lai = as.data.frame(cbind(Site_ID, Site_Name, t(cbind(peak_lai$data))), stringsAsFactors = F) -colnames(median_lai) = c("Site_ID", "Site_Name", peak_lai$calendar_date) -median_lai[3:length(median_lai)] = as.numeric(median_lai[3:length(median_lai)]) +median_lai <- as.data.frame(cbind(Site_ID, Site_Name, t(cbind(peak_lai$data))), stringsAsFactors = F) +colnames(median_lai) <- c("Site_ID", "Site_Name", peak_lai$calendar_date) +median_lai[3:length(median_lai)] <- as.numeric(median_lai[3:length(median_lai)]) -stdv_lai = as.data.frame(cbind(Site_ID, Site_Name, t(cbind(peak_lai$sd))), stringsAsFactors = F) -colnames(stdv_lai) = c("Site_ID", "Site_Name", peak_lai$calendar_date) -stdv_lai[3:length(stdv_lai)] = as.numeric(stdv_lai[3:length(stdv_lai)]) +stdv_lai <- as.data.frame(cbind(Site_ID, Site_Name, t(cbind(peak_lai$sd))), stringsAsFactors = F) +colnames(stdv_lai) <- c("Site_ID", "Site_Name", peak_lai$calendar_date) +stdv_lai[3:length(stdv_lai)] <- as.numeric(stdv_lai[3:length(stdv_lai)]) -point_list = list() -point_list$median_lai = median_lai -point_list$stdv_lai = stdv_lai +point_list <- list() +point_list$median_lai <- median_lai +point_list$stdv_lai <- stdv_lai ## needed for landtrendr for nested lists. 
Lai isn't as nested -#point_list$median_lai <- point_list$median_lai[[1]] %>% filter(Site_ID %in% site.ids) -#point_list$stdv_lai <- point_list$stdv_lai[[1]] %>% filter(Site_ID %in% site.ids) -site.order <- sapply(site.ids,function(x) which(point_list$median_lai$Site_ID %in% x)) %>% - as.numeric() %>% na.omit() -point_list$median_lai <- point_list$median_lai[site.order,] -point_list$stdv_lai <- point_list$stdv_lai[site.order,] +# point_list$median_lai <- point_list$median_lai[[1]] %>% filter(Site_ID %in% site.ids) +# point_list$stdv_lai <- point_list$stdv_lai[[1]] %>% filter(Site_ID %in% site.ids) +site.order <- sapply(site.ids, function(x) which(point_list$median_lai$Site_ID %in% x)) %>% + as.numeric() %>% + na.omit() +point_list$median_lai <- point_list$median_lai[site.order, ] +point_list$stdv_lai <- point_list$stdv_lai[site.order, ] # truning lists to dfs for both mean and cov -date.obs <- strsplit(names(point_list$median_lai),"_")[3:length(point_list$median_lai)] %>% map_chr(~.x[2]) %>% paste0(.,"/07/15") +date.obs <- strsplit(names(point_list$median_lai), "_")[3:length(point_list$median_lai)] %>% + map_chr(~ .x[2]) %>% + paste0(., "/07/15") obs.mean <- names(point_list$median_lai)[3:length(point_list$median_lai)] %>% - map(function(namesl){ - ((point_list$median_lai)[[namesl]] %>% - map(~.x %>% as.data.frame %>% `colnames<-`(c('LAI'))) %>% - setNames(site.ids[1:length(.)]) + map(function(namesl) { + ((point_list$median_lai)[[namesl]] %>% + map(~ .x %>% + as.data.frame() %>% + `colnames<-`(c("LAI"))) %>% + setNames(site.ids[1:length(.)]) ) - }) %>% setNames(date.obs) + }) %>% + setNames(date.obs) -obs.cov <-names(point_list$stdv_lai)[3:length(point_list$median_lai)] %>% +obs.cov <- names(point_list$stdv_lai)[3:length(point_list$median_lai)] %>% map(function(namesl) { ((point_list$stdv_lai)[[namesl]] %>% - map( ~ (.x) ^ 2 %>% as.matrix()) %>% - setNames(site.ids[1:length(.)])) - - }) %>% setNames(date.obs) + map(~ (.x)^2 %>% as.matrix()) %>% + setNames(site.ids[1:length(.)])) + }) %>% + setNames(date.obs) # check input data - after creating list of lists PEcAnAssimSequential::Construct.R(site.ids, "LAI", obs.mean[[1]], obs.cov[[1]]) @@ -179,19 +195,23 @@ new.settings <- PEcAn.settings::prepare.settings(settings) #--------------------------------------------------------------------------------------------------# ## Run SDA if (run_SDA) { - #sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, - - sda.enkf.multisite(settings, obs.mean =obs.mean ,obs.cov = obs.cov, - control=list(trace=T, - FF=F, - interactivePlot=F, - TimeseriesPlot=T, - BiasPlot=F, - plot.title="LAI SDA, 1 site", - facet.plots=T, - debug=T, - pause=F)) - + # sda.enkf.multisite(new.settings, obs.mean =obs.mean ,obs.cov = obs.cov, + + sda.enkf.multisite(settings, + obs.mean = obs.mean, obs.cov = obs.cov, + control = list( + trace = T, + FF = F, + interactivePlot = F, + TimeseriesPlot = T, + BiasPlot = F, + plot.title = "LAI SDA, 1 site", + facet.plots = T, + debug = T, + pause = F + ) + ) + # sda.enkf(settings, obs.mean = obs.mean ,obs.cov = obs.cov, # control=list(trace=T, # FF=F, @@ -202,7 +222,6 @@ if (run_SDA) { # facet.plots=T, # debug=T, # pause=F)) - } else { print("*** Not running SDA ***") } @@ -213,10 +232,10 @@ if (run_SDA) { #--------------------------------------------------------------------------------------------------# ## Wrap up # Send email if configured -#if (!is.null(settings$email) && !is.null(settings$email$to) && (settings$email$to != "")) { +# if (!is.null(settings$email) && 
!is.null(settings$email$to) && (settings$email$to != "")) { # sendmail(settings$email$from, settings$email$to, # paste0("SDA workflow has finished executing at ", base::date())) -#} +# } #--------------------------------------------------------------------------------------------------# diff --git a/modules/assim.sequential/inst/sda_backup/sserbin/workflow.R b/modules/assim.sequential/inst/sda_backup/sserbin/workflow.R index a54cd35135b..0c86310c41b 100755 --- a/modules/assim.sequential/inst/sda_backup/sserbin/workflow.R +++ b/modules/assim.sequential/inst/sda_backup/sserbin/workflow.R @@ -107,8 +107,8 @@ if (PEcAn.utils::status.check("CONFIG") == 0) { settings <- PEcAn.settings::read.settings(file.path(settings$outdir, "pecan.CONFIGS.xml")) } -if ((length(which(commandArgs() == "--advanced")) != 0) -&& (PEcAn.utils::status.check("ADVANCED") == 0)) { +if ((length(which(commandArgs() == "--advanced")) != 0) && + (PEcAn.utils::status.check("ADVANCED") == 0)) { PEcAn.utils::status.start("ADVANCED") q() } @@ -121,7 +121,7 @@ if (PEcAn.utils::status.check("MODEL") == 0) { # If we're doing an ensemble run, don't stop. If only a single run, we # should be stopping. if (is.null(settings[["ensemble"]]) || - as.numeric(settings[[c("ensemble", "size")]]) == 1) { + as.numeric(settings[[c("ensemble", "size")]]) == 1) { stop_on_error <- TRUE } else { stop_on_error <- FALSE @@ -139,16 +139,16 @@ if (PEcAn.utils::status.check("OUTPUT") == 0) { } # Run ensemble analysis on model output. -if ("ensemble" %in% names(settings) -&& PEcAn.utils::status.check("ENSEMBLE") == 0) { +if ("ensemble" %in% names(settings) && + PEcAn.utils::status.check("ENSEMBLE") == 0) { PEcAn.utils::status.start("ENSEMBLE") runModule.run.ensemble.analysis(settings, TRUE) PEcAn.utils::status.end() } # Run sensitivity analysis and variance decomposition on model output -if ("sensitivity.analysis" %in% names(settings) -&& PEcAn.utils::status.check("SENSITIVITY") == 0) { +if ("sensitivity.analysis" %in% names(settings) && + PEcAn.utils::status.check("SENSITIVITY") == 0) { PEcAn.utils::status.start("SENSITIVITY") runModule.run.sensitivity.analysis(settings) PEcAn.utils::status.end() @@ -174,8 +174,8 @@ if ("state.data.assimilation" %in% names(settings)) { } # Run benchmarking -if ("benchmarking" %in% names(settings) -&& "benchmark" %in% names(settings$benchmarking)) { +if ("benchmarking" %in% names(settings) && + "benchmark" %in% names(settings$benchmarking)) { PEcAn.utils::status.start("BENCHMARKING") results <- papply(settings, function(x) { @@ -198,9 +198,9 @@ if (PEcAn.utils::status.check("FINISHED") == 0) { ) # Send email if configured - if (!is.null(settings$email) - && !is.null(settings$email$to) - && (settings$email$to != "")) { + if (!is.null(settings$email) && + !is.null(settings$email$to) && + (settings$email$to != "")) { sendmail( settings$email$from, settings$email$to, diff --git a/modules/assim.sequential/inst/sda_backup/sserbin/workflow_2.R b/modules/assim.sequential/inst/sda_backup/sserbin/workflow_2.R index a54cd35135b..0c86310c41b 100755 --- a/modules/assim.sequential/inst/sda_backup/sserbin/workflow_2.R +++ b/modules/assim.sequential/inst/sda_backup/sserbin/workflow_2.R @@ -107,8 +107,8 @@ if (PEcAn.utils::status.check("CONFIG") == 0) { settings <- PEcAn.settings::read.settings(file.path(settings$outdir, "pecan.CONFIGS.xml")) } -if ((length(which(commandArgs() == "--advanced")) != 0) -&& (PEcAn.utils::status.check("ADVANCED") == 0)) { +if ((length(which(commandArgs() == "--advanced")) != 0) && + 
(PEcAn.utils::status.check("ADVANCED") == 0)) { PEcAn.utils::status.start("ADVANCED") q() } @@ -121,7 +121,7 @@ if (PEcAn.utils::status.check("MODEL") == 0) { # If we're doing an ensemble run, don't stop. If only a single run, we # should be stopping. if (is.null(settings[["ensemble"]]) || - as.numeric(settings[[c("ensemble", "size")]]) == 1) { + as.numeric(settings[[c("ensemble", "size")]]) == 1) { stop_on_error <- TRUE } else { stop_on_error <- FALSE @@ -139,16 +139,16 @@ if (PEcAn.utils::status.check("OUTPUT") == 0) { } # Run ensemble analysis on model output. -if ("ensemble" %in% names(settings) -&& PEcAn.utils::status.check("ENSEMBLE") == 0) { +if ("ensemble" %in% names(settings) && + PEcAn.utils::status.check("ENSEMBLE") == 0) { PEcAn.utils::status.start("ENSEMBLE") runModule.run.ensemble.analysis(settings, TRUE) PEcAn.utils::status.end() } # Run sensitivity analysis and variance decomposition on model output -if ("sensitivity.analysis" %in% names(settings) -&& PEcAn.utils::status.check("SENSITIVITY") == 0) { +if ("sensitivity.analysis" %in% names(settings) && + PEcAn.utils::status.check("SENSITIVITY") == 0) { PEcAn.utils::status.start("SENSITIVITY") runModule.run.sensitivity.analysis(settings) PEcAn.utils::status.end() @@ -174,8 +174,8 @@ if ("state.data.assimilation" %in% names(settings)) { } # Run benchmarking -if ("benchmarking" %in% names(settings) -&& "benchmark" %in% names(settings$benchmarking)) { +if ("benchmarking" %in% names(settings) && + "benchmark" %in% names(settings$benchmarking)) { PEcAn.utils::status.start("BENCHMARKING") results <- papply(settings, function(x) { @@ -198,9 +198,9 @@ if (PEcAn.utils::status.check("FINISHED") == 0) { ) # Send email if configured - if (!is.null(settings$email) - && !is.null(settings$email$to) - && (settings$email$to != "")) { + if (!is.null(settings$email) && + !is.null(settings$email$to) && + (settings$email$to != "")) { sendmail( settings$email$from, settings$email$to, diff --git a/modules/assim.sequential/inst/workflow.variance.partitioning.R b/modules/assim.sequential/inst/workflow.variance.partitioning.R index 0a536c44614..f7eeceaea3a 100644 --- a/modules/assim.sequential/inst/workflow.variance.partitioning.R +++ b/modules/assim.sequential/inst/workflow.variance.partitioning.R @@ -1,4 +1,3 @@ - ##### ##### Workflow code by Ann Raiho (ann.raiho@gmail.com) ##### This is the workflow template for doing a variance partitioning run @@ -17,7 +16,7 @@ library(PEcAnAssimSequential) library(nimble) library(lubridate) library(PEcAn.visualization) -#PEcAnAssimSequential:: +# PEcAnAssimSequential:: library(rgdal) # need to put in assim.sequential library(ncdf4) # need to put in assim.sequential @@ -28,19 +27,23 @@ library(ncdf4) # need to put in assim.sequential settings <- read.settings("pecan.SDA.xml") -load('sda.obs.Rdata') +load("sda.obs.Rdata") obs.mean <- obs.list$obs.mean obs.cov <- obs.list$obs.cov -sda.enkf(settings, obs.mean, obs.cov, Q = NULL, restart=F, - control=list(trace=T, - interactivePlot=T, - TimeseriesPlot=T, - BiasPlot=T, - plot.title=NULL, - debug=F, - pause = F)) +sda.enkf(settings, obs.mean, obs.cov, + Q = NULL, restart = F, + control = list( + trace = T, + interactivePlot = T, + TimeseriesPlot = T, + BiasPlot = T, + plot.title = NULL, + debug = F, + pause = F + ) +) #### #### DEFAULT @@ -48,231 +51,253 @@ sda.enkf(settings, obs.mean, obs.cov, Q = NULL, restart=F, nens <- settings$ensemble -#changed input to be only one met ensemble member -#basically the same as pecan.CONFIGS.xml -settings <- 
read.settings('pecan.DEFAULT.xml') +# changed input to be only one met ensemble member +# basically the same as pecan.CONFIGS.xml +settings <- read.settings("pecan.DEFAULT.xml") settings <- PEcAn.workflow::runModule.run.write.configs(settings) # Taking average of samples to have fixed params across nens -load('samples.Rdata') +load("samples.Rdata") ensemble.samples.means <- ensemble.samples -for(i in 1:length(ensemble.samples.means)) ensemble.samples.means[[i]] <- matrix(colMeans(ensemble.samples[[i]]),nens,ncol(ensemble.samples[[i]]),byrow = T) +for (i in 1:length(ensemble.samples.means)) ensemble.samples.means[[i]] <- matrix(colMeans(ensemble.samples[[i]]), nens, ncol(ensemble.samples[[i]]), byrow = T) ensemble.samples <- ensemble.samples.means -save(ensemble.samples,file='average_samples.Rdata') -save(ensemble.samples,file='samples.Rdata') - -outconfig <- write.ensemble.configs(defaults = settings$pfts, - ensemble.samples = ensemble.samples, - settings = settings, - model = settings$model$type, - write.to.db = settings$database$bety$write, - restart = NULL) +save(ensemble.samples, file = "average_samples.Rdata") +save(ensemble.samples, file = "samples.Rdata") + +outconfig <- write.ensemble.configs( + defaults = settings$pfts, + ensemble.samples = ensemble.samples, + settings = settings, + model = settings$model$type, + write.to.db = settings$database$bety$write, + restart = NULL +) PEcAn.workflow::runModule_start_model_runs(settings, stop.on.error = FALSE) -file.rename('out','out_default') -file.rename('run','run_default') +file.rename("out", "out_default") +file.rename("run", "run_default") #### #### DEFAULT -- DATA IC #### #### -#similar to pecan.SDA.xml but with not sampling params or met or doing process. Using SDA to constrain time step 1. -settings <- read.settings('pecan.DEFAULT.DATAIC.xml') -load('sda.obs.Rdata') +# similar to pecan.SDA.xml but without sampling params or met or doing process. Using SDA to constrain time step 1. +settings <- read.settings("pecan.DEFAULT.DATAIC.xml") +load("sda.obs.Rdata") -#Becasue we only want to inform the initial conditions for this model experiment we only use the first data point. -#The last data point is included so that the model runs until this point. +# Because we only want to inform the initial conditions for this model experiment, we only use the first data point. +# The last data point is included so that the model runs until this point.
obs.cov <- obs.mean <- list() -for(i in c(1,length(obs.list$obs.mean))){ +for (i in c(1, length(obs.list$obs.mean))) { obs.mean[[i]] <- obs.list$obs.mean[[i]] obs.cov[[i]] <- obs.list$obs.cov[[i]] } -#write dates as names for data objects +# write dates as names for data objects names(obs.cov) <- names(obs.mean) <- names(obs.list$obs.cov) -obs.mean[2:(length(obs.list$obs.mean)-1)] <- NULL -obs.cov[2:(length(obs.list$obs.mean)-1)] <- NULL +obs.mean[2:(length(obs.list$obs.mean) - 1)] <- NULL +obs.cov[2:(length(obs.list$obs.mean) - 1)] <- NULL -obs.mean[[length(obs.mean)]] <- rep(NA,length(ensemble.samples.means)) +obs.mean[[length(obs.mean)]] <- rep(NA, length(ensemble.samples.means)) -sda.enkf(settings, obs.mean, obs.cov, Q = NULL, restart=F, - control=list(trace=T, - interactivePlot=T, - TimeseriesPlot=T, - BiasPlot=T, - plot.title=NULL, - debug=F, - pause = F)) +sda.enkf(settings, obs.mean, obs.cov, + Q = NULL, restart = F, + control = list( + trace = T, + interactivePlot = T, + TimeseriesPlot = T, + BiasPlot = T, + plot.title = NULL, + debug = F, + pause = F + ) +) -file.rename('out','out_default_ic') -file.rename('run','run_default_ic') -file.rename('SDA','SDA_default_ic') +file.rename("out", "out_default_ic") +file.rename("run", "run_default_ic") +file.rename("SDA", "SDA_default_ic") #### #### PARAM #### #### -#running with sampled params -settings <- read.settings('pecan.DEFAULT.xml') +# running with sampled params +settings <- read.settings("pecan.DEFAULT.xml") settings <- PEcAn.workflow::runModule.run.write.configs(settings) PEcAn.workflow::runModule_start_model_runs(settings, stop.on.error = FALSE) -file.rename('out','out_param') -file.rename('run','run_param') +file.rename("out", "out_param") +file.rename("run", "run_param") #### #### PARAM DATA IC #### #### -settings <- read.settings('pecan.DEFAULT.DATAIC.xml') -load('sda.obs.Rdata')#load('sda.data_AGB.Rdata') +settings <- read.settings("pecan.DEFAULT.DATAIC.xml") +load("sda.obs.Rdata") # load('sda.data_AGB.Rdata') -#Becasue we only want to inform the initial conditions for this model experiment we only use the first data point. -#The last data point is included so that the model runs until this point. +# Because we only want to inform the initial conditions for this model experiment, we only use the first data point. +# The last data point is included so that the model runs until this point.
obs.cov <- obs.mean <- list() -for(i in c(1,length(obs.list$obs.mean))){ +for (i in c(1, length(obs.list$obs.mean))) { obs.mean[[i]] <- obs.list$obs.mean[[i]] obs.cov[[i]] <- obs.list$obs.cov[[i]] } -#write dates as names for data objects +# write dates as names for data objects names(obs.cov) <- names(obs.mean) <- names(obs.list$obs.cov) -obs.mean[2:(length(obs.list$obs.mean)-1)] <- NULL -obs.cov[2:(length(obs.list$obs.mean)-1)] <- NULL +obs.mean[2:(length(obs.list$obs.mean) - 1)] <- NULL +obs.cov[2:(length(obs.list$obs.mean) - 1)] <- NULL -obs.mean[[length(obs.mean)]] <- rep(NA,length(ensemble.samples.means)) +obs.mean[[length(obs.mean)]] <- rep(NA, length(ensemble.samples.means)) -sda.enkf(settings, obs.mean, obs.cov, Q = NULL, restart=F, - control=list(trace=T, - interactivePlot=T, - TimeseriesPlot=T, - BiasPlot=T, - plot.title=NULL, - debug=F, - pause = F)) +sda.enkf(settings, obs.mean, obs.cov, + Q = NULL, restart = F, + control = list( + trace = T, + interactivePlot = T, + TimeseriesPlot = T, + BiasPlot = T, + plot.title = NULL, + debug = F, + pause = F + ) +) -file.rename('out','out_param_ic') -file.rename('run','run_param_ic') +file.rename("out", "out_param_ic") +file.rename("run", "run_param_ic") #### #### MET #### #### -#running with sampled params -settings <- read.settings('pecan.SAMP.MET.xml') +# running with sampled params +settings <- read.settings("pecan.SAMP.MET.xml") settings <- PEcAn.workflow::runModule.run.write.configs(settings) PEcAn.workflow::runModule_start_model_runs(settings, stop.on.error = FALSE) -file.rename('out','out_met') -file.rename('run','run_met') +file.rename("out", "out_met") +file.rename("run", "run_met") #### #### MET DATA IC #### #### -file.rename('ensemble_weights_SDA.Rdata','ensemble_weights.Rdata') +file.rename("ensemble_weights_SDA.Rdata", "ensemble_weights.Rdata") -settings <- read.settings('pecan.SAMP.MET.DATA.IC.xml') -load('sda.obs.Rdata')#load('sda.data_AGB.Rdata') +settings <- read.settings("pecan.SAMP.MET.DATA.IC.xml") +load("sda.obs.Rdata") # load('sda.data_AGB.Rdata') -#Becasue we only want to inform the initial conditions for this model experiment we only use the first data point. -#The last data point is included so that the model runs until this point. +# Because we only want to inform the initial conditions for this model experiment, we only use the first data point. +# The last data point is included so that the model runs until this point.
obs.cov <- obs.mean <- list() -for(i in c(1,length(obs.list$obs.mean))){ +for (i in c(1, length(obs.list$obs.mean))) { obs.mean[[i]] <- obs.list$obs.mean[[i]] obs.cov[[i]] <- obs.list$obs.cov[[i]] } -#write dates as names for data objects +# write dates as names for data objects names(obs.cov) <- names(obs.mean) <- names(obs.list$obs.cov) -obs.mean[2:(length(obs.list$obs.mean)-1)] <- NULL -obs.cov[2:(length(obs.list$obs.mean)-1)] <- NULL +obs.mean[2:(length(obs.list$obs.mean) - 1)] <- NULL +obs.cov[2:(length(obs.list$obs.mean) - 1)] <- NULL -obs.mean[[length(obs.mean)]] <- rep(NA,length(ensemble.samples.means)) +obs.mean[[length(obs.mean)]] <- rep(NA, length(ensemble.samples.means)) -sda.enkf(settings, obs.mean, obs.cov, Q = NULL, restart=F, - control=list(trace=T, - interactivePlot=T, - TimeseriesPlot=T, - BiasPlot=T, - plot.title=NULL, - debug=F, - pause = F)) +sda.enkf(settings, obs.mean, obs.cov, + Q = NULL, restart = F, + control = list( + trace = T, + interactivePlot = T, + TimeseriesPlot = T, + BiasPlot = T, + plot.title = NULL, + debug = F, + pause = F + ) +) -file.rename('out','out_met_ic') -file.rename('run','run_met_ic') +file.rename("out", "out_met_ic") +file.rename("run", "run_met_ic") #### #### PROCESS #### #### -settings <- read.settings('pecan.PROCESS.xml') +settings <- read.settings("pecan.PROCESS.xml") -#running with sampled params -load('sda.obs.Rdata')#load('sda.data_AGB.Rdata') +# running with sampled params +load("sda.obs.Rdata") # load('sda.data_AGB.Rdata') obs.mean <- obs.list$obs.mean obs.cov <- obs.list$obs.cov -#write dates as names for data objects +# write dates as names for data objects names(obs.cov) <- names(obs.mean) <- names(obs.list$obs.cov) -for(i in 1:length(obs.list$obs.mean)) obs.mean[[i]] <- rep(NA,length(ensemble.samples.means)) +for (i in 1:length(obs.list$obs.mean)) obs.mean[[i]] <- rep(NA, length(ensemble.samples.means)) -load('SDA_SDA/sda.output.Rdata') +load("SDA_SDA/sda.output.Rdata") -Q <- solve(enkf.params[[t-1]]$q.bar) +Q <- solve(enkf.params[[t - 1]]$q.bar) rm(new.state) -sda.enkf(settings, obs.mean, obs.cov, Q = Q, restart=F, - control=list(trace=T, - interactivePlot=T, - TimeseriesPlot=T, - BiasPlot=T, - plot.title=NULL, - debug=F, - pause = F)) - -file.rename('out','out_process') -file.rename('run','run_process') -file.rename('SDA','SDA_process') +sda.enkf(settings, obs.mean, obs.cov, + Q = Q, restart = F, + control = list( + trace = T, + interactivePlot = T, + TimeseriesPlot = T, + BiasPlot = T, + plot.title = NULL, + debug = F, + pause = F + ) +) + +file.rename("out", "out_process") +file.rename("run", "run_process") +file.rename("SDA", "SDA_process") #### #### PROCESS DATA IC #### #### -settings <- read.settings('pecan.PROCESS.xml') +settings <- read.settings("pecan.PROCESS.xml") -#running with sampled params -load('sda.obs.Rdata') +# running with sampled params +load("sda.obs.Rdata") obs.mean <- obs.list$obs.mean obs.cov <- obs.list$obs.cov -#write dates as names for data objects +# write dates as names for data objects names(obs.cov) <- names(obs.mean) <- names(obs.list$obs.cov) -for(i in 2:length(obs.list$obs.mean)) obs.mean[[i]] <- rep(NA,length(ensemble.samples.means)) +for (i in 2:length(obs.list$obs.mean)) obs.mean[[i]] <- rep(NA, length(ensemble.samples.means)) -load('SDA_SDA/sda.output.Rdata') +load("SDA_SDA/sda.output.Rdata") -Q <- solve(enkf.params[[t-1]]$q.bar) +Q <- solve(enkf.params[[t - 1]]$q.bar) rm(new.state) -sda.enkf(settings, obs.mean, obs.cov, Q = Q, restart=T, - control=list(trace=T, - interactivePlot=T, - 
TimeseriesPlot=T, - BiasPlot=T, - plot.title=NULL, - debug=F, - pause = F)) - -file.rename('out','out_process_ic') -file.rename('run','run_process_ic') -file.rename('SDA','SDA_process_ic') +sda.enkf(settings, obs.mean, obs.cov, + Q = Q, restart = T, + control = list( + trace = T, + interactivePlot = T, + TimeseriesPlot = T, + BiasPlot = T, + plot.title = NULL, + debug = F, + pause = F + ) +) + +file.rename("out", "out_process_ic") +file.rename("run", "run_process_ic") +file.rename("SDA", "SDA_process_ic") diff --git a/modules/assim.sequential/tests/testthat/test_aggregation.R b/modules/assim.sequential/tests/testthat/test_aggregation.R index 8595aa8c05d..b1c46257f7c 100644 --- a/modules/assim.sequential/tests/testthat/test_aggregation.R +++ b/modules/assim.sequential/tests/testthat/test_aggregation.R @@ -5,30 +5,30 @@ library(exactextractr) library(terra) source("../../R/aggregate.R") test_that("returns aggregated values for RI", { - # Load the saved polygon data with Massachusetts as an example - us_states <- readRDS("test_aggregation/us_states.rds") - state <- "RI" - polygon_data <- st_transform(us_states[us_states$STUSPS == state, ], crs = "EPSG:4326") + # Load the saved polygon data with Massachusetts as an example + us_states <- readRDS("test_aggregation/us_states.rds") + state <- "RI" + polygon_data <- st_transform(us_states[us_states$STUSPS == state, ], crs = "EPSG:4326") - # Load the downscaled raster output - downscale_output <- list( - maps = list( - ensemble1 = "test_aggregation/ensemble1.tif", - ensemble2 = "test_aggregation/ensemble2.tif", - ensemble3 = "test_aggregation/ensemble3.tif" - ) + # Load the downscaled raster output + downscale_output <- list( + maps = list( + ensemble1 = "test_aggregation/ensemble1.tif", + ensemble2 = "test_aggregation/ensemble2.tif", + ensemble3 = "test_aggregation/ensemble3.tif" ) + ) - read_raster <- function(file_path) { - rast(file_path) - } + read_raster <- function(file_path) { + rast(file_path) + } - downscale_output$maps <- lapply(downscale_output$maps, read_raster) - # Aggregate for RI - RI <- aggregate(downscale_output, polygon_data, func = 'mean') - comp <- RI$TTL_mean * 10^9 - comparison_result <- (1.31 < comp & comp < 1.32) - expect_true(comparison_result) + downscale_output$maps <- lapply(downscale_output$maps, read_raster) + # Aggregate for RI + RI <- aggregate(downscale_output, polygon_data, func = "mean") + comp <- RI$TTL_mean * 10^9 + comparison_result <- (1.31 < comp & comp < 1.32) + expect_true(comparison_result) }) test_that("returns error of unmatched CRS", { @@ -36,7 +36,7 @@ test_that("returns error of unmatched CRS", { us_states <- readRDS("test_aggregation/us_states.rds") state <- "RI" polygon_data <- st_transform(us_states[us_states$STUSPS == state, ], crs = "EPSG:2222") - + # Load the downscaled raster output downscale_output <- list( maps = list( @@ -45,16 +45,14 @@ test_that("returns error of unmatched CRS", { ensemble3 = "test_aggregation/ensemble3.tif" ) ) - + read_raster <- function(file_path) { rast(file_path) } - + downscale_output$maps <- lapply(downscale_output$maps, read_raster) expect_error( - aggregate(downscale_output, polygon_data, func = 'mean'), + aggregate(downscale_output, polygon_data, func = "mean"), "CRS of downscale_output and polygon_data must match." 
) }) - - diff --git a/modules/assim.sequential/tests/testthat/test_rescaling.R b/modules/assim.sequential/tests/testthat/test_rescaling.R index 8b75dda758d..1189fd3d949 100644 --- a/modules/assim.sequential/tests/testthat/test_rescaling.R +++ b/modules/assim.sequential/tests/testthat/test_rescaling.R @@ -1,75 +1,90 @@ - settings <- list( - state.data.assimilation = list( - state.variables = list( - variable = list(variable.name = "a", scaling_factor = 1), - variable = list(variable.name = "b", scaling_factor = 2), - variable = list(variable.name = "c", scaling_factor = 3), - variable = list(variable.name = "z", scaling_factor = 0)))) + state.data.assimilation = list( + state.variables = list( + variable = list(variable.name = "a", scaling_factor = 1), + variable = list(variable.name = "b", scaling_factor = 2), + variable = list(variable.name = "c", scaling_factor = 3), + variable = list(variable.name = "z", scaling_factor = 0) + ) + ) +) mkdata <- function(...) { - as.matrix(data.frame(...)) + as.matrix(data.frame(...)) } test_that("returns input where no scaling specified", { - expect_identical( - rescaling_stateVars(list(), 1L), - 1L) + expect_identical( + rescaling_stateVars(list(), 1L), + 1L + ) - unscalable <- mkdata(d = 1, e = 1) - expect_identical( - rescaling_stateVars(settings, unscalable), - unscalable) + unscalable <- mkdata(d = 1, e = 1) + expect_identical( + rescaling_stateVars(settings, unscalable), + unscalable + ) - partly_scaleable <- mkdata(c = 10, d = 10) - expect_equal( - rescaling_stateVars(settings, partly_scaleable), - partly_scaleable * c(3, 1)) + partly_scaleable <- mkdata(c = 10, d = 10) + expect_equal( + rescaling_stateVars(settings, partly_scaleable), + partly_scaleable * c(3, 1) + ) }) test_that("multiplies or divides as requested", { - expect_equal( - rescaling_stateVars( - settings, - mkdata(a = 1:3, b = 1:3, c = 1:3)), - mkdata(a = (1:3) * 1, b = (1:3) * 2, c = (1:3) * 3)) - expect_equal( - rescaling_stateVars( - settings, - mkdata(a = 1:3, b = 1:3, c = 1:3), - multiply = FALSE), - mkdata(a = (1:3) / 1, b = (1:3) / 2, c = (1:3) / 3)) + expect_equal( + rescaling_stateVars( + settings, + mkdata(a = 1:3, b = 1:3, c = 1:3) + ), + mkdata(a = (1:3) * 1, b = (1:3) * 2, c = (1:3) * 3) + ) + expect_equal( + rescaling_stateVars( + settings, + mkdata(a = 1:3, b = 1:3, c = 1:3), + multiply = FALSE + ), + mkdata(a = (1:3) / 1, b = (1:3) / 2, c = (1:3) / 3) + ) }) test_that("handles zeroes in data", { - expect_equal( - rescaling_stateVars(settings, mkdata(c = 0)), - mkdata(c = 0)) - expect_equal( - rescaling_stateVars(settings, mkdata(c = 0), multiply = FALSE), - mkdata(c = 0)) + expect_equal( + rescaling_stateVars(settings, mkdata(c = 0)), + mkdata(c = 0) + ) + expect_equal( + rescaling_stateVars(settings, mkdata(c = 0), multiply = FALSE), + mkdata(c = 0) + ) }) test_that("handles zeroes in scalars", { - expect_equal( - rescaling_stateVars(settings, mkdata(z = 10)), - mkdata(z = 0)) - expect_equal( - rescaling_stateVars(settings, mkdata(z = 10), multiply = FALSE), - mkdata(z = Inf)) + expect_equal( + rescaling_stateVars(settings, mkdata(z = 10)), + mkdata(z = 0) + ) + expect_equal( + rescaling_stateVars(settings, mkdata(z = 10), multiply = FALSE), + mkdata(z = Inf) + ) }) test_that("retains attributes", { - x_attrs <- mkdata(b = 1:3) - attr(x_attrs, "site") <- "foo" + x_attrs <- mkdata(b = 1:3) + attr(x_attrs, "site") <- "foo" - expect_identical( - attr(rescaling_stateVars(settings, x_attrs), "site"), - "foo") + expect_identical( + attr(rescaling_stateVars(settings, 
x_attrs), "site"), + "foo" + ) }) test_that("accepts data frames", { - expect_equal( - rescaling_stateVars(settings, data.frame(b = 2:4)), - data.frame(b = (2:4) * 2)) + expect_equal( + rescaling_stateVars(settings, data.frame(b = 2:4)), + data.frame(b = (2:4) * 2) + ) }) diff --git a/modules/benchmark/R/align_by_first_observation.R b/modules/benchmark/R/align_by_first_observation.R index 84b66592d71..fe586a825a9 100644 --- a/modules/benchmark/R/align_by_first_observation.R +++ b/modules/benchmark/R/align_by_first_observation.R @@ -3,56 +3,45 @@ #' align_first_observation #' @param observation_one a vector of plant functional types, or species. Provides species/pft names. #' @param observation_two another vector of plant functional types, or species. Provides the order. -#' @param custom_table a table that either maps two pft's to one another or maps custom species codes to bety id codes. -#' In the second case, must be passable to match_species_id. -#' @return \code{vector} Returns a vector of PFT's/species from observation_one that matches the order of observation_two -#' +#' @param custom_table a table that either maps two pft's to one another or maps custom species codes to bety id codes. +#' In the second case, must be passable to match_species_id. +#' @return \code{vector} Returns a vector of PFT's/species from observation_one that matches the order of observation_two +#' #' @author Tempest McCabe #' @examples -#' -#' observation_one<-c("AMCA3","AMCA3","AMCA3","AMCA3") -#' observation_two<-c("a", "b", "a", "a") -#' -#' table<-list() -#' table$plant_functional_type_one<- c("AMCA3","AMCA3","ARHY", "ARHY") -#' table$plant_functional_type_two<- c('a','a','b', 'b') # PFT groupings -#' table<-as.data.frame(table) +#' +#' observation_one <- c("AMCA3", "AMCA3", "AMCA3", "AMCA3") +#' observation_two <- c("a", "b", "a", "a") +#' +#' table <- list() +#' table$plant_functional_type_one <- c("AMCA3", "AMCA3", "ARHY", "ARHY") +#' table$plant_functional_type_two <- c("a", "a", "b", "b") # PFT groupings +#' table <- as.data.frame(table) #' #' aligned <- align_by_first_observation( #' observation_one = observation_one, #' observation_two = observation_two, -#' custom_table = table) +#' custom_table = table +#' ) #' #' # aligned should be a vector '[1] "AMCA3" "ARHY" "AMCA3" "AMCA3"' #' @export -align_by_first_observation<-function(observation_one, observation_two, custom_table){ - - final<-c() - - for( i in seq_along(observation_two)){ # For loop finds "coursest" PFT. - - subset<-custom_table[custom_table$plant_functional_type_two == observation_two[i],] - - if(length(subset$plant_functional_type_one) > length(subset$plant_functional_type_two)){ - - final[i]<-as.character(subset$plant_functional_type_two) - - }else if(length(subset$plant_functional_type_one) < length(subset$plant_functional_type_two)){ - - final[i]<-as.character(subset$plant_functional_type_one) - - }else if (length(subset$plant_functional_type_one) == length(subset$plant_functional_type_two)){ - - final[i]<-as.character(subset$plant_functional_type_one) - - }else{ - +align_by_first_observation <- function(observation_one, observation_two, custom_table) { + final <- c() + + for (i in seq_along(observation_two)) { # For loop finds "coursest" PFT. 
+ + subset <- custom_table[custom_table$plant_functional_type_two == observation_two[i], ] + + if (length(subset$plant_functional_type_one) > length(subset$plant_functional_type_two)) { + final[i] <- as.character(subset$plant_functional_type_two) + } else if (length(subset$plant_functional_type_one) < length(subset$plant_functional_type_two)) { + final[i] <- as.character(subset$plant_functional_type_one) + } else if (length(subset$plant_functional_type_one) == length(subset$plant_functional_type_two)) { + final[i] <- as.character(subset$plant_functional_type_one) + } else { PEcAn.logger::logger.warn("There are no subsets of the custom_table that are alignable. Likely a problem with the custom_table format") - - } - + } } return(final) } - - diff --git a/modules/benchmark/R/align_data.R b/modules/benchmark/R/align_data.R index d3aee1468ac..d47832cabb4 100644 --- a/modules/benchmark/R/align_data.R +++ b/modules/benchmark/R/align_data.R @@ -13,108 +13,108 @@ ## Align timeseries data using different functions align_data <- function(model.calc, obvs.calc, var, align_method = "match_timestep") { - fcn <- match.fun(align_method) - + # Put both timestamps in UTC model.calc$posix <- as.POSIXct(lubridate::with_tz(model.calc$posix, "UTC")) - obvs.calc$posix <- as.POSIXct(lubridate::with_tz(obvs.calc$posix, "UTC")) - + obvs.calc$posix <- as.POSIXct(lubridate::with_tz(obvs.calc$posix, "UTC")) + diff.m <- diff(model.calc$posix) mode.m <- diff.m[which.max(tabulate(match(unique(diff.m), diff.m)))] - + diff.o <- diff(obvs.calc$posix) mode.o <- diff.o[which.max(tabulate(match(unique(diff.o), diff.o)))] - - compare <- data.frame( - type = c("m","o"), - diff_mode = c(as.numeric(mode.m), as.numeric(mode.o)), - diff_units = c(units(mode.m),units(mode.o)), - diff_secs = c(as.numeric(mode.m, units = "secs"),as.numeric(mode.o, units = "secs")), - diff_days = c(as.numeric(mode.m, units = "days"),as.numeric(mode.o, units = "days")), - diff_time = c(mode.m, mode.o), + + compare <- data.frame( + type = c("m", "o"), + diff_mode = c(as.numeric(mode.m), as.numeric(mode.o)), + diff_units = c(units(mode.m), units(mode.o)), + diff_secs = c(as.numeric(mode.m, units = "secs"), as.numeric(mode.o, units = "secs")), + diff_days = c(as.numeric(mode.m, units = "days"), as.numeric(mode.o, units = "days")), + diff_time = c(mode.m, mode.o), stringsAsFactors = FALSE ) - + # Determine if time step units are different, if so which is the coarser # This will just be redundant if they are the same - + coarse <- which.max(compare$diff_secs) - fine <- 2 %/% coarse + fine <- 2 %/% coarse coarse.unit <- compare$diff_units[coarse] - + # Round to the larger time step (experimental) # Note: Oddly, the second argument to `round()` has to be unnamed here # because of an inconsistency in base R's rounding methods. # The generic `round()` expects the second arg to be called `digits`, # but then dispatches to `round.POSIXt`, which takes `units`. 
- obvs.calc$round.posix <- as.POSIXct(round(obvs.calc$posix, coarse.unit)) + obvs.calc$round.posix <- as.POSIXct(round(obvs.calc$posix, coarse.unit)) model.calc$round.posix <- as.POSIXct(round(model.calc$posix, coarse.unit)) - - + + # Determine the overlaping range of dates # Compare the rounded dates because you can't compare dates of different units with range - rng_obvs <- range(unique(obvs.calc$round.posix)) + rng_obvs <- range(unique(obvs.calc$round.posix)) rng_model <- range(unique(model.calc$round.posix)) - rng_dat <- sort(c(rng_obvs, rng_model))[c(2, 3)] %>% lubridate::with_tz(tzone = "UTC") - + rng_dat <- sort(c(rng_obvs, rng_model))[c(2, 3)] %>% lubridate::with_tz(tzone = "UTC") + # Special case for annual timestep - if(setequal(c(365,366), compare$diff_days[coarse]) | setequal(c(365), compare$diff_days[coarse]) | - setequal(c(366), compare$diff_days[coarse])){ + if (setequal(c(365, 366), compare$diff_days[coarse]) | setequal(c(365), compare$diff_days[coarse]) | + setequal(c(366), compare$diff_days[coarse])) { rng_dat <- lubridate::year(rng_dat) model.calc$round.posix <- lubridate::year(model.calc$round.posix) - obvs.calc$round.posix <- lubridate::year(obvs.calc$round.posix) + obvs.calc$round.posix <- lubridate::year(obvs.calc$round.posix) } - - + + # Subset by date range date_subsets <- list() - date_subsets[["m"]] <- model.calc %>% - filter(rng_dat[1] <= .data$round.posix) %>% - filter(rng_dat[2] >= .data$round.posix) - date_subsets[["o"]] <- obvs.calc %>% - filter(rng_dat[1] <= .data$round.posix) %>% - filter(rng_dat[2] >= .data$round.posix) - + date_subsets[["m"]] <- model.calc %>% + filter(rng_dat[1] <= .data$round.posix) %>% + filter(rng_dat[2] >= .data$round.posix) + date_subsets[["o"]] <- obvs.calc %>% + filter(rng_dat[1] <= .data$round.posix) %>% + filter(rng_dat[2] >= .data$round.posix) + # Additional date range check: the date range of the fine data must be inside # that of the coarse data or the aggregation functions will add an extra day coarse_range_check <- range(date_subsets[[compare$type[coarse]]]$round.posix) - date_subsets[[compare$type[fine]]] <- date_subsets[[compare$type[fine]]] %>% - filter(coarse_range_check[1] <= .data$round.posix) %>% + date_subsets[[compare$type[fine]]] <- date_subsets[[compare$type[fine]]] %>% + filter(coarse_range_check[1] <= .data$round.posix) %>% filter(coarse_range_check[2] >= .data$round.posix) - + out1 <- date_subsets[[compare$type[coarse]]] %>% dplyr::select(dplyr::one_of(var)) colnames(out1) <- paste0(colnames(out1), ".", compare$type[coarse]) - - + + args <- list() if (mode.o != mode.m) { - date.coarse <- date_subsets[[compare$type[coarse]]]$round.posix - date.fine <- date_subsets[[compare$type[fine]]]$round.posix - - data.fine <- date_subsets[[compare$type[fine]]] %>% dplyr::select(dplyr::one_of(var)) + date.fine <- date_subsets[[compare$type[fine]]]$round.posix + + data.fine <- date_subsets[[compare$type[fine]]] %>% dplyr::select(dplyr::one_of(var)) colnames(data.fine) <- paste0(colnames(data.fine), ".", compare$type[fine]) - - out2 <- apply(data.fine, 2, - function(x){ - args$date.coarse = date.coarse - args$date.fine = date.fine - args$data.fine = x - do.call(fcn, args) - }) + + out2 <- apply( + data.fine, 2, + function(x) { + args$date.coarse <- date.coarse + args$date.fine <- date.fine + args$data.fine <- x + do.call(fcn, args) + } + ) dat <- cbind(out1, out2) dat$posix <- date.coarse - } else if (mode.o == mode.m) { # here coarse and fine are just index values but but the time steps are the same size - + out2 <- 
date_subsets[[compare$type[fine]]] %>% dplyr::select(dplyr::one_of(var)) colnames(out2) <- paste0(colnames(out2), ".", compare$type[fine]) dat <- cbind(out1, out2) - dat$posix <- date_subsets[[compare$type[fine]]] %>% dplyr::select(dplyr::one_of("round.posix")) %>% .[,1] - + dat$posix <- date_subsets[[compare$type[fine]]] %>% + dplyr::select(dplyr::one_of("round.posix")) %>% + .[, 1] } - + return(dat) -} # align_data \ No newline at end of file +} # align_data diff --git a/modules/benchmark/R/align_data_to_data_pft.R b/modules/benchmark/R/align_data_to_data_pft.R index bbef20356a1..0dd5eba4f31 100644 --- a/modules/benchmark/R/align_data_to_data_pft.R +++ b/modules/benchmark/R/align_data_to_data_pft.R @@ -1,26 +1,26 @@ ################################################################# -#'align_data_to_data_pft -#'@details +#' align_data_to_data_pft +#' @details #' Aligns vectors of Plant Fucntional Typed and species. -#' Can align: +#' Can align: #' - two vectors of plant functional types (pft's) if a custom map is provided #' - a list of species (usda, fia, or latin_name format) to a plant functional type #' - a list of species in a custom format, with a table mapping it to bety_species_id's -#' -#' Will return a list of what was originally provided, bety_species_codes if possible, +#' +#' Will return a list of what was originally provided, bety_species_codes if possible, #' and an aligned output. Because some alignement is order-sensitive, alignment based on observation_one -#' and observation_two are both provided. +#' and observation_two are both provided. #' #' #' @param con database connection #' @param observation_one a vector of plant functional types, or species #' @param observation_two another vector of plant functional types, or species #' @param custom_table a table that either maps two pft's to one another or maps custom species codes to bety id codes. -#' In the second case, must be passable to match_species_id. +#' In the second case, must be passable to match_species_id. #' @param format_one The output of query.format.vars() of observation one of the form output$vars$bety_names #' @param format_two The output of query.format.vars() of observation two of the form output$vars$bety_names #' @param subset_is_ok When aligning two species lists, this allows for alignment when species lists aren't identical. -#' set to FALSE by default. +#' set to FALSE by default. 
#' @return \code{list} containing the following columns: #' \describe{ #' \item{\code{$original}}{Will spit back out original vectors pre-alignment} @@ -31,134 +31,118 @@ #' } #' @author Tempest McCabe #' @examples \dontrun{ -#' -#' observation_one<-c("AMCA3","AMCA3","AMCA3","AMCA3") -#' observation_two<-c("a", "b", "a", "a") -#' -#' table<-list() -#' table$plant_functional_type_one<- c("AMCA3","AMCA3","ARHY", "ARHY") -#' table$plant_functional_type_two<- c('a','a','b', 'b') # PFT groupings -#' table<-as.data.frame(table) #' -#' format_one<-"species_USDA_symbol" -#' format_two<-"plant_functional_type" -#' +#' observation_one <- c("AMCA3", "AMCA3", "AMCA3", "AMCA3") +#' observation_two <- c("a", "b", "a", "a") +#' +#' table <- list() +#' table$plant_functional_type_one <- c("AMCA3", "AMCA3", "ARHY", "ARHY") +#' table$plant_functional_type_two <- c("a", "a", "b", "b") # PFT groupings +#' table <- as.data.frame(table) +#' +#' format_one <- "species_USDA_symbol" +#' format_two <- "plant_functional_type" +#' #' aligned <- align_data_to_data_pft( -#' con = con, -#' observation_one = observation_one, observation_two = observation_two, -#' format_one = format_one, format_two = format_two, -#' custom_table = table) +#' con = con, +#' observation_one = observation_one, observation_two = observation_two, +#' format_one = format_one, format_two = format_two, +#' custom_table = table +#' ) #' } #' @export -align_data_to_data_pft<-function(con, observation_one, observation_two, custom_table=NULL, format_one, format_two, subset_is_ok=FALSE){ - - translation_table<-NULL - bety_codes_one<-NA - bety_codes_two<-NA - bety_species_intersection<-NA - - if(check_if_species_list(format_one) && check_if_species_list(format_two)){ #Both are lists of species - - if (get_species_list_standard(format_one) == "custom" | get_species_list_standard(format_two) == "custom"){translation_table<-custom_table} - - bety_codes_one<-PEcAn.data.land::match_species_id(input_codes=observation_one, format_name= get_species_list_standard(format_one),translation_table = translation_table, bety=con) - bety_codes_two<-PEcAn.data.land::match_species_id(input_codes=observation_two, format_name= get_species_list_standard(format_two), translation_table = translation_table,bety=con) - - if(setequal(bety_codes_one, bety_codes_two)){ #check if identical lists. 
- - aligned_by_one<-bety_codes_two #Since they are identical, this has the same names as one, but in the order of two - aligned_by_two<-bety_codes_one - - }else if(subset_is_ok){ - - #for the case where intersections are ok, making columns where a species is present in on list but not the other NA's - - bety_species_intersection<-dplyr::intersect(bety_codes_one$bety_species_id,bety_codes_two$bety_species_id) - - bety_codes_one$bety_species_id[bety_codes_one$bety_species!=bety_species_intersection]<-NA - bety_codes_two$bety_species_id[bety_codes_two$bety_species!=bety_species_intersection]<-NA - - aligned_by_one<-bety_codes_two$bety_species_id - aligned_by_two<-bety_codes_one$bety_species_id - - - }else{ +align_data_to_data_pft <- function(con, observation_one, observation_two, custom_table = NULL, format_one, format_two, subset_is_ok = FALSE) { + translation_table <- NULL + bety_codes_one <- NA + bety_codes_two <- NA + bety_species_intersection <- NA + + if (check_if_species_list(format_one) && check_if_species_list(format_two)) { # Both are lists of species + + if (get_species_list_standard(format_one) == "custom" | get_species_list_standard(format_two) == "custom") { + translation_table <- custom_table + } + + bety_codes_one <- PEcAn.data.land::match_species_id(input_codes = observation_one, format_name = get_species_list_standard(format_one), translation_table = translation_table, bety = con) + bety_codes_two <- PEcAn.data.land::match_species_id(input_codes = observation_two, format_name = get_species_list_standard(format_two), translation_table = translation_table, bety = con) + + if (setequal(bety_codes_one, bety_codes_two)) { # check if identical lists. + + aligned_by_one <- bety_codes_two # Since they are identical, this has the same names as one, but in the order of two + aligned_by_two <- bety_codes_one + } else if (subset_is_ok) { + # for the case where intersections are ok, making columns where a species is present in one list but not the other NA's + + bety_species_intersection <- dplyr::intersect(bety_codes_one$bety_species_id, bety_codes_two$bety_species_id) + + bety_codes_one$bety_species_id[bety_codes_one$bety_species != bety_species_intersection] <- NA + bety_codes_two$bety_species_id[bety_codes_two$bety_species != bety_species_intersection] <- NA + + aligned_by_one <- bety_codes_two$bety_species_id + aligned_by_two <- bety_codes_one$bety_species_id + } else { PEcAn.logger::logger.warn("These observations cannot be aligned, as they have different species lists. Returning NULL. Check species lists, or set 'subset_is_ok' to TRUE.
") return(NULL) } - - }else if(check_if_species_list(format_one) && !check_if_species_list(format_two)){ - - if(is.null(custom_table)){ - + } else if (check_if_species_list(format_one) && !check_if_species_list(format_two)) { + if (is.null(custom_table)) { PEcAn.logger::logger.severe("Please provide custom_table") - - }else if (!is.null(custom_table)){ - - if(check_if_legal_table(custom_table, observation_one, observation_two)){ - - if (get_species_list_standard(format_one)=="custom"){translation_table<-custom_table} - - bety_codes_one<-PEcAn.data.land::match_species_id(input_codes=observation_one, format_name= get_species_list_standard(format_one),translation_table = translation_table, bety=con) - - aligned_by_one<-align_by_first_observation(observation_one,observation_two, custom_table) - aligned_by_two<-align_by_first_observation(observation_two,observation_one, custom_table) - - - }else{ + } else if (!is.null(custom_table)) { + if (check_if_legal_table(custom_table, observation_one, observation_two)) { + if (get_species_list_standard(format_one) == "custom") { + translation_table <- custom_table + } + + bety_codes_one <- PEcAn.data.land::match_species_id(input_codes = observation_one, format_name = get_species_list_standard(format_one), translation_table = translation_table, bety = con) + + aligned_by_one <- align_by_first_observation(observation_one, observation_two, custom_table) + aligned_by_two <- align_by_first_observation(observation_two, observation_one, custom_table) + } else { PEcAn.logger::logger.severe("custom_table provided does not correctly map plant_functional_type_one to plant_functional_type_two. One or more rows are mapped to multiple plant functional types.") - } + } } - - }else if(!check_if_species_list(format_one) && check_if_species_list(format_two)){ - - if(is.null(custom_table)){PEcAn.logger::logger.severe("Please provide custom_table")}else if (!is.null(custom_table)) - { - if(check_if_legal_table(custom_table, observation_one, observation_two)){ - - if (get_species_list_standard(format_two)=="custom"){ - translation_table<-custom_table + } else if (!check_if_species_list(format_one) && check_if_species_list(format_two)) { + if (is.null(custom_table)) { + PEcAn.logger::logger.severe("Please provide custom_table") + } else if (!is.null(custom_table)) { + if (check_if_legal_table(custom_table, observation_one, observation_two)) { + if (get_species_list_standard(format_two) == "custom") { + translation_table <- custom_table } - - bety_codes_two<-PEcAn.data.land::match_species_id(input_codes=observation_two, format_name= get_species_list_standard(format_two),translation_table = translation_table,bety=con) - - aligned_by_one<-align_by_first_observation(observation_one,observation_two, custom_table) - aligned_by_two<-align_by_first_observation(observation_two,observation_one, custom_table) - - }else{ + + bety_codes_two <- PEcAn.data.land::match_species_id(input_codes = observation_two, format_name = get_species_list_standard(format_two), translation_table = translation_table, bety = con) + + aligned_by_one <- align_by_first_observation(observation_one, observation_two, custom_table) + aligned_by_two <- align_by_first_observation(observation_two, observation_one, custom_table) + } else { PEcAn.logger::logger.severe("custom_table provided does not correctly map plant_functional_type_one to plant_functional_type_two. 
One or more rows are mapped to multiple plant functional types.") - } + } } - + return(aligned_species_list) - - }else if(check_if_list_of_pfts(format_one) && (check_if_list_of_pfts(format_two))){ - - if(is.null(custom_table)){PEcAn.logger::logger.severe("Please provide custom_table")}else if (!is.null(custom_table)) - { - if(check_if_legal_table(custom_table, observation_one, observation_two)){ - - aligned_by_one<-align_by_first_observation(observation_one,observation_two, custom_table) - aligned_by_two<-align_by_first_observation(observation_two,observation_one, custom_table) - - }else{ + } else if (check_if_list_of_pfts(format_one) && (check_if_list_of_pfts(format_two))) { + if (is.null(custom_table)) { + PEcAn.logger::logger.severe("Please provide custom_table") + } else if (!is.null(custom_table)) { + if (check_if_legal_table(custom_table, observation_one, observation_two)) { + aligned_by_one <- align_by_first_observation(observation_one, observation_two, custom_table) + aligned_by_two <- align_by_first_observation(observation_two, observation_one, custom_table) + } else { PEcAn.logger::logger.severe("custom_table provided does not correctly map plant_functional_type_one to plant_functional_type_two. One or more rows are mapped to multiple plant functional types.") - } + } } - - }else{ + } else { PEcAn.logger::logger.severe("PFTs are not in the correct format. Observations must have variables compatible with check_if_species_list(), or use the 'plant_functional_type' variable") } - - aligned_species_list<-list() - aligned_species_list$bety_species_id$observation_one<-bety_codes_one - aligned_species_list$bety_species_id$observation_two<-bety_codes_two - aligned_species_list$original$observation_one<-observation_one - aligned_species_list$original$observation_two<-observation_two - aligned_species_list$aligned$aligned_by_observation_one<-aligned_by_one - aligned_species_list$aligned$aligned_by_observation_two<-aligned_by_two - + + aligned_species_list <- list() + aligned_species_list$bety_species_id$observation_one <- bety_codes_one + aligned_species_list$bety_species_id$observation_two <- bety_codes_two + aligned_species_list$original$observation_one <- observation_one + aligned_species_list$original$observation_two <- observation_two + aligned_species_list$aligned$aligned_by_observation_one <- aligned_by_one + aligned_species_list$aligned$aligned_by_observation_two <- aligned_by_two + return(aligned_species_list) - } diff --git a/modules/benchmark/R/align_pft.R b/modules/benchmark/R/align_pft.R index e6d15f04ebe..20b326b50c8 100644 --- a/modules/benchmark/R/align_pft.R +++ b/modules/benchmark/R/align_pft.R @@ -1,16 +1,16 @@ #' Align vectors of Plant Functional Type and species. #' -#'@details -#' Can align: +#' @details +#' Can align: #' - two vectors of plant fucntional types (pft's) if a custom map is provided #' - a list of species (usda, fia, or latin_name format) to a plant fucntional type #' - a list of species in a custom format, with a table mapping it to bety_species_id's -#' -#' Will return a list of what was originally provided, bety_speceis_codes if possible, +#' +#' Will return a list of what was originally provided, bety_speceis_codes if possible, #' and an aligned output. Becuase some alignement is order-sensitive, alignment based on observation_one -#' and observation_two are both provided. +#' and observation_two are both provided. 
#' -#'\code{comparison_type} can be one of the following: +#' \code{comparison_type} can be one of the following: #' \describe{ #' \item{\code{data_to_data}}{Will align lists of pfts and species. Must be assosiated with inputs.} #' \item{\code{data_to_model}}{Not yet implemented} @@ -21,12 +21,12 @@ #' @param con database connection #' @param observation_one a vector of plant fucntional types, or species #' @param observation_two anouther vector of plant fucntional types, or species -#' @param custom_table a table that either maps two pft's to one anouther or maps custom species codes to bety id codes. -#' In the second case, must be passable to match_species_id. +#' @param custom_table a table that either maps two pft's to one anouther or maps custom species codes to bety id codes. +#' In the second case, must be passable to match_species_id. #' @param format_one The output of query.format.vars() of observation one of the form output$vars$bety_names #' @param format_two The output of query.format.vars() of observation two of the form output$vars$bety_names -#' @param subset_is_ok When aligning two species lists, this allows for alignement when species lists aren't identical. -#' set to FALSE by default. +#' @param subset_is_ok When aligning two species lists, this allows for alignement when species lists aren't identical. +#' set to FALSE by default. #' @param comparison_type one of "data_to_model", "data_to_data", or "model_to_model" #' @param ... other arguments, currently ignored #' @@ -40,46 +40,38 @@ #' #' @author Tempest McCabe #' @examples \dontrun{ -#' -#' +#' +#' #' #------------ A species to PFT alignment ----------- -#' observation_one<-c("AMCA3","AMCA3","AMCA3","AMCA3") -#' observation_two<-c("a", "b", "a", "a") # -#' -#' format_one<-"species_USDA_symbol" -#' format_two<-"plant_funtional_type" -#' -#' table<-list() -#' table$plant_functional_type_one<- c("AMCA3","AMCA3","ARHY", "ARHY") -#' table$plant_functional_type_two<- c('a','a','b', 'b') # PFT groupings -#' table<-as.data.frame(table) +#' observation_one <- c("AMCA3", "AMCA3", "AMCA3", "AMCA3") +#' observation_two <- c("a", "b", "a", "a") # +#' +#' format_one <- "species_USDA_symbol" +#' format_two <- "plant_funtional_type" #' -#' -#' aligned<-align_pft(con = con, observation_one = observation_one, observation_two = observation_two, -#' format_one = format_one, format_two = format_two, custom_table = table) +#' table <- list() +#' table$plant_functional_type_one <- c("AMCA3", "AMCA3", "ARHY", "ARHY") +#' table$plant_functional_type_two <- c("a", "a", "b", "b") # PFT groupings +#' table <- as.data.frame(table) +#' +#' +#' aligned <- align_pft( +#' con = con, observation_one = observation_one, observation_two = observation_two, +#' format_one = format_one, format_two = format_two, custom_table = table +#' ) #' } -#' +#' #' @export -align_pft<-function(con, observation_one, observation_two, custom_table=NULL, format_one, format_two, subset_is_ok=FALSE, comparison_type="data_to_data", ...){ - - if(comparison_type == "data_to_model"){ - - #align_data_to_model_pft(settings_one, observations_1) +align_pft <- function(con, observation_one, observation_two, custom_table = NULL, format_one, format_two, subset_is_ok = FALSE, comparison_type = "data_to_data", ...) { + if (comparison_type == "data_to_model") { + # align_data_to_model_pft(settings_one, observations_1) PEcAn.logger::logger.severe("data_to_model alignment not yet implemented. 
Returning NULL.") - - - }else if (comparison_type == "data_to_data"){ - - align_data_to_data_pft(con, observation_one, observation_two, custom_table, format_one, format_two, subset_is_ok=FALSE) - - }else if (comparison_type == "model_to_model"){ - - #align_model_to_model_pft(settings_one, settings_two) + } else if (comparison_type == "data_to_data") { + align_data_to_data_pft(con, observation_one, observation_two, custom_table, format_one, format_two, subset_is_ok = FALSE) + } else if (comparison_type == "model_to_model") { + # align_model_to_model_pft(settings_one, settings_two) PEcAn.logger::logger.severe("model_to_model alignment not yet implemented. Returning NULL.") - - - }else{ + } else { PEcAn.logger::logger.severe("comparison_type must be set to either 'data_to_model', 'data_to_data', or model_to_model") } - } diff --git a/modules/benchmark/R/bm_settings.R b/modules/benchmark/R/bm_settings.R index 0f679bd6dbe..7f3ec680c8a 100644 --- a/modules/benchmark/R/bm_settings.R +++ b/modules/benchmark/R/bm_settings.R @@ -1,6 +1,6 @@ ## Functions for bulding benchmarking settings -##------------------------------------------------------------------------------------------------## +## ------------------------------------------------------------------------------------------------## ##' For each benchmark entry in a (multi)settings object, get run settings using reference run id ##' and add to the settings object ##' @@ -11,22 +11,23 @@ ##' @export ##' @author Betsy Cowdery -read_settings_BRR <- function(settings){ - +read_settings_BRR <- function(settings) { # Check database connection if (is.null(settings$database$bety)) { PEcAn.logger::logger.info("No database settings, can't get run information.") - return (settings) + return(settings) } con <- PEcAn.DB::db.open(settings$database$bety) on.exit(PEcAn.DB::db.close(con), add = TRUE) - BRR <- tbl(con,"reference_runs") %>% + BRR <- tbl(con, "reference_runs") %>% filter(.data$id == settings$benchmarking$reference_run_id) %>% collect() - BRR.settings <- BRR %>% dplyr::pull(settings) %>% unlist() %>% + BRR.settings <- BRR %>% + dplyr::pull(settings) %>% + unlist() %>% XML::xmlToList("pecan") PEcAn.logger::logger.debug(names(BRR.settings)) @@ -35,15 +36,15 @@ read_settings_BRR <- function(settings){ invisible(settings) } -##------------------------------------------------------------------------------------------------## +## ------------------------------------------------------------------------------------------------## ##' @name clean_settings_BRR ##' @title Cleans PEcAn settings file and prepares the settings to be saved in a reference run record in BETY ##' @param inputfile the PEcAn settings file to be used. 
##' @export ##' @author Betsy Cowdery -clean_settings_BRR <- function(inputfile){ - clean <- PEcAn.settings::clean.settings(inputfile,write=FALSE) +clean_settings_BRR <- function(inputfile) { + clean <- PEcAn.settings::clean.settings(inputfile, write = FALSE) if (PEcAn.settings::is.MultiSettings(clean)) { PEcAn.logger::logger.error("Cannot run clean settings for a mutlisettings object") # For now } @@ -65,8 +66,8 @@ clean_settings_BRR <- function(inputfile){ clean$model$binary <- NULL # Remove all file paths - for(input in names(clean$run$inputs)){ - if("path" %in% names(clean$run$inputs[[input]])){ + for (input in names(clean$run$inputs)) { + if ("path" %in% names(clean$run$inputs[[input]])) { clean$run$inputs[[input]][["path"]] <- NULL } } @@ -76,7 +77,7 @@ clean_settings_BRR <- function(inputfile){ } -##------------------------------------------------------------------------------------------------## +## ------------------------------------------------------------------------------------------------## ##' @name add_workflow_info ##' @title Add workflow specific info to settings list for benchmarking ##' @param settings settings or multisettings object @@ -85,15 +86,19 @@ clean_settings_BRR <- function(inputfile){ ##' @export ##' @author Betsy Cowdery -add_workflow_info <- function(settings, bety){ +add_workflow_info <- function(settings, bety) { if (PEcAn.settings::is.MultiSettings(settings)) { return(PEcAn.settings::papply(settings, add_workflow_id)) } - if(!as.logical(settings$benchmarking$new_run)){ - settings$workflow$id <- tbl(bety,"ensembles") %>% + if (!as.logical(settings$benchmarking$new_run)) { + settings$workflow$id <- tbl(bety, "ensembles") %>% dplyr::filter(.data$id == settings$benchmarking$ensemble_id) %>% - dplyr::select("workflow_id") %>% dplyr::collect %>% .[[1]] - wf <- tbl(bety, 'workflows') %>% dplyr::filter(.data$id == settings$workflow$id) %>% collect() + dplyr::select("workflow_id") %>% + dplyr::collect %>% + .[[1]] + wf <- tbl(bety, "workflows") %>% + dplyr::filter(.data$id == settings$workflow$id) %>% + collect() settings$rundir <- file.path(wf$folder, "run") settings$modeloutdir <- file.path(wf$folder, "out") settings$outdir <- wf$folder @@ -101,26 +106,26 @@ add_workflow_info <- function(settings, bety){ return(settings) } -##------------------------------------------------------------------------------------------------## +## ------------------------------------------------------------------------------------------------## ##' @name bm_settings2pecan_settings ##' @title Move benchmarking settings back in to original pecan settings object ##' @param bm.settings settings or multisettings object ##' @export ##' @author Betsy Cowdery -bm_settings2pecan_settings <- function(bm.settings){ +bm_settings2pecan_settings <- function(bm.settings) { if (PEcAn.settings::is.MultiSettings(bm.settings)) { return(PEcAn.settings::papply(bm.settings, bm_settings2pecan_settings)) } out <- bm.settings["reference_run_id"] - for(i in grep("benchmark", names(bm.settings))){ + for (i in grep("benchmark", names(bm.settings))) { print(bm.settings[i]$benchmark$benchmark_id) out <- append(out, list(benchmark_id = bm.settings[i]$benchmark$benchmark_id)) } return(out) } -##------------------------------------------------------------------------------------------------## +## ------------------------------------------------------------------------------------------------## ##' @name check_BRR ##' @title Check whether a run has been registered as a reference run in BETY ##' @param settings_xml 
cleaned settings to be compared with BRR in the database @@ -129,11 +134,13 @@ bm_settings2pecan_settings <- function(bm.settings){ ##' @export ##' @author Betsy Cowdery -check_BRR <- function(settings_xml, con){ +check_BRR <- function(settings_xml, con) { # This is NOT a good way to find matching reference run records # Other options include comparing lists (slow) # more spohisticated PSQL queries # changing the settings field to jsonb - ref_run <- tbl(con, "reference_runs") %>% filter(.data$settings == settings_xml) %>% collect + ref_run <- tbl(con, "reference_runs") %>% + filter(.data$settings == settings_xml) %>% + collect() return(ref_run) } diff --git a/modules/benchmark/R/calc_benchmark.R b/modules/benchmark/R/calc_benchmark.R index 508f6c67b9c..ba1fa1abcb3 100644 --- a/modules/benchmark/R/calc_benchmark.R +++ b/modules/benchmark/R/calc_benchmark.R @@ -1,210 +1,223 @@ ##' Calculate benchmarking statistics ##' ##' For each benchmark id, calculate metrics and update benchmarks_ensemble_scores -##' +##' ##' @param settings settings object describing the run to calculate ##' @param bety database connection ##' @param start_year,end_year time range to read. If NA, these are taken from `settings` -##' @export -##' -##' @author Betsy Cowdery -##' @importFrom dplyr tbl filter rename collect select +##' @export +##' +##' @author Betsy Cowdery +##' @importFrom dplyr tbl filter rename collect select calc_benchmark <- function(settings, bety, start_year = NA, end_year = NA) { - # run.score <- run.success.check(settings) - - if("benchmarking" %in% names(settings)){ - + + if ("benchmarking" %in% names(settings)) { # If "run" is in the list of benchmarking metrics, add run.score record to the database - # How are we dealing with ensemble runs? This is still an issue that has not been dealt with elsewhere in the code. - # For now this design only works with sigle run ensembles. - - ##### This is where calc_benchmarks originally started - + # How are we dealing with ensemble runs? This is still an issue that has not been dealt with elsewhere in the code. + # For now this design only works with single run ensembles.
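For orientation only — a minimal usage sketch, not part of this patch: calc_benchmark() is driven by a checked settings list carrying a benchmarking block plus an open BETY connection; the file path, years, and connection details below are hypothetical.
# Hypothetical sketch of a calc_benchmark() call; path and years are made up.
library(PEcAn.benchmark)
settings <- PEcAn.settings::read.settings("pecan.CHECKED.xml")
bety <- PEcAn.DB::db.open(settings$database$bety)
calc_benchmark(settings, bety, start_year = 2012, end_year = 2016)
PEcAn.DB::db.close(bety)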
+ + ##### This is where calc_benchmarks originally started + # Update benchmarks_ensembles and benchmarks_ensembles_scores tables - - ensemble <- tbl(bety,'ensembles') %>% filter(.data$workflow_id == settings$workflow$id) %>% collect() - + + ensemble <- tbl(bety, "ensembles") %>% + filter(.data$workflow_id == settings$workflow$id) %>% + collect() + # Retrieve/create benchmark ensemble database record - bm.ensemble <- tbl(bety,'benchmarks_ensembles') %>% - filter(.data$reference_run_id == settings$benchmarking$reference_run_id, - .data$ensemble_id %in% ensemble$id, # ensemble$id has more than one element - .data$model_id == settings$model$id) %>% + bm.ensemble <- tbl(bety, "benchmarks_ensembles") %>% + filter( + .data$reference_run_id == settings$benchmarking$reference_run_id, + .data$ensemble_id %in% ensemble$id, # ensemble$id has more than one element + .data$model_id == settings$model$id + ) %>% collect() - - if(dim(bm.ensemble)[1] == 0){ - bm.ensemble <- PEcAn.DB::db.query(paste0("INSERT INTO benchmarks_ensembles", - "(reference_run_id, ensemble_id, model_id, ", - "user_id, citation_id)", - "VALUES(",settings$benchmarking$reference_run_id, - ", ",ensemble$id, - ", ",settings$model$id,", ",settings$info$userid, - ", 1000000001 ) RETURNING *;"), bety) - }else if(dim(bm.ensemble)[1] >1){ + + if (dim(bm.ensemble)[1] == 0) { + bm.ensemble <- PEcAn.DB::db.query(paste0( + "INSERT INTO benchmarks_ensembles", + "(reference_run_id, ensemble_id, model_id, ", + "user_id, citation_id)", + "VALUES(", settings$benchmarking$reference_run_id, + ", ", ensemble$id, + ", ", settings$model$id, ", ", settings$info$userid, + ", 1000000001 ) RETURNING *;" + ), bety) + } else if (dim(bm.ensemble)[1] > 1) { PEcAn.logger::logger.error("Duplicate record entries in benchmarks_ensembles") } - + # --------------------------------------------------------------------------------------------- # # Setup - + site <- PEcAn.DB::query.site(settings$run$site$id, bety) model_run <- dir(settings$modeloutdir, full.names = TRUE, include.dirs = TRUE)[1] # How are we dealing with ensemble runs? Right now I've hardcoded to select the first run. - + # All benchmarking records for the given benchmarking ensemble id - # The benchmark entries returned from this query will include all previous - # benchmarks that have ever been done for the ensemble id. - # For now all benchmarks will be (re)calculated. + # The benchmark entries returned from this query will include all previous + # benchmarks that have ever been done for the ensemble id. + # For now all benchmarks will be (re)calculated. # This is could become problematic if previous benchmarks were - # calculated with multiple inputs, which would mean that all of that data - # would need to be loaded and aligned again. - - bms <- tbl(bety,'benchmarks') %>% dplyr::rename(benchmark_id = "id") %>% - dplyr::left_join(tbl(bety, "benchmarks_benchmarks_reference_runs"), by="benchmark_id") %>% - dplyr::filter(.data$reference_run_id == settings$benchmarking$reference_run_id) %>% + # calculated with multiple inputs, which would mean that all of that data + # would need to be loaded and aligned again. 
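Aside, a minimal sketch of the query idiom this function relies on throughout: compose a lazy query with dplyr's tbl() and filter() against the BETY connection, then materialize it with collect(); the connection object and workflow id here are assumptions.
# Sketch of the tbl()/filter()/collect() idiom; `bety` is an open BETY
# connection and the workflow id is hypothetical.
library(dplyr)
ens <- tbl(bety, "ensembles") %>%
  filter(.data$workflow_id == 99000000001) %>%
  collect()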
+ + bms <- tbl(bety, "benchmarks") %>% + dplyr::rename(benchmark_id = "id") %>% + dplyr::left_join(tbl(bety, "benchmarks_benchmarks_reference_runs"), by = "benchmark_id") %>% + dplyr::filter(.data$reference_run_id == settings$benchmarking$reference_run_id) %>% dplyr::select(dplyr::one_of("benchmark_id", "input_id", "site_id", "variable_id", "reference_run_id")) %>% dplyr::collect() %>% dplyr::filter(.data$benchmark_id %in% unlist(settings$benchmarking[which(names(settings$benchmarking) == "benchmark_id")])) - + var.ids <- bms$variable_id - + # --------------------------------------------------------------------------------------------- # # Determine how many data sets inputs are associated with the benchmark id's - # bm.ids are split up in to groups according to their input data. - # So that the input data is only loaded once. - + # bm.ids are split up in to groups according to their input data. + # So that the input data is only loaded once. + results <- list() - + # input.id = unique(bms$input_id) # For testing for (input.id in unique(bms$input_id)) { - # Create directory that will hold benchmarking results bm_dir <- file.path(dirname(dirname(model_run)), "benchmarking", input.id) dir.create(dirname(bm_dir)) dir.create(bm_dir) - + bm.ids <- bms$benchmark_id[which(bms$input_id == input.id)] data.path <- PEcAn.DB::query.file.path(input.id, settings$host$name, bety) - format_full <- format <- PEcAn.DB::query.format.vars(input.id = input.id, bety, format.id = NA, var.ids=var.ids) - + format_full <- format <- PEcAn.DB::query.format.vars(input.id = input.id, bety, format.id = NA, var.ids = var.ids) + # ---- LOAD INPUT DATA ---- # - + time.row <- format$time.row vars.used.index <- setdiff(seq_along(format$vars$variable_id), format$time.row) - - if(is.na(start_year)) start_year <- lubridate::year(settings$run$start.date) - if(is.na(end_year)) end_year <- lubridate::year(settings$run$end.date) - + + if (is.na(start_year)) start_year <- lubridate::year(settings$run$start.date) + if (is.na(end_year)) end_year <- lubridate::year(settings$run$end.date) + obvs <- load_data(data.path, format, start_year = start_year, end_year = end_year, site, vars.used.index, time.row) - dat_vars <- format$vars$pecan_name # IF : is this line redundant? + dat_vars <- format$vars$pecan_name # IF : is this line redundant? obvs_full <- obvs # ---- LOAD MODEL DATA ---- # - - #model_vars <- format$vars$pecan_name[-time.row] # IF : what will happen when time.row is NULL? + + # model_vars <- format$vars$pecan_name[-time.row] # IF : what will happen when time.row is NULL? 
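Aside, illustration only: the `format` object used below comes from PEcAn.DB::query.format.vars(); a trimmed, hypothetical version showing just the fields this file touches (file name, units, and ids are made up).
# Hypothetical, minimal `format` list; real objects come from
# PEcAn.DB::query.format.vars() and carry more rows and columns.
format <- list(
  file_name = "AmeriFlux.level2.h.nc",
  mimetype = "application/x-netcdf",
  header = 1,
  skip = 0,
  na.strings = "-9999",
  time.row = NULL,
  vars = data.frame(
    bety_name = "NEE",
    variable_id = 1,
    input_name = "NEE_flux",
    input_units = "umol m-2 s-1",
    storage_type = "",
    pecan_name = "NEE",
    pecan_units = "kg C m-2 s-1",
    stringsAsFactors = FALSE
  )
)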
model_vars <- format$vars$pecan_name # time.row is NULL - # For example 'AmeriFlux.level2.h.nc' format (38) has time vars year-day-hour listed, + # For example 'AmeriFlux.level2.h.nc' format (38) has time vars year-day-hour listed, # but storage type column is empty and it should be because in load_netcdf we extract # the time from netcdf files using the time dimension we can remove time variables from # this format's related variables list or can hardcode 'time.row=NULL' in load_x_netcdf function - read.model <- PEcAn.utils::read.output(runid = basename(model_run), - outdir = model_run, - start.year = start_year, - end.year = end_year, - c("time", model_vars), dataframe = TRUE) - + read.model <- PEcAn.utils::read.output( + runid = basename(model_run), + outdir = model_run, + start.year = start_year, + end.year = end_year, + c("time", model_vars), dataframe = TRUE + ) + model <- read.model vars.used.index <- which(format$vars$pecan_name %in% names(model)[!names(model) == "time"]) model_full <- model - + # ---- CALCULATE BENCHMARK SCORES ---- # - + results.list <- list() dat.list <- list() var.list <- c() - - + + # Loop over benchmark ids # i = 1 # for testing for (i in seq_along(bm.ids)) { bm <- PEcAn.DB::db.query(paste("SELECT * from benchmarks where id =", bm.ids[i]), bety) - metrics <- PEcAn.DB::db.query(paste("SELECT m.name, m.id from metrics as m", - "JOIN benchmarks_metrics as b ON m.id = b.metric_id", - "WHERE b.benchmark_id = ", bm.ids[i]), bety) - #"run" metric needs to be removed from metrics so it isn't computed twice + metrics <- PEcAn.DB::db.query(paste( + "SELECT m.name, m.id from metrics as m", + "JOIN benchmarks_metrics as b ON m.id = b.metric_id", + "WHERE b.benchmark_id = ", bm.ids[i] + ), bety) + # "run" metric needs to be removed from metrics so it isn't computed twice var <- dplyr::filter(format$vars, .data$variable_id == bm$variable_id)[, "pecan_name"] var.list <- c(var.list, var) - + obvs.calc <- obvs_full %>% dplyr::select(dplyr::one_of(c("posix", var))) - obvs.calc[,var] <- as.numeric(obvs.calc[,var]) + obvs.calc[, var] <- as.numeric(obvs.calc[, var]) model.calc <- model_full %>% dplyr::select(dplyr::one_of(c("posix", var))) - + # Check that the variables actually got loaded, otherwise don't send to calc_metrics - - if(!(var %in% names(obvs.calc))|!(var %in% names(model.calc))){ - PEcAn.logger::logger.warn(paste0("Load did not work for ",var,". No metrics will be calculated.")) + + if (!(var %in% names(obvs.calc)) | !(var %in% names(model.calc))) { + PEcAn.logger::logger.warn(paste0("Load did not work for ", var, ". 
No metrics will be calculated.")) next } - + # TODO: If the scores have already been calculated, don't redo - ensemble.id = bm.ensemble$ensemble_id # this is just to make debugging easier - - out.calc_metrics <- calc_metrics(model.calc, - obvs.calc, - var, - metrics, - ensemble.id, - bm_dir) - - for(metric.id in metrics$id){ - metric.name <- dplyr::filter(metrics,.data$id == metric.id)[["name"]] - score <- out.calc_metrics[["benchmarks"]] %>% - dplyr::filter(.data$metric == metric.name) %>% + ensemble.id <- bm.ensemble$ensemble_id # this is just to make debugging easier + + out.calc_metrics <- calc_metrics( + model.calc, + obvs.calc, + var, + metrics, + ensemble.id, + bm_dir + ) + + for (metric.id in metrics$id) { + metric.name <- dplyr::filter(metrics, .data$id == metric.id)[["name"]] + score <- out.calc_metrics[["benchmarks"]] %>% + dplyr::filter(.data$metric == metric.name) %>% dplyr::select(score) - + # Update scores in the database - + score.entry <- tbl(bety, "benchmarks_ensembles_scores") %>% dplyr::filter(.data$benchmark_id == bm.ids[i]) %>% dplyr::filter(.data$benchmarks_ensemble_id == bm.ensemble$id) %>% - dplyr::filter(.data$metric_id == metric.id) %>% + dplyr::filter(.data$metric_id == metric.id) %>% dplyr::collect() - - # If the score is already in the database, should check if it is the same as the calculated - # score. But this requires a well written regular expression since it can be matching text. - - if(dim(score.entry)[1] == 0){ + + # If the score is already in the database, should check if it is the same as the calculated + # score. But this requires a well written regular expression since it can be matching text. + + if (dim(score.entry)[1] == 0) { PEcAn.DB::db.query(paste0( "INSERT INTO benchmarks_ensembles_scores", "(score, benchmarks_ensemble_id, benchmark_id, metric_id) VALUES ", - "('",score,"',",bm.ensemble$id,", ",bm$id,",",metric.id,")"), bety) - }else if(dim(score.entry)[1] >1){ + "('", score, "',", bm.ensemble$id, ", ", bm$id, ",", metric.id, ")" + ), bety) + } else if (dim(score.entry)[1] > 1) { PEcAn.logger::logger.error("Duplicate record entries in scores") } } results.list <- append(results.list, list(out.calc_metrics[["benchmarks"]])) dat.list <- append(dat.list, list(out.calc_metrics[["dat"]])) - } #end loop over benchmark ids - + } # end loop over benchmark ids + table.filename <- file.path(bm_dir, paste("benchmark.scores", var, bm.ensemble$ensemble_id, "pdf", sep = ".")) grDevices::pdf(file = table.filename) gridExtra::grid.table(do.call(rbind, results.list)) grDevices::dev.off() - + var.names <- c() - for(k in seq_along(dat.list)){ - var.names <- c(var.names,unlist(strsplit(names(dat.list[[k]])[grep("[.]m", names(dat.list[[k]]))],"[.]"))[1]) # This is horrifying. Sorry future self. + for (k in seq_along(dat.list)) { + var.names <- c(var.names, unlist(strsplit(names(dat.list[[k]])[grep("[.]m", names(dat.list[[k]]))], "[.]"))[1]) # This is horrifying. Sorry future self. 
} names(dat.list) <- var.names - - result.out <- list(bench.results = do.call(rbind, results.list), - data.path = data.path, - format = format_full$vars, - model = model_full, - obvs = obvs_full, - aligned.dat = dat.list) - save(result.out, file = file.path(bm_dir,"benchmarking.output.Rdata")) - + + result.out <- list( + bench.results = do.call(rbind, results.list), + data.path = data.path, + format = format_full$vars, + model = model_full, + obvs = obvs_full, + aligned.dat = dat.list + ) + save(result.out, file = file.path(bm_dir, "benchmarking.output.Rdata")) + results <- append(results, list(result.out)) # For testing } # end loop over input ids diff --git a/modules/benchmark/R/calc_metrics.R b/modules/benchmark/R/calc_metrics.R index d388563cf13..a146f76af92 100644 --- a/modules/benchmark/R/calc_metrics.R +++ b/modules/benchmark/R/calc_metrics.R @@ -3,47 +3,46 @@ ##' @export ##' @param model.calc model data ##' @param obvs.calc observational data -##' @param var variables to be used +##' @param var variables to be used ##' @param metrics metrics to be used ##' @param ensemble.id id of ensemble run ##' @param bm_dir directory where benchmarking outputs will be saved -##' -##' +##' +##' ##' @author Betsy Cowdery calc_metrics <- function(model.calc, obvs.calc, var, metrics, ensemble.id, bm_dir) { - model.calc <- zoo::na.trim(model.calc, sides = "both", is.na = "any") obvs.calc <- zoo::na.trim(obvs.calc, sides = "both", is.na = "any") - + # Remove leading and trailing NA's (not the same as na.omit) dat <- align_data(model.calc, obvs.calc, var, align_method = "mean_over_larger_timestep") - # Make sure that anything that comes through align.data as NA doesn't get included. This is because there may be missing data. We may not want to do this automatically but rather have this as an option. + # Make sure that anything that comes through align.data as NA doesn't get included. This is because there may be missing data. We may not want to do this automatically but rather have this as an option. 
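Aside, a minimal sketch of the metric dispatch used below: each metric is looked up by name as metric_<name>() and applied to a data frame with model, obvs, and time columns; the values are made up and PEcAn.benchmark is assumed to be attached so metric_RMSE() is visible.
# Hypothetical dispatch example mirroring the loop over metrics below.
library(PEcAn.benchmark)
metric_dat <- data.frame(
  model = c(1.0, 2.1, 2.9),
  obvs = c(1.2, 2.0, 3.2),
  time = as.POSIXct("2016-01-01", tz = "UTC") + (0:2) * 86400
)
fcn <- paste0("metric_", "RMSE") # -> "metric_RMSE"
do.call(fcn, list(metric_dat)) # equivalent to metric_RMSE(metric_dat)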
# dat <- dat[apply(dat,1, function(x) all(!is.na(x))),] - + results <- as.data.frame(matrix(NA, nrow = length(metrics$name), ncol = 3)) colnames(results) <- c("metric", "variable", "score") results$metric <- metrics$name - + metric_dat <- dat[, c(paste(var, c("m", "o"), sep = "."), "posix")] colnames(metric_dat) <- c("model", "obvs", "time") - + for (m in seq_along(metrics$name)) { - fcn <- paste0("metric_", metrics$name[m]) - results[m,"metric"] <- metrics$name[m] - results[m,"variable"] <- var - + results[m, "metric"] <- metrics$name[m] + results[m, "variable"] <- var + if (utils::tail(unlist(strsplit(fcn, "_")), 1) == "plot") { - filename <- file.path(bm_dir, - paste("benchmark", metrics$name[m], var, ensemble.id, "pdf", sep = ".")) + filename <- file.path( + bm_dir, + paste("benchmark", metrics$name[m], var, ensemble.id, "pdf", sep = ".") + ) do.call(fcn, args <- list(metric_dat, var, filename)) - results[m,"score"] <- filename + results[m, "score"] <- filename } else { - results[m,"score"] <- as.character(do.call(fcn, args <- list(metric_dat, var))) + results[m, "score"] <- as.character(do.call(fcn, args <- list(metric_dat, var))) } - - } #end loop over metrics + } # end loop over metrics return(list(benchmarks = results, dat = dat)) } # calc_metrics diff --git a/modules/benchmark/R/check_if_legal_table.R b/modules/benchmark/R/check_if_legal_table.R index 126b231b6b6..126645f9c21 100644 --- a/modules/benchmark/R/check_if_legal_table.R +++ b/modules/benchmark/R/check_if_legal_table.R @@ -5,74 +5,70 @@ #' 1. is formated correctly #' 2. is complete (has all of the species/pft's in both observations) #' 3. is condense-able (Could be represented as a hierachry) -#' +#' #' @param table a table that either maps two pft's to one anouther or maps custom species codes to bety id codes. #' In the second case, must be passable to match_species_id. #' @param observation_one a vector of plant functional types, or species #' @param observation_two anouther vector of plant functional types, or species #' -#' @return \code{boolean} +#' @return \code{boolean} #' @author Tempest McCabe -check_if_legal_table<-function(table, observation_one, observation_two){ - all_there<-TRUE - names<-names(table) - if(!"plant_functional_type_one" %in% names|!"plant_functional_type_two" %in% names ){ - PEcAn.logger::logger.severe("Custom table provided does not use correct column names. Requires both 'plant_functional_type_one', and 'plant_functional_type_two'. +check_if_legal_table <- function(table, observation_one, observation_two) { + all_there <- TRUE + names <- names(table) + if (!"plant_functional_type_one" %in% names | !"plant_functional_type_two" %in% names) { + PEcAn.logger::logger.severe("Custom table provided does not use correct column names. Requires both 'plant_functional_type_one', and 'plant_functional_type_two'. 
Column names are currently", names(table)) - }else{ - missing<-list() - for(h in seq_along(observation_one)){ - if(!observation_one[h] %in% table$plant_functional_type_one){ - all_there<-FALSE; missing<-c(missing,observation_one[h]) - } + } else { + missing <- list() + for (h in seq_along(observation_one)) { + if (!observation_one[h] %in% table$plant_functional_type_one) { + all_there <- FALSE + missing <- c(missing, observation_one[h]) + } } - for(h in seq_along(observation_two)){ - if(!observation_two[h] %in% table$plant_functional_type_two){ - all_there<-FALSE; missing<-c(missing,observation_two[h]) - } + for (h in seq_along(observation_two)) { + if (!observation_two[h] %in% table$plant_functional_type_two) { + all_there <- FALSE + missing <- c(missing, observation_two[h]) + } } - if(all_there){ - is_legal_table<-TRUE - pft_1<-as.character(unique(table$plant_functional_type_one)) - pft_2<-as.character(unique(table$plant_functional_type_two)) - - for(i in seq_along(pft_1)){ - aggregated_1<-FALSE - aggregated_2<-FALSE - - subset<-subset(table, table$plant_functional_type_one == pft_1[i]) - - length_of_pft_1_uniques_1<-length(as.character(unique(subset$plant_functional_type_one))) - length_of_pft_2_uniques_1<-length(as.character(unique(subset$plant_functional_type_two))) - - if(length_of_pft_2_uniques_1>1 | length_of_pft_1_uniques_1>1){ - aggregated_1<- TRUE + if (all_there) { + is_legal_table <- TRUE + pft_1 <- as.character(unique(table$plant_functional_type_one)) + pft_2 <- as.character(unique(table$plant_functional_type_two)) + + for (i in seq_along(pft_1)) { + aggregated_1 <- FALSE + aggregated_2 <- FALSE + + subset <- subset(table, table$plant_functional_type_one == pft_1[i]) + + length_of_pft_1_uniques_1 <- length(as.character(unique(subset$plant_functional_type_one))) + length_of_pft_2_uniques_1 <- length(as.character(unique(subset$plant_functional_type_two))) + + if (length_of_pft_2_uniques_1 > 1 | length_of_pft_1_uniques_1 > 1) { + aggregated_1 <- TRUE + } + + for (j in seq_along(unique(subset$plant_functional_type_two))) { + subset_2 <- subset(table, table$plant_functional_type_two == as.character(subset$plant_functional_type_two[j])) + length_of_pft_1_uniques <- length(as.character(unique(subset_2$plant_functional_type_one))) + length_of_pft_2_uniques <- length(as.character(unique(subset_2$plant_functional_type_two))) + + if (length_of_pft_2_uniques > 1 | length_of_pft_1_uniques > 1) { + aggregated_2 <- TRUE + } + + if (aggregated_1 && aggregated_2) { + is_legal_table <- FALSE } - - for(j in seq_along(unique(subset$plant_functional_type_two))){ - - subset_2<-subset(table, table$plant_functional_type_two == as.character(subset$plant_functional_type_two[j])) - length_of_pft_1_uniques<-length(as.character(unique(subset_2$plant_functional_type_one))) - length_of_pft_2_uniques<-length(as.character(unique(subset_2$plant_functional_type_two))) - - if(length_of_pft_2_uniques>1 | length_of_pft_1_uniques>1){ - aggregated_2<- TRUE - } - - if(aggregated_1 && aggregated_2){is_legal_table<-FALSE } } - } - + return(is_legal_table) - } else{ + } else { PEcAn.logger::logger.severe("Not every species or plant_functional_type is accounted for in custom_table provided. 
Please account for", missing, "and make sure that 'plant_fucntional_type_one' is matches to 'observation_one'") } - } } - - - - - diff --git a/modules/benchmark/R/check_if_list_of_pfts.R b/modules/benchmark/R/check_if_list_of_pfts.R index 3b587240d94..710916c04f9 100644 --- a/modules/benchmark/R/check_if_list_of_pfts.R +++ b/modules/benchmark/R/check_if_list_of_pfts.R @@ -4,14 +4,12 @@ #' #' @param vars names to check #' -#' @return \code{boolean} +#' @return \code{boolean} #' @author Tempest McCabe -check_if_list_of_pfts<-function(vars){ - - if( any(c("plant_functional_type","species_name") %in% vars)){ +check_if_list_of_pfts <- function(vars) { + if (any(c("plant_functional_type", "species_name") %in% vars)) { return(TRUE) - }else{ + } else { return(FALSE) } } - diff --git a/modules/benchmark/R/check_if_species_list.R b/modules/benchmark/R/check_if_species_list.R index bbba2729af2..77287795d23 100644 --- a/modules/benchmark/R/check_if_species_list.R +++ b/modules/benchmark/R/check_if_species_list.R @@ -1,29 +1,26 @@ #' check_if_species_list #' -#'@details -#' Checks if format contains a species list in a known format, or a declared custom format. +#' @details +#' Checks if format contains a species list in a known format, or a declared custom format. #' #' @param vars format -#' @param custom_table a table that either maps two pft's to one anouther or maps custom species codes to bety id codes. -#' In the second case, must be passable to match_species_id. +#' @param custom_table a table that either maps two pft's to one anouther or maps custom species codes to bety id codes. +#' In the second case, must be passable to match_species_id. #' -#' @return \code{boolean} +#' @return \code{boolean} #' @author Tempest McCabe -check_if_species_list<-function(vars,custom_table=NULL){ - - if(any(c("species_id", "species_name", "species_USDA_symbol", "species_FIA_symbol") %in% vars)){ +check_if_species_list <- function(vars, custom_table = NULL) { + if (any(c("species_id", "species_name", "species_USDA_symbol", "species_FIA_symbol") %in% vars)) { return(TRUE) - }else if(!is.null(custom_table)){ - if("bety_species_id" %in% names(custom_table)){ + } else if (!is.null(custom_table)) { + if ("bety_species_id" %in% names(custom_table)) { return(TRUE) - }else{ - PEcAn.logger::logger.warn("Note: custom_table does not have column named 'bety_species_id' and cannot be used with match_species_id(). + } else { + PEcAn.logger::logger.warn("Note: custom_table does not have column named 'bety_species_id' and cannot be used with match_species_id(). 
Tables that do not have a 'bety_species_id' column cannot be used for species-level mapping, but can be used for PFT level mapping.") } - }else{ + } else { return(FALSE) } } - - diff --git a/modules/benchmark/R/create_BRR.R b/modules/benchmark/R/create_BRR.R index d2e4d7f7e3b..f36c3587ee4 100644 --- a/modules/benchmark/R/create_BRR.R +++ b/modules/benchmark/R/create_BRR.R @@ -1,43 +1,45 @@ ##' Create benchmark reference run and ensemble ##' ##' For each benchmark id, calculate metrics and update benchmarks_ensemble_scores -##' -##' @param ens_wf table made from joining ensemble and workflow tables +##' +##' @param ens_wf table made from joining ensemble and workflow tables ##' @param con database connection ##' @param user_id Optional user id to use for this record in reference_runs table -##' @export -##' -##' @author Betsy Cowdery +##' @export +##' +##' @author Betsy Cowdery -create_BRR <- function(ens_wf, con, user_id = ""){ - - # cnd1 <- ens_wf$hostname == PEcAn.remote::fqdn() +create_BRR <- function(ens_wf, con, user_id = "") { + # cnd1 <- ens_wf$hostname == PEcAn.remote::fqdn() # cnd2 <- ens_wf$hostname == 'test-pecan.bu.edu' & PEcAn.remote::fqdn() == 'pecan2.bu.edu' # cnd3 <- ens_wf$hostname == 'pecan2.bu.edu' & PEcAn.remote::fqdn() == 'test-pecan.bu.edu' # if(cnd1|cnd2|cnd3){ # If the ensemble run was done on localhost, turn into a BRR - - clean <- PEcAn.benchmark::clean_settings_BRR(inputfile = file.path(ens_wf$folder,"pecan.CHECKED.xml")) + + clean <- PEcAn.benchmark::clean_settings_BRR(inputfile = file.path(ens_wf$folder, "pecan.CHECKED.xml")) settings_xml <- toString(PEcAn.settings::listToXml(clean, "pecan")) ref_run <- PEcAn.benchmark::check_BRR(settings_xml, con) - - if(length(ref_run) == 0){ # Make new reference run entry - - if(nchar(as.character(user_id)) > 0){ - cmd <- paste0("INSERT INTO reference_runs (model_id, settings, user_id)", - "VALUES(",ens_wf$model_id,", '",settings_xml,"' , ",user_id, - ") RETURNING *;") - }else{ - cmd <- paste0("INSERT INTO reference_runs (model_id, settings)", - "VALUES(",ens_wf$model_id,", '",settings_xml, - "') RETURNING *;") + + if (length(ref_run) == 0) { # Make new reference run entry + + if (nchar(as.character(user_id)) > 0) { + cmd <- paste0( + "INSERT INTO reference_runs (model_id, settings, user_id)", + "VALUES(", ens_wf$model_id, ", '", settings_xml, "' , ", user_id, + ") RETURNING *;" + ) + } else { + cmd <- paste0( + "INSERT INTO reference_runs (model_id, settings)", + "VALUES(", ens_wf$model_id, ", '", settings_xml, + "') RETURNING *;" + ) } ref_run <- PEcAn.DB::db.query(cmd, con) - - }else if(dim(ref_run)[1] > 1){# There shouldn't be more than one reference run with the same settings + } else if (dim(ref_run)[1] > 1) { # There shouldn't be more than one reference run with the same settings PEcAn.logger::logger.error("There is more than one reference run in the database with these settings. Review for duplicates. 
") } BRR <- ref_run %>% dplyr::rename(reference_run_id = .data$id) return(BRR) - # }else{logger.error(sprintf("Cannot create a benchmark reference run for a run on hostname: %s", + # }else{logger.error(sprintf("Cannot create a benchmark reference run for a run on hostname: %s", # ens_wf$hostname))} -} #create_BRR +} # create_BRR diff --git a/modules/benchmark/R/define_benchmark.R b/modules/benchmark/R/define_benchmark.R index 6178b942db0..3e23b100950 100644 --- a/modules/benchmark/R/define_benchmark.R +++ b/modules/benchmark/R/define_benchmark.R @@ -9,124 +9,149 @@ ##' @author Betsy Cowdery ##' @importFrom dplyr tbl filter rename collect select ##' @export -define_benchmark <- function(settings, bety){ - +define_benchmark <- function(settings, bety) { if (PEcAn.settings::is.MultiSettings(settings)) { return(PEcAn.settings::papply(settings, function(x) define_benchmark(x, bety))) } bm.settings <- settings$benchmarking - + PEcAn.logger::logger.info(paste("Ensemble id:", bm.settings$ensemble_id)) PEcAn.logger::logger.info(paste(!is.null(bm.settings$ensemble_id))) # Retrieve/create benchmark entries - - if(is.null(bm.settings$reference_run_id)){ - if(!is.null(bm.settings$ensemble_id)){ - + + if (is.null(bm.settings$reference_run_id)) { + if (!is.null(bm.settings$ensemble_id)) { # check if there is already a BRR for ensemble.id, otherwise make one - bm_ens <- dplyr::tbl(bety,"benchmarks_ensembles") %>% rename(bm_ensemble_id = .data$id) %>% - filter(.data$ensemble_id == bm.settings$ensemble_id) %>% collect() - - if(length(bm_ens) == 0){ + bm_ens <- dplyr::tbl(bety, "benchmarks_ensembles") %>% + rename(bm_ensemble_id = .data$id) %>% + filter(.data$ensemble_id == bm.settings$ensemble_id) %>% + collect() + + if (length(bm_ens) == 0) { # Get workflow id from ensemble id - ens_wf <- dplyr::tbl(bety, 'ensembles') %>% filter(.data$id == bm.settings$ensemble_id) %>% - rename(ensemble_id = .data$id) %>% - dplyr::left_join(tbl(bety, "workflows") %>% rename(workflow_id = .data$id), by="workflow_id") %>% collect() + ens_wf <- dplyr::tbl(bety, "ensembles") %>% + filter(.data$id == bm.settings$ensemble_id) %>% + rename(ensemble_id = .data$id) %>% + dplyr::left_join(tbl(bety, "workflows") %>% rename(workflow_id = .data$id), by = "workflow_id") %>% + collect() BRR <- create_BRR(ens_wf, con = bety, user_id = settings$info$userid) - }else if(dim(bm_ens)[1] == 1){ - BRR <- tbl(bety,"reference_runs") %>% filter(.data$id == bm_ens$reference_run_id) %>% - rename(reference_run_id = .data$id) %>% collect() - }else if(dim(bm_ens)[1] > 1){ # There shouldn't be more than one reference run per run + } else if (dim(bm_ens)[1] == 1) { + BRR <- tbl(bety, "reference_runs") %>% + filter(.data$id == bm_ens$reference_run_id) %>% + rename(reference_run_id = .data$id) %>% + collect() + } else if (dim(bm_ens)[1] > 1) { # There shouldn't be more than one reference run per run PEcAn.logger::logger.error("There is more than one reference run in the database for this ensemble id. Review for duplicates. 
") } # add the ref_run id, remove the ensemble_id bm.settings$reference_run_id <- BRR$reference_run_id # bm.settings$ensemble_id <- NULL - - }else{PEcAn.logger::logger.error("Cannot find or create benchmark reference run")} - }else{PEcAn.logger::logger.debug("Reference run already created")} - - + } else { + PEcAn.logger::logger.error("Cannot find or create benchmark reference run") + } + } else { + PEcAn.logger::logger.debug("Reference run already created") + } + + # Retrieve/create benchmark entries which.bm <- which(names(bm.settings) == "benchmark") - - for(i in which.bm){ + + for (i in which.bm) { benchmark <- bm.settings[[i]] - + # Create benchmark records using all metrics # Unless individual metrics are specified in settings - # This can be expanded to "suits" of metrics + # This can be expanded to "suits" of metrics # (for example, all plots or all regression based metrics) - if(!is.null(benchmark$metrics)){ + if (!is.null(benchmark$metrics)) { metric_ids <- as.numeric(unlist(benchmark$metrics)) - }else{ - metric_ids <- tbl(bety, 'metrics') %>% dplyr::pull(.data$id) + } else { + metric_ids <- tbl(bety, "metrics") %>% dplyr::pull(.data$id) } - + # If site is not specified in benchmark settings (this may be unnecessary) # Use ensemble site - if(!is.null(benchmark$site_id)){ + if (!is.null(benchmark$site_id)) { site_id <- as.numeric(benchmark$site_id) - }else{ + } else { site_id <- as.numeric(strsplit(strsplit(BRR$settings, "\n* *")[[1]][2], "")[[1]][1]) # This doesn't seem like a good way to read the settings string, would love suggestions for something better } - - bm <- tbl(bety, 'benchmarks') %>% + + bm <- tbl(bety, "benchmarks") %>% filter(.data$input_id == benchmark$input_id) %>% filter(.data$variable_id == benchmark$variable_id) %>% - filter(site_id == site_id) %>% collect() - + filter(site_id == site_id) %>% + collect() + # Retrieve/create benchmark record - if(dim(bm)[1] == 0){ - cmd <- sprintf(paste0("INSERT INTO benchmarks (input_id, variable_id, site_id, user_id)", - "VALUES ( %s, %s, %s, %s) RETURNING * ;"), - benchmark$input_id, benchmark$variable_id, - site_id, settings$info$userid) + if (dim(bm)[1] == 0) { + cmd <- sprintf( + paste0( + "INSERT INTO benchmarks (input_id, variable_id, site_id, user_id)", + "VALUES ( %s, %s, %s, %s) RETURNING * ;" + ), + benchmark$input_id, benchmark$variable_id, + site_id, settings$info$userid + ) bm <- PEcAn.DB::db.query(cmd, bety) - PEcAn.logger::logger.debug(sprintf("Benchmark %.0f for input %.0f variable %.0f created", - bm$id, bm$input_id, bm$variable_id)) - }else if(dim(bm)[1] >1){ - PEcAn.logger::logger.error(sprintf("DUPLICATE records exist for input %.0f variable %.0f", - as.numeric(benchmark$input_id), benchmark$variable_id)) - }else{ - PEcAn.logger::logger.debug(sprintf("Benchmark %.0f for input %.0f variable %.0f exists", - bm$id, bm$input_id, bm$variable_id)) + PEcAn.logger::logger.debug(sprintf( + "Benchmark %.0f for input %.0f variable %.0f created", + bm$id, bm$input_id, bm$variable_id + )) + } else if (dim(bm)[1] > 1) { + PEcAn.logger::logger.error(sprintf( + "DUPLICATE records exist for input %.0f variable %.0f", + as.numeric(benchmark$input_id), benchmark$variable_id + )) + } else { + PEcAn.logger::logger.debug(sprintf( + "Benchmark %.0f for input %.0f variable %.0f exists", + bm$id, bm$input_id, bm$variable_id + )) } - + # Retrieve/create benchmarks_benchmarks_reference_runs record - bmBRR <- tbl(bety, 'benchmarks_benchmarks_reference_runs') %>% + bmBRR <- tbl(bety, "benchmarks_benchmarks_reference_runs") %>% 
filter(.data$benchmark_id == bm$id) %>% - filter(.data$reference_run_id == bm.settings$reference_run_id) %>% collect() - - if(dim(bmBRR)[1] == 0){ - cmd <- sprintf(paste0("INSERT INTO benchmarks_benchmarks_reference_runs", - " (benchmark_id, reference_run_id) VALUES (%s, %s)"), - bm$id, bm.settings$reference_run_id) + filter(.data$reference_run_id == bm.settings$reference_run_id) %>% + collect() + + if (dim(bmBRR)[1] == 0) { + cmd <- sprintf( + paste0( + "INSERT INTO benchmarks_benchmarks_reference_runs", + " (benchmark_id, reference_run_id) VALUES (%s, %s)" + ), + bm$id, bm.settings$reference_run_id + ) PEcAn.DB::db.query(cmd, bety) - }else if(dim(bmBRR)[1] > 1){ + } else if (dim(bmBRR)[1] > 1) { PEcAn.logger::logger.error("Duplicate record entries in benchmarks_benchmarks_reference_runs") } - + # Retrieve/create benchmarks_metrics record - for(k in seq_along(metric_ids)){ - bmmetric <- tbl(bety, 'benchmarks_metrics') %>% + for (k in seq_along(metric_ids)) { + bmmetric <- tbl(bety, "benchmarks_metrics") %>% filter(.data$benchmark_id == bm$id) %>% - filter(.data$metric_id == metric_ids[[k]]) %>% collect() - - if(dim(bmmetric)[1] == 0){ - cmd <- sprintf(paste0("INSERT INTO benchmarks_metrics (benchmark_id, metric_id) VALUES (%s, %s)"), - bm$id, metric_ids[[k]]) + filter(.data$metric_id == metric_ids[[k]]) %>% + collect() + + if (dim(bmmetric)[1] == 0) { + cmd <- sprintf( + paste0("INSERT INTO benchmarks_metrics (benchmark_id, metric_id) VALUES (%s, %s)"), + bm$id, metric_ids[[k]] + ) PEcAn.DB::db.query(cmd, bety) - }else if(dim(bmmetric)[1] > 1){ + } else if (dim(bmmetric)[1] > 1) { PEcAn.logger::logger.error("Duplicate record entries in benchmarks_metrics") } } # end loop over metric ids - + benchmark$benchmark_id <- bm$id bm.settings[[i]] <- benchmark - } # End loop over benchmarks in settings - + invisible(return(bm.settings)) } # create.benchmark diff --git a/modules/benchmark/R/format_wide2long.R b/modules/benchmark/R/format_wide2long.R index 8ab93062e74..57b7a176024 100644 --- a/modules/benchmark/R/format_wide2long.R +++ b/modules/benchmark/R/format_wide2long.R @@ -1,5 +1,5 @@ ##' Function to convert wide format to long format -##' +##' ##' @param out wide format data ##' @param format as returned by query.format.vars ##' @param vars_used data frame mapping `input_name` to `bety_name` @@ -7,10 +7,9 @@ ##' @return list of updated values ##' @export ##' @author Istem Fer -format_wide2long <- function(out, format, vars_used, time.row){ - +format_wide2long <- function(out, format, vars_used, time.row) { # GapMacro example: - # suppose that the wide variable is DBH, DMG (damage status), MORT (mortality status) + # suppose that the wide variable is DBH, DMG (damage status), MORT (mortality status) # # out looks like this: # @@ -29,106 +28,108 @@ format_wide2long <- function(out, format, vars_used, time.row){ # 2012 10 NA A # # 2014 12 TL A # # 2016 13 TL D # - # # + # # ########################## - - + + # which cols are "wide" - wide_bety <- unique(vars_used$bety_name[duplicated(vars_used$bety_name)]) - long_bety <- vars_used$bety_name[!(vars_used$bety_name %in% wide_bety)] - dindx <- vars_used$bety_name %in% wide_bety + wide_bety <- unique(vars_used$bety_name[duplicated(vars_used$bety_name)]) + long_bety <- vars_used$bety_name[!(vars_used$bety_name %in% wide_bety)] + dindx <- vars_used$bety_name %in% wide_bety wide_input <- vars_used$input_name[dindx] long_input <- colnames(out)[!(colnames(out) %in% wide_input)] - + melt_list <- list() - long_var <- data.frame(varname = rep(NA, 
length(wide_bety)), - storage_type = rep(NA, length(wide_bety))) - for(i in seq_along(wide_bety)){ - wide_var <- wide_bety[i] - wide_cols <- out[ ,vars_used$bety_name %in% c(wide_var, long_bety)] - melt_cols <- reshape2::melt(wide_cols, id = long_input, value.name = wide_var) - # - # a sample melt_cols for a wide variable, in this case for DBH, looks like this: - # - ########################## - # # - # code variable value # - # FAGR diam12 10 # - # FAGR diam14 12 # - # FAGR diam16 13 # - # # - ########################## - # - # now we need to replace diam12/14/16 values with 2012, 2014, 2016 respectively - # and "variable" to some pecan variable name, "year" in this case - # this information will come from the storage type which is the same for all wide variables of same kind - # e.g. for this wide var storage type will be "diam:20 year:%Y" *I chose ":" as it is unlikely to be in var names* - # expression before the space tells what to swap in the values - # expression after the space tells what the newly created variable is - # - storage_wide <- unique(vars_used$storage_type[vars_used$bety_name == wide_var]) - swap_val <- gsub(" .*", "", storage_wide) - new_var <- gsub(".* ", "", storage_wide) - # replace "diam" with "20" - melt_cols$variable <- gsub(gsub(":.*", "", swap_val), gsub(".*:", "", swap_val), melt_cols$variable) - # replace "variable" with "year" - colnames(melt_cols)[colnames(melt_cols) == "variable"] <- gsub(":.*", "", new_var) - # collect info about newly created var - long_var[i, ] <- c(gsub(":.*", "", new_var), gsub(".*:", "", new_var)) - - melt_list[[i]] <- melt_cols + long_var <- data.frame( + varname = rep(NA, length(wide_bety)), + storage_type = rep(NA, length(wide_bety)) + ) + for (i in seq_along(wide_bety)) { + wide_var <- wide_bety[i] + wide_cols <- out[, vars_used$bety_name %in% c(wide_var, long_bety)] + melt_cols <- reshape2::melt(wide_cols, id = long_input, value.name = wide_var) + # + # a sample melt_cols for a wide variable, in this case for DBH, looks like this: + # + ########################## + # # + # code variable value # + # FAGR diam12 10 # + # FAGR diam14 12 # + # FAGR diam16 13 # + # # + ########################## + # + # now we need to replace diam12/14/16 values with 2012, 2014, 2016 respectively + # and "variable" to some pecan variable name, "year" in this case + # this information will come from the storage type which is the same for all wide variables of same kind + # e.g. 
for this wide var storage type will be "diam:20 year:%Y" *I chose ":" as it is unlikely to be in var names* + # expression before the space tells what to swap in the values + # expression after the space tells what the newly created variable is + # + storage_wide <- unique(vars_used$storage_type[vars_used$bety_name == wide_var]) + swap_val <- gsub(" .*", "", storage_wide) + new_var <- gsub(".* ", "", storage_wide) + # replace "diam" with "20" + melt_cols$variable <- gsub(gsub(":.*", "", swap_val), gsub(".*:", "", swap_val), melt_cols$variable) + # replace "variable" with "year" + colnames(melt_cols)[colnames(melt_cols) == "variable"] <- gsub(":.*", "", new_var) + # collect info about newly created var + long_var[i, ] <- c(gsub(":.*", "", new_var), gsub(".*:", "", new_var)) + + melt_list[[i]] <- melt_cols } # join sublists temp_data <- do.call(cbind, melt_list) long_data <- temp_data[, !duplicated(colnames(temp_data))] - #time.check <- unique(gsub(" .*", "", vars_used$storage_type[dindx])) - + # time.check <- unique(gsub(" .*", "", vars_used$storage_type[dindx])) + # finally, you need to inform "format" about the new structure, vars_used index and update time.row if necessary # remove wide rows from format$vars altogether format$vars <- format$vars[!(format$vars$input_name %in% wide_input), ] # just use one of the wide var row(s) - wide_rows <- vars_used[dindx,] - wide_rows <- wide_rows[!duplicated(wide_rows$bety_name),] + wide_rows <- vars_used[dindx, ] + wide_rows <- wide_rows[!duplicated(wide_rows$bety_name), ] # just for the sake of unit conversion printf in load_data change the input name, not sure if this is necessary - wide_rows$input_name <- tapply(wide_input, rep(1:nrow(wide_rows), each=nrow(wide_rows)), paste, collapse = ",") + wide_rows$input_name <- tapply(wide_input, rep(1:nrow(wide_rows), each = nrow(wide_rows)), paste, collapse = ",") # empty the storage type and column_number so that it won't break anything downstream, probably it won't anyway - wide_rows$storage_type <- "" + wide_rows$storage_type <- "" wide_rows$column_number <- "" format$vars <- rbind(format$vars, wide_rows) - + # finally add the newly created variable(s), only "year" in this case - new_var <- data.frame(bety_name = "", variable_id = "", input_name = "", input_units = "", - storage_type = "", column_number = "", bety_units = "", - mstmip_name = "", mstmip_units = "", pecan_name = "", pecan_units = "") - for(i in 1:nrow(long_var)){ - if(!long_var[i, 1] %in% format$vars$bety_name){ # avoid duplicating - new_var$bety_name <- new_var$input_name <- new_var$pecan_name <- long_var[i, 1] - new_var$storage_type <- long_var[i, 2] - # do we also nee variable_id downstream? - format$vars <- rbind(format$vars, new_var) + new_var <- data.frame( + bety_name = "", variable_id = "", input_name = "", input_units = "", + storage_type = "", column_number = "", bety_units = "", + mstmip_name = "", mstmip_units = "", pecan_name = "", pecan_units = "" + ) + for (i in 1:nrow(long_var)) { + if (!long_var[i, 1] %in% format$vars$bety_name) { # avoid duplicating + new_var$bety_name <- new_var$input_name <- new_var$pecan_name <- long_var[i, 1] + new_var$storage_type <- long_var[i, 2] + # do we also need variable_id downstream?
+ format$vars <- rbind(format$vars, new_var) } } - + # update time.row st <- format$vars$storage_type - time.row <- which(nchar(st)>1 & substr(st, 1,1) == "%") - if(length(time.row) == 0){ + time.row <- which(nchar(st) > 1 & substr(st, 1, 1) == "%") + if (length(time.row) == 0) { format$time.row <- NULL time.row <- NULL - }else{ + } else { format$time.row <- time.row } - + # update vars_used vars.used.index <- setdiff(seq_along(format$vars$variable_id), format$time.row) vars_used <- format$vars[vars.used.index, ] - - + + return(list(long_data = long_data, format = format, vars_used = vars_used, time.row = time.row)) - } # wide2long - diff --git a/modules/benchmark/R/get_species_list_standard.R b/modules/benchmark/R/get_species_list_standard.R index d7872a31f96..c29f1de03cd 100644 --- a/modules/benchmark/R/get_species_list_standard.R +++ b/modules/benchmark/R/get_species_list_standard.R @@ -6,20 +6,19 @@ #' @return \code{character} Returns "usda", "latin_name", "fia" or "custom" #' @author Tempest McCabe get_species_list_standard <- function(vars) { - - if(any(c("species_id", "species_USDA_symbol") %in% vars)){ + if (any(c("species_id", "species_USDA_symbol") %in% vars)) { return("usda") - }else if("species_name" %in% vars){ - return('latin_name') - }else if("species_FIA_symbol" %in% vars){ - return('fia') - }else if(!is.null(vars)){ - if("bety_species_id" %in% names(vars)){ + } else if ("species_name" %in% vars) { + return("latin_name") + } else if ("species_FIA_symbol" %in% vars) { + return("fia") + } else if (!is.null(vars)) { + if ("bety_species_id" %in% names(vars)) { return("custom") - }else{ + } else { PEcAn.logger::logger.warn("Note: `vars` does not have column named 'bety_species_id' and cannot be used with match_species_id(). This prohibits species-level mapping, but allows PFT level mapping.") } - }else{ + } else { return(FALSE) } } diff --git a/modules/benchmark/R/load_csv.R b/modules/benchmark/R/load_csv.R index 28c9bdea4ef..342b47ec104 100644 --- a/modules/benchmark/R/load_csv.R +++ b/modules/benchmark/R/load_csv.R @@ -4,29 +4,33 @@ ##' @param format list ##' @param site list ##' @param vars column names to return. 
If NULL, returns all columns -##' +##' ##' @author Betsy Cowdery ##' @export load_csv <- function(data.path, format, site, vars = NULL) { - data.path <- sapply(data.path, function(x) dir(dirname(x), basename(x), full.names = TRUE)) - + if (format$header == 0 | format$header == 1) { - dat <- utils::read.csv(data.path, skip = format$skip, na.strings = format$na.strings, - as.is = TRUE, check.names = FALSE, header = as.logical(format$header)) + dat <- utils::read.csv(data.path, + skip = format$skip, na.strings = format$na.strings, + as.is = TRUE, check.names = FALSE, header = as.logical(format$header) + ) } else if (format$header > 1) { - dat <- utils::read.csv(data.path, skip = format$skip, na.strings = format$na.strings, - as.is = TRUE, check.names = FALSE, header = TRUE) + dat <- utils::read.csv(data.path, + skip = format$skip, na.strings = format$na.strings, + as.is = TRUE, check.names = FALSE, header = TRUE + ) dat <- dat[-c(1:format$header - 1), ] } else { - dat <- utils::read.csv(data.path, skip = format$skip, na.strings = format$na.strings, - as.is = TRUE, check.names = FALSE) + dat <- utils::read.csv(data.path, + skip = format$skip, na.strings = format$na.strings, + as.is = TRUE, check.names = FALSE + ) } - - if(!is.null(vars)){ + + if (!is.null(vars)) { return(dplyr::select(dat, dplyr::one_of(vars))) - }else{ + } else { return(dat) } - } # load_csv diff --git a/modules/benchmark/R/load_data.R b/modules/benchmark/R/load_data.R index 21f25b3a935..76692a6d537 100644 --- a/modules/benchmark/R/load_data.R +++ b/modules/benchmark/R/load_data.R @@ -15,26 +15,27 @@ #' @importFrom magrittr %>% #' @export -load_data <- function(data.path, format, start_year = NA, end_year = NA, site = NA, - vars.used.index=NULL, ...) { - +load_data <- function(data.path, format, start_year = NA, end_year = NA, site = NA, + vars.used.index = NULL, ...) 
{ # If site = NA, check that site information is in the formats table - if(all(is.na(site))){ - if(!is.null(format$site)){ - site <- list(id = format$site, lat = format$lat, lon = format$lon, - time_zone = format$time_zone) - }else{ + if (all(is.na(site))) { + if (!is.null(format$site)) { + site <- list( + id = format$site, lat = format$lat, lon = format$lon, + time_zone = format$time_zone + ) + } else { PEcAn.logger::logger.error("Input must have site information.") } } - + ## load everything in format by default - time.row <- format$time.row - - if(is.null(vars.used.index)){ + time.row <- format$time.row + + if (is.null(vars.used.index)) { vars.used.index <- setdiff(seq_along(format$vars$variable_id), format$time.row) } - + # Determine the function that should be used to load the data mimetype <- gsub("-", "_", format$mimetype) fcn1 <- paste0("load_", format$file_name) @@ -44,26 +45,26 @@ load_data <- function(data.path, format, start_year = NA, end_year = NA, site = } else if (exists(fcn2)) { fcn <- match.fun(fcn2) } else { - PEcAn.logger::logger.warn("Brown Dog is currently unable to perform conversion from ",mimetype," to a PEcAn usable format") + PEcAn.logger::logger.warn("Brown Dog is currently unable to perform conversion from ", mimetype, " to a PEcAn usable format") } - - vars = format$vars$input_name[c(vars.used.index, time.row)] + + vars <- format$vars$input_name[c(vars.used.index, time.row)] out <- fcn(data.path, format, site, vars) - + # Convert loaded data to the same standard variable names and units - + vars_used <- format$vars[vars.used.index, ] - + # check wide format and transform to long - if(any(duplicated(vars_used$bety_name))){ - w2l <- format_wide2long(out, format, vars_used, time.row) - out <- w2l$long_data - format <- w2l$format - vars_used <- w2l$vars_used - time.row <- w2l$time.row - } - - + if (any(duplicated(vars_used$bety_name))) { + w2l <- format_wide2long(out, format, vars_used, time.row) + out <- w2l$long_data + format <- w2l$format + vars_used <- w2l$vars_used + time.row <- w2l$time.row + } + + for (i in seq_len(nrow(vars_used))) { col <- names(out) == vars_used$input_name[i] if (vars_used$input_units[i] == vars_used$pecan_units[i]) { @@ -74,59 +75,63 @@ load_data <- function(data.path, format, start_year = NA, end_year = NA, site = u1 <- vars_used$input_units[i] u2 <- vars_used$pecan_units[i] if (units::ud_are_convertible(u1, u2)) { - print(sprintf("convert %s %s to %s %s", - vars_used$input_name[i], vars_used$input_units[i], - vars_used$pecan_name[i], vars_used$pecan_units[i])) + print(sprintf( + "convert %s %s to %s %s", + vars_used$input_name[i], vars_used$input_units[i], + vars_used$pecan_name[i], vars_used$pecan_units[i] + )) out[col] <- PEcAn.utils::ud_convert(as.numeric(x), u1, u2) colnames(out)[col] <- vars_used$pecan_name[i] } else if (PEcAn.utils::misc.are.convertible(u1, u2)) { - print(sprintf("convert %s %s to %s %s", - vars_used$input_name[i], u1, - vars_used$pecan_name[i], u2)) + print(sprintf( + "convert %s %s to %s %s", + vars_used$input_name[i], u1, + vars_used$pecan_name[i], u2 + )) out[col] <- as.vector(PEcAn.utils::misc.convert(x, u1, u2)) # Betsy: Adding this because misc.convert returns vector with attributes original agrument x, which causes problems later colnames(out)[col] <- vars_used$pecan_name[i] } else { - PEcAn.logger::logger.warn(paste("Units cannot be converted. Removing variable. 
please check the units of",vars_used$input_name[i])) - out<-out[,!names(out) %in% c(vars_used$input_name[i])] - vars_used<-vars_used[!names(vars_used) %in% c(vars_used$input_name[i],vars_used$pecan_name[i]),] + PEcAn.logger::logger.warn(paste("Units cannot be converted. Removing variable. please check the units of", vars_used$input_name[i])) + out <- out[, !names(out) %in% c(vars_used$input_name[i])] + vars_used <- vars_used[!names(vars_used) %in% c(vars_used$input_name[i], vars_used$pecan_name[i]), ] } } } - - if(!is.null(time.row)){ - + + if (!is.null(time.row)) { # load_data was not changing the name of the 'time' column col <- names(out) %in% format$vars$input_name[time.row] names(out)[col] <- format$vars$pecan_name[time.row] - - # Need a much more spohisticated approach to converting into time format. + + # Need a much more spohisticated approach to converting into time format. y <- dplyr::select(out, tidyselect::one_of(format$vars$pecan_name[time.row])) - - if(!is.null(site$time_zone)){ - tz = site$time_zone - }else{ - tz = "UTC" + + if (!is.null(site$time_zone)) { + tz <- site$time_zone + } else { + tz <- "UTC" PEcAn.logger::logger.warn("No site timezone. Assuming input time zone is UTC. This may be incorrect.") } - - out$posix <- strptime(apply(y, 1, function(x) paste(x, collapse = " ")), - format=paste(format$vars$storage_type[time.row], collapse = " "), - tz = tz) %>% as.POSIXct() - } - + + out$posix <- strptime(apply(y, 1, function(x) paste(x, collapse = " ")), + format = paste(format$vars$storage_type[time.row], collapse = " "), + tz = tz + ) %>% as.POSIXct() + } + # Subset by start year and end year when loading data # This was part of the arguments but never implemented - if(!is.na(start_year)){ + if (!is.na(start_year)) { out$year <- lubridate::year(out$posix) out <- out %>% filter(.data$year >= as.numeric(start_year)) print("subsetting by start year") } - - if(!is.na(end_year)){ + + if (!is.na(end_year)) { out$year <- lubridate::year(out$posix) out <- out %>% filter(.data$year <= as.numeric(end_year)) print("subsetting by end year") } - + return(out) } # load_data diff --git a/modules/benchmark/R/load_netcdf.R b/modules/benchmark/R/load_netcdf.R index 2e956ffee85..e4ebe00cad4 100644 --- a/modules/benchmark/R/load_netcdf.R +++ b/modules/benchmark/R/load_netcdf.R @@ -29,29 +29,33 @@ load_x_netcdf <- function(data.path, format, site, vars = NULL) { t.units <- ncdf4::ncatt_get(nc[[i]], dims[time.var])$units # If the unit has if of the form * since YYYY-MM-DD * with "-hour" timezone offset # This is a feature of the met produced by met2CF - if(stringr::str_detect(t.units, "ince\\s[0-9]{4}[.-][0-9]{2}[.-][0-9]{2}.*\\s-\\d+")){ - unit2 <- stringr::str_split_fixed(t.units,"\\s-",2)[1] - offset <- stringr::str_split_fixed(t.units,"\\s-",2)[2] %>% as.numeric() + if (stringr::str_detect(t.units, "ince\\s[0-9]{4}[.-][0-9]{2}[.-][0-9]{2}.*\\s-\\d+")) { + unit2 <- stringr::str_split_fixed(t.units, "\\s-", 2)[1] + offset <- stringr::str_split_fixed(t.units, "\\s-", 2)[2] %>% as.numeric() date_time <- suppressWarnings(try(lubridate::ymd((unit2)))) - if(is.na(date_time)){ + if (is.na(date_time)) { date_time <- suppressWarnings(try(lubridate::ymd_hms(unit2))) } - if(is.na(date_time)){ + if (is.na(date_time)) { PEcAn.logger::logger.error("All time formats failed to parse. 
No formats found.") } - t.units <- paste(stringr::str_split_fixed(t.units," since",2)[1], "since", - date_time - lubridate::hms(paste(offset,":00:00"))) - }else if(stringr::str_detect(t.units, "ince\\s[0-9]{4}[.-][0-9]{2}[.-][0-9]{2}.*")){ - unit2 <- stringr::str_split_fixed(t.units,"\\s-",2)[1] + t.units <- paste( + stringr::str_split_fixed(t.units, " since", 2)[1], "since", + date_time - lubridate::hms(paste(offset, ":00:00")) + ) + } else if (stringr::str_detect(t.units, "ince\\s[0-9]{4}[.-][0-9]{2}[.-][0-9]{2}.*")) { + unit2 <- stringr::str_split_fixed(t.units, "\\s-", 2)[1] date_time <- suppressWarnings(try(lubridate::ymd((unit2)))) - if(is.na(date_time)){ + if (is.na(date_time)) { date_time <- suppressWarnings(try(lubridate::ymd_hms(unit2))) } - if(is.na(date_time)){ + if (is.na(date_time)) { PEcAn.logger::logger.error("All time formats failed to parse. No formats found.") } - t.units <- paste(stringr::str_split_fixed(t.units," since",2)[1], "since", - date_time) + t.units <- paste( + stringr::str_split_fixed(t.units, " since", 2)[1], "since", + date_time + ) } # for heterogenous formats try parsing ymd_hms date.origin <- suppressWarnings(try(lubridate::ymd_hms(t.units))) @@ -64,10 +68,12 @@ load_x_netcdf <- function(data.path, format, site, vars = NULL) { PEcAn.logger::logger.error("All time formats failed to parse. No formats found.") } time.stamp.match <- gsub("UTC", "", date.origin) - t.units <- gsub(paste0(" since ", time.stamp.match, ".*"), "", - t.units) + t.units <- gsub( + paste0(" since ", time.stamp.match, ".*"), "", + t.units + ) # need to change system TZ otherwise, lines below keeps writing in the current time zone - Sys.setenv(TZ = 'UTC') + Sys.setenv(TZ = "UTC") foo <- as.POSIXct(date.origin, tz = "UTC") + PEcAn.utils::ud_convert(time.col[[i]], t.units, "seconds") time.col[[i]] <- foo } diff --git a/modules/benchmark/R/load_rds.R b/modules/benchmark/R/load_rds.R index 7fb82daa5b8..a4fe0092d85 100644 --- a/modules/benchmark/R/load_rds.R +++ b/modules/benchmark/R/load_rds.R @@ -5,18 +5,16 @@ ##' @param format list, not used, for compatibility ##' @param site not used, for compatibility ##' @param vars optional variable names to load. if NULL, returns all variables in file -##' +##' ##' @author Istem Fer load_rds <- function(data.path, format, site, vars = NULL) { - data.path <- sapply(data.path, function(x) dir(dirname(x), basename(x), full.names = TRUE)) - + dat <- readRDS(data.path) - if(!is.null(vars)){ + if (!is.null(vars)) { return(dplyr::select(dat, dplyr::one_of(vars))) - }else{ + } else { return(dat) } - } # load_rds diff --git a/modules/benchmark/R/load_tab.R b/modules/benchmark/R/load_tab.R index 4f95eca6413..a6e4ae80ad6 100644 --- a/modules/benchmark/R/load_tab.R +++ b/modules/benchmark/R/load_tab.R @@ -4,33 +4,39 @@ ##' @param format list ##' @param site list ##' @param vars variable names to load. 
If NULL, loads all columns -##' +##' ##' @author Betsy Cowdery, Mike Dietze ##' @export -load_tab_separated_values <- function(data.path, format, site=NULL, vars = NULL) { - +load_tab_separated_values <- function(data.path, format, site = NULL, vars = NULL) { data.path <- sapply(data.path, function(x) dir(dirname(x), basename(x), full.names = TRUE)) - + if (format$header == 0) { - dat <- utils::read.table(data.path, sep="\t",skip = format$skip, na.strings = format$na.strings, - as.is = TRUE, check.names = FALSE, header = FALSE) + dat <- utils::read.table(data.path, + sep = "\t", skip = format$skip, na.strings = format$na.strings, + as.is = TRUE, check.names = FALSE, header = FALSE + ) colnames(dat)[format$vars$column_number] <- format$vars$input_name } else if (format$header == 1) { - dat <- utils::read.table(data.path, sep="\t",skip = format$skip, na.strings = format$na.strings, - as.is = TRUE, check.names = FALSE, header = TRUE) + dat <- utils::read.table(data.path, + sep = "\t", skip = format$skip, na.strings = format$na.strings, + as.is = TRUE, check.names = FALSE, header = TRUE + ) } else if (format$header > 1) { - dat <- utils::read.table(data.path, sep="\t",skip = format$skip, na.strings = format$na.strings, - as.is = TRUE, check.names = FALSE, header = TRUE) + dat <- utils::read.table(data.path, + sep = "\t", skip = format$skip, na.strings = format$na.strings, + as.is = TRUE, check.names = FALSE, header = TRUE + ) dat <- dat[-c(1:format$header - 1), ] } else { - dat <- utils::read.table(data.path, sep="\t",skip = format$skip, na.strings = format$na.strings, - as.is = TRUE, check.names = FALSE) + dat <- utils::read.table(data.path, + sep = "\t", skip = format$skip, na.strings = format$na.strings, + as.is = TRUE, check.names = FALSE + ) } - - if(!is.null(vars)){ + + if (!is.null(vars)) { return(dplyr::select(dat, dplyr::one_of(vars))) - }else{ + } else { return(dat) } - } # load_tab_separated_values diff --git a/modules/benchmark/R/match_timestep.R b/modules/benchmark/R/match_timestep.R index bb030ed447f..925227d827f 100644 --- a/modules/benchmark/R/match_timestep.R +++ b/modules/benchmark/R/match_timestep.R @@ -1,14 +1,13 @@ -##' @name match_timestep +##' @name match_timestep ##' @title Match time step ##' @param date.fine numeric ##' @param data.fine matrix ##' @param date.coarse numeric ##' @export match_timestep -##' +##' ##' @author Istem Fer match_timestep <- function(date.coarse, date.fine, data.fine) { - midpoints <- c(-Inf, utils::head(as.numeric(date.fine), -1)) + c(0, diff(as.numeric(date.fine)) / 2) - + return(data.fine[findInterval(date.coarse, midpoints)]) } # match_timestep diff --git a/modules/benchmark/R/mean_over_larger_timestep.R b/modules/benchmark/R/mean_over_larger_timestep.R index bbc72396114..6e314b68106 100644 --- a/modules/benchmark/R/mean_over_larger_timestep.R +++ b/modules/benchmark/R/mean_over_larger_timestep.R @@ -1,10 +1,10 @@ -##' @name mean_over_larger_timestep +##' @name mean_over_larger_timestep ##' @title Calculate benchmarking statistics ##' @param date.fine numeric ##' @param data.fine data.frame ##' @param date.coarse numeric ##' @export mean_over_larger_timestep -##' +##' ##' @author Betsy Cowdery, Michael Dietze mean_over_larger_timestep <- function(date.coarse, date.fine, data.fine) { return(tapply(X = data.fine, INDEX = findInterval(date.fine, date.coarse), FUN = function(x) mean(x, na.rm = TRUE))) diff --git a/modules/benchmark/R/metric_AME.R b/modules/benchmark/R/metric_AME.R index cd5950bac97..6e245f1e42c 100644 --- 
a/modules/benchmark/R/metric_AME.R +++ b/modules/benchmark/R/metric_AME.R @@ -2,11 +2,11 @@ ##' ##' @param dat dataframe ##' @param ... ignored -##' +##' ##' @author Betsy Cowdery ##' @export metric_AME <- function(dat, ...) { PEcAn.logger::logger.info("Metric: Absolute Maximum Error") - return(max(abs(dat$model - dat$obvs),na.rm = TRUE)) + return(max(abs(dat$model - dat$obvs), na.rm = TRUE)) } # metric_AME diff --git a/modules/benchmark/R/metric_Frechet.R b/modules/benchmark/R/metric_Frechet.R index e79014c6450..eb6a702145c 100644 --- a/modules/benchmark/R/metric_Frechet.R +++ b/modules/benchmark/R/metric_Frechet.R @@ -3,7 +3,7 @@ ##' @export ##' @param metric_dat dataframe ##' @param ... ignored -##' +##' ##' @author Betsy Cowdery metric_Frechet <- function(metric_dat, ...) { diff --git a/modules/benchmark/R/metric_MAE.R b/modules/benchmark/R/metric_MAE.R index 06fa9577a4e..7027544086d 100644 --- a/modules/benchmark/R/metric_MAE.R +++ b/modules/benchmark/R/metric_MAE.R @@ -1,11 +1,11 @@ ##' Mean Absolute Error ##' @param dat dataframe ##' @param ... ignored -##' +##' ##' @author Betsy Cowdery -##' +##' ##' @export metric_MAE <- function(dat, ...) { PEcAn.logger::logger.info("Metric: Mean Absolute Error") - return(mean(abs(dat$model - dat$obvs),na.rm=TRUE)) + return(mean(abs(dat$model - dat$obvs), na.rm = TRUE)) } # metric_MAE diff --git a/modules/benchmark/R/metric_MSE.R b/modules/benchmark/R/metric_MSE.R index a3d91a14764..76d7dd9e6d1 100644 --- a/modules/benchmark/R/metric_MSE.R +++ b/modules/benchmark/R/metric_MSE.R @@ -3,10 +3,10 @@ ##' @export ##' @param dat dataframe ##' @param ... ignored -##' +##' ##' @author Betsy Cowdery metric_MSE <- function(dat, ...) { PEcAn.logger::logger.info("Metric: Mean Square Error") - return(mean((dat$model - dat$obvs) ^ 2,na.rm=TRUE)) + return(mean((dat$model - dat$obvs)^2, na.rm = TRUE)) } # metric_MSE diff --git a/modules/benchmark/R/metric_PPMC.R b/modules/benchmark/R/metric_PPMC.R index 53ec1151c84..d20cb734e06 100644 --- a/modules/benchmark/R/metric_PPMC.R +++ b/modules/benchmark/R/metric_PPMC.R @@ -3,7 +3,7 @@ ##' @export ##' @param metric_dat dataframe ##' @param ... ignored -##' +##' ##' @author Betsy Cowdery metric_PPMC <- function(metric_dat, ...) { diff --git a/modules/benchmark/R/metric_R2.R b/modules/benchmark/R/metric_R2.R index 94e5df7677b..cb13664cd26 100644 --- a/modules/benchmark/R/metric_R2.R +++ b/modules/benchmark/R/metric_R2.R @@ -3,21 +3,20 @@ ##' @export ##' @param metric_dat dataframe ##' @param ... ignored -##' +##' ##' @author Betsy Cowdery metric_R2 <- function(metric_dat, ...) { PEcAn.logger::logger.info("Metric: Coefficient of Determination (R2)") numer <- sum((metric_dat$obvs - mean(metric_dat$obvs)) * (metric_dat$model - mean(metric_dat$model))) - denom <- sqrt(sum((metric_dat$obvs - mean(metric_dat$obvs)) ^ 2)) * sqrt(sum((metric_dat$model - mean(metric_dat$model)) ^ 2)) - - out <- (numer / denom) ^ 2 - - if(is.na(out)){ + denom <- sqrt(sum((metric_dat$obvs - mean(metric_dat$obvs))^2)) * sqrt(sum((metric_dat$model - mean(metric_dat$model))^2)) + + out <- (numer / denom)^2 + + if (is.na(out)) { fit <- stats::lm(metric_dat$model ~ metric_dat$obvs) out <- summary(fit)$r.squared } - + return(out) - } # metric_R2 diff --git a/modules/benchmark/R/metric_RAE.R b/modules/benchmark/R/metric_RAE.R index b644aabb56a..4802765f96f 100644 --- a/modules/benchmark/R/metric_RAE.R +++ b/modules/benchmark/R/metric_RAE.R @@ -11,5 +11,5 @@ metric_RAE <- function(metric_dat, ...) 
{ metric_dat <- stats::na.omit(metric_dat) numer <- mean(abs(metric_dat$obvs - metric_dat$model)) denom <- mean(abs(metric_dat$obvs - mean(metric_dat$obvs))) - return(numer/denom) + return(numer / denom) } # metric_RAE diff --git a/modules/benchmark/R/metric_RMSE.R b/modules/benchmark/R/metric_RMSE.R index ebd02fc89c2..10bd42a9589 100644 --- a/modules/benchmark/R/metric_RMSE.R +++ b/modules/benchmark/R/metric_RMSE.R @@ -3,10 +3,10 @@ ##' @export ##' @param dat dataframe ##' @param ... ignored -##' +##' ##' @author Betsy Cowdery metric_RMSE <- function(dat, ...) { PEcAn.logger::logger.info("Metric: Root Mean Square Error") - return(sqrt(mean((dat$model - dat$obvs) ^ 2,na.rm=TRUE))) + return(sqrt(mean((dat$model - dat$obvs)^2, na.rm = TRUE))) } # metric_RMSE diff --git a/modules/benchmark/R/metric_cor.R b/modules/benchmark/R/metric_cor.R index cf3bfa026f3..a6af97c98a3 100644 --- a/modules/benchmark/R/metric_cor.R +++ b/modules/benchmark/R/metric_cor.R @@ -3,10 +3,10 @@ ##' @export ##' @param dat dataframe ##' @param ... ignored -##' +##' ##' @author Mike Dietze metric_cor <- function(dat, ...) { PEcAn.logger::logger.info("Metric: Correlation Coefficient") - return(stats::cor(dat$model,dat$obvs,use ="pairwise.complete.obs")) + return(stats::cor(dat$model, dat$obvs, use = "pairwise.complete.obs")) } # metric_cor diff --git a/modules/benchmark/R/metric_lmDiag_plot.R b/modules/benchmark/R/metric_lmDiag_plot.R index 2be6f50d951..f53746e6a23 100644 --- a/modules/benchmark/R/metric_lmDiag_plot.R +++ b/modules/benchmark/R/metric_lmDiag_plot.R @@ -5,74 +5,73 @@ ##' @param var ignored ##' @param filename path to save plot, or NA to not save ##' @param draw.plot logical: return plot object? -##' +##' ##' @author Betsy Cowdery metric_lmDiag_plot <- function(metric_dat, var, filename = NA, draw.plot = FALSE) { PEcAn.logger::logger.info("Metric: Linear Regression Diagnostic Plot") - + fit <- stats::lm(metric_dat[, 1] ~ metric_dat[, 2]) p1 <- ggplot2::ggplot(fit, ggplot2::aes(.data$.fitted, .data$.resid)) p1 <- p1 + ggplot2::geom_point() - p1 <- p1 + ggplot2::stat_smooth(method = "loess") + p1 <- p1 + ggplot2::stat_smooth(method = "loess") p1 <- p1 + ggplot2::geom_hline(yintercept = 0, col = "red", linetype = "dashed") - p1 <- p1 + ggplot2::xlab("Fitted values") + p1 <- p1 + ggplot2::xlab("Fitted values") p1 <- p1 + ggplot2::ylab("Residuals") - p1 <- p1 + ggplot2::ggtitle("Residual vs Fitted Plot") + p1 <- p1 + ggplot2::ggtitle("Residual vs Fitted Plot") p1 <- p1 + ggplot2::theme_bw() - - p2 <- ggplot2::ggplot(fit, ggplot2::aes(stats::qqnorm(.data$.stdresid)[[1]], .data$.stdresid)) + + p2 <- ggplot2::ggplot(fit, ggplot2::aes(stats::qqnorm(.data$.stdresid)[[1]], .data$.stdresid)) p2 <- p2 + ggplot2::geom_point(na.rm = TRUE) - p2 <- p2 + ggplot2::geom_abline(ggplot2::aes(stats::qqline(.data$.stdresid))) - p2 <- p2 + ggplot2::xlab("Theoretical Quantiles") + p2 <- p2 + ggplot2::geom_abline(ggplot2::aes(stats::qqline(.data$.stdresid))) + p2 <- p2 + ggplot2::xlab("Theoretical Quantiles") p2 <- p2 + ggplot2::ylab("Standardized Residuals") - p2 <- p2 + ggplot2::ggtitle("Normal Q-Q") + p2 <- p2 + ggplot2::ggtitle("Normal Q-Q") p2 <- p2 + ggplot2::theme_bw() - + p3 <- ggplot2::ggplot(fit, ggplot2::aes(.data$.fitted, sqrt(abs(.data$.stdresid)))) p3 <- p3 + ggplot2::geom_point(na.rm = TRUE) - p3 <- p3 + ggplot2::stat_smooth(method = "loess", na.rm = TRUE) + p3 <- p3 + ggplot2::stat_smooth(method = "loess", na.rm = TRUE) p3 <- p3 + ggplot2::xlab("Fitted Value") p3 <- p3 + ggplot2::ylab(expression(sqrt("|Standardized 
residuals|"))) - p3 <- p3 + ggplot2::ggtitle("Scale-Location") + p3 <- p3 + ggplot2::ggtitle("Scale-Location") p3 <- p3 + ggplot2::theme_bw() - - p4 <- ggplot2:: ggplot(fit, ggplot2::aes(seq_along(.data$.cooksd), .data$.cooksd)) + + p4 <- ggplot2::ggplot(fit, ggplot2::aes(seq_along(.data$.cooksd), .data$.cooksd)) p4 <- p4 + ggplot2::geom_bar(stat = "identity", position = "identity") - p4 <- p4 + ggplot2::xlab("Obs. Number") + p4 <- p4 + ggplot2::xlab("Obs. Number") p4 <- p4 + ggplot2::ylab("Cook's distance") - p4 <- p4 + ggplot2::ggtitle("Cook's distance") + p4 <- p4 + ggplot2::ggtitle("Cook's distance") p4 <- p4 + ggplot2::theme_bw() - - p5 <- ggplot2::ggplot(fit, ggplot2::aes(.data$.hat, .data$.stdresid)) + + p5 <- ggplot2::ggplot(fit, ggplot2::aes(.data$.hat, .data$.stdresid)) p5 <- p5 + ggplot2::geom_point(ggplot2::aes(size = .data$.cooksd), na.rm = TRUE) p5 <- p5 + ggplot2::stat_smooth(method = "loess", na.rm = TRUE) - p5 <- p5 + ggplot2::xlab("Leverage") + p5 <- p5 + ggplot2::xlab("Leverage") p5 <- p5 + ggplot2::ylab("Standardized Residuals") p5 <- p5 + ggplot2::ggtitle("Residual vs Leverage Plot") p5 <- p5 + ggplot2::scale_size_continuous("Cook's Distance", range = c(1, 5)) - p5 <- p5 + ggplot2::theme_bw() + p5 <- p5 + ggplot2::theme_bw() p5 <- p5 + ggplot2::theme(legend.position = "bottom") - - p6 <- ggplot2::ggplot(fit, ggplot2::aes(.data$.hat, .data$.cooksd)) - p6 <- p6 + ggplot2::geom_point(na.rm = TRUE) + + p6 <- ggplot2::ggplot(fit, ggplot2::aes(.data$.hat, .data$.cooksd)) + p6 <- p6 + ggplot2::geom_point(na.rm = TRUE) p6 <- p6 + ggplot2::stat_smooth(method = "loess", na.rm = TRUE) - p6 <- p6 + ggplot2::xlab("Leverage hii") + p6 <- p6 + ggplot2::xlab("Leverage hii") p6 <- p6 + ggplot2::ylab("Cook's Distance") p6 <- p6 + ggplot2::ggtitle("Cook's dist vs Leverage hii/(1-hii)") p6 <- p6 + ggplot2::geom_abline(slope = seq(0, 3, 0.5), color = "gray", linetype = "dashed") p6 <- p6 + ggplot2::theme_bw() - + p <- gridExtra::grid.arrange(p1, p2, p3, p4, p5, p6, nrow = 3) - - if (!is.na(filename)) { - grDevices::pdf(filename, width = 10, height = 6) - plot(p) - grDevices::dev.off() - } - + + if (!is.na(filename)) { + grDevices::pdf(filename, width = 10, height = 6) + plot(p) + grDevices::dev.off() + } + if (draw.plot) { plot(p) } - } # metric_lmDiag_plot diff --git a/modules/benchmark/R/metric_residual_plot.R b/modules/benchmark/R/metric_residual_plot.R index 38a20a65125..2b52e081c9a 100644 --- a/modules/benchmark/R/metric_residual_plot.R +++ b/modules/benchmark/R/metric_residual_plot.R @@ -4,28 +4,28 @@ ##' @param var variable name, used as plot title ##' @param filename path to save plot, or NA to not save ##' @param draw.plot logical: Return the plot object? 
-##' +##' ##' @author Betsy Cowdery ##' @export metric_residual_plot <- function(metric_dat, var, filename = NA, draw.plot = is.na(filename)) { PEcAn.logger::logger.info("Metric: Residual Plot") - + metric_dat$time <- lubridate::year(as.Date(as.character(metric_dat$time), format = "%Y")) metric_dat$diff <- abs(metric_dat$model - metric_dat$obvs) metric_dat$zeros <- rep(0, length(metric_dat$time)) - - p <- ggplot2::ggplot(data = metric_dat, ggplot2::aes(x = .data$time)) - p <- p + ggplot2::geom_path(ggplot2::aes(y = .data$zeros), colour = "#666666", size = 2, linetype = 2, lineend = "round") - p <- p + ggplot2::geom_point(ggplot2::aes(y = .data$diff), size = 4, colour = "#619CFF") + + p <- ggplot2::ggplot(data = metric_dat, ggplot2::aes(x = .data$time)) + p <- p + ggplot2::geom_path(ggplot2::aes(y = .data$zeros), colour = "#666666", size = 2, linetype = 2, lineend = "round") + p <- p + ggplot2::geom_point(ggplot2::aes(y = .data$diff), size = 4, colour = "#619CFF") p <- p + ggplot2::labs(title = var, x = "years", y = "abs(model - observation)") - + if (!is.na(filename)) { grDevices::pdf(filename, width = 10, height = 6) plot(p) grDevices::dev.off() } - + if (draw.plot) { return(p) } -} # metric_residual_plot \ No newline at end of file +} # metric_residual_plot diff --git a/modules/benchmark/R/metric_run.R b/modules/benchmark/R/metric_run.R index 40183a872e8..14476049c24 100644 --- a/modules/benchmark/R/metric_run.R +++ b/modules/benchmark/R/metric_run.R @@ -1,33 +1,32 @@ ##' Model Run Check ##' ##' @param settings list -##' +##' ##' @author Betsy Cowdery ##' @export -metric_run <- function(settings){ +metric_run <- function(settings) { + # The goal of this function is to determine if a model run has been successfully completed. + # There are three steps to determining success. - # The goal of this function is to determine if a model run has been successfully completed. - # There are three steps to determining success. - # Check the STATUS file to see if all the steps so far have completed # - If "ERROR" ==> FAIL # - Grab the error message and stage at which the workflow failed. # - If all "DONE": this means the run completed, but it could still have problems - # - Check to see if output exists - # - If output exists, + # - Check to see if output exists + # - If output exists, # - If there is no output or output is not the right length ==> FAIL - # - If output is the expected length ==> SUCCESS - # - # Open metric_run.Rdata + # - If output is the expected length ==> SUCCESS + # + # Open metric_run.Rdata # Save in metric_run.Rdata: # - run information (run ID?, do we need to look that up in the database? If so, that should happen in calc_benchmark) # - SUCCESS/FAIL # - stage of failure # - error message - # - # Calculate the benchmark metric_run score. + # + # Calculate the benchmark metric_run score. # What should this be? - # + # # Numeric? # 0 = SUCCESS # 1 = FAIL MODEL RUN, produces output @@ -35,13 +34,9 @@ metric_run <- function(settings){ # 3 = FAIL CONFIG # 4 = FAIL META # 5 = FAIL TRAIT - # + # # Text? # SUCCESS/FAIL STAGE and error message? - # + # # return(score) - } - - - diff --git a/modules/benchmark/R/metric_scatter_plot.R b/modules/benchmark/R/metric_scatter_plot.R index 751d08e53b4..9c254596694 100644 --- a/modules/benchmark/R/metric_scatter_plot.R +++ b/modules/benchmark/R/metric_scatter_plot.R @@ -4,26 +4,27 @@ ##' @param var ignored ##' @param filename path to save plot, or NA to not save ##' @param draw.plot logical: Return the plot object? 
-##' +##' ##' @author Betsy Cowdery ##' @export metric_scatter_plot <- function(metric_dat, var, filename = NA, draw.plot = is.na(filename)) { PEcAn.logger::logger.info("Metric: Scatter Plot") - - p <- ggplot2::ggplot(data = metric_dat) - p <- p + ggplot2::geom_point(ggplot2::aes(x = .data$model, y = .data$obvs), size = 4) - p <- p + ggplot2::geom_abline(slope = 1, intercept = 0, colour = "#666666", - size = 2, linetype = 2) - + + p <- ggplot2::ggplot(data = metric_dat) + p <- p + ggplot2::geom_point(ggplot2::aes(x = .data$model, y = .data$obvs), size = 4) + p <- p + ggplot2::geom_abline( + slope = 1, intercept = 0, colour = "#666666", + size = 2, linetype = 2 + ) + if (!is.na(filename)) { grDevices::pdf(filename, width = 10, height = 6) plot(p) grDevices::dev.off() } - + if (draw.plot) { return(p) } - } # metric_scatter_plot diff --git a/modules/benchmark/R/metric_timeseries_plot.R b/modules/benchmark/R/metric_timeseries_plot.R index ca879b95648..79f6afbe0d5 100644 --- a/modules/benchmark/R/metric_timeseries_plot.R +++ b/modules/benchmark/R/metric_timeseries_plot.R @@ -11,28 +11,28 @@ metric_timeseries_plot <- function(metric_dat, var, filename = NA, draw.plot = is.na(filename)) { PEcAn.logger::logger.info("Metric: Timeseries Plot") - + # Attempt at getting around the fact that time can be annual and thus as.Date won't work date.time <- try(as.Date(metric_dat$time), silent = TRUE) if (inherits(date.time, "try-error")) { PEcAn.logger::logger.warn("Can't coerce time column to Date format, attempting plot anyway") - }else{ + } else { metric_dat$time <- date.time } - - p <- ggplot(data = metric_dat, ggplot2::aes(x = .data$time)) - p <- p + labs(title = var, y = "") - p <- p + geom_path(ggplot2::aes(y = .data$model, colour = "Model"), size = 2) - p <- p + geom_point(ggplot2::aes(y = .data$model, colour = "Model"), size = 4) - p <- p + geom_path(ggplot2::aes(y = .data$obvs, colour = "Observed"), size = 2) + + p <- ggplot(data = metric_dat, ggplot2::aes(x = .data$time)) + p <- p + labs(title = var, y = "") + p <- p + geom_path(ggplot2::aes(y = .data$model, colour = "Model"), size = 2) + p <- p + geom_point(ggplot2::aes(y = .data$model, colour = "Model"), size = 4) + p <- p + geom_path(ggplot2::aes(y = .data$obvs, colour = "Observed"), size = 2) p <- p + geom_point(ggplot2::aes(y = .data$obvs, colour = "Observed"), size = 4) - + if (!is.na(filename)) { grDevices::pdf(filename, width = 10, height = 6) plot(p) grDevices::dev.off() } - + if (draw.plot) { return(p) } diff --git a/modules/benchmark/R/pecan_bench.R b/modules/benchmark/R/pecan_bench.R index 598a25c4260..812c9f040cb 100644 --- a/modules/benchmark/R/pecan_bench.R +++ b/modules/benchmark/R/pecan_bench.R @@ -1,5 +1,4 @@ pecan_bench <- function(comp_run, bench_id, imp_limit, high_limit) { - # comp_run = The results which we want to do the comparison results with. I would recommend having # the input already be in similar table format than the style the bench mark runs have been saved # as. bench_id = The ID number of the benchmark run the results are to be compared against. I @@ -7,7 +6,7 @@ pecan_bench <- function(comp_run, bench_id, imp_limit, high_limit) { # needs to be only specifically given when wanting to examine a specific benchmark comparison. # MCD2: this naming and the descriptions are ambiguous. I'd go with something like new_id and # ref_id. I think it'd be fine for both to have defaults (e.g. 
newest and best runs respectively) - + # imp_limit = The ratio of comparison values which need to be improved in order for the run to be # accepted as a new benchmark. Should be set to some default value if user doesn't want to give a # specific value. MCD2: should default to a value that never causes an insert (e.g. NULL or -1). @@ -15,30 +14,30 @@ pecan_bench <- function(comp_run, bench_id, imp_limit, high_limit) { # it, but we can set the value in the workflow. high_limit = The ratio where the increase in # value is decided to be large enough for alert. Again, should be set to some default value if # user doesn't want to give a specific value. - - + + ## MCD: Ok, the #1 thing that has to happen next is that function definitions and interfaces need ## to be flushed out more In particular we need to know if the database design has anything missing ## or any thing superfluous as well as what needs to be passed to each function vs looked up ## internally - + # I am now starting with the program by checking if the submitted run results are valid with a # subroutine. I also think that instead of having those logical checks as a part of a table, they # should be just written out in the subroutine and if the results do not fill the given standards, # the subroutine should print out the specific instances. If the submitted run fails to fill all # the given validation standards, the outcome of the subroutine should be FALSE at which point the # program is stopped. - + ## MCD2: What do you mean by 'valid'? I agree that we shouldn't run the benchmark if the run ## failed to complete, but I'm not sure what other checks you envision - + logic_check <- validation_check(comp_run) - + if (isFALSE(logic_check)) { print("The results were found to be invalid") stop() } - + # The observed values against which the runs will be compared to. Should be the same for all # benchmark runs. N here is the number of observed values. I left the read_values vague, as I am # not certain which format the data will be in. MCD: some path or index for this data file needs @@ -50,11 +49,11 @@ pecan_bench <- function(comp_run, bench_id, imp_limit, high_limit) { # the data should never be changed, but we'll likely have more than one benchmark (e.g. runs at # more than one site, more than one variable we want to track) so we need flexibility. More # generally, files should never be hardcoded since these tests will run on many different machines - + obs_file <- utils::read.table("Location of the data table") obs_value <- obs_file$value N <- size(obs_value) - + # Read the previously accepted benchmark standard run values MCD: again, this needs to be an # argument or database item. I'd favor passing it so that the function could be used to compare # different reference points as needed I do not necessarily agree on passing the item, as I was @@ -69,29 +68,29 @@ pecan_bench <- function(comp_run, bench_id, imp_limit, high_limit) { # since right now the History table is not very informative to WHY a run was done unless the # purpose of that run was recorded elsewhere. 
seems like it would be much better to have all that # in one place - + bench_file <- utils::read.table("Location of the data table", bench_id) bench_value <- bench_file$value - + # First calculate the differences between the current benchmark run and the observed values - + bench_dif <- obs_value - bench_value - + # Calculate the differences between the current run and the observed values - + comp_dif <- obs_value - comp_value - + # Determine the relative changes in differences between runs and observed values - + bench_ratio <- comp_dif / bench_dif ## MCD2: this could easily result in division by zero. You might want to compare the ratio of RMSEs ## not the individual point-wise differences - + # If uncertainties available, calculate the uncertainty for the ratio. Here I used the traditional # error propagation, but other approaches should be possible. - - bench_ratio_uncertainty <- bench_ratio * sqrt((comp_dif_uncertainty/comp_dif) ^ 2 + (ratio_dif_uncertainty/ratio_dif) ^ 2) - + + bench_ratio_uncertainty <- bench_ratio * sqrt((comp_dif_uncertainty / comp_dif)^2 + (ratio_dif_uncertainty / ratio_dif)^2) + ## MCD: We seem to be calculating this but not using it anywhere below Kevin Schaefer advocates the ## Chi-Sq metric since it allows for uncertainty See Schwalm et al 2010 eqn 2 However that doesn't ## allow for error in both the model and data. A bit of Googling suggests Welch's t may be a @@ -109,20 +108,20 @@ pecan_bench <- function(comp_run, bench_id, imp_limit, high_limit) { ## uncertainty in the model-data comparisons. Overall, the focus here should be on the model-data ## comparison, but obviously we need to check against the previous model version in order to flag ## benchmark runs where performance decreases. - + # The slight hesitation I have for this is due to not knowing if it will be only one person making # the runs and decisions, or if there is a group independently submitting sets of runs. Thus if # you had a larger number of people making their own individual decisions which are saved to the # table used by the whole group, there might be large variance on how strict the criteria was for # different accepted runs. MCD2: Good point. A group will be submitting runs independently. - + # As for the method, I am open for both methods or any other being suggested, but it is important # to note that the test would be a comparison between comp_dif and bench_dif, and thus between two # model uncertainties. Thus before actually deciding the exact test chosen for this purpose, we # need to be clear on how that uncertainty would be determined. Is it just the ensemble of runs? # Can it be considered to be normally distributed? etc. I am perhaps myself leaning on the Welch's # t-test at the moment. - + # Here we calculate the average bench_ratio and the proportional amount of components which show # improvement. Here a reduction of 0.025 as ratio was used as the indicator of the current run # doing better, but it is a whimsical choice for now. The actual value needs to be decided. An @@ -140,21 +139,21 @@ pecan_bench <- function(comp_run, bench_id, imp_limit, high_limit) { # added an additional value bench_ratio_warn, which shows the sum of values where the bench_ratio # increases too much as a potential warning. This is not a fraction in order to avoid it being # buried by a large N value. 
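  ## Rough sketch of the RMSE-ratio comparison suggested above (illustrative only:
  ## rmse() is not an existing PEcAn helper; obs_value, bench_value and comp_value
  ## are the vectors read in earlier in this function). Comparing aggregate RMSEs
  ## avoids the point-wise division-by-zero problem noted by MCD2.
  # rmse <- function(pred, obs) sqrt(mean((pred - obs)^2, na.rm = TRUE))
  # bench_ratio_rmse <- rmse(comp_value, obs_value) / rmse(bench_value, obs_value)
  ## The Welch's t-test mentioned above is the default behaviour of
  ## stats::t.test(x, y, var.equal = FALSE).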
- + bench_ratio_mean <- mean(abs(bench_ratio)) bench_ratio_count <- sum(bench_ratio < imp_limit) / N bench_ratio_high <- sum(bench_ratio > high_limit) - + # Print out both the average value and amount of comparisons where we saw improvement. Also, if we # saw too high values, print out a warning for that. - + bench_print <- c(bench_ratio_mean, bench_ratio_count) print(bench_print) - + if (bench_ratio_high > 0) { print(paste("Warning:", N, "variables showed a larger than suggested increase in value")) } - + ## MCD: the specific stats involved could vary, but they definitely need to written to database not ## screen For the benchmark report itself I'd be in favor of doing all this in a RMarkdown document ## or in Shiny so that we can combine figures, tables, text, etc and spit out a html report. In @@ -168,19 +167,19 @@ pecan_bench <- function(comp_run, bench_id, imp_limit, high_limit) { ## maybe any time the model changed non-trivially. It would also be good to be able to show ## multi-model taylor plots -- e.g. the current, the reference, and the 'best' for all models that ## have run that same benchmark - + # Here we determine if the current run performs better than the previous benchmark run. The ratio # of 0.975 is set as the limit for ratio reduction for the bench mark to be successful. # Additionally we require that a certain number of control variables need to show improvement. # Here that number was chosen to b 80 % of the sites. Improvement in uncertainty can be included # in several ways, not touched on here yet. - + # If the current runs performed better, it is stored as the new bench mark. Additionally, we # calculate the - + if (bench_ratio_mean < imp_limit & bench_ratio_count > 0.8) { - #Set_Bench(...) #Subroutine which sets the current run to be the new benchmark comparison run as well as saves the previous benchmark comparison run a database - #Compare_Bench(...) #Subroutine which calculates a new vector of values which are essentially the benchmark ratios between old runs and the current comparison run. + # Set_Bench(...) #Subroutine which sets the current run to be the new benchmark comparison run as well as saves the previous benchmark comparison run a database + # Compare_Bench(...) #Subroutine which calculates a new vector of values which are essentially the benchmark ratios between old runs and the current comparison run. } ## MCD: I wonder if this should be automatic (i.e. every time the model does better it becomes the ## new benchmark) or whether it should be something where the user is prompted to confirm that we diff --git a/modules/benchmark/inst/scripts/benchmark.workflow.FATES_BCI.R b/modules/benchmark/inst/scripts/benchmark.workflow.FATES_BCI.R index 7697cd65205..b6da598dba4 100644 --- a/modules/benchmark/inst/scripts/benchmark.workflow.FATES_BCI.R +++ b/modules/benchmark/inst/scripts/benchmark.workflow.FATES_BCI.R @@ -4,9 +4,9 @@ ##' Modified by Mike Dietze for FATES BCI example ## ----- setup: these are not necessary but make testing easier ----- ## -rm(list = setdiff(ls(), lsf.str())) # clear environment except for sourced functions +rm(list = setdiff(ls(), lsf.str())) # clear environment except for sourced functions # rm(list= ls()[!(ls() %in% c('objects you want to save'))] # clear environment except for ... 
-for (i in dbListConnections(PostgreSQL())) db.close(i) #close any stray database connections +for (i in dbListConnections(PostgreSQL())) db.close(i) # close any stray database connections options(digits = 10) # just to make things easier to read ## ----- setup: these are not necessary but make testing easier ----- ## @@ -19,7 +19,7 @@ bety <- PEcAn.DB::betyConnect("web/config.php") settings.file <- "/fs/data2/output//PEcAn_1000002914/pecan.CONFIGS.xml" settings <- PEcAn.settings::read.settings(settings.file) -## *** set up the benchmarks *** +## *** set up the benchmarks *** ## To do this you need to first register the input data with the database ## which may also require registering a format ## 1) Load data onto server @@ -28,20 +28,20 @@ settings <- PEcAn.settings::read.settings(settings.file) input_id <- 1000011171 ## 4) Edit Input to associate File ## 5) Verify that PEcAn is able to find and load file -input <- PEcAn.DB::query.file.path(input_id,host_name = "localhost",con = bety) -format <- PEcAn.DB::query.format.vars(bety,input_id) -field <- PEcAn.benchmark::load_data(input,format) +input <- PEcAn.DB::query.file.path(input_id, host_name = "localhost", con = bety) +format <- PEcAn.DB::query.format.vars(bety, input_id) +field <- PEcAn.benchmark::load_data(input, format) ## 6) Look up variable_id in database ## 7) Look up metric_id (web interface in progress, for now run: -metrics <- tbl(bety,"metrics") +metrics <- tbl(bety, "metrics") View(metrics) ## define benchmark in settings settings$benchmarking <- list( - benchmark=list( - input_id = input_id, - variable_id = 1000000132, - site_id = as.numeric(settings$run$site$id), - metrics = c(1000000004,1000000001,1000000009,1000000010,1000000011) + benchmark = list( + input_id = input_id, + variable_id = 1000000132, + site_id = as.numeric(settings$run$site$id), + metrics = c(1000000004, 1000000001, 1000000009, 1000000010, 1000000011) ), ensemble_id = as.numeric(settings$ensemble$ensemble.id), info = settings$info @@ -50,33 +50,33 @@ settings$benchmarking <- list( ## Brown dog conversion test (part of #5) ## remember to enter your own username and password! 
output_path <- getwd() -key <- BrownDog::get_key("https://bd-api.ncsa.illinois.edu",username,password) -token <- BrownDog::get_token("https://bd-api.ncsa.illinois.edu",key) -foo <- BrownDog::convert_file("https://bd-api.ncsa.illinois.edu", input,"csv", output_path, token,wait=900) +key <- BrownDog::get_key("https://bd-api.ncsa.illinois.edu", username, password) +token <- BrownDog::get_token("https://bd-api.ncsa.illinois.edu", key) +foo <- BrownDog::convert_file("https://bd-api.ncsa.illinois.edu", input, "csv", output_path, token, wait = 900) -bm.settings <- define_benchmark(bm.settings = settings$benchmarking,bety) -bm.settings$new_run=FALSE +bm.settings <- define_benchmark(bm.settings = settings$benchmarking, bety) +bm.settings$new_run <- FALSE ## Now that the Benchmark is setup, verify that benchmark metrics can be calculated -bm_settings2pecan_settings <- function(bm.settings){ +bm_settings2pecan_settings <- function(bm.settings) { if (PEcAn.settings::is.MultiSettings(bm.settings)) { return(papply(bm.settings, bm_settings2pecan_settings)) } - return(append(bm.settings["reference_run_id"], - bm.settings$benchmark[which(names(bm.settings$benchmark) == "benchmark_id")])) -} + return(append( + bm.settings["reference_run_id"], + bm.settings$benchmark[which(names(bm.settings$benchmark) == "benchmark_id")] + )) +} new.settings <- settings new.settings$benchmarking <- bm_settings2pecan_settings(bm.settings) -if(bm.settings$new_run){ - write.settings(new.settings,pecan.xml,outputdir = settings$outdir) +if (bm.settings$new_run) { + write.settings(new.settings, pecan.xml, outputdir = settings$outdir) # Run the workflow! YAY - settings <- read.settings(file.path(new.settings$outdir,"pecan.CHECKED.xml")) - results <- load(file.path(new.settings$outdir,"benchmarking.output.Rdata")) - -}else{ - + settings <- read.settings(file.path(new.settings$outdir, "pecan.CHECKED.xml")) + results <- load(file.path(new.settings$outdir, "benchmarking.output.Rdata")) +} else { new.settings <- read_settings_BRR(new.settings) new.settings <- PEcAn.settings::prepare.settings(new.settings) results <- papply(new.settings, function(x) calc_benchmark(x, bety)) diff --git a/modules/benchmark/inst/scripts/benchmark.workflow.R b/modules/benchmark/inst/scripts/benchmark.workflow.R index d18fd69abcf..d94b0e2a10b 100644 --- a/modules/benchmark/inst/scripts/benchmark.workflow.R +++ b/modules/benchmark/inst/scripts/benchmark.workflow.R @@ -23,16 +23,13 @@ settings$benchmarking <- bm_settings2pecan_settings(bm.settings) ################################################################################ -if(bm.settings$new_run){ - +if (bm.settings$new_run) { # This section isn't ready yet # write.settings(settings = settings, outputfile = "pecan.xml", outputdir = settings$outdir) # # Run the workflow! 
YAY # settings <- read.settings(file.path(settings$outdir,"pecan.CHECKED.xml")) # results <- load(file.path(settings$outdir,"benchmarking.output.Rdata")) - -}else{ - +} else { settings <- read_settings_BRR(settings) sprintf("MODEL: %s", settings$model$type) diff --git a/modules/benchmark/tests/testthat.R b/modules/benchmark/tests/testthat.R index 7134207c19a..5f664df868b 100644 --- a/modules/benchmark/tests/testthat.R +++ b/modules/benchmark/tests/testthat.R @@ -1,4 +1,4 @@ library(testthat) library(PEcAn.benchmark) -#test_check("PEcAn.benchmark") +# test_check("PEcAn.benchmark") diff --git a/modules/benchmark/tests/testthat/test-align_pft.R b/modules/benchmark/tests/testthat/test-align_pft.R index 8098f2baf4f..035eb375a3e 100644 --- a/modules/benchmark/tests/testthat/test-align_pft.R +++ b/modules/benchmark/tests/testthat/test-align_pft.R @@ -1,10 +1,10 @@ - context("align_pft") con <- PEcAn.DB::db.open(PEcAn.DB::get_postgres_envvars( - host = "localhost", - user = "bety", - password = "bety")) + host = "localhost", + user = "bety", + password = "bety" +)) teardown(PEcAn.DB::db.close(con)) observation_one <- c("AMCA3", "AMCA3", "AMCA3", "AMCA3") @@ -15,11 +15,11 @@ format_two <- "plant_functional_type" table <- data.frame( plant_functional_type_one = c("AMCA3", "AMCA3", "ARHY", "ARHY"), plant_functional_type_two = c("a", "a", "b", "b"), # PFT groupings - stringsAsFactors = FALSE) + stringsAsFactors = FALSE +) test_that("align_data_to_data_pft", { - expected <- list( bety_species_id = list( observation_one = data.frame( @@ -27,13 +27,18 @@ test_that("align_data_to_data_pft", { bety_species_id = rep(23463, 4), genus = rep("Amaranthus", 4), species = rep("", 4), - stringsAsFactors = FALSE), - observation_two = NA), + stringsAsFactors = FALSE + ), + observation_two = NA + ), original = list( observation_one = observation_one, - observation_two = observation_two), + observation_two = observation_two + ), aligned = list( - aligned_by_observation_one = c("AMCA3", "ARHY", "AMCA3", "AMCA3"))) + aligned_by_observation_one = c("AMCA3", "ARHY", "AMCA3", "AMCA3") + ) + ) aligned_d2d <- align_data_to_data_pft( con = con, @@ -41,7 +46,8 @@ test_that("align_data_to_data_pft", { observation_two = observation_two, format_one = format_one, format_two = format_two, - custom_table = table) + custom_table = table + ) expect_identical(aligned_d2d, expected) @@ -52,8 +58,8 @@ test_that("align_data_to_data_pft", { format_one = format_one, format_two = format_two, custom_table = table, - comparison_type = "data_to_data") + comparison_type = "data_to_data" + ) expect_identical(aligned_generic, aligned_d2d) - }) diff --git a/modules/data.atmosphere/R/ERA5_met_process.R b/modules/data.atmosphere/R/ERA5_met_process.R index d483be8f1b1..589351f3fee 100644 --- a/modules/data.atmosphere/R/ERA5_met_process.R +++ b/modules/data.atmosphere/R/ERA5_met_process.R @@ -8,60 +8,65 @@ #' #' @return if write.db is True then return input IDs with physical paths; if write.db is False then return just physical paths of extracted ERA5 clim files. #' @export -#' +#' #' @author Dongchen Zhang #' @importFrom dplyr %>% #' -ERA5_met_process <- function(settings, in.path, out.path, write.db=FALSE, write = TRUE){ - #Initialize the multicore computation. +ERA5_met_process <- function(settings, in.path, out.path, write.db = FALSE, write = TRUE) { + # Initialize the multicore computation. 
if (future::supportsMulticore()) { future::plan(future::multicore) } else { future::plan(future::multisession) } - - #getting site info - #grab the site info from Bety DB if we can't get the site info directly from the settings object. - if ("try-error" %in% class(try(site_info <- settings$run %>% - purrr::map('site')%>% - purrr::map(function(site.list){ - #conversion from string to number - site.list$lat <- as.numeric(site.list$lat) - site.list$lon <- as.numeric(site.list$lon) - list(site.id=site.list$id, lat=site.list$lat, lon=site.list$lon, site_name=site.list$name) - }) %>% - dplyr::bind_rows() %>% - as.list()))) { - #getting site ID + + # getting site info + # grab the site info from Bety DB if we can't get the site info directly from the settings object. + if ("try-error" %in% class(try(site_info <- settings$run %>% + purrr::map("site") %>% + purrr::map(function(site.list) { + # conversion from string to number + site.list$lat <- as.numeric(site.list$lat) + site.list$lon <- as.numeric(site.list$lon) + list(site.id = site.list$id, lat = site.list$lat, lon = site.list$lon, site_name = site.list$name) + }) %>% + dplyr::bind_rows() %>% + as.list()))) { + # getting site ID observations <- c() for (i in 1:length(settings)) { obs <- settings[[i]]$run$site$id - observations <- c(observations,obs) + observations <- c(observations, obs) } - - #query site info - bety <- dplyr::src_postgres(dbname = settings$database$bety$dbname, - host = settings$database$bety$host, - user = settings$database$bety$user, - password = settings$database$bety$password) + + # query site info + bety <- dplyr::src_postgres( + dbname = settings$database$bety$dbname, + host = settings$database$bety$host, + user = settings$database$bety$user, + password = settings$database$bety$password + ) con <- bety$con site_ID <- observations suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) - suppressWarnings(qry_results <- PEcAn.DB::db.query(con = con, query = site_qry))#use PEcAn.DB instead - site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) + ids = site_ID, .con = con + )) + suppressWarnings(qry_results <- PEcAn.DB::db.query(con = con, query = site_qry)) # use PEcAn.DB instead + site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone + ) } - - #initialize db query elements - if(write.db){ + + # initialize db query elements + if (write.db) { mimetype <- "application/x-netcdf" formatname <- "CF Meteorology" hostname <- PEcAn.remote::fqdn() # find mimetype, if it does not exist, it will create one mimetypeid <- PEcAn.DB::get.id("mimetypes", "type_string", mimetype, con, create = TRUE) - + # find appropriate format, create if it does not exist formatid <- PEcAn.DB::get.id( table = "formats", @@ -71,131 +76,135 @@ ERA5_met_process <- function(settings, in.path, out.path, write.db=FALSE, write create = TRUE, dates = TRUE ) - + # setup parent part of query if specified parent <- "" - - #initialize Input_IDs object when looping over each site + + # initialize Input_IDs object when looping over each site Input_IDs <- list() } - - #restructure the site_info into list. + + # restructure the site_info into list. 
site_info$start_date <- start_date <- rep(settings$state.data.assimilation$start.date, length(settings)) site_info$end_date <- end_date <- rep(settings$state.data.assimilation$end.date, length(settings)) site_info$out.path <- rep(out.path, length(settings)) site_info$in.path <- rep(in.path, length(settings)) site_info$model.type <- rep(settings$model$type, length(settings)) new.site.info <- split(as.data.frame(site_info), seq(nrow(as.data.frame(site_info)))) - - #Extract ERA5 for each site. + + # Extract ERA5 for each site. PEcAn.logger::logger.info("Started extracting ERA5 data!\n") - Clim_paths <- furrr::future_map(new.site.info, function(site){ - #check if sub-folder exists, if doesn't then create a new folder specific for each site - site_outFolder <- paste0(site$out.path,'/', site$site.id) - #check if folder already exists, if it does, then jump to the next loop - if(!file.exists(site_outFolder)){ + Clim_paths <- furrr::future_map(new.site.info, function(site) { + # check if sub-folder exists, if doesn't then create a new folder specific for each site + site_outFolder <- paste0(site$out.path, "/", site$site.id) + # check if folder already exists, if it does, then jump to the next loop + if (!file.exists(site_outFolder)) { dir.create(site_outFolder) - }else{ - #grab physical paths of existing ERA5 files - #need to be generalized when more models come in. - clim.paths <- list(in.path=list.files(path=site_outFolder, pattern = '*.clim', full.names = T)) + } else { + # grab physical paths of existing ERA5 files + # need to be generalized when more models come in. + clim.paths <- list(in.path = list.files(path = site_outFolder, pattern = "*.clim", full.names = T)) names(clim.paths) <- site$site.id return(clim.paths) } - - #extract ERA5.nc files - PEcAn.data.atmosphere::extract.nc.ERA5(slat = site$lat, - slon = site$lon, - in.path = site$in.path, - start_date = site$start_date, - end_date = site$end_date, - outfolder = site_outFolder, - in.prefix = 'ERA5_', - newsite = as.character(site$site.id)) - - #starting working on met2model.model function over each ensemble - #setting up met2model function depending on model name from settings + + # extract ERA5.nc files + PEcAn.data.atmosphere::extract.nc.ERA5( + slat = site$lat, + slon = site$lon, + in.path = site$in.path, + start_date = site$start_date, + end_date = site$end_date, + outfolder = site_outFolder, + in.prefix = "ERA5_", + newsite = as.character(site$site.id) + ) + + # starting working on met2model.model function over each ensemble + # setting up met2model function depending on model name from settings met2model_method <- do.call("::", list(paste0("PEcAn.", site$model.type), paste0("met2model.", site$model.type))) - #grab the rbind.xts function + # grab the rbind.xts function rbind.xts <- do.call("::", list("xts", "rbind.xts")) - #find every path associated with each ensemble member + # find every path associated with each ensemble member ens_nc <- list.files(path = site_outFolder, full.names = T) - #loop over each ensemble member + # loop over each ensemble member for (i in 1:length(ens_nc)) { nc_path <- ens_nc[i] - - #find a proper in prefix for each ensemble member - ens_num <- strsplit(basename(nc_path),"_")[[1]][3] + + # find a proper in prefix for each ensemble member + ens_num <- strsplit(basename(nc_path), "_")[[1]][3] in_prefix <- paste0("ERA5.", ens_num) - - #preparing for the met2model.SIPNET function - met2model_method(in.path = nc_path, - in.prefix = in_prefix, - outfolder = site_outFolder, - start_date = site$start_date, - 
end_date = site$end_date) + + # preparing for the met2model.SIPNET function + met2model_method( + in.path = nc_path, + in.prefix = in_prefix, + outfolder = site_outFolder, + start_date = site$start_date, + end_date = site$end_date + ) } # grab physical paths of ERA5 files - clim.paths <- list(in.path=list.files(path=site_outFolder, pattern = '*.clim', full.names = T)) + clim.paths <- list(in.path = list.files(path = site_outFolder, pattern = "*.clim", full.names = T)) names(clim.paths) <- site$site.id return(clim.paths) }, .progress = TRUE) PEcAn.logger::logger.info("\nFinished!") - - #write the paths into settings. + + # write the paths into settings. if (write) { - #write paths into settings. + # write paths into settings. for (i in seq_along(settings)) { - #fill in dates related to met files. - settings[[i]]$run$site$met.start <- - settings[[i]]$run$start.date <- + # fill in dates related to met files. + settings[[i]]$run$site$met.start <- + settings[[i]]$run$start.date <- settings[[i]]$state.data.assimilation$start.date - settings[[i]]$run$site$met.end <- - settings[[i]]$run$end.date <- + settings[[i]]$run$site$met.end <- + settings[[i]]$run$end.date <- settings[[i]]$state.data.assimilation$end.date settings[[i]]$run$inputs$met$path <- as.list(unlist(Clim_paths[[i]])) %>% purrr::set_names(rep("path", length(Clim_paths[[i]]))) } - - #write settings into xml file. + + # write settings into xml file. PEcAn.logger::logger.info(paste0("Write updated pecan.xml file into: ", file.path(settings$outdir, "pecan.xml"))) PEcAn.settings::write.settings(settings, outputfile = "pecan.xml") } - - #write into bety - if(write.db){ + + # write into bety + if (write.db) { PEcAn.logger::logger.info("Write into database!") - #loop over each site + # loop over each site for (i in 1:length(site_info$site_id)) { - #loop over each ensemble - #initialize arrays to store input and dbfile IDs. + # loop over each ensemble + # initialize arrays to store input and dbfile IDs. 
dbfile_IDs <- c() input_IDs <- c() - for(j in 1:length(Clim_paths[[i]])){ - #create input record for each ensemble member - #insert into inputs table + for (j in 1:length(Clim_paths[[i]])) { + # create input record for each ensemble member + # insert into inputs table cmd <- paste0( "INSERT INTO inputs ", "(site_id, format_id, start_date, end_date, name) VALUES (", - site_info$site_id[i], ", ", formatid, ", '", start_date, "', '", end_date, "','", paste0('ERA5_',site_info$site_id[i],"_",as.character(j)), + site_info$site_id[i], ", ", formatid, ", '", start_date, "', '", end_date, "','", paste0("ERA5_", site_info$site_id[i], "_", as.character(j)), "') RETURNING id" ) # This is the id that we just registered inputid <- PEcAn.DB::db.query(query = cmd, con = con) input_IDs <- c(input_IDs, inputid) - - #create dbfiles associated with each ensemble ID + + # create dbfiles associated with each ensemble ID dbfileid <- PEcAn.DB::dbfile.insert( in.path = Clim_paths[[i]][j], in.prefix = paste0("ERA5.", as.character(j)), type = "Input", id = inputid, con = con, reuse = TRUE, hostname = hostname ) dbfile_IDs <- c(dbfile_IDs, dbfileid) } - Input_IDs[[i]] <- list(input_ID=inputid$id, dbfile_IDs=dbfile_IDs, Site_ID=site_info$site_id[i], in.path=Clim_paths[[i]]) + Input_IDs[[i]] <- list(input_ID = inputid$id, dbfile_IDs = dbfile_IDs, Site_ID = site_info$site_id[i], in.path = Clim_paths[[i]]) } - save(Input_IDs, file=paste0(out.path, '/', 'Inputs.RData')) + save(Input_IDs, file = paste0(out.path, "/", "Inputs.RData")) return(Input_IDs) - }else{ - save(Clim_paths, file=paste0(out.path, '/', 'Inputs.RData')) + } else { + save(Clim_paths, file = paste0(out.path, "/", "Inputs.RData")) return(Clim_paths) } -} \ No newline at end of file +} diff --git a/modules/data.atmosphere/R/GEFS_helper_functions.R b/modules/data.atmosphere/R/GEFS_helper_functions.R index 754580ae0da..a4c5953a57e 100644 --- a/modules/data.atmosphere/R/GEFS_helper_functions.R +++ b/modules/data.atmosphere/R/GEFS_helper_functions.R @@ -7,171 +7,174 @@ #' @param model_name_raw model name for directory creation #' @param end_hr end hr to determine how many hours to download #' @param output_directory output directory -#' +#' #' @export #' #' @return NA #' noaa_grid_download <- function(lat_list, lon_list, forecast_time, forecast_date, model_name_raw, output_directory, end_hr) { - - - download_grid <- function(ens_index, location, directory, hours_char, cycle, base_filename1, vars,working_directory){ - #for(j in 1:31){ - if(ens_index == 1){ - base_filename2 <- paste0("gec00",".t",cycle,"z.pgrb2a.0p50.f") + download_grid <- function(ens_index, location, directory, hours_char, cycle, base_filename1, vars, working_directory) { + # for(j in 1:31){ + if (ens_index == 1) { + base_filename2 <- paste0("gec00", ".t", cycle, "z.pgrb2a.0p50.f") curr_hours <- hours_char[hours <= 384] - }else{ - if((ens_index-1) < 10){ - ens_name <- paste0("0",ens_index - 1) - }else{ - ens_name <- as.character(ens_index -1) + } else { + if ((ens_index - 1) < 10) { + ens_name <- paste0("0", ens_index - 1) + } else { + ens_name <- as.character(ens_index - 1) } - base_filename2 <- paste0("gep",ens_name,".t",cycle,"z.pgrb2a.0p50.f") + base_filename2 <- paste0("gep", ens_name, ".t", cycle, "z.pgrb2a.0p50.f") curr_hours <- hours_char } - - - for(i in 1:length(curr_hours)){ + + + for (i in 1:length(curr_hours)) { file_name <- paste0(base_filename2, curr_hours[i]) - - destfile <- paste0(working_directory,"/", file_name,".grib") - - if(file.exists(destfile)){ - + + destfile <- 
paste0(working_directory, "/", file_name, ".grib") + + if (file.exists(destfile)) { fsz <- file.info(destfile)$size gribf <- file(destfile, "rb") - fsz4 <- fsz-4 - seek(gribf,where = fsz4,origin = "start") - last4 <- readBin(gribf,"raw",4) - if(as.integer(last4[1])==55 & as.integer(last4[2])==55 & as.integer(last4[3])==55 & as.integer(last4[4])==55) { + fsz4 <- fsz - 4 + seek(gribf, where = fsz4, origin = "start") + last4 <- readBin(gribf, "raw", 4) + if (as.integer(last4[1]) == 55 & as.integer(last4[2]) == 55 & as.integer(last4[3]) == 55 & as.integer(last4[4]) == 55) { download_file <- FALSE } else { download_file <- TRUE } close(gribf) - - }else{ + } else { download_file <- TRUE } - - if(download_file){ - - out <- tryCatch(utils::download.file(paste0(base_filename1, file_name, vars, location, directory), - destfile = destfile, quiet = TRUE), - error = function(e){ - warning(paste(e$message, "skipping", file_name), - call. = FALSE) - return(NA) - }, - finally = NULL) - - if(is.na(out)) next + + if (download_file) { + out <- tryCatch( + utils::download.file(paste0(base_filename1, file_name, vars, location, directory), + destfile = destfile, quiet = TRUE + ), + error = function(e) { + warning(paste(e$message, "skipping", file_name), + call. = FALSE + ) + return(NA) + }, + finally = NULL + ) + + if (is.na(out)) next } } } - + model_dir <- file.path(output_directory, model_name_raw) - + curr_time <- lubridate::with_tz(Sys.time(), tzone = "UTC") curr_date <- lubridate::as_date(curr_time) - - noaa_page <- readLines('https://nomads.ncep.noaa.gov/pub/data/nccf/com/gens/prod/') - + + noaa_page <- readLines("https://nomads.ncep.noaa.gov/pub/data/nccf/com/gens/prod/") + potential_dates <- NULL - for(i in 1:length(noaa_page)){ - if(stringr::str_detect(noaa_page[i], ">gefs.")){ + for (i in 1:length(noaa_page)) { + if (stringr::str_detect(noaa_page[i], ">gefs.")) { end <- stringr::str_locate(noaa_page[i], ">gefs.")[2] - dates <- stringr::str_sub(noaa_page[i], start = end+1, end = end+8) + dates <- stringr::str_sub(noaa_page[i], start = end + 1, end = end + 8) potential_dates <- c(potential_dates, dates) } } - - - last_cycle_page <- readLines(paste0('https://nomads.ncep.noaa.gov/pub/data/nccf/com/gens/prod/gefs.', dplyr::last(potential_dates))) - + + + last_cycle_page <- readLines(paste0("https://nomads.ncep.noaa.gov/pub/data/nccf/com/gens/prod/gefs.", dplyr::last(potential_dates))) + potential_cycle <- NULL - for(i in 1:length(last_cycle_page)){ - if(stringr::str_detect(last_cycle_page[i], 'href=\"')){ + for (i in 1:length(last_cycle_page)) { + if (stringr::str_detect(last_cycle_page[i], 'href=\"')) { end <- stringr::str_locate(last_cycle_page[i], 'href=\"')[2] - cycles <- stringr::str_sub(last_cycle_page[i], start = end+1, end = end+2) - if(cycles %in% c("00","06", "12", "18")){ + cycles <- stringr::str_sub(last_cycle_page[i], start = end + 1, end = end + 2) + if (cycles %in% c("00", "06", "12", "18")) { potential_cycle <- c(potential_cycle, cycles) } } } - + potential_dates <- lubridate::as_date(potential_dates) - - potential_dates = potential_dates[which(potential_dates == forecast_date)] - - if(length(potential_dates) == 0){PEcAn.logger::logger.error("Forecast Date not available")} - - - location <- paste0("&subregion=&leftlon=", - floor(min(lon_list)), - "&rightlon=", - ceiling(max(lon_list)), - "&toplat=", - ceiling(max(lat_list)), - "&bottomlat=", - floor(min(lat_list))) - + + potential_dates <- potential_dates[which(potential_dates == forecast_date)] + + if (length(potential_dates) == 0) { + 
PEcAn.logger::logger.error("Forecast Date not available") + } + + + location <- paste0( + "&subregion=&leftlon=", + floor(min(lon_list)), + "&rightlon=", + ceiling(max(lon_list)), + "&toplat=", + ceiling(max(lat_list)), + "&bottomlat=", + floor(min(lat_list)) + ) + base_filename1 <- "https://nomads.ncep.noaa.gov/cgi-bin/filter_gefs_atmos_0p50a.pl?file=" vars <- "&lev_10_m_above_ground=on&lev_2_m_above_ground=on&lev_surface=on&lev_entire_atmosphere=on&var_APCP=on&var_DLWRF=on&var_DSWRF=on&var_PRES=on&var_RH=on&var_TMP=on&var_UGRD=on&var_VGRD=on&var_TCDC=on" - - for(i in 1:length(potential_dates)){ - + + for (i in 1:length(potential_dates)) { forecast_date <- lubridate::as_date(potential_dates[i]) - forecast_hours = as.numeric(forecast_time) - - - for(j in 1:length(forecast_hours)){ + forecast_hours <- as.numeric(forecast_time) + + + for (j in 1:length(forecast_hours)) { cycle <- forecast_hours[j] - - if(cycle < 10) cycle <- paste0("0",cycle) - - model_date_hour_dir <- file.path(model_dir,forecast_date,cycle) - if(!dir.exists(model_date_hour_dir)){ - dir.create(model_date_hour_dir, recursive=TRUE, showWarnings = FALSE) + + if (cycle < 10) cycle <- paste0("0", cycle) + + model_date_hour_dir <- file.path(model_dir, forecast_date, cycle) + if (!dir.exists(model_date_hour_dir)) { + dir.create(model_date_hour_dir, recursive = TRUE, showWarnings = FALSE) } - + new_download <- TRUE - - if(new_download){ - + + if (new_download) { print(paste("Downloading", forecast_date, cycle)) - - if(cycle == "00"){ - hours <- c(seq(0, 240, 3),seq(246, 384, 6)) - hours <- hours[hours<=end_hr] - }else{ - hours <- c(seq(0, 240, 3),seq(246, min(end_hr, 840) , 6)) + + if (cycle == "00") { + hours <- c(seq(0, 240, 3), seq(246, 384, 6)) + hours <- hours[hours <= end_hr] + } else { + hours <- c(seq(0, 240, 3), seq(246, min(end_hr, 840), 6)) } hours_char <- hours - hours_char[which(hours < 100)] <- paste0("0",hours[which(hours < 100)]) - hours_char[which(hours < 10)] <- paste0("0",hours_char[which(hours < 10)]) + hours_char[which(hours < 100)] <- paste0("0", hours[which(hours < 100)]) + hours_char[which(hours < 10)] <- paste0("0", hours_char[which(hours < 10)]) curr_year <- lubridate::year(forecast_date) curr_month <- lubridate::month(forecast_date) - if(curr_month < 10) curr_month <- paste0("0",curr_month) + if (curr_month < 10) curr_month <- paste0("0", curr_month) curr_day <- lubridate::day(forecast_date) - if(curr_day < 10) curr_day <- paste0("0",curr_day) - curr_date <- paste0(curr_year,curr_month,curr_day) - directory <- paste0("&dir=%2Fgefs.",curr_date,"%2F",cycle,"%2Fatmos%2Fpgrb2ap5") - + if (curr_day < 10) curr_day <- paste0("0", curr_day) + curr_date <- paste0(curr_year, curr_month, curr_day) + directory <- paste0("&dir=%2Fgefs.", curr_date, "%2F", cycle, "%2Fatmos%2Fpgrb2ap5") + ens_index <- 1:31 - - parallel::mclapply(X = ens_index, - FUN = download_grid, - location, - directory, - hours_char, - cycle, - base_filename1, - vars, - working_directory = model_date_hour_dir, - mc.cores = 1) - }else{ + + parallel::mclapply( + X = ens_index, + FUN = download_grid, + location, + directory, + hours_char, + cycle, + base_filename1, + vars, + working_directory = model_date_hour_dir, + mc.cores = 1 + ) + } else { print(paste("Existing", forecast_date, cycle)) } } @@ -185,13 +188,13 @@ noaa_grid_download <- function(lat_list, lon_list, forecast_time, forecast_date, #' @param downscale Logical. Default is TRUE. Downscales from 6hr to hourly #' @param overwrite Logical. Default is FALSE. 
Should exisiting files be overwritten #' @param forecast_date Date for download -#' @param forecast_time Time (0,6,12,18) for start of download +#' @param forecast_time Time (0,6,12,18) for start of download #' @param model_name Name of model for file name #' @param model_name_ds Name of downscale file name #' @param model_name_raw Name of raw file name -#' @param output_directory Output directory -#' @importFrom rlang .data -#' +#' @param output_directory Output directory +#' @importFrom rlang .data +#' #' @export #' @return List #' @@ -206,116 +209,118 @@ process_gridded_noaa_download <- function(lat_list, model_name, model_name_ds, model_name_raw, - output_directory){ - #binding variables + output_directory) { + # binding variables NOAA.member <- NULL - extract_sites <- function(ens_index, hours_char, hours, cycle, site_id, lat_list, lon_list, working_directory){ - + extract_sites <- function(ens_index, hours_char, hours, cycle, site_id, lat_list, lon_list, working_directory) { site_length <- length(site_id) tmp2m <- array(NA, dim = c(site_length, length(hours_char))) rh2m <- array(NA, dim = c(site_length, length(hours_char))) - ugrd10m <- array(NA, dim = c(site_length,length(hours_char))) + ugrd10m <- array(NA, dim = c(site_length, length(hours_char))) vgrd10m <- array(NA, dim = c(site_length, length(hours_char))) pressfc <- array(NA, dim = c(site_length, length(hours_char))) apcpsfc <- array(NA, dim = c(site_length, length(hours_char))) tcdcclm <- array(NA, dim = c(site_length, length(hours_char))) dlwrfsfc <- array(NA, dim = c(site_length, length(hours_char))) dswrfsfc <- array(NA, dim = c(site_length, length(hours_char))) - - if(ens_index == 1){ - base_filename2 <- paste0("gec00",".t",cycle,"z.pgrb2a.0p50.f") - }else{ - if(ens_index-1 < 10){ - ens_name <- paste0("0",ens_index-1) - }else{ - ens_name <- as.character(ens_index-1) + + if (ens_index == 1) { + base_filename2 <- paste0("gec00", ".t", cycle, "z.pgrb2a.0p50.f") + } else { + if (ens_index - 1 < 10) { + ens_name <- paste0("0", ens_index - 1) + } else { + ens_name <- as.character(ens_index - 1) } - base_filename2 <- paste0("gep",ens_name,".t",cycle,"z.pgrb2a.0p50.f") + base_filename2 <- paste0("gep", ens_name, ".t", cycle, "z.pgrb2a.0p50.f") } - - lats <- round(lat_list/.5)*.5 - lons <- round(lon_list/.5)*.5 - - if(lons < 0){ + + lats <- round(lat_list / .5) * .5 + lons <- round(lon_list / .5) * .5 + + if (lons < 0) { lons <- 360 + lons } curr_hours <- hours_char - - for(hr in 1:length(curr_hours)){ + + for (hr in 1:length(curr_hours)) { file_name <- paste0(base_filename2, curr_hours[hr]) - grib_file_name <- paste0(working_directory,"/", file_name,".grib") - - if(file.exists(grib_file_name)){ + grib_file_name <- paste0(working_directory, "/", file_name, ".grib") + + if (file.exists(grib_file_name)) { grib_data <- terra::rast(grib_file_name) - + ## Convert to data frame - grib_data_df <- terra::as.data.frame(grib_data, xy=TRUE) + grib_data_df <- terra::as.data.frame(grib_data, xy = TRUE) lat_lon <- grib_data_df[, c("x", "y")] - - for(s in 1:length(site_id)){ - - index <- which(lat_lon[,2] == lats[s] & lat_lon[,1] == lons[s]) - - pressfc[s, hr] <- grib_data_df$`SFC=Ground or water surface; Pressure [Pa]`[index] - tmp2m[s, hr] <- grib_data_df$`2[m] HTGL=Specified height level above ground; Temperature [C]`[index] - rh2m[s, hr] <- grib_data_df$`2[m] HTGL=Specified height level above ground; Relative humidity [%]`[index] - ugrd10m[s, hr] <- grib_data_df$`10[m] HTGL=Specified height level above ground; u-component of wind 
[m/s]`[index] - vgrd10m[s, hr] <- grib_data_df$`10[m] HTGL=Specified height level above ground; v-component of wind [m/s]`[index] - - if(curr_hours[hr] != "000"){ - apcpsfc[s, hr] <- grib_data_df$`SFC=Ground or water surface; 03 hr Total precipitation [kg/(m^2)]`[index] - tcdcclm[s, hr] <- grib_data_df$`RESERVED(10) (Reserved); Total cloud cover [%]`[index] + + for (s in 1:length(site_id)) { + index <- which(lat_lon[, 2] == lats[s] & lat_lon[, 1] == lons[s]) + + pressfc[s, hr] <- grib_data_df$`SFC=Ground or water surface; Pressure [Pa]`[index] + tmp2m[s, hr] <- grib_data_df$`2[m] HTGL=Specified height level above ground; Temperature [C]`[index] + rh2m[s, hr] <- grib_data_df$`2[m] HTGL=Specified height level above ground; Relative humidity [%]`[index] + ugrd10m[s, hr] <- grib_data_df$`10[m] HTGL=Specified height level above ground; u-component of wind [m/s]`[index] + vgrd10m[s, hr] <- grib_data_df$`10[m] HTGL=Specified height level above ground; v-component of wind [m/s]`[index] + + if (curr_hours[hr] != "000") { + apcpsfc[s, hr] <- grib_data_df$`SFC=Ground or water surface; 03 hr Total precipitation [kg/(m^2)]`[index] + tcdcclm[s, hr] <- grib_data_df$`RESERVED(10) (Reserved); Total cloud cover [%]`[index] dswrfsfc[s, hr] <- grib_data_df$`SFC=Ground or water surface; Downward Short-Wave Rad. Flux [W/(m^2)]`[index] dlwrfsfc[s, hr] <- grib_data_df$`SFC=Ground or water surface; Downward Long-Wave Rad. Flux [W/(m^2)]`[index] } } } } - - return(list(tmp2m = tmp2m, - pressfc = pressfc, - rh2m = rh2m, - dlwrfsfc = dlwrfsfc, - dswrfsfc = dswrfsfc, - ugrd10m = ugrd10m, - vgrd10m = vgrd10m, - apcpsfc = apcpsfc, - tcdcclm = tcdcclm)) + + return(list( + tmp2m = tmp2m, + pressfc = pressfc, + rh2m = rh2m, + dlwrfsfc = dlwrfsfc, + dswrfsfc = dswrfsfc, + ugrd10m = ugrd10m, + vgrd10m = vgrd10m, + apcpsfc = apcpsfc, + tcdcclm = tcdcclm + )) } - - noaa_var_names <- c("tmp2m", "pressfc", "rh2m", "dlwrfsfc", - "dswrfsfc", "apcpsfc", - "ugrd10m", "vgrd10m", "tcdcclm") - - + + noaa_var_names <- c( + "tmp2m", "pressfc", "rh2m", "dlwrfsfc", + "dswrfsfc", "apcpsfc", + "ugrd10m", "vgrd10m", "tcdcclm" + ) + + model_dir <- file.path(output_directory) model_name_raw_dir <- file.path(output_directory, model_name_raw) - + curr_time <- lubridate::with_tz(Sys.time(), tzone = "UTC") curr_date <- lubridate::as_date(curr_time) potential_dates <- seq(curr_date - lubridate::days(6), curr_date, by = "1 day") - - #Remove dates before the new GEFS system + + # Remove dates before the new GEFS system potential_dates <- potential_dates[which(potential_dates > lubridate::as_date("2020-09-23"))] - - - - - cycle <-forecast_time + + + + + cycle <- forecast_time curr_forecast_time <- forecast_date + lubridate::hours(cycle) - if(cycle < 10) cycle <- paste0("0",cycle) - if(cycle == "00"){ - hours <- c(seq(0, 240, 3),seq(246, 840 , 6)) - }else{ - hours <- c(seq(0, 240, 3),seq(246, 384 , 6)) + if (cycle < 10) cycle <- paste0("0", cycle) + if (cycle == "00") { + hours <- c(seq(0, 240, 3), seq(246, 840, 6)) + } else { + hours <- c(seq(0, 240, 3), seq(246, 384, 6)) } hours_char <- hours - hours_char[which(hours < 100)] <- paste0("0",hours[which(hours < 100)]) - hours_char[which(hours < 10)] <- paste0("0",hours_char[which(hours < 10)]) - - raw_files <- list.files(file.path(model_name_raw_dir,forecast_date,cycle)) + hours_char[which(hours < 100)] <- paste0("0", hours[which(hours < 100)]) + hours_char[which(hours < 10)] <- paste0("0", hours_char[which(hours < 10)]) + + raw_files <- list.files(file.path(model_name_raw_dir, forecast_date, cycle)) 
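# The fixed-position substring on the next line (characters 25 to 27) is what
# pulls the three-digit forecast hour out of each raw grib file name. A minimal
# sketch of why that works, assuming download_grid() (not shown here) writes
# files using the same naming convention that extract_sites() above expects
# (base_filename2 plus the zero-padded hour plus ".grib"); the file names in
# this example are illustrative only.
example_files <- c(
  "gec00.t00z.pgrb2a.0p50.f000.grib", # control member, forecast hour 000
  "gep01.t00z.pgrb2a.0p50.f003.grib", # perturbed member 01, forecast hour 003
  "gep30.t06z.pgrb2a.0p50.f384.grib"  # perturbed member 30, forecast hour 384
)
as.numeric(stringr::str_sub(example_files, start = 25, end = 27))
# [1]   0   3 384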
hours_present <- as.numeric(stringr::str_sub(raw_files, start = 25, end = 27)) - + all_downloaded <- TRUE # if(cycle == "00"){ # #Sometime the 16-35 day forecast is not competed for some of the forecasts. If over 24 hrs has passed then they won't show up. @@ -328,202 +333,217 @@ process_gridded_noaa_download <- function(lat_list, # all_downloaded <- TRUE # } # } - - - - - - if(all_downloaded){ - + + + + + + if (all_downloaded) { ens_index <- 1:31 - #Run download_downscale_site() over the site_index - output <- parallel::mclapply(X = ens_index, - FUN = extract_sites, - hours_char = hours_char, - hours = hours, - cycle, - site_id, - lat_list, - lon_list, - working_directory = file.path(model_name_raw_dir,forecast_date,cycle), - mc.cores = 1) - - + # Run download_downscale_site() over the site_index + output <- parallel::mclapply( + X = ens_index, + FUN = extract_sites, + hours_char = hours_char, + hours = hours, + cycle, + site_id, + lat_list, + lon_list, + working_directory = file.path(model_name_raw_dir, forecast_date, cycle), + mc.cores = 1 + ) + + forecast_times <- lubridate::as_datetime(forecast_date) + lubridate::hours(as.numeric(cycle)) + lubridate::hours(as.numeric(hours_char)) - - - - #Convert negetive longitudes to degrees east - if(lon_list < 0){ + + + + # Convert negetive longitudes to degrees east + if (lon_list < 0) { lon_east <- 360 + lon_list - }else{ + } else { lon_east <- lon_list } - - model_site_date_hour_dir <- file.path(model_dir, site_id, forecast_date,cycle) - - if(!dir.exists(model_site_date_hour_dir)){ - dir.create(model_site_date_hour_dir, recursive=TRUE, showWarnings = FALSE) - }else{ + + model_site_date_hour_dir <- file.path(model_dir, site_id, forecast_date, cycle) + + if (!dir.exists(model_site_date_hour_dir)) { + dir.create(model_site_date_hour_dir, recursive = TRUE, showWarnings = FALSE) + } else { unlink(list.files(model_site_date_hour_dir, full.names = TRUE)) } - - if(downscale){ - modelds_site_date_hour_dir <- file.path(output_directory,model_name_ds,site_id, forecast_date,cycle) - if(!dir.exists(modelds_site_date_hour_dir)){ - dir.create(modelds_site_date_hour_dir, recursive=TRUE, showWarnings = FALSE) - }else{ + + if (downscale) { + modelds_site_date_hour_dir <- file.path(output_directory, model_name_ds, site_id, forecast_date, cycle) + if (!dir.exists(modelds_site_date_hour_dir)) { + dir.create(modelds_site_date_hour_dir, recursive = TRUE, showWarnings = FALSE) + } else { unlink(list.files(modelds_site_date_hour_dir, full.names = TRUE)) } } - + noaa_data <- list() - - for(v in 1:length(noaa_var_names)){ - + + for (v in 1:length(noaa_var_names)) { value <- NULL ensembles <- NULL forecast.date <- NULL - + noaa_data[v] <- NULL - - for(ens in 1:31){ + + for (ens in 1:31) { curr_ens <- output[[ens]] value <- c(value, curr_ens[[noaa_var_names[v]]][1, ]) ensembles <- c(ensembles, rep(ens, length(curr_ens[[noaa_var_names[v]]][1, ]))) forecast.date <- c(forecast.date, forecast_times) } - noaa_data[[v]] <- list(value = value, - ensembles = ensembles, - forecast.date = lubridate::as_datetime(forecast.date)) - + noaa_data[[v]] <- list( + value = value, + ensembles = ensembles, + forecast.date = lubridate::as_datetime(forecast.date) + ) } - - #These are the cf standard names - cf_var_names <- c("air_temperature", "air_pressure", "relative_humidity", "surface_downwelling_longwave_flux_in_air", - "surface_downwelling_shortwave_flux_in_air", "precipitation_flux", "eastward_wind", "northward_wind","cloud_area_fraction") - - #Replace "eastward_wind" and "northward_wind" 
with "wind_speed" - cf_var_names1 <- c("air_temperature", "air_pressure", "relative_humidity", "surface_downwelling_longwave_flux_in_air", - "surface_downwelling_shortwave_flux_in_air", "precipitation_flux","specific_humidity", "cloud_area_fraction","wind_speed") - - cf_var_units1 <- c("K", "Pa", "1", "Wm-2", "Wm-2", "kgm-2s-1", "1", "1", "ms-1") #Negative numbers indicate negative exponents - + + # These are the cf standard names + cf_var_names <- c( + "air_temperature", "air_pressure", "relative_humidity", "surface_downwelling_longwave_flux_in_air", + "surface_downwelling_shortwave_flux_in_air", "precipitation_flux", "eastward_wind", "northward_wind", "cloud_area_fraction" + ) + + # Replace "eastward_wind" and "northward_wind" with "wind_speed" + cf_var_names1 <- c( + "air_temperature", "air_pressure", "relative_humidity", "surface_downwelling_longwave_flux_in_air", + "surface_downwelling_shortwave_flux_in_air", "precipitation_flux", "specific_humidity", "cloud_area_fraction", "wind_speed" + ) + + cf_var_units1 <- c("K", "Pa", "1", "Wm-2", "Wm-2", "kgm-2s-1", "1", "1", "ms-1") # Negative numbers indicate negative exponents + names(noaa_data) <- cf_var_names - + specific_humidity <- rep(NA, length(noaa_data$relative_humidity$value)) - + noaa_data$relative_humidity$value <- noaa_data$relative_humidity$value / 100 - + noaa_data$air_temperature$value <- noaa_data$air_temperature$value + 273.15 - - specific_humidity[which(!is.na(noaa_data$relative_humidity$value))] <- PEcAn.data.atmosphere::rh2qair(rh = noaa_data$relative_humidity$value[which(!is.na(noaa_data$relative_humidity$value))], - T = noaa_data$air_temperature$value[which(!is.na(noaa_data$relative_humidity$value))], - press = noaa_data$air_pressure$value[which(!is.na(noaa_data$relative_humidity$value))]) - - - #Calculate wind speed from east and north components + + specific_humidity[which(!is.na(noaa_data$relative_humidity$value))] <- PEcAn.data.atmosphere::rh2qair( + rh = noaa_data$relative_humidity$value[which(!is.na(noaa_data$relative_humidity$value))], + T = noaa_data$air_temperature$value[which(!is.na(noaa_data$relative_humidity$value))], + press = noaa_data$air_pressure$value[which(!is.na(noaa_data$relative_humidity$value))] + ) + + + # Calculate wind speed from east and north components wind_speed <- sqrt(noaa_data$eastward_wind$value^2 + noaa_data$northward_wind$value^2) - - forecast_noaa <- tibble::tibble(time = noaa_data$air_temperature$forecast.date, - NOAA.member = noaa_data$air_temperature$ensembles, - air_temperature = noaa_data$air_temperature$value, - air_pressure= noaa_data$air_pressure$value, - relative_humidity = noaa_data$relative_humidity$value, - surface_downwelling_longwave_flux_in_air = noaa_data$surface_downwelling_longwave_flux_in_air$value, - surface_downwelling_shortwave_flux_in_air = noaa_data$surface_downwelling_shortwave_flux_in_air$value, - precipitation_flux = noaa_data$precipitation_flux$value, - specific_humidity = specific_humidity, - cloud_area_fraction = noaa_data$cloud_area_fraction$value, - wind_speed = wind_speed) - - forecast_noaa$cloud_area_fraction <- forecast_noaa$cloud_area_fraction / 100 #Convert from % to proportion - + + forecast_noaa <- tibble::tibble( + time = noaa_data$air_temperature$forecast.date, + NOAA.member = noaa_data$air_temperature$ensembles, + air_temperature = noaa_data$air_temperature$value, + air_pressure = noaa_data$air_pressure$value, + relative_humidity = noaa_data$relative_humidity$value, + surface_downwelling_longwave_flux_in_air = 
noaa_data$surface_downwelling_longwave_flux_in_air$value, + surface_downwelling_shortwave_flux_in_air = noaa_data$surface_downwelling_shortwave_flux_in_air$value, + precipitation_flux = noaa_data$precipitation_flux$value, + specific_humidity = specific_humidity, + cloud_area_fraction = noaa_data$cloud_area_fraction$value, + wind_speed = wind_speed + ) + + forecast_noaa$cloud_area_fraction <- forecast_noaa$cloud_area_fraction / 100 # Convert from % to proportion + # Convert the 3 hr precip rate to per second. forecast_noaa$precipitation_flux <- forecast_noaa$precipitation_flux / (60 * 60 * 3) - - - + + + # Create a data frame with information about the file. This data frame's format is an internal PEcAn standard, and is stored in the BETY database to - # locate the data file. The data file is stored on the local machine where the download occured. Because NOAA GEFS is an - # ensemble of 21 different forecast models, each model gets its own data frame. All of the information is the same for + # locate the data file. The data file is stored on the local machine where the download occured. Because NOAA GEFS is an + # ensemble of 21 different forecast models, each model gets its own data frame. All of the information is the same for # each file except for the file name. - - results_list = list() - - + + results_list <- list() + + for (ens in 1:31) { # i is the ensemble number - - #Turn the ensemble number into a string - if(ens-1< 10){ - ens_name <- paste0("0",ens-1) - }else{ + + # Turn the ensemble number into a string + if (ens - 1 < 10) { + ens_name <- paste0("0", ens - 1) + } else { ens_name <- ens - 1 } - + forecast_noaa_ens <- forecast_noaa %>% dplyr::filter(NOAA.member == ens) %>% dplyr::filter(!is.na(.data$air_temperature)) - + end_date <- forecast_noaa_ens %>% dplyr::summarise(max_time = max(.data$time)) - - results = data.frame( - file = "", #Path to the file (added in loop below). - host = PEcAn.remote::fqdn(), #Name of the server where the file is stored - mimetype = "application/x-netcdf", #Format the data is saved in - formatname = "CF Meteorology", #Type of data - startdate = paste0(format(forecast_date, "%Y-%m-%dT%H:%M:00")), #starting date and time, down to the second - enddate = paste0(format(end_date$max_time, "%Y-%m-%dT%H:%M:00")), #ending date and time, down to the second - dbfile.name = "NOAA_GEFS_downscale", #Source of data (ensemble number will be added later) + + results <- data.frame( + file = "", # Path to the file (added in loop below). 
+ host = PEcAn.remote::fqdn(), # Name of the server where the file is stored + mimetype = "application/x-netcdf", # Format the data is saved in + formatname = "CF Meteorology", # Type of data + startdate = paste0(format(forecast_date, "%Y-%m-%dT%H:%M:00")), # starting date and time, down to the second + enddate = paste0(format(end_date$max_time, "%Y-%m-%dT%H:%M:00")), # ending date and time, down to the second + dbfile.name = "NOAA_GEFS_downscale", # Source of data (ensemble number will be added later) stringsAsFactors = FALSE ) - - identifier = paste("NOAA_GEFS", site_id, ens_name, format(forecast_date, "%Y-%m-%dT%H:%M"), - format(end_date$max_time, "%Y-%m-%dT%H:%M"), sep="_") - + + identifier <- paste("NOAA_GEFS", site_id, ens_name, format(forecast_date, "%Y-%m-%dT%H:%M"), + format(end_date$max_time, "%Y-%m-%dT%H:%M"), + sep = "_" + ) + fname <- paste0(identifier, ".nc") - ensemble_folder = file.path(output_directory, identifier) - output_file <- file.path(ensemble_folder,fname) - + ensemble_folder <- file.path(output_directory, identifier) + output_file <- file.path(ensemble_folder, fname) + if (!dir.exists(ensemble_folder)) { - dir.create(ensemble_folder, recursive=TRUE, showWarnings = FALSE)} - - - #Write netCDF - if(!nrow(forecast_noaa_ens) == 0){ - write_noaa_gefs_netcdf(df = forecast_noaa_ens,ens, lat = lat_list[1], lon = lon_east, cf_units = cf_var_units1, output_file = output_file, overwrite = TRUE) - }else {results_list[[ens]] <- NULL - next} - - if(downscale){ - #Downscale the forecast from 6hr to 1hr - - - identifier_ds = paste("NOAA_GEFS_downscale", site_id, ens_name, format(forecast_date, "%Y-%m-%dT%H:%M"), - format(end_date$max_time, "%Y-%m-%dT%H:%M"), sep="_") - + dir.create(ensemble_folder, recursive = TRUE, showWarnings = FALSE) + } + + + # Write netCDF + if (!nrow(forecast_noaa_ens) == 0) { + write_noaa_gefs_netcdf(df = forecast_noaa_ens, ens, lat = lat_list[1], lon = lon_east, cf_units = cf_var_units1, output_file = output_file, overwrite = TRUE) + } else { + results_list[[ens]] <- NULL + next + } + + if (downscale) { + # Downscale the forecast from 6hr to 1hr + + + identifier_ds <- paste("NOAA_GEFS_downscale", site_id, ens_name, format(forecast_date, "%Y-%m-%dT%H:%M"), + format(end_date$max_time, "%Y-%m-%dT%H:%M"), + sep = "_" + ) + fname_ds <- paste0(identifier_ds, ".nc") - ensemble_folder_ds = file.path(output_directory, identifier_ds) - output_file_ds <- file.path(ensemble_folder_ds,fname_ds) - + ensemble_folder_ds <- file.path(output_directory, identifier_ds) + output_file_ds <- file.path(ensemble_folder_ds, fname_ds) + if (!dir.exists(ensemble_folder_ds)) { - dir.create(ensemble_folder_ds, recursive=TRUE, showWarnings = FALSE)} - - results$file = output_file_ds - results$dbfile.name = fname_ds + dir.create(ensemble_folder_ds, recursive = TRUE, showWarnings = FALSE) + } + + results$file <- output_file_ds + results$dbfile.name <- fname_ds results_list[[ens]] <- results - - #Run downscaling + + # Run downscaling temporal_downscale_half_hour(input_file = output_file, output_file = output_file_ds, overwrite = TRUE, hr = 1) } - - } } results_list <- results_list[!sapply(results_list, is.null)] return(results_list) -} #process_gridded_noaa_download +} # process_gridded_noaa_download #' @title Downscale NOAA GEFS from 6hr to 1hr #' @return None @@ -532,116 +552,121 @@ process_gridded_noaa_download <- function(lat_list, #' @param output_file, full path to 1hr file that will be generated #' @param overwrite, logical stating to overwrite any existing output_file #' @param 
hr time step in hours of temporal downscaling (default = 1) -#' @importFrom rlang .data -#' +#' @importFrom rlang .data +#' #' @author Quinn Thomas #' #' -temporal_downscale <- function(input_file, output_file, overwrite = TRUE, hr = 1){ - +temporal_downscale <- function(input_file, output_file, overwrite = TRUE, hr = 1) { # open netcdf nc <- ncdf4::nc_open(input_file) - - if(stringr::str_detect(input_file, "ens")){ + + if (stringr::str_detect(input_file, "ens")) { ens_postion <- stringr::str_locate(input_file, "ens") ens_name <- stringr::str_sub(input_file, start = ens_postion[1], end = ens_postion[2] + 2) ens <- as.numeric(stringr::str_sub(input_file, start = ens_postion[2] + 1, end = ens_postion[2] + 2)) - }else{ + } else { ens <- 0 ens_name <- "ens00" } - + # retrive variable names cf_var_names <- names(nc$var) - + # generate time vector time <- ncdf4::ncvar_get(nc, "time") begining_time <- lubridate::ymd_hm(ncdf4::ncatt_get(nc, "time", - attname = "units")$value) + attname = "units" + )$value) time <- begining_time + lubridate::hours(time) - + # retrive lat and lon lat.in <- ncdf4::ncvar_get(nc, "latitude") lon.in <- ncdf4::ncvar_get(nc, "longitude") - + # generate data frame from netcdf variables and retrive units noaa_data <- tibble::tibble(time = time) var_units <- rep(NA, length(cf_var_names)) - for(i in 1:length(cf_var_names)){ + for (i in 1:length(cf_var_names)) { curr_data <- ncdf4::ncvar_get(nc, cf_var_names[i]) noaa_data <- cbind(noaa_data, curr_data) var_units[i] <- ncdf4::ncatt_get(nc, cf_var_names[i], attname = "units")$value } - + ncdf4::nc_close(nc) - - names(noaa_data) <- c("time",cf_var_names) - + + names(noaa_data) <- c("time", cf_var_names) + # spline-based downscaling - if(length(which(c("air_temperature", "wind_speed","specific_humidity", "air_pressure") %in% cf_var_names) == 4)){ - forecast_noaa_ds <- downscale_spline_to_hrly(df = noaa_data, VarNames = c("air_temperature", "wind_speed","specific_humidity", "air_pressure")) - }else{ - #Add error message + if (length(which(c("air_temperature", "wind_speed", "specific_humidity", "air_pressure") %in% cf_var_names) == 4)) { + forecast_noaa_ds <- downscale_spline_to_hrly(df = noaa_data, VarNames = c("air_temperature", "wind_speed", "specific_humidity", "air_pressure")) + } else { + # Add error message } - + # Convert splined SH, temperature, and presssure to RH forecast_noaa_ds <- forecast_noaa_ds %>% - dplyr::mutate(relative_humidity = qair2rh(qair = forecast_noaa_ds$specific_humidity, - temp = forecast_noaa_ds$air_temperature, - press = forecast_noaa_ds$air_pressure)) %>% - dplyr::mutate(relative_humidity = .data$relative_humidity, - relative_humidity = ifelse(.data$relative_humidity > 1, 0, .data$relative_humidity)) - + dplyr::mutate(relative_humidity = qair2rh( + qair = forecast_noaa_ds$specific_humidity, + temp = forecast_noaa_ds$air_temperature, + press = forecast_noaa_ds$air_pressure + )) %>% + dplyr::mutate( + relative_humidity = .data$relative_humidity, + relative_humidity = ifelse(.data$relative_humidity > 1, 0, .data$relative_humidity) + ) + # convert longwave to hourly (just copy 6 hourly values over past 6-hour time period) - if("surface_downwelling_longwave_flux_in_air" %in% cf_var_names){ + if ("surface_downwelling_longwave_flux_in_air" %in% cf_var_names) { LW.flux.hrly <- downscale_repeat_6hr_to_hrly(df = noaa_data, varName = "surface_downwelling_longwave_flux_in_air") forecast_noaa_ds <- dplyr::inner_join(forecast_noaa_ds, LW.flux.hrly, by = "time") - }else{ - #Add error message + } else { + # Add error 
message } - + # convert precipitation to hourly (just copy 6 hourly values over past 6-hour time period) - if("surface_downwelling_longwave_flux_in_air" %in% cf_var_names){ + if ("surface_downwelling_longwave_flux_in_air" %in% cf_var_names) { Precip.flux.hrly <- downscale_repeat_6hr_to_hrly(df = noaa_data, varName = "precipitation_flux") forecast_noaa_ds <- dplyr::inner_join(forecast_noaa_ds, Precip.flux.hrly, by = "time") - }else{ - #Add error message + } else { + # Add error message } - + # convert cloud_area_fraction to hourly (just copy 6 hourly values over past 6-hour time period) - if("cloud_area_fraction" %in% cf_var_names){ + if ("cloud_area_fraction" %in% cf_var_names) { cloud_area_fraction.flux.hrly <- downscale_repeat_6hr_to_hrly(df = noaa_data, varName = "cloud_area_fraction") forecast_noaa_ds <- dplyr::inner_join(forecast_noaa_ds, cloud_area_fraction.flux.hrly, by = "time") - }else{ - #Add error message + } else { + # Add error message } - + # use solar geometry to convert shortwave from 6 hr to 1 hr - if("surface_downwelling_shortwave_flux_in_air" %in% cf_var_names){ + if ("surface_downwelling_shortwave_flux_in_air" %in% cf_var_names) { ShortWave.hrly <- downscale_ShortWave_to_hrly(df = noaa_data, lat = lat.in, lon = lon.in) forecast_noaa_ds <- dplyr::inner_join(forecast_noaa_ds, ShortWave.hrly, by = "time") - }else{ - #Add error message + } else { + # Add error message } - - #Add dummy ensemble number to work with write_noaa_gefs_netcdf() + + # Add dummy ensemble number to work with write_noaa_gefs_netcdf() forecast_noaa_ds$NOAA.member <- ens - - #Make sure var names are in correct order + + # Make sure var names are in correct order forecast_noaa_ds <- forecast_noaa_ds %>% dplyr::select("time", tidyselect::all_of(cf_var_names), "NOAA.member") - - #Write netCDF - write_noaa_gefs_netcdf(df = forecast_noaa_ds, - ens = ens, - lat = lat.in, - lon = lon.in, - cf_units = var_units, - output_file = output_file, - overwrite = overwrite) - -} #temporal_downscale + + # Write netCDF + write_noaa_gefs_netcdf( + df = forecast_noaa_ds, + ens = ens, + lat = lat.in, + lon = lon.in, + cf_units = var_units, + output_file = output_file, + overwrite = overwrite + ) +} # temporal_downscale @@ -655,60 +680,61 @@ temporal_downscale <- function(input_file, output_file, overwrite = TRUE, hr = 1 ##' @param cf_units vector of variable names in order they appear in df ##' @param output_file name, with full path, of the netcdf file that is generated ##' @param overwrite logical to overwrite existing netcdf file -##' +##' ##' @return NA ##' @export -##' +##' ##' @author Quinn Thomas ##' ##' -write_noaa_gefs_netcdf <- function(df, ens = NA, lat, lon, cf_units, output_file, overwrite){ - - if(!is.na(ens)){ +write_noaa_gefs_netcdf <- function(df, ens = NA, lat, lon, cf_units, output_file, overwrite) { + if (!is.na(ens)) { data <- df max_index <- max(which(!is.na(data$air_temperature))) start_time <- min(data$time) end_time <- data$time[max_index] - + data <- data %>% dplyr::select(-c("time", "NOAA.member")) - }else{ + } else { data <- df max_index <- max(which(!is.na(data$air_temperature))) start_time <- min(data$time) end_time <- data$time[max_index] - + data <- df %>% dplyr::select(-c("time")) } - + diff_time <- as.numeric(difftime(df$time, df$time[1])) / (60 * 60) - + cf_var_names <- names(data) - - time_dim <- ncdf4::ncdim_def(name="time", - units = paste("hours since", format(start_time, "%Y-%m-%d %H:%M")), - diff_time, #GEFS forecast starts 6 hours from start time - create_dimvar = TRUE) + + time_dim 
<- ncdf4::ncdim_def( + name = "time", + units = paste("hours since", format(start_time, "%Y-%m-%d %H:%M")), + diff_time, # GEFS forecast starts 6 hours from start time + create_dimvar = TRUE + ) lat_dim <- ncdf4::ncdim_def("latitude", "degree_north", lat, create_dimvar = TRUE) lon_dim <- ncdf4::ncdim_def("longitude", "degree_east", lon, create_dimvar = TRUE) - + dimensions_list <- list(time_dim, lat_dim, lon_dim) - + nc_var_list <- list() - for (i in 1:length(cf_var_names)) { #Each ensemble member will have data on each variable stored in their respective file. - nc_var_list[[i]] <- ncdf4::ncvar_def(cf_var_names[i], cf_units[i], dimensions_list, missval=NaN) + for (i in 1:length(cf_var_names)) { # Each ensemble member will have data on each variable stored in their respective file. + nc_var_list[[i]] <- ncdf4::ncvar_def(cf_var_names[i], cf_units[i], dimensions_list, missval = NaN) } - + if (!file.exists(output_file) | overwrite) { nc_flptr <- ncdf4::nc_create(output_file, nc_var_list, verbose = FALSE) - - #For each variable associated with that ensemble + + # For each variable associated with that ensemble for (j in 1:ncol(data)) { # "j" is the variable number. "i" is the ensemble number. Remember that each row represents an ensemble - ncdf4::ncvar_put(nc_flptr, nc_var_list[[j]], unlist(data[,j])) + ncdf4::ncvar_put(nc_flptr, nc_var_list[[j]], unlist(data[, j])) } - - ncdf4::nc_close(nc_flptr) #Write to the disk/storage + + ncdf4::nc_close(nc_flptr) # Write to the disk/storage } -} \ No newline at end of file +} diff --git a/modules/data.atmosphere/R/align_met.R b/modules/data.atmosphere/R/align_met.R index af9749bc7a3..38dcacdb61c 100644 --- a/modules/data.atmosphere/R/align_met.R +++ b/modules/data.atmosphere/R/align_met.R @@ -6,23 +6,23 @@ ##' @title align.met ##' @family debias - Debias & Align Meteorology Datasets into continuous time series ##' @author Christy Rollinson -##' @description This script aligns meteorology datasets in at temporal resolution for debiasing & -##' temporal downscaling. -##' Note: The output here is stored in memory! -##' Note: can probably at borrow from or adapt align_data.R in Benchmarking module, but +##' @description This script aligns meteorology datasets in at temporal resolution for debiasing & +##' temporal downscaling. +##' Note: The output here is stored in memory! +##' Note: can probably at borrow from or adapt align_data.R in Benchmarking module, but ##' it's too much of a black box at the moment. # ----------------------------------- # Notes # ----------------------------------- -##' @details 1. Assumes that both the training and source data are in *at least* daily resolution -##' and each dataset is in a consistent temporal resolution being read from a single file -##' (CF/Pecan format). For example, CMIP5 historical/p1000 runs where radiation drivers +##' @details 1. Assumes that both the training and source data are in *at least* daily resolution +##' and each dataset is in a consistent temporal resolution being read from a single file +##' (CF/Pecan format). For example, CMIP5 historical/p1000 runs where radiation drivers ##' are in monthly resolution and temperature is in daily will need to be reconciled using ##' one of the "met2CF" or "download" or "extract" functions -##' 2. Default file structure: Ensembles members for a given site or set of simes are housed -##' in a common folder with the site ID. Right now everything is based off of Christy's +##' 2. 
Default file structure: Ensembles members for a given site or set of simes are housed +##' in a common folder with the site ID. Right now everything is based off of Christy's ##' PalEON ensemble ID scheme where the site ID is a character string (e.g. HARVARD) followed -##' the SOURCE data family (i.e. GCM) as a string and then the ensemble member ID as a number +##' the SOURCE data family (i.e. GCM) as a string and then the ensemble member ID as a number ##' (e.g. 001). For example, the file path for a single daily ensemble member for PalEON is: ##' "~/Desktop/Research/met_ensembles/data/met_ensembles/HARVARD/day/ensembles/bcc-csm1-1_004" ##' with each year in a separate netcdf file inside of it. "bcc-csm1-1_004" is an example of @@ -30,28 +30,28 @@ ##' @return 2-layered list (stored in memory) containing the training and source data that are now matched ##' in temporal resolution have the specified number of ensemble members ##' - dat.train (training dataset) and dat.source (source data to be downscaled or bias-corrected) -##' are both lists that contain separate data frames for time indices and all available met +##' are both lists that contain separate data frames for time indices and all available met ##' variables with ensemble members in columns # ----------------------------------- # Parameters # ----------------------------------- ##' @param train.path - path to the dataset to be used to downscale the data ##' @param source.path - data to be bias-corrected aligned with training data (from align.met) -##' @param yrs.train - (optional) specify a specific years to be loaded for the training data; -##' prevents needing to load the entire dataset. If NULL, all available years +##' @param yrs.train - (optional) specify a specific years to be loaded for the training data; +##' prevents needing to load the entire dataset. If NULL, all available years ##' will be loaded. If not null, should be a vector of numbers (so you can skip ##' problematic years) ##' @param yrs.source - (optional) specify a specific years to be loaded for the source data; -##' prevents needing to load the entire dataset. If NULL, all available years +##' prevents needing to load the entire dataset. If NULL, all available years ##' will be loaded. If not null, should be a vector of numbers (so you can skip ##' problematic years) ##' @param n.ens - number of ensemble members to generate and save -##' @param pair.mems - logical stating whether ensemble members should be paired in +##' @param pair.mems - logical stating whether ensemble members should be paired in ##' the case where ensembles are being read in in both the training and source data -##' @param mems.train - (optional) string of ensemble identifiers that ensure the training data is read +##' @param mems.train - (optional) string of ensemble identifiers that ensure the training data is read ##' in a specific order to ensure consistent time series & proper error propagation. -##' If null, members of the training data ensemble will be randomly selected and -##' ordered. Specifying the ensemble members IDs (e.g. CCSM_001, CCSM_002) will +##' If null, members of the training data ensemble will be randomly selected and +##' ordered. Specifying the ensemble members IDs (e.g. CCSM_001, CCSM_002) will ##' ensure ensemble members are properly identified and combined. 
##' @param seed - specify seed so that random draws can be reproduced ##' @param print.progress - if TRUE, prints progress bar @@ -67,7 +67,7 @@ # - Note: for now this is only going to work with a single time series & not an ensemble of source data # - Note: end dimensions should match that of the training data # 3. export data (stored in memory) for input into the debiasing or temporal downscaling workflow -# +# # Returns a list called met.out with 2 levels that are matched in temporal resolution & number of ensembles # List Layers # 1. dat.train @@ -77,8 +77,7 @@ #---------------------------------------------------------------------- # Begin Function #---------------------------------------------------------------------- -align.met <- function(train.path, source.path, yrs.train=NULL, yrs.source=NULL, n.ens=NULL, pair.mems = FALSE, mems.train=NULL, seed=Sys.Date(), print.progress = FALSE) { - +align.met <- function(train.path, source.path, yrs.train = NULL, yrs.source = NULL, n.ens = NULL, pair.mems = FALSE, mems.train = NULL, seed = Sys.Date(), print.progress = FALSE) { met.out <- list() # where the aligned data will be stored # --------------- @@ -86,395 +85,391 @@ align.met <- function(train.path, source.path, yrs.train=NULL, yrs.source=NULL, # --------------- met.out[["dat.train"]] <- list() # 1.a. Determine if we have an ensemble in the training path or if it's a single time series - if(length(dir(train.path, ".nc"))>0){ # we have a single time series - n.trn = 1 # Ignore how many input ensemble members we asked for, we only actually have 1 here - + if (length(dir(train.path, ".nc")) > 0) { # we have a single time series + n.trn <- 1 # Ignore how many input ensemble members we asked for, we only actually have 1 here + files.train <- dir(train.path, ".nc") - + yrs.file <- strsplit(files.train, "[.]") - yrs.file <- matrix(unlist(yrs.file), ncol=length(yrs.file[[1]]), byrow=T) - yrs.file <- as.numeric(yrs.file[,ncol(yrs.file)-1]) # Assumes year is always last thing before the file extension - - if(!is.null(yrs.train)){ + yrs.file <- matrix(unlist(yrs.file), ncol = length(yrs.file[[1]]), byrow = T) + yrs.file <- as.numeric(yrs.file[, ncol(yrs.file) - 1]) # Assumes year is always last thing before the file extension + + if (!is.null(yrs.train)) { files.train <- files.train[which(yrs.file %in% yrs.train)] yrs.file <- yrs.file[which(yrs.file %in% yrs.train)] } - + # Loop through the .nc files putting everything into a list - if(print.progress==TRUE){ + if (print.progress == TRUE) { print("Processing Training Data") - pb <- utils::txtProgressBar(min=0, max=length(files.train), style=3) - } - for(i in 1:length(files.train)){ + pb <- utils::txtProgressBar(min = 0, max = length(files.train), style = 3) + } + for (i in 1:length(files.train)) { yr.now <- yrs.file[i] - + ncT <- ncdf4::nc_open(file.path(train.path, files.train[i])) - + # Set up the time data frame to help index nday <- ifelse(lubridate::leap_year(yr.now), 366, 365) ntime <- length(ncT$dim$time$vals) - step.day <- nday/ntime - step.hr <- step.day*24 - stamps.hr <- seq(step.hr/2, by=step.hr, length.out=1/step.day) # Time stamps centered on period - + step.day <- nday / ntime + step.hr <- step.day * 24 + stamps.hr <- seq(step.hr / 2, by = step.hr, length.out = 1 / step.day) # Time stamps centered on period + # Create a data frame with all the important time info # center the hour step - df.time <- data.frame(Year=yr.now, DOY=rep(1:nday, each=1/step.day), Hour=rep(stamps.hr, length.out=ntime)) - df.time$Date <- 
strptime(paste(df.time$Year, df.time$DOY, df.time$Hour, sep="-"), format=("%Y-%j-%H"), tz="UTC") + df.time <- data.frame(Year = yr.now, DOY = rep(1:nday, each = 1 / step.day), Hour = rep(stamps.hr, length.out = ntime)) + df.time$Date <- strptime(paste(df.time$Year, df.time$DOY, df.time$Hour, sep = "-"), format = ("%Y-%j-%H"), tz = "UTC") met.out$dat.train[["time"]] <- rbind(met.out$dat.train$time, df.time) - + # Extract the met info, making matrices with the appropriate number of ensemble members - for(v in names(ncT$var)){ - df.tem <- matrix(rep(ncdf4::ncvar_get(ncT, v), n.trn), ncol=n.trn, byrow=F) + for (v in names(ncT$var)) { + df.tem <- matrix(rep(ncdf4::ncvar_get(ncT, v), n.trn), ncol = n.trn, byrow = F) met.out$dat.train[[v]] <- rbind(met.out$dat.train[[v]], df.tem) } - + ncdf4::nc_close(ncT) - if(print.progress==TRUE) utils::setTxtProgressBar(pb, i) + if (print.progress == TRUE) utils::setTxtProgressBar(pb, i) } # End looping through training data files } else { # we have an ensemble we need to deal with # Figure out how many ensemble members we're working with ens.train <- dir(train.path) - - if(is.null(n.ens)) n.ens <- length(ens.train) - if(length(ens.train)>n.ens & is.null(mems.train)) { + + if (is.null(n.ens)) n.ens <- length(ens.train) + if (length(ens.train) > n.ens & is.null(mems.train)) { train.use <- sample(1:length(ens.train), n.ens) ens.train <- ens.train[train.use] } - if(!is.null(mems.train)){ + if (!is.null(mems.train)) { ens.train <- mems.train } # getting an estimate of how many files we need to process yrs.file <- strsplit(dir(file.path(train.path, ens.train[1])), "[.]") - yrs.file <- matrix(unlist(yrs.file), ncol=length(yrs.file[[1]]), byrow=T) - yrs.file <- as.numeric(yrs.file[,ncol(yrs.file)-1]) # Assumes year is always last thing before the file extension - - if(!is.null(yrs.train)){ + yrs.file <- matrix(unlist(yrs.file), ncol = length(yrs.file[[1]]), byrow = T) + yrs.file <- as.numeric(yrs.file[, ncol(yrs.file) - 1]) # Assumes year is always last thing before the file extension + + if (!is.null(yrs.train)) { n.files <- length(yrs.file[which(yrs.file %in% yrs.train)]) } else { n.files <- length(dir(file.path(train.path, ens.train[1]))) } - - if(print.progress==TRUE){ + + if (print.progress == TRUE) { print("Processing Training Data") - pb <- utils::txtProgressBar(min=0, max=length(ens.train)*n.files, style=3) - pb.ind=1 + pb <- utils::txtProgressBar(min = 0, max = length(ens.train) * n.files, style = 3) + pb.ind <- 1 } - - for(j in 1:length(ens.train)){ + + for (j in 1:length(ens.train)) { files.train <- dir(file.path(train.path, ens.train[j]), ".nc") - + yrs.file <- strsplit(files.train, "[.]") - yrs.file <- matrix(unlist(yrs.file), ncol=length(yrs.file[[1]]), byrow=T) - yrs.file <- as.numeric(yrs.file[,ncol(yrs.file)-1]) # Assumes year is always last thing before the file extension - - if(!is.null(yrs.train)){ + yrs.file <- matrix(unlist(yrs.file), ncol = length(yrs.file[[1]]), byrow = T) + yrs.file <- as.numeric(yrs.file[, ncol(yrs.file) - 1]) # Assumes year is always last thing before the file extension + + if (!is.null(yrs.train)) { files.train <- files.train[which(yrs.file %in% yrs.train)] yrs.file <- yrs.file[which(yrs.file %in% yrs.train)] } - - + + # Loop through the .nc files putting everything into a list dat.ens <- list() # Making a temporary storage bin for all the data from this ensemble member - for(i in 1:length(files.train)){ + for (i in 1:length(files.train)) { yr.now <- yrs.file[i] - + ncT <- ncdf4::nc_open(file.path(train.path, 
ens.train[j], files.train[i])) - + # Set up the time data frame to help index nday <- ifelse(lubridate::leap_year(yr.now), 366, 365) ntime <- length(ncT$dim$time$vals) - step.day <- nday/ntime - step.hr <- step.day*24 - stamps.hr <- seq(step.hr/2, by=step.hr, length.out=1/step.day) # Time stamps centered on period - + step.day <- nday / ntime + step.hr <- step.day * 24 + stamps.hr <- seq(step.hr / 2, by = step.hr, length.out = 1 / step.day) # Time stamps centered on period + # Create a data frame with all the important time info # center the hour step # ** Only do this with the first ensemble member so we're not being redundant - if(j==1){ - df.time <- data.frame(Year=yr.now, DOY=rep(1:nday, each=1/step.day), Hour=rep(stamps.hr, length.out=ntime)) - df.time$Date <- strptime(paste(df.time$Year, df.time$DOY, df.time$Hour, sep="-"), format=("%Y-%j-%H"), tz="UTC") + if (j == 1) { + df.time <- data.frame(Year = yr.now, DOY = rep(1:nday, each = 1 / step.day), Hour = rep(stamps.hr, length.out = ntime)) + df.time$Date <- strptime(paste(df.time$Year, df.time$DOY, df.time$Hour, sep = "-"), format = ("%Y-%j-%H"), tz = "UTC") met.out$dat.train[["time"]] <- rbind(met.out$dat.train$time, df.time) } - + # Extract the met info, making matrices with the appropriate number of ensemble members - for(v in names(ncT$var)){ - dat.ens[[v]] <- append(dat.ens[[v]], ncdf4::ncvar_get(ncT, v)) + for (v in names(ncT$var)) { + dat.ens[[v]] <- append(dat.ens[[v]], ncdf4::ncvar_get(ncT, v)) } ncdf4::nc_close(ncT) - if(print.progress==TRUE){ + if (print.progress == TRUE) { utils::setTxtProgressBar(pb, pb.ind) - pb.ind <- pb.ind+1 + pb.ind <- pb.ind + 1 } } # End looping through training data files - + # Storing the ensemble member data in our output list - for(v in names(dat.ens)){ + for (v in names(dat.ens)) { met.out$dat.train[[v]] <- cbind(met.out$dat.train[[v]], dat.ens[[v]]) } } # End extracting ensemble members - for(v in 2:length(met.out$dat.train)){ + for (v in 2:length(met.out$dat.train)) { dimnames(met.out$dat.train[[v]])[[2]] <- ens.train } } # End loading & formatting training data - if(print.progress==TRUE) print(" ") + if (print.progress == TRUE) print(" ") # --------------- - + # --------------- # Read in & format the source data # --------------- met.out[["dat.source"]] <- list() - if(length(dir(source.path, ".nc"))>0){ # we have a single time series - n.src = 1 # we only have 1 time series so - + if (length(dir(source.path, ".nc")) > 0) { # we have a single time series + n.src <- 1 # we only have 1 time series so + # Get a list of the files we'll be downscaling files.source <- dir(source.path, ".nc") - + # create a vector of the years yrs.file <- strsplit(files.source, "[.]") - yrs.file <- matrix(unlist(yrs.file), ncol=length(yrs.file[[1]]), byrow=T) - yrs.file <- as.numeric(yrs.file[,ncol(yrs.file)-1]) # Assumes year is always last thing before the file extension - + yrs.file <- matrix(unlist(yrs.file), ncol = length(yrs.file[[1]]), byrow = T) + yrs.file <- as.numeric(yrs.file[, ncol(yrs.file) - 1]) # Assumes year is always last thing before the file extension + # Subsetting to just the years we're interested in - if(!is.null(yrs.source)){ + if (!is.null(yrs.source)) { files.source <- files.source[which(yrs.file %in% yrs.source)] yrs.file <- yrs.file[which(yrs.file %in% yrs.source)] } - - + + # Getting the day & hour timesteps from the training data yrs.train <- length(unique(met.out$dat.train$time$Year)) - hr.train <- 24/length(unique(met.out$dat.train$time$Hour)) - day.train <- 
1/length(unique(met.out$dat.train$time$Hour)) + hr.train <- 24 / length(unique(met.out$dat.train$time$Hour)) + day.train <- 1 / length(unique(met.out$dat.train$time$Hour)) # day.train <- 1/(nrow(met.out$dat.train$time)/yrs.train/365) - + # Loop through the .nc files putting everything into a list - if(print.progress==TRUE){ + if (print.progress == TRUE) { print("Processing Source Data") - pb <- utils::txtProgressBar(min=0, max=length(files.source), style=3) + pb <- utils::txtProgressBar(min = 0, max = length(files.source), style = 3) } - - for(i in 1:length(files.source)){ + + for (i in 1:length(files.source)) { yr.now <- yrs.file[i] - + ncT <- ncdf4::nc_open(file.path(source.path, files.source[i])) # Set up the time data frame to help index nday <- ifelse(lubridate::leap_year(yr.now), 366, 365) ntime <- length(ncT$dim$time$vals) - step.day <- nday/ntime - step.hr <- step.day*24 - + step.day <- nday / ntime + step.hr <- step.day * 24 + # ----- # Making time stamps to match the training data # For coarser time step than the training data, we'll duplicate in the loop # ----- # Making what the unique time stamps should be to match the training data - stamps.hr <- seq(hr.train/2, by=hr.train, length.out=1/day.train) - stamps.src <- seq(step.hr/2, by=step.hr, length.out=1/step.day) - - if(step.hr < hr.train){ # Finer hour increment --> set it up to aggregate - align = "aggregate" - stamps.src <- rep(stamps.hr, each=24/step.hr) - } else if(step.hr > hr.train) { # Set the flag to duplicate the data - align = "repeat" + stamps.hr <- seq(hr.train / 2, by = hr.train, length.out = 1 / day.train) + stamps.src <- seq(step.hr / 2, by = step.hr, length.out = 1 / step.day) + + if (step.hr < hr.train) { # Finer hour increment --> set it up to aggregate + align <- "aggregate" + stamps.src <- rep(stamps.hr, each = 24 / step.hr) + } else if (step.hr > hr.train) { # Set the flag to duplicate the data + align <- "repeat" } else { # things are aligned, so we're fine - align = "aligned" + align <- "aligned" } # ----- - + # Create a data frame with all the important time info # center the hour step - df.time <- data.frame(Year=yr.now, DOY=rep(1:nday, each=1/day.train), Hour=rep(stamps.hr, length.out=nday/(day.train))) - df.time$Date <- strptime(paste(df.time$Year, df.time$DOY, df.time$Hour, sep="-"), format=("%Y-%j-%H"), tz="UTC") + df.time <- data.frame(Year = yr.now, DOY = rep(1:nday, each = 1 / day.train), Hour = rep(stamps.hr, length.out = nday / (day.train))) + df.time$Date <- strptime(paste(df.time$Year, df.time$DOY, df.time$Hour, sep = "-"), format = ("%Y-%j-%H"), tz = "UTC") met.out$dat.source[["time"]] <- rbind(met.out$dat.source$time, df.time) - - src.time <- data.frame(Year=yr.now, DOY=rep(1:nday, each=1/step.day), Hour=rep(stamps.src, length.out=ntime)) - src.time$Date <- strptime(paste(src.time$Year, src.time$DOY, src.time$Hour, sep="-"), format=("%Y-%j-%H"), tz="UTC") - + + src.time <- data.frame(Year = yr.now, DOY = rep(1:nday, each = 1 / step.day), Hour = rep(stamps.src, length.out = ntime)) + src.time$Date <- strptime(paste(src.time$Year, src.time$DOY, src.time$Hour, sep = "-"), format = ("%Y-%j-%H"), tz = "UTC") + # Extract the met info, making matrices with the appropriate number of ensemble members - for(v in names(ncT$var)){ + for (v in names(ncT$var)) { dat.tem <- ncdf4::ncvar_get(ncT, v) - - if(align=="repeat"){ # if we need to coerce the time step to be repeated to match temporal resolution, do it here - dat.tem <- rep(dat.tem, each=length(stamps.hr)) + + if (align == "repeat") { # if we 
need to coerce the time step to be repeated to match temporal resolution, do it here + dat.tem <- rep(dat.tem, each = length(stamps.hr)) } - df.tem <- matrix(rep(dat.tem, n.src), ncol=n.src, byrow=F) - + df.tem <- matrix(rep(dat.tem, n.src), ncol = n.src, byrow = F) + # If we need to aggregate the data to align it, do it now to save memory - if(align == "aggregate"){ + if (align == "aggregate") { df.tem <- cbind(src.time, data.frame(df.tem)) - - df.agg <- stats::aggregate(df.tem[,(4+1:n.src)], by=df.tem[,c("Year", "DOY", "Hour")], FUN=mean) - met.out$dat.source[[v]] <- rbind(met.out$dat.source[[v]], as.matrix(df.agg[,(3+1:n.src)])) - + + df.agg <- stats::aggregate(df.tem[, (4 + 1:n.src)], by = df.tem[, c("Year", "DOY", "Hour")], FUN = mean) + met.out$dat.source[[v]] <- rbind(met.out$dat.source[[v]], as.matrix(df.agg[, (3 + 1:n.src)])) + # if workign wiht air temp, also find the max & min - if(v=="air_temperature"){ - tmin <- stats::aggregate(df.tem[,(4+1:n.src)], by=df.tem[,c("Year", "DOY", "Hour")], FUN=min) - tmax <- stats::aggregate(df.tem[,(4+1:n.src)], by=df.tem[,c("Year", "DOY", "Hour")], FUN=max) - - met.out$dat.source[["air_temperature_minimum"]] <- rbind(met.out$dat.source[["air_temperature_minimum"]], as.matrix(tmin[,(3+1:n.src)])) - met.out$dat.source[["air_temperature_maximum"]] <- rbind(met.out$dat.source[["air_temperature_maximum"]], as.matrix(tmax[,(3+1:n.src)])) - } + if (v == "air_temperature") { + tmin <- stats::aggregate(df.tem[, (4 + 1:n.src)], by = df.tem[, c("Year", "DOY", "Hour")], FUN = min) + tmax <- stats::aggregate(df.tem[, (4 + 1:n.src)], by = df.tem[, c("Year", "DOY", "Hour")], FUN = max) + + met.out$dat.source[["air_temperature_minimum"]] <- rbind(met.out$dat.source[["air_temperature_minimum"]], as.matrix(tmin[, (3 + 1:n.src)])) + met.out$dat.source[["air_temperature_maximum"]] <- rbind(met.out$dat.source[["air_temperature_maximum"]], as.matrix(tmax[, (3 + 1:n.src)])) + } } else { - met.out$dat.source[[v]] <- rbind(met.out$dat.source[[v]], as.matrix(df.tem, ncol=1)) + met.out$dat.source[[v]] <- rbind(met.out$dat.source[[v]], as.matrix(df.tem, ncol = 1)) } - } ncdf4::nc_close(ncT) - if(print.progress==TRUE) utils::setTxtProgressBar(pb, i) + if (print.progress == TRUE) utils::setTxtProgressBar(pb, i) } # End looping through source met files - if(print.progress==TRUE) print("") + if (print.progress == TRUE) print("") } else { # we have an ensemble we need to deal with ens.source <- dir(source.path) - + # If we're matching ensemble members need to use the same ones as from the training data - if(pair.mems==TRUE){ - if(length(ens.source) < ens.train) stop("Cannot pair ensemble members. Reset pair.mems to FALSE or check your file paths") - + if (pair.mems == TRUE) { + if (length(ens.source) < ens.train) stop("Cannot pair ensemble members. 
Reset pair.mems to FALSE or check your file paths") + ens.source <- ens.source[train.use] } else { # Figure out whether or not we need to subsample or repeat ensemble members - if(length(ens.source)>=n.ens){ + if (length(ens.source) >= n.ens) { source.use <- sample(1:length(ens.source), n.ens) } else { source.use <- sample(1:length(ens.source), n.ens, replace = TRUE) } - + ens.source <- ens.source[source.use] } - n.src = 1 # Potential to redo places where n.src is currently; this is based on out-dated code - + n.src <- 1 # Potential to redo places where n.src is currently; this is based on out-dated code + # getting an estimate of how many files we need to process n.files <- length(dir(file.path(source.path, ens.source[1]))) - - if(print.progress==TRUE){ + + if (print.progress == TRUE) { print("Processing Source Data") - pb <- utils::txtProgressBar(min=0, max=length(ens.source)*n.files, style=3) - pb.ind=1 + pb <- utils::txtProgressBar(min = 0, max = length(ens.source) * n.files, style = 3) + pb.ind <- 1 } - for(j in 1:length(ens.source)){ + for (j in 1:length(ens.source)) { # Get a list of the files we'll be downscaling files.source <- dir(file.path(source.path, ens.source[j]), ".nc") - + # create a vector of the years yrs.file <- strsplit(files.source, "[.]") - yrs.file <- matrix(unlist(yrs.file), ncol=length(yrs.file[[1]]), byrow=T) - yrs.file <- as.numeric(yrs.file[,ncol(yrs.file)-1]) # Assumes year is always last thing before the file extension - + yrs.file <- matrix(unlist(yrs.file), ncol = length(yrs.file[[1]]), byrow = T) + yrs.file <- as.numeric(yrs.file[, ncol(yrs.file) - 1]) # Assumes year is always last thing before the file extension + # Subsetting to just the years we're interested in - if(!is.null(yrs.source)){ + if (!is.null(yrs.source)) { files.source <- files.source[which(yrs.file %in% yrs.source)] yrs.file <- yrs.file[which(yrs.file %in% yrs.source)] } - + # Getting the day & hour timesteps from the training data - day.train <- round(365/length(unique(met.out$dat.train$time$DOY))) - hr.train <- 24/length(unique(met.out$dat.train$time$Hour)) - + day.train <- round(365 / length(unique(met.out$dat.train$time$DOY))) + hr.train <- 24 / length(unique(met.out$dat.train$time$Hour)) + # Loop through the .nc files putting everything into a list dat.ens <- list() - for(i in 1:length(files.source)){ + for (i in 1:length(files.source)) { yr.now <- yrs.file[i] - + ncT <- ncdf4::nc_open(file.path(source.path, ens.source[j], files.source[i])) - + # Set up the time data frame to help index nday <- ifelse(lubridate::leap_year(yr.now), 366, 365) ntime <- length(ncT$dim$time$vals) - step.day <- nday/ntime - step.hr <- step.day*24 - + step.day <- nday / ntime + step.hr <- step.day * 24 + # ----- # Making time stamps to match the training data # For coarser time step than the training data, we'll duplicate in the loop # ----- # Making what the unique time stamps should be to match the training data - stamps.hr <- seq(hr.train/2, by=hr.train, length.out=1/day.train) - stamps.src <- seq(step.hr/2, by=step.hr, length.out=1/step.day) - - if(step.hr < hr.train){ # Finer hour increment --> set it up to aggregate - align = "aggregate" - stamps.src <- rep(stamps.hr, each=24/step.hr) - } else if(step.hr > hr.train) { # Set the flag to duplicate the data - align = "repeat" + stamps.hr <- seq(hr.train / 2, by = hr.train, length.out = 1 / day.train) + stamps.src <- seq(step.hr / 2, by = step.hr, length.out = 1 / step.day) + + if (step.hr < hr.train) { # Finer hour increment --> set it up to aggregate + 
align <- "aggregate" + stamps.src <- rep(stamps.hr, each = 24 / step.hr) + } else if (step.hr > hr.train) { # Set the flag to duplicate the data + align <- "repeat" } else { # things are aligned, so we're fine - align = "aligned" + align <- "aligned" } # ----- - - + + # Create a data frame with all the important time info # center the hour step - df.time <- data.frame(Year=yr.now, DOY=rep(1:nday, each=1/day.train), Hour=rep(stamps.hr, length.out=nday/(day.train))) - df.time$Date <- strptime(paste(df.time$Year, df.time$DOY, df.time$Hour, sep="-"), format=("%Y-%j-%H"), tz="UTC") + df.time <- data.frame(Year = yr.now, DOY = rep(1:nday, each = 1 / day.train), Hour = rep(stamps.hr, length.out = nday / (day.train))) + df.time$Date <- strptime(paste(df.time$Year, df.time$DOY, df.time$Hour, sep = "-"), format = ("%Y-%j-%H"), tz = "UTC") # Create a data frame with all the important time info # center the hour step # ** Only do this with the first ensemble member so we're not being redundant - if(j==1){ + if (j == 1) { met.out$dat.source[["time"]] <- rbind(met.out$dat.source$time, df.time) } - - src.time <- data.frame(Year=yr.now, DOY=rep(1:nday, each=1/step.day), Hour=rep(stamps.src, length.out=ntime)) - src.time$Date <- strptime(paste(src.time$Year, src.time$DOY, src.time$Hour, sep="-"), format=("%Y-%j-%H"), tz="UTC") - + + src.time <- data.frame(Year = yr.now, DOY = rep(1:nday, each = 1 / step.day), Hour = rep(stamps.src, length.out = ntime)) + src.time$Date <- strptime(paste(src.time$Year, src.time$DOY, src.time$Hour, sep = "-"), format = ("%Y-%j-%H"), tz = "UTC") + # Extract the met info, making matrices with the appropriate number of ensemble members - for(v in names(ncT$var)){ + for (v in names(ncT$var)) { dat.tem <- ncdf4::ncvar_get(ncT, v) - - if(align=="repeat"){ # if we need to coerce the time step to be repeated to match temporal resolution, do it here - dat.tem <- rep(dat.tem, each=stamps.hr) + + if (align == "repeat") { # if we need to coerce the time step to be repeated to match temporal resolution, do it here + dat.tem <- rep(dat.tem, each = stamps.hr) } - df.tem <- matrix(rep(dat.tem, n.src), ncol=1, byrow=F) - + df.tem <- matrix(rep(dat.tem, n.src), ncol = 1, byrow = F) + # If we need to aggregate the data to align it, do it now to save memory - if(align == "aggregate"){ + if (align == "aggregate") { df.tem <- cbind(src.time, data.frame(df.tem)) - - df.agg <- stats::aggregate(df.tem[,(4+1:n.src)], by=df.tem[,c("Year", "DOY", "Hour")], FUN=mean) - dat.ens[[v]] <- rbind(dat.ens[[v]], as.matrix(df.agg[,(3+1:n.src)])) - + + df.agg <- stats::aggregate(df.tem[, (4 + 1:n.src)], by = df.tem[, c("Year", "DOY", "Hour")], FUN = mean) + dat.ens[[v]] <- rbind(dat.ens[[v]], as.matrix(df.agg[, (3 + 1:n.src)])) + # if working with air temp, also find the max & min - if(v=="air_temperature"){ - tmin <- stats::aggregate(df.tem[,(4+1:n.src)], by=df.tem[,c("Year", "DOY", "Hour")], FUN=min) - tmax <- stats::aggregate(df.tem[,(4+1:n.src)], by=df.tem[,c("Year", "DOY", "Hour")], FUN=max) - - dat.ens[["air_temperature_minimum"]] <- rbind(dat.ens[["air_temperature_minimum"]], as.matrix(tmin[,(3+1:n.src)])) - dat.ens[["air_temperature_maximum"]] <- rbind(dat.ens[["air_temperature_maximum"]], as.matrix(tmax[,(3+1:n.src)])) - } + if (v == "air_temperature") { + tmin <- stats::aggregate(df.tem[, (4 + 1:n.src)], by = df.tem[, c("Year", "DOY", "Hour")], FUN = min) + tmax <- stats::aggregate(df.tem[, (4 + 1:n.src)], by = df.tem[, c("Year", "DOY", "Hour")], FUN = max) + + dat.ens[["air_temperature_minimum"]] <- 
rbind(dat.ens[["air_temperature_minimum"]], as.matrix(tmin[, (3 + 1:n.src)])) + dat.ens[["air_temperature_maximum"]] <- rbind(dat.ens[["air_temperature_maximum"]], as.matrix(tmax[, (3 + 1:n.src)])) + } } else { - dat.ens[[v]] <- rbind(dat.ens[[v]], as.matrix(df.tem, ncol=1)) + dat.ens[[v]] <- rbind(dat.ens[[v]], as.matrix(df.tem, ncol = 1)) } - - } #End variable loop + } # End variable loop ncdf4::nc_close(ncT) - if(print.progress==TRUE){ + if (print.progress == TRUE) { utils::setTxtProgressBar(pb, pb.ind) - pb.ind <- pb.ind+1 + pb.ind <- pb.ind + 1 } } # End looping through source met files - + # Storing the ensemble member data in our output list - for(v in names(dat.ens)){ + for (v in names(dat.ens)) { met.out$dat.source[[v]] <- cbind(met.out$dat.source[[v]], dat.ens[[v]]) } } # End loading & formatting source ensemble members - + # Storing info about the ensemble members - for(v in 2:length(met.out$dat.source)){ + for (v in 2:length(met.out$dat.source)) { dimnames(met.out$dat.source[[v]])[[2]] <- ens.source } - } # End loading & formatting source data - if(print.progress==TRUE) print("") + if (print.progress == TRUE) print("") # --------------- - - + + return(met.out) } - \ No newline at end of file diff --git a/modules/data.atmosphere/R/check_met_input.R b/modules/data.atmosphere/R/check_met_input.R index dc9a144158e..2fd5f48bcd3 100644 --- a/modules/data.atmosphere/R/check_met_input.R +++ b/modules/data.atmosphere/R/check_met_input.R @@ -17,9 +17,7 @@ check_met_input_file <- function(metfile, required_vars = variable_table %>% dplyr::filter(.data$is_required) %>% dplyr::pull("cf_standard_name"), - warn_unknown = TRUE - ) { - + warn_unknown = TRUE) { metfile <- normalizePath(metfile, mustWork = FALSE) PEcAn.logger::severeifnot( @@ -77,7 +75,9 @@ check_met_input_file <- function(metfile, ) nonstring_to_missing <- function(x) { - if (is.character(x)) return(x) + if (is.character(x)) { + return(x) + } NA_character_ } diff --git a/modules/data.atmosphere/R/closest_xy.R b/modules/data.atmosphere/R/closest_xy.R index 96c02512736..824a9b22d67 100644 --- a/modules/data.atmosphere/R/closest_xy.R +++ b/modules/data.atmosphere/R/closest_xy.R @@ -9,33 +9,32 @@ ##' @export ##' @author Betsy Cowdery, Ankur Desai closest_xy <- function(slat, slon, infolder, infile) { - test.file <- dir(infolder, infile, full.names = TRUE) test.file <- test.file[grep("*.nc", test.file)] if (length(test.file) == 0) { return(NULL) } test.file <- test.file[1] - + nc <- ncdf4::nc_open(test.file) lat <- ncdf4::ncvar_get(nc, "latitude") lon <- ncdf4::ncvar_get(nc, "longitude") ncdf4::nc_close(nc) - + if (all(dim(lat) == dim(lon))) { - if (dim(lat)==1&&dim(lon)==1) { - ##Case of a single grid cell in file + if (dim(lat) == 1 && dim(lon) == 1) { + ## Case of a single grid cell in file use_xy <- FALSE D <- matrix(-1, 1, 1) - D[1,1] <- sqrt((lat - slat) ^ 2 + (lon - slon) ^ 2) + D[1, 1] <- sqrt((lat - slat)^2 + (lon - slon)^2) } else { - ## this case appears to involve hard-coded values for NARR - ## needs to be generalized + ## this case appears to involve hard-coded values for NARR + ## needs to be generalized use_xy <- TRUE rows <- nrow(lat) cols <- ncol(lat) D <- matrix(-1, rows, cols) - + for (i in seq_len(rows)) { for (j in seq_len(cols)) { tlat <- lat[i, j] @@ -45,7 +44,7 @@ closest_xy <- function(slat, slon, infolder, infile) { c3 <- tlon >= -125 c4 <- tlon <= -65 if (c1 & c2 & c3 & c4) { - D[i, j] <- sqrt((tlat - slat) ^ 2 + (tlon - slon) ^ 2) + D[i, j] <- sqrt((tlat - slat)^2 + (tlon - slon)^2) } } } @@ -56,29 +55,29 
@@ closest_xy <- function(slat, slon, infolder, infile) { rows <- length(lat) cols <- length(lon) D <- matrix(-1, rows, cols) - + dlat <- mean(diff(lat)) dlon <- mean(diff(lon)) for (i in seq_len(rows)) { for (j in seq_len(cols)) { - tlat <- lat[i]+dlat ## assume lat/lon is LL corner, move to center - tlon <- lon[j]+dlon + tlat <- lat[i] + dlat ## assume lat/lon is LL corner, move to center + tlon <- lon[j] + dlon c1 <- tlat >= min(lat) c2 <- tlat <= max(lat) c3 <- tlon >= min(lon) c4 <- tlon <= max(lon) if (c1 & c2 & c3 & c4) { - D[i, j] <- sqrt((tlat - slat) ^ 2 + (tlon - slon) ^ 2) + D[i, j] <- sqrt((tlat - slat)^2 + (tlon - slon)^2) } } } } - + dmin <- min(D[which(D >= 0)]) xy <- which(D == dmin, arr.ind = TRUE) - + if (nrow(xy) > 1) { print("More than one possible coordinate, choosing first one") } - return(list(x = as.numeric(xy[1, 1]), y = as.numeric(xy[1, 2]),use_xy=use_xy)) + return(list(x = as.numeric(xy[1, 1]), y = as.numeric(xy[1, 2]), use_xy = use_xy)) } # closest_xy diff --git a/modules/data.atmosphere/R/data.R b/modules/data.atmosphere/R/data.R index 256b8573748..977995c9bae 100644 --- a/modules/data.atmosphere/R/data.R +++ b/modules/data.atmosphere/R/data.R @@ -1,4 +1,3 @@ - ## Drafts of documentation for package datasets ## ## Written by CKB 2020-05-03, then commented out when I realized that as @@ -31,8 +30,8 @@ # #' \item{source}{dataset identifier, in this case always "cruncep"}} # #' @seealso \code{\link{narr}} \code{\link{narr3h}} \code{\link{ebifarm}} # "cruncep" -# -# +# +# # #' Global 0.5 degree land/water mask for the CRUNCEP dataset # #' # #' For details, please see the CRUNCEP scripts included with this package: @@ -44,8 +43,8 @@ # #' \item{lon}{longitude, in decimal degrees} # #' \item{land}{logical. TRUE = land, FALSE = water}} # "cruncep_landmask" -# -# +# +# # #' 2010 weather station data from near Urbana, IL # #' # #' Hourly 2010 weather data collected at the EBI Energy Farm (Urbana, IL). @@ -64,8 +63,8 @@ # #' \item{source}{dataset identifier, in this case always "ebifarm"}} # #' @seealso \code{\link{cruncep}} \code{\link{narr}} \code{\link{narr3h}} # "ebifarm" -# -# +# +# # #' Codes and BeTY IDs for sites in the FLUXNET network # #' # #' @format a data frame with 698 rows and 2 columns: @@ -75,8 +74,8 @@ # #' \item{site.id}{identifier used in the `sites` table of the PEcAn # #' database. Integer, but stored as character}} # "FLUXNET.sitemap" -# -# +# +# # #' Global land/water mask for the NCEP dataset # #' # #' For details, please see the NCEP scripts included with this package: @@ -88,8 +87,8 @@ # #' \item{lon}{longitude, in decimal degrees} # #' \item{land}{logical. 
TRUE = land, FALSE = water}} # "landmask" -# -# +# +# # #' Latitudes of 94 sites from the NCEP dataset # #' # #' For details, please see the NCEP scripts included with this package: @@ -97,8 +96,8 @@ # #' # #' @format a vector of 94 decimal values # "Lat" -# -# +# +# # #' Longitudes of 192 sites from the NCEP dataset # #' # #' For details, please see the NCEP scripts included with this package: @@ -106,8 +105,8 @@ # #' # #' @format a vector of 192 decimal values # "Lon" -# -# +# +# # #' 2010 NARR weather data for Urbana, IL # #' # #' Hourly 2010 meteorology for the 0.3-degree grid cell containing the @@ -127,8 +126,8 @@ # #' \item{source}{dataset identifier, in this case always "narr"}} # #' @seealso \code{\link{cruncep}} \code{\link{ebifarm}} \code{\link{narr3h}} # "narr" -# -# +# +# # #' 2010 NARR 3-hourly weather data for Urbana, IL # #' # #' Hourly 2010 meteorology for the 0.25-degree grid cell containing the diff --git a/modules/data.atmosphere/R/debias.met.R b/modules/data.atmosphere/R/debias.met.R index 52265855bca..0c168ebcb86 100644 --- a/modules/data.atmosphere/R/debias.met.R +++ b/modules/data.atmosphere/R/debias.met.R @@ -8,7 +8,7 @@ substrRight <- function(x, n) { ##' @export ##' ##' @param outfolder location where output is stored -##' @param input_met - the source_met dataset that will be altered by the training dataset in NC format. +##' @param input_met - the source_met dataset that will be altered by the training dataset in NC format. ##' @param train_met - the observed dataset that will be used to train the modeled dataset in NC format ##' @param de_method - select which debias method you would like to use, options are 'normal', 'linear regression' ##' @param overwrite logical: replace output file if it already exists? Currently ignored. @@ -17,21 +17,26 @@ substrRight <- function(x, n) { ##' @param ... other inputs ##' functions print debugging information as they run? ##' @author James Simkins -debias.met <- function(outfolder, input_met, train_met, site_id, de_method = "linear", +debias.met <- function(outfolder, input_met, train_met, site_id, de_method = "linear", overwrite = FALSE, verbose = FALSE, ...) { - - outfolder <- paste0(outfolder, "_site_", paste0(site_id%/%1e+09, "-", site_id%%1e+09)) - - var <- data.frame(CF.name = c("air_temperature", "air_temperature_max", "air_temperature_min", - "surface_downwelling_longwave_flux_in_air", "air_pressure", "surface_downwelling_shortwave_flux_in_air", - "eastward_wind", "northward_wind", "specific_humidity", "precipitation_flux"), - units = c("Kelvin", "Kelvin", "Kelvin", "W/m2", "Pascal", "W/m2", "m/s", - "m/s", "g/g", "kg/m2/s")) - + outfolder <- paste0(outfolder, "_site_", paste0(site_id %/% 1e+09, "-", site_id %% 1e+09)) + + var <- data.frame( + CF.name = c( + "air_temperature", "air_temperature_max", "air_temperature_min", + "surface_downwelling_longwave_flux_in_air", "air_pressure", "surface_downwelling_shortwave_flux_in_air", + "eastward_wind", "northward_wind", "specific_humidity", "precipitation_flux" + ), + units = c( + "Kelvin", "Kelvin", "Kelvin", "W/m2", "Pascal", "W/m2", "m/s", + "m/s", "g/g", "kg/m2/s" + ) + ) + sub_str <- substrRight(input_met, 7) year <- substr(sub_str, 1, 4) - + # Load in the data that will be used to train the source. Most of the time this # will be observed data. 
train <- list() @@ -46,7 +51,7 @@ debias.met <- function(outfolder, input_met, train_met, site_id, de_method = "li lat_train <- as.numeric(ncdf4::ncvar_get(tem, "latitude")) lon_train <- as.numeric(ncdf4::ncvar_get(tem, "longitude")) ncdf4::nc_close(tem) - + train <- data.frame(train) colnames(train) <- var$CF.name if (all(is.na(train$air_temperature_max))) { @@ -55,38 +60,42 @@ debias.met <- function(outfolder, input_met, train_met, site_id, de_method = "li if (all(is.na(train$air_temperature_min))) { train$air_temperature_min <- train$air_temperature } - + # Load the source dataset source <- list() tem <- ncdf4::nc_open(input_met) for (j in seq_along(var$CF.name)) { if (exists(as.character(var$CF.name[j]), tem$var) == FALSE) { - source[[j]] = NA + source[[j]] <- NA } else { source[[j]] <- ncdf4::ncvar_get(tem, as.character(var$CF.name[j])) } } year <- as.numeric(year) ncdf4::nc_close(tem) - + source <- data.frame(source) colnames(source) <- var$CF.name - - reso <- 24/(nrow(source)/365) + + reso <- 24 / (nrow(source) / 365) reso_len <- nrow(source) # Grab the means/medians of the source and train, find the difference, and # correct the source dataset accordingly The following separate variables based # on properties so we can appropriately debias based on means/medians - add_var <- c("air_temperature", "air_temperature_max", "air_temperature_min", - "air_pressure", "eastward_wind", "northward_wind") + add_var <- c( + "air_temperature", "air_temperature_max", "air_temperature_min", + "air_pressure", "eastward_wind", "northward_wind" + ) - mult_var <- c("surface_downwelling_longwave_flux_in_air", "surface_downwelling_shortwave_flux_in_air", - "specific_humidity", "precipitation_flux") + mult_var <- c( + "surface_downwelling_longwave_flux_in_air", "surface_downwelling_shortwave_flux_in_air", + "specific_humidity", "precipitation_flux" + ) # These are for the linear regression argument, the for loop upscales the # training dataset to match the length of the source dataset because they must be # equal lengths - step <- floor(nrow(train)/nrow(source)) + step <- floor(nrow(train) / nrow(source)) lin_train <- data.frame() for (n in 1:length(var$CF.name)) { for (x in 1:reso_len) { @@ -97,125 +106,130 @@ debias.met <- function(outfolder, input_met, train_met, site_id, de_method = "li debi <- data.frame() ### De_method routines!!!!! 
### if (de_method == "mean") { - for (u in add_var){ + for (u in add_var) { if (all(is.na(source[[u]])) == FALSE) { mean_source <- mean(source[[u]]) mean_train <- mean(train[[u]]) mean_diff <- mean_train - mean_source debi[1:reso_len, u] <- source[[u]] + mean_diff } else { - debi[1:reso_len,u] <- NA + debi[1:reso_len, u] <- NA } } - for (u in mult_var){ + for (u in mult_var) { if (all(is.na(source[[u]])) == FALSE) { mean_source <- mean(source[[u]]) mean_train <- mean(train[[u]]) - mean_ratio <- mean_train/mean_source + mean_ratio <- mean_train / mean_source debi[1:reso_len, u] <- source[[u]] * mean_ratio } else { - debi[1:reso_len,u] <- NA + debi[1:reso_len, u] <- NA } } - } else { if (de_method == "median") { - for (u in add_var){ + for (u in add_var) { if (all(is.na(source[[u]])) == FALSE) { med_source <- stats::median(source[[u]]) med_train <- stats::median(train[[u]]) med_diff <- med_train - med_source debi[1:reso_len, u] <- source[[u]] + med_diff } else { - debi[1:reso_len,u] <- NA + debi[1:reso_len, u] <- NA } } - for (u in mult_var){ + for (u in mult_var) { if (all(is.na(source[[u]])) == FALSE) { - med_source <- stats::median(source[[u]][source[[u]]>0]) - med_train <- stats::median(train[[u]][train[[u]]>0]) - med_ratio <- med_train/med_source + med_source <- stats::median(source[[u]][source[[u]] > 0]) + med_train <- stats::median(train[[u]][train[[u]] > 0]) + med_ratio <- med_train / med_source debi[1:reso_len, u] <- source[[u]] * med_ratio } else { - debi[1:reso_len,u] <- NA + debi[1:reso_len, u] <- NA } } } else { if (de_method == "linear") { debi <- data.frame() for (i in add_var) { - if (all(is.na(source[[i]])) == FALSE & all(is.na(lin_train[[i]])) == - FALSE) { + if (all(is.na(source[[i]])) == FALSE & all(is.na(lin_train[[i]])) == + FALSE) { lin <- stats::lm(lin_train[[i]] ~ source[[i]]) x <- as.numeric(lin$coefficients[2]) b <- as.numeric(lin$coefficients[1]) - debi[1:reso_len,i] <- (source[[i]] * x + b) + debi[1:reso_len, i] <- (source[[i]] * x + b) } else { - if (all(is.na(source[[i]])) == TRUE | all(is.na(lin_train[[i]])) == - TRUE) { - debi[1:reso_len,i] <- NA + if (all(is.na(source[[i]])) == TRUE | all(is.na(lin_train[[i]])) == + TRUE) { + debi[1:reso_len, i] <- NA } } } for (i in mult_var) { - if (all(is.na(source[[i]])) == FALSE & all(is.na(lin_train[[i]])) == - FALSE) { + if (all(is.na(source[[i]])) == FALSE & all(is.na(lin_train[[i]])) == + FALSE) { lin <- stats::lm(lin_train[[i]] ~ source[[i]]) x <- as.numeric(lin$coefficients[2]) b <- 0 - debi[1:reso_len,i] <- (source[[i]] * x + b) + debi[1:reso_len, i] <- (source[[i]] * x + b) } else { - if (all(is.na(source[[i]])) == TRUE | all(is.na(lin_train[[i]])) == - TRUE) { - debi[1:reso_len,i] <- NA + if (all(is.na(source[[i]])) == TRUE | all(is.na(lin_train[[i]])) == + TRUE) { + debi[1:reso_len, i] <- NA } } } } } } - + # This step just ensures that we aren't breaking laws of nature by having # negative precipitation or negative specific humidity debi$precipitation_flux[debi$precipitation_flux < 0] <- 0 debi$specific_humidity[debi$specific_humidity < 0] <- 0 - + train.list <- list() - lat <- ncdf4::ncdim_def(name = "latitude", units = "degree_north", vals = lat_train, - create_dimvar = TRUE) - lon <- ncdf4::ncdim_def(name = "longitude", units = "degree_east", vals = lon_train, - create_dimvar = TRUE) - time <- ncdf4::ncdim_def(name = "time", units = "sec", vals = (1:reso_len) * - reso * 3600, create_dimvar = TRUE, unlim = TRUE) + lat <- ncdf4::ncdim_def( + name = "latitude", units = "degree_north", vals = lat_train, + 
create_dimvar = TRUE + ) + lon <- ncdf4::ncdim_def( + name = "longitude", units = "degree_east", vals = lon_train, + create_dimvar = TRUE + ) + time <- ncdf4::ncdim_def(name = "time", units = "sec", vals = (1:reso_len) * + reso * 3600, create_dimvar = TRUE, unlim = TRUE) dim <- list(lat, lon, time) - + for (j in seq_along(var$CF.name)) { - train.list[[j]] <- ncdf4::ncvar_def(name = as.character(var$CF.name[j]), - units = as.character(var$units[j]), dim = dim, missval = -999, verbose = verbose) + train.list[[j]] <- ncdf4::ncvar_def( + name = as.character(var$CF.name[j]), + units = as.character(var$units[j]), dim = dim, missval = -999, verbose = verbose + ) } - + rows <- 1 dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) - results <- data.frame(file = character(rows), host = character(rows), mimetype = character(rows), - formatname = character(rows), startdate = character(rows), enddate = character(rows), - dbfile.name = paste("debias_met", sep = "."), stringsAsFactors = FALSE) - - loc.file = file.path(outfolder, paste("debias", year, "nc", sep = ".")) + results <- data.frame( + file = character(rows), host = character(rows), mimetype = character(rows), + formatname = character(rows), startdate = character(rows), enddate = character(rows), + dbfile.name = paste("debias_met", sep = "."), stringsAsFactors = FALSE + ) + + loc.file <- file.path(outfolder, paste("debias", year, "nc", sep = ".")) loc <- ncdf4::nc_create(filename = loc.file, vars = train.list, verbose = verbose) - + for (j in seq_along(var$CF.name)) { ncdf4::ncvar_put(nc = loc, varid = as.character(colnames(debi[j])), vals = debi[[j]]) } ncdf4::nc_close(loc) - + results$file <- loc.file results$host <- PEcAn.remote::fqdn() results$startdate <- paste0(year, "-01-01 00:00:00", tz = "UTC") results$enddate <- paste0(year, "-12-31 23:59:59", tz = "UTC") results$mimetype <- "application/x-netcdf" results$formatname <- "CF Meteorology" - + return(invisible(results)) } - - diff --git a/modules/data.atmosphere/R/debias_met_regression.R b/modules/data.atmosphere/R/debias_met_regression.R index bae4dd6f036..87f0c771e7b 100644 --- a/modules/data.atmosphere/R/debias_met_regression.R +++ b/modules/data.atmosphere/R/debias_met_regression.R @@ -61,29 +61,28 @@ #---------------------------------------------------------------------- -debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NULL, CRUNCEP=FALSE, - pair.anoms = TRUE, pair.ens = FALSE, uncert.prop="mean", resids = FALSE, seed=Sys.Date(), - outfolder, yrs.save=NULL, ens.name, ens.mems=NULL, force.sanity=TRUE, sanity.tries=25, sanity.sd=8, lat.in, lon.in, - save.diagnostics=TRUE, path.diagnostics=NULL, +debias.met.regression <- function(train.data, source.data, n.ens, vars.debias = NULL, CRUNCEP = FALSE, + pair.anoms = TRUE, pair.ens = FALSE, uncert.prop = "mean", resids = FALSE, seed = Sys.Date(), + outfolder, yrs.save = NULL, ens.name, ens.mems = NULL, force.sanity = TRUE, sanity.tries = 25, sanity.sd = 8, lat.in, lon.in, + save.diagnostics = TRUE, path.diagnostics = NULL, parallel = FALSE, n.cores = NULL, overwrite = TRUE, verbose = FALSE) { - set.seed(seed) - if(parallel==TRUE) warning("Warning! Parallel processing not reccomended because of memory constraints") - if(ncol(source.data[[2]])>1) warning("Feeding an ensemble of source data is currently experimental! This could crash") - if(n.ens<1){ + if (parallel == TRUE) warning("Warning! 
Parallel processing not reccomended because of memory constraints") + if (ncol(source.data[[2]]) > 1) warning("Feeding an ensemble of source data is currently experimental! This could crash") + if (n.ens < 1) { warning("You need to generate at least one vector of outputs. Changing n.ens to 1, which will be based on the model means.") - n.ens=1 + n.ens <- 1 } - if(!uncert.prop %in% c("mean", "random")) stop("unspecified uncertainty propogation method. Must be 'random' or 'mean' ") - if(uncert.prop=="mean" & n.ens>1) warning(paste0("Warning! Use of mean propagation with n.ens>1 not encouraged as all results will be the same and you will not be adding uncertainty at this stage.")) + if (!uncert.prop %in% c("mean", "random")) stop("unspecified uncertainty propogation method. Must be 'random' or 'mean' ") + if (uncert.prop == "mean" & n.ens > 1) warning(paste0("Warning! Use of mean propagation with n.ens>1 not encouraged as all results will be the same and you will not be adding uncertainty at this stage.")) # Variables need to be done in a specific order vars.all <- c("air_temperature", "air_temperature_maximum", "air_temperature_minimum", "specific_humidity", "surface_downwelling_shortwave_flux_in_air", "air_pressure", "surface_downwelling_longwave_flux_in_air", "wind_speed", "precipitation_flux") - if(is.null(vars.debias)) vars.debias <- vars.all[vars.all %in% names(train.data)] # Don't try to do vars that we don't have - if(is.null(yrs.save)) yrs.save <- unique(source.data$time$Year) - if(is.null(ens.mems)) ens.mems <- stringr::str_pad(1:n.ens, nchar(n.ens), "left", pad="0") + if (is.null(vars.debias)) vars.debias <- vars.all[vars.all %in% names(train.data)] # Don't try to do vars that we don't have + if (is.null(yrs.save)) yrs.save <- unique(source.data$time$Year) + if (is.null(ens.mems)) ens.mems <- stringr::str_pad(1:n.ens, nchar(n.ens), "left", pad = "0") # Set up outputs vars.pred <- vector() @@ -100,30 +99,30 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU # - Referencing off of whatever the layer after "time" is # --------- # If we have fewer columns then we need, randomly duplicate some - if(ncol(train.data[[2]])==n.ens) ens.train <- 1:n.ens + if (ncol(train.data[[2]]) == n.ens) ens.train <- 1:n.ens - if(ncol(train.data[[2]]) < n.ens){ - ens.train <- c(1:ncol(train.data[[2]]), sample(1:ncol(train.data[[2]]), n.ens-ncol(train.data[[2]]),replace=T)) + if (ncol(train.data[[2]]) < n.ens) { + ens.train <- c(1:ncol(train.data[[2]]), sample(1:ncol(train.data[[2]]), n.ens - ncol(train.data[[2]]), replace = T)) } # If we have more columns than we need, randomly subset - if(ncol(train.data[[2]]) > n.ens) { - ens.train <- sample(1:ncol(train.data[[2]]), ncol(train.data[[2]]),replace=T) + if (ncol(train.data[[2]]) > n.ens) { + ens.train <- sample(1:ncol(train.data[[2]]), ncol(train.data[[2]]), replace = T) } # Setting up cases for dealing with an ensemble of source data to be biased - if(pair.ens==T & ncol(train.data[[2]]!=ncol(source.data[[2]]))){ + if (pair.ens == T & ncol(train.data[[2]] != ncol(source.data[[2]]))) { stop("Cannot pair ensembles of different size") - } else if(pair.ens==T) { + } else if (pair.ens == T) { ens.src <- ens.train } - if(pair.ens==F & ncol(source.data[[2]])==1){ - ens.src=1 - } else if(pair.ens==F & ncol(source.data[[2]]) > n.ens) { - ens.src <- sample(1:ncol(source.data[[2]]), ncol(source.data[[2]]),replace=T) - } else if(pair.ens==F & ncol(source.data[[2]]) < n.ens){ - ens.src <- c(1:ncol(source.data[[2]]), 
sample(1:ncol(source.data[[2]]), n.ens-ncol(source.data[[2]]),replace=T)) + if (pair.ens == F & ncol(source.data[[2]]) == 1) { + ens.src <- 1 + } else if (pair.ens == F & ncol(source.data[[2]]) > n.ens) { + ens.src <- sample(1:ncol(source.data[[2]]), ncol(source.data[[2]]), replace = T) + } else if (pair.ens == F & ncol(source.data[[2]]) < n.ens) { + ens.src <- c(1:ncol(source.data[[2]]), sample(1:ncol(source.data[[2]]), n.ens - ncol(source.data[[2]]), replace = T)) } # --------- @@ -132,17 +131,17 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU yrs.overlap <- unique(train.data$time$Year)[unique(train.data$time$Year) %in% unique(source.data$time$Year)] # If we don't have a year of overlap, take closest 20 years from each dataset - if(length(yrs.overlap)<1){ - if(pair.anoms==TRUE) warning("No overlap in years, so we cannot pair the anomalies") - yrs.overlap <- (max(min(train.data$time$Year), min(source.data$time$Year))-20):(min(max(train.data$time$Year), max(source.data$time$Year))+20) - pair.anoms=FALSE # we can't pair the anomalies no matter what we tried to specify before + if (length(yrs.overlap) < 1) { + if (pair.anoms == TRUE) warning("No overlap in years, so we cannot pair the anomalies") + yrs.overlap <- (max(min(train.data$time$Year), min(source.data$time$Year)) - 20):(min(max(train.data$time$Year), max(source.data$time$Year)) + 20) + pair.anoms <- FALSE # we can't pair the anomalies no matter what we tried to specify before } # Cut out training data down to just the calibration period - for(v in vars.debias){ - train.data[[v]] <- matrix(train.data[[v]][which(train.data$time$Year %in% yrs.overlap),], ncol=ncol(train.data[[v]])) + for (v in vars.debias) { + train.data[[v]] <- matrix(train.data[[v]][which(train.data$time$Year %in% yrs.overlap), ], ncol = ncol(train.data[[v]])) } - train.data$time <- train.data$time[which(train.data$time$Year %in% yrs.overlap),] + train.data$time <- train.data$time[which(train.data$time$Year %in% yrs.overlap), ] # ------------------------------------------- @@ -150,38 +149,38 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU # ------------------------------------------- print("") print("Debiasing Meteorology") - pb <- utils::txtProgressBar(min=0, max=length(vars.debias)*n.ens, style=3) - pb.ind=1 - for(v in vars.debias){ + pb <- utils::txtProgressBar(min = 0, max = length(vars.debias) * n.ens, style = 3) + pb.ind <- 1 + for (v in vars.debias) { # ------------- # If we're dealing with precip, lets keep the training data handy & # calculate the number of rainless time periods (days) in each year to # make sure we don't get a constant drizzle # Update: We also need to look at the distribution of consequtive rainless days # ------------- - if(v=="precipitation_flux"){ + if (v == "precipitation_flux") { # rain.train <- met.bias[met.bias$dataset==dat.train.orig,] rainless <- vector() cons.wet <- vector() - for(y in unique(train.data$time$Year)){ - for(i in 1:ncol(train.data$precipitation_flux)){ - rain.now <- train.data$precipitation_flux[train.data$time$Year==y, i] + for (y in unique(train.data$time$Year)) { + for (i in 1:ncol(train.data$precipitation_flux)) { + rain.now <- train.data$precipitation_flux[train.data$time$Year == y, i] - rainless <- c(rainless, length(which(rain.now==0))) + rainless <- c(rainless, length(which(rain.now == 0))) # calculating the mean & sd for rainless days - tally = 0 - for(z in 1:length(rain.now)){ + tally <- 0 + for (z in 1:length(rain.now)) { # If we 
don't have rain, add it to our tally - if(rain.now[z]>0){ - tally=tally+1 + if (rain.now[z] > 0) { + tally <- tally + 1 } # If we have rain and it resets our tally, # - store tally in our vector; then reset - if(rain.now[z]==0 & tally>0){ + if (rain.now[z] == 0 & tally > 0) { cons.wet <- c(cons.wet, tally) - tally=0 + tally <- 0 } } # z End loop } # end i loop @@ -189,8 +188,8 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU # Hard-coding in some sort of max for precipitaiton rain.max <- max(train.data$precipitation_flux) + stats::sd(train.data$precipitation_flux) - rainless.min <- ifelse(min(rainless)-stats::sd(rainless)>=0, min(rainless)-stats::sd(rainless), max(min(rainless)-stats::sd(rainless)/2, 0)) - rainless.max <- ifelse(max(rainless)+stats::sd(rainless)<=365, max(rainless)+stats::sd(rainless), min(max(rainless)+stats::sd(rainless)/2, 365)) + rainless.min <- ifelse(min(rainless) - stats::sd(rainless) >= 0, min(rainless) - stats::sd(rainless), max(min(rainless) - stats::sd(rainless) / 2, 0)) + rainless.max <- ifelse(max(rainless) + stats::sd(rainless) <= 365, max(rainless) + stats::sd(rainless), min(max(rainless) + stats::sd(rainless) / 2, 365)) } # ------------- @@ -203,26 +202,27 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU # -- This will get aggregated right off the bat so we so we're looking at the climatic means # for the first part of bias-correction # ----- - met.train <- data.frame(year=train.data$time$Year, - doy=train.data$time$DOY, - Y=utils::stack(data.frame(train.data[[v]][,ens.train]))[,1], - ind=rep(paste0("X", 1:n.ens), each=nrow(train.data[[v]])) - ) - met.train[,v] <- 0 + met.train <- data.frame( + year = train.data$time$Year, + doy = train.data$time$DOY, + Y = utils::stack(data.frame(train.data[[v]][, ens.train]))[, 1], + ind = rep(paste0("X", 1:n.ens), each = nrow(train.data[[v]])) + ) + met.train[, v] <- 0 # For precip, we want to adjust the total annual precipitation, and then calculate day of year # adjustment & anomaly as fraction of total annual precipitation - if(v == "precipitation_flux"){ + if (v == "precipitation_flux") { # Find total annual preciptiation - precip.ann <- stats::aggregate(met.train$Y, by=met.train[,c("year", "ind")], FUN=sum) + precip.ann <- stats::aggregate(met.train$Y, by = met.train[, c("year", "ind")], FUN = sum) names(precip.ann)[3] <- "Y.tot" - met.train <- merge(met.train, precip.ann, all=T) - met.train$Y <- met.train$Y/met.train$Y.tot # Y is now fraction of annual precip in each timestep + met.train <- merge(met.train, precip.ann, all = T) + met.train$Y <- met.train$Y / met.train$Y.tot # Y is now fraction of annual precip in each timestep } # Aggregate to get rid of years so that we can compare climatic means; bring in covariance among climatic predictors - dat.clim <- stats::aggregate(met.train[,"Y"], by=met.train[,c("doy", "ind")], FUN=mean) + dat.clim <- stats::aggregate(met.train[, "Y"], by = met.train[, c("doy", "ind")], FUN = mean) # dat.clim[,v] <- 1 names(dat.clim)[3] <- "Y" # ----- @@ -231,58 +231,59 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU # 2. 
Pull the raw ("source") data that needs to be bias-corrected -- this will be called "X" # -- this gets aggregated to the climatological mean right off the bat # ----- - met.src <- data.frame(year=source.data$time$Year, - doy=source.data$time$DOY, - X=utils::stack(data.frame(source.data[[v]][,ens.src]))[,1], - ind.src=rep(paste0("X", 1:length(ens.src)), each=nrow(source.data[[v]])) - ) + met.src <- data.frame( + year = source.data$time$Year, + doy = source.data$time$DOY, + X = utils::stack(data.frame(source.data[[v]][, ens.src]))[, 1], + ind.src = rep(paste0("X", 1:length(ens.src)), each = nrow(source.data[[v]])) + ) # met.src[,v] <- - if(v=="precipitation_flux"){ - src.ann <- stats::aggregate(met.src$X, by=met.src[,c("year", "ind.src")], FUN=sum) + if (v == "precipitation_flux") { + src.ann <- stats::aggregate(met.src$X, by = met.src[, c("year", "ind.src")], FUN = sum) names(src.ann)[3] <- "X.tot" - met.src <- merge(met.src, src.ann, all.x=T) + met.src <- merge(met.src, src.ann, all.x = T) # Putting precip as fraction of the year again - met.src$X <- met.src$X/met.src$X.tot - + met.src$X <- met.src$X / met.src$X.tot } # Lets deal with the source data first # - Adding in the ensembles to be predicted - if(length(unique(met.src$ind.src))0 & sane.attempt <= sanity.tries){ - + sane.attempt <- 0 + while (n.new > 0 & sane.attempt <= sanity.tries) { # Rbeta <- matrix(nrow=0, ncol=1); Rbeta.anom <- matrix(nrow=0, ncol=1) # ntries=50 # try.now=0 # while(nrow(Rbeta)<1 & try.now<=ntries){ - # Generate a random distribution of betas using the covariance matrix - # I think the anomalies might be problematic, so lets get way more betas than we need and trim the distribution + # Generate a random distribution of betas using the covariance matrix + # I think the anomalies might be problematic, so lets get way more betas than we need and trim the distribution # set.seed=42 - if(n.ens==1 | uncert.prop=="mean"){ - Rbeta <- matrix(stats::coef(mod.bias), ncol=length(stats::coef(mod.bias))) + if (n.ens == 1 | uncert.prop == "mean") { + Rbeta <- matrix(stats::coef(mod.bias), ncol = length(stats::coef(mod.bias))) } else { - Rbeta <- matrix(MASS::mvrnorm(n=n.new, stats::coef(mod.bias), stats::vcov(mod.bias)), ncol=length(stats::coef(mod.bias))) + Rbeta <- matrix(MASS::mvrnorm(n = n.new, stats::coef(mod.bias), stats::vcov(mod.bias)), ncol = length(stats::coef(mod.bias))) } dimnames(Rbeta)[[2]] <- names(stats::coef(mod.bias)) @@ -548,12 +549,12 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU # try.now=0 # while(nrow(Rbeta.anom)<1 & try.now<=ntries){ - # Generate a random distribution of betas using the covariance matrix - # I think the anomalies might be problematic, so lets get way more betas than we need and trim the distribution - if(n.ens==1){ - Rbeta.anom <- matrix(stats::coef(mod.anom), ncol=length(stats::coef(mod.anom))) + # Generate a random distribution of betas using the covariance matrix + # I think the anomalies might be problematic, so lets get way more betas than we need and trim the distribution + if (n.ens == 1) { + Rbeta.anom <- matrix(stats::coef(mod.anom), ncol = length(stats::coef(mod.anom))) } else { - Rbeta.anom <- matrix(MASS::mvrnorm(n=n.new, stats::coef(mod.anom), stats::vcov(mod.anom)), ncol=length(stats::coef(mod.anom))) + Rbeta.anom <- matrix(MASS::mvrnorm(n = n.new, stats::coef(mod.anom), stats::vcov(mod.anom)), ncol = length(stats::coef(mod.anom))) } dimnames(Rbeta.anom)[[2]] <- names(stats::coef(mod.anom)) # # Filter our betas to remove outliers @@ 
-571,11 +572,11 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU # Rbeta.anom <- matrix(Rbeta.anom[sample(1:nrow(Rbeta.anom), n.new, replace=T),], ncol=ncol(Rbeta.anom)) - if(v == "precipitation_flux"){ - if(n.ens==1){ - Rbeta.ann <- matrix(stats::coef(mod.ann), ncol=length(coef.ann)) + if (v == "precipitation_flux") { + if (n.ens == 1) { + Rbeta.ann <- matrix(stats::coef(mod.ann), ncol = length(coef.ann)) } else { - Rbeta.ann <- matrix(MASS::mvrnorm(n=n.new, stats::coef(mod.ann), stats::vcov(mod.ann)), ncol=length(stats::coef(mod.ann))) + Rbeta.ann <- matrix(MASS::mvrnorm(n = n.new, stats::coef(mod.ann), stats::vcov(mod.ann)), ncol = length(stats::coef(mod.ann))) } # ci.ann <- matrix(apply(Rbeta.ann, 2, quantile, c(0.01, 0.99)), nrow=2) # Rbeta.ann <- Rbeta.ann[which(apply(Rbeta.ann, 1, function(x) all(x > ci.ann[1,] & x < ci.ann[2,]))),] @@ -583,15 +584,15 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU } # Create the prediction matrix - Xp <- stats::predict(mod.bias, newdata=met.src[met.src$ind==ind,], type="lpmatrix") - Xp.anom <- stats::predict(mod.anom, newdata=met.src[met.src$ind==ind,], type="lpmatrix") - if(v == "precipitation_flux"){ + Xp <- stats::predict(mod.bias, newdata = met.src[met.src$ind == ind, ], type = "lpmatrix") + Xp.anom <- stats::predict(mod.anom, newdata = met.src[met.src$ind == ind, ], type = "lpmatrix") + if (v == "precipitation_flux") { # Linear models have a bit of a difference in how we get the info out # Xp.ann <- predict(mod.ann, newdata=met.src, type="lpmatrix") - met.src[met.src$ind==ind,"Y.tot"] <- met.src[met.src$ind==ind,"pred.ann"] + met.src[met.src$ind == ind, "Y.tot"] <- met.src[met.src$ind == ind, "pred.ann"] mod.terms <- stats::terms(mod.ann) - m <- stats::model.frame(mod.terms, met.src[met.src$ind==ind,], xlev=mod.ann$xlevels) + m <- stats::model.frame(mod.terms, met.src[met.src$ind == ind, ], xlev = mod.ann$xlevels) Xp.ann <- stats::model.matrix(mod.terms, m, constrasts.arg <- mod.ann$contrasts) } @@ -619,38 +620,39 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU # -- I tried this and it made my brain hurt # ----- # If we're starting from sratch, make the blank data frames - if(sane.attempt==0){ - dim.new <- c(nrow(met.src[met.src$ind==ind,]), n.new) - sim1a <- matrix(nrow=dim.new[1], ncol=dim.new[2]) - sim1b <- matrix(nrow=dim.new[1], ncol=dim.new[2]) - if(v == "precipitation_flux") sim1c <- matrix(nrow=dim.new[1], ncol=dim.new[2]) + if (sane.attempt == 0) { + dim.new <- c(nrow(met.src[met.src$ind == ind, ]), n.new) + sim1a <- matrix(nrow = dim.new[1], ncol = dim.new[2]) + sim1b <- matrix(nrow = dim.new[1], ncol = dim.new[2]) + if (v == "precipitation_flux") sim1c <- matrix(nrow = dim.new[1], ncol = dim.new[2]) - sim1 <- matrix(nrow=dim.new[1], ncol=dim.new[2]) + sim1 <- matrix(nrow = dim.new[1], ncol = dim.new[2]) } # Default option: no residual error; all error from the downscaling parameters - sim1a[,cols.redo] <- Xp %*% t(Rbeta) # Seasonal Climate component with uncertainty - sim1b[,cols.redo] <- Xp.anom %*% t(Rbeta.anom) # Weather component with uncertainty - if(v == "precipitation_flux"){ - sim1a[,cols.redo] <- 0 + sim1a[, cols.redo] <- Xp %*% t(Rbeta) # Seasonal Climate component with uncertainty + sim1b[, cols.redo] <- Xp.anom %*% t(Rbeta.anom) # Weather component with uncertainty + if (v == "precipitation_flux") { + sim1a[, cols.redo] <- 0 sim1c <- Xp.ann %*% t(Rbeta.ann) # Mean annual precip uncertainty } # If we're dealing with 
the temperatures where there's basically no anomaly, # we'll get the uncertainty subtract the multi-decadal trend out of the anomalies; not a perfect solution, but it will increase the variability - if(pair.anoms==F & (v %in% c("air_temperature_maximum", "air_temperature_minimum"))){ + if (pair.anoms == F & (v %in% c("air_temperature_maximum", "air_temperature_minimum"))) { # sim1b.norm <- apply(sim1b, 1, mean) # What we need is to remove the mean-trend from the anomalies and then add the trend (with uncertinaties) back in # Note that for a single-member ensemble, this just undoes itself - anom.detrend <- met.src[met.src$ind==ind,"anom.raw"] - stats::predict(mod.anom) + anom.detrend <- met.src[met.src$ind == ind, "anom.raw"] - stats::predict(mod.anom) # NOTE: This section can probably be removed and simplified since it should always be a 1-column array now - if(length(cols.redo)>1){ - sim1b[,cols.redo] <- apply(sim1b[,cols.redo], 2, FUN=function(x){x+anom.detrend}) # Get the range around that medium-frequency trend + if (length(cols.redo) > 1) { + sim1b[, cols.redo] <- apply(sim1b[, cols.redo], 2, FUN = function(x) { + x + anom.detrend + }) # Get the range around that medium-frequency trend } else { - sim1b[,cols.redo] <- as.matrix(sim1b[,cols.redo] + anom.detrend) + sim1b[, cols.redo] <- as.matrix(sim1b[, cols.redo] + anom.detrend) } - } @@ -668,11 +670,11 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU # ----- # Adding climate and anomaly together - sim1[,cols.redo] <- sim1a[,cols.redo] + sim1b[,cols.redo] # climate + weather = met driver!! + sim1[, cols.redo] <- sim1a[, cols.redo] + sim1b[, cols.redo] # climate + weather = met driver!! # If we're dealing with precip, transform proportions of rain back to actual precip - if(v == "precipitation_flux"){ - sim1[,cols.redo] <- sim1[,cols.redo]*sim1c[,cols.redo] + if (v == "precipitation_flux") { + sim1[, cols.redo] <- sim1[, cols.redo] * sim1c[, cols.redo] # met.src$X <- met.src$X*met.src$X.tot # met.src$anom.raw <- met.src$anom.raw*met.src$X.tot } @@ -681,176 +683,181 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU # SANITY CHECKS!!! 
# ----- # Determine which ensemble members fail sanity checks - #don't forget to check for transformed variables + # don't forget to check for transformed variables # vars.transform <- c("surface_downwelling_shortwave_flux_in_air", "specific_humidity", "surface_downwelling_longwave_flux_in_air", "wind_speed") - if(v %in% c("air_temperature", "air_temperature_maximum", "air_temperature_minimum")){ + if (v %in% c("air_temperature", "air_temperature_maximum", "air_temperature_minimum")) { # max air temp = 70 C; hottest temperature from sattellite; very ridiculous # min air temp = -95 C; colder than coldest natural temperature recorded in Antarctica - cols.redo <- which(apply(sim1, 2, function(x) min(x) < 273.15-95 | max(x) > 273.15+70 | - min(x) < mean(met.train$X) - sanity.sd*stats::sd(met.train$X) | - max(x) > mean(met.train$X) + sanity.sd*stats::sd(met.train$X) - )) + cols.redo <- which(apply(sim1, 2, function(x) { + min(x) < 273.15 - 95 | max(x) > 273.15 + 70 | + min(x) < mean(met.train$X) - sanity.sd * stats::sd(met.train$X) | + max(x) > mean(met.train$X) + sanity.sd * stats::sd(met.train$X) + })) } - #"specific_humidity", - if(v == "specific_humidity"){ + # "specific_humidity", + if (v == "specific_humidity") { # Based on google, it looks like values of 30 g/kg can occur in the tropics, so lets go above that # Also, the minimum humidity can't be 0 so lets just make it extremely dry; lets set this for 1 g/Mg - cols.redo <- which(apply(sim1, 2, function(x) min(x^2) < 1e-6 | max(x^2) > 40e-3 | - min(x^2) < mean(met.train$X^2) - sanity.sd*stats::sd(met.train$X^2) | - max(x^2) > mean(met.train$X^2) + sanity.sd*stats::sd(met.train$X^2) - )) + cols.redo <- which(apply(sim1, 2, function(x) { + min(x^2) < 1e-6 | max(x^2) > 40e-3 | + min(x^2) < mean(met.train$X^2) - sanity.sd * stats::sd(met.train$X^2) | + max(x^2) > mean(met.train$X^2) + sanity.sd * stats::sd(met.train$X^2) + })) } - #"surface_downwelling_shortwave_flux_in_air", - if(v == "surface_downwelling_shortwave_flux_in_air"){ + # "surface_downwelling_shortwave_flux_in_air", + if (v == "surface_downwelling_shortwave_flux_in_air") { # Based on something found from Columbia, average Radiative flux at ATM is 1360 W/m2, so for a daily average it should be less than this # Lets round 1360 and divide that by 2 (because it should be a daily average) and conservatively assume albedo of 20% (average value is more like 30) # Source http://eesc.columbia.edu/courses/ees/climate/lectures/radiation/ - cols.redo <- which(apply(sim1, 2, function(x) max(x^2) > 1360/2*0.8 | - min(x^2) < mean(met.train$X^2) - sanity.sd*stats::sd(met.train$X^2) | - max(x^2) > mean(met.train$X^2) + sanity.sd*stats::sd(met.train$X^2) - )) + cols.redo <- which(apply(sim1, 2, function(x) { + max(x^2) > 1360 / 2 * 0.8 | + min(x^2) < mean(met.train$X^2) - sanity.sd * stats::sd(met.train$X^2) | + max(x^2) > mean(met.train$X^2) + sanity.sd * stats::sd(met.train$X^2) + })) } - if(v == "air_pressure"){ + if (v == "air_pressure") { # According to wikipedia the highest barometric pressure ever recorded was 1085.7 hPa = 1085.7*100 Pa; Dead sea has average pressure of 1065 hPa # - Lets round up to 1100 hPA # Also according to Wikipedia, the lowest non-tornadic pressure ever measured was 870 hPA - cols.redo <- which(apply(sim1, 2, function(x) min(x) < 870*100 | max(x) > 1100*100 | - min(x) < mean(met.train$X) - sanity.sd*stats::sd(met.train$X) | - max(x) > mean(met.train$X) + sanity.sd*stats::sd(met.train$X) - )) + cols.redo <- which(apply(sim1, 2, function(x) { + min(x) < 870 * 100 | 
max(x) > 1100 * 100 | + min(x) < mean(met.train$X) - sanity.sd * stats::sd(met.train$X) | + max(x) > mean(met.train$X) + sanity.sd * stats::sd(met.train$X) + })) } - if(v == "surface_downwelling_longwave_flux_in_air"){ + if (v == "surface_downwelling_longwave_flux_in_air") { # A NASA presentation has values topping out ~300 and min ~0: https://ceres.larc.nasa.gov/documents/STM/2003-05/pdf/smith.pdf # A random journal article has 130 - 357.3: http://www.tandfonline.com/doi/full/10.1080/07055900.2012.760441 # ED2 sanity checks boudn longwave at 40 & 600 - cols.redo <- which(apply(sim1, 2, function(x) min(x^2) < 40 | max(x^2) > 600 | - min(x^2) < mean(met.train$X^2) - sanity.sd*stats::sd(met.train$X^2) | - max(x^2) > mean(met.train$X^2) + sanity.sd*stats::sd(met.train$X^2) - )) - + cols.redo <- which(apply(sim1, 2, function(x) { + min(x^2) < 40 | max(x^2) > 600 | + min(x^2) < mean(met.train$X^2) - sanity.sd * stats::sd(met.train$X^2) | + max(x^2) > mean(met.train$X^2) + sanity.sd * stats::sd(met.train$X^2) + })) } - if(v == "wind_speed"){ + if (v == "wind_speed") { # According to wikipedia, the hgihest wind speed ever recorded is a gust of 113 m/s; the maximum 5-mind wind speed is 49 m/s - cols.redo <- which(apply(sim1, 2, function(x) max(x^2) > 50/2 | - min(x^2) < mean(met.train$X^2) - sanity.sd*stats::sd(met.train$X^2) | - max(x^2) > mean(met.train$X^2) + sanity.sd*stats::sd(met.train$X^2) - )) + cols.redo <- which(apply(sim1, 2, function(x) { + max(x^2) > 50 / 2 | + min(x^2) < mean(met.train$X^2) - sanity.sd * stats::sd(met.train$X^2) | + max(x^2) > mean(met.train$X^2) + sanity.sd * stats::sd(met.train$X^2) + })) } - if(v == "precipitation_flux"){ + if (v == "precipitation_flux") { # According to wunderground, ~16" in 1 hr is the max; Lets divide that by 2 for the daily rainfall rate # https://www.wunderground.com/blog/weatherhistorian/what-is-the-most-rain-to-ever-fall-in-one-minute-or-one-hour.html # 16/2 = round number; x25.4 = inches to mm; /(60*60) = hr to sec - cols.redo <- which(apply(sim1, 2, function(x) max(x) > 16/2*25.4/(60*60) | - min(x) < min(met.train$X) - sanity.sd*stats::sd(met.train$X) | - max(x) > max(met.train$X) + sanity.sd*stats::sd(met.train$X) - )) + cols.redo <- which(apply(sim1, 2, function(x) { + max(x) > 16 / 2 * 25.4 / (60 * 60) | + min(x) < min(met.train$X) - sanity.sd * stats::sd(met.train$X) | + max(x) > max(met.train$X) + sanity.sd * stats::sd(met.train$X) + })) } - n.new = length(cols.redo) - if(force.sanity){ - sane.attempt = sane.attempt + 1 + n.new <- length(cols.redo) + if (force.sanity) { + sane.attempt <- sane.attempt + 1 } else { - sane.attempt = sanity.tries+1 + sane.attempt <- sanity.tries + 1 } # ----- } # End Sanity Attempts - if(force.sanity & n.new>0){ + if (force.sanity & n.new > 0) { # # If we're still struggling, but we have at least some workable columns, lets just duplicate those: # if(n.new<(round(n.ens/2)+1)){ # cols.safe <- 1:ncol(sim1) # cols.safe <- cols.safe[!(cols.safe %in% cols.redo)] # sim1[,cols.redo] <- sim1[,sample(cols.safe, n.new, replace=T)] # } else { - # for known problem variables, lets force sanity as a last resort - if(v %in% c("air_temperature", "air_temperature_maximum", "air_temperature_minimum")){ - warning(paste("Forcing Sanity:", v)) - if(min(sim1) < max(184, mean(met.train$X) - sanity.sd*stats::sd(met.train$X))) { - qtrim <- max(184, mean(met.train$X) - sanity.sd*stats::sd(met.train$X)) + 1e-6 - sim1[sim1 < qtrim] <- qtrim - } - if(max(sim1) > min(331, mean(met.train$X) + stats::sd(met.train$X^2))) { - qtrim <- 
min(331, mean(met.train$X) + sanity.sd*stats::sd(met.train$X)) - 1e-6 - sim1[sim1 > qtrim] <- qtrim - } - } else if(v == "surface_downwelling_shortwave_flux_in_air"){ - # # Based on something found from Columbia, average Radiative flux at ATM is 1360 W/m2, so for a daily average it should be less than this - # # Lets round 1360 and divide that by 2 (because it should be a daily average) and conservatively assume albedo of 20% (average value is more like 30) - # # Source http://eesc.columbia.edu/courses/ees/climate/lectures/radiation/ - # cols.redo <- which(apply(sim1, 2, function(x) max(x^2) > 1360/2*0.8 | - # min(x) < mean(met.train$X) - sanity.sd*sd(met.train$X) | - # max(x) > mean(met.train$X) + sanity.sd*sd(met.train$X) - # )) - warning(paste("Forcing Sanity:", v)) - if(min(sim1^2) < max(mean(met.train$X^2) - sanity.sd*stats::sd(met.train$X^2))) { - qtrim <- max(mean(met.train$X^2) - sanity.sd*stats::sd(met.train$X^2)) - sim1[sim1^2 < qtrim] <- sqrt(qtrim) - } - if(max(sim1^2) > min(1500*0.8, mean(met.train$X^2) + sanity.sd*stats::sd(met.train$X^2))) { - qtrim <- min(1500*0.8, mean(met.train$X^2) + sanity.sd*stats::sd(met.train$X^2)) - sim1[sim1^2 > qtrim] <- sqrt(qtrim) - } - - } else if(v == "surface_downwelling_longwave_flux_in_air"){ - # Having a heck of a time keeping things reasonable, so lets trim it - # ED2 sanity checks boudn longwave at 40 & 600 + # for known problem variables, lets force sanity as a last resort + if (v %in% c("air_temperature", "air_temperature_maximum", "air_temperature_minimum")) { + warning(paste("Forcing Sanity:", v)) + if (min(sim1) < max(184, mean(met.train$X) - sanity.sd * stats::sd(met.train$X))) { + qtrim <- max(184, mean(met.train$X) - sanity.sd * stats::sd(met.train$X)) + 1e-6 + sim1[sim1 < qtrim] <- qtrim + } + if (max(sim1) > min(331, mean(met.train$X) + stats::sd(met.train$X^2))) { + qtrim <- min(331, mean(met.train$X) + sanity.sd * stats::sd(met.train$X)) - 1e-6 + sim1[sim1 > qtrim] <- qtrim + } + } else if (v == "surface_downwelling_shortwave_flux_in_air") { + # # Based on something found from Columbia, average Radiative flux at ATM is 1360 W/m2, so for a daily average it should be less than this + # # Lets round 1360 and divide that by 2 (because it should be a daily average) and conservatively assume albedo of 20% (average value is more like 30) + # # Source http://eesc.columbia.edu/courses/ees/climate/lectures/radiation/ + # cols.redo <- which(apply(sim1, 2, function(x) max(x^2) > 1360/2*0.8 | + # min(x) < mean(met.train$X) - sanity.sd*sd(met.train$X) | + # max(x) > mean(met.train$X) + sanity.sd*sd(met.train$X) + # )) + warning(paste("Forcing Sanity:", v)) + if (min(sim1^2) < max(mean(met.train$X^2) - sanity.sd * stats::sd(met.train$X^2))) { + qtrim <- max(mean(met.train$X^2) - sanity.sd * stats::sd(met.train$X^2)) + sim1[sim1^2 < qtrim] <- sqrt(qtrim) + } + if (max(sim1^2) > min(1500 * 0.8, mean(met.train$X^2) + sanity.sd * stats::sd(met.train$X^2))) { + qtrim <- min(1500 * 0.8, mean(met.train$X^2) + sanity.sd * stats::sd(met.train$X^2)) + sim1[sim1^2 > qtrim] <- sqrt(qtrim) + } + } else if (v == "surface_downwelling_longwave_flux_in_air") { + # Having a heck of a time keeping things reasonable, so lets trim it + # ED2 sanity checks boudn longwave at 40 & 600 - warning(paste("Forcing Sanity:", v)) - if(min(sim1^2) < max(40, mean(met.train$X^2) - sanity.sd*stats::sd(met.train$X^2))) { - qtrim <- max(40, mean(met.train$X^2) - sanity.sd*stats::sd(met.train$X^2)) - sim1[sim1^2 < qtrim] <- sqrt(qtrim) - } - if(max(sim1^2) > min(600, 
mean(met.train$X^2) + sanity.sd*stats::sd(met.train$X^2))) { - qtrim <- min(600, mean(met.train$X^2) + sanity.sd*stats::sd(met.train$X^2)) - sim1[sim1^2 > qtrim] <- sqrt(qtrim) - } - } else if(v=="specific_humidity"){ - warning(paste("Forcing Sanity:", v)) - # I'm having a hell of a time trying to get SH to fit sanity bounds, so lets brute-force fix outliers - if(min(sim1^2) < max(1e-6, mean(met.train$X^2) - sanity.sd*stats::sd(met.train$X^2))) { - qtrim <- max(1e-6, mean(met.train$X^2) - sanity.sd*stats::sd(met.train$X^2)) - sim1[sim1^2 < qtrim] <- sqrt(qtrim) - } - if(max(sim1^2) > min(3.2e-2, mean(met.train$X^2) + sanity.sd*stats::sd(met.train$X^2))) { - qtrim <- min(3.2e-2, mean(met.train$X^2) + sanity.sd*stats::sd(met.train$X^2)) - sim1[sim1^2 > qtrim] <- sqrt(qtrim) - } - } else if(v=="air_pressure"){ - warning(paste("Forcing Sanity:", v)) - if(min(sim1)< max(45000, mean(met.train$X) - sanity.sd*stats::sd(met.train$X))){ - qtrim <- min(45000, mean(met.train$X) - sanity.sd*stats::sd(met.train$X)) - sim1[sim1 < qtrim] <- qtrim - } - if(max(sim1) < min(11000000, mean(met.train$X) + sanity.sd*stats::sd(met.train$X))){ - qtrim <- min(11000000, mean(met.train$X) + sanity.sd*stats::sd(met.train$X)) - sim1[sim1 > qtrim] <- qtrim - } - } else if(v=="wind_speed"){ - warning(paste("Forcing Sanity:", v)) - if(min(sim1)< max(0, mean(met.train$X) - sanity.sd*stats::sd(met.train$X))){ - qtrim <- min(0, mean(met.train$X) - sanity.sd*stats::sd(met.train$X)) - sim1[sim1 < qtrim] <- qtrim - } - if(max(sim1) < min(sqrt(85), mean(met.train$X) + sanity.sd*stats::sd(met.train$X))){ - qtrim <- min(sqrt(85), mean(met.train$X) + sanity.sd*stats::sd(met.train$X)) - sim1[sim1 > qtrim] <- qtrim - } - } else { - # If this is a new problem variable, lets stop and look at it - stop(paste("Unable to produce a sane prediction:", v, "- ens", ens, "; problem child =", paste(cols.redo, collapse=" "))) + warning(paste("Forcing Sanity:", v)) + if (min(sim1^2) < max(40, mean(met.train$X^2) - sanity.sd * stats::sd(met.train$X^2))) { + qtrim <- max(40, mean(met.train$X^2) - sanity.sd * stats::sd(met.train$X^2)) + sim1[sim1^2 < qtrim] <- sqrt(qtrim) + } + if (max(sim1^2) > min(600, mean(met.train$X^2) + sanity.sd * stats::sd(met.train$X^2))) { + qtrim <- min(600, mean(met.train$X^2) + sanity.sd * stats::sd(met.train$X^2)) + sim1[sim1^2 > qtrim] <- sqrt(qtrim) + } + } else if (v == "specific_humidity") { + warning(paste("Forcing Sanity:", v)) + # I'm having a hell of a time trying to get SH to fit sanity bounds, so lets brute-force fix outliers + if (min(sim1^2) < max(1e-6, mean(met.train$X^2) - sanity.sd * stats::sd(met.train$X^2))) { + qtrim <- max(1e-6, mean(met.train$X^2) - sanity.sd * stats::sd(met.train$X^2)) + sim1[sim1^2 < qtrim] <- sqrt(qtrim) + } + if (max(sim1^2) > min(3.2e-2, mean(met.train$X^2) + sanity.sd * stats::sd(met.train$X^2))) { + qtrim <- min(3.2e-2, mean(met.train$X^2) + sanity.sd * stats::sd(met.train$X^2)) + sim1[sim1^2 > qtrim] <- sqrt(qtrim) } + } else if (v == "air_pressure") { + warning(paste("Forcing Sanity:", v)) + if (min(sim1) < max(45000, mean(met.train$X) - sanity.sd * stats::sd(met.train$X))) { + qtrim <- min(45000, mean(met.train$X) - sanity.sd * stats::sd(met.train$X)) + sim1[sim1 < qtrim] <- qtrim + } + if (max(sim1) < min(11000000, mean(met.train$X) + sanity.sd * stats::sd(met.train$X))) { + qtrim <- min(11000000, mean(met.train$X) + sanity.sd * stats::sd(met.train$X)) + sim1[sim1 > qtrim] <- qtrim + } + } else if (v == "wind_speed") { + warning(paste("Forcing Sanity:", v)) + if 
(min(sim1) < max(0, mean(met.train$X) - sanity.sd * stats::sd(met.train$X))) { + qtrim <- min(0, mean(met.train$X) - sanity.sd * stats::sd(met.train$X)) + sim1[sim1 < qtrim] <- qtrim + } + if (max(sim1) < min(sqrt(85), mean(met.train$X) + sanity.sd * stats::sd(met.train$X))) { + qtrim <- min(sqrt(85), mean(met.train$X) + sanity.sd * stats::sd(met.train$X)) + sim1[sim1 > qtrim] <- qtrim + } + } else { + # If this is a new problem variable, lets stop and look at it + stop(paste("Unable to produce a sane prediction:", v, "- ens", ens, "; problem child =", paste(cols.redo, collapse = " "))) + } # } # End if else } # End force sanity # Un-transform variables where we encounter zero-truncation issues # NOTE: Need to do this *before* we sum the components!! - #if(v %in% vars.transform){ + # if(v %in% vars.transform){ # sim1 <- sim1^2 # # met.src[met.src$ind==ind,"X"] <- met.src[met.src$ind==ind,"X"]^2 - #} + # } # For preciptiation, we need to make sure we don't have constant drizzle and have @@ -858,58 +865,57 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU # been a trend in number of rainless days over the past 1000 years and use the mean & # sd of rainless days in the training data to randomly distribute the rain in the past # Update: We also need to look at the distribution of consequtive rainless days - if(v=="precipitation_flux"){ - for(j in 1:ncol(sim1)){ - for(y in min(met.src[met.src$ind==ind, "year"]):max(met.src[met.src$ind==ind, "year"])){ + if (v == "precipitation_flux") { + for (j in 1:ncol(sim1)) { + for (y in min(met.src[met.src$ind == ind, "year"]):max(met.src[met.src$ind == ind, "year"])) { # Figure out which rows belong to this particular year - rows.yr <- which(met.src[met.src$ind==ind, "year"]==y) + rows.yr <- which(met.src[met.src$ind == ind, "year"] == y) # Before adjusting rainless days, make sure we get rid of our negative days first - dry <- rows.yr[which(sim1[rows.yr,j] < 0)] - while(length(dry)>0){ # until we have our water year balanced - for(r in 1:length(dry)){ + dry <- rows.yr[which(sim1[rows.yr, j] < 0)] + while (length(dry) > 0) { # until we have our water year balanced + for (r in 1:length(dry)) { # Pick a year with some rain and take the rain from it # -- this *should* make sure we don't get an infinite loop by making one rainless day have negative rain - row.steal <- sample(rows.yr[which(sim1[rows.yr,j]>0)], 1) # The row we're stealing precip out of to balance the budget - sim1[row.steal,j] <- sim1[row.steal,j] + sim1[dry[r],j] - sim1[dry[r],j] <- 0 + row.steal <- sample(rows.yr[which(sim1[rows.yr, j] > 0)], 1) # The row we're stealing precip out of to balance the budget + sim1[row.steal, j] <- sim1[row.steal, j] + sim1[dry[r], j] + sim1[dry[r], j] <- 0 } - dry <- rows.yr[which(sim1[rows.yr,j] < 0)] # update our dry days + dry <- rows.yr[which(sim1[rows.yr, j] < 0)] # update our dry days } # n.now = number of rainless days for this sim - n.now <- round(stats::rnorm(1, mean(rainless, na.rm=T), stats::sd(rainless, na.rm=T)), 0) - if(n.now < rainless.min) n.now <- rainless.min # Make sure we don't have negative or no rainless days - if(n.now > rainless.max) n.now <- rainless.max # Make sure we have at least one day with rain + n.now <- round(stats::rnorm(1, mean(rainless, na.rm = T), stats::sd(rainless, na.rm = T)), 0) + if (n.now < rainless.min) n.now <- rainless.min # Make sure we don't have negative or no rainless days + if (n.now > rainless.max) n.now <- rainless.max # Make sure we have at least one day with rain # We're 
having major seasonality issues, so lets randomly redistribute our precip # Pull ~twice what we need and randomly select from that so that we don't have such clean cuttoffs # set.seed(12) - cutoff <- stats::quantile(sim1[rows.yr, j], min(n.now/366*2.5, max(0.75, n.now/366)), na.rm=T) - if(length(which(sim1[rows.yr,j]>0)) < n.now){ + cutoff <- stats::quantile(sim1[rows.yr, j], min(n.now / 366 * 2.5, max(0.75, n.now / 366)), na.rm = T) + if (length(which(sim1[rows.yr, j] > 0)) < n.now) { # if we need to re-distribute our rain (make more rainy days), use the inverse of the cutoff # cutoff <- 1-cutoff - dry1 <- rows.yr[which(sim1[rows.yr,j] > cutoff)] - dry <- sample(dry1, 365-n.now, replace=T) + dry1 <- rows.yr[which(sim1[rows.yr, j] > cutoff)] + dry <- sample(dry1, 365 - n.now, replace = T) - wet <- sample(rows.yr[!rows.yr %in% dry], length(dry), replace=T) + wet <- sample(rows.yr[!rows.yr %in% dry], length(dry), replace = T) # Go through and randomly redistribute the precipitation to days we're not designating as rainless # Note, if we don't loop through, we might lose some of our precip # IN the case of redistributing rain to prevent super droughts, divide by 2 - for(r in 1:length(dry)){ - sim1[wet[r],j] <- sim1[dry[r],j]/2 - sim1[dry[r],j] <- sim1[dry[r],j]/2 + for (r in 1:length(dry)) { + sim1[wet[r], j] <- sim1[dry[r], j] / 2 + sim1[dry[r], j] <- sim1[dry[r], j] / 2 } - } else { # Figure out which days are currently below our cutoff and randomly distribute # their precip to days that are not below the cutoff (this causes a more bi-modal # distribution hwere dry days get drier), but other options ended up with either # too few rainless days because of only slight redistribution (r+1) or buildup # towards the end of the year (random day that hasn't happened) - dry1 <- rows.yr[which(sim1[rows.yr,j] < cutoff)] - dry <- sample(dry1, min(n.now, length(dry1)), replace=F) + dry1 <- rows.yr[which(sim1[rows.yr, j] < cutoff)] + dry <- sample(dry1, min(n.now, length(dry1)), replace = F) dry1 <- dry1[!dry1 %in% dry] # dry <- dry[order(dry)] @@ -917,67 +923,65 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU # Figure out how close together our dry are # Now checking to see if we need to move rainy days # calculating the mean & sd for rainless days - redistrib=T + redistrib <- T # wet.max <- round(rnorm(1, mean(cons.wet, na.rm=T), sd(cons.wet, na.rm=T)), 0) - while(redistrib==T & length(dry1)>1){ + while (redistrib == T & length(dry1) > 1) { ens.wet <- vector() wet.end <- vector() - tally = 0 - for(z in seq_along(rows.yr)){ + tally <- 0 + for (z in seq_along(rows.yr)) { # If we don't have rain, add it to our tally - if(!rows.yr[z] %in% dry){ - tally=tally+1 + if (!rows.yr[z] %in% dry) { + tally <- tally + 1 } # If we have rain and it resets our tally, # - store tally in our vector; then reset - if(rows.yr[z] %in% dry & tally>0){ + if (rows.yr[z] %in% dry & tally > 0) { ens.wet <- c(ens.wet, tally) wet.end <- c(wet.end, rows.yr[z]) - tally=0 + tally <- 0 } } # end z # If we have a worryingly high number of consequtive wet days (outside of 6 sd); try a new dry - if(max(ens.wet) > max(cons.wet)+stats::sd(cons.wet) ){ + if (max(ens.wet) > max(cons.wet) + stats::sd(cons.wet)) { # print("redistributing dry days") # If we have a wet period that's too long, lets find the random dry that's # closest to the midpoint of the longest # Finding what we're going to insert as our new dry day - wet.max <- which(ens.wet==max(ens.wet))[1] - dry.diff <- abs(dry1 - 
round(wet.end[wet.max]-ens.wet[wet.max]/2)+1) - dry.new <- which(dry.diff==min(dry.diff))[1] + wet.max <- which(ens.wet == max(ens.wet))[1] + dry.diff <- abs(dry1 - round(wet.end[wet.max] - ens.wet[wet.max] / 2) + 1) + dry.new <- which(dry.diff == min(dry.diff))[1] # Finding the closest dry date to shift - dry.diff2 <- abs(dry - round(wet.end[wet.max]-ens.wet[wet.max]/2)+1) - dry.replace <- which(dry.diff2==min(dry.diff2))[1] + dry.diff2 <- abs(dry - round(wet.end[wet.max] - ens.wet[wet.max] / 2) + 1) + dry.replace <- which(dry.diff2 == min(dry.diff2))[1] dry[dry.replace] <- dry1[dry.new] - dry1 <- dry1[dry1!=dry1[dry.new]] # Drop the one we just moved so we don't get in an infinite loop + dry1 <- dry1[dry1 != dry1[dry.new]] # Drop the one we just moved so we don't get in an infinite loop } else { - redistrib=F + redistrib <- F } } # # Figure out where to put the extra rain; allow replacement for good measure - wet <- sample(rows.yr[!rows.yr %in% dry], length(dry), replace=T) + wet <- sample(rows.yr[!rows.yr %in% dry], length(dry), replace = T) # Go through and randomly redistribute the precipitation to days we're not designating as rainless # Note, if we don't loop through, we might lose some of our precip - for(r in 1:length(dry)){ - sim1[wet[r],j] <- sim1[wet[r],j] + sim1[dry[r],j] - sim1[dry[r],j] <- 0 + for (r in 1:length(dry)) { + sim1[wet[r], j] <- sim1[wet[r], j] + sim1[dry[r], j] + sim1[dry[r], j] <- 0 } } - - } # End year (y) } # End sim (j) } # End precip # Randomly pick one from this meta-ensemble to save # this *should* be propogating uncertainty because we have the ind effects in all of the models and we're randomly adding as we go - sim.final[,ens] <- as.vector(sim1) + sim.final[, ens] <- as.vector(sim1) # if(uncert.prop=="random"){ # sim.final[,ens] <- sim1[,sample(1:ncol(sim1),1)] # } @@ -986,20 +990,20 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU # } utils::setTxtProgressBar(pb, pb.ind) - pb.ind <- pb.ind+1 + pb.ind <- pb.ind + 1 rm(mod.bias, anom.train, anom.src, mod.anom, Xp, Xp.anom, sim1, sim1a, sim1b) } # End ensemble loop - if(v == "precipitation_flux"){ + if (v == "precipitation_flux") { # sim1 <- sim1*sim1c - met.src$X <- met.src$X*met.src$X.tot - met.src$anom.raw <- met.src$anom.raw*met.src$X.tot + met.src$X <- met.src$X * met.src$X.tot + met.src$anom.raw <- met.src$anom.raw * met.src$X.tot } - if(v %in% vars.transform){ + if (v %in% vars.transform) { sim.final <- sim.final^2 - dat.clim[,c("X", "Y")] <- (dat.clim[,c("X", "Y")]^2) + dat.clim[, c("X", "Y")] <- (dat.clim[, c("X", "Y")]^2) met.src$X <- (met.src$X)^2 met.train$X <- (met.train$X)^2 met.train$Y <- (met.train$Y)^2 @@ -1012,26 +1016,26 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU # ------------- # Save some diagnostic graphs if useful # ------------- - if(save.diagnostics==TRUE){ - dir.create(path.diagnostics, recursive=T, showWarnings=F) + if (save.diagnostics == TRUE) { + dir.create(path.diagnostics, recursive = T, showWarnings = F) dat.pred <- source.data$time dat.pred$Date <- as.POSIXct(dat.pred$Date) - dat.pred$obs <- apply(source.data[[v]], 1, mean, na.rm=T) - dat.pred$mean <- apply(dat.out[[v]], 1, mean, na.rm=T) - dat.pred$lwr <- apply(dat.out[[v]], 1, stats::quantile, 0.025, na.rm=T) - dat.pred$upr <- apply(dat.out[[v]], 1, stats::quantile, 0.975, na.rm=T) + dat.pred$obs <- apply(source.data[[v]], 1, mean, na.rm = T) + dat.pred$mean <- apply(dat.out[[v]], 1, mean, na.rm = T) + dat.pred$lwr <- apply(dat.out[[v]], 1, 
stats::quantile, 0.025, na.rm = T) + dat.pred$upr <- apply(dat.out[[v]], 1, stats::quantile, 0.975, na.rm = T) # Plotting the observed and the bias-corrected 95% CI - grDevices::png(file.path(path.diagnostics, paste(ens.name, v, "day.png", sep="_")), height=6, width=6, units="in", res=220) + grDevices::png(file.path(path.diagnostics, paste(ens.name, v, "day.png", sep = "_")), height = 6, width = 6, units = "in", res = 220) print( - ggplot2::ggplot(data=dat.pred[dat.pred$Year>=mean(dat.pred$Year)-1 & dat.pred$Year<=mean(dat.pred$Year)+1,]) + - ggplot2::geom_ribbon(ggplot2::aes(x=.data$Date, ymin=.data$lwr, ymax=.data$upr, fill="corrected"), alpha=0.5) + - ggplot2::geom_line(ggplot2::aes(x=.data$Date, y=mean, color="corrected"), size=0.5) + - ggplot2::geom_line(ggplot2::aes(x=.data$Date, y=.data$obs, color="original"), size=0.5) + - ggplot2::scale_color_manual(values=c("corrected" = "red", "original"="black")) + - ggplot2::scale_fill_manual(values=c("corrected" = "red", "original"="black")) + - ggplot2::guides(fill=F) + + ggplot2::ggplot(data = dat.pred[dat.pred$Year >= mean(dat.pred$Year) - 1 & dat.pred$Year <= mean(dat.pred$Year) + 1, ]) + + ggplot2::geom_ribbon(ggplot2::aes(x = .data$Date, ymin = .data$lwr, ymax = .data$upr, fill = "corrected"), alpha = 0.5) + + ggplot2::geom_line(ggplot2::aes(x = .data$Date, y = mean, color = "corrected"), size = 0.5) + + ggplot2::geom_line(ggplot2::aes(x = .data$Date, y = .data$obs, color = "original"), size = 0.5) + + ggplot2::scale_color_manual(values = c("corrected" = "red", "original" = "black")) + + ggplot2::scale_fill_manual(values = c("corrected" = "red", "original" = "black")) + + ggplot2::guides(fill = F) + ggplot2::ggtitle(paste0(v, " - ensemble mean & 95% CI (daily slice)")) + ggplot2::theme_bw() ) @@ -1040,113 +1044,118 @@ debias.met.regression <- function(train.data, source.data, n.ens, vars.debias=NU # Plotting a few random series to get an idea for what an individual pattern looks liek col.samp <- paste0("X", sample(1:n.ens, min(3, n.ens))) - sim.sub <- data.frame(dat.out[[v]])[,col.samp] - for(i in 1:ncol(sim.sub)){ - sim.sub[,i] <- as.vector(sim.sub[,i]) + sim.sub <- data.frame(dat.out[[v]])[, col.samp] + for (i in 1:ncol(sim.sub)) { + sim.sub[, i] <- as.vector(sim.sub[, i]) } # names(test) <- col.samp stack.sims <- utils::stack(sim.sub) - stack.sims[,c("Year", "DOY", "Date")] <- dat.pred[,c("Year", "DOY", "Date")] + stack.sims[, c("Year", "DOY", "Date")] <- dat.pred[, c("Year", "DOY", "Date")] - grDevices::png(file.path(path.diagnostics, paste(ens.name, v, "day2.png", sep="_")), height=6, width=6, units="in", res=220) + grDevices::png(file.path(path.diagnostics, paste(ens.name, v, "day2.png", sep = "_")), height = 6, width = 6, units = "in", res = 220) print( - ggplot2::ggplot(data=stack.sims[stack.sims$Year>=mean(stack.sims$Year)-2 & stack.sims$Year<=mean(stack.sims$Year)+2,]) + - ggplot2::geom_line(ggplot2::aes(x=.data$Date, y=values, color=ind), size=0.2, alpha=0.8) + + ggplot2::ggplot(data = stack.sims[stack.sims$Year >= mean(stack.sims$Year) - 2 & stack.sims$Year <= mean(stack.sims$Year) + 2, ]) + + ggplot2::geom_line(ggplot2::aes(x = .data$Date, y = values, color = ind), size = 0.2, alpha = 0.8) + ggplot2::ggtitle(paste0(v, " - example ensemble members (daily slice)")) + ggplot2::theme_bw() ) grDevices::dev.off() # Looking tat the annual means over the whole time series to make sure we're getting decent interannual variability - dat.yr <- stats::aggregate(dat.pred[,c("obs", "mean", "lwr", "upr")], - by=list(dat.pred$Year), - 
FUN=mean) + dat.yr <- stats::aggregate(dat.pred[, c("obs", "mean", "lwr", "upr")], + by = list(dat.pred$Year), + FUN = mean + ) names(dat.yr)[1] <- "Year" - grDevices::png(file.path(path.diagnostics, paste(ens.name, v, "annual.png", sep="_")), height=6, width=6, units="in", res=220) + grDevices::png(file.path(path.diagnostics, paste(ens.name, v, "annual.png", sep = "_")), height = 6, width = 6, units = "in", res = 220) print( - ggplot2::ggplot(data=dat.yr[,]) + - ggplot2::geom_ribbon(ggplot2::aes(x=.data$Year, ymin=.data$lwr, ymax=.data$upr, fill="corrected"), alpha=0.5) + - ggplot2::geom_line(ggplot2::aes(x=.data$Year, y=mean, color="corrected"), size=0.5) + - ggplot2::geom_line(ggplot2::aes(x=.data$Year, y=.data$obs, color="original"), size=0.5) + - ggplot2::scale_color_manual(values=c("corrected" = "red", "original"="black")) + - ggplot2::scale_fill_manual(values=c("corrected" = "red", "original"="black")) + - ggplot2::guides(fill=F) + - + ggplot2::ggplot(data = dat.yr[, ]) + + ggplot2::geom_ribbon(ggplot2::aes(x = .data$Year, ymin = .data$lwr, ymax = .data$upr, fill = "corrected"), alpha = 0.5) + + ggplot2::geom_line(ggplot2::aes(x = .data$Year, y = mean, color = "corrected"), size = 0.5) + + ggplot2::geom_line(ggplot2::aes(x = .data$Year, y = .data$obs, color = "original"), size = 0.5) + + ggplot2::scale_color_manual(values = c("corrected" = "red", "original" = "black")) + + ggplot2::scale_fill_manual(values = c("corrected" = "red", "original" = "black")) + + ggplot2::guides(fill = F) + ggplot2::ggtitle(paste0(v, " - annual mean time series")) + ggplot2::theme_bw() ) grDevices::dev.off() - } # ------------- - } # End looping through variables # ------------------------------------------- # Save the output - nc.info <- data.frame(CF.name = c("air_temperature", "air_temperature_minimum", "air_temperature_maximum", "precipitation_flux", - "surface_downwelling_shortwave_flux_in_air", "surface_downwelling_longwave_flux_in_air", - "air_pressure", "specific_humidity", "wind_speed"), - longname = c("2 meter air temperature", "2 meter minimum air temperature", "2 meter maximum air temperature", - "cumulative precipitation (water equivalent)", "incident (downwelling) showtwave radiation", - "incident (downwelling) longwave radiation", "air pressure at the surface", - "Specific humidity measured at the lowest level of the atmosphere", - "wind_speed speed"), - units = c("K", "K", "K", "kg m-2 s-1", "W m-2", "W m-2", "Pa", "kg kg-1", "m s-1") - ) + nc.info <- data.frame( + CF.name = c( + "air_temperature", "air_temperature_minimum", "air_temperature_maximum", "precipitation_flux", + "surface_downwelling_shortwave_flux_in_air", "surface_downwelling_longwave_flux_in_air", + "air_pressure", "specific_humidity", "wind_speed" + ), + longname = c( + "2 meter air temperature", "2 meter minimum air temperature", "2 meter maximum air temperature", + "cumulative precipitation (water equivalent)", "incident (downwelling) showtwave radiation", + "incident (downwelling) longwave radiation", "air pressure at the surface", + "Specific humidity measured at the lowest level of the atmosphere", + "wind_speed speed" + ), + units = c("K", "K", "K", "kg m-2 s-1", "W m-2", "W m-2", "Pa", "kg kg-1", "m s-1") + ) # Define our lat/lon dims since those will be constant - dim.lat <- ncdf4::ncdim_def(name='latitude', units='degree_north', vals=lat.in, create_dimvar=TRUE) - dim.lon <- ncdf4::ncdim_def(name='longitude', units='degree_east', vals=lon.in, create_dimvar=TRUE) + dim.lat <- ncdf4::ncdim_def(name = "latitude", units 
= "degree_north", vals = lat.in, create_dimvar = TRUE) + dim.lon <- ncdf4::ncdim_def(name = "longitude", units = "degree_east", vals = lon.in, create_dimvar = TRUE) print("") print("Saving Ensemble") - pb <- utils::txtProgressBar(min=0, max=length(yrs.save)*n.ens, style=3) - pb.ind=1 - for(yr in yrs.save){ + pb <- utils::txtProgressBar(min = 0, max = length(yrs.save) * n.ens, style = 3) + pb.ind <- 1 + for (yr in yrs.save) { # Doing some row/time indexing - rows.yr <- which(dat.out$time$Year==yr) + rows.yr <- which(dat.out$time$Year == yr) nday <- ifelse(lubridate::leap_year(yr), 366, 365) # Finish defining our time variables (same for all ensemble members) - dim.time <- ncdf4::ncdim_def(name='time', units="sec", vals=seq(1*24*360, (nday+1-1/24)*24*360, length.out=length(rows.yr)), create_dimvar=TRUE, unlim=TRUE) - nc.dim=list(dim.lat,dim.lon,dim.time) + dim.time <- ncdf4::ncdim_def(name = "time", units = "sec", vals = seq(1 * 24 * 360, (nday + 1 - 1 / 24) * 24 * 360, length.out = length(rows.yr)), create_dimvar = TRUE, unlim = TRUE) + nc.dim <- list(dim.lat, dim.lon, dim.time) # Setting up variables and dimensions - var.list = list() - dat.list = list() - - for(j in 1:length(vars.debias)){ - var.list[[j]] = ncdf4::ncvar_def(name=vars.debias[j], - units=as.character(nc.info[nc.info$CF.name==vars.debias[j], "units"]), - longname=as.character(nc.info[nc.info$CF.name==vars.debias[j], "longname"]), - dim=nc.dim, missval=-999, verbose=verbose) + var.list <- list() + dat.list <- list() + + for (j in 1:length(vars.debias)) { + var.list[[j]] <- ncdf4::ncvar_def( + name = vars.debias[j], + units = as.character(nc.info[nc.info$CF.name == vars.debias[j], "units"]), + longname = as.character(nc.info[nc.info$CF.name == vars.debias[j], "longname"]), + dim = nc.dim, missval = -999, verbose = verbose + ) } names(var.list) <- vars.debias # Loop through & write each ensemble member - for(i in 1:n.ens){ + for (i in 1:n.ens) { # Setting up file structure - ens.path <- file.path(outfolder, paste(ens.name, ens.mems[i], sep="_")) - dir.create(ens.path, recursive=T, showWarnings=F) - loc.file <- file.path(ens.path, paste(ens.name, ens.mems[i], stringr::str_pad(yr, width=4, side="left", pad="0"), "nc", sep = ".")) + ens.path <- file.path(outfolder, paste(ens.name, ens.mems[i], sep = "_")) + dir.create(ens.path, recursive = T, showWarnings = F) + loc.file <- file.path(ens.path, paste(ens.name, ens.mems[i], stringr::str_pad(yr, width = 4, side = "left", pad = "0"), "nc", sep = ".")) - for(j in 1:length(vars.debias)){ - dat.list[[j]] = array(dat.out[[vars.debias[j]]][rows.yr,i], dim=c(length(lat.in), length(lon.in), length(rows.yr))) # Go ahead and make the arrays + for (j in 1:length(vars.debias)) { + dat.list[[j]] <- array(dat.out[[vars.debias[j]]][rows.yr, i], dim = c(length(lat.in), length(lon.in), length(rows.yr))) # Go ahead and make the arrays } names(dat.list) <- vars.debias ## put data in new file - loc <- ncdf4::nc_create(filename=loc.file, vars=var.list, verbose=verbose) - for(j in 1:length(vars.debias)){ - ncdf4::ncvar_put(nc=loc, varid=as.character(vars.debias[j]), vals=dat.list[[j]]) + loc <- ncdf4::nc_create(filename = loc.file, vars = var.list, verbose = verbose) + for (j in 1:length(vars.debias)) { + ncdf4::ncvar_put(nc = loc, varid = as.character(vars.debias[j]), vals = dat.list[[j]]) } ncdf4::nc_close(loc) utils::setTxtProgressBar(pb, pb.ind) - pb.ind <- pb.ind+1 + pb.ind <- pb.ind + 1 } # End ensemble member loop } # End year loop print("") diff --git 
a/modules/data.atmosphere/R/download.Ameriflux.R b/modules/data.atmosphere/R/download.Ameriflux.R index e56e63158d1..b353ecf6f00 100644 --- a/modules/data.atmosphere/R/download.Ameriflux.R +++ b/modules/data.atmosphere/R/download.Ameriflux.R @@ -1,7 +1,8 @@ # lookup the site based on the site_id download.Ameriflux.site <- function(site_id) { - sites <- utils::read.csv(system.file("data/FLUXNET.sitemap.csv", package = "PEcAn.data.atmosphere"), - stringsAsFactors = FALSE) + sites <- utils::read.csv(system.file("data/FLUXNET.sitemap.csv", package = "PEcAn.data.atmosphere"), + stringsAsFactors = FALSE + ) sites$FLUX.id[which(sites$site.id == site_id)] } # download.Ameriflux.site @@ -11,7 +12,7 @@ download.Ameriflux.site <- function(site_id) { ##' @name download.Ameriflux ##' @title download.Ameriflux ##' @export -##' @param sitename the FLUXNET ID of the site to be downloaded, used as file name prefix. +##' @param sitename the FLUXNET ID of the site to be downloaded, used as file name prefix. ##' The 'SITE_ID' field in \href{http://ameriflux.lbl.gov/sites/site-list-and-pages/}{list of Ameriflux sites} ##' @param outfolder location on disk where outputs will be stored ##' @param start_date the start date of the data to be downloaded. Format is YYYY-MM-DD (will only use the year part of the date) @@ -19,25 +20,25 @@ download.Ameriflux.site <- function(site_id) { ##' @param overwrite should existing files be overwritten ##' @param verbose should the function be very verbose ##' @param ... further arguments, currently ignored -##' +##' ##' @author Josh Mantooth, Rob Kooper, Ankur Desai download.Ameriflux <- function(sitename, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, ...) { # get start/end year code works on whole years only - + site <- sub(".* \\((.*)\\)", "\\1", sitename) - + start_date <- as.POSIXlt(start_date, tz = "UTC") end_date <- as.POSIXlt(end_date, tz = "UTC") - + start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + # make sure output folder exists if (!file.exists(outfolder)) { dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) } - + # url where Ameriflux data is stored baseurl <- paste0("http://cdiac.ornl.gov/ftp/ameriflux/data/Level2/Sites_ByID/", site, "/with_gaps/") # Hack needed for US-UMB which has hourly and half-hourly folders separate. 
This version sticks @@ -45,52 +46,57 @@ download.Ameriflux <- function(sitename, outfolder, start_date, end_date, if (site == "US-UMB") { baseurl <- paste0(baseurl, "/hourly/") } - + # fetch all links - links <- tryCatch({ - XML::xpathSApply(XML::htmlParse(baseurl), "//a/@href") - }, error = function(e) { - PEcAn.logger::logger.severe("Could not get information about", site, ".", "Is this an Ameriflux site?") - }) - + links <- tryCatch( + { + XML::xpathSApply(XML::htmlParse(baseurl), "//a/@href") + }, + error = function(e) { + PEcAn.logger::logger.severe("Could not get information about", site, ".", "Is this an Ameriflux site?") + } + ) + # find all links we need based on the years and download them rows <- end_year - start_year + 1 - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = site, - stringsAsFactors = FALSE) + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = site, + stringsAsFactors = FALSE + ) for (year in start_year:end_year) { outputfile <- file.path(outfolder, paste(site, year, "nc", sep = ".")) - + # create array with results - row <- year - start_year + 1 - results$file[row] <- outputfile - results$host[row] <- PEcAn.remote::fqdn() - results$startdate[row] <- paste0(year, "-01-01 00:00:00") - results$enddate[row] <- paste0(year, "-12-31 23:59:59") - results$mimetype[row] <- "application/x-netcdf" + row <- year - start_year + 1 + results$file[row] <- outputfile + results$host[row] <- PEcAn.remote::fqdn() + results$startdate[row] <- paste0(year, "-01-01 00:00:00") + results$enddate[row] <- paste0(year, "-12-31 23:59:59") + results$mimetype[row] <- "application/x-netcdf" results$formatname[row] <- "AmeriFlux.level2.h.nc" - + # see if file exists if (file.exists(outputfile) && !overwrite) { PEcAn.logger::logger.debug("File '", outputfile, "' already exists, skipping to next file.") next } - + file <- utils::tail(as.character(links[grep(paste0("_", year, "_.*.nc"), links)]), n = 1) if (length(file) == 0) { PEcAn.logger::logger.severe("Could not download data for", site, "for the year", year) } PEcAn.utils::download_file(paste0(baseurl, file), outputfile) } - + # return list of files downloaded return(invisible(results)) } # download.Ameriflux -#site <- download.Ameriflux.site(622) -#print(download.Ameriflux(2001, 2005, site, "/tmp/met/ameriflux")) +# site <- download.Ameriflux.site(622) +# print(download.Ameriflux(2001, 2005, site, "/tmp/met/ameriflux")) diff --git a/modules/data.atmosphere/R/download.AmerifluxLBL.R b/modules/data.atmosphere/R/download.AmerifluxLBL.R index 8a2ff965f3f..bb615af0722 100644 --- a/modules/data.atmosphere/R/download.AmerifluxLBL.R +++ b/modules/data.atmosphere/R/download.AmerifluxLBL.R @@ -33,7 +33,6 @@ download.AmerifluxLBL <- function(sitename, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, username = "pecan", method, useremail = "@", data_product = "BASE-BADM", data_policy = "CCBY4.0", ...) 
{ - # Initial set-ups for amerifluxr packages # get start/end year code works on whole years only start_date <- as.POSIXlt(start_date, tz = "UTC") @@ -52,7 +51,7 @@ download.AmerifluxLBL <- function(sitename, outfolder, start_date, end_date, } version <- amerifluxr::amf_var_info() - version <- unique(version[version$Site_ID == site,]$BASE_Version) + version <- unique(version[version$Site_ID == site, ]$BASE_Version) if (length(version) != 1) { PEcAn.logger::logger.error("Could not find AmerifluxLBL version info for site", site) } @@ -76,11 +75,12 @@ download.AmerifluxLBL <- function(sitename, outfolder, start_date, end_date, intended_use = "model", intended_use_text = "PEcAn download", verbose = verbose, - out_dir = outfolder) + out_dir = outfolder + ) ) - if (!inherits(zip_file, "try-error")){ + if (!inherits(zip_file, "try-error")) { break - }else if(tout > 250 ){ + } else if (tout > 250) { PEcAn.logger::logger.severe("Download takes too long, check your connection.") break } @@ -91,7 +91,7 @@ download.AmerifluxLBL <- function(sitename, outfolder, start_date, end_date, # Path to created zip-file - if(!grepl(".zip", zip_file)){ + if (!grepl(".zip", zip_file)) { PEcAn.logger::logger.error("Not able to download a zip-file. Check download.AmerifluxLBL inputs") } @@ -101,7 +101,8 @@ download.AmerifluxLBL <- function(sitename, outfolder, start_date, end_date, if (outfname != expected_filename) { PEcAn.logger::logger.info( "Downloaded a file named", sQuote(outfname), - "but download.AmerifluxLBL was expecting", sQuote(expected_filename), ". This may be a PEcAn bug.") + "but download.AmerifluxLBL was expecting", sQuote(expected_filename), ". This may be a PEcAn bug." + ) } file_timestep_hh <- "HH" @@ -121,18 +122,18 @@ download.AmerifluxLBL <- function(sitename, outfolder, start_date, end_date, outcsvname_hr <- paste0(substr(outfname, 1, 15), "_", file_timestep_hr, "_", endname, ".csv") output_csv_file_hr <- file.path(outfolder, outcsvname_hr) - extract_file_flag <- TRUE + extract_file_flag <- TRUE if (!overwrite && file.exists(output_csv_file)) { PEcAn.logger::logger.debug("File '", output_csv_file, "' already exists, skipping extraction.") - extract_file_flag <- FALSE - file_timestep <- "HH" + extract_file_flag <- FALSE + file_timestep <- "HH" } else { if (!overwrite && file.exists(output_csv_file_hr)) { PEcAn.logger::logger.debug("File '", output_csv_file_hr, "' already exists, skipping extraction.") - extract_file_flag <- FALSE - file_timestep <- "HR" - outcsvname <- outcsvname_hr - output_csv_file <- output_csv_file_hr + extract_file_flag <- FALSE + file_timestep <- "HR" + outcsvname <- outcsvname_hr + output_csv_file <- output_csv_file_hr } } @@ -162,17 +163,21 @@ download.AmerifluxLBL <- function(sitename, outfolder, start_date, end_date, firstline <- firstline[4] lastline <- system(paste0("tail -1 ", output_csv_file), intern = TRUE) - firstdate_st <- paste0(substr(firstline, 1, 4), "-", - substr(firstline, 5, 6), "-", - substr(firstline, 7, 8), " ", - substr(firstline, 9, 10), ":", - substr(firstline, 11, 12)) + firstdate_st <- paste0( + substr(firstline, 1, 4), "-", + substr(firstline, 5, 6), "-", + substr(firstline, 7, 8), " ", + substr(firstline, 9, 10), ":", + substr(firstline, 11, 12) + ) firstdate <- as.POSIXlt(firstdate_st) - lastdate_st <- paste0(substr(lastline, 1, 4), "-", - substr(lastline, 5, 6), "-", - substr(lastline, 7, 8), " ", - substr(lastline, 9, 10), ":", - substr(lastline, 11, 12)) + lastdate_st <- paste0( + substr(lastline, 1, 4), "-", + substr(lastline, 5, 6), "-", 
+ substr(lastline, 7, 8), " ", + substr(lastline, 9, 10), ":", + substr(lastline, 11, 12) + ) lastdate <- as.POSIXlt(lastdate_st) syear <- lubridate::year(firstdate) @@ -185,21 +190,23 @@ download.AmerifluxLBL <- function(sitename, outfolder, start_date, end_date, PEcAn.logger::logger.severe("End_Year", end_year, "precedes start of record ", syear, " for ", site) } - rows <- 1 - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = dbfilename, - stringsAsFactors = FALSE) - - results$file[rows] <- output_csv_file - results$host[rows] <- PEcAn.remote::fqdn() - results$startdate[rows] <- firstdate_st - results$enddate[rows] <- lastdate_st - results$mimetype[rows] <- "text/csv" + rows <- 1 + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = dbfilename, + stringsAsFactors = FALSE + ) + + results$file[rows] <- output_csv_file + results$host[rows] <- PEcAn.remote::fqdn() + results$startdate[rows] <- firstdate_st + results$enddate[rows] <- lastdate_st + results$mimetype[rows] <- "text/csv" results$formatname[rows] <- "AMERIFLUX_BASE_HH" # return list of files downloaded diff --git a/modules/data.atmosphere/R/download.CRUNCEP_Global.R b/modules/data.atmosphere/R/download.CRUNCEP_Global.R index e2c363a0075..ae90aa68e1b 100644 --- a/modules/data.atmosphere/R/download.CRUNCEP_Global.R +++ b/modules/data.atmosphere/R/download.CRUNCEP_Global.R @@ -24,7 +24,6 @@ download.CRUNCEP <- function(outfolder, start_date, end_date, lat.in, lon.in, overwrite = FALSE, verbose = FALSE, maxErrors = 10, sleep = 2, method = "ncss", ...) 
{ - if (is.null(method)) method <- "ncss" if (!method %in% c("opendap", "ncss")) { PEcAn.logger::logger.severe(glue::glue( @@ -41,9 +40,11 @@ download.CRUNCEP <- function(outfolder, start_date, end_date, lat.in, lon.in, CRUNCEP_start <- 1901 CRUNCEP_end <- 2010 if (start_year < CRUNCEP_start | end_year > CRUNCEP_end) { - PEcAn.logger::logger.severe(sprintf('Input year range (%d:%d) exceeds the CRUNCEP range (%d:%d)', - start_year, end_year, - CRUNCEP_start, CRUNCEP_end)) + PEcAn.logger::logger.severe(sprintf( + "Input year range (%d:%d) exceeds the CRUNCEP range (%d:%d)", + start_year, end_year, + CRUNCEP_start, CRUNCEP_end + )) } dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) @@ -86,7 +87,7 @@ download.CRUNCEP <- function(outfolder, start_date, end_date, lat.in, lon.in, mask_dist <- (lon.in - mask_grid[, 1])^2 + (lat.in - mask_grid[, 2])^2 # Order by increasing distance (closest first) mask_order <- order(mask_dist) - mask_igrido <- mask_igrid[mask_order,] + mask_igrido <- mask_igrid[mask_order, ] on_land <- as.logical(mask_values[mask_igrido]) if (!any(on_land)) { PEcAn.logger::logger.severe(glue::glue( @@ -119,14 +120,16 @@ download.CRUNCEP <- function(outfolder, start_date, end_date, lat.in, lon.in, ylist <- seq(start_year, end_year, by = 1) rows <- length(ylist) - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = "CRUNCEP", - stringsAsFactors = FALSE) + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = "CRUNCEP", + stringsAsFactors = FALSE + ) var <- tibble::tribble( ~DAP.name, ~CF.name, ~units, @@ -153,7 +156,7 @@ download.CRUNCEP <- function(outfolder, start_date, end_date, lat.in, lon.in, results$formatname[i] <- "CF Meteorology" if (file.exists(loc.file) && !isTRUE(overwrite)) { - PEcAn.logger::logger.error("File already exists. Skipping to next year") + PEcAn.logger::logger.error("File already exists. 
Skipping to next year") next } @@ -162,9 +165,11 @@ download.CRUNCEP <- function(outfolder, start_date, end_date, lat.in, lon.in, lat <- ncdf4::ncdim_def(name = "latitude", units = "degree_north", vals = lat.in, create_dimvar = TRUE) lon <- ncdf4::ncdim_def(name = "longitude", units = "degree_east", vals = lon.in, create_dimvar = TRUE) - days_elapsed <- (1:ntime) * 6/24 - 3/24 # data are 6-hourly, with timestamp at center of interval - time <- ncdf4::ncdim_def(name = "time", units = paste0("days since ", year, "-01-01T00:00:00Z"), - vals = as.array(days_elapsed), create_dimvar = TRUE, unlim = TRUE) + days_elapsed <- (1:ntime) * 6 / 24 - 3 / 24 # data are 6-hourly, with timestamp at center of interval + time <- ncdf4::ncdim_def( + name = "time", units = paste0("days since ", year, "-01-01T00:00:00Z"), + vals = as.array(days_elapsed), create_dimvar = TRUE, unlim = TRUE + ) dim <- list(lat, lon, time) @@ -174,8 +179,7 @@ download.CRUNCEP <- function(outfolder, start_date, end_date, lat.in, lon.in, file_name <- "mstmip_driver_global_hd_climate_%1$s_%2$d_v1.nc4" ## get data off OpenDAP - dap_base <- switch( - method, + dap_base <- switch(method, opendap = paste0("https://thredds.daac.ornl.gov/thredds/dodsC/ornldaac/1220/", file_name), ncss = paste0("https://thredds.daac.ornl.gov/thredds/ncss/grid/ornldaac/1220/", file_name, "/dataset.html") ) @@ -185,7 +189,7 @@ download.CRUNCEP <- function(outfolder, start_date, end_date, lat.in, lon.in, url <- sprintf(dap_base, current_var, year) PEcAn.logger::logger.info("Attempting to access file at: ", url) if (method == "opendap") { - dap <- PEcAn.utils::retry.func(ncdf4::nc_open(url, verbose=verbose), maxErrors=maxErrors, sleep=sleep) + dap <- PEcAn.utils::retry.func(ncdf4::nc_open(url, verbose = verbose), maxErrors = maxErrors, sleep = sleep) } else if (method == "ncss") { ncss_query <- glue::glue( url, "?", @@ -219,15 +223,19 @@ download.CRUNCEP <- function(outfolder, start_date, end_date, lat.in, lon.in, # confirm that timestamps match if (dap$dim$time$len != ntime) { - PEcAn.logger::logger.severe("Expected", ntime, "observations, but", url, "contained", dap$dim$time$len) + PEcAn.logger::logger.severe("Expected", ntime, "observations, but", url, "contained", dap$dim$time$len) } - dap_time <- PEcAn.utils::ud_convert(dap$dim$time$vals, - dap$dim$time$units, - time$units) - if (!isTRUE(all.equal(dap_time, time$vals))){ - PEcAn.logger::logger.severe("Timestamp mismatch.", - "Expected", min(time$vals), '..', max(time$vals), time$units, - "but got", min(dap_time), "..", max(dap_time)) + dap_time <- PEcAn.utils::ud_convert( + dap$dim$time$vals, + dap$dim$time$units, + time$units + ) + if (!isTRUE(all.equal(dap_time, time$vals))) { + PEcAn.logger::logger.severe( + "Timestamp mismatch.", + "Expected", min(time$vals), "..", max(time$vals), time$units, + "but got", min(dap_time), "..", max(dap_time) + ) } dat.list[[j]] <- PEcAn.utils::retry.func( @@ -237,13 +245,16 @@ download.CRUNCEP <- function(outfolder, start_date, end_date, lat.in, lon.in, c(lon_grid, lat_grid, 1), c(1, 1, ntime) ), - maxErrors=maxErrors, sleep=sleep) + maxErrors = maxErrors, sleep = sleep + ) - var.list[[j]] <- ncdf4::ncvar_def(name = as.character(var$CF.name[j]), - units = as.character(var$units[j]), - dim = dim, - missval = -999, - verbose = verbose) + var.list[[j]] <- ncdf4::ncvar_def( + name = as.character(var$CF.name[j]), + units = as.character(var$units[j]), + dim = dim, + missval = -999, + verbose = verbose + ) ncdf4::nc_close(dap) } ## change units of precip to kg/m2/s instead of 6 
hour accumulated precip diff --git a/modules/data.atmosphere/R/download.ERA5.R b/modules/data.atmosphere/R/download.ERA5.R index 87c6e7a9d3f..00525e4f802 100644 --- a/modules/data.atmosphere/R/download.ERA5.R +++ b/modules/data.atmosphere/R/download.ERA5.R @@ -30,8 +30,8 @@ #' @return Character vector of file names containing raw, downloaded #' data (invisibly) #' @author Alexey Shiklomanov -#' @md - # ^ tells Roxygen to interpret this fn's doc block as Markdown +#' @md +# ^ tells Roxygen to interpret this fn's doc block as Markdown #' @export #' @examples #' \dontrun{ @@ -45,57 +45,66 @@ #' ) #' } download.ERA5.old <- function(outfolder, start_date, end_date, lat.in, lon.in, - product_types = "all", - overwrite = FALSE, - reticulate_python = NULL, - ...) { + product_types = "all", + overwrite = FALSE, + reticulate_python = NULL, + ...) { PEcAn.logger::logger.warn( "This function is an incomplete prototype! Use with caution!" ) PEcAn.utils::need_packages("reticulate") - + if (!is.null(reticulate_python)) { reticulate::use_python(reticulate_python) } - tryCatch({ - cdsapi <- reticulate::import("cdsapi") - }, error = function(e) { - PEcAn.logger::logger.severe( - "Failed to load `cdsapi` Python library. ", - "Please make sure it is installed to a location accessible to `reticulate`.", - "You should be able to install it with the following command: ", - "`pip install --user cdsapi`.", - "The following error was thrown by `reticulate::import(\"cdsapi\")`: ", - conditionMessage(e) - ) - }) - - - if (!file.exists(file.path(Sys.getenv("HOME"), ".cdsapirc"))) + tryCatch( + { + cdsapi <- reticulate::import("cdsapi") + }, + error = function(e) { + PEcAn.logger::logger.severe( + "Failed to load `cdsapi` Python library. ", + "Please make sure it is installed to a location accessible to `reticulate`.", + "You should be able to install it with the following command: ", + "`pip install --user cdsapi`.", + "The following error was thrown by `reticulate::import(\"cdsapi\")`: ", + conditionMessage(e) + ) + } + ) + + + if (!file.exists(file.path(Sys.getenv("HOME"), ".cdsapirc"))) { PEcAn.logger::logger.severe( "Please create a `${HOME}/.cdsapirc` file as described here:", "https://cds.climate.copernicus.eu/api-how-to#install-the-cds-api-key ." 
) - + } - tryCatch({ - cclient <- cdsapi$Client() - }, error = function(e) { - PEcAn.logger::logger.severe( - "The following error was thrown by `cdsapi$Client()`: ", - conditionMessage(e) - ) - }) - all_products <- c("reanalysis", "ensemble_members", - "ensemble mean", "ensemble_spread") - + tryCatch( + { + cclient <- cdsapi$Client() + }, + error = function(e) { + PEcAn.logger::logger.severe( + "The following error was thrown by `cdsapi$Client()`: ", + conditionMessage(e) + ) + } + ) + + all_products <- c( + "reanalysis", "ensemble_members", + "ensemble mean", "ensemble_spread" + ) + if (product_types == "all") { product_types <- all_products } - + if (any(!product_types %in% all_products)) { bad_products <- setdiff(product_types, all_products) PEcAn.logger::logger.severe(sprintf( @@ -128,7 +137,7 @@ download.ERA5.old <- function(outfolder, start_date, end_date, lat.in, lon.in, files <- character() dir.create(outfolder, showWarnings = FALSE) - + # First, download all the files for (i in seq_len(nvar)) { var <- variables[["api_name"]][[i]] @@ -143,33 +152,36 @@ download.ERA5.old <- function(outfolder, start_date, end_date, lat.in, lon.in, )) next } - do_next <- tryCatch({ - cclient$retrieve( - "reanalysis-era5-single-levels", - list( - variable = var, - product_type = 'ensemble_members', - date = paste(start_date, end_date, sep = "/"), - time = "00/to/23/by/1", - area = area, - grid = c(0.25, 0.25), - format = "netcdf" - ), - fname - ) - FALSE - }, error = function(e) { - PEcAn.logger::logger.warn( - glue::glue( - "Failed to download variable `{var}`. ", - "Skipping to next variable. ", - "Error message was:\n", - conditionMessage(e) + do_next <- tryCatch( + { + cclient$retrieve( + "reanalysis-era5-single-levels", + list( + variable = var, + product_type = "ensemble_members", + date = paste(start_date, end_date, sep = "/"), + time = "00/to/23/by/1", + area = area, + grid = c(0.25, 0.25), + format = "netcdf" + ), + fname ) - ) - TRUE - }) - + FALSE + }, + error = function(e) { + PEcAn.logger::logger.warn( + glue::glue( + "Failed to download variable `{var}`. ", + "Skipping to next variable. ", + "Error message was:\n", + conditionMessage(e) + ) + ) + TRUE + } + ) + if (isTRUE(do_next)) next files <- c(files, fname) } diff --git a/modules/data.atmosphere/R/download.FACE.R b/modules/data.atmosphere/R/download.FACE.R index 3803003e61e..65b5a1f59e4 100644 --- a/modules/data.atmosphere/R/download.FACE.R +++ b/modules/data.atmosphere/R/download.FACE.R @@ -15,41 +15,43 @@ download.FACE <- function(sitename, outfolder, start_date, end_date, overwrite = FALSE, method, ...) 
{ # download.FACE <- # function(data.set,outfolder,pkg,raw.host,start_year,end_year,site.id,dbparams,con){ - + start_date <- as.POSIXlt(start_date, tz = "UTC") - end_date <- as.POSIXlt(end_date, tz = "UTC") - + end_date <- as.POSIXlt(end_date, tz = "UTC") + site <- site_from_tag(sitename, "FACE") - + # make sure output folder exists if (!file.exists(outfolder)) { dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) } - + raw.file <- paste0(site, "_forcing_h.nc") out.file <- file.path(outfolder, paste0("FACE_", raw.file)) - + # url where Ameriflux data is stored url <- paste0("ftp://cdiac.ornl.gov/.private/eCO2_Modelling/Site_Data/", site, "/", raw.file) print(url) - PEcAn.utils::download_file(url,out.file,method) + PEcAn.utils::download_file(url, out.file, method) # return file info - return(invisible(data.frame(file = out.file, - host = PEcAn.remote::fqdn(), - mimetype = "application/x-netcdf", - formatname = "FACE", - startdate = start_date, - enddate = end_date, - dbfile.name = "FACE", - stringsAsFactors = FALSE))) - + return(invisible(data.frame( + file = out.file, + host = PEcAn.remote::fqdn(), + mimetype = "application/x-netcdf", + formatname = "FACE", + startdate = start_date, + enddate = end_date, + dbfile.name = "FACE", + stringsAsFactors = FALSE + ))) + ###################### - + # if(is.na(start_year) |is.na(end_year)){ # j <- grep("YEAR =",years) # start_year <- as.numeric(substr(unlist(strsplit(years[j],","))[1],nchar(unlist(strsplit(years[j],","))[1])-4,nchar(unlist(strsplit(years[j],","))[1]) )) - # end_year <- as.numeric(unlist(strsplit(years[length(years)-1],";"))[1]) + # end_year <- as.numeric(unlist(strsplit(years[length(years)-1],";"))[1]) # start_date <- paste0(start_year,"-01-01 00:00:00") # end_date <- paste0(end_year,"-12-31 23:59:00") # } diff --git a/modules/data.atmosphere/R/download.Fluxnet2015.R b/modules/data.atmosphere/R/download.Fluxnet2015.R index 80bde7b6714..a377bdf5c8b 100644 --- a/modules/data.atmosphere/R/download.Fluxnet2015.R +++ b/modules/data.atmosphere/R/download.Fluxnet2015.R @@ -3,7 +3,7 @@ ##' @name download.Fluxnet2015 ##' @title download.Fluxnet2015 ##' @export -##' @param sitename the FLUXNET ID of the site to be downloaded, used as file name prefix. +##' @param sitename the FLUXNET ID of the site to be downloaded, used as file name prefix. ##' The 'SITE_ID' field in \href{https://fluxnet.org/sites/site-list-and-pages/}{list of Ameriflux sites} ##' @param outfolder location on disk where outputs will be stored ##' @param start_date the start date of the data to be downloaded. Format is YYYY-MM-DD (will only use the year part of the date) @@ -12,26 +12,26 @@ ##' @param verbose should the function be very verbose ##' @param username login name for Ameriflux ##' @param ... further arguments, currently ignored -##' +##' ##' @author Ankur Desai, based on download.Ameriflux.R by Josh Mantooth, Rob Kooper -download.Fluxnet2015 <- function(sitename, outfolder, start_date, end_date, +download.Fluxnet2015 <- function(sitename, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, username = "pecan", ...) 
{ # get start/end year code works on whole years only - + site <- sub(".* \\((.*)\\)", "\\1", sitename) - + start_date <- as.POSIXlt(start_date, tz = "UTC") end_date <- as.POSIXlt(end_date, tz = "UTC") - + start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + # make sure output folder exists if (!file.exists(outfolder)) { dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) } - + # need to query to get full file name - this is Fluxnet2015 version, TIER1 only url <- "http://wile.lbl.gov:8080/AmeriFlux/DataDownload.svc/datafileURLs" json_query <- paste0("{\"username\":\"", username, "\",\"siteList\":[\"", site, "\"],\"intendedUse\":\"Research - Land model/Earth system model\",\"description\":\"PEcAn download\",\"dataProduct\":\"SUBSET\",\"policy\":\"TIER1\"}") @@ -39,17 +39,17 @@ download.Fluxnet2015 <- function(sitename, outfolder, start_date, end_date, link <- httr::content(result) ftplink <- NULL - if(is.null(link) || is.atomic(link)) { + if (is.null(link) || is.atomic(link)) { PEcAn.logger::logger.severe("Could not get information about", site, ".", "Is this an Fluxnet2015 site?") } else if (length(link$dataURLsList) > 0) { ftplink <- link$dataURLsList[[1]]$URL } - + # test to see that we got back a FTP if (is.null(ftplink)) { PEcAn.logger::logger.severe("Could not get information about", site, ".", "Is this an Fluxnet2015 site?") } - + # get start and end year of data from filename syear <- as.numeric(substr(ftplink, nchar(ftplink) - 16, nchar(ftplink) - 13)) eyear <- as.numeric(substr(ftplink, nchar(ftplink) - 11, nchar(ftplink) - 8)) @@ -59,35 +59,39 @@ download.Fluxnet2015 <- function(sitename, outfolder, start_date, end_date, if (end_year < syear) { PEcAn.logger::logger.severe("End_Year", end_year, "precedes start of record ", syear, " for ", site) } - + # get zip and csv filenames outfname <- strsplit(ftplink, "/") outfname <- outfname[[1]][length(outfname[[1]])] - - output_zip_file <- file.path(outfolder, outfname) + + output_zip_file <- file.path(outfolder, outfname) file_timestep_hh <- "HH" file_timestep_hr <- "HR" - file_timestep <- file_timestep_hh - endname <- strsplit(outfname, "_") - endname <- endname[[1]][length(endname[[1]])] - endname <- substr(endname, 1, nchar(endname) - 4) - - outcsvname <- paste0(substr(outfname, 1, 30), - file_timestep_hh, "_", - syear, "-", - eyear, "_", - endname, - ".csv") + file_timestep <- file_timestep_hh + endname <- strsplit(outfname, "_") + endname <- endname[[1]][length(endname[[1]])] + endname <- substr(endname, 1, nchar(endname) - 4) + + outcsvname <- paste0( + substr(outfname, 1, 30), + file_timestep_hh, "_", + syear, "-", + eyear, "_", + endname, + ".csv" + ) output_csv_file <- file.path(outfolder, outcsvname) - outcsvname_hr <- paste0(substr(outfname, 1, 30), - file_timestep_hr, "_", - syear, "-", eyear, "_", - endname, - ".csv") + outcsvname_hr <- paste0( + substr(outfname, 1, 30), + file_timestep_hr, "_", + syear, "-", eyear, "_", + endname, + ".csv" + ) output_csv_file_hr <- file.path(outfolder, outcsvname_hr) - + download_file_flag <- TRUE - extract_file_flag <- TRUE + extract_file_flag <- TRUE if (!overwrite && file.exists(output_zip_file)) { PEcAn.logger::logger.debug("File '", output_zip_file, "' already exists, skipping download") download_file_flag <- FALSE @@ -107,7 +111,7 @@ download.Fluxnet2015 <- function(sitename, outfolder, start_date, end_date, output_csv_file <- output_csv_file_hr } } - + if (download_file_flag) { extract_file_flag <- TRUE PEcAn.utils::download_file(ftplink, 
output_zip_file) @@ -133,26 +137,28 @@ download.Fluxnet2015 <- function(sitename, outfolder, start_date, end_date, PEcAn.logger::logger.severe("ZIP file ", output_zip_file, " did not contain CSV file ", outcsvname) } } - + dbfilename <- paste0(substr(outfname, 1, 30), file_timestep, "_", syear, "-", eyear, "_", endname) - + rows <- 1 - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = dbfilename, - stringsAsFactors = FALSE) - - results$file[rows] <- output_csv_file - results$host[rows] <- PEcAn.remote::fqdn() - results$startdate[rows] <- paste0(syear, "-01-01 00:00:00") - results$enddate[rows] <- paste0(eyear, "-12-31 23:59:59") - results$mimetype[rows] <- "text/csv" + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = dbfilename, + stringsAsFactors = FALSE + ) + + results$file[rows] <- output_csv_file + results$host[rows] <- PEcAn.remote::fqdn() + results$startdate[rows] <- paste0(syear, "-01-01 00:00:00") + results$enddate[rows] <- paste0(eyear, "-12-31 23:59:59") + results$mimetype[rows] <- "text/csv" results$formatname[rows] <- "FLUXNET2015_SUBSET_HH" - + # return list of files downloaded return(results) } # download.Fluxnet2015 diff --git a/modules/data.atmosphere/R/download.FluxnetLaThuile.R b/modules/data.atmosphere/R/download.FluxnetLaThuile.R index 44d065a46cb..b6755e0cd51 100644 --- a/modules/data.atmosphere/R/download.FluxnetLaThuile.R +++ b/modules/data.atmosphere/R/download.FluxnetLaThuile.R @@ -1,7 +1,8 @@ # lookup the site based on the site_id download.FluxnetLaThuile.site <- function(site_id) { - sites <- utils::read.csv(system.file("data/FLUXNET.sitemap.csv", package = "PEcAn.data.atmosphere"), - stringsAsFactors = FALSE) + sites <- utils::read.csv(system.file("data/FLUXNET.sitemap.csv", package = "PEcAn.data.atmosphere"), + stringsAsFactors = FALSE + ) sites$FLUX.id[which(sites$site.id == site_id)] } # download.FluxnetLaThuile.site @@ -19,67 +20,73 @@ download.FluxnetLaThuile.site <- function(site_id) { ##' @param verbose should the function be very verbose ##' @param username should be the registered Fluxnet username, else defaults to pecan ##' @param ... further arguments, currently ignored -##' +##' ##' @author Ankur Desai -download.FluxnetLaThuile <- function(sitename, outfolder, start_date, end_date, +download.FluxnetLaThuile <- function(sitename, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, username = "pecan", ...) 
{ # get start/end year code works on whole years only - + site <- sub(".* \\((.*)\\)", "\\1", sitename) - + start_date <- as.POSIXlt(start_date, tz = "UTC") - end_date <- as.POSIXlt(end_date, tz = "UTC") - + end_date <- as.POSIXlt(end_date, tz = "UTC") + start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - + end_year <- lubridate::year(end_date) + # make sure output folder exists if (!file.exists(outfolder)) { dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) } - + # url where Ameriflux data is stored # ftp://ftp:ankurdesai@ftp.fluxdata.org/.fluxnet_30284/.coreplusquality_4036/US-WCr.2004.synth.hourly.coreplusquality.csv # change from here - baseurl <- paste0("ftp://ftp:", - username, - "@ftp.fluxdata.org/.fluxnet_30284/.coreplusquality_4036/", - site) - + baseurl <- paste0( + "ftp://ftp:", + username, + "@ftp.fluxdata.org/.fluxnet_30284/.coreplusquality_4036/", + site + ) + # find all links we need based on the years and download them rows <- end_year - start_year + 1 - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = site, - stringsAsFactors = FALSE) + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = site, + stringsAsFactors = FALSE + ) for (year in start_year:end_year) { - outputfile <- file.path(outfolder, - paste(site, "FluxnetLaThuile.hourly.coreplusquality", year, "csv", sep = ".")) - + outputfile <- file.path( + outfolder, + paste(site, "FluxnetLaThuile.hourly.coreplusquality", year, "csv", sep = ".") + ) + # create array with results - row <- year - start_year + 1 - results$file[row] <- outputfile - results$host[row] <- PEcAn.remote::fqdn() - results$startdate[row] <- paste0(year, "-01-01 00:00:00") - results$enddate[row] <- paste0(year, "-12-31 23:59:59") - results$mimetype[row] <- "text/csv" + row <- year - start_year + 1 + results$file[row] <- outputfile + results$host[row] <- PEcAn.remote::fqdn() + results$startdate[row] <- paste0(year, "-01-01 00:00:00") + results$enddate[row] <- paste0(year, "-12-31 23:59:59") + results$mimetype[row] <- "text/csv" results$formatname[row] <- "FluxnetLaThuile.hourly.coreplusquality" - + # see if file exists if (file.exists(outputfile) && !overwrite) { PEcAn.logger::logger.debug("File '", outputfile, "' already exists, skipping to next file.") next } - + file <- paste(baseurl, year, "synth.hourly.coreplusquality.csv", sep = ".") PEcAn.utils::download_file(file, outputfile) } - + # return list of files downloaded return(results) } # download.FluxnetLaThuile diff --git a/modules/data.atmosphere/R/download.GFDL.R b/modules/data.atmosphere/R/download.GFDL.R index dd1640ae7ae..18e33d4dae9 100644 --- a/modules/data.atmosphere/R/download.GFDL.R +++ b/modules/data.atmosphere/R/download.GFDL.R @@ -1,4 +1,4 @@ -#' Download GFDL CMIP5 outputs for a single grid point using OPeNDAP and convert to CF +#' Download GFDL CMIP5 outputs for a single grid point using OPeNDAP and convert to CF #' #' @export #' @param outfolder Directory for storing output @@ -19,25 +19,24 @@ download.GFDL <- function(outfolder, start_date, end_date, lat.in, lon.in, overwrite = FALSE, verbose = FALSE, model = "CM3", scenario = "rcp45", ensemble_member = "r1i1p1", ...) 
{ - - if(is.null(model)) model <- "CM3" - if(is.null(scenario)) scenario <- "rcp45" - if(is.null(ensemble_member)) ensemble_member <- "r1i1p1" + if (is.null(model)) model <- "CM3" + if (is.null(scenario)) scenario <- "rcp45" + if (is.null(ensemble_member)) ensemble_member <- "r1i1p1" start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - obs_per_year <- 365 * 24 /3 # 3-hr intervals, leap days ignored + end_year <- lubridate::year(end_date) + obs_per_year <- 365 * 24 / 3 # 3-hr intervals, leap days ignored - #Fix Outfolder to include model and scenario + # Fix Outfolder to include model and scenario folder_name <- paste0("GFDL_", model, "_", scenario, "_", ensemble_member) source_id_foldername <- basename(outfolder) source_all_foldername <- gsub("GFDL", folder_name, source_id_foldername) outfolder <- file.path(paste0(outfolder, source_all_foldername)) - lat.in <- as.numeric(lat.in) - lat_floor <- floor(lat.in) - lon.in <- as.numeric(lon.in) - lon_floor <- floor(lon.in) + lat.in <- as.numeric(lat.in) + lat_floor <- floor(lat.in) + lon.in <- as.numeric(lon.in) + lon_floor <- floor(lon.in) if (lon_floor < 0) { lon_floor <- 360 + lon_floor } @@ -60,7 +59,7 @@ download.GFDL <- function(outfolder, start_date, end_date, lat.in, lon.in, formatname = character(rows), startdate = character(rows), enddate = character(rows), - dbfile.name = paste("GFDL", model, scenario, ensemble_member, sep = "."), # 'GFDL', + dbfile.name = paste("GFDL", model, scenario, ensemble_member, sep = "."), # 'GFDL', stringsAsFactors = FALSE ) @@ -79,7 +78,7 @@ download.GFDL <- function(outfolder, start_date, end_date, lat.in, lon.in, for (i in seq_len(rows)) { year <- ylist[i] # find start position of currently-wanted year in the 5-year DAP file - time_offset <- 1 + ((year-1) %% 5) * obs_per_year + time_offset <- 1 + ((year - 1) %% 5) * obs_per_year PEcAn.logger::logger.debug( sprintf( @@ -93,23 +92,23 @@ download.GFDL <- function(outfolder, start_date, end_date, lat.in, lon.in, paste("GFDL", model, scenario, ensemble_member, year, "nc", sep = ".") ) - results$file[i] <- loc.file - results$host[i] <- PEcAn.remote::fqdn() - results$startdate[i] <- paste0(year, "-01-01 00:00:00") - results$enddate[i] <- paste0(year, "-12-31 23:59:59") - results$mimetype[i] <- "application/x-netcdf" + results$file[i] <- loc.file + results$host[i] <- PEcAn.remote::fqdn() + results$startdate[i] <- paste0(year, "-01-01 00:00:00") + results$enddate[i] <- paste0(year, "-12-31 23:59:59") + results$mimetype[i] <- "application/x-netcdf" results$formatname[i] <- "CF Meteorology" - + if (file.exists(loc.file) && !isTRUE(overwrite)) { PEcAn.logger::logger.error("File already exists. Skipping to next year") next } - + met_start <- 2006 met_block <- 5 - url_year <- met_start + floor((year - met_start) / met_block) * met_block + url_year <- met_start + floor((year - met_start) / met_block) * met_block start_url <- paste0(url_year, "0101") - end_url <- paste0(url_year + met_block - 1, "1231") + end_url <- paste0(url_year + met_block - 1, "1231") ## Create dimensions lat <- ncdf4::ncdim_def(name = "latitude", units = "degree_north", vals = lat.in, create_dimvar = TRUE) @@ -155,30 +154,35 @@ download.GFDL <- function(outfolder, start_date, end_date, lat.in, lon.in, # But if these disagree by more than 3 hours, we have a problem. 
raw_time <- ncdf4::ncvar_get(dap, "time", start = time_offset, count = obs_per_year) converted_time <- PEcAn.utils::ud_convert(raw_time, dap$dim$time$units, dim$time$units) - if(!all(diff(converted_time) == 3 * 60 * 60)){ + if (!all(diff(converted_time) == 3 * 60 * 60)) { PEcAn.logger::logger.error( "Expected timestamps at 3-hour intervals, got", paste(range(diff(converted_time)), collapse = "-"), - "seconds") + "seconds" + ) } - if(!all(abs(dim$time$vals - converted_time) < (3 * 60 * 60))){ + if (!all(abs(dim$time$vals - converted_time) < (3 * 60 * 60))) { PEcAn.logger::logger.error( "Timestamps in GFDL source file differ from expected by more than 3 hours:", "Expected", paste(range(dim$time$vals), collapse = "-"), dim$time$units, ", got", paste(range(converted_time), collapse = "-"), ". Greatest difference from expected:", - max(abs(dim$time$vals - converted_time)), "seconds") + max(abs(dim$time$vals - converted_time)), "seconds" + ) } dat.list[[j]] <- ncdf4::ncvar_get(dap, as.character(var$DAP.name[j]), - start = c(lon_GFDL, lat_GFDL, time_offset), - count = c(1, 1, obs_per_year)) - var.list[[j]] <- ncdf4::ncvar_def(name = as.character(var$CF.name[j]), - units = as.character(var$units[j]), - dim = dim, - missval = -999, - verbose = verbose) + start = c(lon_GFDL, lat_GFDL, time_offset), + count = c(1, 1, obs_per_year) + ) + var.list[[j]] <- ncdf4::ncvar_def( + name = as.character(var$CF.name[j]), + units = as.character(var$units[j]), + dim = dim, + missval = -999, + verbose = verbose + ) ncdf4::nc_close(dap) } @@ -189,7 +193,6 @@ download.GFDL <- function(outfolder, start_date, end_date, lat.in, lon.in, ncdf4::ncvar_put(nc = loc, varid = as.character(var$CF.name[j]), vals = dat.list[[j]]) } ncdf4::nc_close(loc) - } return(invisible(results)) diff --git a/modules/data.atmosphere/R/download.GLDAS.R b/modules/data.atmosphere/R/download.GLDAS.R index f416f28ce19..4db883e2a1a 100644 --- a/modules/data.atmosphere/R/download.GLDAS.R +++ b/modules/data.atmosphere/R/download.GLDAS.R @@ -17,45 +17,54 @@ ##' @author Christy Rollinson download.GLDAS <- function(outfolder, start_date, end_date, site_id, lat.in, lon.in, overwrite = FALSE, verbose = FALSE, ...) { - # Date stuff start_date <- as.POSIXlt(start_date, tz = "UTC") - end_date <- as.POSIXlt(end_date, tz = "UTC") + end_date <- as.POSIXlt(end_date, tz = "UTC") start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - site_id <- as.numeric(site_id) + end_year <- lubridate::year(end_date) + site_id <- as.numeric(site_id) # NOTE: This is commented out in other functions. Should we ditch it here as well?? 
- outfolder <- paste0(outfolder, "_site_", paste0(site_id%/%1e+09, "-", site_id%%1e+09)) + outfolder <- paste0(outfolder, "_site_", paste0(site_id %/% 1e+09, "-", site_id %% 1e+09)) GLDAS_start <- 1948 if (start_year < GLDAS_start) { - PEcAn.logger::logger.severe(sprintf('Input year range (%d:%d) exceeds the GLDAS range (%d:present)', - start_year, end_year, - GLDAS_start)) + PEcAn.logger::logger.severe(sprintf( + "Input year range (%d:%d) exceeds the GLDAS range (%d:present)", + start_year, end_year, + GLDAS_start + )) } - lat.in <- as.numeric(lat.in) - lon.in <- as.numeric(lon.in) - dap_base <- "http://hydro1.sci.gsfc.nasa.gov/thredds/dodsC/GLDAS_NOAH10SUBP_3H" # Right now changed to 1-degree because it gets us back further + lat.in <- as.numeric(lat.in) + lon.in <- as.numeric(lon.in) + dap_base <- "http://hydro1.sci.gsfc.nasa.gov/thredds/dodsC/GLDAS_NOAH10SUBP_3H" # Right now changed to 1-degree because it gets us back further dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) - ylist <- seq(start_year, end_year, by = 1) - rows <- length(ylist) - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = "NLDAS", - stringsAsFactors = FALSE) - var <- data.frame(DAP.name = c("Near_surface_air_temperature", "Surface_incident_longwave_radiation", - "Surface_pressure", "Surface_incident_shortwave_radiation", "Near_surface_wind_magnitude", - "Near_surface_specific_humidity", "Rainfall_rate"), - CF.name = c("air_temperature", "surface_downwelling_longwave_flux_in_air", - "air_pressure", "surface_downwelling_shortwave_flux_in_air", "wind", "specific_humidity", - "precipitation_flux"), - units = c("Kelvin", "W/m2", "Pascal", "W/m2", "m/s", "g/g", "kg/m2/s")) + ylist <- seq(start_year, end_year, by = 1) + rows <- length(ylist) + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = "NLDAS", + stringsAsFactors = FALSE + ) + var <- data.frame( + DAP.name = c( + "Near_surface_air_temperature", "Surface_incident_longwave_radiation", + "Surface_pressure", "Surface_incident_shortwave_radiation", "Near_surface_wind_magnitude", + "Near_surface_specific_humidity", "Rainfall_rate" + ), + CF.name = c( + "air_temperature", "surface_downwelling_longwave_flux_in_air", + "air_pressure", "surface_downwelling_shortwave_flux_in_air", "wind", "specific_humidity", + "precipitation_flux" + ), + units = c("Kelvin", "W/m2", "Pascal", "W/m2", "m/s", "g/g", "kg/m2/s") + ) for (i in seq_len(rows)) { year <- ylist[i] @@ -72,30 +81,32 @@ download.GLDAS <- function(outfolder, start_date, end_date, site_id, lat.in, lon # Now we need to check whether we're ending on the right day day2 <- lubridate::yday(end_date) days.use <- seq(day1, day2) - nday <- length(days.use) # Update nday + nday <- length(days.use) # Update nday } else if (i == 1) { # If this is the first of many years, we only need to worry about the start date day1 <- lubridate::yday(start_date) days.use <- seq(day1, nday) - nday <- length(days.use) # Update nday + nday <- length(days.use) # Update nday } else if (i == rows) { # If this is the last of many years, we only need to worry about the end date day2 <- lubridate::yday(end_date) days.use <- seq_len(day2) - nday <- length(days.use) # Update nday + nday <- length(days.use) # Update nday } - 
ntime <- nday * 24 / 3 # leap year or not*time slice (3-hourly) + ntime <- nday * 24 / 3 # leap year or not*time slice (3-hourly) loc.file <- file.path(outfolder, paste("GLDAS", year, "nc", sep = ".")) ## Create dimensions lat <- ncdf4::ncdim_def(name = "latitude", units = "degree_north", vals = lat.in, create_dimvar = TRUE) lon <- ncdf4::ncdim_def(name = "longitude", units = "degree_east", vals = lon.in, create_dimvar = TRUE) - time <- ncdf4::ncdim_def(name = "time", - units = "sec", - vals = seq((min(days.use + 1 - 1/8) * 24 * 360), (max(days.use) + 1 - 1/8) * 24 * 360, length.out = ntime), - create_dimvar = TRUE, - unlim = TRUE) + time <- ncdf4::ncdim_def( + name = "time", + units = "sec", + vals = seq((min(days.use + 1 - 1 / 8) * 24 * 360), (max(days.use) + 1 - 1 / 8) * 24 * 360, length.out = ntime), + create_dimvar = TRUE, + unlim = TRUE + ) dim <- list(lat, lon, time) var.list <- list() @@ -103,26 +114,28 @@ download.GLDAS <- function(outfolder, start_date, end_date, site_id, lat.in, lon # Defining our dimensions up front for (j in seq_len(nrow(var))) { - var.list[[j]] <- ncdf4::ncvar_def(name = as.character(var$CF.name[j]), - units = as.character(var$units[j]), - dim = dim, - missval = -999, - verbose = verbose) - dat.list[[j]] <- array(NA, dim = c(length(lat.in), length(lon.in), ntime)) # Go ahead and make the arrays + var.list[[j]] <- ncdf4::ncvar_def( + name = as.character(var$CF.name[j]), + units = as.character(var$units[j]), + dim = dim, + missval = -999, + verbose = verbose + ) + dat.list[[j]] <- array(NA, dim = c(length(lat.in), length(lon.in), ntime)) # Go ahead and make the arrays } names(var.list) <- names(dat.list) <- var$CF.name ## get data off OpenDAP for (j in seq_along(days.use)) { date.now <- as.Date(days.use[j], origin = as.Date(paste0(year - 1, "-12-31"))) - mo.now <- stringr::str_pad(lubridate::month(date.now), 2, pad = "0") - day.mo <- stringr::str_pad(lubridate::day(date.now), 2, pad = "0") - doy <- stringr::str_pad(days.use[j], 3, pad = "0") + mo.now <- stringr::str_pad(lubridate::month(date.now), 2, pad = "0") + day.mo <- stringr::str_pad(lubridate::day(date.now), 2, pad = "0") + doy <- stringr::str_pad(days.use[j], 3, pad = "0") # Because the suffixes are really different for these files, # get a list and go through each day dap.log <- data.frame(XML::readHTMLTable(paste0(dap_base, "/", year, "/", doy, "/catalog.html"))) - dap.log <- dap.log[order(dap.log[, 1], decreasing = F), ] # Sort them so that we go from 0 to 21 + dap.log <- dap.log[order(dap.log[, 1], decreasing = F), ] # Sort them so that we go from 0 to 21 for (h in seq_len(nrow(dap.log))[-1]) { dap_file <- paste0(dap_base, "/", year, "/", doy, "/", dap.log[h, 1], ".ascii?") @@ -135,12 +148,14 @@ download.GLDAS <- function(outfolder, start_date, end_date, site_id, lat.in, lon con = textConnection(substr(latlon, lat.ind[[1]][3], lon.ind[[1]][3] - 1)), sep = ",", fileEncoding = "\n", - skip = 1)) + skip = 1 + )) lons <- as.vector(utils::read.table( con = textConnection(substr(latlon, lon.ind[[1]][3], nchar(latlon))), sep = ",", fileEncoding = "\n", - skip = 1)) + skip = 1 + )) lat.use <- which(lats - 0.25 / 2 <= lat.in & lats + 0.25 / 2 >= lat.in) lon.use <- which(lons - 0.25 / 2 <= lon.in & lons + 0.25 / 2 >= lon.in) @@ -149,7 +164,9 @@ download.GLDAS <- function(outfolder, start_date, end_date, site_id, lat.in, lon dap_query <- "" for (v in seq_len(nrow(var))) { dap_query <- paste(dap_query, - paste0(var$DAP.name[v], "[0:1:0]", "[", lat.use, "][", lon.use, "]"), sep = ",") + paste0(var$DAP.name[v], 
"[0:1:0]", "[", lat.use, "][", lon.use, "]"), + sep = "," + ) } dap_query <- substr(dap_query, 2, nchar(dap_query)) @@ -161,10 +178,11 @@ download.GLDAS <- function(outfolder, start_date, end_date, site_id, lat.in, lon dat.list[[v]][, , (j * 8) - 8 + h - 1] <- utils::read.delim( con = textConnection(substr(dap.out, ind.1[[1]][1], end.1[[1]][2])), sep = ",", - fileEncoding = "\n")[1, 1] - } # end variable loop - } # end hour - } # end day + fileEncoding = "\n" + )[1, 1] + } # end variable loop + } # end hour + } # end day ## put data in new file loc <- ncdf4::nc_create(filename = loc.file, vars = var.list, verbose = verbose) @@ -173,11 +191,11 @@ download.GLDAS <- function(outfolder, start_date, end_date, site_id, lat.in, lon } ncdf4::nc_close(loc) - results$file[i] <- loc.file - results$host[i] <- PEcAn.remote::fqdn() - results$startdate[i] <- paste0(year, "-01-01 00:00:00") - results$enddate[i] <- paste0(year, "-12-31 23:59:59") - results$mimetype[i] <- "application/x-netcdf" + results$file[i] <- loc.file + results$host[i] <- PEcAn.remote::fqdn() + results$startdate[i] <- paste0(year, "-01-01 00:00:00") + results$enddate[i] <- paste0(year, "-12-31 23:59:59") + results$mimetype[i] <- "application/x-netcdf" results$formatname[i] <- "CF Meteorology" } diff --git a/modules/data.atmosphere/R/download.Geostreams.R b/modules/data.atmosphere/R/download.Geostreams.R index 7828b5642d0..902fe5a733f 100644 --- a/modules/data.atmosphere/R/download.Geostreams.R +++ b/modules/data.atmosphere/R/download.Geostreams.R @@ -31,35 +31,40 @@ #' @export #' @author Harsh Agrawal, Chris Black #' @examples \dontrun{ -#' download.Geostreams(outfolder = "~/output/dbfiles/Clowder_EF", -#' sitename = "UIUC Energy Farm - CEN", -#' start_date = "2016-01-01", end_date="2016-12-31", -#' key="verysecret") +#' download.Geostreams( +#' outfolder = "~/output/dbfiles/Clowder_EF", +#' sitename = "UIUC Energy Farm - CEN", +#' start_date = "2016-01-01", end_date = "2016-12-31", +#' key = "verysecret" +#' ) #' } -download.Geostreams <- function(outfolder, sitename, +download.Geostreams <- function(outfolder, sitename, start_date, end_date, url = "https://terraref.ncsa.illinois.edu/clowder/api/geostreams", key = NULL, user = NULL, pass = NULL, - ...){ - - start_date = lubridate::parse_date_time(start_date, orders = c("ymd", "ymdHMS", "ymdHMSz"), tz = "UTC") - end_date = lubridate::parse_date_time(end_date, orders = c("ymd", "ymdHMS", "ymdHMSz"), tz = "UTC") + ...) 
{ + start_date <- lubridate::parse_date_time(start_date, orders = c("ymd", "ymdHMS", "ymdHMSz"), tz = "UTC") + end_date <- lubridate::parse_date_time(end_date, orders = c("ymd", "ymdHMS", "ymdHMSz"), tz = "UTC") auth <- get_clowderauth(key, user, pass, url) - sensor_result <- httr::GET(url = paste0(url, "/sensors"), - query = list(sensor_name = sitename, key = auth$key, ...), - config = auth$userpass) + sensor_result <- httr::GET( + url = paste0(url, "/sensors"), + query = list(sensor_name = sitename, key = auth$key, ...), + config = auth$userpass + ) httr::stop_for_status(sensor_result, "look up site info in Clowder") sensor_txt <- httr::content(sensor_result, as = "text", encoding = "UTF-8") sensor_info <- jsonlite::fromJSON(sensor_txt) sensor_id <- sensor_info$id - sensor_mintime = lubridate::parse_date_time(sensor_info$min_start_time, - orders = "ymdHMSz", tz = "UTC") - sensor_maxtime = lubridate::parse_date_time(sensor_info$max_end_time, - orders = "ymdHMSz", tz = "UTC") + sensor_mintime <- lubridate::parse_date_time(sensor_info$min_start_time, + orders = "ymdHMSz", tz = "UTC" + ) + sensor_maxtime <- lubridate::parse_date_time(sensor_info$max_end_time, + orders = "ymdHMSz", tz = "UTC" + ) if (start_date < sensor_mintime) { PEcAn.logger::logger.severe("Requested start date", start_date, "is before data begin", sensor_mintime) } @@ -67,56 +72,65 @@ download.Geostreams <- function(outfolder, sitename, PEcAn.logger::logger.severe("Requested end date", end_date, "is after data end", sensor_maxtime) } - result_files = c() + result_files <- c() for (year in lubridate::year(start_date):lubridate::year(end_date)) { query_args <- list( sensor_id = sensor_id, since = strftime( - max(start_date, lubridate::ymd(paste0(year, "-01-01"), tz="UTC")), + max(start_date, lubridate::ymd(paste0(year, "-01-01"), tz = "UTC")), format = "%Y-%m-%dT%H:%M:%SZ", - tz = "UTC"), + tz = "UTC" + ), until = strftime( - min(end_date, lubridate::ymd(paste0(year, "-12-31"), tz="UTC")), + min(end_date, lubridate::ymd(paste0(year, "-12-31"), tz = "UTC")), format = "%Y-%m-%dT%H:%M:%SZ", - tz = "UTC"), + tz = "UTC" + ), key = auth$key, - ...) + ... 
+ ) - met_result <- httr::GET(url = paste0(url, "/datapoints"), - query = query_args, - config = auth$userpass) + met_result <- httr::GET( + url = paste0(url, "/datapoints"), + query = query_args, + config = auth$userpass + ) PEcAn.logger::logger.info(met_result$url) httr::stop_for_status(met_result, "download met data from Clowder") result_txt <- httr::content(met_result, as = "text", encoding = "UTF-8") combined_result <- paste0( - '{"sensor_info":', sensor_txt, ',\n', - '"data":', result_txt, '}') + '{"sensor_info":', sensor_txt, ",\n", + '"data":', result_txt, "}" + ) dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) out_file <- file.path( outfolder, - paste("Geostreams", sitename, start_date, end_date, year, "json", sep=".")) - write(x = combined_result, file=out_file) - result_files = append(result_files, out_file) + paste("Geostreams", sitename, start_date, end_date, year, "json", sep = ".") + ) + write(x = combined_result, file = out_file) + result_files <- append(result_files, out_file) } - return(data.frame(file = result_files, - host = PEcAn.remote::fqdn(), - mimetype = "application/json", - formatname = "Geostreams met", - startdate = start_date, - enddate = end_date, - dbfile.name = paste("Geostreams", sitename, start_date, end_date, sep = "."), - stringsAsFactors = FALSE)) + return(data.frame( + file = result_files, + host = PEcAn.remote::fqdn(), + mimetype = "application/json", + formatname = "Geostreams met", + startdate = start_date, + enddate = end_date, + dbfile.name = paste("Geostreams", sitename, start_date, end_date, sep = "."), + stringsAsFactors = FALSE + )) } #' Authentication lookup helper -#' +#' #' @param key,user,pass passed unchanged from \code{\link{download.Geostreams}} call, possibly null #' @param url matched against \code{<hostname>} in authfile, ignored if authfile contains no hostname.
#' @param authfile path to a PEcAn-formatted XML settings file; must contain a \code{<clowder>} key #' -get_clowderauth <- function(key, user, pass, url, authfile="~/.pecan.clowder.xml") { +get_clowderauth <- function(key, user, pass, url, authfile = "~/.pecan.clowder.xml") { if (!is.null(key)) { return(list(key = key)) } else if (!is.null(user) && !is.null(pass)) { @@ -129,12 +143,18 @@ get_clowderauth <- function(key, user, pass, url, authfile="~/.pecan.clowder.xml return(NULL) } if (!is.null(auth_file$key)) { - return(list(key=key)) + return(list(key = key)) } else { # allow for cases where one of user/pass given as argument and other is in file - if (is.null(user)) { user <- auth_file$user } - if (is.null(pass)) { pass <- auth_file$password } - if (xor(is.null(user), is.null(pass))) { return(NULL) } + if (is.null(user)) { + user <- auth_file$user + } + if (is.null(pass)) { + pass <- auth_file$password + } + if (xor(is.null(user), is.null(pass))) { + return(NULL) + } return(list(userpass = httr::authenticate(user = user, password = pass))) } } else { diff --git a/modules/data.atmosphere/R/download.ICOS.R b/modules/data.atmosphere/R/download.ICOS.R index 853f671209c..cadb1627428 100644 --- a/modules/data.atmosphere/R/download.ICOS.R +++ b/modules/data.atmosphere/R/download.ICOS.R @@ -1,6 +1,6 @@ #' Download ICOS Ecosystem data products #' -#' Currently available products: +#' Currently available products: #' Drought-2018 ecosystem eddy covariance flux product https://www.icos-cp.eu/data-products/YVR0-4898 #' ICOS Final Fully Quality Controlled Observational Data (Level 2) https://www.icos-cp.eu/data-products/ecosystem-release #' @@ -16,7 +16,7 @@ #' @export #' @examples #' \dontrun{ -#' download.ICOS("FI-Sii", "/home/carya/pecan", "2016-01-01", "2018-01-01", product="Drought2018") +#' download.ICOS("FI-Sii", "/home/carya/pecan", "2016-01-01", "2018-01-01", product = "Drought2018") #' } #' @author Ayush Prasad #' @@ -27,7 +27,6 @@ download.ICOS <- end_date, product, overwrite = FALSE, ...) { - # make sure output folder exists if (!file.exists(outfolder)) { dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) } @@ -37,7 +36,7 @@ download.ICOS <- download_file_flag <- TRUE extract_file_flag <- TRUE sitename <- sub(".* \\((.*)\\)", "\\1", sitename) - + if (tolower(product) == "drought2018") { # construct output CSV file name output_file_name <- @@ -46,62 +45,60 @@ download.ICOS <- sitename, "_FLUXNET2015_FULLSET_HH_" ) - + # construct zip file name zip_file_name <- paste0(outfolder, "/Drought", sitename, ".zip") - + # data type, can be found from the machine readable page of the product data_type <- "http://meta.icos-cp.eu/resources/cpmeta/dought2018ArchiveProduct" - + file_name <- - paste0('FLX_', sitename, '_FLUXNET2015_FULLSET_HH') - + paste0("FLX_", sitename, "_FLUXNET2015_FULLSET_HH") + format_name <- "ICOS_ECOSYSTEM_HH" - } else if (tolower(product) == "etc") { output_file_name <- paste0("ICOSETC_", sitename, "_FLUXNET_HH_01.csv") - + # construct zip file name zip_file_name <- paste0(outfolder, "/ICOSETC_Archive_", sitename, ".zip") - + # data type, can be found from the machine readable page of the product data_type <- "http://meta.icos-cp.eu/resources/cpmeta/etcArchiveProduct" - + file_name <- paste0("ICOSETC_", sitename, "_FLUXNET_HH") - + format_name <- "ICOS_ECOSYSTEM_HH" - } else { PEcAn.logger::logger.severe("Invalid product.
Product should be one of 'Drought2018', 'ETC' ") } - + output_file <- list.files(path = outfolder, pattern = output_file_name) - if(length(output_file != 0) && !overwrite){ - PEcAn.logger::logger.info("Output CSV file for the requested site already exists") - download_file_flag <- FALSE - extract_file_flag <- FALSE - output_file_name <- output_file + if (length(output_file != 0) && !overwrite) { + PEcAn.logger::logger.info("Output CSV file for the requested site already exists") + download_file_flag <- FALSE + extract_file_flag <- FALSE + output_file_name <- output_file } if (extract_file_flag && - file.exists(zip_file_name) && !overwrite) { + file.exists(zip_file_name) && !overwrite) { PEcAn.logger::logger.info("Zip file for the requested site already exists, extracting it...") download_file_flag <- FALSE extract_file_flag <- TRUE } - + if (download_file_flag) { # Find dataset product id by using the site name - + # ICOS SPARQL end point url <- "https://meta.icos-cp.eu/sparql?type=JSON" - + # RDF query to find out the information about the data set using the site name body <- " prefix cpmeta: @@ -143,7 +140,7 @@ download.ICOS <- ) ) } - + if (dataset_end_date < lubridate::as_date(end_date)) { PEcAn.logger::logger.severe( paste( @@ -154,52 +151,58 @@ download.ICOS <- ) } dataset_id <- sub(".*/", "", dataset_url) - + # construct the download URL download_url <- - paste0('https://data.icos-cp.eu/licence_accept?ids=%5B%22', - dataset_id, - '%22%5D') + paste0( + "https://data.icos-cp.eu/licence_accept?ids=%5B%22", + dataset_id, + "%22%5D" + ) # Download the zip file file <- - httr::GET(url = download_url, - httr::write_disk(zip_file_name, - overwrite = TRUE), - httr::progress()) + httr::GET( + url = download_url, + httr::write_disk(zip_file_name, + overwrite = TRUE + ), + httr::progress() + ) } - + if (extract_file_flag) { # extract only the hourly data file zipped_csv_name <- grep( - paste0('*', file_name), + paste0("*", file_name), utils::unzip(zip_file_name, list = TRUE)$Name, ignore.case = TRUE, value = TRUE ) utils::unzip(zip_file_name, - files = zipped_csv_name, - junkpaths = TRUE, - exdir = outfolder) + files = zipped_csv_name, + junkpaths = TRUE, + exdir = outfolder + ) if (tolower(product) == "drought2018") { output_file_name <- zipped_csv_name - }else if (tolower(product) == "etc") { + } else if (tolower(product) == "etc") { # reformat file slightly so that both Drought2018 and ETC files can use the same format tmp_csv <- utils::read.csv(file.path(outfolder, output_file_name)) - new_tmp <- cbind(tmp_csv[, -which(colnames(tmp_csv)=="LW_OUT")], tmp_csv[, which(colnames(tmp_csv)=="LW_OUT")]) - colnames(new_tmp) <- c(colnames(tmp_csv)[-which(colnames(tmp_csv)=="LW_OUT")], "LW_OUT") + new_tmp <- cbind(tmp_csv[, -which(colnames(tmp_csv) == "LW_OUT")], tmp_csv[, which(colnames(tmp_csv) == "LW_OUT")]) + colnames(new_tmp) <- c(colnames(tmp_csv)[-which(colnames(tmp_csv) == "LW_OUT")], "LW_OUT") utils::write.csv(new_tmp, file = file.path(outfolder, output_file_name), row.names = FALSE) } } - - + + # get start and end year of data from file firstline <- system(paste0("head -2 ", file.path(outfolder, output_file_name)), intern = TRUE) firstline <- firstline[2] lastline <- system(paste0("tail -1 ", file.path(outfolder, output_file_name)), intern = TRUE) - + firstdate_st <- paste0( substr(firstline, 1, 4), "-", @@ -222,9 +225,9 @@ download.ICOS <- ":", substr(lastline, 11, 12) ) - - - rows <- 1 + + + rows <- 1 results <- data.frame( file = character(rows), host = character(rows), @@ -235,15 +238,14 
@@ download.ICOS <- dbfile.name = substr(basename(output_file_name), 1, nchar(basename(output_file_name)) - 4), stringsAsFactors = FALSE ) - - results$file[rows] <- + + results$file[rows] <- file.path(outfolder, output_file_name) - results$host[rows] <- PEcAn.remote::fqdn() - results$startdate[rows] <- firstdate_st - results$enddate[rows] <- lastdate_st - results$mimetype[rows] <- "text/csv" + results$host[rows] <- PEcAn.remote::fqdn() + results$startdate[rows] <- firstdate_st + results$enddate[rows] <- lastdate_st + results$mimetype[rows] <- "text/csv" results$formatname[rows] <- format_name - + return(results) - } diff --git a/modules/data.atmosphere/R/download.MACA.R b/modules/data.atmosphere/R/download.MACA.R index f4bc93253fb..3064fcd13d4 100644 --- a/modules/data.atmosphere/R/download.MACA.R +++ b/modules/data.atmosphere/R/download.MACA.R @@ -1,4 +1,4 @@ -##' Download MACA CMIP5 outputs for a single grid point using OPeNDAP and convert to CF +##' Download MACA CMIP5 outputs for a single grid point using OPeNDAP and convert to CF ##' @name download.MACA ##' @title download.MACA ##' @export @@ -9,7 +9,7 @@ ##' @param model , select which MACA model to run (options are BNU-ESM, CNRM-CM5, CSIRO-Mk3-6-0, bcc-csm1-1, bcc-csm1-1-m, CanESM2, GFDL-ESM2M, GFDL-ESM2G, HadGEM2-CC365, HadGEM2-ES365, inmcm4, MIROC5, MIROC-ESM, MIROC-ESM-CHEM, MRI-CGCM3, CCSM4, IPSL-CM5A-LR, IPSL-CM5A-MR, IPSL-CM5B-LR, NorESM1-M) ##' @param scenario , select which scenario to run (options are rcp45, rcp85) ##' @param ensemble_member , r1i1p1 is the only ensemble member available for this dataset, CCSM4 uses r6i1p1 instead -##' @param site_id BETY site id +##' @param site_id BETY site id ##' @param lat.in latitude of site ##' @param lon.in longitude of site ##' @param overwrite overwrite existing files? Default is FALSE @@ -17,143 +17,152 @@ ##' @param ... other inputs ##' ##' @author James Simkins -download.MACA <- function(outfolder, start_date, end_date, site_id, lat.in, lon.in, model='IPSL-CM5A-LR', scenario='rcp85', ensemble_member='r1i1p1', - overwrite=FALSE, verbose=FALSE, ...){ +download.MACA <- function(outfolder, start_date, end_date, site_id, lat.in, lon.in, model = "IPSL-CM5A-LR", scenario = "rcp85", ensemble_member = "r1i1p1", + overwrite = FALSE, verbose = FALSE, ...) 
{ start_date <- as.POSIXlt(start_date, tz = "UTC") end_date <- as.POSIXlt(end_date, tz = "UTC") start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) + end_year <- lubridate::year(end_date) site_id <- as.numeric(site_id) model <- paste0(model) scenario <- paste0(scenario) ensemble_member <- paste0(ensemble_member) - outfolder <- paste0(outfolder,"_site_",paste0(site_id %/% 1000000000, "-", site_id %% 1000000000)) - - if (model == 'CCSM4'){ - ensemble_member <- 'r6i1p1' + outfolder <- paste0(outfolder, "_site_", paste0(site_id %/% 1000000000, "-", site_id %% 1000000000)) + + if (model == "CCSM4") { + ensemble_member <- "r6i1p1" } - + lat.in <- as.numeric(lat.in) lat <- lat.in - 25.063077926635742 - lat_MACA <- round(lat*24) + lat_MACA <- round(lat * 24) lon.in <- as.numeric(lon.in) - if (lon.in < 0){ - lon.in <- 180 - lon.in} + if (lon.in < 0) { + lon.in <- 180 - lon.in + } lon <- lon.in - 235.22784423828125 - lon_MACA <- round(lon*24) - + lon_MACA <- round(lon * 24) - dap_base <-'http://thredds.northwestknowledge.net:8080/thredds/dodsC/MACAV2' - ylist <- seq(start_year,end_year,by=1) + dap_base <- "http://thredds.northwestknowledge.net:8080/thredds/dodsC/MACAV2" + + ylist <- seq(start_year, end_year, by = 1) rows <- length(ylist) - results <- data.frame(file=character(rows), host=character(rows), - mimetype=character(rows), formatname=character(rows), - startdate=character(rows), enddate=character(rows), - dbfile.name = paste("MACA",model,scenario,ensemble_member,sep="."),#"MACA", - stringsAsFactors = FALSE) - - - var <- data.frame(DAP.name <- c("tasmax","tasmin","rsds","uas","vas","huss","pr","none","none","none"), - long_DAP.name <- c("air_temperature","air_temperature","surface_downwelling_shortwave_flux_in_air", - "eastward_wind","northward_wind","specific_humidity","precipitation","air_pressure", - "surface_downwelling_longwave_flux_in_air","air_temp"), - CF.name <- c("air_temperature_max","air_temperature_min","surface_downwelling_shortwave_flux_in_air", - "eastward_wind","northward_wind","specific_humidity","precipitation_flux","air_pressure", - "surface_downwelling_longwave_flux_in_air","air_temperature"), - units <- c('Kelvin','Kelvin',"W/m2","m/s","m/s","g/g","kg/m2/s", "Pascal", "W/m2","Kelvin") - ) - - - for (i in 1:rows){ - year <- ylist[i] + results <- data.frame( + file = character(rows), host = character(rows), + mimetype = character(rows), formatname = character(rows), + startdate = character(rows), enddate = character(rows), + dbfile.name = paste("MACA", model, scenario, ensemble_member, sep = "."), # "MACA", + stringsAsFactors = FALSE + ) + + + var <- data.frame( + DAP.name <- c("tasmax", "tasmin", "rsds", "uas", "vas", "huss", "pr", "none", "none", "none"), + long_DAP.name <- c( + "air_temperature", "air_temperature", "surface_downwelling_shortwave_flux_in_air", + "eastward_wind", "northward_wind", "specific_humidity", "precipitation", "air_pressure", + "surface_downwelling_longwave_flux_in_air", "air_temp" + ), + CF.name <- c( + "air_temperature_max", "air_temperature_min", "surface_downwelling_shortwave_flux_in_air", + "eastward_wind", "northward_wind", "specific_humidity", "precipitation_flux", "air_pressure", + "surface_downwelling_longwave_flux_in_air", "air_temperature" + ), + units <- c("Kelvin", "Kelvin", "W/m2", "m/s", "m/s", "g/g", "kg/m2/s", "Pascal", "W/m2", "Kelvin") + ) + + + for (i in 1:rows) { + year <- ylist[i] ntime <- (1825) - + met_start <- 2006 met_block <- 5 - url_year <- met_start + 
floor((year-met_start)/met_block)*met_block + url_year <- met_start + floor((year - met_start) / met_block) * met_block start_url <- paste0(url_year) - end_url <- paste0(url_year+met_block-1) - dir.create(outfolder, showWarnings=FALSE, recursive=TRUE) - - loc.file <- file.path(outfolder,paste("MACA",model,scenario,ensemble_member,year,"nc",sep=".")) - + end_url <- paste0(url_year + met_block - 1) + dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) + + loc.file <- file.path(outfolder, paste("MACA", model, scenario, ensemble_member, year, "nc", sep = ".")) + ## Create dimensions - lat <- ncdf4::ncdim_def(name='latitude', units='degree_north', vals=lat.in, create_dimvar=TRUE) - lon <- ncdf4::ncdim_def(name='longitude', units='degree_east', vals=lon.in, create_dimvar=TRUE) - time <- ncdf4::ncdim_def(name='time', units="sec", vals=(1:365)*86400, create_dimvar=TRUE, unlim=TRUE) - dim<-list(lat,lon,time) - + lat <- ncdf4::ncdim_def(name = "latitude", units = "degree_north", vals = lat.in, create_dimvar = TRUE) + lon <- ncdf4::ncdim_def(name = "longitude", units = "degree_east", vals = lon.in, create_dimvar = TRUE) + time <- ncdf4::ncdim_def(name = "time", units = "sec", vals = (1:365) * 86400, create_dimvar = TRUE, unlim = TRUE) + dim <- list(lat, lon, time) + var.list <- list() dat.list <- list() - + ## get data off OpenDAP - for(j in 1:length(var$CF.name)){ - dap_end <- paste0('/', - model,'/macav2metdata_', - var$DAP.name[j],'_', - model,'_', - ensemble_member,'_', - scenario,'_', - start_url,'_', - end_url,'_CONUS_daily.nc') - dap_file <- paste0(dap_base,dap_end) - if(j < 8){ - dap <- ncdf4::nc_open(dap_file) - dat.list[[j]] <- ncdf4::ncvar_get(dap,as.character(var$long_DAP.name[j]),c(lon_MACA,lat_MACA,1),c(1,1,ntime)) - var.list[[j]] <- ncdf4::ncvar_def(name=as.character(var$CF.name[j]), units=as.character(var$units[j]), dim=dim, missval=-9999.0, verbose=verbose) - ncdf4::nc_close(dap) + for (j in 1:length(var$CF.name)) { + dap_end <- paste0( + "/", + model, "/macav2metdata_", + var$DAP.name[j], "_", + model, "_", + ensemble_member, "_", + scenario, "_", + start_url, "_", + end_url, "_CONUS_daily.nc" + ) + dap_file <- paste0(dap_base, dap_end) + if (j < 8) { + dap <- ncdf4::nc_open(dap_file) + dat.list[[j]] <- ncdf4::ncvar_get(dap, as.character(var$long_DAP.name[j]), c(lon_MACA, lat_MACA, 1), c(1, 1, ntime)) + var.list[[j]] <- ncdf4::ncvar_def(name = as.character(var$CF.name[j]), units = as.character(var$units[j]), dim = dim, missval = -9999.0, verbose = verbose) + ncdf4::nc_close(dap) } else { dat.list[[j]] <- NA - var.list[[j]] <- ncdf4::ncvar_def(name=as.character(var$CF.name[j]), units=as.character(var$units[j]), dim=dim, missval=-9999.0, verbose=verbose)} + var.list[[j]] <- ncdf4::ncvar_def(name = as.character(var$CF.name[j]), units = as.character(var$units[j]), dim = dim, missval = -9999.0, verbose = verbose) + } } - + dat.list <- as.data.frame(dat.list) - colnames(dat.list) <- c("air_temperature_max","air_temperature_min","surface_downwelling_shortwave_flux_in_air","eastward_wind","northward_wind","specific_humidity","precipitation_flux","air_pressure", "surface_downwelling_longwave_flux_in_air","air_temperature") - #for (n in 1:1825){ - # dat.list[n,"air_pressure"] = 1 - #dat.list[n,"surface_downwelling_longwave_flux_in_air"] = 1} - #take average of temperature min and max - dat.list[["air_temperature"]] <- (dat.list[["air_temperature_max"]]+dat.list[["air_temperature_min"]])/2 - #convert mm precipitation to precipitation flux - dat.list[["precipitation_flux"]] <- 
(dat.list[["precipitation_flux"]]/(24*3600)) - - - #read in a 5 year file, but only storing 1 year at a time, so this selects the particular year of the 5 year span that you want - if (year%%5 == 1){ - dat.list = dat.list[1:365,] + colnames(dat.list) <- c("air_temperature_max", "air_temperature_min", "surface_downwelling_shortwave_flux_in_air", "eastward_wind", "northward_wind", "specific_humidity", "precipitation_flux", "air_pressure", "surface_downwelling_longwave_flux_in_air", "air_temperature") + # for (n in 1:1825){ + # dat.list[n,"air_pressure"] = 1 + # dat.list[n,"surface_downwelling_longwave_flux_in_air"] = 1} + # take average of temperature min and max + dat.list[["air_temperature"]] <- (dat.list[["air_temperature_max"]] + dat.list[["air_temperature_min"]]) / 2 + # convert mm precipitation to precipitation flux + dat.list[["precipitation_flux"]] <- (dat.list[["precipitation_flux"]] / (24 * 3600)) + + + # read in a 5 year file, but only storing 1 year at a time, so this selects the particular year of the 5 year span that you want + if (year %% 5 == 1) { + dat.list <- dat.list[1:365, ] } - if (year%%5 == 2){ - dat.list = dat.list[366:730,] + if (year %% 5 == 2) { + dat.list <- dat.list[366:730, ] } - if (year%%5 == 3){ - dat.list = dat.list[731:1095,] + if (year %% 5 == 3) { + dat.list <- dat.list[731:1095, ] } - if (year%%5 == 4){ - dat.list = dat.list[1096:1460,] + if (year %% 5 == 4) { + dat.list <- dat.list[1096:1460, ] } - if (year%%5 == 0){ - dat.list = dat.list[1461:1825,] + if (year %% 5 == 0) { + dat.list <- dat.list[1461:1825, ] } - + ## put data in new file - loc <- ncdf4::nc_create(filename=loc.file, vars=var.list, verbose=verbose) - for(j in seq_along(var$CF.name)){ - ncdf4::ncvar_put(nc=loc, varid=as.character(var$CF.name[j]), vals=dat.list[[j]]) + loc <- ncdf4::nc_create(filename = loc.file, vars = var.list, verbose = verbose) + for (j in seq_along(var$CF.name)) { + ncdf4::ncvar_put(nc = loc, varid = as.character(var$CF.name[j]), vals = dat.list[[j]]) } ncdf4::nc_close(loc) - + results$file[i] <- loc.file results$host[i] <- PEcAn.remote::fqdn() - results$startdate[i] <- paste0(year,"-01-01 00:00:00") - results$enddate[i] <- paste0(year,"-12-31 23:59:59") - results$mimetype[i] <- 'application/x-netcdf' - results$formatname[i] <- 'CF Meteorology' - + results$startdate[i] <- paste0(year, "-01-01 00:00:00") + results$enddate[i] <- paste0(year, "-12-31 23:59:59") + results$mimetype[i] <- "application/x-netcdf" + results$formatname[i] <- "CF Meteorology" } - + return(invisible(results)) } -#download.MACA('maca','2006-01-01 00:00:00','2006-12-31 23:59:59',5,45,-90) - +# download.MACA('maca','2006-01-01 00:00:00','2006-12-31 23:59:59',5,45,-90) diff --git a/modules/data.atmosphere/R/download.MERRA.R b/modules/data.atmosphere/R/download.MERRA.R index 333625d9519..9934df5c9c5 100644 --- a/modules/data.atmosphere/R/download.MERRA.R +++ b/modules/data.atmosphere/R/download.MERRA.R @@ -10,10 +10,9 @@ download.MERRA <- function(outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, ...) 
{ - dates <- seq.Date(as.Date(start_date), as.Date(end_date), "1 day") - if(!file.exists(outfolder)) { + if (!file.exists(outfolder)) { dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) } @@ -48,10 +47,14 @@ download.MERRA <- function(outfolder, start_date, end_date, baseday <- paste0(year, "-01-01T00:00:00Z") # Accommodate partial years - y_startdate <- pmax(ISOdate(year, 01, 01, 0, tz = "UTC"), - lubridate::as_datetime(start_date)) - y_enddate <- pmin(ISOdate(year, 12, 31, 23, 59, 59, tz = "UTC"), - lubridate::as_datetime(paste(end_date, "23:59:59Z"))) + y_startdate <- pmax( + ISOdate(year, 01, 01, 0, tz = "UTC"), + lubridate::as_datetime(start_date) + ) + y_enddate <- pmin( + ISOdate(year, 12, 31, 23, 59, 59, tz = "UTC"), + lubridate::as_datetime(paste(end_date, "23:59:59Z")) + ) timeseq <- as.numeric(difftime( seq(y_startdate, y_enddate, "hours"), @@ -71,8 +74,10 @@ download.MERRA <- function(outfolder, start_date, end_date, ## Create dimensions lat <- ncdf4::ncdim_def(name = "latitude", units = "degree_north", vals = lat.in, create_dimvar = TRUE) lon <- ncdf4::ncdim_def(name = "longitude", units = "degree_east", vals = lon.in, create_dimvar = TRUE) - time <- ncdf4::ncdim_def(name = "time", units = paste("Days since ", baseday), - vals = timeseq, create_dimvar = TRUE, unlim = TRUE) + time <- ncdf4::ncdim_def( + name = "time", units = paste("Days since ", baseday), + vals = timeseq, create_dimvar = TRUE, unlim = TRUE + ) dim <- list(lat, lon, time) ## Create output variables @@ -109,7 +114,7 @@ download.MERRA <- function(outfolder, start_date, end_date, "It will be overwritten." ) } - + ## Create output file loc <- ncdf4::nc_create(loc.file, var_list) on.exit(ncdf4::nc_close(loc), add = TRUE) @@ -124,36 +129,40 @@ download.MERRA <- function(outfolder, start_date, end_date, PEcAn.logger::severeifnot(paste0("File ", mostfile, " does not exist."), file.exists(mostfile)) nc <- ncdf4::nc_open(mostfile) for (r in seq_len(nrow(merra_vars))) { - x <- ncdf4::ncvar_get(nc, merra_vars[r,][["MERRA_name"]]) - ncdf4::ncvar_put(loc, merra_vars[r,][["CF_name"]], x, - start = c(1, 1, start), count = c(1, 1, 24)) + x <- ncdf4::ncvar_get(nc, merra_vars[r, ][["MERRA_name"]]) + ncdf4::ncvar_put(loc, merra_vars[r, ][["CF_name"]], x, + start = c(1, 1, start), count = c(1, 1, 24) + ) } ncdf4::nc_close(nc) presfile <- file.path(outfolder, sprintf("merra-pres-%s.nc", as.character(date))) PEcAn.logger::severeifnot(paste0("File ", presfile, " does not exist."), file.exists(presfile)) nc <- ncdf4::nc_open(presfile) for (r in seq_len(nrow(merra_pres_vars))) { - x <- ncdf4::ncvar_get(nc, merra_pres_vars[r,][["MERRA_name"]]) - ncdf4::ncvar_put(loc, merra_pres_vars[r,][["CF_name"]], x, - start = c(1, 1, start), count = c(1, 1, 24)) + x <- ncdf4::ncvar_get(nc, merra_pres_vars[r, ][["MERRA_name"]]) + ncdf4::ncvar_put(loc, merra_pres_vars[r, ][["CF_name"]], x, + start = c(1, 1, start), count = c(1, 1, 24) + ) } ncdf4::nc_close(nc) fluxfile <- file.path(outfolder, sprintf("merra-flux-%s.nc", as.character(date))) PEcAn.logger::severeifnot(paste0("File ", fluxfile, " does not exist."), file.exists(fluxfile)) nc <- ncdf4::nc_open(fluxfile) for (r in seq_len(nrow(merra_flux_vars))) { - x <- ncdf4::ncvar_get(nc, merra_flux_vars[r,][["MERRA_name"]]) - ncdf4::ncvar_put(loc, merra_flux_vars[r,][["CF_name"]], x, - start = c(1, 1, start), count = c(1, 1, 24)) + x <- ncdf4::ncvar_get(nc, merra_flux_vars[r, ][["MERRA_name"]]) + ncdf4::ncvar_put(loc, merra_flux_vars[r, ][["CF_name"]], x, + start = c(1, 1, start), count = 
c(1, 1, 24) + ) } ncdf4::nc_close(nc) lfofile <- file.path(outfolder, sprintf("merra-lfo-%s.nc", as.character(date))) PEcAn.logger::severeifnot(paste0("File ", lfofile, " does not exist."), file.exists(lfofile)) nc <- ncdf4::nc_open(lfofile) for (r in seq_len(nrow(merra_lfo_vars))) { - x <- ncdf4::ncvar_get(nc, merra_lfo_vars[r,][["MERRA_name"]]) - ncdf4::ncvar_put(loc, merra_lfo_vars[r,][["CF_name"]], x, - start = c(1, 1, start), count = c(1, 1, 24)) + x <- ncdf4::ncvar_get(nc, merra_lfo_vars[r, ][["MERRA_name"]]) + ncdf4::ncvar_put(loc, merra_lfo_vars[r, ][["CF_name"]], x, + start = c(1, 1, start), count = c(1, 1, 24) + ) } ncdf4::nc_close(nc) } @@ -164,14 +173,16 @@ download.MERRA <- function(outfolder, start_date, end_date, ncdf4::ncvar_get(loc, "surface_diffuse_downwelling_photosynthetic_radiative_flux_in_air") + ncdf4::ncvar_get(loc, "surface_diffuse_downwelling_nearinfrared_radiative_flux_in_air") ncdf4::ncvar_put(loc, "surface_diffuse_downwelling_shortwave_flux_in_air", sw_diffuse, - start = c(1, 1, 1), count = c(1, 1, -1)) + start = c(1, 1, 1), count = c(1, 1, -1) + ) # Total SW direct = Direct PAR + Direct NIR sw_direct <- ncdf4::ncvar_get(loc, "surface_direct_downwelling_photosynthetic_radiative_flux_in_air") + ncdf4::ncvar_get(loc, "surface_direct_downwelling_nearinfrared_radiative_flux_in_air") ncdf4::ncvar_put(loc, "surface_direct_downwelling_shortwave_flux_in_air", sw_direct, - start = c(1, 1, 1), count = c(1, 1, -1)) + start = c(1, 1, 1), count = c(1, 1, -1) + ) } } @@ -210,8 +221,10 @@ get_merra_date <- function(date, latitude, longitude, outdir, overwrite = FALSE) ) qvars <- sprintf("%s%s", merra_vars$MERRA_name, idxstring) qstring <- paste(qvars, collapse = ",") - outfile <- file.path(outdir, sprintf("merra-most-%d-%02d-%02d.nc", - year, month, day)) + outfile <- file.path(outdir, sprintf( + "merra-most-%d-%02d-%02d.nc", + year, month, day + )) if (overwrite || !file.exists(outfile)) { req <- httr::GET( paste(url, qstring, sep = "?"), @@ -228,8 +241,10 @@ get_merra_date <- function(date, latitude, longitude, outdir, overwrite = FALSE) ) qvars <- sprintf("%s%s", merra_pres_vars$MERRA_name, idxstring) qstring <- paste(qvars, collapse = ",") - outfile <- file.path(outdir, sprintf("merra-pres-%d-%02d-%02d.nc", - year, month, day)) + outfile <- file.path(outdir, sprintf( + "merra-pres-%d-%02d-%02d.nc", + year, month, day + )) if (overwrite || !file.exists(outfile)) { req <- httr::GET( paste(url, qstring, sep = "?"), @@ -246,8 +261,10 @@ get_merra_date <- function(date, latitude, longitude, outdir, overwrite = FALSE) ) qvars <- sprintf("%s%s", merra_flux_vars$MERRA_name, idxstring) qstring <- paste(qvars, collapse = ",") - outfile <- file.path(outdir, sprintf("merra-flux-%d-%02d-%02d.nc", - year, month, day)) + outfile <- file.path(outdir, sprintf( + "merra-flux-%d-%02d-%02d.nc", + year, month, day + )) if (overwrite || !file.exists(outfile)) { req <- PEcAn.utils::robustly(httr::GET, n = 10)( paste(url, qstring, sep = "?"), @@ -264,8 +281,10 @@ get_merra_date <- function(date, latitude, longitude, outdir, overwrite = FALSE) ) qvars <- sprintf("%s%s", merra_lfo_vars$MERRA_name, idxstring) qstring <- paste(qvars, collapse = ",") - outfile <- file.path(outdir, sprintf("merra-lfo-%d-%02d-%02d.nc", - year, month, day)) + outfile <- file.path(outdir, sprintf( + "merra-lfo-%d-%02d-%02d.nc", + year, month, day + )) if (overwrite || !file.exists(outfile)) { req <- PEcAn.utils::robustly(httr::GET, n = 10)( paste(url, qstring, sep = "?"), diff --git 
a/modules/data.atmosphere/R/download.MsTMIP_NARR.R b/modules/data.atmosphere/R/download.MsTMIP_NARR.R index dfa59ec9742..d4eaae11bad 100644 --- a/modules/data.atmosphere/R/download.MsTMIP_NARR.R +++ b/modules/data.atmosphere/R/download.MsTMIP_NARR.R @@ -16,38 +16,43 @@ ##' @author James Simkins download.MsTMIP_NARR <- function(outfolder, start_date, end_date, site_id, lat.in, lon.in, overwrite = FALSE, verbose = FALSE, ...) { - start_date <- as.POSIXlt(start_date, tz = "UTC") - end_date <- as.POSIXlt(end_date, tz = "UTC") + end_date <- as.POSIXlt(end_date, tz = "UTC") start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - site_id <- as.numeric(site_id) - outfolder <- paste0(outfolder, "_site_", paste0(site_id%/%1e+09, "-", site_id%%1e+09)) + end_year <- lubridate::year(end_date) + site_id <- as.numeric(site_id) + outfolder <- paste0(outfolder, "_site_", paste0(site_id %/% 1e+09, "-", site_id %% 1e+09)) - lat.in <- as.numeric(lat.in) - lon.in <- as.numeric(lon.in) + lat.in <- as.numeric(lat.in) + lon.in <- as.numeric(lon.in) lat_trunc <- floor(4 * (84 - as.numeric(lat.in))) lon_trunc <- floor(4 * (as.numeric(lon.in) + 170)) - dap_base <- "http://thredds.daac.ornl.gov/thredds/dodsC/ornldaac/1220/mstmip_driver_na_qd_climate_" + dap_base <- "http://thredds.daac.ornl.gov/thredds/dodsC/ornldaac/1220/mstmip_driver_na_qd_climate_" dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) ylist <- seq(start_year, end_year, by = 1) rows <- length(ylist) - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = "MsTMIP_NARR", - stringsAsFactors = FALSE) - - var <- data.frame(DAP.name = c("air_2m", "dswrf", "dlwrf", "wnd_10m", "apcp", "shum_2m", "rhum_2m"), - CF.name = c("air_temperature", "surface_downwelling_shortwave_flux_in_air", - "surface_downwelling_longwave_flux_in_air", - "wind_speed", "precipitation_flux", "specific_humidity", "relative_humidity"), - units = c("Kelvin", "W/m2", "W/m2", "m/s", "kg/m2/s", "g/g", "%")) + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = "MsTMIP_NARR", + stringsAsFactors = FALSE + ) + + var <- data.frame( + DAP.name = c("air_2m", "dswrf", "dlwrf", "wnd_10m", "apcp", "shum_2m", "rhum_2m"), + CF.name = c( + "air_temperature", "surface_downwelling_shortwave_flux_in_air", + "surface_downwelling_longwave_flux_in_air", + "wind_speed", "precipitation_flux", "specific_humidity", "relative_humidity" + ), + units = c("Kelvin", "W/m2", "W/m2", "m/s", "kg/m2/s", "g/g", "%") + ) for (i in seq_len(rows)) { @@ -60,11 +65,13 @@ download.MsTMIP_NARR <- function(outfolder, start_date, end_date, site_id, lat.i ## Create dimensions lat <- ncdf4::ncdim_def(name = "latitude", units = "degree_north", vals = lat.in, create_dimvar = TRUE) lon <- ncdf4::ncdim_def(name = "longitude", units = "degree_east", vals = lon.in, create_dimvar = TRUE) - time <- ncdf4::ncdim_def(name = "time", - units = "sec", - vals = (1:ntime) * 10800, - create_dimvar = TRUE, - unlim = TRUE) + time <- ncdf4::ncdim_def( + name = "time", + units = "sec", + vals = (1:ntime) * 10800, + create_dimvar = TRUE, + unlim = TRUE + ) dim <- list(lat, lon, time) var.list <- list() @@ -75,19 +82,25 @@ download.MsTMIP_NARR <- function(outfolder, start_date, end_date, site_id, lat.i ## 
get data off OpenDAP for (j in seq_len(nrow(var))) { if (var$DAP.name[j] == "dswrf") { - (dap_file <- paste0("http://thredds.daac.ornl.gov/thredds/dodsC/ornldaac/1220/mstmip_driver_na_qd_dswrf_", - year, "_v1.nc4")) + (dap_file <- paste0( + "http://thredds.daac.ornl.gov/thredds/dodsC/ornldaac/1220/mstmip_driver_na_qd_dswrf_", + year, "_v1.nc4" + )) } else { (dap_file <- paste0(dap_base, var$DAP.name[j], "_", year, "_v1.nc4")) } dap <- ncdf4::nc_open(dap_file) - dat.list[[j]] <- ncdf4::ncvar_get(dap, as.character(DAPvar[j]), - c(lon_trunc, lat_trunc, 1), c(1, 1, ntime)) - var.list[[j]] <- ncdf4::ncvar_def(name = as.character(var$CF.name[j]), - units = as.character(var$units[j]), - dim = dim, - missval = -999, - verbose = verbose) + dat.list[[j]] <- ncdf4::ncvar_get( + dap, as.character(DAPvar[j]), + c(lon_trunc, lat_trunc, 1), c(1, 1, ntime) + ) + var.list[[j]] <- ncdf4::ncvar_def( + name = as.character(var$CF.name[j]), + units = as.character(var$units[j]), + dim = dim, + missval = -999, + verbose = verbose + ) ncdf4::nc_close(dap) } @@ -101,11 +114,11 @@ download.MsTMIP_NARR <- function(outfolder, start_date, end_date, site_id, lat.i } ncdf4::nc_close(loc) - results$file[i] <- loc.file - results$host[i] <- PEcAn.remote::fqdn() - results$startdate[i] <- paste0(year, "-01-01 00:00:00") - results$enddate[i] <- paste0(year, "-12-31 23:59:59") - results$mimetype[i] <- "application/x-netcdf" + results$file[i] <- loc.file + results$host[i] <- PEcAn.remote::fqdn() + results$startdate[i] <- paste0(year, "-01-01 00:00:00") + results$enddate[i] <- paste0(year, "-12-31 23:59:59") + results$mimetype[i] <- "application/x-netcdf" results$formatname[i] <- "CF Meteorology" } diff --git a/modules/data.atmosphere/R/download.NARR.R b/modules/data.atmosphere/R/download.NARR.R index 32d98314883..8874468f106 100644 --- a/modules/data.atmosphere/R/download.NARR.R +++ b/modules/data.atmosphere/R/download.NARR.R @@ -9,70 +9,73 @@ ##' @param ... other inputs ##' example options(download.ftp.method="ncftpget") ##' @importFrom dplyr %>% -##' +##' ##' @examples ##' \dontrun{ ##' download.NARR("~/",'2000/01/01','2000/01/02', overwrite = TRUE, verbose = TRUE) ##' } -##' +##' ##' @export ##' ##' @author Betsy Cowdery, Shawn Serbin download.NARR <- function(outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, method, ...) 
{ - start_date <- as.POSIXlt(start_date, tz = "UTC") - end_date <- as.POSIXlt(end_date, tz = "UTC") + end_date <- as.POSIXlt(end_date, tz = "UTC") start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) + end_year <- lubridate::year(end_date) NARR_start <- 1979 if (start_year < NARR_start) { - PEcAn.logger::logger.severe(sprintf('Input year range (%d:%d) exceeds the NARR range (%d:present)', - start_year, end_year, - NARR_start)) + PEcAn.logger::logger.severe(sprintf( + "Input year range (%d:%d) exceeds the NARR range (%d:present)", + start_year, end_year, + NARR_start + )) } - + # Download Raw NARR from the internet - + vlist <- c("pres.sfc", "dswrf", "dlwrf", "air.2m", "shum.2m", "prate", "vwnd.10m", "uwnd.10m") ylist <- seq(end_year, start_year, by = -1) - + dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) - + rows <- length(vlist) * length(ylist) - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = "NARR", - stringsAsFactors = FALSE) - + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = "NARR", + stringsAsFactors = FALSE + ) + for (v in vlist) { for (year in ylist) { new.file <- file.path(outfolder, paste(v, year, "nc", sep = ".")) - + # create array with results row <- which(vlist == v) * which(ylist == year) - results$file[row] <- new.file - results$host[row] <- PEcAn.remote::fqdn() - results$startdate[row] <- paste0(year, "-01-01 00:00:00") - results$enddate[row] <- paste0(year, "-12-31 23:59:59") - results$mimetype[row] <- "application/x-netcdf" + results$file[row] <- new.file + results$host[row] <- PEcAn.remote::fqdn() + results$startdate[row] <- paste0(year, "-01-01 00:00:00") + results$enddate[row] <- paste0(year, "-12-31 23:59:59") + results$mimetype[row] <- "application/x-netcdf" results$formatname[row] <- "NARR" - + if (file.exists(new.file) && !overwrite) { PEcAn.logger::logger.debug("File '", new.file, "' already exists, skipping to next file.") next } - + url <- paste0("ftp://ftp.cdc.noaa.gov/Datasets/NARR/monolevel/", v, ".", year, ".nc") - + PEcAn.logger::logger.debug(paste0("Downloading from:\n", url, "\nto:\n", new.file)) PEcAn.utils::download_file(url, new.file, method) } } - + return(invisible(results)) } # download.NARR diff --git a/modules/data.atmosphere/R/download.NARR_site.R b/modules/data.atmosphere/R/download.NARR_site.R index bf2fdb1b51d..02d5d4a4fc1 100644 --- a/modules/data.atmosphere/R/download.NARR_site.R +++ b/modules/data.atmosphere/R/download.NARR_site.R @@ -15,12 +15,10 @@ #' @param ... further arguments, currently ignored #' #' @examples -#' #' \dontrun{ #' download.NARR_site(tempdir(), "2001-01-01", "2001-01-12", 43.372, -89.907) #' } #' -#' #' @export #' @importFrom rlang .data #' @@ -34,7 +32,6 @@ download.NARR_site <- function(outfolder, parallel = TRUE, ncores = if (parallel) parallel::detectCores() else NULL, ...) 
{ - if (verbose) PEcAn.logger::logger.info("Downloading NARR data") narr_data <- get_NARR_thredds( start_date, end_date, lat.in, lon.in, @@ -114,7 +111,7 @@ prepare_narr_year <- function(dat, file, lat_nc, lon_nc, verbose = FALSE) { ) nc <- ncdf4::nc_create(file, ncvar_list, verbose = verbose) on.exit(ncdf4::nc_close(nc), add = TRUE) - purrr::iwalk(nc_values, ~ncdf4::ncvar_put(nc, .y, .x, verbose = verbose)) + purrr::iwalk(nc_values, ~ ncdf4::ncvar_put(nc, .y, .x, verbose = verbose)) invisible(ncvar_list) } @@ -148,7 +145,6 @@ col2ncvar <- function(variable, dims) { #' @return `tibble` containing time series of NARR data for the given site #' @author Alexey Shiklomanov #' @examples -#' #' \dontrun{ #' dat <- get_NARR_thredds("2008-01-01", "2008-01-15", 43.3724, -89.9071) #' } @@ -158,9 +154,7 @@ get_NARR_thredds <- function(start_date, end_date, lat.in, lon.in, progress = TRUE, drop_outside = TRUE, parallel = TRUE, - ncores = 1 - ) { - + ncores = 1) { PEcAn.logger::severeifnot( length(start_date) == 1, msg = paste("Start date must be a scalar, but has length", length(start_date)) @@ -217,12 +211,13 @@ get_NARR_thredds <- function(start_date, end_date, lat.in, lon.in, xy <- latlon2narr(nc1, lat.in, lon.in) if (parallel) { - if (!requireNamespace("parallel", quietly = TRUE) - || !requireNamespace("doParallel", quietly = TRUE)) { + if (!requireNamespace("parallel", quietly = TRUE) || + !requireNamespace("doParallel", quietly = TRUE)) { PEcAn.logger::logger.severe( "Could not find all packages needed for simultaneous NARR downloads. ", "Either run `install.packages(c(\"parallel\", \"doParallel\"))`, ", - "or call get_NARR_thredds with `parallel = FALSE`.") + "or call get_NARR_thredds with `parallel = FALSE`." + ) } # Load in parallel @@ -239,12 +234,11 @@ get_NARR_thredds <- function(start_date, end_date, lat.in, lon.in, .packages = c("PEcAn.data.atmosphere", "dplyr"), .export = c("get_narr_url", "robustly") ), - PEcAn.utils::robustly(get_narr_url)(url, xy = xy, flx = flx) + PEcAn.utils::robustly(get_narr_url)(url, xy = xy, flx = flx) ) flx_data_raw <- dplyr::filter(get_dfs, .data$flx) sfc_data_raw <- dplyr::filter(get_dfs, !.data$flx) } else { - # Retrieve remaining variables by iterating over URLs npb <- nrow(flx_df) * nrow(narr_flx_vars) + nrow(sfc_df) * nrow(narr_sfc_vars) @@ -457,8 +451,8 @@ latlon2narr <- function(nc, lat.in, lon.in) { narr_x <- ncdf4::ncvar_get(nc, "x") narr_y <- ncdf4::ncvar_get(nc, "y") ptrans <- latlon2lcc(lat.in, lon.in) - x_ind <- which.min((ptrans$x - narr_x) ^ 2) - y_ind <- which.min((ptrans$y - narr_y) ^ 2) + x_ind <- which.min((ptrans$x - narr_x)^2) + y_ind <- which.min((ptrans$y - narr_y)^2) c(x = x_ind, y = y_ind) } @@ -469,9 +463,9 @@ latlon2narr <- function(nc, lat.in, lon.in) { #' @return `sp::SpatialPoints` object containing transformed x and y #' coordinates, in km, which should match NARR coordinates #' @importFrom sf st_crs - # ^not used directly here, but needed by sp::CRS. - # sp lists sf in Suggests rather than Imports, - # so importing it here to ensure it's available at run time +# ^not used directly here, but needed by sp::CRS. 
+# sp lists sf in Suggests rather than Imports, +# so importing it here to ensure it's available at run time #' @author Alexey Shiklomanov #' @export latlon2lcc <- function(lat.in, lon.in) { diff --git a/modules/data.atmosphere/R/download.NEONmet.R b/modules/data.atmosphere/R/download.NEONmet.R index be132a1a5ee..f13392e9737 100644 --- a/modules/data.atmosphere/R/download.NEONmet.R +++ b/modules/data.atmosphere/R/download.NEONmet.R @@ -1,11 +1,11 @@ ##' Download NEON Site Met CSV files ##' ##' download.NEONmet -##' +##' ##' Uses NEON v0 API to download met data from NEON towers and convert to CF NetCDF -##' +##' ##' @export -##' @param sitename the NEON ID of the site to be downloaded, used as file name prefix. +##' @param sitename the NEON ID of the site to be downloaded, used as file name prefix. ##' The 4-letter SITE code in \href{https://www.neonscience.org/science-design/field-sites/list}{list of NEON sites} ##' @param outfolder location on disk where outputs will be stored ##' @param start_date the start date of the data to be downloaded. Format is YYYY-MM-DD (will only use the year and month of the date) @@ -14,306 +14,343 @@ ##' @param verbose makes the function output more text ##' @param ... further arguments, currently ignored ##' -##' @examples +##' @examples ##' \dontrun{ ##' result <- download.NEONmet('HARV','~/','2017-01-01','2017-01-31',overwrite=TRUE) ##' } -download.NEONmet <- function(sitename, outfolder, start_date, end_date, - overwrite = FALSE, verbose = FALSE, ...) { - +download.NEONmet <- function(sitename, outfolder, start_date, end_date, + overwrite = FALSE, verbose = FALSE, ...) { if (!file.exists(outfolder)) { dir.create(outfolder) } - - #Check if site is a NEON site + + # Check if site is a NEON site site <- sub(".* \\((.*)\\)", "\\1", sitename) siteinfo <- nneo::nneo_site(site) if (!exists("siteinfo")) { PEcAn.logger::logger.error("Could not get information about", sitename, ".", "Is this a NEON site?") } - - #See what products and dates are available for this site - availProducts <- siteinfo$dataProducts$dataProductCode #list of data prodcuts by code - availDates <- siteinfo$dataProducts$availableMonths #lists of availably YYYY-MM per site, use unlist() + + # See what products and dates are available for this site + availProducts <- siteinfo$dataProducts$dataProductCode # list of data products by code + availDates <- siteinfo$dataProducts$availableMonths # lists of available YYYY-MM per site, use unlist() lat <- siteinfo$siteLatitude lon <- siteinfo$siteLongitude - - #Figure out which months are needed + + # Figure out which months are needed start_date <- as.POSIXlt(start_date, tz = "UTC") end_date <- as.POSIXlt(end_date, tz = "UTC") start_ymd <- lubridate::ymd(start_date) end_ymd <- lubridate::ymd(end_date) - #Subset to max/min available dates if start or end date exceed those bounds + # Subset to max/min available dates if start or end date exceed those bounds allDates <- unlist(availDates) minDate <- min(allDates) maxDate <- max(allDates) - start_ym <- substr(start_ymd,1,7) - end_ym <- substr(end_ymd,1,7) - if (start_ym<minDate) { - start_ym <- minDate - } - if (end_ym>maxDate) { - end_ym <- maxDate - } - start_year <- as.numeric(substr(start_ym,1,4)) - end_year <- as.numeric(substr(end_ym,1,4)) - - #create results data frame + start_ym <- substr(start_ymd, 1, 7) + end_ym <- substr(end_ymd, 1, 7) + if (start_ym < minDate) { + start_ym <- minDate + } + if (end_ym > maxDate) { + end_ym <- maxDate + } + start_year <- as.numeric(substr(start_ym, 1, 4)) + end_year <- as.numeric(substr(end_ym, 1, 4)) + + # create results data frame rows <- end_year - start_year + 1 - results <- data.frame(file = character(rows), - host = character(rows),
- mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = paste0("NEONmet.",site), - stringsAsFactors = FALSE) - + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = paste0("NEONmet.", site), + stringsAsFactors = FALSE + ) + all_years <- start_year:end_year all_files <- file.path(outfolder, paste0("NEONmet.", site, ".", as.character(all_years), ".nc")) results$file <- all_files results$host <- PEcAn.remote::fqdn() - results$mimetype <- "application/x-netcdf" + results$mimetype <- "application/x-netcdf" results$formatname <- "CF" - results$startdate <- paste0(all_years, "-01-01 00:00:00") - results$enddate <- paste0(all_years, "-12-31 23:59:59") - + results$startdate <- paste0(all_years, "-01-01 00:00:00") + results$enddate <- paste0(all_years, "-12-31 23:59:59") + for (current_year in all_years) { - - #Figure out start and end time for this year - y_idx <- current_year - start_year + 1 - if (current_year==start_year) - { - start_m <- substr(start_ym,6,7) - } else { - start_m <- "01" - } - if (current_year==end_year) { - end_m <- substr(end_ym,6,7) - } else { - end_m <- "12" - } - days_in_last_month <- as.character(lubridate::days_in_month(lubridate::ymd(paste0(current_year,end_m,"-01")))) - start_ymd <- (paste0(current_year,"-",start_m,"-01")) - end_ymd <- (paste0(current_year,"-",end_m,"-",days_in_last_month)) - start_date <- as.POSIXlt(paste0(start_ymd," 00:00:00 UTC"), tz = "UTC") - end_date <- as.POSIXlt(paste0(end_ymd," 23:30:00 UTC"), tz = "UTC") + # Figure out start and end time for this year + y_idx <- current_year - start_year + 1 + if (current_year == start_year) { + start_m <- substr(start_ym, 6, 7) + } else { + start_m <- "01" + } + if (current_year == end_year) { + end_m <- substr(end_ym, 6, 7) + } else { + end_m <- "12" + } + days_in_last_month <- as.character(lubridate::days_in_month(lubridate::ymd(paste0(current_year, end_m, "-01")))) + start_ymd <- (paste0(current_year, "-", start_m, "-01")) + end_ymd <- (paste0(current_year, "-", end_m, "-", days_in_last_month)) + start_date <- as.POSIXlt(paste0(start_ymd, " 00:00:00 UTC"), tz = "UTC") + end_date <- as.POSIXlt(paste0(end_ymd, " 23:30:00 UTC"), tz = "UTC") - #Warn if no data is available for any months in given year - monthsNeeded <- substr(seq(as.Date(start_ymd),as.Date(end_ymd),by='month'),1,7) - if (length(intersect(unlist(availDates),monthsNeeded))==0) { - PEcAn.logger::logger.warn("No data available in year ",current_year) - next() - } - startMon <- min(monthsNeeded) - endMon <- max(monthsNeeded) - - #Set up netcdf file, dimensions, and sequence of dates + # Warn if no data is available for any months in given year + monthsNeeded <- substr(seq(as.Date(start_ymd), as.Date(end_ymd), by = "month"), 1, 7) + if (length(intersect(unlist(availDates), monthsNeeded)) == 0) { + PEcAn.logger::logger.warn("No data available in year ", current_year) + next() + } + startMon <- min(monthsNeeded) + endMon <- max(monthsNeeded) + + # Set up netcdf file, dimensions, and sequence of dates new.file <- all_files[y_idx] if (file.exists(new.file) && !overwrite) { PEcAn.logger::logger.debug("File '", new.file, "' already exists, skipping.") next() } - - seqTime <- seq(start_date,end_date,by=1800) + + seqTime <- seq(start_date, end_date, by = 1800) datetime <- as.POSIXct(seqTime) results$startdate[y_idx] 
<- as.character(datetime[1]) results$enddate[y_idx] <- as.character(datetime[length(datetime)]) - + days_since_1700 <- datetime - lubridate::ymd_hm("1700-01-01 00:00 UTC") - t <- ncdf4::ncdim_def("time", "days since 1700-01-01", as.numeric(days_since_1700)) #define netCDF dimensions for variables + t <- ncdf4::ncdim_def("time", "days since 1700-01-01", as.numeric(days_since_1700)) # define netCDF dimensions for variables timestep <- 1800 - - ## create lat lon dimensions - x <- ncdf4::ncdim_def("longitude", "degrees_east", lon) # define netCDF dimensions for variables + + ## create lat lon dimensions + x <- ncdf4::ncdim_def("longitude", "degrees_east", lon) # define netCDF dimensions for variables y <- ncdf4::ncdim_def("latitude", "degrees_north", lat) xytdim <- list(x, y, t) - - #STEPS: Download all months in startdate to enddate for given variable - # Read CSV, copy to array, add to NetCDF file - #NEON.DP1.00002 Air Temp profile or NEON.DP1.00003 Triple-aspirated T (preferred) - airTempLoc <- grep("DP1\\.00002",availProducts) - airTemp3Loc <- grep("DP1\\.00003",availProducts) - if ((length(airTempLoc)==0) && (length(airTemp3Loc)==0)) { - PEcAn.logger::logger.error("Air temperature DP1.00002 or DP1.00003 not available") + # STEPS: Download all months in startdate to enddate for given variable + # Read CSV, copy to array, add to NetCDF file + + # NEON.DP1.00002 Air Temp profile or NEON.DP1.00003 Triple-aspirated T (preferred) + airTempLoc <- grep("DP1\\.00002", availProducts) + airTemp3Loc <- grep("DP1\\.00003", availProducts) + if ((length(airTempLoc) == 0) && (length(airTemp3Loc) == 0)) { + PEcAn.logger::logger.error("Air temperature DP1.00002 or DP1.00003 not available") } - airTempDates <- neonmet.getDates(availDates,airTempLoc,startMon,endMon) - airTemp3Dates <- neonmet.getDates(availDates,airTemp3Loc,startMon,endMon) + airTempDates <- neonmet.getDates(availDates, airTempLoc, startMon, endMon) + airTemp3Dates <- neonmet.getDates(availDates, airTemp3Loc, startMon, endMon) nairTemp <- length(airTempDates) nairTemp3 <- length(airTemp3Dates) - if ((nairTemp==0) && (nairTemp3==0)) { - PEcAn.logger::logger.error("Air temperature DP1.00002 or DP1.00003 not available in date range ",startMon," ",endMon) + if ((nairTemp == 0) && (nairTemp3 == 0)) { + PEcAn.logger::logger.error("Air temperature DP1.00002 or DP1.00003 not available in date range ", startMon, " ", endMon) } - #define NetCDF variable and create NetCDF file + # define NetCDF variable and create NetCDF file airT.var <- ncdf4::ncvar_def(name = "air_temperature", units = "K", dim = xytdim) - nc <- ncdf4::nc_create(new.file, vars = airT.var) #create netCDF file - if (nairTemp3>nairTemp) { + nc <- ncdf4::nc_create(new.file, vars = airT.var) # create netCDF file + if (nairTemp3 > nairTemp) { if (verbose) { PEcAn.logger::logger.info("Reading NEON SingleAsp AirTemp") } - ncdata <- neonmet.getVals(dates=airTemp3Dates,product=availProducts[airTemp3Loc[1]],site=site, - datetime=datetime,data_col="tempTripleMean",QF=1, - units=c("celsius","K")) + ncdata <- neonmet.getVals( + dates = airTemp3Dates, product = availProducts[airTemp3Loc[1]], site = site, + datetime = datetime, data_col = "tempTripleMean", QF = 1, + units = c("celsius", "K") + ) } else { if (verbose) { PEcAn.logger::logger.info("Reading NEON TripleAsp AirTemp") } - ncdata <- neonmet.getVals(dates=airTempDates,product=availProducts[airTempLoc[1]],site=site, - datetime=datetime,data_col="tempSingleMean", - units=c("celsius","K")) + ncdata <- neonmet.getVals( + dates = airTempDates, 
product = availProducts[airTempLoc[1]], site = site, + datetime = datetime, data_col = "tempSingleMean", + units = c("celsius", "K") + ) } ncdf4::ncvar_put(nc, varid = airT.var, vals = ncdata) # NEON.DP1.00004 Pressure - pSurfLoc <- grep("DP1\\.00004",availProducts) - pSurfDates <- neonmet.getDates(availDates,pSurfLoc,startMon,endMon) + pSurfLoc <- grep("DP1\\.00004", availProducts) + pSurfDates <- neonmet.getDates(availDates, pSurfLoc, startMon, endMon) npSurf <- length(pSurfDates) - if (length(pSurfDates)>0) { + if (length(pSurfDates) > 0) { if (verbose) { PEcAn.logger::logger.info("Reading NEON Pressure") } Psurf.var <- ncdf4::ncvar_def(name = "air_pressure", units = "Pa", dim = xytdim) nc <- ncdf4::ncvar_add(nc = nc, v = Psurf.var, verbose = verbose) - ncdata <- neonmet.getVals(dates=pSurfDates,product=availProducts[pSurfLoc[1]],site=site, - datetime=datetime,data_col="staPresMean",QF_col="staPresFinalQF", - units=c("kPa","Pa")) + ncdata <- neonmet.getVals( + dates = pSurfDates, product = availProducts[pSurfLoc[1]], site = site, + datetime = datetime, data_col = "staPresMean", QF_col = "staPresFinalQF", + units = c("kPa", "Pa") + ) ncdf4::ncvar_put(nc, varid = Psurf.var, vals = ncdata) } else { PEcAn.logger::logger.warn("No NEON Pressure Data") } # NEON.DP1.00024 PAR - PARLoc <- grep("DP1\\.00024",availProducts) - PARDates <- neonmet.getDates(availDates,PARLoc,startMon,endMon) - if (length(PARDates)>0) { + PARLoc <- grep("DP1\\.00024", availProducts) + PARDates <- neonmet.getDates(availDates, PARLoc, startMon, endMon) + if (length(PARDates) > 0) { if (verbose) { PEcAn.logger::logger.info("Reading NEON PAR") } - PAR.var <- ncdf4::ncvar_def(name = "surface_downwelling_photosynthetic_photon_flux_in_air", - units = "mol m-2 s-1", - dim = xytdim) + PAR.var <- ncdf4::ncvar_def( + name = "surface_downwelling_photosynthetic_photon_flux_in_air", + units = "mol m-2 s-1", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = PAR.var, verbose = verbose) - ncdata <- neonmet.getVals(dates=PARDates,product=availProducts[PARLoc[1]],site=site, - datetime=datetime,data_col="PARMean",QF_col="PARFinalQF", - units=c("umol m-2 s-1", "mol m-2 s-1")) + ncdata <- neonmet.getVals( + dates = PARDates, product = availProducts[PARLoc[1]], site = site, + datetime = datetime, data_col = "PARMean", QF_col = "PARFinalQF", + units = c("umol m-2 s-1", "mol m-2 s-1") + ) ncdf4::ncvar_put(nc, varid = PAR.var, vals = ncdata) } else { PEcAn.logger::logger.warn("No NEON PAR DAta") } - + # NEON.DP1.00006 Precip (missing uncertainty information) - precipLoc <- grep("DP1\\.00006",availProducts) - precipDates <- neonmet.getDates(availDates,precipLoc,startMon,endMon) - if (length(precipDates)>0) { + precipLoc <- grep("DP1\\.00006", availProducts) + precipDates <- neonmet.getDates(availDates, precipLoc, startMon, endMon) + if (length(precipDates) > 0) { if (verbose) { PEcAn.logger::logger.info("Reading NEON Precip") } - precip.var <- ncdf4::ncvar_def(name = "precipitation_flux", - units = "kg m-2 s-1", - dim = xytdim) + precip.var <- ncdf4::ncvar_def( + name = "precipitation_flux", + units = "kg m-2 s-1", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = precip.var, verbose = verbose) - ncdata <- neonmet.getVals(dates=precipDates,product=availProducts[precipLoc[1]],site=site, - datetime=datetime,data_col="priPrecipBulk",QF_col="priPrecipFinalQF", - urlstring = "\\.00000\\.900\\.(.*)30min", - units=c("kg m-2 1/1800 s-1", "kg m-2 s-1")) #mm per half hour + ncdata <- neonmet.getVals( + dates = precipDates, product = 
availProducts[precipLoc[1]], site = site, + datetime = datetime, data_col = "priPrecipBulk", QF_col = "priPrecipFinalQF", + urlstring = "\\.00000\\.900\\.(.*)30min", + units = c("kg m-2 1/1800 s-1", "kg m-2 s-1") + ) # mm per half hour ncdf4::ncvar_put(nc, varid = precip.var, vals = ncdata) } else { PEcAn.logger::logger.warn("No NEON Precip") } - + # NEON.DP1.00098 RH - RHLoc <- grep("DP1\\.00098",availProducts) - RHDates <- neonmet.getDates(availDates,RHLoc,startMon,endMon) - if (length(RHDates)>0) { + RHLoc <- grep("DP1\\.00098", availProducts) + RHDates <- neonmet.getDates(availDates, RHLoc, startMon, endMon) + if (length(RHDates) > 0) { if (verbose) { PEcAn.logger::logger.info("Reading NEON RH") } - RH.var <- ncdf4::ncvar_def(name = "relative_humidity", - units = "%", - dim = xytdim) + RH.var <- ncdf4::ncvar_def( + name = "relative_humidity", + units = "%", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = RH.var, verbose = verbose) - ncdata <- neonmet.getVals(dates=RHDates,product=availProducts[RHLoc[1]],site=site, - datetime=datetime,data_col="RHMean",QF_col="RHFinalQF", - units=c("%", "%")) + ncdata <- neonmet.getVals( + dates = RHDates, product = availProducts[RHLoc[1]], site = site, + datetime = datetime, data_col = "RHMean", QF_col = "RHFinalQF", + units = c("%", "%") + ) ncdf4::ncvar_put(nc, varid = RH.var, vals = ncdata) } else { PEcAn.logger::logger.warn("No NEON RH data") } - + # DP1.00023 SW/LW or NEON.DP1.00022 SW (Possible future: DP1.00014 for Direct/Diffuse SW) - SWLoc <- grep("DP1\\.00022",availProducts) - SWLWLoc <- grep("DP1\\.00023",availProducts) - SWDates <- neonmet.getDates(availDates,SWLoc,startMon,endMon) - SWLWDates <- neonmet.getDates(availDates,SWLWLoc,startMon,endMon) - if (length(SWLWDates)>0) { + SWLoc <- grep("DP1\\.00022", availProducts) + SWLWLoc <- grep("DP1\\.00023", availProducts) + SWDates <- neonmet.getDates(availDates, SWLoc, startMon, endMon) + SWLWDates <- neonmet.getDates(availDates, SWLWLoc, startMon, endMon) + if (length(SWLWDates) > 0) { if (verbose) { PEcAn.logger::logger.info("Reading NEON SWLW") } - SW.var <- ncdf4::ncvar_def(name = "surface_downwelling_shortwave_flux_in_air", - units = "W m-2", - dim = xytdim) + SW.var <- ncdf4::ncvar_def( + name = "surface_downwelling_shortwave_flux_in_air", + units = "W m-2", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = SW.var, verbose = verbose) - ncdata <- neonmet.getVals(dates=SWLWDates,product=availProducts[SWLWLoc[1]],site=site, - datetime=datetime,data_col="inSWMean",QF_col="inSWFinalQF", - units=c("W m-2", "W m-2")) + ncdata <- neonmet.getVals( + dates = SWLWDates, product = availProducts[SWLWLoc[1]], site = site, + datetime = datetime, data_col = "inSWMean", QF_col = "inSWFinalQF", + units = c("W m-2", "W m-2") + ) ncdf4::ncvar_put(nc, varid = SW.var, vals = ncdata) - LW.var <- ncdf4::ncvar_def(name = "surface_downwelling_longwave_flux_in_air", - units = "W m-2", - dim = xytdim) + LW.var <- ncdf4::ncvar_def( + name = "surface_downwelling_longwave_flux_in_air", + units = "W m-2", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = LW.var, verbose = verbose) - ncdata <- neonmet.getVals(dates=SWLWDates,product=availProducts[SWLWLoc[1]],site=site, - datetime=datetime,data_col="inLWMean",QF_col="inLWFinalQF", - units=c("W m-2", "W m-2")) + ncdata <- neonmet.getVals( + dates = SWLWDates, product = availProducts[SWLWLoc[1]], site = site, + datetime = datetime, data_col = "inLWMean", QF_col = "inLWFinalQF", + units = c("W m-2", "W m-2") + ) ncdf4::ncvar_put(nc, varid = LW.var, vals = 
ncdata) } else { - if (length(SWDates)>0) { + if (length(SWDates) > 0) { if (verbose) { PEcAn.logger::logger.info("Reading NEON SW") } - SW.var <- ncdf4::ncvar_def(name = "surface_downwelling_shortwave_flux_in_air", - units = "W m-2", - dim = xytdim) + SW.var <- ncdf4::ncvar_def( + name = "surface_downwelling_shortwave_flux_in_air", + units = "W m-2", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = SW.var, verbose = verbose) - ncdata <- neonmet.getVals(dates=SWDates,product=availProducts[SWLoc[1]],site=site, - datetime=datetime,data_col="shortRadMean", - units=c("W m-2", "W m-2")) + ncdata <- neonmet.getVals( + dates = SWDates, product = availProducts[SWLoc[1]], site = site, + datetime = datetime, data_col = "shortRadMean", + units = c("W m-2", "W m-2") + ) ncdf4::ncvar_put(nc, varid = SW.var, vals = ncdata) } else { PEcAn.logger::logger.warn("No NEON SW/LW or SW") } } - + # NEON.DP1.00001 2D wind speed/direction - have to do northward/eastward math - WSpdLoc = grep("DP1\\.00001",availProducts) - WSpdDates <- neonmet.getDates(availDates,WSpdLoc,startMon,endMon) - if (length(WSpdDates)>0) { + WSpdLoc <- grep("DP1\\.00001", availProducts) + WSpdDates <- neonmet.getDates(availDates, WSpdLoc, startMon, endMon) + if (length(WSpdDates) > 0) { if (verbose) { PEcAn.logger::logger.info("Reading NEON Wind Speed/Direction") } - WSpd.var <- ncdf4::ncvar_def(name = "wind_speed", - units = "m s-1", - dim = xytdim) - WDir.var <- ncdf4::ncvar_def(name = "wind_direction", - units = "degrees", - dim = xytdim) + WSpd.var <- ncdf4::ncvar_def( + name = "wind_speed", + units = "m s-1", + dim = xytdim + ) + WDir.var <- ncdf4::ncvar_def( + name = "wind_direction", + units = "degrees", + dim = xytdim + ) Ewind.var <- ncdf4::ncvar_def(name = "eastward_wind", units = "m s-1", dim = xytdim) Nwind.var <- ncdf4::ncvar_def(name = "northward_wind", units = "m s-1", dim = xytdim) nc <- ncdf4::ncvar_add(nc = nc, v = WSpd.var, verbose = verbose) nc <- ncdf4::ncvar_add(nc = nc, v = WDir.var, verbose = verbose) nc <- ncdf4::ncvar_add(nc = nc, v = Ewind.var, verbose = verbose) nc <- ncdf4::ncvar_add(nc = nc, v = Nwind.var, verbose = verbose) - ncdata_spd <- neonmet.getVals(dates=WSpdDates,product=availProducts[WSpdLoc[1]],site=site, - datetime=datetime,data_col="windSpeedMean",QF_col="windSpeedFinalQF", - units=c("m s-1", "m s-1")) + ncdata_spd <- neonmet.getVals( + dates = WSpdDates, product = availProducts[WSpdLoc[1]], site = site, + datetime = datetime, data_col = "windSpeedMean", QF_col = "windSpeedFinalQF", + units = c("m s-1", "m s-1") + ) ncdf4::ncvar_put(nc, varid = WSpd.var, vals = ncdata_spd) - ncdata_dir <- neonmet.getVals(dates=WSpdDates,product=availProducts[WSpdLoc[1]],site=site, - datetime=datetime,data_col="windDirMean",QF_col="windDirFinalQF", - units=c("degrees", "degrees")) - - ncdf4::ncvar_put(nc, varid = WDir.var, vals = ncdata_dir) - wdir_rad <- PEcAn.utils::ud_convert(ncdata_dir,"degrees","radians") + ncdata_dir <- neonmet.getVals( + dates = WSpdDates, product = availProducts[WSpdLoc[1]], site = site, + datetime = datetime, data_col = "windDirMean", QF_col = "windDirFinalQF", + units = c("degrees", "degrees") + ) + + ncdf4::ncvar_put(nc, varid = WDir.var, vals = ncdata_dir) + wdir_rad <- PEcAn.utils::ud_convert(ncdata_dir, "degrees", "radians") ncdata_e <- ncdata_spd * cos(wdir_rad) ncdata_n <- ncdata_spd * sin(wdir_rad) ncdf4::ncvar_put(nc, varid = Ewind.var, vals = ncdata_e) @@ -321,75 +358,79 @@ download.NEONmet <- function(sitename, outfolder, start_date, end_date, } else { 
PEcAn.logger::logger.warn("No NEON Wind data") } - + # NEON.DP1.00041 Soil temp (take 2cm level which is level 501) - soilTLoc = grep("DP1\\.00041",availProducts) - soilTDates <- neonmet.getDates(availDates,soilTLoc,startMon,endMon) - if (length(soilTDates>0)) { + soilTLoc <- grep("DP1\\.00041", availProducts) + soilTDates <- neonmet.getDates(availDates, soilTLoc, startMon, endMon) + if (length(soilTDates > 0)) { if (verbose) { PEcAn.logger::logger.info("Reading NEON Soil Temp") } - soilT.var <- ncdf4::ncvar_def(name = "soil_temperature", - units = "K", - dim = xytdim) + soilT.var <- ncdf4::ncvar_def( + name = "soil_temperature", + units = "K", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = soilT.var, verbose = verbose) - ncdata <- neonmet.getVals(dates=soilTDates,product=availProducts[soilTLoc[1]],site=site, - datetime=datetime,data_col="soilTempMean", - urlstring = "\\.00000\\.001\\.5..\\.(.*)_30_minute", - units=c("celsius", "K"),belowground=TRUE) + ncdata <- neonmet.getVals( + dates = soilTDates, product = availProducts[soilTLoc[1]], site = site, + datetime = datetime, data_col = "soilTempMean", + urlstring = "\\.00000\\.001\\.5..\\.(.*)_30_minute", + units = c("celsius", "K"), belowground = TRUE + ) ncdf4::ncvar_put(nc, varid = soilT.var, vals = ncdata) } else { PEcAn.logger::logger.warn("No NEON Soil Temp") } - + # NEON.DP1.00034 CO2 at tower top (alt NEON.DP3.00009 CO2 profile) - not yet avail, don't have variable names - ncdf4::nc_close(nc) - } #For loop + ncdf4::nc_close(nc) + } # For loop return(invisible(results)) -} #function +} # function -neonmet.getDates <- function(availDates,Loc,startMon,endMon) { - if (length(Loc)>0) { +neonmet.getDates <- function(availDates, Loc, startMon, endMon) { + if (length(Loc) > 0) { Dates <- unlist(availDates[Loc[1]]) GoodDates <- which((Dates >= startMon) & (Dates <= endMon)) - if (length(GoodDates)>0) { + if (length(GoodDates) > 0) { return(Dates[GoodDates]) } else { return(NULL) } - } else { + } else { return(NULL) - } + } } -neonmet.getVals <- function(dates,product,site,datetime, - data_col,QF_col="finalQF", QF=0, - urlstring = "\\.00000\\.000\\.(.*)30min",units,FillValue=NA,belowground=FALSE) { - ncdata <- rep(FillValue,length(datetime)) +neonmet.getVals <- function(dates, product, site, datetime, + data_col, QF_col = "finalQF", QF = 0, + urlstring = "\\.00000\\.000\\.(.*)30min", units, FillValue = NA, belowground = FALSE) { + ncdata <- rep(FillValue, length(datetime)) for (mon in dates) { neonData <- nneo::nneo_data(product_code = product, site_code = site, year_month = mon) urls <- neonData$data$files$name - if (length(urls)>0) { - #Extract and read 30 minute data from the highest vertical level among files returned - #If belowground, then take top most level (lowest value) - if (belowground==TRUE) { - url30 <- utils::head(sort(urls[grep(urlstring,urls)]),1) + if (length(urls) > 0) { + # Extract and read 30 minute data from the highest vertical level among files returned + # If belowground, then take top most level (lowest value) + if (belowground == TRUE) { + url30 <- utils::head(sort(urls[grep(urlstring, urls)]), 1) } else { - url30 <- utils::tail(sort(urls[grep(urlstring,urls)]),1) + url30 <- utils::tail(sort(urls[grep(urlstring, urls)]), 1) } - if (length(url30)!=0) { - csvData <- nneo::nneo_file(product_code = product, site_code = site, year_month = mon, filename = url30) - #Retreive time dimension and figure out where in array to put it - csvDateTime <- as.POSIXct(gsub("T"," ",csvData$startDateTime),tz="UTC") - arrLoc <- 
floor(as.numeric(difftime(csvDateTime,datetime[1],tz="UTC",units="hours"))*2)+1 + if (length(url30) != 0) { + csvData <- nneo::nneo_file(product_code = product, site_code = site, year_month = mon, filename = url30) + # Retrieve time dimension and figure out where in array to put it + csvDateTime <- as.POSIXct(gsub("T", " ", csvData$startDateTime), tz = "UTC") + arrLoc <- floor(as.numeric(difftime(csvDateTime, datetime[1], tz = "UTC", units = "hours")) * 2) + 1 csvVar <- csvData[[data_col]] - if (length(QF_col)!=0) { + if (length(QF_col) != 0) { csvQF <- csvData[[QF_col]] - csvVar[which(csvQF!=QF)] <- NA + csvVar[which(csvQF != QF)] <- NA } - if ((length(units)=2)&&(units[1]!=units[2])) { - csvVar <- PEcAn.utils::ud_convert(csvVar,units[1], units[2]) - #need a correction for precip or rate conversion /1800 + if ((length(units) == 2) && (units[1] != units[2])) { + csvVar <- PEcAn.utils::ud_convert(csvVar, units[1], units[2]) + # need a correction for precip or rate conversion /1800 } ncdata[arrLoc] <- csvVar } @@ -397,6 +438,3 @@ neonmet.getVals <- function(dates,product,site,datetime, } return(ncdata) } - - - diff --git a/modules/data.atmosphere/R/download.NLDAS.R b/modules/data.atmosphere/R/download.NLDAS.R index b9e3a43ec44..e1f6c471e74 100644 --- a/modules/data.atmosphere/R/download.NLDAS.R +++ b/modules/data.atmosphere/R/download.NLDAS.R @@ -17,20 +17,21 @@ ##' @author Christy Rollinson (with help from Ankur Desai) download.NLDAS <- function(outfolder, start_date, end_date, site_id, lat.in, lon.in, overwrite = FALSE, verbose = FALSE, ...) { - # Date stuff start_date <- as.POSIXlt(start_date, tz = "UTC") - end_date <- as.POSIXlt(end_date, tz = "UTC") + end_date <- as.POSIXlt(end_date, tz = "UTC") start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - site_id <- as.numeric(site_id) - outfolder <- paste0(outfolder, "_site_", paste0(site_id %/% 1e+09, "-", site_id %% 1e+09)) + end_year <- lubridate::year(end_date) + site_id <- as.numeric(site_id) + outfolder <- paste0(outfolder, "_site_", paste0(site_id %/% 1e+09, "-", site_id %% 1e+09)) NLDAS_start <- 1980 if (start_year < NLDAS_start) { - PEcAn.logger::logger.severe(sprintf('Input year range (%d:%d) exceeds the NLDAS range (%d:present)', - start_year, end_year, - NLDAS_start)) + PEcAn.logger::logger.severe(sprintf( + "Input year range (%d:%d) exceeds the NLDAS range (%d:present)", + start_year, end_year, + NLDAS_start + )) } lat.in <- as.numeric(lat.in) @@ -40,23 +41,31 @@ download.NLDAS <- function(outfolder, start_date, end_date, site_id, lat.in, lon ylist <- seq(start_year, end_year, by = 1) rows <- length(ylist) - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = "NLDAS", - stringsAsFactors = FALSE) - - var <- data.frame(DAP.name = c("N2-m_above_ground_Temperature", "LW_radiation_flux_downwards_surface", - "Pressure", "SW_radiation_flux_downwards_surface", "N10-m_above_ground_Zonal_wind_speed", - "N10-m_above_ground_Meridional_wind_speed", "N2-m_above_ground_Specific_humidity", "Precipitation_hourly_total"), - DAP.dim = c(2, 1, 1, 1, 2, 2, 2, 1), - CF.name = c("air_temperature", "surface_downwelling_longwave_flux_in_air", - "air_pressure", "surface_downwelling_shortwave_flux_in_air", "eastward_wind", "northward_wind", - "specific_humidity", "precipitation_flux"), - units = c("Kelvin", "W/m2", "Pascal", "W/m2", "m/s", "m/s", "g/g", "kg/m2/s")) +
results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = "NLDAS", + stringsAsFactors = FALSE + ) + + var <- data.frame( + DAP.name = c( + "N2-m_above_ground_Temperature", "LW_radiation_flux_downwards_surface", + "Pressure", "SW_radiation_flux_downwards_surface", "N10-m_above_ground_Zonal_wind_speed", + "N10-m_above_ground_Meridional_wind_speed", "N2-m_above_ground_Specific_humidity", "Precipitation_hourly_total" + ), + DAP.dim = c(2, 1, 1, 1, 2, 2, 2, 1), + CF.name = c( + "air_temperature", "surface_downwelling_longwave_flux_in_air", + "air_pressure", "surface_downwelling_shortwave_flux_in_air", "eastward_wind", "northward_wind", + "specific_humidity", "precipitation_flux" + ), + units = c("Kelvin", "W/m2", "Pascal", "W/m2", "m/s", "m/s", "g/g", "kg/m2/s") + ) time.stamps <- seq(0, 2300, by = 100) for (i in seq_len(rows)) { year <- ylist[i] @@ -72,29 +81,31 @@ download.NLDAS <- function(outfolder, start_date, end_date, site_id, lat.in, lon # Now we need to check whether we're ending on the right day day2 <- lubridate::yday(end_date) days.use <- day1:day2 - nday <- length(days.use) # Update nday + nday <- length(days.use) # Update nday } else if (i == 1) { # If this is the first of many years, we only need to worry about the start date day1 <- lubridate::yday(start_date) days.use <- day1:nday - nday <- length(days.use) # Update nday + nday <- length(days.use) # Update nday } else if (i == rows) { # If this is the last of many years, we only need to worry about the start date day2 <- lubridate::yday(end_date) days.use <- 1:day2 - nday <- length(days.use) # Update nday + nday <- length(days.use) # Update nday } - ntime <- nday * 24 # leap year or not;time slice (hourly) + ntime <- nday * 24 # leap year or not;time slice (hourly) loc.file <- file.path(outfolder, paste("NLDAS", year, "nc", sep = ".")) ## Create dimensions lat <- ncdf4::ncdim_def(name = "latitude", units = "degree_north", vals = lat.in, create_dimvar = TRUE) lon <- ncdf4::ncdim_def(name = "longitude", units = "degree_east", vals = lon.in, create_dimvar = TRUE) - time <- ncdf4::ncdim_def(name = "time", units = "sec", - vals = seq((min(days.use) + 1 - 1 / 24) * 24 * 360, (max(days.use) + 1 - 1/24) * 24 * 360, length.out = ntime), - create_dimvar = TRUE, - unlim = TRUE) + time <- ncdf4::ncdim_def( + name = "time", units = "sec", + vals = seq((min(days.use) + 1 - 1 / 24) * 24 * 360, (max(days.use) + 1 - 1 / 24) * 24 * 360, length.out = ntime), + create_dimvar = TRUE, + unlim = TRUE + ) dim <- list(lat, lon, time) var.list <- list() @@ -102,12 +113,14 @@ download.NLDAS <- function(outfolder, start_date, end_date, site_id, lat.in, lon # Defining our dimensions up front for (j in 1:nrow(var)) { - var.list[[j]] <- ncdf4::ncvar_def(name = as.character(var$CF.name[j]), - units = as.character(var$units[j]), - dim = dim, - missval = -999, - verbose = verbose) - dat.list[[j]] <- array(NA, dim = c(length(lat.in), length(lon.in), ntime)) # Go ahead and make the arrays + var.list[[j]] <- ncdf4::ncvar_def( + name = as.character(var$CF.name[j]), + units = as.character(var$units[j]), + dim = dim, + missval = -999, + verbose = verbose + ) + dat.list[[j]] <- array(NA, dim = c(length(lat.in), length(lon.in), ntime)) # Go ahead and make the arrays } names(var.list) <- names(dat.list) <- var$CF.name @@ -119,17 +132,23 @@ download.NLDAS <- function(outfolder, start_date, end_date, site_id, lat.in, 
lon doy <- stringr::str_pad(days.use[j], 3, pad = "0") for (h in seq_along(time.stamps)) { hr <- stringr::str_pad(time.stamps[h], 4, pad = "0") - dap_file <- paste0(dap_base, "/", year, "/", doy, "/", "NLDAS_FORA0125_H.A", year, - mo.now, day.mo, ".", hr, ".002.grb.ascii?") + dap_file <- paste0( + dap_base, "/", year, "/", doy, "/", "NLDAS_FORA0125_H.A", year, + mo.now, day.mo, ".", hr, ".002.grb.ascii?" + ) # Query lat/lon latlon <- curl::curl_download(paste0(dap_file, "lat[0:1:223],lon[0:1:463]")) lat.ind <- gregexpr("lat", latlon) lon.ind <- gregexpr("lon", latlon) - lats <- as.vector(utils::read.table(con <- textConnection(substr(latlon, lat.ind[[1]][3], - lon.ind[[1]][3] - 1)), sep = ",", fileEncoding = "\n", skip = 1)) - lons <- as.vector(utils::read.table(con <- textConnection(substr(latlon, lon.ind[[1]][3], - nchar(latlon))), sep = ",", fileEncoding = "\n", skip = 1)) + lats <- as.vector(utils::read.table(con <- textConnection(substr( + latlon, lat.ind[[1]][3], + lon.ind[[1]][3] - 1 + )), sep = ",", fileEncoding = "\n", skip = 1)) + lons <- as.vector(utils::read.table(con <- textConnection(substr( + latlon, lon.ind[[1]][3], + nchar(latlon) + )), sep = ",", fileEncoding = "\n", skip = 1)) lat.use <- which(lats - 0.125 / 2 <= lat.in & lats + 0.125 / 2 >= lat.in) lon.use <- which(lons - 0.125 / 2 <= lon.in & lons + 0.125 / 2 >= lon.in) @@ -142,21 +161,25 @@ download.NLDAS <- function(outfolder, start_date, end_date, site_id, lat.in, lon time.string <- paste0(time.string, "[0:1:0]") } dap_query <- paste(dap_query, - paste0(var$DAP.name[v], time.string, "[", lat.use, "][", lon.use, "]"), sep = ",") + paste0(var$DAP.name[v], time.string, "[", lat.use, "][", lon.use, "]"), + sep = "," + ) } dap_query <- substr(dap_query, 2, nchar(dap_query)) dap.out <- curl::curl_download(paste0(dap_file, dap_query)) for (v in seq_len(nrow(var))) { var.now <- var$DAP.name[v] - ind.1 <- gregexpr(paste(var.now, var.now, sep = "."), dap.out) - end.1 <- gregexpr(paste(var.now, "time", sep = "."), dap.out) + ind.1 <- gregexpr(paste(var.now, var.now, sep = "."), dap.out) + end.1 <- gregexpr(paste(var.now, "time", sep = "."), dap.out) dat.list[[v]][, , j * 24 - 24 + h] <- - utils::read.delim(con <- textConnection(substr(dap.out, - ind.1[[1]][1], end.1[[1]][2])), sep = ",", fileEncoding = "\n")[1, 1] - } # end variable loop - } # end hour - } # end day + utils::read.delim(con <- textConnection(substr( + dap.out, + ind.1[[1]][1], end.1[[1]][2] + )), sep = ",", fileEncoding = "\n")[1, 1] + } # end variable loop + } # end hour + } # end day ## change units of precip to kg/m2/s instead of hour accumulated precip dat.list[["precipitation_flux"]] <- dat.list[["precipitation_flux"]] / 3600 diff --git a/modules/data.atmosphere/R/download.NOAA_GEFS.R b/modules/data.atmosphere/R/download.NOAA_GEFS.R index e68bc7d166a..950e6dac512 100644 --- a/modules/data.atmosphere/R/download.NOAA_GEFS.R +++ b/modules/data.atmosphere/R/download.NOAA_GEFS.R @@ -1,98 +1,101 @@ ##' @title Download NOAA GEFS Weather Data -##' +##' ##' @section Information on Units: -##' Information on NOAA weather units can be found below. Note that the temperature is measured in degrees C, +##' Information on NOAA weather units can be found below. Note that the temperature is measured in degrees C, ##' but is converted at the station and downloaded in Kelvin. ##' @references https://www.ncdc.noaa.gov/crn/measurements.html -##' +##' ##' @section NOAA_GEFS General Information: -##' This function downloads NOAA GEFS weather data. 
GEFS is an ensemble of 21 different weather forecast models. -##' A 16 day forecast is avaliable every 6 hours. Each forecast includes information on a total of 8 variables. +##' This function downloads NOAA GEFS weather data. GEFS is an ensemble of 21 different weather forecast models. +##' A 16 day forecast is avaliable every 6 hours. Each forecast includes information on a total of 8 variables. ##' These are transformed from the NOAA standard to the internal PEcAn ##' standard. -##' +##' ##' @section Data Avaliability: ##' NOAA GEFS weather data is avaliable on a rolling 12 day basis; dates provided in "start_date" must be within this range. The end date can be any point after ##' that, but if the end date is beyond 16 days, only 16 days worth of forecast are recorded. Times are rounded down to the previous 6 hour forecast. NOAA ##' GEFS weather data isn't always posted immediately, and to compensate, this function adjusts requests made in the last two hours ##' back two hours (approximately the amount of time it takes to post the data) to make sure the most current forecast is used. -##' +##' ##' @section Data Save Format: ##' Data is saved in the netcdf format to the specified directory. File names reflect the precision of the data to the given range of days. ##' NOAA.GEFS.willow creek.3.2018-06-08T06:00.2018-06-24T06:00.nc specifies the forecast, using ensemble number 3 at willow creek on ##' June 6th, 2018 at 6:00 a.m. to June 24th, 2018 at 6:00 a.m. -##' +##' ##' @return A list of data frames is returned containing information about the data file that can be used to locate it later. Each ##' data frame contains information about one file. ##' ##' @param outfolder Directory where results should be written -##' @param start_date, Range of dates/times to be downloaded (default assumed to be time that function is run) +##' @param start_date, Range of dates/times to be downloaded (default assumed to be time that function is run) ##' @param end_date, end date for range of dates to be downloaded (default 16 days from start_date) ##' @param lat.in site latitude in decimal degrees ##' @param lon.in site longitude in decimal degrees ##' @param site_id The unique ID given to each site. This is used as part of the file name. -##' @param sitename Site name -##' @param username username from pecan workflow +##' @param sitename Site name +##' @param username username from pecan workflow ##' @param overwrite logical. Download a fresh version even if a local file with the same name already exists? ##' @param downscale logical, assumed True. Indicated whether data should be downscaled to hourly ##' @param ... 
Additional optional parameters ##' ##' @export -##' -##' @examples +##' +##' @examples ##' \dontrun{ -##' download.NOAA_GEFS(outfolder="~/Working/results", -##' lat.in= 45.805925, -##' lon.in = -90.07961, +##' download.NOAA_GEFS(outfolder="~/Working/results", +##' lat.in= 45.805925, +##' lon.in = -90.07961, ##' site_id = 676) ##' } -##' -##' @author Quinn Thomas, modified by K Zarada -##' +##' +##' @author Quinn Thomas, modified by K Zarada +##' download.NOAA_GEFS <- function(site_id, sitename = NULL, - username = 'pecan', + username = "pecan", lat.in, lon.in, outfolder, - start_date= Sys.Date(), + start_date = Sys.Date(), end_date = start_date + lubridate::days(16), downscale = TRUE, overwrite = FALSE, - ...){ - - forecast_date = as.Date(start_date) - forecast_time = (lubridate::hour(start_date) %/% 6)*6 - - end_hr = (as.numeric(difftime(end_date, start_date, units = 'hours')) %/% 6)*6 - + ...) { + forecast_date <- as.Date(start_date) + forecast_time <- (lubridate::hour(start_date) %/% 6) * 6 + + end_hr <- (as.numeric(difftime(end_date, start_date, units = "hours")) %/% 6) * 6 + model_name <- "NOAAGEFS_6hr" - model_name_ds <-"NOAAGEFS_1hr" #Downscaled NOAA GEFS + model_name_ds <- "NOAAGEFS_1hr" # Downscaled NOAA GEFS model_name_raw <- "NOAAGEFS_raw" - + PEcAn.logger::logger.info(paste0("Downloading GEFS for site ", site_id, " for ", start_date)) - + PEcAn.logger::logger.info(paste0("Overwrite existing files: ", overwrite)) - - - noaa_grid_download(lat_list = lat.in, - lon_list = lon.in, - end_hr = end_hr, - forecast_time = forecast_time, - forecast_date = forecast_date, - model_name_raw = model_name_raw, - output_directory = outfolder) - - results <- process_gridded_noaa_download(lat_list = lat.in, - lon_list = lon.in, - site_id = site_id, - downscale = downscale, - overwrite = overwrite, - forecast_date = forecast_date, - forecast_time = forecast_time, - model_name = model_name, - model_name_ds = model_name_ds, - model_name_raw = model_name_raw, - output_directory = outfolder) + + + noaa_grid_download( + lat_list = lat.in, + lon_list = lon.in, + end_hr = end_hr, + forecast_time = forecast_time, + forecast_date = forecast_date, + model_name_raw = model_name_raw, + output_directory = outfolder + ) + + results <- process_gridded_noaa_download( + lat_list = lat.in, + lon_list = lon.in, + site_id = site_id, + downscale = downscale, + overwrite = overwrite, + forecast_date = forecast_date, + forecast_time = forecast_time, + model_name = model_name, + model_name_ds = model_name_ds, + model_name_raw = model_name_raw, + output_directory = outfolder + ) return(results) } diff --git a/modules/data.atmosphere/R/download.PalEON.R b/modules/data.atmosphere/R/download.PalEON.R index 3618d2193b7..22d15a020f7 100644 --- a/modules/data.atmosphere/R/download.PalEON.R +++ b/modules/data.atmosphere/R/download.PalEON.R @@ -10,60 +10,61 @@ ##' @param sitename sitename ##' @param overwrite overwrite existing files? Default is FALSE ##' @param ... Other inputs -##' +##' ##' @author Betsy Cowdery download.PalEON <- function(sitename, outfolder, start_date, end_date, overwrite = FALSE, ...) 
{ - if (sitename == "Harvard Forest - Lyford Plots (PalEON PHA)") { site <- "PHA" - } # 1-650 done + } # 1-650 done else if (sitename == "Howland Forest- main tower (US-Ho1) (PalEON PHO)") { site <- "PHO" - } # 0-759 + } # 0-759 else if (sitename == "Billy\U2019s Lake (PalEON PBL)") { - #\U2019 = curly right single-quote, escaped to keep R from complaining about non-ASCII in code files + # \U2019 = curly right single-quote, escaped to keep R from complaining about non-ASCII in code files # (yes, the curly quote is present in the DB sitename) site <- "PBL" - } # 1-672 done + } # 1-672 done else if (sitename == "Deming Lake (PalEON PDL)") { site <- "PDL" - } # 1-673 done + } # 1-673 done else if (sitename == "Minden Bog (PalEON PMB)") { site <- "PMB" - } # 1-674 done + } # 1-674 done else if (sitename == "University of Notre Dame Environmental Research Center (PalEON UNDERC)") { site <- "PUN" - } # 1-675 done + } # 1-675 done else { - PEcAn.logger::logger.severe("Unknown site name") + PEcAn.logger::logger.severe("Unknown site name") } - + start_date <- as.POSIXlt(start_date, tz = "UTC") - end_date <- as.POSIXlt(end_date, tz = "UTC") + end_date <- as.POSIXlt(end_date, tz = "UTC") start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - ylist <- start_year:end_year - mlist <- 1:12 - vlist <- c("lwdown", "precipf", "psurf", "qair", "swdown", "tair", "wind") - + end_year <- lubridate::year(end_date) + ylist <- start_year:end_year + mlist <- 1:12 + vlist <- c("lwdown", "precipf", "psurf", "qair", "swdown", "tair", "wind") + system(paste0("mkdir -p ", outfolder)) - + V <- length(vlist) Y <- length(ylist) M <- length(mlist) rows <- V * Y * M - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = "PalEON", - stringsAsFactors = FALSE) - + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = "PalEON", + stringsAsFactors = FALSE + ) + files <- dir(outfolder) if (sum(!(vlist %in% files)) > 0) { - PEcAn.logger::logger.error("Don't have all variables downloaded") + PEcAn.logger::logger.error("Don't have all variables downloaded") } else { for (v in vlist) { print(sprintf("Checking %s", v)) @@ -71,7 +72,7 @@ download.PalEON <- function(sitename, outfolder, start_date, end_date, overwrite for (m in mlist) { file <- file.path(outfolder, v, sprintf("%s_%s_%04d_%02d.nc", site, v, y, m)) if (!(file.exists(file))) { - PEcAn.logger::logger.error("Missing met file") + PEcAn.logger::logger.error("Missing met file") } row <- (which(vlist == v) - 1) * Y * M + (which(ylist == y) - 1) * M + m # print(row) @@ -86,6 +87,6 @@ download.PalEON <- function(sitename, outfolder, start_date, end_date, overwrite print(sprintf("Finished %s", v)) } } - + return(invisible(results)) } # download.PalEON diff --git a/modules/data.atmosphere/R/download.PalEON_ENS.R b/modules/data.atmosphere/R/download.PalEON_ENS.R index 2858b6a4447..306f69a5c76 100644 --- a/modules/data.atmosphere/R/download.PalEON_ENS.R +++ b/modules/data.atmosphere/R/download.PalEON_ENS.R @@ -8,55 +8,56 @@ ##' @param sitename sitename ##' @param overwrite overwrite existing files? Default is FALSE ##' @param ... 
Other inputs -##' +##' ##' @author Betsy Cowdery, Mike Dietze download.PalEON_ENS <- function(sitename, outfolder, start_date, end_date, overwrite = FALSE, ...) { - ## parse dates start_date <- as.POSIXlt(start_date, tz = "UTC") - end_date <- as.POSIXlt(end_date, tz = "UTC") + end_date <- as.POSIXlt(end_date, tz = "UTC") start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - ylist <- start_year:end_year - - ## install iCommands: + end_year <- lubridate::year(end_date) + ylist <- start_year:end_year + + ## install iCommands: ## wget ftp://ftp.renci.org/pub/irods/releases/4.1.9/ubuntu14/irods-icommands-4.1.9-ubuntu14-x86_64.deb ## sudo dpkg -i irods-icommands-4.1.9-ubuntu14-x86_64.deb ## iinit ## then follow https://pods.iplantcollaborative.org/wiki/display/DS/Setting+Up+iCommands to connect - + ## Get the data from iPlant ## iget /iplant/home/crollinson/paleon/phase3_met_drivers/test_ensemble -r -T -P ## ###### NEED TO AUTOMATE !!!! - - + + ## extract the data and loop over ensemble members - dlpath <- "/home/carya/test_ensemble/" ## download path + dlpath <- "/home/carya/test_ensemble/" ## download path setwd(dlpath) - ens_zip <- dir(dlpath,pattern="tar.bz2",) + ens_zip <- dir(dlpath, pattern = "tar.bz2", ) results <- list() - for(i in seq_along(ens_zip)){ - system2("tar",paste("-xvjf",ens_zip[i])) ## unzip file - ens_folder <- strsplit(basename(ens_zip[i]),"\\.")[[1]][1] - + for (i in seq_along(ens_zip)) { + system2("tar", paste("-xvjf", ens_zip[i])) ## unzip file + ens_folder <- strsplit(basename(ens_zip[i]), "\\.")[[1]][1] + ens_files <- dir(ens_folder) rows <- length(ens_files) - ens_years <- sapply(strsplit(ens_files,"_",fixed=TRUE),function(n){ - as.numeric(sub(".nc","",n[length(n)])) - }) - - results[[i]] <- data.frame(file = ens_files, - host = rep(PEcAn.remote::fqdn(),rows), - mimetype = rep("application/x-netcdf",rows), - formatname = rep("ALMA",rows), ## would really like to switch to CF - startdate = paste0(ens_years, "-01-01 00:00:00"), - enddate = paste0(ens_years, "-12-31 23:59:59"), - dbfile.name = "PalEON_ENS", - stringsAsFactors = FALSE) + ens_years <- sapply(strsplit(ens_files, "_", fixed = TRUE), function(n) { + as.numeric(sub(".nc", "", n[length(n)])) + }) + + results[[i]] <- data.frame( + file = ens_files, + host = rep(PEcAn.remote::fqdn(), rows), + mimetype = rep("application/x-netcdf", rows), + formatname = rep("ALMA", rows), ## would really like to switch to CF + startdate = paste0(ens_years, "-01-01 00:00:00"), + enddate = paste0(ens_years, "-12-31 23:59:59"), + dbfile.name = "PalEON_ENS", + stringsAsFactors = FALSE + ) } - - if(length(results) == 1) results <- results[[1]] ## flatten to single met rather than ensemble - + + if (length(results) == 1) results <- results[[1]] ## flatten to single met rather than ensemble + return(invisible(results)) } # download.PalEON_ENS diff --git a/modules/data.atmosphere/R/download.US_WCr.R b/modules/data.atmosphere/R/download.US_WCr.R index 66615e6ef44..493b8a620e3 100644 --- a/modules/data.atmosphere/R/download.US_WCr.R +++ b/modules/data.atmosphere/R/download.US_WCr.R @@ -1,76 +1,84 @@ ##' @title download.US-WCr -##' +##' ##' @section General Description: ##' Obtains data from Ankur Desai's Willow Creek flux tower, and selects certain variables (NEE and LE) to return ##' Data is returned at the given timestep in the given range. -##' +##' ##' This data includes information on a number of flux variables. 
-##' +##' ##' The timestep parameter is measured in hours, but is then converted to half hours because the data's timestep ##' is every half hour. -##' +##' ##' @param start_date Start date/time data should be downloaded for ##' @param end_date End date/time data should be downloaded for ##' @param timestep How often to take data points from the file. Must be a multiple of 0.5 ##' @export -##' +##' ##' @author Luke Dramko download.US_WCr <- function(start_date, end_date, timestep = 1) { - timestep = 2 * timestep #data is actually every half hour - + timestep <- 2 * timestep # data is actually every half hour + if (timestep != as.integer(timestep)) { - PEcAn.logger::logger.severe(paste0("Invalid timestep ", timestep/2, ". Timesteps must be at ", - "least every half hour (timestep = 0.5).")) + PEcAn.logger::logger.severe(paste0( + "Invalid timestep ", timestep / 2, ". Timesteps must be at ", + "least every half hour (timestep = 0.5)." + )) } - - start_date <- as.POSIXct(start_date, tz="UTC") - end_date <- as.POSIXct(end_date, tz="UTC") - - nee_col = 9 # Column number of NEE - le_col = 10 # Column number of LE - + + start_date <- as.POSIXct(start_date, tz = "UTC") + end_date <- as.POSIXct(end_date, tz = "UTC") + + nee_col <- 9 # Column number of NEE + le_col <- 10 # Column number of LE + # Data is found here # Original url: http://flux.aos.wisc.edu/data/cheas/wcreek/flux/prelim/wcreek2018_flux.txt base_url <- "http://co2.aos.wisc.edu/data/cheas/wcreek/flux/prelim/wcreek" - - flux = NULL; - + + flux <- NULL + for (year in as.integer(format(start_date, "%Y")):as.integer(format(end_date, "%Y"))) { - url <- paste0(base_url, year, "_flux.txt") #Build proper url + url <- paste0(base_url, year, "_flux.txt") # Build proper url PEcAn.logger::logger.info(paste0("Reading data for year ", year)) print(url) - influx <- tryCatch(utils::read.table(url, sep="", header=FALSE), error=function(e) {NULL}, warning=function(e) {NULL}) - if (is.null(influx)) { #Error encountered in data fetching. + influx <- tryCatch(utils::read.table(url, sep = "", header = FALSE), error = function(e) { + NULL + }, warning = function(e) { + NULL + }) + if (is.null(influx)) { # Error encountered in data fetching. PEcAn.logger::logger.warn(paste0("Data not avaliable for year ", year, ". All values for ", year, " will be NA.")) # Determine the number of days in the year rows_in_year <- PEcAn.utils::ud_convert(lubridate::as.duration(lubridate::interval(as.POSIXct(paste0(year, "-01-01")), as.POSIXct(paste0(year + 1, "-01-01")))), "s", "day") - rows_in_year = rows_in_year * 48 # 48 measurements per day, one every half hour. - influx <- matrix(rep(-999, rows_in_year * 13), nrow=rows_in_year, ncol = 13) + rows_in_year <- rows_in_year * 48 # 48 measurements per day, one every half hour. + influx <- matrix(rep(-999, rows_in_year * 13), nrow = rows_in_year, ncol = 13) } flux <- rbind(flux, influx) } PEcAn.logger::logger.info("Flux data has been read.") - + # Contains only the data needed in a data frame - new.flux <- data.frame(DOY = flux[,3], - HRMIN = flux[,4], - NEE = as.numeric(flux[,nee_col]), - LE = as.numeric(flux[,le_col])) - + new.flux <- data.frame( + DOY = flux[, 3], + HRMIN = flux[, 4], + NEE = as.numeric(flux[, nee_col]), + LE = as.numeric(flux[, le_col]) + ) + # Calculate minutes from start year to find the right row to pull data from. 
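As a point of reference for the indexing below, here is a minimal base-R sketch of the same half-hourly row arithmetic (timestamps invented for illustration; the function itself uses lubridate intervals and PEcAn.utils::ud_convert, and additionally adds half a row when the start time falls on a half hour): each whole day contributes 48 half-hourly rows, each full hour contributes 2, and one row is added for R's 1-based indexing.
year_start_ex <- as.POSIXct("2018-01-01 00:00:00", tz = "UTC")
target_ex <- as.POSIXct("2018-07-23 06:00:00", tz = "UTC")
elapsed_days <- as.numeric(difftime(target_ex, year_start_ex, units = "days"))
hours_of_day <- floor((elapsed_days - floor(elapsed_days)) * 24) # hour of day, rounded down
start_row_ex <- floor(elapsed_days) * 48 + hours_of_day * 2 + 1 # 203 * 48 + 6 * 2 + 1 = 9757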
- year_start <- as.POSIXct(format(start_date, "%Y-01-01 00:00:00"), tz="UTC") - + year_start <- as.POSIXct(format(start_date, "%Y-01-01 00:00:00"), tz = "UTC") + start_interval <- lubridate::interval(year_start, start_date) - days <- lubridate::as.duration(start_interval) # Actually returns a number of seconds + days <- lubridate::as.duration(start_interval) # Actually returns a number of seconds days <- PEcAn.utils::ud_convert(as.integer(days), "s", "day") # Days, including fractional part, if any. - hours <- floor(PEcAn.utils::ud_convert(days - floor(days), "day", "hr")) # Extract the hour component, round to the previous hour. - if (days - floor(days) >= 0.5) { # Flux data is at half-hour precision + hours <- floor(PEcAn.utils::ud_convert(days - floor(days), "day", "hr")) # Extract the hour component, round to the previous hour. + if (days - floor(days) >= 0.5) { # Flux data is at half-hour precision hours <- hours + 0.5 } days <- floor(days) # Extract the whole day component - + start_row <- as.integer(days * 48 + hours * 2) - + data_interval <- lubridate::interval(start_date, end_date) days <- lubridate::as.duration(data_interval) # a number of seconds days <- PEcAn.utils::ud_convert(as.integer(days), "s", "day") @@ -80,36 +88,40 @@ download.US_WCr <- function(start_date, end_date, timestep = 1) { } days <- floor(days) end_row <- start_row + as.integer(days * 48 + hours * 2) - + # Calculations are one time point behind the actual start time; corrects the off-by-one error - start_row = start_row + 1; - end_row = end_row + 1; - + start_row <- start_row + 1 + end_row <- end_row + 1 + # Vectors that will contain the output data - out_nee = NULL - out_le = NULL - + out_nee <- NULL + out_le <- NULL + PEcAn.logger::logger.info("Starting at row (nonconverted) ") - print(new.flux[start_row,]) #print gives a much more interpretable output than pasting in the logger call. - - for (d in seq(start_row, end_row, by=timestep)) { - row = new.flux[d,] - + print(new.flux[start_row, ]) # print gives a much more interpretable output than pasting in the logger call. + + for (d in seq(start_row, end_row, by = timestep)) { + row <- new.flux[d, ] + # NEE values val <- as.numeric(row$NEE) - if (val == -999) { val <- NA } else { - val <- PEcAn.utils::misc.convert(row$NEE, "umol C m-2 s-1", "kg C m-2 s-1") + if (val == -999) { + val <- NA + } else { + val <- PEcAn.utils::misc.convert(row$NEE, "umol C m-2 s-1", "kg C m-2 s-1") } out_nee <- c(out_nee, val) - + # LE values val <- as.numeric(row$LE) - if (val == -999) { val <- NA } + if (val == -999) { + val <- NA + } out_le <- c(out_le, val) } - - return(list(nee=out_nee[-1], qle=out_le[-1])) # Start time not included in the forecast + + return(list(nee = out_nee[-1], qle = out_le[-1])) # Start time not included in the forecast } # download.wcr.R # This line is great for testing. -# download.US_WCr('2018-07-23 06:00', '2018-08-08 06:00', timestep=12) \ No newline at end of file +# download.US_WCr('2018-07-23 06:00', '2018-08-08 06:00', timestep=12) diff --git a/modules/data.atmosphere/R/download.US_Wlef.R b/modules/data.atmosphere/R/download.US_Wlef.R index b6389c3f24a..828891a9b7e 100644 --- a/modules/data.atmosphere/R/download.US_Wlef.R +++ b/modules/data.atmosphere/R/download.US_Wlef.R @@ -1,105 +1,114 @@ ##' @title download.US_Wlef -##' +##' ##' @section General Description: ##' Obtains data from Ankur Desai's WLEF/ Parks Fall flux tower, and selects certain variables (NEE and LE) to return ##' Data is returned at the given timestep in the given range. 
-##' +##' ##' This data includes information on a number of flux variables. -##' -##' +##' +##' ##' @param start_date Start date/time data should be downloaded for ##' @param end_date End date/time data should be downloaded for ##' @param timestep How often to take data points from the file. Must be integer ##' @export -##' +##' ##' @author Luke Dramko and K Zarada download.US_Wlef <- function(start_date, end_date, timestep = 1) { - if (timestep != as.integer(timestep)) { - PEcAn.logger::logger.severe(paste0("Invalid timestep ",timestep, ". Timesteps must be integer")) + PEcAn.logger::logger.severe(paste0("Invalid timestep ", timestep, ". Timesteps must be integer")) } - - start_date <- as.POSIXct(start_date, tz="UTC") - end_date <- as.POSIXct(end_date, tz="UTC") - - nee_col = 7 # Column number of NEE - le_col = 8 # Column number of LE - + + start_date <- as.POSIXct(start_date, tz = "UTC") + end_date <- as.POSIXct(end_date, tz = "UTC") + + nee_col <- 7 # Column number of NEE + le_col <- 8 # Column number of LE + # Data is found here # Original url: http://flux.aos.wisc.edu/data/cheas/wcreek/flux/prelim/wcreek2018_flux.txt base_url <- "http://flux.aos.wisc.edu/data/cheas/wlef/flux/prelim/" - - flux = NULL; - + + flux <- NULL + for (year in as.integer(format(start_date, "%Y")):as.integer(format(end_date, "%Y"))) { - url <- paste0(base_url, year,"/flux_", year, ".txt") #Build proper url + url <- paste0(base_url, year, "/flux_", year, ".txt") # Build proper url PEcAn.logger::logger.info(paste0("Reading data for year ", year)) print(url) - influx <- tryCatch(utils::read.table(url, header = T, sep = ""), error=function(e) {NULL}, warning=function(e) {NULL}) - if (is.null(influx)) { #Error encountered in data fetching. + influx <- tryCatch(utils::read.table(url, header = T, sep = ""), error = function(e) { + NULL + }, warning = function(e) { + NULL + }) + if (is.null(influx)) { # Error encountered in data fetching. PEcAn.logger::logger.warn(paste0("Data not avaliable for year ", year, ". All values for ", year, " will be NA.")) # Determine the number of days in the year rows_in_year <- PEcAn.utils::ud_convert(lubridate::as.duration(lubridate::interval(as.POSIXct(paste0(year, "-01-01")), as.POSIXct(paste0(year + 1, "-01-01")))), "s", "day") - rows_in_year = rows_in_year * 24 # 48 measurements per day, one every half hour. - influx <- matrix(rep(-999, rows_in_year * 13), nrow=rows_in_year, ncol = 13) + rows_in_year <- rows_in_year * 24 # 48 measurements per day, one every half hour. + influx <- matrix(rep(-999, rows_in_year * 13), nrow = rows_in_year, ncol = 13) } flux <- rbind(flux, influx) } PEcAn.logger::logger.info("Flux data has been read.") - + # Contains only the data needed in a data frame - new.flux <- data.frame(DOY = flux[,5], - HR = flux[,4], - NEE = as.numeric(flux[,nee_col]), - LE = as.numeric(flux[,le_col])) - + new.flux <- data.frame( + DOY = flux[, 5], + HR = flux[, 4], + NEE = as.numeric(flux[, nee_col]), + LE = as.numeric(flux[, le_col]) + ) + # Calculate minutes from start year to find the right row to pull data from. 
- year_start <- as.POSIXct(format(start_date, "%Y-01-01 00:00:00"), tz="UTC") - + year_start <- as.POSIXct(format(start_date, "%Y-01-01 00:00:00"), tz = "UTC") + start_interval <- lubridate::interval(year_start, start_date) - days <- lubridate::as.duration(start_interval) # Actually returns a number of seconds + days <- lubridate::as.duration(start_interval) # Actually returns a number of seconds days <- PEcAn.utils::ud_convert(as.integer(days), "s", "day") # Days, including fractional part, if any. - hours <- floor(PEcAn.utils::ud_convert(days - floor(days), "day", "hr")) # Extract the hour component, round to the previous hour. + hours <- floor(PEcAn.utils::ud_convert(days - floor(days), "day", "hr")) # Extract the hour component, round to the previous hour. days <- floor(days) # Extract the whole day component - + start_row <- as.integer(days * 24 + hours) - + data_interval <- lubridate::interval(start_date, end_date) days <- lubridate::as.duration(data_interval) # a number of seconds days <- PEcAn.utils::ud_convert(as.integer(days), "s", "day") hours <- floor(PEcAn.utils::ud_convert(as.integer(days - floor(days)), "day", "hr")) # Round down to the nearest half hour days <- floor(days) end_row <- start_row + as.integer(days * 24 + hours) - + # Calculations are one time point behind the actual start time; corrects the off-by-one error - start_row = start_row + 1; - end_row = end_row + 1; - + start_row <- start_row + 1 + end_row <- end_row + 1 + # Vectors that will contain the output data - out_nee = NULL - out_le = NULL - + out_nee <- NULL + out_le <- NULL + PEcAn.logger::logger.info("Starting at row (nonconverted) ") - print(new.flux[start_row,]) #print gives a much more interpretable output than pasting in the logger call. - - for (d in seq(start_row, end_row, by=timestep)) { - row = new.flux[d,] - + print(new.flux[start_row, ]) # print gives a much more interpretable output than pasting in the logger call. + + for (d in seq(start_row, end_row, by = timestep)) { + row <- new.flux[d, ] + # NEE values val <- as.numeric(row$NEE) - if (val == -999) { val <- NA } else { + if (val == -999) { + val <- NA + } else { val <- PEcAn.utils::misc.convert(row$NEE, "umol C m-2 s-1", "kg C m-2 s-1") } out_nee <- c(out_nee, val) - + # LE values val <- as.numeric(row$LE) - if (val == -999) { val <- NA } + if (val == -999) { + val <- NA + } out_le <- c(out_le, val) } - - return(list(nee=out_nee[-1], qle=out_le[-1])) # Start time not included in the forecast + + return(list(nee = out_nee[-1], qle = out_le[-1])) # Start time not included in the forecast } # download.US_Syv.R # This line is great for testing. -#download.US_Wlef('2018-07-23 06:00', '2018-08-08 06:00', timestep=12) +# download.US_Wlef('2018-07-23 06:00', '2018-08-08 06:00', timestep=12) diff --git a/modules/data.atmosphere/R/download.raw.met.module.R b/modules/data.atmosphere/R/download.raw.met.module.R index 1041a847d73..333b34a5941 100644 --- a/modules/data.atmosphere/R/download.raw.met.module.R +++ b/modules/data.atmosphere/R/download.raw.met.module.R @@ -1,6 +1,6 @@ #' @name download.raw.met.module #' @title download.raw.met.module -#' +#' #' @return A list of data frames is returned containing information about the data file that can be used to locate it later. Each #' data frame contains information about one file. 
#' @@ -21,9 +21,9 @@ #' @param username database username #' @param overwrite whether to force download.raw.met.module to proceed #' @param dbparms database settings from settings file -#' @param Ens.Flag default set to FALSE +#' @param Ens.Flag default set to FALSE +#' #' -#' #' @export #' #' @@ -46,83 +46,80 @@ username, overwrite = FALSE, dbparms, - Ens.Flag=FALSE) { - - outfolder <- file.path(dir,paste0(met, "_site_", str_ns)) - - pkg <- "PEcAn.data.atmosphere" - fcn <- paste0("download.", met) - - #Some data products can be forecasts instead of real time data. Others can be ensembles of data instead of a single source. Some can be both. - #Not all of the registration.xml files for each data source contains a or tag; therefore, we must check for their - #existence first. - forecast = FALSE - ensemble = FALSE - if (!is.null(register$forecast)) { - forecast = as.logical(register$forecast) - } - if (!is.null(register$ensemble) && !is.na(as.integer(register$ensemble)) && as.integer(register$ensemble) > 1) { - ensemble = as.integer(register$ensemble) #No ensembles is given by FALSE, while the presence of ensembles is given by the number of ensembles. - ifelse(is.na(ensemble), FALSE, ensemble) #If ensemble happens to be a character vector or something it can't convert, as.integer will evaluate to NA. - } - - if (register$scale == "regional") { - raw.id <- PEcAn.DB::convert_input( - input.id = NA, - outfolder = outfolder, - formatname = register$format$name, - mimetype = register$format$mimetype, - site.id = site.id, - start_date = start_date, - end_date = end_date, - pkg = pkg, - fcn = fcn, - con = con, - host = host, - write = TRUE, - overwrite = overwrite, - site_id = site.id, - lat.in = lat.in, - lon.in = lon.in, - model = input_met$model, - scenario = input_met$scenario, - ensemble_member = input_met$ensemble_member, - method = input_met$method, - pattern = met, - dbparms=dbparms, - ensemble = ensemble - ) - - } else if (register$scale == "site") { - # Site-level met - raw.id <- PEcAn.DB::convert_input( - input.id = NA, - outfolder = outfolder, - formatname = register$format$name, - mimetype = register$format$mimetype, - site.id = site.id, - start_date = start_date, - end_date = end_date, - pkg = pkg, - fcn = fcn, - con = con, - host = host, - write = TRUE, - overwrite = overwrite, - forecast = forecast, - ensemble = ensemble, - sitename = site$name, - username = username, - lat.in = lat.in, - lon.in = lon.in, - pattern = met, - site_id = site.id, - product = input_met$product - ) - - } else { - PEcAn.logger::logger.severe("Unknown register$scale") - } - - return(raw.id) -} # .download.raw.met.module + Ens.Flag = FALSE) { + outfolder <- file.path(dir, paste0(met, "_site_", str_ns)) + + pkg <- "PEcAn.data.atmosphere" + fcn <- paste0("download.", met) + + # Some data products can be forecasts instead of real time data. Others can be ensembles of data instead of a single source. Some can be both. + # Not all of the registration.xml files for each data source contains a or tag; therefore, we must check for their + # existence first. + forecast <- FALSE + ensemble <- FALSE + if (!is.null(register$forecast)) { + forecast <- as.logical(register$forecast) + } + if (!is.null(register$ensemble) && !is.na(as.integer(register$ensemble)) && as.integer(register$ensemble) > 1) { + ensemble <- as.integer(register$ensemble) # No ensembles is given by FALSE, while the presence of ensembles is given by the number of ensembles. 
+ ifelse(is.na(ensemble), FALSE, ensemble) # If ensemble happens to be a character vector or something it can't convert, as.integer will evaluate to NA. + } + + if (register$scale == "regional") { + raw.id <- PEcAn.DB::convert_input( + input.id = NA, + outfolder = outfolder, + formatname = register$format$name, + mimetype = register$format$mimetype, + site.id = site.id, + start_date = start_date, + end_date = end_date, + pkg = pkg, + fcn = fcn, + con = con, + host = host, + write = TRUE, + overwrite = overwrite, + site_id = site.id, + lat.in = lat.in, + lon.in = lon.in, + model = input_met$model, + scenario = input_met$scenario, + ensemble_member = input_met$ensemble_member, + method = input_met$method, + pattern = met, + dbparms = dbparms, + ensemble = ensemble + ) + } else if (register$scale == "site") { + # Site-level met + raw.id <- PEcAn.DB::convert_input( + input.id = NA, + outfolder = outfolder, + formatname = register$format$name, + mimetype = register$format$mimetype, + site.id = site.id, + start_date = start_date, + end_date = end_date, + pkg = pkg, + fcn = fcn, + con = con, + host = host, + write = TRUE, + overwrite = overwrite, + forecast = forecast, + ensemble = ensemble, + sitename = site$name, + username = username, + lat.in = lat.in, + lon.in = lon.in, + pattern = met, + site_id = site.id, + product = input_met$product + ) + } else { + PEcAn.logger::logger.severe("Unknown register$scale") + } + + return(raw.id) + } # .download.raw.met.module diff --git a/modules/data.atmosphere/R/download_noaa_gefs_efi.R b/modules/data.atmosphere/R/download_noaa_gefs_efi.R index 2837800f508..6ab3b5e654a 100644 --- a/modules/data.atmosphere/R/download_noaa_gefs_efi.R +++ b/modules/data.atmosphere/R/download_noaa_gefs_efi.R @@ -10,102 +10,110 @@ #' #' #' @author Alexis Helgeson -#' -download_NOAA_GEFS_EFI <- function(sitename, outfolder, start_date, site.lat, site.lon){ - #using the stage2 fcn mean that the met as already been downscaled and gapfilled to 1 hr intervals - met = PEcAn.data.atmosphere::noaa_stage2(cycle = 0, - version = "v12", - endpoint = "data.ecoforecast.org", - verbose = TRUE, - start_date = start_date) - - weather = met %>% - dplyr::filter(.data$reference_datetime == as.POSIXct(start_date,tz="UTC"), sitename == sitename) %>% +#' +download_NOAA_GEFS_EFI <- function(sitename, outfolder, start_date, site.lat, site.lon) { + # using the stage2 fcn mean that the met as already been downscaled and gapfilled to 1 hr intervals + met <- PEcAn.data.atmosphere::noaa_stage2( + cycle = 0, + version = "v12", + endpoint = "data.ecoforecast.org", + verbose = TRUE, + start_date = start_date + ) + + weather <- met %>% + dplyr::filter(.data$reference_datetime == as.POSIXct(start_date, tz = "UTC"), sitename == sitename) %>% dplyr::collect() %>% dplyr::select(.data$sitename, .data$prediction, .data$variable, .data$horizon, .data$parameter, .data$datetime) - + PEcAn.logger::logger.info("Met Aquired for", sitename, "on", as.character(start_date)) - #grab/calculate timestep, this might not be necessary b/c of the datetime column? - forecast_date = start_date - cycle = 0 + # grab/calculate timestep, this might not be necessary b/c of the datetime column? 
+ forecast_date <- start_date + cycle <- 0 hours_char <- unique(weather$horizon) forecast_times <- lubridate::as_datetime(forecast_date) + lubridate::hours(as.numeric(cycle)) + lubridate::hours(as.numeric(hours_char)) - - #the neon4cast fcn already has the weather variable names in cf standard + + # the neon4cast fcn already has the weather variable names in cf standard cf_var_names <- unique(weather$variable) - + noaa_data <- list() - - for(v in 1:length(cf_var_names)){ - + + for (v in 1:length(cf_var_names)) { noaa_data[v] <- NULL - #filter for met variable + # filter for met variable curr_var <- dplyr::filter(weather, .data$variable == cf_var_names[v]) - #remove ensemble member 31 does not cover full timeseries - #this is a HACK should add a generalized method for ensemble member outlier detection + # remove ensemble member 31, which does not cover the full timeseries + # this is a HACK; should add a generalized method for ensemble member outlier detection curr_var <- dplyr::filter(curr_var, .data$parameter <= 30) - noaa_data[[v]] <- list(value = curr_var$prediction, - ensembles = curr_var$parameter, - forecast.date = curr_var$datetime) - + noaa_data[[v]] <- list( + value = curr_var$prediction, + ensembles = curr_var$parameter, + forecast.date = curr_var$datetime + ) } - + names(noaa_data) <- cf_var_names - - #adding in windspeed and specific humidity + + # adding in windspeed and specific humidity cf_var_names1 <- c("surface_downwelling_longwave_flux_in_air", "surface_downwelling_shortwave_flux_in_air", "precipitation_flux", "air_pressure", "relative_humidity", "air_temperature", "specific_humidity", "wind_speed") cf_var_units1 <- c("Wm-2", "Wm-2", "kgm-2s-1", "Pa", "1", "K", "1", "ms-1") - #calculate specific humdity using realtive humidity (no unit conversion requied as relative humidity is in range 0-1), air temperature (no unit conversion already in K), and air pressure (no unit conversion already in Pa) + # calculate specific humidity using relative humidity (no unit conversion required as relative humidity is in range 0-1), air temperature (no unit conversion already in K), and air pressure (no unit conversion already in Pa) specific_humidity <- rep(NA, length(noaa_data$relative_humidity$value)) - specific_humidity[which(!is.na(noaa_data$relative_humidity$value))] <- PEcAn.data.atmosphere::rh2qair(rh = noaa_data$relative_humidity$value[which(!is.na(noaa_data$relative_humidity$value))], - T = noaa_data$air_temperature$value[which(!is.na(noaa_data$relative_humidity$value))], - press = noaa_data$air_pressure$value[which(!is.na(noaa_data$relative_humidity$value))]) - - #Calculate wind speed from east and north components + specific_humidity[which(!is.na(noaa_data$relative_humidity$value))] <- PEcAn.data.atmosphere::rh2qair( + rh = noaa_data$relative_humidity$value[which(!is.na(noaa_data$relative_humidity$value))], + T = noaa_data$air_temperature$value[which(!is.na(noaa_data$relative_humidity$value))], + press = noaa_data$air_pressure$value[which(!is.na(noaa_data$relative_humidity$value))] + ) + + # Calculate wind speed from east and north components wind_speed <- sqrt(noaa_data$eastward_wind$value^2 + noaa_data$northward_wind$value^2) - - forecast_noaa <- tibble::tibble(time = noaa_data$surface_downwelling_longwave_flux_in_air$forecast.date, - NOAA.member = noaa_data$surface_downwelling_longwave_flux_in_air$ensembles, - air_temperature = noaa_data$air_temperature$value, - air_pressure= noaa_data$air_pressure$value, - relative_humidity = noaa_data$relative_humidity$value, -
surface_downwelling_longwave_flux_in_air = noaa_data$surface_downwelling_longwave_flux_in_air$value, - surface_downwelling_shortwave_flux_in_air = noaa_data$surface_downwelling_shortwave_flux_in_air$value, - precipitation_flux = noaa_data$precipitation_flux$value, - specific_humidity = specific_humidity, - wind_speed = wind_speed) - + + forecast_noaa <- tibble::tibble( + time = noaa_data$surface_downwelling_longwave_flux_in_air$forecast.date, + NOAA.member = noaa_data$surface_downwelling_longwave_flux_in_air$ensembles, + air_temperature = noaa_data$air_temperature$value, + air_pressure = noaa_data$air_pressure$value, + relative_humidity = noaa_data$relative_humidity$value, + surface_downwelling_longwave_flux_in_air = noaa_data$surface_downwelling_longwave_flux_in_air$value, + surface_downwelling_shortwave_flux_in_air = noaa_data$surface_downwelling_shortwave_flux_in_air$value, + precipitation_flux = noaa_data$precipitation_flux$value, + specific_humidity = specific_humidity, + wind_speed = wind_speed + ) + PEcAn.logger::logger.info("Met df complied including specific humidity and wind speed") - - #create directory to save ensemble member if one does not already exist - output_path = file.path(outfolder, "noaa/NOAAGEFS_1hr/", sitename, "/", forecast_date, "/00/") - if(!dir.exists(output_path)){dir.create(output_path, recursive = TRUE)} - + + # create directory to save ensemble member if one does not already exist + output_path <- file.path(outfolder, "noaa/NOAAGEFS_1hr/", sitename, "/", forecast_date, "/00/") + if (!dir.exists(output_path)) { + dir.create(output_path, recursive = TRUE) + } + for (ens in 1:length(unique(forecast_noaa$NOAA.member))) { # i is the ensemble number - + forecast_noaa_ens <- forecast_noaa %>% dplyr::filter(.data$NOAA.member == ens) %>% dplyr::filter(!is.na(.data$air_temperature)) - + end_date <- forecast_noaa_ens %>% dplyr::summarise(max_time = max(.data$time)) - - identifier = paste("NOAA_GEFS", sitename, ens, format(as.POSIXct(forecast_date), "%Y-%m-%dT%H:%M"), - format(end_date$max_time, "%Y-%m-%dT%H:%M"), sep="_") - - fname <- paste0(identifier, ".nc") - #ensemble_folder = file.path(output_path, identifier) - output_file <- file.path(output_path,fname) - - #Write netCDF - if(!nrow(forecast_noaa_ens) == 0){ - PEcAn.data.atmosphere::write_noaa_gefs_netcdf(df = forecast_noaa_ens,ens, lat = site.lat, lon = site.lon, cf_units = cf_var_units1, output_file = output_file, overwrite = TRUE) - }else {next} - } - - return(PEcAn.logger::logger.info("Met download complete and saved as .nc files at", output_path)) -} + identifier <- paste("NOAA_GEFS", sitename, ens, format(as.POSIXct(forecast_date), "%Y-%m-%dT%H:%M"), + format(end_date$max_time, "%Y-%m-%dT%H:%M"), + sep = "_" + ) + fname <- paste0(identifier, ".nc") + # ensemble_folder = file.path(output_path, identifier) + output_file <- file.path(output_path, fname) + # Write netCDF + if (!nrow(forecast_noaa_ens) == 0) { + PEcAn.data.atmosphere::write_noaa_gefs_netcdf(df = forecast_noaa_ens, ens, lat = site.lat, lon = site.lon, cf_units = cf_var_units1, output_file = output_file, overwrite = TRUE) + } else { + next + } + } + return(PEcAn.logger::logger.info("Met download complete and saved as .nc files at", output_path)) +} diff --git a/modules/data.atmosphere/R/downscaling_helper_functions.R b/modules/data.atmosphere/R/downscaling_helper_functions.R index 85532c9a186..6ffbdf28d40 100644 --- a/modules/data.atmosphere/R/downscaling_helper_functions.R +++ b/modules/data.atmosphere/R/downscaling_helper_functions.R @@ -8,27 
+8,27 @@ #' @export #' -downscale_spline_to_hrly <- function(df,VarNames, hr = 1){ +downscale_spline_to_hrly <- function(df, VarNames, hr = 1) { # -------------------------------------- # purpose: interpolates debiased forecasts from 6-hourly to hourly # Creator: Laura Puckett, December 16 2018 # -------------------------------------- # @param: df, a dataframe of debiased 6-hourly forecasts time <- NULL - t0 = min(df$time) + t0 <- min(df$time) df <- df %>% dplyr::mutate(days_since_t0 = difftime(.data$time, t0, units = "days")) - interp.df.days <- seq(min(df$days_since_t0), as.numeric(max(df$days_since_t0)), 1/(24/hr)) + interp.df.days <- seq(min(df$days_since_t0), as.numeric(max(df$days_since_t0)), 1 / (24 / hr)) noaa_data_interp <- tibble::tibble(time = lubridate::as_datetime(t0 + interp.df.days, tz = "UTC")) - for(Var in 1:length(VarNames)){ + for (Var in 1:length(VarNames)) { curr_data <- stats::spline(x = df$days_since_t0, y = unlist(df[VarNames[Var]]), method = "fmm", xout = interp.df.days)$y noaa_data_interp <- cbind(noaa_data_interp, curr_data) } - names(noaa_data_interp) <- c("time",VarNames) + names(noaa_data_interp) <- c("time", VarNames) return(noaa_data_interp) } @@ -47,8 +47,7 @@ downscale_spline_to_hrly <- function(df,VarNames, hr = 1){ #' #' -downscale_ShortWave_to_hrly <- function(df,lat, lon, hr = 1){ - +downscale_ShortWave_to_hrly <- function(df, lat, lon, hr = 1) { ## downscale shortwave to hourly t0 <- min(df$time) @@ -57,7 +56,7 @@ downscale_ShortWave_to_hrly <- function(df,lat, lon, hr = 1){ dplyr::mutate(days_since_t0 = difftime(.data$time, t0, units = "days")) %>% dplyr::mutate(lead_var = dplyr::lead(.data$surface_downwelling_shortwave_flux_in_air, 1)) - interp.df.days <- seq(min(df$days_since_t0), as.numeric(max(df$days_since_t0)), 1/(24/hr)) + interp.df.days <- seq(min(df$days_since_t0), as.numeric(max(df$days_since_t0)), 1 / (24 / hr)) noaa_data_interp <- tibble::tibble(time = lubridate::as_datetime(t0 + interp.df.days)) @@ -67,13 +66,13 @@ downscale_ShortWave_to_hrly <- function(df,lat, lon, hr = 1){ data.hrly$group_6hr <- NA group <- 0 - for(i in 1:nrow(data.hrly)){ - if(!is.na(data.hrly$lead_var[i])){ + for (i in 1:nrow(data.hrly)) { + if (!is.na(data.hrly$lead_var[i])) { curr <- data.hrly$lead_var[i] data.hrly$surface_downwelling_shortwave_flux_in_air[i] <- curr group <- group + 1 data.hrly$group_6hr[i] <- group - }else{ + } else { data.hrly$surface_downwelling_shortwave_flux_in_air[i] <- curr data.hrly$group_6hr[i] <- group } @@ -81,16 +80,15 @@ downscale_ShortWave_to_hrly <- function(df,lat, lon, hr = 1){ ShortWave.ds <- data.hrly %>% dplyr::mutate(hour = lubridate::hour(.data$time)) %>% - dplyr::mutate(doy = lubridate::yday(.data$time) + .data$hour/(24/hr))%>% + dplyr::mutate(doy = lubridate::yday(.data$time) + .data$hour / (24 / hr)) %>% dplyr::mutate(rpot = downscale_solar_geom(.data$doy, as.vector(lon), as.vector(lat))) %>% # hourly sw flux calculated using solar geometry dplyr::group_by(.data$group_6hr) %>% dplyr::mutate(avg.rpot = mean(.data$rpot, na.rm = TRUE)) %>% # daily sw mean from solar geometry dplyr::ungroup() %>% - dplyr::mutate(surface_downwelling_shortwave_flux_in_air = ifelse(.data$avg.rpot > 0, .data$rpot* (.data$surface_downwelling_shortwave_flux_in_air/.data$avg.rpot),0)) %>% + dplyr::mutate(surface_downwelling_shortwave_flux_in_air = ifelse(.data$avg.rpot > 0, .data$rpot * (.data$surface_downwelling_shortwave_flux_in_air / .data$avg.rpot), 0)) %>% dplyr::select("time", "surface_downwelling_shortwave_flux_in_air") return(ShortWave.ds) - 
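# --- Illustrative sketch (not part of the patch) ---
# The scaling above spreads each 6-hourly shortwave mean across hours in
# proportion to potential radiation from solar geometry, so the window mean
# is preserved. A toy example with hypothetical numbers:
#   rpot <- c(0, 200, 600, 400)                 # potential SW for four slots, W m-2
#   sw_6hr_mean <- 250                          # observed mean over the same window
#   sw_hrly <- rpot * sw_6hr_mean / mean(rpot)  # 0.0 166.7 500.0 333.3
#   mean(sw_hrly)                               # 250: the window mean is conserved
# ----------------------------------------------------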
} @@ -104,44 +102,46 @@ downscale_ShortWave_to_hrly <- function(df,lat, lon, hr = 1){ #' @export #' -downscale_repeat_6hr_to_hrly <- function(df, varName, hr = 1){ - - #bind variables +downscale_repeat_6hr_to_hrly <- function(df, varName, hr = 1) { + # bind variables lead_var <- time <- NULL - #Get first time point + # Get first time point t0 <- min(df$time) df <- df %>% dplyr::select("time", tidyselect::all_of(varName)) %>% - #Calculate time difference + # Calculate time difference dplyr::mutate(days_since_t0 = difftime(.data$time, t0, units = "days")) %>% - #Shift valued back because the 6hr value represents the average over the - #previous 6hr period - dplyr::mutate(lead_var = dplyr::lead(df[,varName], 1)) - - #Create new vector with all hours - interp.df.days <- seq(min(df$days_since_t0), - as.numeric(max(df$days_since_t0)), - 1 / (24 / hr)) - - #Create new data frame + # Shift valued back because the 6hr value represents the average over the + # previous 6hr period + dplyr::mutate(lead_var = dplyr::lead(df[, varName], 1)) + + # Create new vector with all hours + interp.df.days <- seq( + min(df$days_since_t0), + as.numeric(max(df$days_since_t0)), + 1 / (24 / hr) + ) + + # Create new data frame noaa_data_interp <- tibble::tibble(time = lubridate::as_datetime(t0 + interp.df.days)) - #Join 1 hr data frame with 6 hr data frame + # Join 1 hr data frame with 6 hr data frame data.hrly <- noaa_data_interp %>% dplyr::left_join(df, by = "time") - #Fill in hours - for(i in 1:nrow(data.hrly)){ - if(!is.na(data.hrly$lead_var[i])){ + # Fill in hours + for (i in 1:nrow(data.hrly)) { + if (!is.na(data.hrly$lead_var[i])) { curr <- data.hrly$lead_var[i] - }else{ + } else { data.hrly$lead_var[i] <- curr } } - #Clean up data frame - data.hrly <- data.hrly %>% dplyr::select("time", "lead_var") %>% + # Clean up data frame + data.hrly <- data.hrly %>% + dplyr::select("time", "lead_var") %>% dplyr::arrange(.data$time) names(data.hrly) <- c("time", varName) @@ -162,7 +162,6 @@ downscale_repeat_6hr_to_hrly <- function(df, varName, hr = 1){ #' @export #' downscale_solar_geom <- function(doy, lon, lat) { - dt <- stats::median(diff(doy)) * 86400 # average number of seconds in time interval hr <- (doy - floor(doy)) * 24 # hour of day for each element of doy diff --git a/modules/data.atmosphere/R/extract.nc.R b/modules/data.atmosphere/R/extract.nc.R index c2f1f7c5001..4c3095d5656 100644 --- a/modules/data.atmosphere/R/extract.nc.R +++ b/modules/data.atmosphere/R/extract.nc.R @@ -17,67 +17,74 @@ ##' @author Betsy Cowdery extract.nc <- function(in.path, in.prefix, outfolder, start_date, end_date, slat, slon, overwrite = FALSE, verbose = FALSE, ...) 
{ - - in.path <- as.character(in.path) + in.path <- as.character(in.path) in.prefix <- as.character(in.prefix) outfolder <- as.character(outfolder) - slat <- eval(parse(text = slat)) - slon <- eval(parse(text = slon)) - + slat <- eval(parse(text = slat)) + slon <- eval(parse(text = slon)) + if (!file.exists(outfolder)) { dir.create(outfolder) } - + # Find closest coordinates to site - close <- closest_xy(slat, slon, infolder=in.path, infile=in.prefix) + close <- closest_xy(slat, slon, infolder = in.path, infile = in.prefix) x <- close$x y <- close$y - + start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - rows <- end_year - start_year + 1 - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = in.prefix, - stringsAsFactors = FALSE) - if(nchar(in.prefix)>0 & substr(in.prefix,nchar(in.prefix),nchar(in.prefix)) != ".") in.prefix = paste0(in.prefix,".") + end_year <- lubridate::year(end_date) + rows <- end_year - start_year + 1 + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = in.prefix, + stringsAsFactors = FALSE + ) + if (nchar(in.prefix) > 0 & substr(in.prefix, nchar(in.prefix), nchar(in.prefix)) != ".") in.prefix <- paste0(in.prefix, ".") for (year in start_year:end_year) { year_txt <- formatC(year, width = 4, format = "d", flag = "0") infile <- file.path(in.path, paste0(in.prefix, year_txt, ".nc")) outfile <- file.path(outfolder, paste0(in.prefix, year_txt, ".nc")) - + # create array with results row <- year - start_year + 1 - results$file[row] <- outfile - results$host[row] <- PEcAn.remote::fqdn() - results$startdate[row] <- paste0(year, "-01-01 00:00:00") - results$enddate[row] <- paste0(year, "-12-31 23:59:59") - results$mimetype[row] <- "application/x-netcdf" + results$file[row] <- outfile + results$host[row] <- PEcAn.remote::fqdn() + results$startdate[row] <- paste0(year, "-01-01 00:00:00") + results$enddate[row] <- paste0(year, "-12-31 23:59:59") + results$mimetype[row] <- "application/x-netcdf" results$formatname[row] <- "CF" - + if (file.exists(outfile) && !overwrite) { PEcAn.logger::logger.debug("File '", outfile, "' already exists, skipping to next file.") next } - + if (verbose) { - print(paste(c("ncks", list("-d", - paste0("x,", x, ",", x), "-d", - paste0("y,", y, ",", y), - infile, outfile)), collapse = " ")) + print(paste(c("ncks", list( + "-d", + paste0("x,", x, ",", x), "-d", + paste0("y,", y, ",", y), + infile, outfile + )), collapse = " ")) } - if(close$use_xy){ - system2("ncks", list("-d", paste0("x,", x, ",", x), "-d", - paste0("y,", y, ",", y), infile, outfile)) + if (close$use_xy) { + system2("ncks", list( + "-d", paste0("x,", x, ",", x), "-d", + paste0("y,", y, ",", y), infile, outfile + )) } else { - system2("ncks", list("-d", paste0("latitude,", x, ",", x), "-d", - paste0("longitude,", y, ",", y), infile, outfile)) + system2("ncks", list( + "-d", paste0("latitude,", x, ",", x), "-d", + paste0("longitude,", y, ",", y), infile, outfile + )) } - + ## Hack to ensure lat and lon are consistant nc <- ncdf4::nc_open(outfile, write = TRUE) ncdf4::ncvar_put(nc, "latitude", vals = slat) diff --git a/modules/data.atmosphere/R/extract.nc.module.R b/modules/data.atmosphere/R/extract.nc.module.R index 2c07bdcdd80..d6a36658fd3 100644 
--- a/modules/data.atmosphere/R/extract.nc.module.R +++ b/modules/data.atmosphere/R/extract.nc.module.R @@ -1,46 +1,47 @@ ##' @export -.extract.nc.module <- function(cf.id, register, dir, met, str_ns, site, new.site, con, +.extract.nc.module <- function(cf.id, register, dir, met, str_ns, site, new.site, con, start_date, end_date, host, overwrite = FALSE) { PEcAn.logger::logger.info("Site Extraction") - + input.id <- cf.id[1] - if(host$name == "localhost"){ + if (host$name == "localhost") { outfolder <- file.path(dir, paste0(met, "_CF_site_", str_ns)) } else { - if(is.null(host$folder)){ + if (is.null(host$folder)) { PEcAn.logger::logger.severe("host$folder required when running extract.nc.module for remote servers") } else { outfolder <- file.path(host$folder, paste0(met, "_CF_site_", str_ns)) } } - pkg <- "PEcAn.data.atmosphere" - fcn <- "extract.nc" + pkg <- "PEcAn.data.atmosphere" + fcn <- "extract.nc" formatname <- "CF Meteorology" - mimetype <- "application/x-netcdf" - -if (exists(paste0("extract.nc.", met))) fcn <- paste0("extract.nc.", met) - + mimetype <- "application/x-netcdf" + + if (exists(paste0("extract.nc.", met))) fcn <- paste0("extract.nc.", met) + + + ready.id <- PEcAn.DB::convert_input( + input.id = input.id, + outfolder = outfolder, + formatname = formatname, + mimetype = mimetype, + site.id = site$id, + start_date = start_date, + end_date = end_date, + pkg = pkg, + fcn = fcn, + con = con, host = host, + write = TRUE, + slat = new.site$lat, slon = new.site$lon, + newsite = new.site$id, + overwrite = overwrite, + exact.dates = FALSE, + ensemble = register$ensemble %>% as.numeric() + ) - ready.id <- PEcAn.DB::convert_input(input.id = input.id, - outfolder = outfolder, - formatname = formatname, - mimetype = mimetype, - site.id = site$id, - start_date = start_date, - end_date = end_date, - pkg = pkg, - fcn = fcn, - con = con, host = host, - write = TRUE, - slat = new.site$lat, slon = new.site$lon, - newsite = new.site$id, - overwrite = overwrite, - exact.dates = FALSE, - ensemble = register$ensemble %>% as.numeric()) - PEcAn.logger::logger.info("Finished Extracting Met") - + return(ready.id) } # .extract.nc.module - diff --git a/modules/data.atmosphere/R/extract.success.R b/modules/data.atmosphere/R/extract.success.R index 0c23d70e025..6fa3169d094 100644 --- a/modules/data.atmosphere/R/extract.success.R +++ b/modules/data.atmosphere/R/extract.success.R @@ -1,22 +1,21 @@ extract.success <- function(in.path, in.prefix, outfolder) { - - in.path <- as.character(in.path) + in.path <- as.character(in.path) in.prefix <- as.character(in.prefix) outfolder <- as.character(outfolder) - - infiles <- dir(in.path, in.prefix) + + infiles <- dir(in.path, in.prefix) infiles.nc <- infiles[grep(pattern = "*.nc", infiles)] - - outfiles <- dir(outfolder) + + outfiles <- dir(outfolder) outfiles.nc <- outfiles[grep(pattern = "*.nc", outfiles)] outfiles.h5 <- outfiles[grep(pattern = "*.h5", outfiles)] - + if (length(outfiles) == 0) { s <- FALSE } else if (length(outfiles.nc) == length(infiles) || length(outfiles.h5) == length(infiles) * 12) { s <- TRUE } else { - PEcAn.logger::logger.severe("Uh oh - we should not be here") + PEcAn.logger::logger.severe("Uh oh - we should not be here") } return(s) } # extract.success diff --git a/modules/data.atmosphere/R/extract_ERA5.R b/modules/data.atmosphere/R/extract_ERA5.R index fe5e5b7b5ab..72472ba631c 100644 --- a/modules/data.atmosphere/R/extract_ERA5.R +++ b/modules/data.atmosphere/R/extract_ERA5.R @@ -21,18 +21,17 @@ #' @export #' @examples #' \dontrun{ 
-#' point.data <- ERA5_extract(sslat=40, slon=-120, years=c(1990:1995), vars=NULL) -#' -# point.data %>% -#' purrr::map(~xts::apply.daily(.x, mean)) +#' point.data <- ERA5_extract(sslat = 40, slon = -120, years = c(1990:1995), vars = NULL) #' +#' # point.data %>% +#' purrr::map(~ xts::apply.daily(.x, mean)) #' } extract.nc.ERA5 <- - function(slat , - slon , - in.path , + function(slat, + slon, + in.path, start_date, - end_date , + end_date, outfolder, in.prefix, newsite, @@ -40,127 +39,126 @@ extract.nc.ERA5 <- overwrite = FALSE, verbose = FALSE, ...) { - # library(xts) - # Distributing the job between whatever core is available. - - years <- seq(lubridate::year(start_date), - lubridate::year(end_date), - 1 + # Distributing the job between whatever core is available. + + years <- seq( + lubridate::year(start_date), + lubridate::year(end_date), + 1 ) ensemblesN <- seq(1, 10) - - - tryCatch({ - #for each ensemble - one.year.out <- years %>% - purrr::map(function(year) { - - # for each year - point.data <- ensemblesN %>% - purrr::map(function(ens) { - - - ncfile <- file.path(in.path, paste0(in.prefix, year, ".nc")) - - #printing out initial information. - if (verbose) { - PEcAn.logger::logger.info(paste0("Trying to open :", ncfile, " ")) - - if (!file.exists(ncfile)) - PEcAn.logger::logger.severe("The nc file was not found.") - - #msg - PEcAn.logger::logger.info(paste0(year, " is being processed ", "for ensemble #", ens, " ")) - } - - #open the file - nc_data <- ncdf4::nc_open(ncfile) - # time stamp - - t <- ncdf4::ncvar_get(nc_data, "time") - tunits <- ncdf4::ncatt_get(nc_data, 'time') - tustr <- strsplit(tunits$units, " ") - timestamp <- - as.POSIXct(t * 3600, tz = "UTC", origin = tustr[[1]][3]) - try(ncdf4::nc_close(nc_data)) - - - # set the vars - if (is.null(vars)) - vars <- names(nc_data$var) - # for the variables extract the data - all.data.point <- vars %>% - purrr::set_names(vars) %>% - purrr::map_dfc(function(vname) { - if (verbose) { - PEcAn.logger::logger.info(paste0(" \t ",vname, "is being extracted ! ")) + + + tryCatch( + { + # for each ensemble + one.year.out <- years %>% + purrr::map(function(year) { + # for each year + point.data <- ensemblesN %>% + purrr::map(function(ens) { + ncfile <- file.path(in.path, paste0(in.prefix, year, ".nc")) + + # printing out initial information. 
+ if (verbose) { + PEcAn.logger::logger.info(paste0("Trying to open :", ncfile, " ")) + + if (!file.exists(ncfile)) { + PEcAn.logger::logger.severe("The nc file was not found.") } - - brick.tmp <- - raster::brick(ncfile, varname = vname, level = ens) - nn <- - raster::extract(brick.tmp, - sp::SpatialPoints(cbind(slon, slat)), - method = 'simple') - if (verbose) { - if (!is.numeric(nn)) { - PEcAn.logger::logger.severe(paste0( - "Expected raster object to be numeric, but it has type `", - paste0(typeof(nn), collapse = " "), - "`" - )) + + # msg + PEcAn.logger::logger.info(paste0(year, " is being processed ", "for ensemble #", ens, " ")) + } + + # open the file + nc_data <- ncdf4::nc_open(ncfile) + # time stamp + + t <- ncdf4::ncvar_get(nc_data, "time") + tunits <- ncdf4::ncatt_get(nc_data, "time") + tustr <- strsplit(tunits$units, " ") + timestamp <- + as.POSIXct(t * 3600, tz = "UTC", origin = tustr[[1]][3]) + try(ncdf4::nc_close(nc_data)) + + + # set the vars + if (is.null(vars)) { + vars <- names(nc_data$var) + } + # for the variables extract the data + all.data.point <- vars %>% + purrr::set_names(vars) %>% + purrr::map_dfc(function(vname) { + if (verbose) { + PEcAn.logger::logger.info(paste0(" \t ", vname, "is being extracted ! ")) } - } - - # replacing the missing/filled values with NA - nn[nn == nc_data$var[[vname]]$missval] <- NA - # send out the extracted var as a new col - t(nn) - - }) - - #close the connection - - # send out as xts object - xts::xts(all.data.point, order.by = timestamp) - }) %>% - stats::setNames(paste0("ERA_ensemble_", ensemblesN)) - - #Merge mean and the speard - return(point.data) - - }) %>% - stats::setNames(years) - - - # The order of one.year.out is year and then Ens - Mainly because of the spead / I wanted to touch each file just once. - # This now changes the order to ens - year - point.data <- ensemblesN %>% - purrr::map(function(Ensn) { - rbind.xts <- do.call("::", list("xts", "rbind.xts")) - one.year.out %>% - purrr::map( ~ .x [[Ensn]]) %>% - do.call("rbind.xts", .) - }) - - - # Calling the met2CF inside extract bc in met process met2CF comes before extract ! - out <-met2CF.ERA5( - slat, - slon, - start_date, - end_date, - sitename=newsite, - outfolder, - point.data, - overwrite = FALSE, - verbose = verbose - ) - return(out) - - }, error = function(e) { - PEcAn.logger::logger.severe(paste0(conditionMessage(e))) - }) - - } \ No newline at end of file + + brick.tmp <- + raster::brick(ncfile, varname = vname, level = ens) + nn <- + raster::extract(brick.tmp, + sp::SpatialPoints(cbind(slon, slat)), + method = "simple" + ) + if (verbose) { + if (!is.numeric(nn)) { + PEcAn.logger::logger.severe(paste0( + "Expected raster object to be numeric, but it has type `", + paste0(typeof(nn), collapse = " "), + "`" + )) + } + } + + # replacing the missing/filled values with NA + nn[nn == nc_data$var[[vname]]$missval] <- NA + # send out the extracted var as a new col + t(nn) + }) + + # close the connection + + # send out as xts object + xts::xts(all.data.point, order.by = timestamp) + }) %>% + stats::setNames(paste0("ERA_ensemble_", ensemblesN)) + + # Merge mean and the speard + return(point.data) + }) %>% + stats::setNames(years) + + + # The order of one.year.out is year and then Ens - Mainly because of the spead / I wanted to touch each file just once. 
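# --- Illustrative sketch (not part of the patch) ---
# A hypothetical picture of the reshape performed below: with two years and
# two ensemble members,
#   one.year.out <- list(`1990` = list(m1, m2), `1991` = list(m1, m2))
# picking member k from every year and binding the years gives one continuous
# series per member, e.g.
#   purrr::map(one.year.out, ~ .x[[k]]) %>% do.call(xts::rbind.xts, .)
# which is what the following loop does for each of the 10 ensemble members.
# ----------------------------------------------------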
+ # This now changes the order to ens - year + point.data <- ensemblesN %>% + purrr::map(function(Ensn) { + rbind.xts <- do.call("::", list("xts", "rbind.xts")) + one.year.out %>% + purrr::map(~ .x[[Ensn]]) %>% + do.call("rbind.xts", .) + }) + + + # Calling the met2CF inside extract bc in met process met2CF comes before extract ! + out <- met2CF.ERA5( + slat, + slon, + start_date, + end_date, + sitename = newsite, + outfolder, + point.data, + overwrite = FALSE, + verbose = verbose + ) + return(out) + }, + error = function(e) { + PEcAn.logger::logger.severe(paste0(conditionMessage(e))) + } + ) + } diff --git a/modules/data.atmosphere/R/extract_local_CMIP5.R b/modules/data.atmosphere/R/extract_local_CMIP5.R index 8cadc9bf16b..3a085916fa1 100644 --- a/modules/data.atmosphere/R/extract_local_CMIP5.R +++ b/modules/data.atmosphere/R/extract_local_CMIP5.R @@ -7,9 +7,9 @@ ##' @description This function extracts CMIP5 data from grids that have been downloaded and stored locally. ##' Files are saved as a netCDF file in CF conventions at *DAILY* resolution. Note: At this point ##' in time, variables that are only available at a native monthly resolution will be repeated to -##' give a pseudo-daily record (and can get dealt with in the downscaling workflow). These files +##' give a pseudo-daily record (and can get dealt with in the downscaling workflow). These files ##' are ready to be used in the general PEcAn workflow or fed into the downscaling workflow. -# ----------------------------------- +# ----------------------------------- # Parameters # ----------------------------------- ##' @param outfolder - directory where output files will be stored @@ -21,8 +21,8 @@ ##' @param model which GCM to extract data from ##' @param scenario which experiment to pull (p1000, historical, ...) ##' @param ensemble_member which CMIP5 experiment ensemble member -##' @param date.origin (optional) specify the date of origin for timestamps in the files being read. -##' If NULL defaults to 1850 for historical simulations (except MPI-ESM-P) and +##' @param date.origin (optional) specify the date of origin for timestamps in the files being read. +##' If NULL defaults to 1850 for historical simulations (except MPI-ESM-P) and ##' 850 for p1000 simulations (plus MPI-ESM-P historical). Format: YYYY-MM-DD ##' @param adjust.pr - adjustment factor fore precipitation when the extracted values seem off ##' @param overwrite logical. Download a fresh version even if a local file with the same name already exists? @@ -31,25 +31,24 @@ ##' @export ##' # ----------------------------------- -extract.local.CMIP5 <- function(outfolder, in.path, start_date, end_date, lat.in, lon.in, - model , scenario , ensemble_member = "r1i1p1", date.origin=NULL, adjust.pr=1, - overwrite = FALSE, verbose = FALSE, ...){ - +extract.local.CMIP5 <- function(outfolder, in.path, start_date, end_date, lat.in, lon.in, + model, scenario, ensemble_member = "r1i1p1", date.origin = NULL, adjust.pr = 1, + overwrite = FALSE, verbose = FALSE, ...) 
{ # Some GCMs don't do leap year; we'll have to deal with this separately # no.leap <- c("bcc-csm1-1", "CCSM4") - - if(is.null(date.origin)){ - if(scenario == "p1000" | GCM=="MPI-ESM-P") { - date.origin=as.Date("850-01-01") - } else if(scenario == "historical" & GCM!="MPI-ESM-P") { - date.origin=as.Date("1850-01-01") + + if (is.null(date.origin)) { + if (scenario == "p1000" | GCM == "MPI-ESM-P") { + date.origin <- as.Date("850-01-01") + } else if (scenario == "historical" & GCM != "MPI-ESM-P") { + date.origin <- as.Date("1850-01-01") } else { # PEcAn.logger::logger.error("No date.origin specified and scenario not implemented yet") - date.origin=as.Date("0001-01-01") + date.origin <- as.Date("0001-01-01") } - } - - + } + + # Days per month dpm <- lubridate::days_in_month(1:12) @@ -57,126 +56,128 @@ extract.local.CMIP5 <- function(outfolder, in.path, start_date, end_date, lat.in start_date <- as.POSIXlt(start_date, tz = "GMT") end_date <- as.POSIXlt(end_date, tz = "GMT") start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) + end_year <- lubridate::year(end_date) - lat.in = as.numeric(lat.in) - lon.in = as.numeric(lon.in) + lat.in <- as.numeric(lat.in) + lon.in <- as.numeric(lon.in) # dir.nldas="http://hydro1.sci.gsfc.nasa.gov/thredds/dodsC/NLDAS_FORA0125_H.002" - dir.create(outfolder, showWarnings=FALSE, recursive=TRUE) - - ylist <- seq(start_year,end_year,by=1) - rows = length(ylist) - results <- data.frame(file=character(rows), host=character(rows), - mimetype=character(rows), formatname=character(rows), - startdate=character(rows), enddate=character(rows), - dbfile.name = "NLDAS", - stringsAsFactors = FALSE - ) - + dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) + + ylist <- seq(start_year, end_year, by = 1) + rows <- length(ylist) + results <- data.frame( + file = character(rows), host = character(rows), + mimetype = character(rows), formatname = character(rows), + startdate = character(rows), enddate = character(rows), + dbfile.name = "NLDAS", + stringsAsFactors = FALSE + ) + # The table of var name conversion # psl; sfcWind; tasmax; tasmin; huss - #"co2", "mole_fraction_of_carbon_dioxide_in_air", "1e-6" - var <- data.frame(DAP.name = c("tas", "tasmax", "tasmin", "rlds", "ps", "rsds", "uas", "vas", "sfcWind", "ua", "va", "huss", "pr", "co2mass"), - CF.name = c("air_temperature", "air_temperature_maximum", "air_temperature_minimum", - "surface_downwelling_longwave_flux_in_air", - "air_pressure", "surface_downwelling_shortwave_flux_in_air", - "eastward_wind", "northward_wind", "wind_speed", "eastward_wind", "northward_wind", - "specific_humidity", "precipitation_flux", "mole_fraction_of_carbon_dioxide_in_air"), - units = c("Kelvin", "Kelvin", "Kelvin", "W/m2", "Pascal", "W/m2", "m/s", "m/s", "m/s", "m/s", "m/s", "g/g", "kg/m2/s", "1e-6")) - - # Some constants for converting CO2 if it's there + # "co2", "mole_fraction_of_carbon_dioxide_in_air", "1e-6" + var <- data.frame( + DAP.name = c("tas", "tasmax", "tasmin", "rlds", "ps", "rsds", "uas", "vas", "sfcWind", "ua", "va", "huss", "pr", "co2mass"), + CF.name = c( + "air_temperature", "air_temperature_maximum", "air_temperature_minimum", + "surface_downwelling_longwave_flux_in_air", + "air_pressure", "surface_downwelling_shortwave_flux_in_air", + "eastward_wind", "northward_wind", "wind_speed", "eastward_wind", "northward_wind", + "specific_humidity", "precipitation_flux", "mole_fraction_of_carbon_dioxide_in_air" + ), + units = c("Kelvin", "Kelvin", "Kelvin", "W/m2", "Pascal", "W/m2", "m/s", "m/s", 
"m/s", "m/s", "m/s", "g/g", "kg/m2/s", "1e-6") + ) + + # Some constants for converting CO2 if it's there co2.molmass <- 44.01 # g/mol https://en.wikipedia.org/wiki/Carbon_dioxide#Atmospheric_concentration atm.molmass <- 28.97 # g/mol https://en.wikipedia.org/wiki/Density_of_air atm.masstot <- 5.1480e18 # kg https://journals.ametsoc.org/doi/10.1175/JCLI-3299.1 - atm.mol <- atm.masstot/atm.molmass + atm.mol <- atm.masstot / atm.molmass # Figuring out what we have daily for and what we only have monthly for path.day <- file.path(in.path, "day") path.mo <- file.path(in.path, "month") - + vars.gcm.day <- dir(path.day) - vars.gcm.mo <- dir(path.mo) + vars.gcm.mo <- dir(path.mo) # If our extraction bath is different from what we had, modify it - if("atmos" %in% vars.gcm.day){ - path.day <- file.path(in.path, "day", "atmos", "day", ensemble_member, "latest") - path.mo <- file.path(in.path, "mon", "atmos", "Amon", ensemble_member, "latest") + if ("atmos" %in% vars.gcm.day) { + path.day <- file.path(in.path, "day", "atmos", "day", ensemble_member, "latest") + path.mo <- file.path(in.path, "mon", "atmos", "Amon", ensemble_member, "latest") - vars.gcm.day <- dir(path.day) - vars.gcm.mo <- dir(path.mo) + vars.gcm.day <- dir(path.day) + vars.gcm.mo <- dir(path.mo) } vars.gcm.mo <- vars.gcm.mo[!vars.gcm.mo %in% vars.gcm.day] - + vars.gcm <- c(vars.gcm.day, vars.gcm.mo) - + # Rewriting the dap name to get the closest variable that we have for the GCM (some only give uss stuff at sea level) - if(!("huss" %in% vars.gcm)) var$DAP.name[var$DAP.name=="huss"] <- "hus" - if(!("ps" %in% vars.gcm)) var$DAP.name[var$DAP.name=="ps"] <- "psl" + if (!("huss" %in% vars.gcm)) var$DAP.name[var$DAP.name == "huss"] <- "hus" + if (!("ps" %in% vars.gcm)) var$DAP.name[var$DAP.name == "ps"] <- "psl" # Making sure we're only trying to grab the variables we have (i.e. 
don't try sfcWind if we don't have it) - var <- var[var$DAP.name %in% vars.gcm,] - + var <- var[var$DAP.name %in% vars.gcm, ] + # Native CMIP5 file structure is organized by variable and then with multiple years per file # this means we need to do some funky things to get all variables for one year into a single file var$DAP.name <- as.character(var$DAP.name) - + files.var <- list() - n.file=0 - for(v in var$DAP.name){ - files.var[[v]] <- list() - if(v %in% vars.gcm.day){ - v.res="day" + n.file <- 0 + for (v in var$DAP.name) { + files.var[[v]] <- list() + if (v %in% vars.gcm.day) { + v.res <- "day" # Get a list of file names - files.var[[v]] <- data.frame(file.name=dir(file.path(path.day, v))) - } else { - v.res="month" - files.var[[v]] <- data.frame(file.name=dir(file.path(path.mo, v))) - } - - # Set up an index to help us find out which file we'll need + files.var[[v]] <- data.frame(file.name = dir(file.path(path.day, v))) + } else { + v.res <- "month" + files.var[[v]] <- data.frame(file.name = dir(file.path(path.mo, v))) + } + + # Set up an index to help us find out which file we'll need # files.var[[v]][["years"]] <- data.frame(first.date=NA, last.date=NA) - for(i in 1:nrow(files.var[[v]])){ - dt.str <- stringr::str_split(stringr::str_split(files.var[[v]][i,"file.name"], "_")[[1]][6], "-")[[1]] - - # Don't bother storing this file if we don't want those years - if(v.res=="day"){ - files.var[[v]][i, "first.date"] <- as.Date(dt.str[1], format="%Y%m%d") - files.var[[v]][i, "last.date" ] <- as.Date(substr(dt.str[2], 1, 8), format="%Y%m%d") - } else { - # For monthly data, we can assume the first day of the month is day 1 of that month - # dfirst <- lubridate::days_in_month(as.numeric(substr(dt.str[1], 5, 6))) - files.var[[v]][i, "first.date"] <- as.Date(paste0(dt.str[1], 01), format="%Y%m%d") - - # For the last day, i wish we could assume it ends in December, but some models are - # jerks, so we should double check - dlast <- lubridate::days_in_month(as.numeric(substr(dt.str[2], 5, 6))) - files.var[[v]][i, "last.date" ] <- as.Date(paste0(substr(dt.str[2], 1, 6), dlast), format="%Y%m%d") - } - - } # End file loop - - # get rid of files outside of what we actually need - files.var[[v]] <- files.var[[v]][files.var[[v]]$first.date<=as.Date(end_date) & files.var[[v]]$last.date>=as.Date(start_date),] - # if(as.numeric(substr(yr.str[1], 1, 4)) > end_year | as.numeric(substr(yr.str[2], 1, 4))< start_year) next - n.file=n.file+nrow(files.var[[v]]) - + for (i in 1:nrow(files.var[[v]])) { + dt.str <- stringr::str_split(stringr::str_split(files.var[[v]][i, "file.name"], "_")[[1]][6], "-")[[1]] + + # Don't bother storing this file if we don't want those years + if (v.res == "day") { + files.var[[v]][i, "first.date"] <- as.Date(dt.str[1], format = "%Y%m%d") + files.var[[v]][i, "last.date"] <- as.Date(substr(dt.str[2], 1, 8), format = "%Y%m%d") + } else { + # For monthly data, we can assume the first day of the month is day 1 of that month + # dfirst <- lubridate::days_in_month(as.numeric(substr(dt.str[1], 5, 6))) + files.var[[v]][i, "first.date"] <- as.Date(paste0(dt.str[1], 01), format = "%Y%m%d") + + # For the last day, i wish we could assume it ends in December, but some models are + # jerks, so we should double check + dlast <- lubridate::days_in_month(as.numeric(substr(dt.str[2], 5, 6))) + files.var[[v]][i, "last.date"] <- as.Date(paste0(substr(dt.str[2], 1, 6), dlast), format = "%Y%m%d") + } + } # End file loop + + # get rid of files outside of what we actually need + files.var[[v]] <- 
files.var[[v]][files.var[[v]]$first.date <= as.Date(end_date) & files.var[[v]]$last.date >= as.Date(start_date), ] + # if(as.numeric(substr(yr.str[1], 1, 4)) > end_year | as.numeric(substr(yr.str[2], 1, 4))< start_year) next + n.file <- n.file + nrow(files.var[[v]]) } # end variable loop - # Querying large netcdf files 1,000 times is slow. So lets open the connection once and + # Querying large netcdf files 1,000 times is slow. So lets open the connection once and # pull the full time series # Loop through using the files using the first variable; shoudl be tair & should be highest res avail # This will require quite a bit of memory, but it's doable dat.all <- list() - dat.time <- seq(start_date, end_date, by="day") # Everything should end up being a day - + dat.time <- seq(start_date, end_date, by = "day") # Everything should end up being a day + print("- Extracting files: ") - pb <- utils::txtProgressBar(min=1, max=n.file, style=3) - pb.ind=1 + pb <- utils::txtProgressBar(min = 1, max = n.file, style = 3) + pb.ind <- 1 # Loop through each variable so that we don't have to open files more than once - for(v in 1:nrow(var)){ - - var.now <- var[v,"DAP.name"] + for (v in 1:nrow(var)) { + var.now <- var[v, "DAP.name"] # print(var.now) - + dat.all[[v]] <- vector() # initialize the layer # Figure out the temporal resolution of the variable v.res <- ifelse(var.now %in% vars.gcm.day, "day", "month") @@ -184,236 +185,237 @@ extract.local.CMIP5 <- function(outfolder, in.path, start_date, end_date, lat.in # Figure out what file we need # file.ind <- which(files.var[[var.now]][i]) - for(i in 1:nrow(files.var[[var.now]])){ + for (i in 1:nrow(files.var[[var.now]])) { utils::setTxtProgressBar(pb, pb.ind) - pb.ind=pb.ind+1 - f.now <- files.var[[var.now]][i,"file.name"] + pb.ind <- pb.ind + 1 + f.now <- files.var[[var.now]][i, "file.name"] # print(f.now) - + # Open up the file ncT <- ncdf4::nc_open(file.path(p.res, var.now, f.now)) - + # Extract our dimensions # Check to see if we need to extract lat/lon or not - if(ncT$var[[var.now]]$ndims>1){ - lat_bnd <- ncdf4::ncvar_get(ncT, "lat_bnds") - lon_bnd <- ncdf4::ncvar_get(ncT, "lon_bnds") + if (ncT$var[[var.now]]$ndims > 1) { + lat_bnd <- ncdf4::ncvar_get(ncT, "lat_bnds") + lon_bnd <- ncdf4::ncvar_get(ncT, "lon_bnds") } nc.time <- ncdf4::ncvar_get(ncT, "time") - if(v.res=="day"){ - date.leaps <- seq(files.var[[var.now]][i,"first.date"], files.var[[var.now]][i,"last.date"], by="day") + if (v.res == "day") { + date.leaps <- seq(files.var[[var.now]][i, "first.date"], files.var[[var.now]][i, "last.date"], by = "day") } else { # if we're dealing with monthly data, start with the first of the month - date.leaps <- seq(files.var[[var.now]][i,"first.date"], files.var[[var.now]][i,"last.date"], by="day") + date.leaps <- seq(files.var[[var.now]][i, "first.date"], files.var[[var.now]][i, "last.date"], by = "day") } # Figure out if we're missing leap dat - no.leap <- ifelse(length(nc.time)!=length(date.leaps), TRUE, FALSE) - + no.leap <- ifelse(length(nc.time) != length(date.leaps), TRUE, FALSE) + # splt.ind <- ifelse(GCM %in% c("MPI-ESM-P"), 4, 3) # date.origin <- as.Date(stringr::str_split(ncT$dim$time$units, " ")[[1]][splt.ind]) - if(v.res == "day"){ + if (v.res == "day") { nc.date <- date.origin + nc.time - + nc.min <- as.Date(min(nc.date)) # mean(diff(nc.date)) - date.ref <- files.var[[var.now]][i,"first.date"]+0.5 # Set a half-day offset to make centered - + date.ref <- files.var[[var.now]][i, "first.date"] + 0.5 # Set a half-day offset to make centered + # If 
things don't align with the specified origin, update it & try again - if(nc.min != date.ref){ + if (nc.min != date.ref) { date.off <- date.ref - nc.min # Figure out our date offset - - nc.date <- date.origin + nc.time + date.off - } + + nc.date <- date.origin + nc.time + date.off + } } else { - dfirst <- lubridate::days_in_month(lubridate::month(files.var[[var.now]][i,"first.date"])) - - dates.mo <- seq.Date(files.var[[var.now]][i,"first.date"]+dfirst/2, files.var[[var.now]][i,"last.date"], by="month") - - if(length(dates.mo) == length(nc.time)){ + dfirst <- lubridate::days_in_month(lubridate::month(files.var[[var.now]][i, "first.date"])) + + dates.mo <- seq.Date(files.var[[var.now]][i, "first.date"] + dfirst / 2, files.var[[var.now]][i, "last.date"], by = "month") + + if (length(dates.mo) == length(nc.time)) { nc.date <- dates.mo } else { # I have no freaking clue what to do if things don't work out, so lets just go back to whatever we first tried date.off <- date.ref - nc.min # Figure out our date offset - + nc.date <- nc.date + date.off + 1 } } - + # If we're missing leap year, lets adjust our date stamps so we can only pull what we need - if(v.res=="day" & no.leap==TRUE){ - cells.bump <- which(lubridate::leap_year(lubridate::year(date.leaps)) & lubridate::month(date.leaps)==02 & lubridate::day(date.leaps)==29) - for(j in 1:length(cells.bump)){ - nc.date[(cells.bump[j]-1):length(nc.date)] <- nc.date[(cells.bump[j]-1):length(nc.date)]+1 + if (v.res == "day" & no.leap == TRUE) { + cells.bump <- which(lubridate::leap_year(lubridate::year(date.leaps)) & lubridate::month(date.leaps) == 02 & lubridate::day(date.leaps) == 29) + for (j in 1:length(cells.bump)) { + nc.date[(cells.bump[j] - 1):length(nc.date)] <- nc.date[(cells.bump[j] - 1):length(nc.date)] + 1 } } - + # Find our time index - if(v.res=="day"){ - time.ind <- which(nc.date>=as.Date(start_date) & nc.date<=as.Date(end_date)+0.5) + if (v.res == "day") { + time.ind <- which(nc.date >= as.Date(start_date) & nc.date <= as.Date(end_date) + 0.5) } else { # date.ind <- rep(files.var[[var.now]][i,"first.date"]:files.var[[var.now]][i,"last.date"], each=12) - time.ind <- which(nc.date>=as.Date(start_date) & nc.date<=as.Date(end_date)+0.5) + time.ind <- which(nc.date >= as.Date(start_date) & nc.date <= as.Date(end_date) + 0.5) } - + # Subset our dates & times to match our index nc.date <- nc.date[time.ind] - date.leaps <- date.leaps[which(date.leaps>=as.Date(start_date) & date.leaps<=as.Date(end_date))] - + date.leaps <- date.leaps[which(date.leaps >= as.Date(start_date) & date.leaps <= as.Date(end_date))] + # Find the closest grid cell for our site (using harvard as a protoype) - ind.lat <- which(lat_bnd[1,]<=lat.in & lat_bnd[2,]>=lat.in) - if(max(lon.in)>=180){ - ind.lon <- which(lon_bnd[1,]>=lon.in & lon_bnd[2,]<=lon.in) + ind.lat <- which(lat_bnd[1, ] <= lat.in & lat_bnd[2, ] >= lat.in) + if (max(lon.in) >= 180) { + ind.lon <- which(lon_bnd[1, ] >= lon.in & lon_bnd[2, ] <= lon.in) } else { - ind.lon <- which(lon_bnd[1,]<=180+lon.in & lon_bnd[2,]>=180+lon.in) + ind.lon <- which(lon_bnd[1, ] <= 180 + lon.in & lon_bnd[2, ] >= 180 + lon.in) } - + # Extract all of the available data - if(var.now %in% c("hus", "ua", "va")){ # These have multiple strata; we only want 1 + if (var.now %in% c("hus", "ua", "va")) { # These have multiple strata; we only want 1 plev <- ncdf4::ncvar_get(ncT, "plev") - puse <- which(plev==max(plev)) # Get humidity at the place of highest pressure (closest to surface) - dat.temp <- ncdf4::ncvar_get(ncT, var.now, c(ind.lon, 
ind.lat, puse, time.ind[1]), c(1,1,1,length(time.ind))) + puse <- which(plev == max(plev)) # Get humidity at the place of highest pressure (closest to surface) + dat.temp <- ncdf4::ncvar_get(ncT, var.now, c(ind.lon, ind.lat, puse, time.ind[1]), c(1, 1, 1, length(time.ind))) # If dat.list has missing values, try the next layer puse.orig <- puse - while(is.na(mean(dat.temp))){ - if(puse.orig==1) { puse = puse + 1 } else { puse = puse -1 } - dat.temp <- ncdf4::ncvar_get(ncT, var.now, c(ind.lon, ind.lat, puse, time.ind[1]), c(1,1,1,length(time.ind))) + while (is.na(mean(dat.temp))) { + if (puse.orig == 1) { + puse <- puse + 1 + } else { + puse <- puse - 1 + } + dat.temp <- ncdf4::ncvar_get(ncT, var.now, c(ind.lon, ind.lat, puse, time.ind[1]), c(1, 1, 1, length(time.ind))) } } else { # Note that CO2 appears to be a global value - if(ncT$var[[var.now]]$ndims==1){ - dat.temp <- ncdf4::ncvar_get(ncT, var.now, c(time.ind[1]), c(length(time.ind))) + if (ncT$var[[var.now]]$ndims == 1) { + dat.temp <- ncdf4::ncvar_get(ncT, var.now, c(time.ind[1]), c(length(time.ind))) } else { - dat.temp <- ncdf4::ncvar_get(ncT, var.now, c(ind.lon, ind.lat, time.ind[1]), c(1,1,length(time.ind))) - } + dat.temp <- ncdf4::ncvar_get(ncT, var.now, c(ind.lon, ind.lat, time.ind[1]), c(1, 1, length(time.ind))) + } } - + # Add leap year and trick monthly into daily # Figure out if we're missing leap year - if(v.res=="day" & no.leap==TRUE){ - cells.dup <- which(lubridate::leap_year(lubridate::year(date.leaps)) & lubridate::month(date.leaps)==02 & lubridate::day(date.leaps)==28) - if(length(cells.dup)>0){ - for(j in 1:length(cells.dup)){ + if (v.res == "day" & no.leap == TRUE) { + cells.dup <- which(lubridate::leap_year(lubridate::year(date.leaps)) & lubridate::month(date.leaps) == 02 & lubridate::day(date.leaps) == 28) + if (length(cells.dup) > 0) { + for (j in 1:length(cells.dup)) { dat.temp <- append(dat.temp, dat.temp[cells.dup[j]], cells.dup[j]) } } } - - + + # If we have monthly data, lets trick it into being daily - if(v.res == "month"){ - mo.ind <- rep(1:12, length.out=length(dat.temp)) + if (v.res == "month") { + mo.ind <- rep(1:12, length.out = length(dat.temp)) yr.ind <- lubridate::year(nc.date) dat.trick <- vector() - for(j in 1:length(dat.temp)){ - if(lubridate::leap_year(yr.ind[j]) & mo.ind[j]==2){ - dat.trick <- c(dat.trick, rep(dat.temp[j], dpm[mo.ind[j]]+1)) + for (j in 1:length(dat.temp)) { + if (lubridate::leap_year(yr.ind[j]) & mo.ind[j] == 2) { + dat.trick <- c(dat.trick, rep(dat.temp[j], dpm[mo.ind[j]] + 1)) } else { - dat.trick <- c(dat.trick, rep(dat.temp[j], dpm[mo.ind[j]])) + dat.trick <- c(dat.trick, rep(dat.temp[j], dpm[mo.ind[j]])) } } dat.temp <- dat.trick } # End leap day trick - + dat.all[[v]] <- append(dat.all[[v]], dat.temp, length(dat.all[[v]])) - ncdf4::nc_close(ncT) + ncdf4::nc_close(ncT) } # End file loop } # End variable loop - + print("") print("- Writing to NetCDF: ") - pb <- utils::txtProgressBar(min=1, max=rows, style=3) - for (i in 1:rows){ + pb <- utils::txtProgressBar(min = 1, max = rows, style = 3) + for (i in 1:rows) { utils::setTxtProgressBar(pb, i) - - y.now = ylist[i] - yr.ind <- which(lubridate::year(dat.time)==y.now) - - + + y.now <- ylist[i] + yr.ind <- which(lubridate::year(dat.time) == y.now) + + dpm <- lubridate::days_in_month(1:12) - if(lubridate::leap_year(y.now)) dpm[2] <- dpm[2] + 1 # make sure Feb has 29 days if we're dealing with a leap year - + if (lubridate::leap_year(y.now)) dpm[2] <- dpm[2] + 1 # make sure Feb has 29 days if we're dealing with a leap year + # 
figure out how many days we're working with - if(rows>1 & i!=1 & i!=rows){ # If we have multiple years and we're not in the first or last year, we're taking a whole year - nday = ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year - day1 = 1 - day2 = nday - days.use = day1:day2 - } else if(rows==1){ + if (rows > 1 & i != 1 & i != rows) { # If we have multiple years and we're not in the first or last year, we're taking a whole year + nday <- ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year + day1 <- 1 + day2 <- nday + days.use <- day1:day2 + } else if (rows == 1) { # if we're working with only 1 year, lets only pull what we need to - nday = ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year + nday <- ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year day1 <- lubridate::yday(start_date) # Now we need to check whether we're ending on the right day day2 <- lubridate::yday(end_date) - days.use = day1:day2 - nday=length(days.use) # Update nday - } else if(i==1) { + days.use <- day1:day2 + nday <- length(days.use) # Update nday + } else if (i == 1) { # If this is the first of many years, we only need to worry about the start date - nday = ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year + nday <- ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year day1 <- lubridate::yday(start_date) - day2 = nday - days.use = day1:day2 - nday=length(days.use) # Update nday - } else if(i==rows) { + day2 <- nday + days.use <- day1:day2 + nday <- length(days.use) # Update nday + } else if (i == rows) { # If this is the last of many years, we only need to worry about the start date - nday = ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year - day1 = 1 + nday <- ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year + day1 <- 1 day2 <- lubridate::yday(end_date) - days.use = day1:day2 - nday=length(days.use) # Update nday + days.use <- day1:day2 + nday <- length(days.use) # Update nday } - ntime = nday # leap year or not; time slice (coerce to daily) - - loc.file <- file.path(outfolder, paste(model, scenario, ensemble_member, stringr::str_pad(y.now, width=4, side="left", pad="0"), "nc", sep = ".")) - - + ntime <- nday # leap year or not; time slice (coerce to daily) + + loc.file <- file.path(outfolder, paste(model, scenario, ensemble_member, stringr::str_pad(y.now, width = 4, side = "left", pad = "0"), "nc", sep = ".")) + + ## Create dimensions - dim.lat <- ncdf4::ncdim_def(name='latitude', units='degree_north', vals=lat.in, create_dimvar=TRUE) - dim.lon <- ncdf4::ncdim_def(name='longitude', units='degree_east', vals=lon.in, create_dimvar=TRUE) - dim.time <- ncdf4::ncdim_def(name='time', units="sec", vals=seq((min(days.use)+1-1/24)*24*360, (max(days.use)+1-1/24)*24*360, length.out=ntime), create_dimvar=TRUE, unlim=TRUE) - nc.dim=list(dim.lat,dim.lon,dim.time) - - + dim.lat <- ncdf4::ncdim_def(name = "latitude", units = "degree_north", vals = lat.in, create_dimvar = TRUE) + dim.lon <- ncdf4::ncdim_def(name = "longitude", units = "degree_east", vals = lon.in, create_dimvar = TRUE) + dim.time <- ncdf4::ncdim_def(name = "time", units = "sec", vals = seq((min(days.use) + 1 - 1 / 24) * 24 * 360, (max(days.use) + 1 - 1 / 24) * 24 * 360, length.out = ntime), create_dimvar = TRUE, unlim = TRUE) + nc.dim <- list(dim.lat, dim.lon, dim.time) + + # Defining our dimensions up front - var.list = list() - 
dat.list = list() + var.list <- list() + dat.list <- list() - for(j in 1:nrow(var)){ - var.list[[j]] = ncdf4::ncvar_def(name=as.character(var$CF.name[j]), units=as.character(var$units[j]), dim=nc.dim, missval=-999, verbose=verbose) - dat.list[[j]] <- array(NA, dim=c(length(lat.in), length(lon.in), ntime)) # Go ahead and make the arrays + for (j in 1:nrow(var)) { + var.list[[j]] <- ncdf4::ncvar_def(name = as.character(var$CF.name[j]), units = as.character(var$units[j]), dim = nc.dim, missval = -999, verbose = verbose) + dat.list[[j]] <- array(NA, dim = c(length(lat.in), length(lon.in), ntime)) # Go ahead and make the arrays } names(var.list) <- names(dat.list) <- var$CF.name - + # Loop through each variable in the order of everything else - for(v in 1:nrow(var)){ - dat.list[[v]] <- dat.all[[v]][yr.ind] + for (v in 1:nrow(var)) { + dat.list[[v]] <- dat.all[[v]][yr.ind] } # End variable loop - + # Adjusting Preciptiation if necessary - dat.list[["precipitation_flux"]] <- dat.list[["precipitation_flux"]]*adjust.pr - - if("mole_fraction_of_carbon_dioxide_in_air" %in% names(dat.list)){ - co2.mol <- dat.list[["mole_fraction_of_carbon_dioxide_in_air"]]/co2.molmass # kg co2 - dat.list[["mole_fraction_of_carbon_dioxide_in_air"]] <- co2.mol/atm.mol*1e6 # kmol/kmol * 1e6 to be in CF units (ppm) + dat.list[["precipitation_flux"]] <- dat.list[["precipitation_flux"]] * adjust.pr + + if ("mole_fraction_of_carbon_dioxide_in_air" %in% names(dat.list)) { + co2.mol <- dat.list[["mole_fraction_of_carbon_dioxide_in_air"]] / co2.molmass # kg co2 + dat.list[["mole_fraction_of_carbon_dioxide_in_air"]] <- co2.mol / atm.mol * 1e6 # kmol/kmol * 1e6 to be in CF units (ppm) } - + ## put data in new file - loc <- ncdf4::nc_create(filename=loc.file, vars=var.list, verbose=verbose) - for(j in 1:nrow(var)){ - ncdf4::ncvar_put(nc=loc, varid=as.character(var$CF.name[j]), vals=dat.list[[j]]) + loc <- ncdf4::nc_create(filename = loc.file, vars = var.list, verbose = verbose) + for (j in 1:nrow(var)) { + ncdf4::ncvar_put(nc = loc, varid = as.character(var$CF.name[j]), vals = dat.list[[j]]) } ncdf4::nc_close(loc) results$file[i] <- loc.file # results$host[i] <- fqdn() - results$startdate[i] <- paste0(as.Date(paste(y.now, day1, sep="-"), format = "%Y-%j"), " 00:00:00") - results$enddate[i] <- paste0(as.Date(paste(y.now, day2, sep="-"), format = "%Y-%j"), " 00:00:00") - results$mimetype[i] <- 'application/x-netcdf' - results$formatname[i] <- 'CF Meteorology' - + results$startdate[i] <- paste0(as.Date(paste(y.now, day1, sep = "-"), format = "%Y-%j"), " 00:00:00") + results$enddate[i] <- paste0(as.Date(paste(y.now, day2, sep = "-"), format = "%Y-%j"), " 00:00:00") + results$mimetype[i] <- "application/x-netcdf" + results$formatname[i] <- "CF Meteorology" } # End i loop (rows/years) - } # End function - diff --git a/modules/data.atmosphere/R/extract_local_NLDAS.R b/modules/data.atmosphere/R/extract_local_NLDAS.R index 2846dafca09..58776d9a40a 100644 --- a/modules/data.atmosphere/R/extract_local_NLDAS.R +++ b/modules/data.atmosphere/R/extract_local_NLDAS.R @@ -5,11 +5,11 @@ # ----------------------------------- ##' @author Christy Rollinson ##' @description This function extracts NLDAS data from grids that have been downloaded and stored locally. -##' Once upon a time, you could query these files directly from the internet, but now they're -##' behind a tricky authentication wall. Files are saved as a netCDF file in CF conventions. 
-##' These files are ready to be used in the general PEcAn workflow or fed into the downscaling +##' Once upon a time, you could query these files directly from the internet, but now they're +##' behind a tricky authentication wall. Files are saved as a netCDF file in CF conventions. +##' These files are ready to be used in the general PEcAn workflow or fed into the downscaling ##' workflow. -# ----------------------------------- +# ----------------------------------- # Parameters # ----------------------------------- ##' @param outfolder - directory where output files will be stored @@ -24,163 +24,161 @@ ##' @param ... Other arguments, currently ignored ##' @export # ----------------------------------- -extract.local.NLDAS <- function(outfolder, in.path, start_date, end_date, lat.in, lon.in, - overwrite = FALSE, verbose = FALSE, ...){ - +extract.local.NLDAS <- function(outfolder, in.path, start_date, end_date, lat.in, lon.in, + overwrite = FALSE, verbose = FALSE, ...) { # Date stuff start_date <- as.POSIXlt(start_date, tz = "GMT") end_date <- as.POSIXlt(end_date, tz = "GMT") start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - - lat.in = as.numeric(lat.in) - lon.in = as.numeric(lon.in) + end_year <- lubridate::year(end_date) + + lat.in <- as.numeric(lat.in) + lon.in <- as.numeric(lon.in) # dir.nldas="http://hydro1.sci.gsfc.nasa.gov/thredds/dodsC/NLDAS_FORA0125_H.002" - dir.create(outfolder, showWarnings=FALSE, recursive=TRUE) - - ylist <- seq(start_year,end_year,by=1) - rows = length(ylist) - results <- data.frame(file=character(rows), host=character(rows), - mimetype=character(rows), formatname=character(rows), - startdate=character(rows), enddate=character(rows), - dbfile.name = "NLDAS", - stringsAsFactors = FALSE - ) - + dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) + + ylist <- seq(start_year, end_year, by = 1) + rows <- length(ylist) + results <- data.frame( + file = character(rows), host = character(rows), + mimetype = character(rows), formatname = character(rows), + startdate = character(rows), enddate = character(rows), + dbfile.name = "NLDAS", + stringsAsFactors = FALSE + ) + # I fixed the shortwave radiation parsing script, but haven't pushed those changes, so until NLDAS gets re-formatted, just index it differently - var = data.frame(NLDAS.name = c("air_temperature","surface_downwelling_longwave_flux_in_air","air_pressure","downwelling_shortwave_flux_in_air","eastward_wind","northward_wind","specific_humidity","precipitation_amount"), - CF.name = c("air_temperature","surface_downwelling_longwave_flux_in_air","air_pressure","surface_downwelling_shortwave_flux_in_air","eastward_wind","northward_wind","specific_humidity","precipitation_flux"), - units = c('Kelvin',"W/m2","Pascal","W/m2","m/s","m/s","g/g","kg/m2/s") - ) - + var <- data.frame( + NLDAS.name = c("air_temperature", "surface_downwelling_longwave_flux_in_air", "air_pressure", "downwelling_shortwave_flux_in_air", "eastward_wind", "northward_wind", "specific_humidity", "precipitation_amount"), + CF.name = c("air_temperature", "surface_downwelling_longwave_flux_in_air", "air_pressure", "surface_downwelling_shortwave_flux_in_air", "eastward_wind", "northward_wind", "specific_humidity", "precipitation_flux"), + units = c("Kelvin", "W/m2", "Pascal", "W/m2", "m/s", "m/s", "g/g", "kg/m2/s") + ) + # Progress bar because this can be very slow - for (i in 1:rows){ - y.now = ylist[i] - + for (i in 1:rows) { + y.now <- ylist[i] + # figure out how many days we're working with - if(rows>1 
& i!=1 & i!=rows){ # If we have multiple years and we're not in the first or last year, we're taking a whole year - nday = ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year - day1 = 1 - day2 = nday - days.use = day1:day2 - } else if(rows==1){ + if (rows > 1 & i != 1 & i != rows) { # If we have multiple years and we're not in the first or last year, we're taking a whole year + nday <- ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year + day1 <- 1 + day2 <- nday + days.use <- day1:day2 + } else if (rows == 1) { # if we're working with only 1 year, lets only pull what we need to - nday = ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year + nday <- ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year day1 <- lubridate::yday(start_date) # Now we need to check whether we're ending on the right day day2 <- lubridate::yday(end_date) - days.use = day1:day2 - nday=length(days.use) # Update nday - } else if(i==1) { + days.use <- day1:day2 + nday <- length(days.use) # Update nday + } else if (i == 1) { # If this is the first of many years, we only need to worry about the start date - nday = ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year + nday <- ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year day1 <- lubridate::yday(start_date) - day2 = nday - days.use = day1:day2 - nday=length(days.use) # Update nday - } else if(i==rows) { + day2 <- nday + days.use <- day1:day2 + nday <- length(days.use) # Update nday + } else if (i == rows) { # If this is the last of many years, we only need to worry about the start date - nday = ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year - day1 = 1 + nday <- ifelse(lubridate::leap_year(y.now), 366, 365) # leap year or not; days per year + day1 <- 1 day2 <- lubridate::yday(end_date) - days.use = day1:day2 - nday=length(days.use) # Update nday + days.use <- day1:day2 + nday <- length(days.use) # Update nday } - ntime = nday*24 # leap year or not;time slice (hourly) - - loc.file = file.path(outfolder, paste("NLDAS",y.now,"nc",sep=".")) - + ntime <- nday * 24 # leap year or not;time slice (hourly) + + loc.file <- file.path(outfolder, paste("NLDAS", y.now, "nc", sep = ".")) + ## Create dimensions - dim.lat <- ncdf4::ncdim_def(name='latitude', units='degree_north', vals=lat.in, create_dimvar=TRUE) - dim.lon <- ncdf4::ncdim_def(name='longitude', units='degree_east', vals=lon.in, create_dimvar=TRUE) - dim.time <- ncdf4::ncdim_def(name='time', units="sec", vals=seq((min(days.use)+1-1/24)*24*360, (max(days.use)+1-1/24)*24*360, length.out=ntime), create_dimvar=TRUE, unlim=TRUE) - nc.dim=list(dim.lat,dim.lon,dim.time) - - var.list = list() - dat.list = list() - + dim.lat <- ncdf4::ncdim_def(name = "latitude", units = "degree_north", vals = lat.in, create_dimvar = TRUE) + dim.lon <- ncdf4::ncdim_def(name = "longitude", units = "degree_east", vals = lon.in, create_dimvar = TRUE) + dim.time <- ncdf4::ncdim_def(name = "time", units = "sec", vals = seq((min(days.use) + 1 - 1 / 24) * 24 * 360, (max(days.use) + 1 - 1 / 24) * 24 * 360, length.out = ntime), create_dimvar = TRUE, unlim = TRUE) + nc.dim <- list(dim.lat, dim.lon, dim.time) + + var.list <- list() + dat.list <- list() + # Defining our dimensions up front - for(j in 1:nrow(var)){ - var.list[[j]] = ncdf4::ncvar_def(name=as.character(var$CF.name[j]), units=as.character(var$units[j]), dim=nc.dim, missval=-999, verbose=verbose) - 
dat.list[[j]] <- array(NA, dim=c(length(lat.in), length(lon.in), ntime)) # Go ahead and make the arrays + for (j in 1:nrow(var)) { + var.list[[j]] <- ncdf4::ncvar_def(name = as.character(var$CF.name[j]), units = as.character(var$units[j]), dim = nc.dim, missval = -999, verbose = verbose) + dat.list[[j]] <- array(NA, dim = c(length(lat.in), length(lon.in), ntime)) # Go ahead and make the arrays } names(var.list) <- names(dat.list) <- var$CF.name - - + + # Progress bar just to help us track what's going on print("") print(y.now) - pb <- utils::txtProgressBar(min=1, max=nday, style=3) - pb.index=1 - + pb <- utils::txtProgressBar(min = 1, max = nday, style = 3) + pb.index <- 1 + ## get data off OpenDAP - for(j in 1:length(days.use)){ + for (j in 1:length(days.use)) { utils::setTxtProgressBar(pb, pb.index) - - date.now <- as.Date(days.use[j], origin=as.Date(paste0(y.now-1,"-12-31"))) - mo.now <- stringr::str_pad(lubridate::month(date.now), 2, pad="0") - day.mo <- stringr::str_pad(lubridate::day(date.now), 2, pad="0") - doy <- stringr::str_pad(days.use[j], 3, pad="0") - + + date.now <- as.Date(days.use[j], origin = as.Date(paste0(y.now - 1, "-12-31"))) + mo.now <- stringr::str_pad(lubridate::month(date.now), 2, pad = "0") + day.mo <- stringr::str_pad(lubridate::day(date.now), 2, pad = "0") + doy <- stringr::str_pad(days.use[j], 3, pad = "0") + # Local netcdf format is 1-file per day # NLDAS_FORA0125_H.A19790102.nc - dap_file <- ncdf4::nc_open(file.path(in.path,y.now,mo.now,paste0("NLDAS_FORA0125_H.A",y.now, mo.now,day.mo,".nc"))) - + dap_file <- ncdf4::nc_open(file.path(in.path, y.now, mo.now, paste0("NLDAS_FORA0125_H.A", y.now, mo.now, day.mo, ".nc"))) + # Query lat/lon lats <- ncdf4::ncvar_get(dap_file, "lat") lons <- ncdf4::ncvar_get(dap_file, "lon") - + # Get the average resolution (without hard-coding and possibly making an error) x.inc <- mean(abs(diff(lons))) y.inc <- mean(abs(diff(lats))) - - lat.use <- which(lats-y.inc/2<=lat.in & lats+y.inc/2>=lat.in) - lon.use <- which(lons-x.inc/2<=lon.in & lons+x.inc/2>=lon.in) - + + lat.use <- which(lats - y.inc / 2 <= lat.in & lats + y.inc / 2 >= lat.in) + lon.use <- which(lons - x.inc / 2 <= lon.in & lons + x.inc / 2 >= lon.in) + # Extracting the variables for (v in 1:nrow(var)) { v.nldas <- paste(var$NLDAS.name[v]) - v.cf <- paste(var$CF.name [v]) - - if(!v.nldas %in% names(dap_file$var) & v.cf %in% names(dap_file$var)) v.nldas <- v.cf - + v.cf <- paste(var$CF.name[v]) + + if (!v.nldas %in% names(dap_file$var) & v.cf %in% names(dap_file$var)) v.nldas <- v.cf + # Variables have different dimensions (which is a pain in the butt) # so we need to check to see whether we're pulling 4 dimensions or just 3 - if(dap_file$var[[v.nldas]]$ndims == 4){ - dat.list[[v.cf]][,,(j*24-23):(j*24)] <- ncdf4::ncvar_get(dap_file, v.nldas, - start=c(lon.use,lat.use,1,1), - count=c(1,1,1,24) + if (dap_file$var[[v.nldas]]$ndims == 4) { + dat.list[[v.cf]][, , (j * 24 - 23):(j * 24)] <- ncdf4::ncvar_get(dap_file, v.nldas, + start = c(lon.use, lat.use, 1, 1), + count = c(1, 1, 1, 24) ) } else { - dat.list[[v.cf]][,,(j*24-23):(j*24)] <- ncdf4::ncvar_get(dap_file, v.nldas, - start=c(lon.use,lat.use,1), - count=c(1,1,24) + dat.list[[v.cf]][, , (j * 24 - 23):(j * 24)] <- ncdf4::ncvar_get(dap_file, v.nldas, + start = c(lon.use, lat.use, 1), + count = c(1, 1, 24) ) - } } # end variable loop - + ncdf4::nc_close(dap_file) # close file - pb.index=pb.index+1 # Advance our progress bar + pb.index <- pb.index + 1 # Advance our progress bar } # end day - - ## change units of precip 
from kg/m2/hr to kg/m2/s - dat.list[["precipitation_flux"]] = dat.list[["precipitation_flux"]]/(60*60) - + + ## change units of precip from kg/m2/hr to kg/m2/s + dat.list[["precipitation_flux"]] <- dat.list[["precipitation_flux"]] / (60 * 60) + ## put data in new file - loc <- ncdf4::nc_create(filename=loc.file, vars=var.list, verbose=verbose) - for(j in 1:nrow(var)){ - ncdf4::ncvar_put(nc=loc, varid=as.character(var$CF.name[j]), vals=dat.list[[j]]) + loc <- ncdf4::nc_create(filename = loc.file, vars = var.list, verbose = verbose) + for (j in 1:nrow(var)) { + ncdf4::ncvar_put(nc = loc, varid = as.character(var$CF.name[j]), vals = dat.list[[j]]) } ncdf4::nc_close(loc) - + results$file[i] <- loc.file # results$host[i] <- fqdn() - results$startdate[i] <- paste0(as.Date(paste(y.now, day1, sep="-"), format = "%Y-%j"), " 00:00:00") - results$enddate[i] <- paste0(as.Date(paste(y.now, day2, sep="-"), format = "%Y-%j"), " 00:00:00") - results$mimetype[i] <- 'application/x-netcdf' - results$formatname[i] <- 'CF Meteorology' - + results$startdate[i] <- paste0(as.Date(paste(y.now, day1, sep = "-"), format = "%Y-%j"), " 00:00:00") + results$enddate[i] <- paste0(as.Date(paste(y.now, day2, sep = "-"), format = "%Y-%j"), " 00:00:00") + results$mimetype[i] <- "application/x-netcdf" + results$formatname[i] <- "CF Meteorology" } - } diff --git a/modules/data.atmosphere/R/get_cf_variables_table.R b/modules/data.atmosphere/R/get_cf_variables_table.R index 30dcf731a66..991dc0f57c5 100644 --- a/modules/data.atmosphere/R/get_cf_variables_table.R +++ b/modules/data.atmosphere/R/get_cf_variables_table.R @@ -13,7 +13,7 @@ get_cf_variables_table <- function(cf_url = build_cf_variables_table_url(57)) { entries_flat <- purrr::map(entries, unlist) entries_df <- purrr::map(entries, as.list) %>% purrr::transpose() %>% - purrr::map(~ifelse(purrr::map_lgl(.x, is.null), NA, .x)) %>% + purrr::map(~ ifelse(purrr::map_lgl(.x, is.null), NA, .x)) %>% purrr::map_dfc(unlist, recursive = TRUE) entries_df %>% dplyr::select( @@ -43,6 +43,7 @@ build_cf_variables_table_url <- function( # which complain if any usage line is wider than 90 chars "http://cfconventions.org/", "Data/cf-standard-names/%d/src/", - "src-cf-standard-name-table.xml")) { + "src-cf-standard-name-table.xml" + )) { sprintf(url_format_string, version) } diff --git a/modules/data.atmosphere/R/half_hour_downscale.R b/modules/data.atmosphere/R/half_hour_downscale.R index bb14748412a..c1da23e6284 100644 --- a/modules/data.atmosphere/R/half_hour_downscale.R +++ b/modules/data.atmosphere/R/half_hour_downscale.R @@ -10,145 +10,146 @@ #' @param hr set half hour #' #' @export -#' -temporal_downscale_half_hour <- function(input_file, output_file, overwrite = TRUE, hr = 0.5){ - - # open netcdf +#' +temporal_downscale_half_hour <- function(input_file, output_file, overwrite = TRUE, hr = 0.5) { + # open netcdf nc <- ncdf4::nc_open(input_file) - - if(stringr::str_detect(input_file, "ens")){ + + if (stringr::str_detect(input_file, "ens")) { ens_postion <- stringr::str_locate(input_file, "ens") ens_name <- stringr::str_sub(input_file, start = ens_postion[1], end = ens_postion[2] + 2) ens <- as.numeric(stringr::str_sub(input_file, start = ens_postion[2] + 1, end = ens_postion[2] + 2)) - }else{ + } else { ens <- 0 ens_name <- "ens00" } - + # retrive variable names cf_var_names <- names(nc$var) - + # generate time vector time <- ncdf4::ncvar_get(nc, "time") begining_time <- lubridate::ymd_hm(ncdf4::ncatt_get(nc, "time", - attname = "units")$value) + attname = "units" + )$value) time <- 
begining_time + lubridate::hours(time) - + # retrive lat and lon lat.in <- ncdf4::ncvar_get(nc, "latitude") lon.in <- ncdf4::ncvar_get(nc, "longitude") - + # generate data frame from netcdf variables and retrive units noaa_data <- tibble::tibble(time = time) var_units <- rep(NA, length(cf_var_names)) - for(i in 1:length(cf_var_names)){ + for (i in 1:length(cf_var_names)) { curr_data <- ncdf4::ncvar_get(nc, cf_var_names[i]) noaa_data <- cbind(noaa_data, curr_data) var_units[i] <- ncdf4::ncatt_get(nc, cf_var_names[i], attname = "units")$value } - + ncdf4::nc_close(nc) - - names(noaa_data) <- c("time",cf_var_names) - + + names(noaa_data) <- c("time", cf_var_names) + # spline-based downscaling - if(length(which(c("air_temperature", "wind_speed","specific_humidity", "air_pressure") %in% cf_var_names) == 4)){ - #convert units for qair2rh conversion + if (length(which(c("air_temperature", "wind_speed", "specific_humidity", "air_pressure") %in% cf_var_names) == 4)) { + # convert units for qair2rh conversion noaa_data_units <- data.frame(time = noaa_data$time, wind_speed = noaa_data$wind_speed, specific_humidity = noaa_data$specific_humidity) airTemp <- noaa_data$air_temperature airPress <- noaa_data$air_pressure - #convert K to C - K2C <- function(K){ - C = K - 273.15 + # convert K to C + K2C <- function(K) { + C <- K - 273.15 return(C) } temp.K <- as.matrix(airTemp) temp.C <- apply(temp.K, 1, K2C) noaa_data_units$air_temperature <- temp.C - #convert Pa to mb - Pa2mb <- function(P){ - M <- P/100 + # convert Pa to mb + Pa2mb <- function(P) { + M <- P / 100 return(M) } press.P <- as.matrix(airPress) press.mb <- apply(press.P, 1, Pa2mb) noaa_data_units$air_pressure <- press.mb - forecast_noaa_ds <- PEcAn.data.atmosphere::downscale_spline_to_half_hrly(df = noaa_data_units, VarNames = c("wind_speed","specific_humidity", "air_temperature", "air_pressure")) - }else{ - #Add error message - PEcAn.logger::logger.error(paste0("1hr Met ncdf file missing either air_temperature, wind_speed, specific_humidity, or air_pressure")) + forecast_noaa_ds <- PEcAn.data.atmosphere::downscale_spline_to_half_hrly(df = noaa_data_units, VarNames = c("wind_speed", "specific_humidity", "air_temperature", "air_pressure")) + } else { + # Add error message + PEcAn.logger::logger.error(paste0("1hr Met ncdf file missing either air_temperature, wind_speed, specific_humidity, or air_pressure")) } - + # Convert splined SH, temperature, and presssure to RH forecast_noaa_ds <- forecast_noaa_ds %>% dplyr::mutate(relative_humidity = qair2rh(qair = forecast_noaa_ds$specific_humidity, temp = forecast_noaa_ds$air_temperature, press = forecast_noaa_ds$air_pressure)) %>% dplyr::mutate(relative_humidity = ifelse(.data$relative_humidity > 1, 1, .data$relative_humidity)) - #convert airTemp and air Press back to K and Pa for met2model fcns - #convert C to K - C2K <- function(C){ - K = C + 273.15 + # convert airTemp and air Press back to K and Pa for met2model fcns + # convert C to K + C2K <- function(C) { + K <- C + 273.15 return(K) } temp.C <- as.matrix(forecast_noaa_ds$air_temperature) temp.K <- apply(temp.C, 1, C2K) forecast_noaa_ds$air_temperature <- temp.K - #convert mb to Pa - mb2Pa <- function(M){ - P = M*100 + # convert mb to Pa + mb2Pa <- function(M) { + P <- M * 100 return(P) } press.mb <- as.matrix(forecast_noaa_ds$air_pressure) press.P <- apply(press.mb, 1, mb2Pa) forecast_noaa_ds$air_pressure <- press.P # convert longwave to hourly (just copy 6 hourly values over past 6-hour time period) - if("surface_downwelling_longwave_flux_in_air" 
%in% cf_var_names){ + if ("surface_downwelling_longwave_flux_in_air" %in% cf_var_names) { LW.flux.hrly <- downscale_repeat_6hr_to_half_hrly(df = noaa_data, varName = "surface_downwelling_longwave_flux_in_air") forecast_noaa_ds <- dplyr::inner_join(forecast_noaa_ds, LW.flux.hrly, by = "time") - }else{ - #Add error message + } else { + # Add error message } - + # convert precipitation to hourly (just copy 6 hourly values over past 6-hour time period) - if("precipitation_flux" %in% cf_var_names){ + if ("precipitation_flux" %in% cf_var_names) { Precip.flux.hrly <- downscale_repeat_6hr_to_half_hrly(df = noaa_data, varName = "precipitation_flux") forecast_noaa_ds <- dplyr::inner_join(forecast_noaa_ds, Precip.flux.hrly, by = "time") - }else{ - #Add error message + } else { + # Add error message } - + # convert cloud_area_fraction to hourly (just copy 6 hourly values over past 6-hour time period) - if("cloud_area_fraction" %in% cf_var_names){ + if ("cloud_area_fraction" %in% cf_var_names) { cloud_area_fraction.flux.hrly <- downscale_repeat_6hr_to_half_hrly(df = noaa_data, varName = "cloud_area_fraction") forecast_noaa_ds <- dplyr::inner_join(forecast_noaa_ds, cloud_area_fraction.flux.hrly, by = "time") - }else{ - #Add error message + } else { + # Add error message } - + # use solar geometry to convert shortwave from 6 hr to 1 hr - if("surface_downwelling_shortwave_flux_in_air" %in% cf_var_names){ + if ("surface_downwelling_shortwave_flux_in_air" %in% cf_var_names) { ShortWave.hrly <- downscale_ShortWave_to_half_hrly(df = noaa_data, lat = lat.in, lon = lon.in) forecast_noaa_ds <- dplyr::inner_join(forecast_noaa_ds, ShortWave.hrly, by = "time") - }else{ - #Add error message + } else { + # Add error message } - - #Add dummy ensemble number to work with write_noaa_gefs_netcdf() + + # Add dummy ensemble number to work with write_noaa_gefs_netcdf() forecast_noaa_ds$NOAA.member <- ens - - #Make sure var names are in correct order + + # Make sure var names are in correct order forecast_noaa_ds <- forecast_noaa_ds %>% dplyr::select("time", tidyselect::all_of(cf_var_names), "NOAA.member") - - #Write netCDF - PEcAn.data.atmosphere::write_noaa_gefs_netcdf(df = forecast_noaa_ds, - ens = ens, - lat = lat.in, - lon = lon.in, - cf_units = var_units, - output_file = output_file, - overwrite = overwrite) - -} #temporal_downscale + + # Write netCDF + PEcAn.data.atmosphere::write_noaa_gefs_netcdf( + df = forecast_noaa_ds, + ens = ens, + lat = lat.in, + lon = lon.in, + cf_units = var_units, + output_file = output_file, + overwrite = overwrite + ) +} # temporal_downscale #' @title Downscale spline to half hourly @@ -161,29 +162,29 @@ temporal_downscale_half_hour <- function(input_file, output_file, overwrite = TR #' @export #' -downscale_spline_to_half_hrly <- function(df,VarNames, hr = 0.5){ +downscale_spline_to_half_hrly <- function(df, VarNames, hr = 0.5) { time <- NULL - t0 = min(df$time) + t0 <- min(df$time) df <- df %>% dplyr::mutate(days_since_t0 = difftime(.data$time, t0, units = "days")) - - interp.df.days <- seq(min(df$days_since_t0), as.numeric(max(df$days_since_t0)), 1/(24/hr)) - + + interp.df.days <- seq(min(df$days_since_t0), as.numeric(max(df$days_since_t0)), 1 / (24 / hr)) + noaa_data_interp <- tibble::tibble(time = lubridate::as_datetime(t0 + interp.df.days, tz = "UTC")) - - for(Var in 1:length(VarNames)){ + + for (Var in 1:length(VarNames)) { curr_data <- stats::spline(x = df$days_since_t0, y = unlist(df[VarNames[Var]]), method = "fmm", xout = interp.df.days)$y noaa_data_interp <- 
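# The interpolation step above in isolation: fit a cubic spline through the coarse
# values and evaluate it on a half-hourly grid. Toy numbers, single variable,
# purely illustrative.
hrs  <- 0:5                                  # hourly stamps (days since t0 work the same way)
temp <- c(280, 281.5, 283, 284, 283.5, 282)  # e.g. air_temperature in K
xout <- seq(0, 5, by = 0.5)                  # half-hourly grid (hr = 0.5)
stats::spline(x = hrs, y = temp, method = "fmm", xout = xout)$y # 11 interpolated values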
cbind(noaa_data_interp, curr_data) } - - names(noaa_data_interp) <- c("time",VarNames) - + + names(noaa_data_interp) <- c("time", VarNames) + return(noaa_data_interp) } #' @title Downscale shortwave to half hourly #' @return A dataframe of downscaled state variables -#' +#' #' @param df data frame of variables #' @param lat lat of site #' @param lon long of site @@ -194,44 +195,44 @@ downscale_spline_to_half_hrly <- function(df,VarNames, hr = 0.5){ #' @export #' -downscale_ShortWave_to_half_hrly <- function(df,lat, lon, hr = 0.5){ +downscale_ShortWave_to_half_hrly <- function(df, lat, lon, hr = 0.5) { ## downscale shortwave to half hourly - + t0 <- min(df$time) df <- df %>% dplyr::select("time", "surface_downwelling_shortwave_flux_in_air") %>% dplyr::mutate(days_since_t0 = difftime(.data$time, t0, units = "days")) %>% dplyr::mutate(hour = lubridate::hour(.data$time)) %>% - dplyr::mutate(day = lubridate::date(.data$time))#%>% - #dplyr::mutate(lead_var = dplyr::lead(.data$surface_downwelling_shortwave_flux_in_air, 1)) - - interp.df.days <- seq(min(df$days_since_t0), as.numeric(max(df$days_since_t0)), 1/(24/hr)) - + dplyr::mutate(day = lubridate::date(.data$time)) # %>% + # dplyr::mutate(lead_var = dplyr::lead(.data$surface_downwelling_shortwave_flux_in_air, 1)) + + interp.df.days <- seq(min(df$days_since_t0), as.numeric(max(df$days_since_t0)), 1 / (24 / hr)) + noaa_data_interp <- tibble::tibble(time = lubridate::as_datetime(t0 + interp.df.days)) - + data.hrly <- noaa_data_interp %>% dplyr::left_join(df, by = "time") - + data.hrly$surface_downwelling_shortwave_flux_in_air <- NA data.hrly$day <- lubridate::date(data.hrly$time) data.hrly$hour <- lubridate::hour(data.hrly$time) - data.hrly$hourmin <- lubridate::hour(data.hrly$time) + lubridate::minute(data.hrly$time)/60 - data.hrly$doyH <- lubridate::yday(data.hrly$time) + data.hrly$hour/(24/hr) - data.hrly$doyHM <- lubridate::yday(data.hrly$time) + data.hrly$hourmin/(24/hr) + data.hrly$hourmin <- lubridate::hour(data.hrly$time) + lubridate::minute(data.hrly$time) / 60 + data.hrly$doyH <- lubridate::yday(data.hrly$time) + data.hrly$hour / (24 / hr) + data.hrly$doyHM <- lubridate::yday(data.hrly$time) + data.hrly$hourmin / (24 / hr) data.hrly$rpotH <- downscale_solar_geom_halfhour(data.hrly$doyH, as.vector(lon), as.vector(lat)) data.hrly$rpotHM <- downscale_solar_geom_halfhour(data.hrly$doyHM, as.vector(lon), as.vector(lat)) - + for (k in 1:nrow(data.hrly)) { - if(is.na(data.hrly$surface_downwelling_shortwave_flux_in_air[k])){ + if (is.na(data.hrly$surface_downwelling_shortwave_flux_in_air[k])) { SWflux <- as.matrix(subset(df, .data$day == data.hrly$day[k] & .data$hour == data.hrly$hour[k], data.hrly$surface_downwelling_shortwave_flux_in_air[k])) - data.hrly$surface_downwelling_shortwave_flux_in_air[k] <- ifelse(data.hrly$rpotHM[k] > 0, as.numeric(SWflux[1])*(data.hrly$rpotH[k]/data.hrly$rpotHM[k]),0) + data.hrly$surface_downwelling_shortwave_flux_in_air[k] <- ifelse(data.hrly$rpotHM[k] > 0, as.numeric(SWflux[1]) * (data.hrly$rpotH[k] / data.hrly$rpotHM[k]), 0) } } - - #ShortWave.ds <- dplyr::select(data.hrly, time, surface_downwelling_shortwave_flux_in_air) + + # ShortWave.ds <- dplyr::select(data.hrly, time, surface_downwelling_shortwave_flux_in_air) ShortWave.ds <- data.hrly %>% dplyr::select("time", "surface_downwelling_shortwave_flux_in_air") # data.hrly$group_6hr <- NA - # + # # group <- 0 # for(i in 1:nrow(data.hrly)){ # if(!is.na(data.hrly$lead_var[i])){ @@ -244,7 +245,7 @@ downscale_ShortWave_to_half_hrly <- function(df,lat, lon, hr = 
0.5){ # data.hrly$group_6hr[i] <- data.hrly$group_6hr[i-1] # } # } - # + # # ShortWave.ds <- data.hrly %>% # dplyr::mutate(hour = lubridate::hour(.data$time) + lubridate::minute(.data$time)/60) %>% # dplyr::mutate(doy = lubridate::yday(.data$time) + .data$hour/(24/hr))%>% @@ -256,7 +257,6 @@ downscale_ShortWave_to_half_hrly <- function(df,lat, lon, hr = 0.5){ # dplyr::select(.data$time, .data$surface_downwelling_shortwave_flux_in_air) return(ShortWave.ds) - } #' @title Downscale repeat to half hourly @@ -269,57 +269,59 @@ downscale_ShortWave_to_half_hrly <- function(df,lat, lon, hr = 0.5){ #' @export #' -downscale_repeat_6hr_to_half_hrly <- function(df, varName, hr = 0.5){ - - #bind variables +downscale_repeat_6hr_to_half_hrly <- function(df, varName, hr = 0.5) { + # bind variables lead_var <- time <- NULL - #Get first time point + # Get first time point t0 <- min(df$time) - + df <- df %>% dplyr::select("time", tidyselect::all_of(varName)) %>% - #Calculate time difference + # Calculate time difference dplyr::mutate(days_since_t0 = difftime(.data$time, t0, units = "days")) %>% - #Shift valued back because the 6hr value represents the average over the - #previous 6hr period - dplyr::mutate(lead_var = dplyr::lead(df[,varName], 1)) - #check for NA values and gapfill using closest timestep - for(k in 1:dim(df)[1]){ + # Shift valued back because the 6hr value represents the average over the + # previous 6hr period + dplyr::mutate(lead_var = dplyr::lead(df[, varName], 1)) + # check for NA values and gapfill using closest timestep + for (k in 1:dim(df)[1]) { if (is.na(df$lead_var[k])) { - df$lead_var[k] <- df$lead_var[k-1] - }else{ + df$lead_var[k] <- df$lead_var[k - 1] + } else { df$lead_var[k] <- df$lead_var[k] } } - - #Create new vector with all hours - interp.df.days <- seq(from = min(df$days_since_t0), - to = as.numeric(max(df$days_since_t0)), - by = 1 / (24 / hr)) - - #Create new data frame + + # Create new vector with all hours + interp.df.days <- seq( + from = min(df$days_since_t0), + to = as.numeric(max(df$days_since_t0)), + by = 1 / (24 / hr) + ) + + # Create new data frame noaa_data_interp <- tibble::tibble(time = lubridate::as_datetime(t0 + interp.df.days)) - - #Join 1 hr data frame with 6 hr data frame + + # Join 1 hr data frame with 6 hr data frame data.hrly <- noaa_data_interp %>% dplyr::left_join(df, by = "time") - - #Fill in hours + + # Fill in hours curr <- vector() - for(i in 1:nrow(data.hrly)){ - if(is.na(data.hrly$lead_var[i])){ - curr[i] <- data.hrly$lead_var[i-1] - }else{ + for (i in 1:nrow(data.hrly)) { + if (is.na(data.hrly$lead_var[i])) { + curr[i] <- data.hrly$lead_var[i - 1] + } else { curr[i] <- data.hrly$lead_var[i] } } data.hrly$curr <- curr - #Clean up data frame - data.hrly <- data.hrly %>% dplyr::select("time", "curr") %>% + # Clean up data frame + data.hrly <- data.hrly %>% + dplyr::select("time", "curr") %>% dplyr::arrange(.data$time) - + names(data.hrly) <- c("time", varName) - + return(data.hrly) } @@ -329,16 +331,15 @@ downscale_repeat_6hr_to_half_hrly <- function(df, varName, hr = 0.5){ #' @param lon, longitude #' @param lat, latitude #' @return vector of potential shortwave radiation for each doy -#' +#' #' @author Quinn Thomas #' @export #' #' downscale_solar_geom_halfhour <- function(doy, lon, lat) { - dt <- stats::median(diff(doy)) * 86400 # average number of seconds in time interval hr <- (doy - floor(doy)) * 48 # hour of day for each element of doy - + ## calculate potential radiation cosz <- cos_solar_zenith_angle(doy, lat, lon, dt, hr) rpot <- 1366 * 
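# How the shortwave scaling in downscale_ShortWave_to_half_hrly above works, in
# one line of algebra: the 6-hourly mean flux is multiplied by the ratio of
# potential radiation at the half-hour to potential radiation at the coarse step,
# so sub-hourly values follow the solar cycle. A toy version, assuming the cosine
# solar zenith angles are already in hand; all numbers are invented.
sw_coarse   <- 320                      # W m-2, 6-hourly mean
cosz_fine   <- 0.62                     # at the half-hour of interest
cosz_coarse <- 0.48                     # over the coarse interval
rpot_fine   <- 1366 * cosz_fine         # potential radiation, W m-2
rpot_coarse <- 1366 * cosz_coarse
sw_fine <- if (rpot_coarse > 0) sw_coarse * (rpot_fine / rpot_coarse) else 0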
cosz diff --git a/modules/data.atmosphere/R/lightME.R b/modules/data.atmosphere/R/lightME.R index 842263a718e..038d1853b8e 100644 --- a/modules/data.atmosphere/R/lightME.R +++ b/modules/data.atmosphere/R/lightME.R @@ -1,4 +1,3 @@ - ##' Simulates the light macro environment ##' ##' Simulates light macro environment based on latitude, day of the year. @@ -22,41 +21,42 @@ ##' } ##' @keywords models lightME <- function(lat = 40, DOY = 190, t.d = 12, t.sn = 12, atm.P = 1e+05, alpha = 0.85) { - ## The equations used here can be found in - ## https://web.archive.org/web/20170803073722/http://www.life.illinois.edu/plantbio/wimovac/newpage9.htm + ## https://web.archive.org/web/20170803073722/http://www.life.illinois.edu/plantbio/wimovac/newpage9.htm ## The original source is Monteith and Unsworth ## Monteith, John, and Mike Unsworth. Principles of environmental physics. Academic Press, 2013. Dtr <- (pi / 180) - + omega <- lat * Dtr - + delta0 <- 360 * (DOY + 10) / 365 - delta <- -23.5 * cos(delta0 * Dtr) + delta <- -23.5 * cos(delta0 * Dtr) deltaR <- delta * Dtr - t.f <- (15 * (t.d - t.sn)) * Dtr - SSin <- sin(deltaR) * sin(omega) - CCos <- cos(deltaR) * cos(omega) + t.f <- (15 * (t.d - t.sn)) * Dtr + SSin <- sin(deltaR) * sin(omega) + CCos <- cos(deltaR) * cos(omega) CosZenithAngle0 <- SSin + CCos * cos(t.f) - CosZenithAngle <- ifelse(CosZenithAngle0 <= 10 ^ -10, 1e-10, CosZenithAngle0) - - CosHour <- -tan(omega) * tan(deltaR) + CosZenithAngle <- ifelse(CosZenithAngle0 <= 10^-10, 1e-10, CosZenithAngle0) + + CosHour <- -tan(omega) * tan(deltaR) CosHourDeg <- (1 / Dtr) * (CosHour) - CosHour <- ifelse(CosHourDeg < -57, -0.994, CosHour) - Daylength <- 2 * (1 / Dtr) * (acos(CosHour)) / 15 - SunUp <- 12 - Daylength / 2 - SunDown <- 12 + Daylength / 2 + CosHour <- ifelse(CosHourDeg < -57, -0.994, CosHour) + Daylength <- 2 * (1 / Dtr) * (acos(CosHour)) / 15 + SunUp <- 12 - Daylength / 2 + SunDown <- 12 + Daylength / 2 SinSolarElevation <- CosZenithAngle - SolarElevation <- (1 / Dtr) * (asin(SinSolarElevation)) - - PP.o <- 10 ^ 5 / atm.P + SolarElevation <- (1 / Dtr) * (asin(SinSolarElevation)) + + PP.o <- 10^5 / atm.P Solar_Constant <- 2650 ## Notice the difference with the website for the eq below - I.dir <- Solar_Constant * (alpha ^ ((PP.o) / CosZenithAngle)) - I.diff <- 0.3 * Solar_Constant * (1 - alpha ^ ((PP.o) / CosZenithAngle)) * CosZenithAngle + I.dir <- Solar_Constant * (alpha^((PP.o) / CosZenithAngle)) + I.diff <- 0.3 * Solar_Constant * (1 - alpha^((PP.o) / CosZenithAngle)) * CosZenithAngle propIdir <- I.dir / (I.dir + I.diff) propIdiff <- I.diff / (I.dir + I.diff) - - return(list(I.dir = I.dir, I.diff = I.diff, cos.th = CosZenithAngle, - propIdir = propIdir, propIdiff = propIdiff)) + + return(list( + I.dir = I.dir, I.diff = I.diff, cos.th = CosZenithAngle, + propIdir = propIdir, propIdiff = propIdiff + )) } # lightME diff --git a/modules/data.atmosphere/R/load.cfmet.R b/modules/data.atmosphere/R/load.cfmet.R index f8a9e0cee56..29c846ae235 100644 --- a/modules/data.atmosphere/R/load.cfmet.R +++ b/modules/data.atmosphere/R/load.cfmet.R @@ -1,4 +1,3 @@ - ##' Load met data from PEcAn formatted met driver ##' ##' subsets a PEcAn formatted met driver file and converts to a data.frame object @@ -14,33 +13,33 @@ ##' @export ##' @author David LeBauer load.cfmet <- function(met.nc, lat, lon, start.date, end.date) { - ## Lat and Lon Lat <- ncdf4::ncvar_get(met.nc, "latitude") Lon <- ncdf4::ncvar_get(met.nc, "longitude") - if(min(abs(Lat-lat)) > 2.5 | min(abs(Lon-lon)) > 2.5){ - 
PEcAn.logger::logger.severe("lat / lon (", lat, ",", lon, ") outside range of met file (", range(Lat), ",", range(Lon)) + if (min(abs(Lat - lat)) > 2.5 | min(abs(Lon - lon)) > 2.5) { + PEcAn.logger::logger.severe("lat / lon (", lat, ",", lon, ") outside range of met file (", range(Lat), ",", range(Lon)) } lati <- which.min(abs(Lat - lat)) loni <- which.min(abs(Lon - lon)) - start.date <- lubridate::parse_date_time(start.date, tz = "UTC", orders=c("ymd_HMSz", "ymd_HMS", "ymd_H", "ymd")) + start.date <- lubridate::parse_date_time(start.date, tz = "UTC", orders = c("ymd_HMSz", "ymd_HMS", "ymd_H", "ymd")) # If end.date is provided as a datetime, assume it's the exact time to stop. # if it's a date, assume we want the whole final day. end.date <- tryCatch( lubridate::parse_date_time(end.date, tz = "UTC", orders = c("ymdHMSz", "ymdHMS")), - warning = function(w){ - lubridate::parse_date_time(paste(end.date, "23:59:59"), tz = "UTC", orders = "ymdHMS")} + warning = function(w) { + lubridate::parse_date_time(paste(end.date, "23:59:59"), tz = "UTC", orders = "ymdHMS") + } ) time.idx <- ncdf4::ncvar_get(met.nc, "time") ## confirm that time units are PEcAn standard basetime.string <- ncdf4::ncatt_get(met.nc, "time", "units")$value - base.date <- lubridate::parse_date_time(basetime.string, c("ymd_HMSz", "ymd_HMS", "ymd_H", "ymd")) - base.units <- strsplit(basetime.string, " since ")[[1]][1] + base.date <- lubridate::parse_date_time(basetime.string, c("ymd_HMSz", "ymd_HMS", "ymd_H", "ymd")) + base.units <- strsplit(basetime.string, " since ")[[1]][1] ## convert to days if (base.units != "days") { @@ -50,34 +49,39 @@ load.cfmet <- function(met.nc, lat, lon, start.date, end.date) { date <- as.POSIXct.numeric(round(time.idx), origin = base.date, tz = "UTC") all.dates <- data.frame(index = seq_along(time.idx), date = date) - + if (start.date + lubridate::days(1) < min(all.dates$date)) { - PEcAn.logger::logger.severe("run start date", start.date, "before met data starts", min(all.dates$date)) + PEcAn.logger::logger.severe("run start date", start.date, "before met data starts", min(all.dates$date)) } if (end.date > max(all.dates$date)) { - PEcAn.logger::logger.severe("run end date", end.date, "after met data ends", max(all.dates$date)) + PEcAn.logger::logger.severe("run end date", end.date, "after met data ends", max(all.dates$date)) } - + run.dates <- all.dates %>% dplyr::filter(.data$date >= start.date & .data$date <= end.date) %>% dplyr::mutate( doy = lubridate::yday(.data$date), year = lubridate::year(.data$date), month = lubridate::month(.data$date), - day = lubridate::day(.data$date), - hour = lubridate::hour(.data$date) + lubridate::minute(.data$date) / 60) + day = lubridate::day(.data$date), + hour = lubridate::hour(.data$date) + lubridate::minute(.data$date) / 60 + ) results <- list() ## pressure naming hack pending https://github.com/ebimodeling/model-drivers/issues/2 standard_names <- append(as.character(PEcAn.utils::standard_vars$standard_name), "surface_pressure") - variables <- as.character(standard_names[standard_names %in% - c("surface_pressure", attributes(met.nc$var)$names)]) - - - vars <- lapply(variables, function(x) get.ncvector(x, lati = lati, loni = loni, - run.dates = run.dates, met.nc = met.nc)) + variables <- as.character(standard_names[standard_names %in% + c("surface_pressure", attributes(met.nc$var)$names)]) + + + vars <- lapply(variables, function(x) { + get.ncvector(x, + lati = lati, loni = loni, + run.dates = run.dates, met.nc = met.nc + ) + }) names(vars) <- 
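# The end.date handling above in isolation: a full datetime is used as-is, while a
# bare date falls through the warning handler and expands to the end of that day.
# The helper name is invented; the parsing calls mirror the code above.
parse_end <- function(end.date) {
  tryCatch(
    lubridate::parse_date_time(end.date, tz = "UTC", orders = c("ymdHMSz", "ymdHMS")),
    warning = function(w) {
      lubridate::parse_date_time(paste(end.date, "23:59:59"), tz = "UTC", orders = "ymdHMS")
    }
  )
}
parse_end("2004-12-31 06:00:00") # stops at 06:00 UTC
parse_end("2004-12-31")          # expands to 2004-12-31 23:59:59 UTC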
gsub("surface_pressure", "air_pressure", variables) diff --git a/modules/data.atmosphere/R/merge.met.variable.R b/modules/data.atmosphere/R/merge.met.variable.R index dbd369f3769..6e67407b345 100644 --- a/modules/data.atmosphere/R/merge.met.variable.R +++ b/modules/data.atmosphere/R/merge.met.variable.R @@ -1,7 +1,7 @@ #' Merge a new met variable from an external file (e.g. CO2) into existing met files #' -#' Currently modifies the files IN PLACE rather than creating a new copy of the files an a new DB record. -#' Currently unit and name checking only implemented for CO2. +#' Currently modifies the files IN PLACE rather than creating a new copy of the files an a new DB record. +#' Currently unit and name checking only implemented for CO2. #' Currently does not yet support merge data that has lat/lon #' New variable only has time dimension and thus MIGHT break downstream code.... #' @@ -9,9 +9,9 @@ #' @param in.prefix prefix of original data #' @param start_date,end_date date (or character in a standard date format). Only year component is used. #' @param merge.file path of file to be merged in -#' @param overwrite logical: replace output file if it already exists? +#' @param overwrite logical: replace output file if it already exists? #' @param verbose logical: should \code{\link[ncdf4:ncdf4-package]{ncdf4}} functions -#' print debugging information as they run? +#' print debugging information as they run? #' @param ... other arguments, currently ignored #' #' @return Currently nothing. TODO: Return a data frame summarizing the merged files. @@ -19,121 +19,128 @@ #' #' @examples #' \dontrun{ -#' in.path <- "~/paleon/PalEONregional_CF_site_1-24047/" -#' in.prefix <- "" -#' outfolder <- "~/paleon/metTest/" +#' in.path <- "~/paleon/PalEONregional_CF_site_1-24047/" +#' in.prefix <- "" +#' outfolder <- "~/paleon/metTest/" #' merge.file <- "~/paleon/paleon_monthly_co2.nc" #' start_date <- "0850-01-01" -#' end_date <- "2010-12-31" -#' overwrite <- FALSE -#' verbose <- TRUE -#' -#' merge_met_variable(in.path,in.prefix,start_date,end_date,merge.file,overwrite,verbose) -#' PEcAn.DALEC::met2model.DALEC(in.path,in.prefix,outfolder,start_date,end_date) +#' end_date <- "2010-12-31" +#' overwrite <- FALSE +#' verbose <- TRUE +#' +#' merge_met_variable(in.path, in.prefix, start_date, end_date, merge.file, overwrite, verbose) +#' PEcAn.DALEC::met2model.DALEC(in.path, in.prefix, outfolder, start_date, end_date) #' } -merge_met_variable <- function(in.path,in.prefix,start_date, end_date, merge.file, - overwrite = FALSE, verbose = FALSE, ...){ - +merge_met_variable <- function(in.path, in.prefix, start_date, end_date, merge.file, + overwrite = FALSE, verbose = FALSE, ...) 
{ # get start/end year code works on whole years only start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - if(nchar(in.prefix)>0) in.prefix <- paste0(in.prefix,".") - + end_year <- lubridate::year(end_date) + if (nchar(in.prefix) > 0) in.prefix <- paste0(in.prefix, ".") + ## open and parse file to be merged in merge.nc <- ncdf4::nc_open(merge.file) merge.vars <- names(merge.nc$var) - merge.attr <- ncdf4::ncatt_get(merge.nc,varid = merge.vars[1]) - merge.time <- ncdf4::ncvar_get(merge.nc,"time") - merge.time.attr <- ncdf4::ncatt_get(merge.nc,"time") - merge.data <- ncdf4::ncvar_get(merge.nc,varid = merge.vars[1]) - + merge.attr <- ncdf4::ncatt_get(merge.nc, varid = merge.vars[1]) + merge.time <- ncdf4::ncvar_get(merge.nc, "time") + merge.time.attr <- ncdf4::ncatt_get(merge.nc, "time") + merge.data <- ncdf4::ncvar_get(merge.nc, varid = merge.vars[1]) + origin <- "1970-01-01 00:00:00 UTC" - merge.time.std <- PEcAn.utils::ud_convert(merge.time, - merge.time.attr$units, - paste0("seconds since ",origin)) - - merge.time.std <- as.POSIXct(merge.time.std,tz = "UTC",origin=origin) + merge.time.std <- PEcAn.utils::ud_convert( + merge.time, + merge.time.attr$units, + paste0("seconds since ", origin) + ) + + merge.time.std <- as.POSIXct(merge.time.std, tz = "UTC", origin = origin) merge.years <- unique(lubridate::year(merge.time.std)) - + # check dates - if(lubridate::year(merge.time.std[1]) > start_year){ - PEcAn.logger::logger.error("merge.time > start_year", merge.time.std[1],start_date) + if (lubridate::year(merge.time.std[1]) > start_year) { + PEcAn.logger::logger.error("merge.time > start_year", merge.time.std[1], start_date) ncdf4::nc_close(merge.nc) return(NULL) } - if(lubridate::year(utils::tail(merge.time.std,1)) < end_year){ - PEcAn.logger::logger.error("merge.time < end_year", utils::tail(merge.time.std,1),end_date) + if (lubridate::year(utils::tail(merge.time.std, 1)) < end_year) { + PEcAn.logger::logger.error("merge.time < end_year", utils::tail(merge.time.std, 1), end_date) ncdf4::nc_close(merge.nc) return(NULL) } - + # check lat/lon merge.dims <- names(merge.nc$dim) byLatLon <- FALSE - if(length(grep("^lat",merge.dims))>0 & length(grep("^lon",merge.dims))>0){ + if (length(grep("^lat", merge.dims)) > 0 & length(grep("^lon", merge.dims)) > 0) { byLatLon <- TRUE - } - + } + ## close merge file ncdf4::nc_close(merge.nc) - + ## name and variable conversions - if(toupper(merge.vars[1]) == "CO2"){ + if (toupper(merge.vars[1]) == "CO2") { merge.vars[1] <- "mole_fraction_of_carbon_dioxide_in_air" merge.data <- PEcAn.utils::ud_convert(merge.data, merge.attr$units, "mol/mol") - merge.attr$units = "mol/mol" + merge.attr$units <- "mol/mol" } - + ## prep data structure for results rows <- end_year - start_year + 1 - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = in.prefix, - stringsAsFactors = FALSE) - + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = in.prefix, + stringsAsFactors = FALSE + ) + for (year in start_year:end_year) { old.file <- file.path(in.path, paste0(in.prefix, year, ".nc")) -# new.file <- file.path(outfolder, paste(in.prefix, year, "nc", sep = ".")) - + # new.file <- file.path(outfolder, paste(in.prefix, year, "nc", sep = ".")) + 
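# The time handling above, reduced to its essentials: netCDF time values plus
# their "units" attribute are converted to seconds since a fixed origin and then
# to POSIXct, after which years can be compared directly. The example units and
# values are invented; PEcAn.utils::ud_convert is the same helper used above.
origin     <- "1970-01-01 00:00:00 UTC"
merge.time <- c(0, 31, 60) # as if the file said "days since 2000-01-01"
secs <- PEcAn.utils::ud_convert(merge.time, "days since 2000-01-01", paste0("seconds since ", origin))
as.POSIXct(secs, tz = "UTC", origin = origin) # 2000-01-01, 2000-02-01, 2000-03-01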
## subset merged data merge.sel <- which(lubridate::year(merge.time.std) == year) - merge.sub <- data.frame(time=merge.time.std[merge.sel],data = merge.data[merge.sel]) - + merge.sub <- data.frame(time = merge.time.std[merge.sel], data = merge.data[merge.sel]) + ## open target file - nc <- ncdf4::nc_open(old.file,write = TRUE) - - if(merge.vars[1] %in% names(nc$var)) { - PEcAn.logger::logger.info("variable already exists",merge.vars[1]) + nc <- ncdf4::nc_open(old.file, write = TRUE) + + if (merge.vars[1] %in% names(nc$var)) { + PEcAn.logger::logger.info("variable already exists", merge.vars[1]) ncdf4::nc_close(nc) next } - - ##extract target time - target.time <- ncdf4::ncvar_get(nc,"time") - target.time.attr <- ncdf4::ncatt_get(nc,"time") - target.time.std <- PEcAn.utils::ud_convert(target.time, - target.time.attr$units, - paste0("seconds since ",origin)) - target.time.std <- as.POSIXct(target.time.std,tz = "UTC",origin=origin) - - + + ## extract target time + target.time <- ncdf4::ncvar_get(nc, "time") + target.time.attr <- ncdf4::ncatt_get(nc, "time") + target.time.std <- PEcAn.utils::ud_convert( + target.time, + target.time.attr$units, + paste0("seconds since ", origin) + ) + target.time.std <- as.POSIXct(target.time.std, tz = "UTC", origin = origin) + + ## interpolate merged data to target time - merge.interp <- stats::approx(merge.sub$time,merge.sub$data, xout = target.time.std, - rule = 2, method = "linear", ties = mean) - + merge.interp <- stats::approx(merge.sub$time, merge.sub$data, + xout = target.time.std, + rule = 2, method = "linear", ties = mean + ) + ## insert new variable - var.merge <- ncdf4::ncvar_def(name = merge.vars[1], units = merge.attr$units, dim = nc$dim$time, - missval = merge.attr$`_FillValue`, verbose = verbose) + var.merge <- ncdf4::ncvar_def( + name = merge.vars[1], units = merge.attr$units, dim = nc$dim$time, + missval = merge.attr$`_FillValue`, verbose = verbose + ) nc <- ncdf4::ncvar_add(nc = nc, v = var.merge, verbose = verbose) ncdf4::ncvar_put(nc = nc, varid = merge.vars[1], vals = merge.interp$y) - + ## close file ncdf4::nc_close(nc) - } ## end loop over year - } diff --git a/modules/data.atmosphere/R/met.process.R b/modules/data.atmosphere/R/met.process.R index 047defde304..abce810cef5 100644 --- a/modules/data.atmosphere/R/met.process.R +++ b/modules/data.atmosphere/R/met.process.R @@ -1,7 +1,7 @@ ##' met.process ##' ##' @param site Site info from settings file -##' @param input_met Which data source to process. +##' @param input_met Which data source to process. ##' @param start_date the start date of the data to be downloaded (will only use the year part of the date) ##' @param end_date the end date of the data to be downloaded (will only use the year part of the date) ##' @param model model_type name @@ -10,9 +10,9 @@ ##' @param dir directory to write outputs to ##' @param spin spin-up settings passed to model-specific met2model. List containing nyear (number of years of spin-up), nsample (first n years to cycle), and resample (TRUE/FALSE) ##' @param overwrite Whether to force met.process to proceed. -##' -##' -##' `overwrite` may be a list with individual components corresponding to +##' +##' +##' `overwrite` may be a list with individual components corresponding to ##' `download`, `met2cf`, `standardize`, and `met2model`. If it is instead a simple boolean, ##' the default behavior for `overwrite=FALSE` is to overwrite nothing, as you might expect. 
##' Note however that the default behavior for `overwrite=TRUE` is to overwrite everything @@ -24,40 +24,38 @@ ##' @export ##' @author Elizabeth Cowdery, Michael Dietze, Ankur Desai, James Simkins, Ryan Kelly met.process <- function(site, input_met, start_date, end_date, model, - host = "localhost", dbparms, dir, spin=NULL, + host = "localhost", dbparms, dir, spin = NULL, overwrite = FALSE) { - - # get met source and potentially determine where to start in the process - if(is.null(input_met$source)){ - if(is.null(input_met$id)){ + if (is.null(input_met$source)) { + if (is.null(input_met$id)) { PEcAn.logger::logger.warn("met.process only has a path provided, assuming path is model driver and skipping processing") - + # Additional layer of list depth added for consistancy with other return statements. - temp_path = input_met$path + temp_path <- input_met$path input_met$path <- list() input_met$path$path1 <- temp_path return(input_met) - }else { - PEcAn.logger::logger.warn("No met source specified") - if(!is.null(input_met$id) & !is.null(input_met$path)){ - PEcAn.logger::logger.warn("Assuming source CFmet") + } else { + PEcAn.logger::logger.warn("No met source specified") + if (!is.null(input_met$id) & !is.null(input_met$path)) { + PEcAn.logger::logger.warn("Assuming source CFmet") met <- input_met$source <- "CFmet" ## this case is normally hit when the use provides an existing file that has already been ## downloaded, processed, and just needs conversion to model-specific format. ## setting a 'safe' (global) default } else { - PEcAn.logger::logger.error("Cannot process met without source information") - } + PEcAn.logger::logger.error("Cannot process met without source information") + } } } else { - met <-input_met$source + met <- input_met$source } - + # If overwrite is a plain boolean, fill in defaults for each stage if (!is.list(overwrite)) { if (overwrite) { # Default for overwrite==TRUE is to overwrite everything but download - overwrite <- list(download = FALSE, met2cf = TRUE, standardize = TRUE, met2model = TRUE) + overwrite <- list(download = FALSE, met2cf = TRUE, standardize = TRUE, met2model = TRUE) } else { overwrite <- list(download = FALSE, met2cf = FALSE, standardize = FALSE, met2model = FALSE) } @@ -77,16 +75,17 @@ met.process <- function(site, input_met, start_date, end_date, model, } overwrite.check <- unlist(overwrite) for (i in seq_along(overwrite.check)) { - if (i < length(overwrite.check) && - overwrite.check[i] == TRUE && - !all(overwrite.check[(i + 1):length(overwrite.check)])) { + if (i < length(overwrite.check) && + overwrite.check[i] == TRUE && + !all(overwrite.check[(i + 1):length(overwrite.check)])) { PEcAn.logger::logger.debug(overwrite) PEcAn.logger::logger.error( "If overwriting any stage of met.process, ", - "all subsequent stages need to be overwritten too. Please correct.") + "all subsequent stages need to be overwritten too. Please correct." 
+ ) } } - + # set up connection and host information con <- PEcAn.DB::db.open(dbparms) @@ -98,47 +97,53 @@ met.process <- function(site, input_met, start_date, end_date, model, # read in registration xml for met specific information register.xml <- system.file(paste0("registration/register.", met, ".xml"), package = "PEcAn.data.atmosphere") - register <- read.register(register.xml, con) + register <- read.register(register.xml, con) # first attempt at function that designates where to start met.process if (is.null(input_met$id)) { stage <- list(download.raw = TRUE, met2cf = TRUE, standardize = TRUE, met2model = TRUE) - format.vars <- PEcAn.DB::query.format.vars(bety = con, format.id = register$format$id) # query variable info from format id + format.vars <- PEcAn.DB::query.format.vars(bety = con, format.id = register$format$id) # query variable info from format id } else { - stage <- met.process.stage(input.id=input_met$id, raw.id=register$format$id, con) - format.vars <- PEcAn.DB::query.format.vars(bety = con, input.id = input_met$id) # query DB to get format variable information if available - # Is there a situation in which the input ID could be given but not the file path? + stage <- met.process.stage(input.id = input_met$id, raw.id = register$format$id, con) + format.vars <- PEcAn.DB::query.format.vars(bety = con, input.id = input_met$id) # query DB to get format variable information if available + # Is there a situation in which the input ID could be given but not the file path? # I'm assuming not right now - assign(stage$id.name, - list( - input.id = input_met$id, - dbfile.id = PEcAn.DB::dbfile.check("Input", input_met$id, hostname = machine.host, con = - con)$id - )) + assign( + stage$id.name, + list( + input.id = input_met$id, + dbfile.id = PEcAn.DB::dbfile.check("Input", input_met$id, + hostname = machine.host, con = + con + )$id + ) + ) } #--- If the met source is local then there is no need for download - if (!is.null(register$Local)){ + if (!is.null(register$Local)) { if (as.logical(register$Local)) { stage$download.raw <- FALSE stage$local <- TRUE } - }else{ + } else { stage$local <- FALSE } - + PEcAn.logger::logger.debug(stage) - - if(is.null(model)){ + + if (is.null(model)) { stage$model <- FALSE } - - + + # setup site database number, lat, lon and name and copy for format.vars if new input - latlon <- PEcAn.DB::query.site(site$id, con = con)[c("lat", "lon")] - new.site <- data.frame(id = as.numeric(site$id), - lat = latlon$lat, - lon = latlon$lon) + latlon <- PEcAn.DB::query.site(site$id, con = con)[c("lat", "lon")] + new.site <- data.frame( + id = as.numeric(site$id), + lat = latlon$lat, + lon = latlon$lon + ) str_ns <- paste0(new.site$id %/% 1e+09, "-", new.site$id %% 1e+09) - + if (is.null(format.vars$lat)) { format.vars$lat <- new.site$lat } @@ -148,7 +153,7 @@ met.process <- function(site, input_met, start_date, end_date, model, if (is.null(format.vars$site)) { format.vars$site <- new.site$id } - + #--------------------------------------------------------------------------------------------------# # Or met source is either downloadable or it's local . 
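# The overwrite argument documented above can be a single logical or a named list
# with one entry per stage; a plain TRUE is expanded to overwriting everything but
# the download, and the consistency check earlier in this function requires that
# once a stage is overwritten, every later stage is overwritten too. For example,
# redoing standardization and model conversion while keeping the downloaded and
# CF-converted files would use:
overwrite <- list(download = FALSE, met2cf = FALSE, standardize = TRUE, met2model = TRUE)
# whereas list(download = FALSE, met2cf = TRUE, standardize = FALSE, met2model = FALSE)
# would be rejected by the check above.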
# Download raw met @@ -173,42 +178,46 @@ met.process <- function(site, input_met, start_date, end_date, model, overwrite = overwrite$download, site = site, username = username, - dbparms=dbparms + dbparms = dbparms ) - + if (met %in% c("CRUNCEP", "GFDL", "NOAA_GEFS", "MERRA")) { ready.id <- raw.id # input_met$id overwrites ready.id below, needs to be populated here input_met$id <- raw.id stage$met2cf <- FALSE stage$standardize <- FALSE - } - }else if (stage$local){ # In parallel to download met module this needs to check if the files are already downloaded or not + } + } else if (stage$local) { # In parallel to download met module this needs to check if the files are already downloaded or not db.file <- PEcAn.DB::dbfile.input.check( - siteid=new.site$id %>% as.character(), - startdate = start_date %>% as.Date, - enddate = end_date %>% as.Date, + siteid = new.site$id %>% as.character(), + startdate = start_date %>% as.Date(), + enddate = end_date %>% as.Date(), parentid = NA, - mimetype="application/x-netcdf", - formatname="CF Meteorology", + mimetype = "application/x-netcdf", + formatname = "CF Meteorology", con, hostname = PEcAn.remote::fqdn(), exact.dates = TRUE, -# pattern = met, - return.all=TRUE - ) - # If we already had the met downloaded for this site - if (nrow(db.file) >0 ){ + # pattern = met, + return.all = TRUE + ) + # If we already had the met downloaded for this site + if (nrow(db.file) > 0) { cf.id <- raw.id <- db.file - }else{ + } else { # I did this bc dbfile.input.check does not cover the between two time periods situation - mimetypeid <- PEcAn.DB::get.id(table = "mimetypes", colnames = "type_string", - values = "application/x-netcdf", con = con) + mimetypeid <- PEcAn.DB::get.id( + table = "mimetypes", colnames = "type_string", + values = "application/x-netcdf", con = con + ) + + formatid <- PEcAn.DB::get.id( + table = "formats", colnames = c("mimetype_id", "name"), + values = c(mimetypeid, "CF Meteorology"), con = con + ) - formatid <- PEcAn.DB::get.id(table = "formats", colnames = c("mimetype_id", "name"), - values = c(mimetypeid, "CF Meteorology"), con = con) - machine.id <- PEcAn.DB::get.id(table = "machines", "hostname", PEcAn.remote::fqdn(), con) # Finding the tiles. 
raw.tiles <- dplyr::tbl(con, "inputs") %>% @@ -219,14 +228,14 @@ met.process <- function(site, input_met, start_date, end_date, model, .data$format_id == formatid ) %>% dplyr::filter(grepl(met, "name")) %>% - dplyr::inner_join(dplyr::tbl(con, "dbfiles"), by = c('id' = 'container_id')) %>% + dplyr::inner_join(dplyr::tbl(con, "dbfiles"), by = c("id" = "container_id")) %>% dplyr::filter(.data$machine_id == machine.id) %>% dplyr::collect() - + cf.id <- raw.id <- list(input.id = raw.tiles$id.x, dbfile.id = raw.tiles$id.y) } - - stage$met2cf <- FALSE + + stage$met2cf <- FALSE stage$standardize <- TRUE } @@ -234,22 +243,24 @@ met.process <- function(site, input_met, start_date, end_date, model, # Change to CF Standards if (stage$met2cf) { new.site.id <- ifelse(met %in% c("NARR"), register$siteid, site$id) - - cf.id <- .met2cf.module(raw.id = raw.id, - register = register, - met = met, - str_ns = str_ns, - dir = dir, - machine = machine, - site.id = new.site.id, - lat = new.site$lat, lon = new.site$lon, - start_date = start_date, end_date = end_date, - con = con, host = host, - overwrite = overwrite$met2cf, - format.vars = format.vars, - bety = con) + + cf.id <- .met2cf.module( + raw.id = raw.id, + register = register, + met = met, + str_ns = str_ns, + dir = dir, + machine = machine, + site.id = new.site.id, + lat = new.site$lat, lon = new.site$lon, + start_date = start_date, end_date = end_date, + con = con, host = host, + overwrite = overwrite$met2cf, + format.vars = format.vars, + bety = con + ) } else { - if (! met %in% c("ERA5", "FieldObservatory")) cf.id = input_met$id + if (!met %in% c("ERA5", "FieldObservatory")) cf.id <- input_met$id } #--------------------------------------------------------------------------------------------------# @@ -259,36 +270,41 @@ met.process <- function(site, input_met, start_date, end_date, model, ready.id <- list(input.id = NULL, dbfile.id = NULL) for (i in seq_along(cf.id[[1]])) { - if (register$scale == "regional") { #### Site extraction - id_stdized <- .extract.nc.module(cf.id = list(input.id = cf.id$container_id[i], - dbfile.id = cf.id$id[i]), - register = register, - dir = dir, - met = met, - str_ns = str_ns, - site = site, - new.site = new.site, - con = con, - start_date = start_date, - end_date = end_date, - host = host, - overwrite = overwrite$standardize) - # Expand to support ensemble names in the future + id_stdized <- .extract.nc.module( + cf.id = list( + input.id = cf.id$container_id[i], + dbfile.id = cf.id$id[i] + ), + register = register, + dir = dir, + met = met, + str_ns = str_ns, + site = site, + new.site = new.site, + con = con, + start_date = start_date, + end_date = end_date, + host = host, + overwrite = overwrite$standardize + ) + # Expand to support ensemble names in the future } else if (register$scale == "site") { ##### Site Level Processing - id_stdized <- .metgapfill.module(cf.id = list(input.id = cf.id$input.id[i], dbfile.id = cf.id$dbfile.id[i]), - register = register, - dir = dir, - met = met, - str_ns = str_ns, - site = site, new.site = new.site, - con = con, - start_date = start_date, end_date = end_date, - host = host, - overwrite = overwrite$standardize, - ensemble_name = i) + id_stdized <- .metgapfill.module( + cf.id = list(input.id = cf.id$input.id[i], dbfile.id = cf.id$dbfile.id[i]), + register = register, + dir = dir, + met = met, + str_ns = str_ns, + site = site, new.site = new.site, + con = con, + start_date = start_date, end_date = end_date, + host = host, + overwrite = overwrite$standardize, + ensemble_name = i + ) } 
else { # No action taken. These ids will be dropped from ready.id id_stdized <- NULL @@ -296,9 +312,7 @@ met.process <- function(site, input_met, start_date, end_date, model, ready.id$input.id <- c(ready.id$input.id, id_stdized$input.id) ready.id$dbfile.id <- c(ready.id$dbfile.id, id_stdized$dbfile.id) - } # End for loop - } else { ready.id <- input_met$id } @@ -306,46 +320,47 @@ met.process <- function(site, input_met, start_date, end_date, model, #--------------------------------------------------------------------------------------------------# # Prepare for Model if (stage$met2model) { - ## Get Model Registration - reg.model.xml <- system.file(paste0("register.", model, ".xml"), package = paste0("PEcAn.",model)) + reg.model.xml <- system.file(paste0("register.", model, ".xml"), package = paste0("PEcAn.", model)) reg.model <- XML::xmlToList(XML::xmlParse(reg.model.xml)) - - met2model.result = list() + + met2model.result <- list() for (i in seq_along(ready.id[[1]])) { - met2model.result[[i]] <- .met2model.module(ready.id = list(input.id = ready.id$input.id[i], dbfile.id = ready.id$dbfile.id[i]), - model = model, - con = con, - host = host, - dir = dir, - met = met, - str_ns = str_ns, - site = site, - start_date = start_date, - end_date = end_date, - new.site = new.site, - overwrite = overwrite$met2model, - exact.dates = reg.model$exact.dates, - spin = spin, - register = register, - ensemble_name = i) - } + met2model.result[[i]] <- .met2model.module( + ready.id = list(input.id = ready.id$input.id[i], dbfile.id = ready.id$dbfile.id[i]), + model = model, + con = con, + host = host, + dir = dir, + met = met, + str_ns = str_ns, + site = site, + start_date = start_date, + end_date = end_date, + new.site = new.site, + overwrite = overwrite$met2model, + exact.dates = reg.model$exact.dates, + spin = spin, + register = register, + ensemble_name = i + ) + } model.id <- list() model.file.info <- list() model.file <- list() for (i in seq_along(met2model.result)) { - model.id[[i]] <- met2model.result[[i]]$model.id + model.id[[i]] <- met2model.result[[i]]$model.id model.file.info[[i]] <- PEcAn.DB::db.query(paste0("SELECT * from dbfiles where id = ", model.id[[i]]$dbfile.id), con) model.file[[i]] <- file.path(model.file.info[[i]]$file_path, model.file.info[[i]]$file_name) } - - - + + + # met.process now returns the entire $met portion of settings, updated with parellel lists containing # the model-specific data files and their input ids. - + input_met$id <- list() input_met$path <- list() @@ -353,32 +368,30 @@ met.process <- function(site, input_met, start_date, end_date, model, input_met$id[[paste0("id", i)]] <- model.id[[i]]$input.id input_met$path[[as.character(paste0("path", i))]] <- model.file[[i]] } - - } else { # Because current ensemble data cannot reach this else statement, it only supports single source data. 
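# Shape of the updated met settings assembled by the loop above, for a run that
# produced two model-ready files. example_met is a stand-in for the input_met
# entry returned here; the ids and file paths are invented.
example_met <- list(
  id   = list(id1 = 1000000001, id2 = 1000000002),
  path = list(
    path1 = "/data/met/ens1/met.clim",
    path2 = "/data/met/ens2/met.clim"
  )
)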
- PEcAn.logger::logger.info("ready.id",ready.id,machine.host) - model.id <- PEcAn.DB::dbfile.check("Input", ready.id, con, hostname=machine.host) - if(!(is.null(model.id)|length(model.id)==0)) { - model.id$dbfile.id <- model.id$id + PEcAn.logger::logger.info("ready.id", ready.id, machine.host) + model.id <- PEcAn.DB::dbfile.check("Input", ready.id, con, hostname = machine.host) + if (!(is.null(model.id) | length(model.id) == 0)) { + model.id$dbfile.id <- model.id$id model.file.info <- PEcAn.DB::db.query(paste0("SELECT * from dbfiles where id = ", model.id$dbfile.id), con) model.file <- file.path(model.file.info$file_path, model.file.info$file_name) } else { PEcAn.logger::logger.severe("Missing model id.") } - + input_met$path <- list() # for consistancy with the code in the if to this else. input_met$path$path1 <- model.file input_met$id <- model.id$container_id # This is the input id, whereas $id is the dbfile id. - PEcAn.logger::logger.info("model.file = ",model.file,input_met) + PEcAn.logger::logger.info("model.file = ", model.file, input_met) } - + return(input_met) # Returns an updated $met entry for the settings object. } # met.process -################################################################################################################################# +################################################################################################################################# ##' Function to find the site code for a specific tag ##' diff --git a/modules/data.atmosphere/R/met.process.stage.R b/modules/data.atmosphere/R/met.process.stage.R index 29a4317da0f..693a7f8240c 100644 --- a/modules/data.atmosphere/R/met.process.stage.R +++ b/modules/data.atmosphere/R/met.process.stage.R @@ -7,22 +7,27 @@ ##' ##' @author Elizabeth Cowdery met.process.stage <- function(input.id, raw.id, con) { - format.id <- PEcAn.DB::db.query(paste("SELECT format_id from inputs where id =", input.id), con)[[1]] - cf.id <- 33 - + cf.id <- 33 + if (format.id == raw.id && format.id != cf.id) { - stage <- list(download.raw = FALSE, met2cf = TRUE, standardize = TRUE, - met2model = TRUE, id.name = "raw.id") + stage <- list( + download.raw = FALSE, met2cf = TRUE, standardize = TRUE, + met2model = TRUE, id.name = "raw.id" + ) } else if (format.id == cf.id) { - # we will still do the standardization since extracting/gapfilling etc + # we will still do the standardization since extracting/gapfilling etc # more than once makes no difference - stage <- list(download.raw = FALSE, met2cf = FALSE, standardize = TRUE, - met2model = TRUE, id.name = "cf.id") + stage <- list( + download.raw = FALSE, met2cf = FALSE, standardize = TRUE, + met2model = TRUE, id.name = "cf.id" + ) } else { # assume the only other option is a model format so nothing needs to be done - stage <- list(download.raw = FALSE, met2cf = FALSE, standardize = FALSE, - met2model = FALSE, id.name = "model.id") + stage <- list( + download.raw = FALSE, met2cf = FALSE, standardize = FALSE, + met2model = FALSE, id.name = "model.id" + ) } return(invisible(stage)) } # met.process.stage diff --git a/modules/data.atmosphere/R/met2CF.ALMA.R b/modules/data.atmosphere/R/met2CF.ALMA.R index d674d071b9c..d38e94c5189 100644 --- a/modules/data.atmosphere/R/met2CF.ALMA.R +++ b/modules/data.atmosphere/R/met2CF.ALMA.R @@ -25,11 +25,10 @@ insertPmet <- function(vals, nc2, var2, dim2, units2 = NA, conv = NULL, ##' ##' @author Mike Dietze met2CF.PalEONregional <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, - verbose = FALSE, ...) 
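# The staging rule implemented in met.process.stage above, restated as a tiny
# standalone helper (cf.id is hard-coded to format 33 above; the helper name is
# invented for illustration):
stage_for_format <- function(format.id, raw.id, cf.id = 33) {
  if (format.id == raw.id && format.id != cf.id) {
    list(met2cf = TRUE, standardize = TRUE, met2model = TRUE) # still raw data
  } else if (format.id == cf.id) {
    list(met2cf = FALSE, standardize = TRUE, met2model = TRUE) # already CF
  } else {
    list(met2cf = FALSE, standardize = FALSE, met2model = FALSE) # already model-ready
  }
}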
{ - + verbose = FALSE, ...) { # get start/end year code works on whole years only start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) + end_year <- lubridate::year(end_date) if (!file.exists(outfolder)) { dir.create(outfolder) @@ -42,14 +41,16 @@ met2CF.PalEONregional <- function(in.path, in.prefix, outfolder, start_date, end } rows <- end_year - start_year + 1 - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = in.prefix, - stringsAsFactors = FALSE) + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = in.prefix, + stringsAsFactors = FALSE + ) for (year in start_year:end_year) { my.prefix <- in.prefix if (nchar(my.prefix) > 0) { @@ -58,11 +59,11 @@ met2CF.PalEONregional <- function(in.path, in.prefix, outfolder, start_date, end new.file <- file.path(outfolder, sprintf("%s%04d.nc", my.prefix, year)) row <- year - start_year + 1 - results$file[row] <- new.file - results$host[row] <- PEcAn.remote::fqdn() - results$startdate[row] <- paste0(year, "-01-01 00:00:00") - results$enddate[row] <- paste0(year, "-12-31 23:59:59") - results$mimetype[row] <- "application/x-netcdf" + results$file[row] <- new.file + results$host[row] <- PEcAn.remote::fqdn() + results$startdate[row] <- paste0(year, "-01-01 00:00:00") + results$enddate[row] <- paste0(year, "-12-31 23:59:59") + results$mimetype[row] <- "application/x-netcdf" results$formatname[row] <- "CF" if (file.exists(new.file) && !overwrite) { @@ -89,12 +90,12 @@ met2CF.PalEONregional <- function(in.path, in.prefix, outfolder, start_date, end PEcAn.logger::logger.severe("missing file", v, stub) } old.file <- fnames[sel] - nc1 <- ncdf4::nc_open(old.file, write = FALSE) + nc1 <- ncdf4::nc_open(old.file, write = FALSE) if (length(met[[v]]) <= 1) { - met[[v]] <- aperm(ncdf4::ncvar_get(nc = nc1, varid = v),c(2,1,3)) ## switch order from lon/lat/time to lat/lon/time + met[[v]] <- aperm(ncdf4::ncvar_get(nc = nc1, varid = v), c(2, 1, 3)) ## switch order from lon/lat/time to lat/lon/time } else { - tmp <- aperm(ncdf4::ncvar_get(nc = nc1, varid = v),c(2,1,3)) ## switch order from lon/lat/time to lat/lon/time + tmp <- aperm(ncdf4::ncvar_get(nc = nc1, varid = v), c(2, 1, 3)) ## switch order from lon/lat/time to lat/lon/time met[[v]] <- abind::abind(met[[v]], tmp) } if (v == by.folder[1]) { @@ -106,57 +107,71 @@ met2CF.PalEONregional <- function(in.path, in.prefix, outfolder, start_date, end } } ncdf4::nc_close(nc1) - } ## end loop over months - } ## end loop over variables + } ## end loop over months + } ## end loop over variables # create new coordinate dimensions based on site location lat/lon - nc1 <- ncdf4::nc_open(old.file) - tdim <- nc1$dim[["time"]] - met[["time"]] <- PEcAn.utils::ud_convert(met[["time"]],"days","seconds") - tdim$units <- paste0("seconds since ",year,"-01-01 00:00:00") - tdim$vals <- met[["time"]] - tdim$len <- length(tdim$vals) + nc1 <- ncdf4::nc_open(old.file) + tdim <- nc1$dim[["time"]] + met[["time"]] <- PEcAn.utils::ud_convert(met[["time"]], "days", "seconds") + tdim$units <- paste0("seconds since ", year, "-01-01 00:00:00") + tdim$vals <- met[["time"]] + tdim$len <- length(tdim$vals) lat <- ncdf4::ncdim_def(name = "latitude", units = "degrees", vals = nc1$dim[["lat"]]$vals, create_dimvar = 
TRUE) lon <- ncdf4::ncdim_def(name = "longitude", units = "degrees", vals = nc1$dim[["lon"]]$vals, create_dimvar = TRUE) - time <- ncdf4::ncdim_def(name = "time", units = tdim$units, vals = tdim$vals, - create_dimvar = TRUE, unlim = TRUE) + time <- ncdf4::ncdim_def( + name = "time", units = tdim$units, vals = tdim$vals, + create_dimvar = TRUE, unlim = TRUE + ) dim <- list(lat, lon, time) cp.global.atts <- ncdf4::ncatt_get(nc = nc1, varid = 0) ncdf4::nc_close(nc1) # Open new file and fill in air_temperature print(year) - var <- ncdf4::ncvar_def(name = "air_temperature", units = "K", dim = dim, - missval = as.numeric(-9999)) + var <- ncdf4::ncvar_def( + name = "air_temperature", units = "K", dim = dim, + missval = as.numeric(-9999) + ) nc2 <- ncdf4::nc_create(filename = new.file, vars = var, verbose = verbose) ncdf4::ncvar_put(nc = nc2, varid = "air_temperature", vals = met[["tair"]]) # air_pressure - insertPmet(met[["psurf"]], nc2 = nc2, var2 = "air_pressure", units2 = "Pa", dim2 = dim, - verbose = verbose) + insertPmet(met[["psurf"]], + nc2 = nc2, var2 = "air_pressure", units2 = "Pa", dim2 = dim, + verbose = verbose + ) # convert CO2 to mole_fraction_of_carbon_dioxide_in_air # insertPmet(nc1=nc1, var1='CO2', nc2=nc2, var2='mole_fraction_of_carbon_dioxide_in_air', units2='mole/mole', dim2=dim, conv=function(x) { # x * 1e6 }, verbose=verbose) # specific_humidity - insertPmet(met[["qair"]], nc2 = nc2, var2 = "specific_humidity", units2 = "kg/kg", dim2 = dim, - verbose = verbose) + insertPmet(met[["qair"]], + nc2 = nc2, var2 = "specific_humidity", units2 = "kg/kg", dim2 = dim, + verbose = verbose + ) # surface_downwelling_shortwave_flux_in_air - insertPmet(met[["swdown"]], nc2 = nc2, var2 = "surface_downwelling_shortwave_flux_in_air", - units2 = "W m-2", dim2 = dim, verbose = verbose) + insertPmet(met[["swdown"]], + nc2 = nc2, var2 = "surface_downwelling_shortwave_flux_in_air", + units2 = "W m-2", dim2 = dim, verbose = verbose + ) # surface_downwelling_longwave_flux_in_air - insertPmet(met[["lwdown"]], nc2 = nc2, var2 = "surface_downwelling_longwave_flux_in_air", - units2 = "W m-2", dim2 = dim, verbose = verbose) + insertPmet(met[["lwdown"]], + nc2 = nc2, var2 = "surface_downwelling_longwave_flux_in_air", + units2 = "W m-2", dim2 = dim, verbose = verbose + ) # wind_speed insertPmet(met[["wind"]], nc2 = nc2, var2 = "wind_speed", units2 = "m s-1", dim2 = dim, verbose = verbose) # precipitation_flux - insertPmet(met[["precipf"]], nc2 = nc2, var2 = "precipitation_flux", units2 = "kg/m^2/s", - dim2 = dim, verbose = verbose) + insertPmet(met[["precipf"]], + nc2 = nc2, var2 = "precipitation_flux", units2 = "kg/m^2/s", + dim2 = dim, verbose = verbose + ) # add global attributes from original file for (j in seq_along(cp.global.atts)) { @@ -165,8 +180,8 @@ met2CF.PalEONregional <- function(in.path, in.prefix, outfolder, start_date, end # done, close file ncdf4::nc_close(nc2) -# save(results,file="met2CF.PalEON.RData") - } ## end loop over years + # save(results,file="met2CF.PalEON.RData") + } ## end loop over years return(invisible(results)) } # met2CF.PalEONregional @@ -189,10 +204,9 @@ met2CF.PalEONregional <- function(in.path, in.prefix, outfolder, start_date, end ##' @author Mike Dietze met2CF.PalEON <- function(in.path, in.prefix, outfolder, start_date, end_date, lat, lon, overwrite = FALSE, verbose = FALSE, ...) 
{ - # get start/end year code works on whole years only start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) + end_year <- lubridate::year(end_date) if (!file.exists(outfolder)) { dir.create(outfolder) @@ -205,14 +219,16 @@ met2CF.PalEON <- function(in.path, in.prefix, outfolder, start_date, end_date, l } rows <- end_year - start_year + 1 - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = in.prefix, - stringsAsFactors = FALSE) + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = in.prefix, + stringsAsFactors = FALSE + ) for (year in start_year:end_year) { my.prefix <- in.prefix if (nchar(my.prefix) > 0) { @@ -221,11 +237,11 @@ met2CF.PalEON <- function(in.path, in.prefix, outfolder, start_date, end_date, l new.file <- file.path(outfolder, sprintf("%s%04d.nc", my.prefix, year)) row <- year - start_year + 1 - results$file[row] <- new.file - results$host[row] <- PEcAn.remote::fqdn() - results$startdate[row] <- paste0(year, "-01-01 00:00:00") - results$enddate[row] <- paste0(year, "-12-31 23:59:59") - results$mimetype[row] <- "application/x-netcdf" + results$file[row] <- new.file + results$host[row] <- PEcAn.remote::fqdn() + results$startdate[row] <- paste0(year, "-01-01 00:00:00") + results$enddate[row] <- paste0(year, "-12-31 23:59:59") + results$mimetype[row] <- "application/x-netcdf" results$formatname[row] <- "CF" if (file.exists(new.file) && !overwrite) { @@ -243,22 +259,24 @@ met2CF.PalEON <- function(in.path, in.prefix, outfolder, start_date, end_date, l names(met) <- by.folder met[["time"]] <- NA - if(FALSE){ + if (FALSE) { for (v in by.folder) { fnames <- dir(file.path(in.path, v), full.names = TRUE) for (m in 1:12) { - stub <- paste0(formatC(m, width = 2, format = "d", flag = "0"), ".", - formatC(year,width = 4,format = 'd',flag = '0')) + stub <- paste0( + formatC(m, width = 2, format = "d", flag = "0"), ".", + formatC(year, width = 4, format = "d", flag = "0") + ) sel <- grep(stub, fnames) if (length(sel) == 0) { PEcAn.logger::logger.severe("missing file", v, stub) } old.file <- fnames[sel] - nc1 <- ncdf4::nc_open(old.file, write = FALSE) + nc1 <- ncdf4::nc_open(old.file, write = FALSE) if (length(met[[v]]) <= 1) { met[[v]] <- ncdf4::ncvar_get(nc = nc1, varid = v) } else { - tmp <- ncdf4::ncvar_get(nc = nc1, varid = v) + tmp <- ncdf4::ncvar_get(nc = nc1, varid = v) met[[v]] <- abind::abind(met[[v]], tmp) } if (v == by.folder[1]) { @@ -270,89 +288,111 @@ met2CF.PalEON <- function(in.path, in.prefix, outfolder, start_date, end_date, l } } ncdf4::nc_close(nc1) - } ## end loop over months - } ## end loop over variables + } ## end loop over months + } ## end loop over variables } - - + + fnames <- dir(file.path(in.path, by.folder[1]), full.names = TRUE) - stub <- paste0(formatC(1, width = 2, format = "d", flag = "0"), ".", - formatC(year,width = 4,format = 'd',flag = '0')) - sel <- grep(stub, fnames) - if (length(sel) == 0) { - PEcAn.logger::logger.severe("missing file", by.folder[1], stub) - } - old.file <- fnames[sel] - - var.ids <- c('air_temperature','precipitation_flux', - 'surface_downwelling_shortwave_flux_in_air', - 'surface_downwelling_longwave_flux_in_air', - 'air_pressure','specific_humidity', - 'wind_speed') - for(v in var.ids){ 
- met[[v]] <- ncdf4::ncvar_get(nc = nc1, varid = v) - } + stub <- paste0( + formatC(1, width = 2, format = "d", flag = "0"), ".", + formatC(year, width = 4, format = "d", flag = "0") + ) + sel <- grep(stub, fnames) + if (length(sel) == 0) { + PEcAn.logger::logger.severe("missing file", by.folder[1], stub) + } + old.file <- fnames[sel] + + var.ids <- c( + "air_temperature", "precipitation_flux", + "surface_downwelling_shortwave_flux_in_air", + "surface_downwelling_longwave_flux_in_air", + "air_pressure", "specific_humidity", + "wind_speed" + ) + for (v in var.ids) { + met[[v]] <- ncdf4::ncvar_get(nc = nc1, varid = v) + } # create new coordinate dimensions based on site location lat/lon - nc1 <- ncdf4::nc_open(old.file) - tdim <- nc1$dim[["time"]] + nc1 <- ncdf4::nc_open(old.file) + tdim <- nc1$dim[["time"]] met[["time"]] <- met[["time"]] + (850 - 1700) - tdim$units <- "days since 1700-01-01 00:00:00" - tdim$vals <- met[["time"]] - tdim$len <- length(tdim$vals) - latlon <- lat # nc1$dim$lat$vals - latlon[2] <- lon # nc1$dim$lon$vals + tdim$units <- "days since 1700-01-01 00:00:00" + tdim$vals <- met[["time"]] + tdim$len <- length(tdim$vals) + latlon <- lat # nc1$dim$lat$vals + latlon[2] <- lon # nc1$dim$lon$vals lat <- ncdf4::ncdim_def(name = "latitude", units = "", vals = 1:1, create_dimvar = FALSE) lon <- ncdf4::ncdim_def(name = "longitude", units = "", vals = 1:1, create_dimvar = FALSE) - time <- ncdf4::ncdim_def(name = "time", units = tdim$units, vals = tdim$vals, - create_dimvar = TRUE, unlim = TRUE) + time <- ncdf4::ncdim_def( + name = "time", units = tdim$units, vals = tdim$vals, + create_dimvar = TRUE, unlim = TRUE + ) dim <- list(lat, lon, time) cp.global.atts <- ncdf4::ncatt_get(nc = nc1, varid = 0) ncdf4::nc_close(nc1) # Open new file and copy lat attribute to latitude print(c(latlon, year)) - var <- ncdf4::ncvar_def(name = "latitude", units = "degree_north", dim = (list(lat, lon, time)), - missval = as.numeric(-9999)) + var <- ncdf4::ncvar_def( + name = "latitude", units = "degree_north", dim = (list(lat, lon, time)), + missval = as.numeric(-9999) + ) nc2 <- ncdf4::nc_create(filename = new.file, vars = var, verbose = verbose) ncdf4::ncvar_put(nc = nc2, varid = "latitude", vals = rep(latlon[1], tdim$len)) # copy lon attribute to longitude - var <- ncdf4::ncvar_def(name = "longitude", units = "degree_east", dim = (list(lat, lon, time)), - missval = as.numeric(-9999)) + var <- ncdf4::ncvar_def( + name = "longitude", units = "degree_east", dim = (list(lat, lon, time)), + missval = as.numeric(-9999) + ) nc2 <- ncdf4::ncvar_add(nc = nc2, v = var, verbose = verbose) ncdf4::ncvar_put(nc = nc2, varid = "longitude", vals = rep(latlon[2], tdim$len)) # air_temperature - insertPmet(met[["air_temperature"]], nc2 = nc2, var2 = "air_temperature", units2 = "K", dim2 = dim, - verbose = verbose) + insertPmet(met[["air_temperature"]], + nc2 = nc2, var2 = "air_temperature", units2 = "K", dim2 = dim, + verbose = verbose + ) # air_pressure - insertPmet(met[["air_pressure"]], nc2 = nc2, var2 = "air_pressure", units2 = "Pa", dim2 = dim, - verbose = verbose) + insertPmet(met[["air_pressure"]], + nc2 = nc2, var2 = "air_pressure", units2 = "Pa", dim2 = dim, + verbose = verbose + ) # convert CO2 to mole_fraction_of_carbon_dioxide_in_air # insertPmet(nc1=nc1, var1='CO2', nc2=nc2, var2='mole_fraction_of_carbon_dioxide_in_air', units2='mole/mole', dim2=dim, conv=function(x) { # x * 1e6 }, verbose=verbose) # specific_humidity - insertPmet(met[["specific_humidity"]], nc2 = nc2, var2 = "specific_humidity", units2 
= "kg/kg", dim2 = dim, - verbose = verbose) + insertPmet(met[["specific_humidity"]], + nc2 = nc2, var2 = "specific_humidity", units2 = "kg/kg", dim2 = dim, + verbose = verbose + ) # surface_downwelling_shortwave_flux_in_air - insertPmet(met[["surface_downwelling_shortwave_flux_in_air"]], nc2 = nc2, var2 = "surface_downwelling_shortwave_flux_in_air", - units2 = "W m-2", dim2 = dim, verbose = verbose) + insertPmet(met[["surface_downwelling_shortwave_flux_in_air"]], + nc2 = nc2, var2 = "surface_downwelling_shortwave_flux_in_air", + units2 = "W m-2", dim2 = dim, verbose = verbose + ) # surface_downwelling_longwave_flux_in_air - insertPmet(met[["surface_downwelling_longwave_flux_in_air"]], nc2 = nc2, var2 = "surface_downwelling_longwave_flux_in_air", - units2 = "W m-2", dim2 = dim, verbose = verbose) + insertPmet(met[["surface_downwelling_longwave_flux_in_air"]], + nc2 = nc2, var2 = "surface_downwelling_longwave_flux_in_air", + units2 = "W m-2", dim2 = dim, verbose = verbose + ) # wind_speed insertPmet(met[["wind_speed"]], nc2 = nc2, var2 = "wind_speed", units2 = "m s-1", dim2 = dim, verbose = verbose) # precipitation_flux - insertPmet(met[["precipitation_flux"]], nc2 = nc2, var2 = "precipitation_flux", units2 = "kg/m^2/s", - dim2 = dim, verbose = verbose) + insertPmet(met[["precipitation_flux"]], + nc2 = nc2, var2 = "precipitation_flux", units2 = "kg/m^2/s", + dim2 = dim, verbose = verbose + ) # add global attributes from original file for (j in seq_along(cp.global.atts)) { @@ -361,7 +401,7 @@ met2CF.PalEON <- function(in.path, in.prefix, outfolder, start_date, end_date, l # done, close file ncdf4::nc_close(nc2) - } ## end loop over years + } ## end loop over years return(invisible(results)) } # met2CF.PalEON @@ -382,7 +422,6 @@ met2CF.PalEON <- function(in.path, in.prefix, outfolder, start_date, end_date, l ##' ##' @author Mike Dietze met2CF.ALMA <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE) { - # get start/end year code works on whole years only start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) @@ -404,23 +443,25 @@ met2CF.ALMA <- function(in.path, in.prefix, outfolder, start_date, end_date, ove } rows <- end_year - start_year + 1 - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = in.prefix, - stringsAsFactors = FALSE) + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = in.prefix, + stringsAsFactors = FALSE + ) for (year in start_year:end_year) { new.file <- file.path(outfolder, paste(in.prefix, year, "nc", sep = ".")) row <- year - start_year + 1 - results$file[row] <- new.file - results$host[row] <- PEcAn.remote::fqdn() - results$startdate[row] <- paste0(year, "-01-01 00:00:00") - results$enddate[row] <- paste0(year, "-12-31 23:59:59") - results$mimetype[row] <- "application/x-netcdf" + results$file[row] <- new.file + results$host[row] <- PEcAn.remote::fqdn() + results$startdate[row] <- paste0(year, "-01-01 00:00:00") + results$enddate[row] <- paste0(year, "-12-31 23:59:59") + results$mimetype[row] <- "application/x-netcdf" results$formatname[row] <- "CF" if (file.exists(new.file) && !overwrite) { @@ -442,7 +483,6 @@ met2CF.ALMA <- function(in.path, in.prefix, outfolder, start_date, 
end_date, ove ncdf4::nc_close(nc1) } else { - ### ASSUMING PALEON ORGANIZATION ONE FILE PER VARIABLE PER MONTH EACH VARIABLE ### IN A FOLDER WITH ITS OWN NAME @@ -456,20 +496,20 @@ met2CF.ALMA <- function(in.path, in.prefix, outfolder, start_date, end_date, ove for (v in by.folder) { fnames <- dir(file.path(in.path, v), full.names = TRUE) for (m in 1:12) { - sel <- grep(paste0(year, "_", formatC(m, width = 2, format = "d", flag = "0")), fnames) + sel <- grep(paste0(year, "_", formatC(m, width = 2, format = "d", flag = "0")), fnames) old.file <- fnames[sel] - nc1 <- ncdf4::nc_open(old.file, write = FALSE) + nc1 <- ncdf4::nc_open(old.file, write = FALSE) if (length(met[[v]]) <= 1) { met[[v]] <- ncdf4::ncvar_get(nc = nc1, varid = v) } else { - tmp <- ncdf4::ncvar_get(nc = nc1, varid = v) + tmp <- ncdf4::ncvar_get(nc = nc1, varid = v) met[[v]] <- abind::abind(met[[v]], tmp) } if (v == by.folder[1]) { if (length(met[["time"]]) <= 1) { met[["time"]] <- nc1$dim[["time"]]$vals } else { - tmp <- nc1$dim[["time"]]$vals + tmp <- nc1$dim[["time"]]$vals met[["time"]] <- abind::abind(met[["time"]], tmp) } } @@ -479,26 +519,32 @@ met2CF.ALMA <- function(in.path, in.prefix, outfolder, start_date, end_date, ove } # create new coordinate dimensions based on site location lat/lon - nc1 <- ncdf4::nc_open(old.file) - tdim <- nc1$dim[["time"]] - latlon <- nc1$dim$lat$vals + nc1 <- ncdf4::nc_open(old.file) + tdim <- nc1$dim[["time"]] + latlon <- nc1$dim$lat$vals latlon[2] <- nc1$dim$lon$vals lat <- ncdf4::ncdim_def(name = "latitude", units = "", vals = 1:1, create_dimvar = FALSE) lon <- ncdf4::ncdim_def(name = "longitude", units = "", vals = 1:1, create_dimvar = FALSE) - time <- ncdf4::ncdim_def(name = "time", units = tdim$units, vals = met[["time"]], - create_dimvar = TRUE, unlim = TRUE) + time <- ncdf4::ncdim_def( + name = "time", units = tdim$units, vals = met[["time"]], + create_dimvar = TRUE, unlim = TRUE + ) dim <- list(lat, lon, time) # copy lat attribute to latitude print(latlon) - var <- ncdf4::ncvar_def(name = "latitude", units = "degree_north", dim = (list(lat, lon, time)), - missval = as.numeric(-9999)) + var <- ncdf4::ncvar_def( + name = "latitude", units = "degree_north", dim = (list(lat, lon, time)), + missval = as.numeric(-9999) + ) nc2 <- ncdf4::nc_create(filename = new.file, vars = var, verbose = verbose) ncdf4::ncvar_put(nc = nc2, varid = "latitude", vals = rep(latlon[1], tdim$len)) # copy lon attribute to longitude - var <- ncdf4::ncvar_def(name = "longitude", units = "degree_east", dim = (list(lat, lon, time)), - missval = as.numeric(-9999)) + var <- ncdf4::ncvar_def( + name = "longitude", units = "degree_east", dim = (list(lat, lon, time)), + missval = as.numeric(-9999) + ) nc2 <- ncdf4::ncvar_add(nc = nc2, v = var, verbose = verbose) ncdf4::ncvar_put(nc = nc2, varid = "longitude", vals = rep(latlon[2], tdim$len)) @@ -512,122 +558,162 @@ met2CF.ALMA <- function(in.path, in.prefix, outfolder, start_date, end_date, ove # e) write results to new file # convert TA to air_temperature - copyvals(nc1 = nc1, - var1 = "TA", - nc2 = nc2, - var2 = "air_temperature", units2 = "K", - dim2 = dim, - conv = function(x) { PEcAn.utils::ud_convert(x, "degC", "K") }, - verbose = verbose) + copyvals( + nc1 = nc1, + var1 = "TA", + nc2 = nc2, + var2 = "air_temperature", units2 = "K", + dim2 = dim, + conv = function(x) { + PEcAn.utils::ud_convert(x, "degC", "K") + }, + verbose = verbose + ) # convert PRESS to air_pressure - copyvals(nc1 = nc1, - var1 = "PRESS", - nc2 = nc2, - var2 = "air_pressure", units2 = "Pa", 
- dim2 = dim, - conv = function(x) { PEcAn.utils::ud_convert(x, 'kPa', 'Pa') }, - verbose = verbose) + copyvals( + nc1 = nc1, + var1 = "PRESS", + nc2 = nc2, + var2 = "air_pressure", units2 = "Pa", + dim2 = dim, + conv = function(x) { + PEcAn.utils::ud_convert(x, "kPa", "Pa") + }, + verbose = verbose + ) # convert CO2 to mole_fraction_of_carbon_dioxide_in_air - copyvals(nc1 = nc1, - var1 = "CO2", - nc2 = nc2, - var2 = "mole_fraction_of_carbon_dioxide_in_air", units2 = "mole/mole", - dim2 = dim, conv = function(x) { PEcAn.utils::ud_convert(x, "mol/mol", "ppm") }, - verbose = verbose) + copyvals( + nc1 = nc1, + var1 = "CO2", + nc2 = nc2, + var2 = "mole_fraction_of_carbon_dioxide_in_air", units2 = "mole/mole", + dim2 = dim, conv = function(x) { + PEcAn.utils::ud_convert(x, "mol/mol", "ppm") + }, + verbose = verbose + ) # convert TS1 to soil_temperature - copyvals(nc1 = nc1, - var1 = "TS1", - nc2 = nc2, - var2 = "soil_temperature", units2 = "K", - dim2 = dim, - conv = function(x) { PEcAn.utils::ud_convert(x, "degC", "K") }, - verbose = verbose) + copyvals( + nc1 = nc1, + var1 = "TS1", + nc2 = nc2, + var2 = "soil_temperature", units2 = "K", + dim2 = dim, + conv = function(x) { + PEcAn.utils::ud_convert(x, "degC", "K") + }, + verbose = verbose + ) # copy RH to relative_humidity - copyvals(nc1 = nc1, - var1 = "RH", - nc2 = nc2, - var2 = "relative_humidity", dim2 = dim, - verbose = verbose) + copyvals( + nc1 = nc1, + var1 = "RH", + nc2 = nc2, + var2 = "relative_humidity", dim2 = dim, + verbose = verbose + ) # convert RH to SH rh <- ncdf4::ncvar_get(nc = nc1, varid = "RH") rh[rh == -6999 | rh == -9999] <- NA - rh <- rh/100 + rh <- rh / 100 ta <- ncdf4::ncvar_get(nc = nc1, varid = "TA") ta[ta == -6999 | ta == -9999] <- NA ta <- PEcAn.utils::ud_convert(ta, "degC", "K") sh <- rh2qair(rh = rh, T = ta) - var <- ncdf4::ncvar_def(name = "specific_humidity", units = "kg/kg", dim = dim, missval = -6999, - verbose = verbose) + var <- ncdf4::ncvar_def( + name = "specific_humidity", units = "kg/kg", dim = dim, missval = -6999, + verbose = verbose + ) nc2 <- ncdf4::ncvar_add(nc = nc2, v = var, verbose = verbose) ncdf4::ncvar_put(nc = nc2, varid = "specific_humidity", vals = sh) # convert VPD to water_vapor_saturation_deficit # HACK : conversion will make all values < 0 to be NA - copyvals(nc1 = nc1, - var1 = "VPD", - nc2 = nc2, - var2 = "water_vapor_saturation_deficit", units2 = "mol m-2 s-1", - dim2 = dim, - conv = function(x) { ifelse(x < 0, NA, x * 1000) }, - verbose = verbose) + copyvals( + nc1 = nc1, + var1 = "VPD", + nc2 = nc2, + var2 = "water_vapor_saturation_deficit", units2 = "mol m-2 s-1", + dim2 = dim, + conv = function(x) { + ifelse(x < 0, NA, x * 1000) + }, + verbose = verbose + ) # copy Rg to surface_downwelling_shortwave_flux_in_air - copyvals(nc1 = nc1, - var1 = "Rg", - nc2 = nc2, - var2 = "surface_downwelling_shortwave_flux_in_air", dim2 = dim, - verbose = verbose) + copyvals( + nc1 = nc1, + var1 = "Rg", + nc2 = nc2, + var2 = "surface_downwelling_shortwave_flux_in_air", dim2 = dim, + verbose = verbose + ) # copy Rgl to surface_downwelling_longwave_flux_in_air - copyvals(nc1 = nc1, - var1 = "Rgl", - nc2 = nc2, - var2 = "surface_downwelling_longwave_flux_in_air", dim2 = dim, - verbose = verbose) + copyvals( + nc1 = nc1, + var1 = "Rgl", + nc2 = nc2, + var2 = "surface_downwelling_longwave_flux_in_air", dim2 = dim, + verbose = verbose + ) # convert PAR to surface_downwelling_photosynthetic_photon_flux_in_air - copyvals(nc1 = nc1, - var1 = "PAR", - nc2 = nc2, - var2 = 
"surface_downwelling_photosynthetic_photon_flux_in_air", units2 = "mol m-2 s-1", - dim2 = dim, - conv = function(x) { PEcAn.utils::ud_convert(x, "umol m-2 s-1", "mol m-2 s-1") }, - verbose = verbose) + copyvals( + nc1 = nc1, + var1 = "PAR", + nc2 = nc2, + var2 = "surface_downwelling_photosynthetic_photon_flux_in_air", units2 = "mol m-2 s-1", + dim2 = dim, + conv = function(x) { + PEcAn.utils::ud_convert(x, "umol m-2 s-1", "mol m-2 s-1") + }, + verbose = verbose + ) # copy WD to wind_direction (not official CF) - copyvals(nc1 = nc1, - var1 = "WD", - nc2 = nc2, - var2 = "wind_direction", dim2 = dim, - verbose = verbose) + copyvals( + nc1 = nc1, + var1 = "WD", + nc2 = nc2, + var2 = "wind_direction", dim2 = dim, + verbose = verbose + ) # copy WS to wind_speed - copyvals(nc1 = nc1, - var1 = "WS", - nc2 = nc2, - var2 = "wind_speed", dim2 = dim, - verbose = verbose) + copyvals( + nc1 = nc1, + var1 = "WS", + nc2 = nc2, + var2 = "wind_speed", dim2 = dim, + verbose = verbose + ) # convert PREC to precipitation_flux t <- tdim$vals - min <- 0.02083 / 30 # 0.02083 time = 30 minutes - timestep <- round(x = mean(diff(t)) / min, digits = 1) # round to nearest 0.1 minute - copyvals(nc1 = nc1, - var1 = "PREC", - nc2 = nc2, - var2 = "precipitation_flux", units2 = "kg/m^2/s", - dim2 = dim, conv = function(x) { x / timestep / 60 }, - verbose = verbose) + min <- 0.02083 / 30 # 0.02083 time = 30 minutes + timestep <- round(x = mean(diff(t)) / min, digits = 1) # round to nearest 0.1 minute + copyvals( + nc1 = nc1, + var1 = "PREC", + nc2 = nc2, + var2 = "precipitation_flux", units2 = "kg/m^2/s", + dim2 = dim, conv = function(x) { + x / timestep / 60 + }, + verbose = verbose + ) # convert wind speed and wind direction to eastward_wind and northward_wind - wd <- ncdf4::ncvar_get(nc = nc1, varid = "WD") #wind direction + wd <- ncdf4::ncvar_get(nc = nc1, varid = "WD") # wind direction wd[wd == -6999 | wd == -9999] <- NA - ws <- ncdf4::ncvar_get(nc = nc1, varid = "WS") #wind speed + ws <- ncdf4::ncvar_get(nc = nc1, varid = "WS") # wind speed ws[ws == -6999 | ws == -9999] <- NA ew <- ws * cos(wd * (pi / 180)) nw <- ws * sin(wd * (pi / 180)) @@ -654,7 +740,7 @@ met2CF.ALMA <- function(in.path, in.prefix, outfolder, start_date, end_date, ove # done, close both files ncdf4::nc_close(nc1) ncdf4::nc_close(nc2) - } ## end loop over years + } ## end loop over years return(invisible(results)) } # met2CF.ALMA diff --git a/modules/data.atmosphere/R/met2CF.Ameriflux.R b/modules/data.atmosphere/R/met2CF.Ameriflux.R index 88e873d4a3d..878c40e35ad 100644 --- a/modules/data.atmosphere/R/met2CF.Ameriflux.R +++ b/modules/data.atmosphere/R/met2CF.Ameriflux.R @@ -1,7 +1,6 @@ # helper function to copy variables and attributes from one nc file to another. 
This will do # conversion of the variables as well as on the min/max values copyvals <- function(nc1, var1, nc2, var2, dim2, units2 = NA, conv = NULL, missval = -6999, verbose = FALSE) { - vals <- ncdf4::ncvar_get(nc = nc1, varid = var1) vals[vals == -6999 | vals == -9999] <- NA if (!is.null(conv)) { @@ -53,7 +52,7 @@ getLatLon <- function(nc1) { return(c(as.numeric(lat$value), as.numeric(lon$value))) } } - PEcAn.logger::logger.severe("Could not get site location for file.") + PEcAn.logger::logger.severe("Could not get site location for file.") } # getLatLon @@ -74,40 +73,40 @@ getLatLon <- function(nc1) { ##' @author Josh Mantooth, Mike Dietze, Elizabeth Cowdery, Ankur Desai met2CF.Ameriflux <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, ...) { - - # get start/end year code works on whole years only start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) + end_year <- lubridate::year(end_date) if (!file.exists(outfolder)) { dir.create(outfolder) } rows <- end_year - start_year + 1 - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = in.prefix, - stringsAsFactors = FALSE) + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = in.prefix, + stringsAsFactors = FALSE + ) for (year in start_year:end_year) { old.file <- file.path(in.path, paste(in.prefix, year, "nc", sep = ".")) new.file <- file.path(outfolder, paste(in.prefix, year, "nc", sep = ".")) # create array with results row <- year - start_year + 1 - results$file[row] <- new.file - results$host[row] <- PEcAn.remote::fqdn() - results$startdate[row] <- paste0(year, "-01-01 00:00:00") - results$enddate[row] <- paste0(year, "-12-31 23:59:59") - results$mimetype[row] <- "application/x-netcdf" + results$file[row] <- new.file + results$host[row] <- PEcAn.remote::fqdn() + results$startdate[row] <- paste0(year, "-01-01 00:00:00") + results$enddate[row] <- paste0(year, "-12-31 23:59:59") + results$mimetype[row] <- "application/x-netcdf" results$formatname[row] <- "CF" if (file.exists(new.file) && !overwrite) { - PEcAn.logger::logger.debug("File '", new.file, "' already exists, skipping to next file.") + PEcAn.logger::logger.debug("File '", new.file, "' already exists, skipping to next file.") next } @@ -125,10 +124,10 @@ met2CF.Ameriflux <- function(in.path, in.prefix, outfolder, start_date, end_date tdimunit <- unlist(strsplit(tdim$units, " ")) tdimtz <- substr(tdimunit[length(tdimunit)], 1, 1) if ((tdimtz == "+") || (tdimtz == "-")) { - lst <- tdimunit[length(tdimunit)] #already in definition, leave it alone + lst <- tdimunit[length(tdimunit)] # already in definition, leave it alone } else { if (is.null(getOption("geonamesUsername"))) { - options(geonamesUsername = "carya") #login to geoname server + options(geonamesUsername = "carya") # login to geoname server } lst <- geonames::GNtimezone(latlon[1], latlon[2], radius = 0)$gmtOffset if (lst >= 0) { @@ -141,8 +140,10 @@ met2CF.Ameriflux <- function(in.path, in.prefix, outfolder, start_date, end_date lat <- ncdf4::ncdim_def(name = "latitude", units = "", vals = 1:1, create_dimvar = FALSE) lon <- ncdf4::ncdim_def(name = "longitude", units = "", vals = 1:1, create_dimvar = FALSE) - time <- 
ncdf4::ncdim_def(name = "time", units = tdim$units, vals = tdim$vals, - create_dimvar = TRUE, unlim = TRUE) + time <- ncdf4::ncdim_def( + name = "time", units = tdim$units, vals = tdim$vals, + create_dimvar = TRUE, unlim = TRUE + ) dim <- list(lat, lon, time) # copy lat attribute to latitude @@ -168,100 +169,140 @@ met2CF.Ameriflux <- function(in.path, in.prefix, outfolder, start_date, end_date # this conversion needs to come before others to reinitialize dimension used by copyvals (lat/lon/time) rh <- ncdf4::ncvar_get(nc = nc1, varid = "RH") rh[rh == -6999 | rh == -9999] <- NA - rh <- rh/100 + rh <- rh / 100 ta <- ncdf4::ncvar_get(nc = nc1, varid = "TA") ta[ta == -6999 | ta == -9999] <- NA ta <- PEcAn.utils::ud_convert(ta, "degC", "K") sh <- rh2qair(rh = rh, T = ta) - var <- ncdf4::ncvar_def(name = "specific_humidity", units = "kg/kg", dim = dim, - missval = -6999, verbose = verbose) + var <- ncdf4::ncvar_def( + name = "specific_humidity", units = "kg/kg", dim = dim, + missval = -6999, verbose = verbose + ) nc2 <- ncdf4::ncvar_add(nc = nc2, v = var, verbose = verbose) ncdf4::ncvar_put(nc = nc2, varid = "specific_humidity", vals = sh) # convert TA to air_temperature - copyvals(nc1 = nc1, var1 = "TA", nc2 = nc2, - var2 = "air_temperature", units2 = "K", - dim2 = dim, conv = function(x) { PEcAn.utils::ud_convert(x, "degC", "K") }, - verbose = verbose) + copyvals( + nc1 = nc1, var1 = "TA", nc2 = nc2, + var2 = "air_temperature", units2 = "K", + dim2 = dim, conv = function(x) { + PEcAn.utils::ud_convert(x, "degC", "K") + }, + verbose = verbose + ) # convert PRESS to air_pressure - copyvals(nc1 = nc1, var1 = "PRESS", nc2 = nc2, - var2 = "air_pressure", units2 = "Pa", - dim2 = dim, - conv = function(x) { PEcAn.utils::ud_convert(x, "kPa", "Pa") }, - verbose = verbose) + copyvals( + nc1 = nc1, var1 = "PRESS", nc2 = nc2, + var2 = "air_pressure", units2 = "Pa", + dim2 = dim, + conv = function(x) { + PEcAn.utils::ud_convert(x, "kPa", "Pa") + }, + verbose = verbose + ) # convert CO2 to mole_fraction_of_carbon_dioxide_in_air - copyvals(nc1 = nc1, var1 = "CO2", nc2 = nc2, - var2 = "mole_fraction_of_carbon_dioxide_in_air", - units2 = "mole/mole", - dim2 = dim, - conv = function(x) { PEcAn.utils::ud_convert(x, "ppm", "mol/mol") }, - verbose = verbose) + copyvals( + nc1 = nc1, var1 = "CO2", nc2 = nc2, + var2 = "mole_fraction_of_carbon_dioxide_in_air", + units2 = "mole/mole", + dim2 = dim, + conv = function(x) { + PEcAn.utils::ud_convert(x, "ppm", "mol/mol") + }, + verbose = verbose + ) # convert TS1 to soil_temperature - copyvals(nc1 = nc1, var1 = "TS1", nc2 = nc2, - var2 = "soil_temperature", units2 = "K", - dim2 = dim, - conv = function(x) { PEcAn.utils::ud_convert(x, "degC", "K") }, - verbose = verbose) + copyvals( + nc1 = nc1, var1 = "TS1", nc2 = nc2, + var2 = "soil_temperature", units2 = "K", + dim2 = dim, + conv = function(x) { + PEcAn.utils::ud_convert(x, "degC", "K") + }, + verbose = verbose + ) # copy RH to relative_humidity - copyvals(nc1 = nc1, var1 = "RH", nc2 = nc2, - var2 = "relative_humidity", dim2 = dim, - verbose = verbose) + copyvals( + nc1 = nc1, var1 = "RH", nc2 = nc2, + var2 = "relative_humidity", dim2 = dim, + verbose = verbose + ) # convert VPD to water_vapor_saturation_deficit HACK : conversion will make all values < 0 to be # NA - copyvals(nc1 = nc1, var1 = "VPD", nc2 = nc2, - var2 = "water_vapor_saturation_deficit", units2 = "Pa", - dim2 = dim, - conv = function(x) { ifelse(x < 0, NA, PEcAn.utils::ud_convert(x, "kPa", "Pa")) }, - verbose = verbose) + copyvals( + nc1 = nc1, var1 = 
"VPD", nc2 = nc2, + var2 = "water_vapor_saturation_deficit", units2 = "Pa", + dim2 = dim, + conv = function(x) { + ifelse(x < 0, NA, PEcAn.utils::ud_convert(x, "kPa", "Pa")) + }, + verbose = verbose + ) # copy Rg to surface_downwelling_shortwave_flux_in_air - copyvals(nc1 = nc1, var1 = "Rg", nc2 = nc2, - var2 = "surface_downwelling_shortwave_flux_in_air", - dim2 = dim, - verbose = verbose) + copyvals( + nc1 = nc1, var1 = "Rg", nc2 = nc2, + var2 = "surface_downwelling_shortwave_flux_in_air", + dim2 = dim, + verbose = verbose + ) # copy Rgl to surface_downwelling_longwave_flux_in_air - copyvals(nc1 = nc1, var1 = "Rgl", nc2 = nc2, - var2 = "surface_downwelling_longwave_flux_in_air", - dim2 = dim, - verbose = verbose) + copyvals( + nc1 = nc1, var1 = "Rgl", nc2 = nc2, + var2 = "surface_downwelling_longwave_flux_in_air", + dim2 = dim, + verbose = verbose + ) # convert PAR to surface_downwelling_photosynthetic_photon_flux_in_air - copyvals(nc1 = nc1, var1 = "PAR", nc2 = nc2, - var2 = "surface_downwelling_photosynthetic_photon_flux_in_air", units2 = "mol m-2 s-1", - dim2 = dim, - conv = function(x) { PEcAn.utils::ud_convert(x, "umol m-2 s-1", "mol m-2 s-1") }, - verbose = verbose) + copyvals( + nc1 = nc1, var1 = "PAR", nc2 = nc2, + var2 = "surface_downwelling_photosynthetic_photon_flux_in_air", units2 = "mol m-2 s-1", + dim2 = dim, + conv = function(x) { + PEcAn.utils::ud_convert(x, "umol m-2 s-1", "mol m-2 s-1") + }, + verbose = verbose + ) # copy WD to wind_direction (not official CF) - copyvals(nc1 = nc1, var1 = "WD", nc2 = nc2, - var2 = "wind_direction", dim2 = dim, - verbose = verbose) + copyvals( + nc1 = nc1, var1 = "WD", nc2 = nc2, + var2 = "wind_direction", dim2 = dim, + verbose = verbose + ) # copy WS to wind_speed - copyvals(nc1 = nc1, var1 = "WS", nc2 = nc2, - var2 = "wind_speed", dim2 = dim, - verbose = verbose) + copyvals( + nc1 = nc1, var1 = "WS", nc2 = nc2, + var2 = "wind_speed", dim2 = dim, + verbose = verbose + ) # convert PREC to precipitation_flux t <- tdim$vals - min <- 0.02083 / 30 # 0.02083 time = 30 minutes - timestep <- round(x = mean(diff(t)) / min, digits = 1) # round to nearest 0.1 minute - copyvals(nc1 = nc1, var1 = "PREC", nc2 = nc2, - var2 = "precipitation_flux", units2 = "kg/m^2/s", - dim2 = dim, - conv = function(x) { x / timestep / 60 }, - verbose = verbose) + min <- 0.02083 / 30 # 0.02083 time = 30 minutes + timestep <- round(x = mean(diff(t)) / min, digits = 1) # round to nearest 0.1 minute + copyvals( + nc1 = nc1, var1 = "PREC", nc2 = nc2, + var2 = "precipitation_flux", units2 = "kg/m^2/s", + dim2 = dim, + conv = function(x) { + x / timestep / 60 + }, + verbose = verbose + ) # convert wind speed and wind direction to eastward_wind and northward_wind - wd <- ncdf4::ncvar_get(nc = nc1, varid = "WD") #wind direction + wd <- ncdf4::ncvar_get(nc = nc1, varid = "WD") # wind direction wd[wd == -6999 | wd == -9999] <- NA - ws <- ncdf4::ncvar_get(nc = nc1, varid = "WS") #wind speed + ws <- ncdf4::ncvar_get(nc = nc1, varid = "WS") # wind speed ws[ws == -6999 | ws == -9999] <- NA ew <- ws * cos(wd * (pi / 180)) nw <- ws * sin(wd * (pi / 180)) @@ -288,7 +329,7 @@ met2CF.Ameriflux <- function(in.path, in.prefix, outfolder, start_date, end_date # done, close both files ncdf4::nc_close(nc1) ncdf4::nc_close(nc2) - } ## end loop over years + } ## end loop over years return(invisible(results)) } # met2CF.Ameriflux diff --git a/modules/data.atmosphere/R/met2CF.AmerifluxLBL.R b/modules/data.atmosphere/R/met2CF.AmerifluxLBL.R index 69f5c882304..b6380d016a0 100644 --- 
a/modules/data.atmosphere/R/met2CF.AmerifluxLBL.R +++ b/modules/data.atmosphere/R/met2CF.AmerifluxLBL.R @@ -24,8 +24,8 @@ ##' format$na.strings = list of missing values to convert to NA, such as -9999 ##' format$skip = lines to skip excluding header ##' format$vars$column_number = Column number in CSV file (optional, will use header name first) -##' Columns with NA for bety variable name are dropped. -##' Units for datetime field are the lubridate function that will be used to parse the date (e.g. \code{ymd_hms} or \code{mdy_hm}). +##' Columns with NA for bety variable name are dropped. +##' Units for datetime field are the lubridate function that will be used to parse the date (e.g. \code{ymd_hms} or \code{mdy_hm}). ##' @param overwrite should existing files be overwritten ##' @param verbose should ouput of function be extra verbose @@ -33,15 +33,14 @@ ##' ##' @author Ankur Desai met2CF.AmerifluxLBL <- function(in.path, in.prefix, outfolder, start_date, end_date, format, - overwrite = FALSE, verbose = FALSE, ...) { + overwrite = FALSE, verbose = FALSE, ...) { + ## Determine if file is in old or new format based on filename ending in "-1" or not + ## If in new format, then convert header input names based on file header + ## Otherwise just call met2CF.csv as usual -##Determine if file is in old or new format based on filename ending in "-1" or not -## If in new format, then convert header input names based on file header -## Otherwise just call met2CF.csv as usual - - file_version <- substr(in.prefix,nchar(in.prefix)-1,nchar(in.prefix)) - if (file_version!='-1') { -##Open the file and read the first few lines to get header + file_version <- substr(in.prefix, nchar(in.prefix) - 1, nchar(in.prefix)) + if (file_version != "-1") { + ## Open the file and read the first few lines to get header PEcAn.logger::logger.info("New Ameriflux format, updating format record") files <- dir(in.path, in.prefix, full.names = TRUE) files <- files[grep("*.csv", files)] @@ -50,51 +49,52 @@ met2CF.AmerifluxLBL <- function(in.path, in.prefix, outfolder, start_date, end_d return(NULL) } if (length(files) > 1) { - PEcAn.logger::logger.warn(length(files), ' met files found. Using first file: ', files[1]) + PEcAn.logger::logger.warn(length(files), " met files found. 
Using first file: ", files[1]) files <- files[1] } - somedat <- utils::read.csv(files, - header = TRUE, - skip = format$skip, - na.strings = format$na.strings, - as.is = TRUE, - check.names = FALSE,nrows=1) + somedat <- utils::read.csv(files, + header = TRUE, + skip = format$skip, + na.strings = format$na.strings, + as.is = TRUE, + check.names = FALSE, nrows = 1 + ) colname <- names(somedat) - - ##Take the original format and strip everything after _ + + ## Take the original format and strip everything after _ formatname <- format$vars$input_name - removeunder <- regexpr("\\_[^\\_]*$",formatname) - removethese <- which(removeunder!=-1) - if (length(removethese)>0){ - formatname[removethese] = substr(formatname[removethese],replicate(length(removethese),1),removeunder[removethese]-1) + removeunder <- regexpr("\\_[^\\_]*$", formatname) + removethese <- which(removeunder != -1) + if (length(removethese) > 0) { + formatname[removethese] <- substr(formatname[removethese], replicate(length(removethese), 1), removeunder[removethese] - 1) } - ##Loop over format names, match to new header and substitute in + ## Loop over format names, match to new header and substitute in for (i in 1:length(formatname)) { - if (formatname[i]=="TIMESTAMP") { - formatname[i]="TIMESTAMP_START" + if (formatname[i] == "TIMESTAMP") { + formatname[i] <- "TIMESTAMP_START" } - if (nchar(formatname[i])==1) { + if (nchar(formatname[i]) == 1) { ## to avoid overlap with single character and multi character variable names - namesearch <- colname[which(colname==formatname[i])] - if (length(namesearch)==0) { - namesearch <- sort(colname[grep(paste0("^",formatname[i],"_"),colname)]) + namesearch <- colname[which(colname == formatname[i])] + if (length(namesearch) == 0) { + namesearch <- sort(colname[grep(paste0("^", formatname[i], "_"), colname)]) } } else { - namesearch <- sort(colname[grep(paste0("^",formatname[i]),colname)]) + namesearch <- sort(colname[grep(paste0("^", formatname[i]), colname)]) } - if (length(namesearch)>1) { + if (length(namesearch) > 1) { namesearch <- namesearch[1] } - if (length(namesearch)==1) { - loc <- which(colname %in% namesearch) + if (length(namesearch) == 1) { + loc <- which(colname %in% namesearch) format$vars$column_number[i] <- loc format$vars$input_name[i] <- namesearch } } PEcAn.logger::logger.info(format$vars$input_name) } - ##Call met2CF with either original or modified format record - results <- PEcAn.data.atmosphere::met2CF.csv(in.path, in.prefix, outfolder,start_date, end_date,format, overwrite=overwrite) -## FUTURE: choose height based on tower height information -} \ No newline at end of file + ## Call met2CF with either original or modified format record + results <- PEcAn.data.atmosphere::met2CF.csv(in.path, in.prefix, outfolder, start_date, end_date, format, overwrite = overwrite) + ## FUTURE: choose height based on tower height information +} diff --git a/modules/data.atmosphere/R/met2CF.FACE.R b/modules/data.atmosphere/R/met2CF.FACE.R index a4634c760e3..257e6fb95b2 100644 --- a/modules/data.atmosphere/R/met2CF.FACE.R +++ b/modules/data.atmosphere/R/met2CF.FACE.R @@ -20,17 +20,15 @@ #' @author Elizabeth Cowdery #' #' @export -met2CF.FACE <- function(in.path,in.prefix,outfolder,start_date,end_date,input.id,site,format, ...) { - +met2CF.FACE <- function(in.path, in.prefix, outfolder, start_date, end_date, input.id, site, format, ...) 
{ files <- dir(in.path, in.prefix) file <- files[grep(pattern = "*.nc", files)] if (!(length(file) == 1)) { return(NULL) } - f <- gsub("//","/",file.path(in.path, file)) - + f <- gsub("//", "/", file.path(in.path, file)) + for (treatment in c("a", "e")) { - t.outfolder <- paste(unlist(strsplit(outfolder, "FACE")), collapse = paste0("FACE_", treatment)) # t.outfolder <- paste0(outfolder,"_",treatment) if (!file.exists(t.outfolder)) { @@ -39,103 +37,109 @@ met2CF.FACE <- function(in.path,in.prefix,outfolder,start_date,end_date,input.id f.cf <- file.path(t.outfolder, file) if (!file.exists(f.cf)) { # file.copy(f,f.cf) - + # paste('ncks -x -v', paste0(rm.vars,collapse = ','), f.cf, f.cf) - + #---------------------------------------------------------------------# # Latitude and Longitude - + nc1 <- ncdf4::nc_open(f, write = TRUE) - + time_units <- paste0("hours/2", unlist(strsplit(nc1$var$TIMEstp$units, "timesteps"))[2]) time <- ncdf4::ncdim_def(name = "time", units = time_units, vals = nc1$dim$tstep$vals) lon <- ncdf4::ncdim_def("longitude", "degrees_east", as.numeric(site$lon)) # define netCDF dimensions for variables lat <- ncdf4::ncdim_def("latitude", "degrees_north", as.numeric(site$lat)) dim <- list(lat, lon, time) - + # convert wind speed and wind direction to eastward_wind and northward_wind - wd <- 0 # wind direction - not specified so I set to 0??? - ws <- ncdf4::ncvar_get(nc = nc1, varid = "Wind") #wind speed + wd <- 0 # wind direction - not specified so I set to 0??? + ws <- ncdf4::ncvar_get(nc = nc1, varid = "Wind") # wind speed ew <- ws * cos(wd * (pi / 180)) nw <- ws * sin(wd * (pi / 180)) - + var <- ncdf4::ncvar_def(name = "eastward_wind", units = "m/s", dim = dim, missval = -6999, verbose = FALSE) nc2 <- ncdf4::nc_create(filename = f.cf, vars = var, verbose = FALSE) ncdf4::ncvar_put(nc = nc2, varid = "eastward_wind", vals = ew) - + var <- ncdf4::ncvar_def(name = "northward_wind", units = "m/s", dim = dim, missval = -6999, verbose = FALSE) nc2 <- ncdf4::ncvar_add(nc = nc2, v = var, verbose = FALSE) ncdf4::ncvar_put(nc = nc2, varid = "northward_wind", vals = nw) - + #---------------------------------------------------------------------# - # Loop through variables and convert - + # Loop through variables and convert + vars.used.index.all <- setdiff(seq_along(format$vars$variable_id), format$time.row) - nt <- setdiff(c("a","e"), treatment) - exclude.treatment <- paste0(nt,c("CO2","O3")) + nt <- setdiff(c("a", "e"), treatment) + exclude.treatment <- paste0(nt, c("CO2", "O3")) vars.used.index <- vars.used.index.all[!(format$vars$input_name[vars.used.index.all] %in% exclude.treatment)] - - derp <- grep(paste0(treatment,"CO2"), format$vars$input_name[vars.used.index]) - if(length(derp) >1){ - for(i in 2:length(derp)){ + + derp <- grep(paste0(treatment, "CO2"), format$vars$input_name[vars.used.index]) + if (length(derp) > 1) { + for (i in 2:length(derp)) { vars.used.index <- vars.used.index[-derp[i]] } } - derp <- grep(paste0(treatment,"O3"), format$vars$input_name[vars.used.index]) - if(length(derp) >1){ - for(i in 2:length(derp)){ + derp <- grep(paste0(treatment, "O3"), format$vars$input_name[vars.used.index]) + if (length(derp) > 1) { + for (i in 2:length(derp)) { vars.used.index <- vars.used.index[-derp[i]] } } vars_used <- format$vars[vars.used.index, ] - + # begin loop for (i in seq_len(nrow(vars_used))) { vals <- ncdf4::ncvar_get(nc1, vars_used$input_name[i]) - + if (vars_used$input_units[i] == vars_used$pecan_units[i]) { print("match") } else { u1 <- vars_used$input_units[i] u2 
<- vars_used$pecan_units[i] if (units::ud_are_convertible(u1, u2)) { - print(sprintf("convert %s %s to %s %s", - vars_used$input_name[i], vars_used$input_units[i], - vars_used$pecan_name[i], vars_used$pecan_units[i])) + print(sprintf( + "convert %s %s to %s %s", + vars_used$input_name[i], vars_used$input_units[i], + vars_used$pecan_name[i], vars_used$pecan_units[i] + )) vals <- PEcAn.utils::ud_convert(vals, u1, u2) } else if (PEcAn.utils::misc.are.convertible(u1, u2)) { - print(sprintf("convert %s %s to %s %s", - vars_used$input_name[i], u1, - vars_used$pecan_name[i], u2)) + print(sprintf( + "convert %s %s to %s %s", + vars_used$input_name[i], u1, + vars_used$pecan_name[i], u2 + )) vals <- PEcAn.utils::misc.convert(x, u1, u2) } else { PEcAn.logger::logger.error("Units cannot be converted") - } + } } - - var <- ncdf4::ncvar_def(name = vars_used$pecan_name[i], - units = vars_used$pecan_units[i], - dim = dim, verbose = FALSE) + + var <- ncdf4::ncvar_def( + name = vars_used$pecan_name[i], + units = vars_used$pecan_units[i], + dim = dim, verbose = FALSE + ) nc2 <- ncdf4::ncvar_add(nc = nc2, v = var, verbose = FALSE) ncdf4::ncvar_put(nc = nc2, varid = vars_used$pecan_name[i], vals = vals) - - att <- ncdf4::ncatt_get(nc1,vars_used$input_name[i], "long_name") + + att <- ncdf4::ncatt_get(nc1, vars_used$input_name[i], "long_name") if (att$hasatt) { val <- att$value ncdf4::ncatt_put(nc = nc2, varid = vars_used$pecan_name[i], attname = "long_name", attval = val) } } ncdf4::nc_close(nc2) - - + + # Split into annual files - + year <- ncdf4::ncvar_get(nc1, "YEAR") y <- year[1]:year[length(year)] n <- length(y) t <- -1 for (j in seq_len(n)) { - new.file <- file.path(t.outfolder, paste(in.prefix, y[j],"nc", sep =".")) + new.file <- file.path(t.outfolder, paste(in.prefix, y[j], "nc", sep = ".")) if (!file.exists(new.file)) { s <- t + 1 print(s) @@ -148,12 +152,11 @@ met2CF.FACE <- function(in.path,in.prefix,outfolder,start_date,end_date,input.id t <- e } print(paste("Treatment ", treatment, " done")) - } else { print(paste("Treatment ", treatment, " aleady done")) - } # end make new file + } # end make new file file.remove(f.cf) - } # end loop over treatments + } # end loop over treatments } # met2CF.FACE @@ -163,40 +166,40 @@ met2CF.FACE <- function(in.path,in.prefix,outfolder,start_date,end_date,input.id # ##################################################################### # # HOW I PREVIOUSLY DID CONVERSIONS (I think it contains errors) # # convert CO2 to mole_fraction_of_carbon_dioxide_in_air -# copyvals(nc1 = nc1, var1 = paste0(treatment, "CO2"), nc2 = nc2, -# var2 = "mole_fraction_of_carbon_dioxide_in_air", units2 = "mole/mole", +# copyvals(nc1 = nc1, var1 = paste0(treatment, "CO2"), nc2 = nc2, +# var2 = "mole_fraction_of_carbon_dioxide_in_air", units2 = "mole/mole", # dim2 = dim, conv = function(x) { x * 1e+06 }, # verbose = verbose) -# +# # # deal with the rest of the variables -# -# vars <- c("Rainf", "Tair", "RH", "VPD", "Qair", "Wind", "SWdown", "PAR", "LWdown", "Psurf", +# +# vars <- c("Rainf", "Tair", "RH", "VPD", "Qair", "Wind", "SWdown", "PAR", "LWdown", "Psurf", # paste0(treatment, "O3"), "SolarElevation") -# -# nvars <- c("precipitation_flux", "air_temperature", "relative_humidity", "water_vapor_saturation_deficit", -# "specific_humidity", "wind_speed", "surface_downwelling_shortwave_flux_in_air", -# "surface_downwelling_photosynthetic_radiative_flux_in_air", -# "surface_downwelling_longwave_flux_in_air", "air_pressure", -# "mass_concentration_of_ozone_in_air", +# +# nvars <- 
c("precipitation_flux", "air_temperature", "relative_humidity", "water_vapor_saturation_deficit", +# "specific_humidity", "wind_speed", "surface_downwelling_shortwave_flux_in_air", +# "surface_downwelling_photosynthetic_radiative_flux_in_air", +# "surface_downwelling_longwave_flux_in_air", "air_pressure", +# "mass_concentration_of_ozone_in_air", # "solar_elevation_angle") -# +# # if (!(length(nvars) == length(vars))) { # PEcAn.logger::logger.error("Variable mismatch") # } -# +# # l <- length(vars) # for (k in seq_len(l)) { # if (vars[k] %in% nc.vars) { # # nc <- tncar_rename(nc,vars[k],nvars[k]) -# +# # vals <- ncdf4::ncvar_get(nc1, vars[k]) -# +# # units <- ncdf4::ncatt_get(nc1, varid = vars[k], attname = "units", verbose = FALSE)$value -# +# # var <- ncdf4::ncvar_def(name = nvars[k], units = units, dim = dim, verbose = FALSE) # nc2 <- ncdf4::ncvar_add(nc = nc2, v = var, verbose = TRUE) # ncdf4::ncvar_put(nc = nc2, varid = nvars[k], vals = vals) -# +# # att <- ncdf4::ncatt_get(nc1, vars[k], "long_name") # if (att$hasatt) { # val <- att$value @@ -204,5 +207,5 @@ met2CF.FACE <- function(in.path,in.prefix,outfolder,start_date,end_date,input.id # } # } # } -# +# # ncdf4::nc_close(nc2) diff --git a/modules/data.atmosphere/R/met2CF.Geostreams.R b/modules/data.atmosphere/R/met2CF.Geostreams.R index ef7d372d206..3781ada52f6 100644 --- a/modules/data.atmosphere/R/met2CF.Geostreams.R +++ b/modules/data.atmosphere/R/met2CF.Geostreams.R @@ -1,24 +1,22 @@ - #' Convert geostreams JSON to CF met file -#' +#' #' @param in.path directory containing Geostreams JSON file(s) to be converted #' @param in.prefix initial portion of input filenames (everything before the dates) #' @param outfolder directory where nc output files should be written. Will be created if necessary #' @param start_date,end_date beginning and end of run, YYYY-MM-DD. #' @param overwrite logical: Regenerate existing files of the same name? -#' @param verbose logical, passed on to \code{\link[ncdf4]{nc_create}} +#' @param verbose logical, passed on to \code{\link[ncdf4]{nc_create}} #' to control how chatty it should be during netCDF creation #' @param ... other arguments, currently ignored #' @export #' @author Harsh Agrawal, Chris Black -met2CF.Geostreams <- function(in.path, in.prefix, outfolder, - start_date, end_date, - overwrite = FALSE, verbose = FALSE, ...) { - - start_date <- as.POSIXct(start_date, tz="UTC") - end_date <- as.POSIXct(end_date, tz="UTC") +met2CF.Geostreams <- function(in.path, in.prefix, outfolder, + start_date, end_date, + overwrite = FALSE, verbose = FALSE, ...) 
{ + start_date <- as.POSIXct(start_date, tz = "UTC") + end_date <- as.POSIXct(end_date, tz = "UTC") - out_files = c() + out_files <- c() for (year in lubridate::year(start_date):lubridate::year(end_date)) { in_file <- file.path(in.path, paste(in.prefix, year, "json", sep = ".")) dat <- jsonlite::read_json(in_file, simplifyVector = TRUE, flatten = TRUE) @@ -33,22 +31,26 @@ met2CF.Geostreams <- function(in.path, in.prefix, outfolder, if (year == lubridate::year(start_date) & start_date < min(dat$start_time)) { PEcAn.logger::logger.severe( "Requested start date is", start_date, - "but", year, "data begin on", min(dat$start_time)) + "but", year, "data begin on", min(dat$start_time) + ) } if (year == lubridate::year(end_date) & end_date > max(dat$end_time)) { PEcAn.logger::logger.severe( "Requested end date is", end_date, - "but", year, "data end on", max(dat$end_time)) + "but", year, "data end on", max(dat$end_time) + ) } - dat$mid_time <- dat$start_time + (dat$end_time - dat$start_time)/2 - dat <- dat[dat$start_time >= start_date & dat$end_time <= end_date,] + dat$mid_time <- dat$start_time + (dat$end_time - dat$start_time) / 2 + dat <- dat[dat$start_time >= start_date & dat$end_time <= end_date, ] secs_elapsed <- unclass(dat$mid_time) - unclass(start_date) days_elapsed <- PEcAn.utils::ud_convert(secs_elapsed, "sec", "days") - + ref_time_str <- strftime(start_date, format = "%FT%TZ") - time <- ncdf4::ncdim_def(name = "time", units = paste("days since", ref_time_str), - vals = days_elapsed, create_dimvar = TRUE, unlim = TRUE) + time <- ncdf4::ncdim_def( + name = "time", units = paste("days since", ref_time_str), + vals = days_elapsed, create_dimvar = TRUE, unlim = TRUE + ) if (length(unique(dat$geometry.coordinates)) == 1) { # all lat/lons are are identical-- no need to store extra copies @@ -56,10 +58,10 @@ met2CF.Geostreams <- function(in.path, in.prefix, outfolder, raw_lat <- dat$geometry.coordinates[[1]][[2]] } else { # multiple coords in same file -- keep lat and lon as full-length vectors - raw_lon <- sapply(dat$geometry.coordinates, function(x)x[[1]]) - raw_lat <- sapply(dat$geometry.coordinates, function(x)x[[2]]) + raw_lon <- sapply(dat$geometry.coordinates, function(x) x[[1]]) + raw_lat <- sapply(dat$geometry.coordinates, function(x) x[[2]]) } - + lat <- ncdf4::ncdim_def(name = "latitude", units = "degrees_north", vals = raw_lat, create_dimvar = TRUE) lon <- ncdf4::ncdim_def(name = "longitude", units = "degrees_east", vals = raw_lon, create_dimvar = TRUE) @@ -72,41 +74,47 @@ met2CF.Geostreams <- function(in.path, in.prefix, outfolder, names(dat) <- sub("precipitation_rate", "precipitation_flux", names(dat)) } - make_ncvar <- function(name){ - if (! 
name %in% pecan_standard_met_table$cf_standard_name) { - PEcAn.logger::logger.severe("Don't know how to convert parameter", name, "to CF standard format") + make_ncvar <- function(name) { + if (!name %in% pecan_standard_met_table$cf_standard_name) { + PEcAn.logger::logger.severe("Don't know how to convert parameter", name, "to CF standard format") } unit <- pecan_standard_met_table[pecan_standard_met_table$cf_standard_name == name, "units"] - ncdf4::ncvar_def(name = name, - units = unit, - dim = cf_dims, - missval = -999, - verbose = verbose) + ncdf4::ncvar_def( + name = name, + units = unit, + dim = cf_dims, + missval = -999, + verbose = verbose + ) } - var_list = lapply(vars, make_ncvar) + var_list <- lapply(vars, make_ncvar) dir.create(outfolder, recursive = TRUE, showWarnings = FALSE) nc.file <- file.path(outfolder, paste(in.prefix, year, "nc", sep = ".")) - if (!overwrite && file.exists(nc.file)) { - PEcAn.logger::logger.severe("Refusing to overwrite existing file", nc.file, " -- If you're sure, set overwrite=TRUE") + if (!overwrite && file.exists(nc.file)) { + PEcAn.logger::logger.severe("Refusing to overwrite existing file", nc.file, " -- If you're sure, set overwrite=TRUE") } cf <- ncdf4::nc_create(filename = nc.file, vars = var_list, verbose = verbose) for (var in var_list) { - ncdf4::ncvar_put(nc = cf, - varid = var, - vals = get(var$name, dat), - verbose = verbose) + ncdf4::ncvar_put( + nc = cf, + varid = var, + vals = get(var$name, dat), + verbose = verbose + ) } ncdf4::nc_close(cf) out_files <- append(out_files, nc.file) } - return(data.frame(file = out_files, - host = PEcAn.remote::fqdn(), - startdate = start_date, - enddate = end_date, - mimetype = "application/x-netcdf", - formatname = "CF Meteorology", - dbfile.name = in.prefix, - stringsAsFactors = FALSE)) + return(data.frame( + file = out_files, + host = PEcAn.remote::fqdn(), + startdate = start_date, + enddate = end_date, + mimetype = "application/x-netcdf", + formatname = "CF Meteorology", + dbfile.name = in.prefix, + stringsAsFactors = FALSE + )) } diff --git a/modules/data.atmosphere/R/met2CF.ICOS.R b/modules/data.atmosphere/R/met2CF.ICOS.R index f3775ad0426..727b8aefc45 100644 --- a/modules/data.atmosphere/R/met2CF.ICOS.R +++ b/modules/data.atmosphere/R/met2CF.ICOS.R @@ -1,5 +1,5 @@ -#' Convert variables ICOS variables to CF format. -#' +#' Convert variables ICOS variables to CF format. +#' #' Variables present in the output netCDF file: #' air_temperature, air_temperature, relative_humidity, #' specific_humidity, water_vapor_saturation_deficit, @@ -43,11 +43,12 @@ met2CF.ICOS <- overwrite = FALSE, ...) { results <- PEcAn.data.atmosphere::met2CF.csv(in.path, - in.prefix, - outfolder, - start_date, - end_date, - format, - overwrite = overwrite) + in.prefix, + outfolder, + start_date, + end_date, + format, + overwrite = overwrite + ) return(results) } diff --git a/modules/data.atmosphere/R/met2CF.NARR.R b/modules/data.atmosphere/R/met2CF.NARR.R index 968cdd83c31..9900bd5444a 100644 --- a/modules/data.atmosphere/R/met2CF.NARR.R +++ b/modules/data.atmosphere/R/met2CF.NARR.R @@ -11,76 +11,79 @@ ##' ##' @author Elizabeth Cowdery, Rob Kooper ##' @export -met2CF.NARR <- function(in.path, in.prefix, outfolder, start_date, end_date, +met2CF.NARR <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, ...) 
{ - dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) - + vars <- c("pres.sfc", "dswrf", "dlwrf", "air.2m", "shum.2m", "prate", "uwnd.10m", "vwnd.10m") svars <- c("pres", "dswrf", "dlwrf", "air", "shum", "prate", "uwnd", "vwnd") - cfvars <- c("air_pressure", "surface_downwelling_shortwave_flux_in_air", - "surface_downwelling_longwave_flux_in_air", - "air_temperature", "specific_humidity", "precipitation_flux", - "eastward_wind", "northward_wind") - + cfvars <- c( + "air_pressure", "surface_downwelling_shortwave_flux_in_air", + "surface_downwelling_longwave_flux_in_air", + "air_temperature", "specific_humidity", "precipitation_flux", + "eastward_wind", "northward_wind" + ) + # get start/end year code works on whole years only start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - years <- start_year:end_year - + end_year <- lubridate::year(end_date) + years <- start_year:end_year + rows <- end_year - start_year + 1 - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = in.prefix, - stringsAsFactors = FALSE) - + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = in.prefix, + stringsAsFactors = FALSE + ) + for (y in years) { newfile <- file.path(outfolder, paste0("NARR.", y, ".nc")) - + # create array with results row <- y - start_year + 1 - results$file[row] <- newfile - results$host[row] <- PEcAn.remote::fqdn() - results$startdate[row] <- paste0(y, "-01-01 00:00:00") - results$enddate[row] <- paste0(y, "-12-31 23:59:59") - results$mimetype[row] <- "application/x-netcdf" + results$file[row] <- newfile + results$host[row] <- PEcAn.remote::fqdn() + results$startdate[row] <- paste0(y, "-01-01 00:00:00") + results$enddate[row] <- paste0(y, "-12-31 23:59:59") + results$mimetype[row] <- "application/x-netcdf" results$formatname[row] <- "CF (regional)" - + if (file.exists(newfile) && !overwrite) { PEcAn.logger::logger.debug("File '", newfile, "' already exists, skipping to next file.") next } else { PEcAn.logger::logger.info("Preparing file '", newfile, "'. 
") } - + # use tempfile tmpfile <- file.path(outfolder, paste0("NARR.", y, ".tmp")) unlink(tmpfile) - + # keep track of variables to rename renamevars <- list("-v", "lat,latitude", "-v", "lon,longitude") for (i in seq_along(vars)) { file <- file.path(in.path, paste0(vars[i], ".", y, ".nc")) if (verbose) { print(paste(c("ncpdq", list("-A", "-U", "-4", "--no_tmp_fl", file, tmpfile)), collapse = " ")) - } + } system2("ncpdq", list("-A", "-U", "-4", "--no_tmp_fl", file, tmpfile)) renamevars <- c(renamevars, c("-v", paste0(svars[i], ",", cfvars[i]))) } - + # rename all variables if (verbose) { print(paste(c("ncrename", c(renamevars, tmpfile)), collapse = " ")) } system2("ncrename", c(renamevars, tmpfile)) - + # finally rename file file.rename(tmpfile, newfile) } - + return(invisible(results)) } # met2CF.NARR diff --git a/modules/data.atmosphere/R/met2CF.csv.R b/modules/data.atmosphere/R/met2CF.csv.R index f1188cadb4c..1a2b9416b2d 100644 --- a/modules/data.atmosphere/R/met2CF.csv.R +++ b/modules/data.atmosphere/R/met2CF.csv.R @@ -25,7 +25,7 @@ #' \item `format$skip`: lines to skip excluding header #' \item `format$vars$column_number`: column number in CSV file #' (optional, will use header name first) -#'} +#' } #' #' Columns with NA for bety variable name are dropped. #' @@ -49,46 +49,51 @@ #' @examples #' \dontrun{ #' con <- PEcAn.DB::db.open( -#' list(user='bety', password='bety', host='localhost', -#' dbname='bety', driver='PostgreSQL',write=TRUE)) -#' start_date <- lubridate::ymd_hm('200401010000') -#' end_date <- lubridate::ymd_hm('200412312330') -#' file<-PEcAn.data.atmosphere::download.Fluxnet2015('US-WCr','~/',start_date,end_date) -#' in.path <- '~/' +#' list( +#' user = "bety", password = "bety", host = "localhost", +#' dbname = "bety", driver = "PostgreSQL", write = TRUE +#' ) +#' ) +#' start_date <- lubridate::ymd_hm("200401010000") +#' end_date <- lubridate::ymd_hm("200412312330") +#' file <- PEcAn.data.atmosphere::download.Fluxnet2015("US-WCr", "~/", start_date, end_date) +#' in.path <- "~/" #' in.prefix <- file$dbfile.name -#' outfolder <- '~/' +#' outfolder <- "~/" #' format.id <- 5000000001 -#' format <- PEcAn.DB::query.format.vars(format.id=format.id,bety = bety) +#' format <- PEcAn.DB::query.format.vars(format.id = format.id, bety = bety) #' format$lon <- -92.0 #' format$lat <- 45.0 #' format$time_zone <- "America/Chicago" #' results <- PEcAn.data.atmosphere::met2CF.csv( #' in.path, in.prefix, outfolder, #' start_date, end_date, format, -#' overwrite=TRUE) +#' overwrite = TRUE +#' ) #' } #' #' @export -met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, format, lat = NULL, lon = NULL, - nc_verbose = FALSE, overwrite = FALSE,...) { - +met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, format, lat = NULL, lon = NULL, + nc_verbose = FALSE, overwrite = FALSE, ...) 
{ start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) + end_year <- lubridate::year(end_date) if (!file.exists(outfolder)) { dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) } - + ## set up results output to return rows <- end_year - start_year + 1 - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = in.prefix, - stringsAsFactors = FALSE) - + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = in.prefix, + stringsAsFactors = FALSE + ) + files <- dir(in.path, in.prefix, full.names = TRUE) files <- files[grep("*.csv", files)] if (length(files) == 0) { @@ -96,41 +101,43 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form return(NULL) } if (length(files) > 1) { - PEcAn.logger::logger.warn(length(files), ' met files found. Using first file: ', files[1]) + PEcAn.logger::logger.warn(length(files), " met files found. Using first file: ", files[1]) files <- files[1] } - + # get lat/lon from format.vars if not passed directly if (missing(lat) || is.null(lat)) { - PEcAn.logger::logger.debug('Latitude is missing or NULL. Using `format$lat`.') + PEcAn.logger::logger.debug("Latitude is missing or NULL. Using `format$lat`.") lat <- format$lat if (is.null(lat)) { lat <- 0. } } if (missing(lon) || is.null(lon)) { - PEcAn.logger::logger.debug('Longitude is missing or NULL. Using `format$lon`.') + PEcAn.logger::logger.debug("Longitude is missing or NULL. Using `format$lon`.") lon <- format$lon if (is.null(lon)) { lon <- 0. 
} } - + # create new filename by swapping .csv with .nc, and adding year strings from start to end year # year(start_date) all_years <- start_year:end_year - all_files <- paste0(file.path(outfolder, gsub(".csv", "", basename(files))), ".", as.character(all_years), - ".nc") - + all_files <- paste0( + file.path(outfolder, gsub(".csv", "", basename(files))), ".", as.character(all_years), + ".nc" + ) + results$file <- all_files results$host <- PEcAn.remote::fqdn() - + # The For below loop updates the start/end date once file is read in - results$startdate <- paste0(all_years, "-01-01 00:00:00") - results$enddate <- paste0(all_years, "-12-31 23:59:59") - results$mimetype <- "application/x-netcdf" + results$startdate <- paste0(all_years, "-01-01 00:00:00") + results$enddate <- paste0(all_years, "-12-31 23:59:59") + results$mimetype <- "application/x-netcdf" results$formatname <- "CF" - + # If all the files already exist, then skip the conversion unless overwrite=TRUE if (!overwrite && all(file.exists(all_files))) { PEcAn.logger::logger.debug("File '", all_files, "' already exist, skipping.") @@ -141,7 +148,7 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form all_years <- all_years[which(!file.exists(all_files))] all_files <- all_files[which(!file.exists(all_files))] } - + ## Read the CSV file some files have a line under the header that lists variable units ## search for NA's after conversion to numeric skiplog <- FALSE @@ -155,61 +162,64 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form skiplog <- TRUE skiprows <- c(1:header - 1) } - alldat <- utils::read.csv(files, - header = header, - skip = format$skip, - na.strings = format$na.strings, - as.is = TRUE, - check.names = FALSE) + alldat <- utils::read.csv(files, + header = header, + skip = format$skip, + na.strings = format$na.strings, + as.is = TRUE, + check.names = FALSE + ) if (skiplog) { alldat <- alldat[-skiprows, ] } - - + + ## skip reading columns that are defined in format but not found in CSV header if (format$header >= 1) { csv_colnames <- names(alldat) - missing_col <- which(!(format$vars$input_name %in% csv_colnames)) + missing_col <- which(!(format$vars$input_name %in% csv_colnames)) format$vars$bety_name[missing_col] <- paste0(format$vars$bety_name[missing_col], "(missing)") } - + ## Get datetime vector - requires one column be connected to bety variable datetime or 3 columns year,day,hour ## FUTURE: Also consider year,month,day_of_month,hour or year,MMDD or fjday or other combinations - - if(!is.null(format$time_zone)){ - tz = format$time_zone - }else{ - tz = "UTC" + + if (!is.null(format$time_zone)) { + tz <- format$time_zone + } else { + tz <- "UTC" PEcAn.logger::logger.warn("No site timezone. Assuming input time zone is UTC. 
This may be incorrect.") } - - ##The following code forces the time zone into standard/winter/local time only - if (!(tz %in% c("UTC","GMT"))) { - tzdiff <- PEcAn.utils::timezone_hour(tz)*(-1) - if (tzdiff>=0) { - tzstr <- paste0("Etc/GMT+",tzdiff) + + ## The following code forces the time zone into standard/winter/local time only + if (!(tz %in% c("UTC", "GMT"))) { + tzdiff <- PEcAn.utils::timezone_hour(tz) * (-1) + if (tzdiff >= 0) { + tzstr <- paste0("Etc/GMT+", tzdiff) } else { - tzstr <- paste0("Etc/GMT",tzdiff) + tzstr <- paste0("Etc/GMT", tzdiff) } } else { tzstr <- tz } - - ##datetime_index <- which(format$vars$bety_name == "datetime") + + ## datetime_index <- which(format$vars$bety_name == "datetime") datetime_index <- format$time.row if (length(datetime_index) == 0) { - bety_year <- format$vars$bety_name == 'year' - bety_day <- format$vars$bety_name == 'day' - bety_hour <- format$vars$bety_hour == 'hour' + bety_year <- format$vars$bety_name == "year" + bety_day <- format$vars$bety_name == "day" + bety_hour <- format$vars$bety_hour == "hour" if (all(any(bety_year), any(bety_day), any(bety_hour))) { year_index <- which(bety_year) - DOY_index <- which(bety_day) + DOY_index <- which(bety_day) hour_index <- which(bety_hour) - yearday <- format(strptime(paste0(alldat[, format$vars$input_name[year_index]], "-", - alldat[, format$vars$input_name[DOY_index]]), format = "%Y-%j"), format = "%Y-%m-%d") - hh <- floor(alldat[, format$vars$input_name[hour_index]]) - mm <- (alldat[, format$vars$input_name[hour_index]] - hh) * 60 - yyddhhmm <- strptime(paste0(yearday, " ", hh, ":", mm), format = "%Y-%m-%d %H:%M", tz=tzstr) + yearday <- format(strptime(paste0( + alldat[, format$vars$input_name[year_index]], "-", + alldat[, format$vars$input_name[DOY_index]] + ), format = "%Y-%j"), format = "%Y-%m-%d") + hh <- floor(alldat[, format$vars$input_name[hour_index]]) + mm <- (alldat[, format$vars$input_name[hour_index]] - hh) * 60 + yyddhhmm <- strptime(paste0(yearday, " ", hh, ":", mm), format = "%Y-%m-%d %H:%M", tz = tzstr) alldatetime <- as.POSIXct(yyddhhmm) } else { ## Does not match any of the known date formats, add new ones here! 
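The Etc/GMT forcing in the hunk above is the usual stumbling block: POSIX "Etc/GMT+N" zones sit west of Greenwich (UTC-N), which is why timezone_hour() is multiplied by -1 before the string is built. A minimal standalone sketch of that same logic follows; it is illustrative only, and it assumes PEcAn.utils::timezone_hour("America/Chicago") returns the signed UTC offset -6 (CST), which is hardcoded here to keep the example self-contained.

# Sketch of the standard-time (no DST) zone forcing used in met2CF.csv.
# Assumption: the site offset from UTC is -6 hours (e.g. America/Chicago standard time).
offset <- -6
tzdiff <- offset * (-1)                                   # invert sign for the POSIX Etc/GMT convention
tzstr <- if (tzdiff >= 0) paste0("Etc/GMT+", tzdiff) else paste0("Etc/GMT", tzdiff)
tzstr                                                     # "Etc/GMT+6", a fixed UTC-6 zone with no DST
strptime("200401011230", format = "%Y%m%d%H%M", tz = tzstr) # parses as 12:30 local standard time, UTC-6

Using a fixed Etc/GMT zone rather than the named regional zone keeps every timestamp in local standard (winter) time, so half-hourly met records never gain or lose an hour at daylight-saving transitions.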
@@ -217,14 +227,14 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form } } else { datetime_raw <- alldat[, format$vars$input_name[datetime_index]] - datetime_units <- paste(format$vars$storage_type[datetime_index],collapse = " ") #strptime convention + datetime_units <- paste(format$vars$storage_type[datetime_index], collapse = " ") # strptime convention if (datetime_units == "") { - datetime_units <- "%Y%m%d%H%M" #assume ISO convention + datetime_units <- "%Y%m%d%H%M" # assume ISO convention } - alldatetime <- as.POSIXct(strptime(datetime_raw,format=datetime_units,tz=tzstr)) + alldatetime <- as.POSIXct(strptime(datetime_raw, format = datetime_units, tz = tzstr)) } ## and remove datetime from 'dat' dataframe dat[, datetime_index] <- format$na.strings - + ## Only run if years > start_date < end_date ## if both are provided, clip data to those dates ## Otherwise set start/end to first/last datetime of file @@ -243,11 +253,10 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form } ## convert data to numeric - not needed and is slow ## dat <- as.data.frame(datetime = datetime, sapply(dat[,-datetime_index], as.numeric)) - + ## loop over years that need to be read - + for (i in seq_along(all_years)) { - ## Test that file has data for year being processed, else move on this.year <- all_years[i] availdat.year <- which(years == this.year) @@ -256,30 +265,30 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form next } new.file <- all_files[i] - + ## Extract that year's data from large file - dat <- alldat[availdat.year, ] + dat <- alldat[availdat.year, ] datetime <- alldatetime[availdat.year] - + results$startdate[this.year - start_year + 1] <- as.character(datetime[1]) - results$enddate[this.year - start_year + 1] <- as.character(datetime[length(datetime)]) - + results$enddate[this.year - start_year + 1] <- as.character(datetime[length(datetime)]) + ### create time dimension days_since_1700 <- datetime - lubridate::ymd_hm("1700-01-01 00:00") - t <- ncdf4::ncdim_def("time", "days since 1700-01-01", as.numeric(days_since_1700)) #define netCDF dimensions for variables + t <- ncdf4::ncdim_def("time", "days since 1700-01-01", as.numeric(days_since_1700)) # define netCDF dimensions for variables timestep <- as.numeric(mean(PEcAn.utils::ud_convert(diff(days_since_1700), "d", "s"))) - + ## create lat lon dimensions - x <- ncdf4::ncdim_def("longitude", "degrees_east", lon) # define netCDF dimensions for variables + x <- ncdf4::ncdim_def("longitude", "degrees_east", lon) # define netCDF dimensions for variables y <- ncdf4::ncdim_def("latitude", "degrees_north", lat) xytdim <- list(x, y, t) - + ## airT (celsius) => air_temperature (K) - REQUIRED for all met files locs <- which(format$vars$bety_name %in% "airT") if (length(locs) > 0) { k <- locs[1] airT.var <- ncdf4::ncvar_def(name = "air_temperature", units = "K", dim = xytdim) - nc <- ncdf4::nc_create(new.file, vars = airT.var) #create netCDF file + nc <- ncdf4::nc_create(new.file, vars = airT.var) # create netCDF file arrloc <- as.character(format$vars$input_name[k]) if (arrloc == "") { if (any(colnames(format$vars) == "column_number")) { @@ -288,12 +297,14 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form PEcAn.logger::logger.error("Cannot find column location for airT by name or column number") } } - ncdf4::ncvar_put(nc, varid = airT.var, - vals = met.conv(dat[, arrloc], format$vars$input_units[k], "celsius", "K")) + 
ncdf4::ncvar_put(nc, + varid = airT.var, + vals = met.conv(dat[, arrloc], format$vars$input_units[k], "celsius", "K") + ) } else { PEcAn.logger::logger.error("No air temperature found in met file") } - + ## air_pressure (Pa) => air_pressure (Pa) locs <- which(format$vars$bety_name %in% "air_pressure") if (length(locs) > 0) { @@ -308,17 +319,21 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form PEcAn.logger::logger.error("Cannot find column location for air_pressure by name or column number") } } - ncdf4::ncvar_put(nc, varid = Psurf.var, - vals = met.conv(dat[, arrloc], format$vars$input_units[k], "Pa", "Pa")) + ncdf4::ncvar_put(nc, + varid = Psurf.var, + vals = met.conv(dat[, arrloc], format$vars$input_units[k], "Pa", "Pa") + ) } - + ## co2atm (umol/mol) => mole_fraction_of_carbon_dioxide_in_air (mol/mol) locs <- which(format$vars$bety_name %in% "co2atm") if (length(locs) > 0) { k <- locs[1] - CO2.var <- ncdf4::ncvar_def(name = "mole_fraction_of_carbon_dioxide_in_air", - units = "mol mol-1", - dim = xytdim) + CO2.var <- ncdf4::ncvar_def( + name = "mole_fraction_of_carbon_dioxide_in_air", + units = "mol mol-1", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = CO2.var, verbose = nc_verbose) arrloc <- as.character(format$vars$input_name[k]) if (arrloc == "") { @@ -328,18 +343,21 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form PEcAn.logger::logger.error("Cannot find column location for co2atm by name or column number") } } - ncdf4::ncvar_put(nc, - varid = CO2.var, - vals = met.conv(dat[, arrloc], format$vars$input_units[k], "umol mol-1", "mol mol-1")) + ncdf4::ncvar_put(nc, + varid = CO2.var, + vals = met.conv(dat[, arrloc], format$vars$input_units[k], "umol mol-1", "mol mol-1") + ) } - + ## soilM (%) => volume_fraction_of_condensed_water_in_soil (%) locs <- which(format$vars$bety_name %in% "soilM") if (length(locs) > 0) { k <- locs[1] - soilM.var <- ncdf4::ncvar_def(name = "volume_fraction_of_condensed_water_in_soil", - units = "1", - dim = xytdim) + soilM.var <- ncdf4::ncvar_def( + name = "volume_fraction_of_condensed_water_in_soil", + units = "1", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = soilM.var, verbose = nc_verbose) arrloc <- as.character(format$vars$input_name[k]) if (arrloc == "") { @@ -349,11 +367,12 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form PEcAn.logger::logger.error("Cannot find column location for soilM by name or column number") } } - ncdf4::ncvar_put(nc, - varid = soilM.var, - vals = met.conv(dat[, arrloc], format$vars$input_units[k], "1", "1")) + ncdf4::ncvar_put(nc, + varid = soilM.var, + vals = met.conv(dat[, arrloc], format$vars$input_units[k], "1", "1") + ) } - + ## soilT (celsius) => soil_temperature (K) locs <- which(format$vars$bety_name %in% "soilT") if (length(locs) > 0) { @@ -368,11 +387,12 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form PEcAn.logger::logger.error("Cannot find column location for soilT by name or column number") } } - ncdf4::ncvar_put(nc, - varid = soilT.var, - vals = met.conv(dat[, arrloc], format$vars$input_units[k], "celsius", "K")) + ncdf4::ncvar_put(nc, + varid = soilT.var, + vals = met.conv(dat[, arrloc], format$vars$input_units[k], "celsius", "K") + ) } - + ## relative_humidity (%) => relative_humidity (%) locs <- which(format$vars$bety_name %in% "relative_humidity") if (length(locs) > 0) { @@ -387,11 +407,12 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, 
end_date, form PEcAn.logger::logger.error("Cannot find column location for relative_humidity by name or column number") } } - ncdf4::ncvar_put(nc, - varid = RH.var, - vals = met.conv(dat[, arrloc], format$vars$input_units[k], "%", "%")) + ncdf4::ncvar_put(nc, + varid = RH.var, + vals = met.conv(dat[, arrloc], format$vars$input_units[k], "%", "%") + ) } - + ## specific_humidity (g g-1) => specific_humidity (kg kg-1) locs <- which(format$vars$bety_name %in% "specific_humidity") if (length(locs) > 0) { @@ -406,23 +427,26 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form PEcAn.logger::logger.error("Cannot find column location for specific_humidity by name or column number") } } - ncdf4::ncvar_put(nc, - varid = qair.var, - vals = met.conv(dat[, arrloc], format$vars$input_units[k], "g g-1", "kg kg-1")) + ncdf4::ncvar_put(nc, + varid = qair.var, + vals = met.conv(dat[, arrloc], format$vars$input_units[k], "g g-1", "kg kg-1") + ) } else { ## file needs to be closed and re-opened to access added variables ncdf4::nc_close(nc) nc <- ncdf4::nc_open(new.file, write = TRUE, readunlim = FALSE) if ("relative_humidity" %in% names(nc$var) & "air_temperature" %in% names(nc$var)) { ## Convert RH to SH - qair <- rh2qair(rh = ncdf4::ncvar_get(nc, "relative_humidity")/100, - T = ncdf4::ncvar_get(nc, "air_temperature")) + qair <- rh2qair( + rh = ncdf4::ncvar_get(nc, "relative_humidity") / 100, + T = ncdf4::ncvar_get(nc, "air_temperature") + ) qair.var <- ncdf4::ncvar_def(name = "specific_humidity", units = "kg kg-1", dim = xytdim) - nc <- ncdf4::ncvar_add(nc = nc, v = qair.var, verbose = nc_verbose) #add variable to existing netCDF file + nc <- ncdf4::ncvar_add(nc = nc, v = qair.var, verbose = nc_verbose) # add variable to existing netCDF file ncdf4::ncvar_put(nc, varid = "specific_humidity", vals = qair) } } - + ## VPD (Pa) => water_vapor_saturation_deficit (Pa) locs <- which(format$vars$bety_name %in% "VPD") if (length(locs) > 0) { @@ -437,19 +461,22 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form PEcAn.logger::logger.error("Cannot find column location for VPD by name or column number") } } - ncdf4::ncvar_put(nc, - varid = VPD.var, - vals = met.conv(dat[, arrloc], format$vars$input_units[k], "Pa", "Pa")) + ncdf4::ncvar_put(nc, + varid = VPD.var, + vals = met.conv(dat[, arrloc], format$vars$input_units[k], "Pa", "Pa") + ) } - + ## surface_downwelling_longwave_flux_in_air (W m-2) => surface_downwelling_longwave_flux_in_air (W ## m-2) locs <- which(format$vars$bety_name %in% "surface_downwelling_longwave_flux_in_air") if (length(locs) > 0) { k <- locs[1] - LW.var <- ncdf4::ncvar_def(name = "surface_downwelling_longwave_flux_in_air", - units = "W m-2", - dim = xytdim) + LW.var <- ncdf4::ncvar_def( + name = "surface_downwelling_longwave_flux_in_air", + units = "W m-2", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = LW.var, verbose = nc_verbose) arrloc <- as.character(format$vars$input_name[k]) if (arrloc == "") { @@ -459,18 +486,21 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form PEcAn.logger::logger.error("Cannot find column location for surface_downwelling_longwave_flux_in_air by name or column number") } } - ncdf4::ncvar_put(nc, - varid = LW.var, - vals = met.conv(dat[, arrloc], format$vars$input_units[k], "W m-2", "W m-2")) + ncdf4::ncvar_put(nc, + varid = LW.var, + vals = met.conv(dat[, arrloc], format$vars$input_units[k], "W m-2", "W m-2") + ) } - + ## solar_radiation (W m-2) => 
surface_downwelling_shortwave_flux_in_air (W m-2) locs <- which(format$vars$bety_name %in% "solar_radiation") if (length(locs) > 0) { k <- locs[1] - SW.var <- ncdf4::ncvar_def(name = "surface_downwelling_shortwave_flux_in_air", - units = "W m-2", - dim = xytdim) + SW.var <- ncdf4::ncvar_def( + name = "surface_downwelling_shortwave_flux_in_air", + units = "W m-2", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = SW.var, verbose = nc_verbose) arrloc <- as.character(format$vars$input_name[k]) if (arrloc == "") { @@ -480,18 +510,21 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form PEcAn.logger::logger.error("Cannot find column location for solar_radiation by name or column number") } } - ncdf4::ncvar_put(nc, - varid = SW.var, - vals = met.conv(dat[, arrloc], format$vars$input_units[k], "W m-2", "W m-2")) + ncdf4::ncvar_put(nc, + varid = SW.var, + vals = met.conv(dat[, arrloc], format$vars$input_units[k], "W m-2", "W m-2") + ) } - + ## PAR (umol m-2 s-1) => surface_downwelling_photosynthetic_photon_flux_in_air (mol m-2 s-1) locs <- which(format$vars$bety_name %in% "PAR") if (length(locs) > 0) { k <- locs[1] - PAR.var <- ncdf4::ncvar_def(name = "surface_downwelling_photosynthetic_photon_flux_in_air", - units = "mol m-2 s-1", - dim = xytdim) + PAR.var <- ncdf4::ncvar_def( + name = "surface_downwelling_photosynthetic_photon_flux_in_air", + units = "mol m-2 s-1", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = PAR.var, verbose = nc_verbose) arrloc <- as.character(format$vars$input_name[k]) if (arrloc == "") { @@ -501,18 +534,21 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form PEcAn.logger::logger.error("Cannot find column location for PAR by name or column number") } } - ncdf4::ncvar_put(nc, - varid = PAR.var, - vals = met.conv(dat[, arrloc], format$vars$input_units[k], "umol m-2 s-1", "mol m-2 s-1")) + ncdf4::ncvar_put(nc, + varid = PAR.var, + vals = met.conv(dat[, arrloc], format$vars$input_units[k], "umol m-2 s-1", "mol m-2 s-1") + ) } - + ## precipitation_flux (kg m-2 s-1) => precipitation_flux (kg m-2 s-1) locs <- which(format$vars$bety_name %in% "precipitation_flux") if (length(locs) > 0) { k <- locs[1] - precip.var <- ncdf4::ncvar_def(name = "precipitation_flux", - units = "kg m-2 s-1", - dim = xytdim) + precip.var <- ncdf4::ncvar_def( + name = "precipitation_flux", + units = "kg m-2 s-1", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = precip.var, verbose = nc_verbose) arrloc <- as.character(format$vars$input_name[k]) if (arrloc == "") { @@ -524,33 +560,39 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form } rain <- dat[, arrloc] rain.units <- as.character(format$vars$input_units[k]) - rain.units <- switch(rain.units, mm = { - rain <- rain / timestep - "kg m-2 s-1" - }, m = { - rain <- rain / timestep - "Mg m-2 s-1" - }, `in` = { - rain <- PEcAn.utils::ud_convert(rain / timestep, "in", "mm") - "kg m-2 s-1" - }, `mm h-1` = { - rain <- PEcAn.utils::ud_convert(rain / timestep, "h", "s") - "kg m-2 s-1" - }, - 'kg m-2 (30 minute)-1' = { - rain <- rain / timestep - 'kg m-2 s-1' - }, - 'kg m-2 hr-1' = { - rain <- rain / timestep - 'kg m-2 s-1' - } + rain.units <- switch(rain.units, + mm = { + rain <- rain / timestep + "kg m-2 s-1" + }, + m = { + rain <- rain / timestep + "Mg m-2 s-1" + }, + `in` = { + rain <- PEcAn.utils::ud_convert(rain / timestep, "in", "mm") + "kg m-2 s-1" + }, + `mm h-1` = { + rain <- PEcAn.utils::ud_convert(rain / timestep, "h", "s") + "kg m-2 s-1" + }, + 
"kg m-2 (30 minute)-1" = { + rain <- rain / timestep + "kg m-2 s-1" + }, + "kg m-2 hr-1" = { + rain <- rain / timestep + "kg m-2 s-1" + } + ) + ncdf4::ncvar_put(nc, + varid = precip.var, + vals = met.conv(rain, rain.units, "kg m-2 s-1", "kg m-2 s-1") ) - ncdf4::ncvar_put(nc, varid = precip.var, - vals = met.conv(rain, rain.units, "kg m-2 s-1", "kg m-2 s-1")) } - - ## eastward_wind (m s-1) => eastward_wind (m s-1) + + ## eastward_wind (m s-1) => eastward_wind (m s-1) ## northward_wind (m s-1) => northward_wind (m s-1) if (("eastward_wind" %in% format$vars$bety_name) & ("northward_wind" %in% format$vars$bety_name)) { locs <- which(format$vars$bety_name %in% "northward_wind") @@ -565,10 +607,11 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form PEcAn.logger::logger.error("Cannot find column location for eastward_wind by name or column number") } } - ncdf4::ncvar_put(nc, - varid = Nwind.var, - vals = met.conv(dat[, arrloc], format$vars$input_units[k], "m s-1", "m s-1")) - + ncdf4::ncvar_put(nc, + varid = Nwind.var, + vals = met.conv(dat[, arrloc], format$vars$input_units[k], "m s-1", "m s-1") + ) + locs <- which(format$vars$bety_name %in% "eastward_wind") k <- locs[1] Ewind.var <- ncdf4::ncvar_def(name = "eastward_wind", units = "m s-1", dim = xytdim) @@ -580,9 +623,10 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form } else { PEcAn.logger::logger.error("Cannot find column location for northward_wind by name or column number") } - ncdf4::ncvar_put(nc, - varid = Ewind.var, - vals = met.conv(dat[, arrloc], format$vars$input_units[k], "m s-1", "m s-1")) + ncdf4::ncvar_put(nc, + varid = Ewind.var, + vals = met.conv(dat[, arrloc], format$vars$input_units[k], "m s-1", "m s-1") + ) } } else { locs_wd <- which(format$vars$bety_name %in% "wind_direction") @@ -608,9 +652,11 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form } } wind <- met.conv(dat[, arrloc_ws], format$vars$input_units[k_ws], "m s-1", "m s-1") - wind_direction <- met.conv(dat[, arrloc_wd], - format$vars$input_units[k_wd], "radians", - "radians") + wind_direction <- met.conv( + dat[, arrloc_wd], + format$vars$input_units[k_wd], "radians", + "radians" + ) uwind <- wind * cos(wind_direction) vwind <- wind * sin(wind_direction) Ewind.var <- ncdf4::ncvar_def(name = "eastward_wind", units = "m s-1", dim = xytdim) @@ -634,14 +680,15 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form PEcAn.logger::logger.error("Cannot find column location for Wspd by name or column number") } } - ncdf4::ncvar_put(nc, - varid = Wspd.var, - vals = met.conv(dat[, arrloc], format$vars$input_units[k], "m s-1", "m s-1")) + ncdf4::ncvar_put(nc, + varid = Wspd.var, + vals = met.conv(dat[, arrloc], format$vars$input_units[k], "m s-1", "m s-1") + ) } } - } ## end wind - - # ## wind_direction (degrees) => wind_from_direction (degrees) + } ## end wind + + # ## wind_direction (degrees) => wind_from_direction (degrees) # locs <- which(format$vars$bety_name %in% "wind_direction") # if (length(locs)>0) { # k <- locs[1] @@ -649,19 +696,19 @@ met2CF.csv <- function(in.path, in.prefix, outfolder, start_date, end_date, form # nc <- ncdf4::ncvar_add(nc = nc, v = Wdir.var, verbose = nc_verbose) # arrloc <- as.character(format$vars$input_name[k]) # if (arrloc=="") { - # if (any(colnames(format$vars)=="column_number")) { + # if (any(colnames(format$vars)=="column_number")) { # arrloc <- format$vars$column_number[k] # } else { # 
PEcAn.logger::logger.error("Cannot find column location for wind_direction by name or column number") # } # } # ncdf4::ncvar_put(nc, varid = Wdir.var, - # vals=met.conv(dat[,arrloc],format$vars$input_units[k],"degrees","degrees")) - # } - + # vals=met.conv(dat[,arrloc],format$vars$input_units[k],"degrees","degrees")) + # } + ncdf4::nc_close(nc) - } ## end loop over years - } ## end else file found + } ## end loop over years + } ## end else file found return(results) } # met2CF.csv @@ -670,9 +717,9 @@ met.conv <- function(x, orig, bety, CF) { orig <- as.character(orig) bety <- as.character(bety) CF <- as.character(CF) - + if (nchar(orig) == 0) { - orig <- bety ## if units not provided, default is that they were the same units as bety + orig <- bety ## if units not provided, default is that they were the same units as bety } if (PEcAn.utils::unit_is_parseable(orig)) { if (units::ud_are_convertible(orig, bety)) { diff --git a/modules/data.atmosphere/R/met2cf.ERA5.R b/modules/data.atmosphere/R/met2cf.ERA5.R index 2fb21bcd682..6494e827846 100644 --- a/modules/data.atmosphere/R/met2cf.ERA5.R +++ b/modules/data.atmosphere/R/met2cf.ERA5.R @@ -15,7 +15,7 @@ #' @return list of dataframes #' @export #' -met2CF.ERA5<- function(lat, +met2CF.ERA5 <- function(lat, long, start_date, end_date, @@ -24,54 +24,59 @@ met2CF.ERA5<- function(lat, out.xts, overwrite = FALSE, verbose = TRUE) { - - years <- seq(lubridate::year(start_date), - lubridate::year(end_date), - 1 + years <- seq( + lubridate::year(start_date), + lubridate::year(end_date), + 1 ) - + ensemblesN <- seq(1, 10) - start_date <- paste0(lubridate::year(start_date),"-01-01") %>% as.Date() - end_date <- paste0(lubridate::year(end_date),"-12-31") %>% as.Date() + start_date <- paste0(lubridate::year(start_date), "-01-01") %>% as.Date() + end_date <- paste0(lubridate::year(end_date), "-12-31") %>% as.Date() # adding RH and converting rain - + out.new <- ensemblesN %>% purrr::map(function(ensi) { - tryCatch({ - - ens <- out.xts[[ensi]] - # Solar radation conversions - #https://confluence.ecmwf.int/pages/viewpage.action?pageId=104241513 - #For ERA5 daily ensemble data, the accumulation period is 3 hours. Hence to convert to W/m2: - ens[, "ssrd"] <- ens[, "ssrd"] / (3 * 3600) - ens[, "strd"] <- ens[, "strd"] / (3 * 3600) - #precipitation it's originaly in meters. Meters times the density will give us the kg/m2 - ens[, "tp"] <- - ens[, "tp"] * 1000 / 3 # divided by 3 because we have 3 hours data - ens[, "tp"] <- - PEcAn.utils::ud_convert(as.numeric(ens[, "tp"]), "kg m-2 hr-1", "kg m-2 s-1") #There are 21600 seconds in 6 hours - #RH - #Adopted from weathermetrics/R/moisture_conversions.R - t <- - PEcAn.utils::ud_convert(ens[, "t2m"] %>% as.numeric(), "K", "degC") - dewpoint <- - PEcAn.utils::ud_convert(ens[, "d2m"] %>% as.numeric(), "K", "degC") - beta <- (112 - (0.1 * t) + dewpoint) / (112 + (0.9 * t)) - relative.humidity <- beta ^ 8 - #specific humidity - specific_humidity <- - PEcAn.data.atmosphere::rh2qair(relative.humidity, - ens[, "t2m"] %>% as.numeric(), - ens[, "sp"] %>% as.numeric()) # Pressure in Pa - }, - error = function(e) { - PEcAn.logger::logger.severe("Something went wrong during the unit conversion in met2cf ERA5.", - conditionMessage(e)) - }) - - - #adding humidity + tryCatch( + { + ens <- out.xts[[ensi]] + # Solar radation conversions + # https://confluence.ecmwf.int/pages/viewpage.action?pageId=104241513 + # For ERA5 daily ensemble data, the accumulation period is 3 hours. 
Hence to convert to W/m2: + ens[, "ssrd"] <- ens[, "ssrd"] / (3 * 3600) + ens[, "strd"] <- ens[, "strd"] / (3 * 3600) + # precipitation it's originaly in meters. Meters times the density will give us the kg/m2 + ens[, "tp"] <- + ens[, "tp"] * 1000 / 3 # divided by 3 because we have 3 hours data + ens[, "tp"] <- + PEcAn.utils::ud_convert(as.numeric(ens[, "tp"]), "kg m-2 hr-1", "kg m-2 s-1") # There are 21600 seconds in 6 hours + # RH + # Adopted from weathermetrics/R/moisture_conversions.R + t <- + PEcAn.utils::ud_convert(ens[, "t2m"] %>% as.numeric(), "K", "degC") + dewpoint <- + PEcAn.utils::ud_convert(ens[, "d2m"] %>% as.numeric(), "K", "degC") + beta <- (112 - (0.1 * t) + dewpoint) / (112 + (0.9 * t)) + relative.humidity <- beta^8 + # specific humidity + specific_humidity <- + PEcAn.data.atmosphere::rh2qair( + relative.humidity, + ens[, "t2m"] %>% as.numeric(), + ens[, "sp"] %>% as.numeric() + ) # Pressure in Pa + }, + error = function(e) { + PEcAn.logger::logger.severe( + "Something went wrong during the unit conversion in met2cf ERA5.", + conditionMessage(e) + ) + } + ) + + + # adding humidity xts::merge.xts(ens[, -c(3)], (specific_humidity)) %>% `colnames<-`( c( @@ -85,114 +90,122 @@ met2CF.ERA5<- function(lat, "specific_humidity" ) ) - }) - - #These are the cf standard names - cf_var_names = colnames(out.new[[1]]) - cf_var_units = c("K", "Pa", "kg m-2 s-1", "m s-1", "m s-1", "W m-2", "W m-2", "1") #Negative numbers indicate negative exponents - - results_list <- ensemblesN %>% + # These are the cf standard names + cf_var_names <- colnames(out.new[[1]]) + cf_var_units <- c("K", "Pa", "kg m-2 s-1", "m s-1", "m s-1", "W m-2", "W m-2", "1") # Negative numbers indicate negative exponents + + + results_list <- ensemblesN %>% purrr::map(function(i) { - start_date <- min(zoo::index(out.new[[i]])) end_date <- max(zoo::index(out.new[[i]])) # Create a data frame with information about the file. This data frame's format is an internal PEcAn standard, and is stored in the BETY database to - # locate the data file. + # locate the data file. results <- data.frame( file = "", - #Path to the file (added in loop below). + # Path to the file (added in loop below). host = PEcAn.remote::fqdn(), mimetype = "application/x-netcdf", formatname = "CF Meteorology", startdate = paste0(format( - start_date , "%Y-%m-%dT%H:%M:00 %z" + start_date, "%Y-%m-%dT%H:%M:00 %z" )), enddate = paste0(format( - end_date , "%Y-%m-%dT%H:%M:00 %z" + end_date, "%Y-%m-%dT%H:%M:00 %z" )), dbfile.name = paste0("ERA5.", i), stringsAsFactors = FALSE ) - + # i is the ensemble number - #Generating a unique identifier string that characterizes a particular data set. + # Generating a unique identifier string that characterizes a particular data set. identifier <- paste("ERA5", sitename, i, sep = "_") - + identifier.file <- paste("ERA5", - i, - lubridate::year(start_date), - sep = ".") - + i, + lubridate::year(start_date), + sep = "." + ) + ensemble_folder <- file.path(outfolder, identifier) - - #Each file will go in its own folder. + + # Each file will go in its own folder. 
if (!dir.exists(ensemble_folder)) { dir.create(ensemble_folder, - recursive = TRUE, - showWarnings = FALSE) + recursive = TRUE, + showWarnings = FALSE + ) } - - flname <-file.path(ensemble_folder, paste(identifier.file, "nc", sep = ".")) - - #Each ensemble member gets its own unique data frame, which is stored in results_list + + flname <- file.path(ensemble_folder, paste(identifier.file, "nc", sep = ".")) + + # Each ensemble member gets its own unique data frame, which is stored in results_list results$file <- flname - + years %>% purrr::map(function(year) { # identifier.file <- paste("ERA5", - i, - year, - sep = ".") - - flname <-file.path(ensemble_folder, paste(identifier.file, "nc", sep = ".")) + i, + year, + sep = "." + ) + + flname <- file.path(ensemble_folder, paste(identifier.file, "nc", sep = ".")) # Spliting it for this year data.for.this.year.ens <- out.new[[i]] - data.for.this.year.ens <- data.for.this.year.ens[year %>% as.character] - - - #Each ensemble gets its own file. - time_dim = ncdf4::ncdim_def( + data.for.this.year.ens <- data.for.this.year.ens[year %>% as.character()] + + + # Each ensemble gets its own file. + time_dim <- ncdf4::ncdim_def( name = "time", paste(units = "hours since", format(start_date, "%Y-%m-%dT%H:%M")), seq(0, (length(zoo::index( data.for.this.year.ens - )) * 3) - 1 , length.out = length(zoo::index(data.for.this.year.ens))), + )) * 3) - 1, length.out = length(zoo::index(data.for.this.year.ens))), create_dimvar = TRUE ) - lat_dim = ncdf4::ncdim_def("latitude", "degree_north", lat, create_dimvar = TRUE) - lon_dim = ncdf4::ncdim_def("longitude", "degree_east", long, create_dimvar = TRUE) - - #create a list of all ens - nc_var_list <- purrr::map2(cf_var_names, - cf_var_units, - ~ ncdf4::ncvar_def(.x, .y, list(time_dim, lat_dim, lon_dim), missval = NA_real_)) - - #results$dbfile.name <- flname - - + lat_dim <- ncdf4::ncdim_def("latitude", "degree_north", lat, create_dimvar = TRUE) + lon_dim <- ncdf4::ncdim_def("longitude", "degree_east", long, create_dimvar = TRUE) + + # create a list of all ens + nc_var_list <- purrr::map2( + cf_var_names, + cf_var_units, + ~ ncdf4::ncvar_def(.x, .y, list(time_dim, lat_dim, lon_dim), missval = NA_real_) + ) + + # results$dbfile.name <- flname + + if (!file.exists(flname) || overwrite) { - tryCatch({ - nc_flptr <- ncdf4::nc_create(flname, nc_var_list, verbose = verbose) - - #For each variable associated with that ensemble - for (j in seq_along(cf_var_names)) { - # "j" is the variable number. "i" is the ensemble number. - ncdf4::ncvar_put(nc_flptr, - nc_var_list[[j]], - zoo::coredata(data.for.this.year.ens)[, nc_var_list[[j]]$name]) + tryCatch( + { + nc_flptr <- ncdf4::nc_create(flname, nc_var_list, verbose = verbose) + + # For each variable associated with that ensemble + for (j in seq_along(cf_var_names)) { + # "j" is the variable number. "i" is the ensemble number. + ncdf4::ncvar_put( + nc_flptr, + nc_var_list[[j]], + zoo::coredata(data.for.this.year.ens)[, nc_var_list[[j]]$name] + ) + } + + ncdf4::nc_close(nc_flptr) # Write to the disk/storage + }, + error = function(e) { + PEcAn.logger::logger.severe( + "Something went wrong during the writing of the nc file.", + conditionMessage(e) + ) } - - ncdf4::nc_close(nc_flptr) #Write to the disk/storage - }, - error = function(e) { - PEcAn.logger::logger.severe("Something went wrong during the writing of the nc file.", - conditionMessage(e)) - }) - + ) } else { PEcAn.logger::logger.info(paste0( "The file ", @@ -200,12 +213,10 @@ met2CF.ERA5<- function(lat, " already exists. 
It was not overwritten." )) } - - - }) - + }) + return(results) }) - #For each ensemble - return(results_list ) + # For each ensemble + return(results_list) } diff --git a/modules/data.atmosphere/R/met2cf.module.R b/modules/data.atmosphere/R/met2cf.module.R index 0e80b575707..4abf5bd9b27 100644 --- a/modules/data.atmosphere/R/met2cf.module.R +++ b/modules/data.atmosphere/R/met2cf.module.R @@ -1,18 +1,17 @@ -.met2cf.module <- function(raw.id, register, met, str_ns, dir, machine, site.id, lat, lon, start_date, end_date, +.met2cf.module <- function(raw.id, register, met, str_ns, dir, machine, site.id, lat, lon, start_date, end_date, con, host, overwrite = FALSE, format.vars, bety) { - PEcAn.logger::logger.info("Begin change to CF Standards") - - input.id <- raw.id$input.id[1] - pkg <- "PEcAn.data.atmosphere" + + input.id <- raw.id$input.id[1] + pkg <- "PEcAn.data.atmosphere" formatname <- "CF Meteorology" - mimetype <- "application/x-netcdf" - format.id <- 33 - + mimetype <- "application/x-netcdf" + format.id <- 33 + if (register$scale == "regional") { input_name <- paste0(met, "_CF") outfolder <- file.path(dir, input_name) - + fcn1 <- paste0("met2CF.", met) mimename <- register$format$mimetype mimename <- substr(mimename, regexpr("/", mimename) + 1, nchar(mimename)) @@ -25,33 +24,36 @@ } else { PEcAn.logger::logger.error("met2CF function ", fcn1, " or ", fcn2, " don't exist") } - - cf0.id <- PEcAn.DB::convert_input(input.id = input.id, - outfolder = outfolder, - formatname = formatname, - mimetype = mimetype, - site.id = site.id, start_date = start_date, end_date = end_date, - pkg = pkg, fcn = fcn, con = con, host = host, - write = TRUE, - format.vars = format.vars, - overwrite = overwrite, - exact.dates = FALSE) - + + cf0.id <- PEcAn.DB::convert_input( + input.id = input.id, + outfolder = outfolder, + formatname = formatname, + mimetype = mimetype, + site.id = site.id, start_date = start_date, end_date = end_date, + pkg = pkg, fcn = fcn, con = con, host = host, + write = TRUE, + format.vars = format.vars, + overwrite = overwrite, + exact.dates = FALSE + ) + input_name <- paste0(met, "_CF_Permute") fcn <- "permute.nc" outfolder <- file.path(dir, input_name) - - cf.id <- PEcAn.DB::convert_input(input.id = cf0.id$input.id, - outfolder = outfolder, - formatname = formatname, - mimetype = mimetype, - site.id = site.id, - start_date = start_date, end_date = end_date, - pkg = pkg, fcn = fcn, con = con, host = host, - write = TRUE, - overwrite = overwrite, - exact.dates = FALSE) - + + cf.id <- PEcAn.DB::convert_input( + input.id = cf0.id$input.id, + outfolder = outfolder, + formatname = formatname, + mimetype = mimetype, + site.id = site.id, + start_date = start_date, end_date = end_date, + pkg = pkg, fcn = fcn, con = con, host = host, + write = TRUE, + overwrite = overwrite, + exact.dates = FALSE + ) } else if (register$scale == "site") { input_name <- paste0(met, "_CF_site_", str_ns) outfolder <- file.path(dir, input_name) @@ -64,24 +66,26 @@ fcn <- fcn1 } else if (exists(fcn2)) { fcn <- fcn2 - } else { - PEcAn.logger::logger.error("met2CF function ", fcn1, " or ", fcn2, " doesn't exists") - } + } else { + PEcAn.logger::logger.error("met2CF function ", fcn1, " or ", fcn2, " doesn't exists") + } format <- PEcAn.DB::query.format.vars(input.id = input.id, bety = bety) - cf.id <- PEcAn.DB::convert_input(input.id = input.id, - outfolder = outfolder, - formatname = formatname, - mimetype = mimetype, - site.id = site.id, - start_date = start_date, end_date = end_date, - pkg = pkg, fcn = fcn, con = con, 
host = host, - write = TRUE, - lat = lat, lon = lon, - format.vars = format.vars, - overwrite = overwrite, - exact.dates = FALSE) + cf.id <- PEcAn.DB::convert_input( + input.id = input.id, + outfolder = outfolder, + formatname = formatname, + mimetype = mimetype, + site.id = site.id, + start_date = start_date, end_date = end_date, + pkg = pkg, fcn = fcn, con = con, host = host, + write = TRUE, + lat = lat, lon = lon, + format.vars = format.vars, + overwrite = overwrite, + exact.dates = FALSE + ) } - + PEcAn.logger::logger.info("Finished change to CF Standards") return(cf.id) } # .met2cf.module diff --git a/modules/data.atmosphere/R/met2cf.nc.R b/modules/data.atmosphere/R/met2cf.nc.R index 1a4c2dd7007..ea82efb01e3 100644 --- a/modules/data.atmosphere/R/met2cf.nc.R +++ b/modules/data.atmosphere/R/met2cf.nc.R @@ -1,8 +1,7 @@ met2cf.nc <- function(in.path, in.prefix, outfolder, ...) { - script <- paste0("inst/scripts/CF.", in.prefix, "sh") cmdArgs <- paste(c(in.path, in.prefix, outfolder), collapse = " ") - + fcn <- system.file(script, package = "PEcAn.data.atmosphere") system(paste(fcn, cmdArgs)) } # met2cf.nc diff --git a/modules/data.atmosphere/R/met2model.module.R b/modules/data.atmosphere/R/met2model.module.R index 3f0bf2e34ab..0c6810c2509 100644 --- a/modules/data.atmosphere/R/met2model.module.R +++ b/modules/data.atmosphere/R/met2model.module.R @@ -1,72 +1,75 @@ ##' @export -.met2model.module <- function(ready.id, model, con, host, dir, met, str_ns, site, start_date, end_date, - new.site, overwrite = FALSE, exact.dates,spin, register, ensemble_name) { - +.met2model.module <- function(ready.id, model, con, host, dir, met, str_ns, site, start_date, end_date, + new.site, overwrite = FALSE, exact.dates, spin, register, ensemble_name) { # Determine output format name and mimetype - model_info <- PEcAn.DB::db.query(paste0("SELECT f.name, f.id, mt.type_string from modeltypes as m", " join modeltypes_formats as mf on m.id = mf.modeltype_id", - " join formats as f on mf.format_id = f.id", " join mimetypes as mt on f.mimetype_id = mt.id", - " where m.name = '", model, "' AND mf.tag='met'"), con) - + model_info <- PEcAn.DB::db.query(paste0( + "SELECT f.name, f.id, mt.type_string from modeltypes as m", " join modeltypes_formats as mf on m.id = mf.modeltype_id", + " join formats as f on mf.format_id = f.id", " join mimetypes as mt on f.mimetype_id = mt.id", + " where m.name = '", model, "' AND mf.tag='met'" + ), con) + if (model_info[1] == "CF Meteorology") { model.id <- ready.id outfolder <- file.path(dir, paste0(met, "_site_", str_ns)) } else { PEcAn.logger::logger.info("Begin Model Specific Conversion") - + formatname <- model_info[1] mimetype <- model_info[3] - + print("Convert to model format") - + input.id <- ready.id$input.id[1] - - if(host$name == "localhost"){ + + if (host$name == "localhost") { outfolder <- file.path(dir, paste0(met, "_", model, "_site_", str_ns)) } else { - if(is.null(host$folder)){ + if (is.null(host$folder)) { PEcAn.logger::logger.severe("host$folder required when running met2model.module for remote servers") } else { outfolder <- file.path(host$folder, paste0(met, "_", model, "_site_", str_ns)) } } - - #Some data products can be forecasts instead of real time data. - #Not all of the registration.xml files for each data source contains a tag. - forecast = FALSE + + # Some data products can be forecasts instead of real time data. + # Not all of the registration.xml files for each data source contains a tag. 
+ forecast <- FALSE if (!is.null(register$forecast)) { - forecast = as.logical(register$forecast) + forecast <- as.logical(register$forecast) } - + pkg <- paste0("PEcAn.", model) fcn <- paste0("met2model.", model) - lst <- site.lst(site.id=site$id, con=con) - + lst <- site.lst(site.id = site$id, con = con) + # we add the ensemble number to the input name if (!is.null(register$ensemble)) { - outfolder <- paste0(outfolder,"_",ensemble_name) + outfolder <- paste0(outfolder, "_", ensemble_name) } - - model.id <- PEcAn.DB::convert_input(input.id = input.id, - outfolder = outfolder, - formatname = formatname, mimetype = mimetype, - site.id = site$id, - start_date = start_date, end_date = end_date, - pkg = pkg, fcn = fcn, con = con, host = host, - write = TRUE, - lst = lst, - lat = new.site$lat, lon = new.site$lon, - overwrite = overwrite, - exact.dates = exact.dates, - spin_nyear = spin$nyear, - spin_nsample = spin$nsample, - spin_resample = spin$resample, - forecast = forecast, - ensemble = !is.null(register$ensemble) && as.logical(register$ensemble), - ensemble_name = ensemble_name, - dbfile.id=ready.id$dbfile.id) + + model.id <- PEcAn.DB::convert_input( + input.id = input.id, + outfolder = outfolder, + formatname = formatname, mimetype = mimetype, + site.id = site$id, + start_date = start_date, end_date = end_date, + pkg = pkg, fcn = fcn, con = con, host = host, + write = TRUE, + lst = lst, + lat = new.site$lat, lon = new.site$lon, + overwrite = overwrite, + exact.dates = exact.dates, + spin_nyear = spin$nyear, + spin_nsample = spin$nsample, + spin_resample = spin$resample, + forecast = forecast, + ensemble = !is.null(register$ensemble) && as.logical(register$ensemble), + ensemble_name = ensemble_name, + dbfile.id = ready.id$dbfile.id + ) } - + PEcAn.logger::logger.info(paste("Finished Model Specific Conversion", model.id[1])) return(list(outfolder = outfolder, model.id = model.id)) } # .met2model.module diff --git a/modules/data.atmosphere/R/met_temporal_downscale.Gaussian_ensemble.R b/modules/data.atmosphere/R/met_temporal_downscale.Gaussian_ensemble.R index 375c44e1857..53bf356976d 100644 --- a/modules/data.atmosphere/R/met_temporal_downscale.Gaussian_ensemble.R +++ b/modules/data.atmosphere/R/met_temporal_downscale.Gaussian_ensemble.R @@ -1,4 +1,4 @@ -# substr function from right side +# substr function from right side substrRight <- function(x, n) { substr(x, nchar(x) - n + 1, nchar(x)) } @@ -13,34 +13,39 @@ substrRight <- function(x, n) { ##' @param in.prefix ignored ##' @param outfolder path to directory in which to store output. Will be created if it does not exist ##' @param input_met - the source dataset that will temporally downscaled by the train_met dataset -##' @param train_met - the observed dataset that will be used to train the modeled dataset in NC format. i.e. Flux Tower dataset -##' (see download.Fluxnet2015 or download.Ameriflux) -##' @param overwrite logical: replace output file if it already exists? +##' @param train_met - the observed dataset that will be used to train the modeled dataset in NC format. i.e. Flux Tower dataset +##' (see download.Fluxnet2015 or download.Ameriflux) +##' @param overwrite logical: replace output file if it already exists? ##' @param verbose logical: should \code{\link[ncdf4:ncdf4-package]{ncdf4}} functions ##' print debugging information as they run? 
##' @param swdn_method - Downwelling shortwave flux in air downscaling method (options are "sine", "spline", and "Waichler") ##' @param n_ens - numeric value with the number of ensembles to run -##' @param w_len - numeric value that is the window length in days +##' @param w_len - numeric value that is the window length in days ##' @param utc_diff - numeric value in HOURS that is local standard time difference from UTC time. CST is -6 ##' @param ... further arguments, currently ignored ##' @author James Simkins -met_temporal_downscale.Gaussian_ensemble <- function(in.path, in.prefix, outfolder, - input_met, train_met, overwrite = FALSE, verbose = FALSE, - swdn_method = "sine", n_ens = 10, w_len = 20, utc_diff = -6, ... ) { - +met_temporal_downscale.Gaussian_ensemble <- function(in.path, in.prefix, outfolder, + input_met, train_met, overwrite = FALSE, verbose = FALSE, + swdn_method = "sine", n_ens = 10, w_len = 20, utc_diff = -6, ...) { sub_str <- substrRight(input_met, 7) year <- substr(sub_str, 1, 4) year <- as.numeric(year) eph_year <- year source_name <- substr(input_met, 1, nchar(input_met) - 8) # Variable names - var <- data.frame(CF.name <- c("air_temperature", "air_temperature_max", "air_temperature_min", - "surface_downwelling_longwave_flux_in_air", "air_pressure", "surface_downwelling_shortwave_flux_in_air", - "eastward_wind", "northward_wind", "specific_humidity", "precipitation_flux"), - units <- c("Kelvin", "Kelvin", "Kelvin", "W/m2", "Pascal", "W/m2", "m/s", - "m/s", "g/g", "kg/m2/s")) + var <- data.frame( + CF.name <- c( + "air_temperature", "air_temperature_max", "air_temperature_min", + "surface_downwelling_longwave_flux_in_air", "air_pressure", "surface_downwelling_shortwave_flux_in_air", + "eastward_wind", "northward_wind", "specific_humidity", "precipitation_flux" + ), + units <- c( + "Kelvin", "Kelvin", "Kelvin", "W/m2", "Pascal", "W/m2", "m/s", + "m/s", "g/g", "kg/m2/s" + ) + ) # Reading in the training data train <- list() tem <- ncdf4::nc_open(train_met) @@ -55,7 +60,7 @@ met_temporal_downscale.Gaussian_ensemble <- function(in.path, in.prefix, outfold lat_train <- as.numeric(ncdf4::ncvar_get(tem, "latitude")) lon_train <- as.numeric(ncdf4::ncvar_get(tem, "longitude")) ncdf4::nc_close(tem) - + train <- data.frame(train) colnames(train) <- var$CF.name if (all(is.na(train$air_temperature_max))) { @@ -75,89 +80,87 @@ met_temporal_downscale.Gaussian_ensemble <- function(in.path, in.prefix, outfold } } ncdf4::nc_close(tem) - + source <- data.frame(source) colnames(source) <- var$CF.name - + # Default downscale will be to the resolution of the training dataset - reso <- 24/(nrow(train)/365) + reso <- 24 / (nrow(train) / 365) reso_len <- nrow(train) - + # If a source doesn't have leap days, we need to remove them to ensure equal # lengths - if (lubridate::leap_year(year) == TRUE) - { - if (length(source$air_temperature)%%366 > 0) { - if (length(train$air_temperature)%%366 == 0) { - train <- train[1:365 * (nrow(train)/366)] + if (lubridate::leap_year(year) == TRUE) { + if (length(source$air_temperature) %% 366 > 0) { + if (length(train$air_temperature) %% 366 == 0) { + train <- train[1:365 * (nrow(train) / 366)] } eph_year <- year - 1 } - } #chose a non-leap year to use for daylength calculations if we don't have the + } # chose a non-leap year to use for daylength calculations if we don't have the if (lubridate::leap_year(eph_year) == TRUE) { sp <- 366 } else { sp <- 365 } - + # Now we start a for loop for the ensemble members and begin downscaling. 
A # random normal distribution is used to downscale as so; # (mean <- value of source data) (sd <- +/- window_days of train data at the - # same time intervals) + # same time intervals) results <- list() for (e in seq_len(n_ens)) { - - div <- nrow(train)/nrow(source) #tells us how many values need to be generated (via downscaling) from each source value - sd_step <- nrow(train)/sp #allows us to step through each window at specific times + div <- nrow(train) / nrow(source) # tells us how many values need to be generated (via downscaling) from each source value + sd_step <- nrow(train) / sp # allows us to step through each window at specific times df <- data.frame() - # Temperature - use spline interpolation + # Temperature - use spline interpolation sourtemp <- source$air_temperature temper <- vector() tem.met <- vector() mean_val <- vector() - + # since we begin our temper vec to min temperature, we want this to coincide with the normal # low value - for (l in seq_len(30)){ - mean_val[l] <- which.min(train$air_temperature[1*l:sd_step*l]) + for (l in seq_len(30)) { + mean_val[l] <- which.min(train$air_temperature[1 * l:sd_step * l]) } mean_val <- floor(mean(mean_val)) - + # Daily products typically have tmin and tmax, probably need to make version in case it doesn't - if (length(sourtemp) <= 366){ - for (i in seq_along(sourtemp)){ + if (length(sourtemp) <= 366) { + for (i in seq_along(sourtemp)) { a <- source$air_temperature_min[i] b <- source$air_temperature[i] c <- source$air_temperature_max[i] d <- source$air_temperature[i] - vec <- c(a,b,c,d) - temper <- append(temper,vec) + vec <- c(a, b, c, d) + temper <- append(temper, vec) } - seq_by = 24/reso/length(vec) - sourtemp <- temper - for (x in seq(from=mean_val, to=reso_len, by=seq_by)){ + seq_by <- 24 / reso / length(vec) + sourtemp <- temper + for (x in seq(from = mean_val, to = reso_len, by = seq_by)) { tem.met[x] <- sourtemp[x / seq_by] } len_diff <- reso_len - length(tem.met) - tem.met <- append(tem.met,values = rep(NA,len_diff)) + tem.met <- append(tem.met, values = rep(NA, len_diff)) } else { - for (x in seq(from=0, to=reso_len, by=div)){ - tem.met[x] <- sourtemp[x / div] - } + for (x in seq(from = 0, to = reso_len, by = div)) { + tem.met[x] <- sourtemp[x / div] + } } - - spline.temp = zoo::na.spline(tem.met) + + spline.temp <- zoo::na.spline(tem.met) df[1:reso_len, "air_temperature"] <- spline.temp - + # after this maybe we can run it through the random norm to add variation - # but not sure how models will react - + # but not sure how models will react + # Precipitation_flux this takes the daily total of precipitation and uses that as # a total possible amount of precip. 
It randomly distributes the values of # precipitation rand_vect_cont <- function(N, M, sd = 1) { - vec <- truncnorm::rtruncnorm(N, a = 0, b = Inf,M/N, sd) - vec/sum(vec) * M + vec <- truncnorm::rtruncnorm(N, a = 0, b = Inf, M / N, sd) + vec / sum(vec) * M } precip <- vector() for (x in seq_along(source$precipitation_flux)) { @@ -172,14 +175,17 @@ met_temporal_downscale.Gaussian_ensemble <- function(in.path, in.prefix, outfold dwnsc_day <- rand_vect_cont( div, source$precipitation_flux[x], - sd = stats::sd(train$precipitation_flux[lowday:highday])) + sd = stats::sd(train$precipitation_flux[lowday:highday]) + ) precip <- append(precip, dwnsc_day) } df$precipitation_flux <- precip - + # Specific Humidity, eastward wind and northward wind - wnd <- c("specific_humidity", "eastward_wind", "northward_wind", "surface_downwelling_longwave_flux_in_air", - "air_pressure") + wnd <- c( + "specific_humidity", "eastward_wind", "northward_wind", "surface_downwelling_longwave_flux_in_air", + "air_pressure" + ) for (u in wnd) { train_vec <- vector() a <- as.numeric(train[[u]]) @@ -206,62 +212,68 @@ met_temporal_downscale.Gaussian_ensemble <- function(in.path, in.prefix, outfold } df[1:length(train_vec), u] <- train_vec } - + df$specific_humidity[df$specific_humidity < 0] <- 0 - + # Downwelling shortwave radiation flux Ephemeris is a function to calculate # sunrise/sunset times and daylength for SW calculations in sine swdn_method ephemeris <- function(lat, lon, date, span = 1, tz = "UTC") { - lon.lat <- matrix(c(lon, lat), nrow = 1) - + # using noon gets us around daylight saving time issues day <- as.POSIXct(sprintf("%s 12:00:00", date), tz = tz) sequence <- seq(from = day, length.out = span, by = "days") - - sunrise <- suntools::sunriset(lon.lat, sequence, direction = "sunrise", - POSIXct.out = TRUE) - sunset <- suntools::sunriset(lon.lat, sequence, direction = "sunset", - POSIXct.out = TRUE) + + sunrise <- suntools::sunriset(lon.lat, sequence, + direction = "sunrise", + POSIXct.out = TRUE + ) + sunset <- suntools::sunriset(lon.lat, sequence, + direction = "sunset", + POSIXct.out = TRUE + ) solar_noon <- suntools::solarnoon(lon.lat, sequence, POSIXct.out = TRUE) - - data.frame(date = as.Date(sunrise$time), sunrise = as.numeric(format(sunrise$time, "%H%M")), - solarnoon = as.numeric(format(solar_noon$time, "%H%M")), - sunset = as.numeric(format(sunset$time, "%H%M")), - day_length = as.numeric(sunset$time - sunrise$time)) + + data.frame( + date = as.Date(sunrise$time), sunrise = as.numeric(format(sunrise$time, "%H%M")), + solarnoon = as.numeric(format(solar_noon$time, "%H%M")), + sunset = as.numeric(format(sunset$time, "%H%M")), + day_length = as.numeric(sunset$time - sunrise$time) + ) } - + swsource <- source$surface_downwelling_shortwave_flux_in_air swdn <- vector() - + # The sine swdn_method produces an hourly sine wave of if (swdn_method == "sine") { - - eph <- ephemeris(lat_train, lon_train, date = paste0(eph_year, "-01-01", tz = "UTC"), - span = sp) + eph <- ephemeris(lat_train, lon_train, + date = paste0(eph_year, "-01-01", tz = "UTC"), + span = sp + ) day_len <- eph$day_length - + # Need to have average daily values for this swdn_method, so this upscales the # source data to daily resolution if needed daily_row <- nrow(source) - daily_step <- daily_row/sp + daily_step <- daily_row / sp daily.swdn <- vector() for (x in seq_len(sp)) { daily.swdn[x] <- mean(swsource[(x * daily_step - daily_step + 1):(x * daily_step)]) } - + # creating the sine wave for (i in seq_along(daily.swdn)) { - t <- seq(from = 
pi/day_len[i], to = pi, by = pi/day_len[i]) - wav <- ((daily.swdn[i] * (24/day_len[i]))/0.637) * sin(t) - + t <- seq(from = pi / day_len[i], to = pi, by = pi / day_len[i]) + wav <- ((daily.swdn[i] * (24 / day_len[i])) / 0.637) * sin(t) + # swdn = 0 without sunlight srs <- eph$sunrise hr <- substr(srs[i], 1, 2) hr <- as.numeric(hr) # utc_diff must be used so we can begin the sine wave at local sunrise hr <- hr + utc_diff - + l <- vector() for (n in seq_len(hr)) { l[n] <- 0 @@ -272,41 +284,42 @@ met_temporal_downscale.Gaussian_ensemble <- function(in.path, in.prefix, outfold for (n in seq_len(24 - (length(wav) + hr))) { l[n + hr + length(wav)] <- 0 } - + swdn <- append(swdn, l) } - + swflux <- vector() - sw_step <- length(swdn)/reso_len + sw_step <- length(swdn) / reso_len for (x in seq_len(reso_len)) { swflux[x] <- mean(swdn[(x * sw_step - sw_step + 1):(x * sw_step)]) } swflux[swflux < 0] <- 0 } - + # The spline swdn_method uses spline interpolation to connect existing values and # downscale if (swdn_method == "spline") { tem.met <- vector() for (x in seq(from = 0, to = nrow(train), by = div)) { - tem.met[x] <- swsource[x/div] + tem.met[x] <- swsource[x / div] } - + swflux <- vector() swflux <- zoo::na.spline(tem.met) swflux[swflux < 0] <- 0 } - + # The Waichler swdn_method doesn't need averaged SW train values, it sources SW # downwelling flux based on Tmax-Tmin and Precipitation Reference is Waichler and # Wigtosa 2003. Our no-precip coefficient is 2 instead of 1 becuase this better # matches our observations (1 significantly undervalues SW downwelling flux) if (swdn_method == "Waichler") { inter <- paste0(reso, " hour") - days <- seq(as.POSIXct(paste0(eph_year, "-01-01 00:00:00"),tz="UTC"), - as.POSIXct(paste0(eph_year, "-12-31 18:00:00"),tz="UTC"), - by = inter) - days.doy <- as.numeric(format(days,"%j")) + days <- seq(as.POSIXct(paste0(eph_year, "-01-01 00:00:00"), tz = "UTC"), + as.POSIXct(paste0(eph_year, "-12-31 18:00:00"), tz = "UTC"), + by = inter + ) + days.doy <- as.numeric(format(days, "%j")) days.hour <- lubridate::hour(days) + lubridate::minute(days) / 60 + lubridate::second(days) / 3600 cosZ <- PEcAn.data.atmosphere::cos_solar_zenith_angle(days.doy, lat_train, lon_train, inter, days.hour) I <- 1000 * cosZ @@ -315,12 +328,14 @@ met_temporal_downscale.Gaussian_ensemble <- function(in.path, in.prefix, outfold m[i] <- lubridate::days_in_month(as.Date(paste0(year, "-", i, "-01"))) } bmlist <- vector() - - Bm <- c(0.2089, 0.2857, 0.2689, 0.2137, 0.1925, 0.2209, 0.2527, 0.2495, - 0.2232, 0.1728, 0.1424, 0.1422) + + Bm <- c( + 0.2089, 0.2857, 0.2689, 0.2137, 0.1925, 0.2209, 0.2527, 0.2495, + 0.2232, 0.1728, 0.1424, 0.1422 + ) for (x in seq_along(Bm)) { mlen <- list() - mlen <- rep(Bm[x], m[x] * 24/reso) + mlen <- rep(Bm[x], m[x] * 24 / reso) bmlist <- append(bmlist, mlen) } A <- 0.73 @@ -337,59 +352,68 @@ met_temporal_downscale.Gaussian_ensemble <- function(in.path, in.prefix, outfold } hdry[hdry < 0] <- 0 swflux <- hdry * I - swflux[swflux < 0] <- 0 + swflux[swflux < 0] <- 0 } # Waichler method is the only method with ensembles for downwelling shortwave flux - + df$surface_downwelling_shortwave_flux_in_air <- swflux # Will need to change below if we figure out how to downscale this df$air_temperature_max <- rep(NA, reso_len) df$air_temperature_min <- rep(NA, reso_len) - - + + # Putting all the variables together in a data frame downscaled.met <- data.frame(df) - + train.list <- list() - lat <- ncdf4::ncdim_def(name = "latitude", units = "degree_north", vals = lat_train, - 
create_dimvar = TRUE) - lon <- ncdf4::ncdim_def(name = "longitude", units = "degree_east", vals = lon_train, - create_dimvar = TRUE) - time <- ncdf4::ncdim_def(name = "time", units = "sec", vals = seq_len(reso_len) * - reso * 3600, create_dimvar = TRUE, unlim = TRUE) + lat <- ncdf4::ncdim_def( + name = "latitude", units = "degree_north", vals = lat_train, + create_dimvar = TRUE + ) + lon <- ncdf4::ncdim_def( + name = "longitude", units = "degree_east", vals = lon_train, + create_dimvar = TRUE + ) + time <- ncdf4::ncdim_def(name = "time", units = "sec", vals = seq_len(reso_len) * + reso * 3600, create_dimvar = TRUE, unlim = TRUE) dim <- list(lat, lon, time) - + for (j in seq_along(var$CF.name)) { - train.list[[j]] <- ncdf4::ncvar_def(name = as.character(var$CF.name[j]), - units = as.character(var$units[j]), dim = dim, missval = -999, verbose = verbose) + train.list[[j]] <- ncdf4::ncvar_def( + name = as.character(var$CF.name[j]), + units = as.character(var$units[j]), dim = dim, missval = -999, verbose = verbose + ) } - + rows <- 1 dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) - loc.file <- file.path(outfolder, paste0(source_name, ".dwnsc.gauss.ens", - e, ".", year, ".nc")) - + loc.file <- file.path(outfolder, paste0( + source_name, ".dwnsc.gauss.ens", + e, ".", year, ".nc" + )) + loc <- ncdf4::nc_create(filename = loc.file, vars = train.list, verbose = verbose) for (j in seq_along(var$CF.name)) { ncdf4::ncvar_put(nc = loc, varid = as.character(var$CF.name[j]), vals = downscaled.met[[j]]) } ncdf4::nc_close(loc) - - results[[e]] <- data.frame(file = loc.file, - host = rep(PEcAn.remote::fqdn(),rows), - mimetype = rep("application/x-netcdf",rows), - formatname = rep("CF Meteorology",rows), - startdate = paste0(year, "-01-01 00:00:00", tz = "UTC"), - enddate = paste0(year, "-12-31 23:59:59", tz = "UTC"), - dbfile.name = paste0(source_name, ".dwnsc.ens"), - stringsAsFactors = FALSE) - + + results[[e]] <- data.frame( + file = loc.file, + host = rep(PEcAn.remote::fqdn(), rows), + mimetype = rep("application/x-netcdf", rows), + formatname = rep("CF Meteorology", rows), + startdate = paste0(year, "-01-01 00:00:00", tz = "UTC"), + enddate = paste0(year, "-12-31 23:59:59", tz = "UTC"), + dbfile.name = paste0(source_name, ".dwnsc.ens"), + stringsAsFactors = FALSE + ) } - - return(invisible(results)) + + return(invisible(results)) } # met_temporal_downscale.Gaussian_ensemble( '~', '~', # 'dwnsc','MACA.IPSL-CM5A-LR.rcp85.r1i1p1.2006.nc', 'US-WCr.2006.nc') -# met_temporal_downscale.Gaussian_ensemble( '~', '~', 'dwnsc','MACA.IPSL-CM5A-LR.rcp85.r1i1p1.2006.nc', 'US-WCr.2006.nc') \ No newline at end of file +# met_temporal_downscale.Gaussian_ensemble( '~', '~', 'dwnsc','MACA.IPSL-CM5A-LR.rcp85.r1i1p1.2006.nc', 'US-WCr.2006.nc') diff --git a/modules/data.atmosphere/R/metgapfill.NOAA_GEFS.R b/modules/data.atmosphere/R/metgapfill.NOAA_GEFS.R index 820028978ba..fbee424e56b 100644 --- a/modules/data.atmosphere/R/metgapfill.NOAA_GEFS.R +++ b/modules/data.atmosphere/R/metgapfill.NOAA_GEFS.R @@ -18,26 +18,25 @@ #' @export metgapfill.NOAA_GEFS <- function(in.prefix, in.path, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, ...) 
{ - PEcAn.logger::logger.info("Starting metgapfill.NOAA_GEFS") - + # These are the variables cf NOAA_GEFS uses - # cf_var_names = c("air_temperature", "air_pressure", "specific_humidity", "surface_downwelling_longwave_flux_in_air", - #"surface_downwelling_shortwave_flux_in_air", "precipitation_flux", "eastward_wind", "northward_wind") - + # cf_var_names = c("air_temperature", "air_pressure", "specific_humidity", "surface_downwelling_longwave_flux_in_air", + # "surface_downwelling_shortwave_flux_in_air", "precipitation_flux", "eastward_wind", "northward_wind") + # Variables whose gapfillings are not directly dependent on splines. - #dependent_vars <- c("specific_humidity", "surface_downwelling_longwave_flux_in_air", "surface_downwelling_shortwave_flux_in_air", - # "air_pressure", "eastward_wind", "northward_wind") - - + # dependent_vars <- c("specific_humidity", "surface_downwelling_longwave_flux_in_air", "surface_downwelling_shortwave_flux_in_air", + # "air_pressure", "eastward_wind", "northward_wind") + + escaped <- gsub("(\\W)", "\\\\\\1", in.prefix) # The file name may contain special characters that could mess up the regular expression. - matching_files <- grep(escaped, list.files(in.path), value=TRUE) + matching_files <- grep(escaped, list.files(in.path), value = TRUE) if (length(matching_files) == 0) { PEcAn.logger::logger.severe(paste0("No files found matching ", in.prefix, "; cannot process data.")) } - + # This function is supposed to process netcdf files, so we'll search for files the the extension .nc and use those first. - nc_file = grep("\\.nc$", matching_files) + nc_file <- grep("\\.nc$", matching_files) if (length(nc_file) > 0) { in.prefix <- matching_files[1] } else { # no .nc files found... it could be that the extension was left off, or some other problem @@ -45,163 +44,168 @@ metgapfill.NOAA_GEFS <- function(in.prefix, in.path, outfolder, start_date, end_ PEcAn.logger::logger.warn(matching_files) in.prefix <- matching_files[1] } - + # Attach the path. The above procedure doesn't require the path, but acutally opening the file does. full.data.file <- file.path(in.path, in.prefix) - + if (!file.exists(full.data.file)) { PEcAn.logger::logger.warn(paste0("File ", full.data.file, " not found. Unable to perform gapfilling.")) return(data.frame()) } - - flptr = ncdf4::nc_open(full.data.file) - - cf_var_names = names(flptr$var) #to deal with wind speed vs eastward vs northward - dependent_vars = cf_var_names[-which(cf_var_names == "air_temperature" | cf_var_names == "precipitation_flux")] #remove air temp and pres since they are gap filled differently + + flptr <- ncdf4::nc_open(full.data.file) + + cf_var_names <- names(flptr$var) # to deal with wind speed vs eastward vs northward + dependent_vars <- cf_var_names[-which(cf_var_names == "air_temperature" | cf_var_names == "precipitation_flux")] # remove air temp and pres since they are gap filled differently # Put data into a matrix var <- ncdf4::ncvar_get(flptr, "air_temperature") - allvars <- matrix(var, ncol=length(var), nrow=1) + allvars <- matrix(var, ncol = length(var), nrow = 1) var <- ncdf4::ncvar_get(flptr, "precipitation_flux") - allvars = rbind(allvars, var) - + allvars <- rbind(allvars, var) + for (i in 1:length(dependent_vars)) { allvars <- rbind(allvars, ncdf4::ncvar_get(flptr, dependent_vars[i])) } - + # Use this matrix to fill in missing days at the end of the forecast with data from the # same time of day. # First, count how far back needs going. 
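The loop below fills trailing gaps by sampling earlier values recorded at the same time of day; the backward stride of 4 corresponds to four records per day (6-hourly output). A minimal sketch of that sampling, using made-up numbers rather than GEFS data:

    # Toy vector with four records per day (hypothetical values)
    x <- c(280, 285, 290, 283,  281, 286, 291, 284,  NA, NA, NA, NA)
    j <- 9                                               # first missing record
    same_hour <- stats::na.omit(x[seq(j, 1, by = -4)])   # steps back one day at a time: 281, 280
    x[j] <- sample(same_hour, 1)                         # fill from an earlier day at the same hour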
k <- ncol(allvars) - while (length(which(is.na(allvars[,k]))) > 0) { - k = k - 1; + while (length(which(is.na(allvars[, k]))) > 0) { + k <- k - 1 } - + # i = column, row = j for (i in 1:nrow(allvars)) { for (j in k:ncol(allvars)) { - if (is.na(allvars[i,j])) { - allvars[i,j] = sample(stats::na.omit(allvars[i,seq(j, 1, by = -4)]), 1) + if (is.na(allvars[i, j])) { + allvars[i, j] <- sample(stats::na.omit(allvars[i, seq(j, 1, by = -4)]), 1) } } } - + # Use a basic spline to fill in missing values for basic variables # Other variables will be fit to these for internal consistency in gapfilling - air_temperature <- allvars[1,] - precipitation_flux <- allvars[2,] - + air_temperature <- allvars[1, ] + precipitation_flux <- allvars[2, ] + air_temperature <- zoo::na.spline(air_temperature) precipitation_flux <- zoo::na.spline(precipitation_flux) - - fitted.data <- data.frame(air_temperature = air_temperature, - precipitation_flux = precipitation_flux) - time <-flptr$dim$time$vals - + + fitted.data <- data.frame( + air_temperature = air_temperature, + precipitation_flux = precipitation_flux + ) + time <- flptr$dim$time$vals + # This loop does the gapfilling of the other variables, based on air_temperature and precipitation_flux. # It does so in the following way: # A linear model for a variabe (e.g. specific humidity) is fitted to temperature and precipitation # A prediction is made using the predict function on what the values of the missing variables # should be. - # The values that were missing in the original data are filled in with their corresponding + # The values that were missing in the original data are filled in with their corresponding # values in the output of the prediciton function. # The new data is put into the data frame used to fit the next model for (i in 1:length(dependent_vars)) { - var <- allvars[i+2,] - if(is.na(var[1])) { + var <- allvars[i + 2, ] + if (is.na(var[1])) { var[1] <- mean(var, na.rm = TRUE) } - - fitted.data[[dependent_vars[i]]] = var - + + fitted.data[[dependent_vars[i]]] <- var + # Unfortunately, R is picky, and the data.frame[['var_as_string']] notation doesn't work # for the lm function; only the $ notation does, hence this if/else if section. 
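The if/else chain that follows is the workaround described in the comment above. A sketch of one alternative builds the formula from the variable name with stats::reformulate(); note this puts a bare column name on the left-hand side, so the dot expansion drops the response, and it is not guaranteed to reproduce the $-notation fits exactly:

    # Sketch: build "var ~ ." programmatically instead of branching per variable
    v <- dependent_vars[i]                        # e.g. "specific_humidity"
    fml <- stats::reformulate(".", response = v)  # specific_humidity ~ .
    reg <- stats::lm(fml, data = fitted.data)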
if (dependent_vars[i] == "specific_humidity") { - reg <- stats::lm(fitted.data$specific_humidity ~.,fitted.data) + reg <- stats::lm(fitted.data$specific_humidity ~ ., fitted.data) } else if (dependent_vars[i] == "surface_downwelling_longwave_flux_in_air") { - reg <- stats::lm(fitted.data$surface_downwelling_longwave_flux_in_air ~.,fitted.data) + reg <- stats::lm(fitted.data$surface_downwelling_longwave_flux_in_air ~ ., fitted.data) } else if (dependent_vars[i] == "surface_downwelling_shortwave_flux_in_air") { - reg <- stats::lm(fitted.data$surface_downwelling_shortwave_flux_in_air ~.,fitted.data) + reg <- stats::lm(fitted.data$surface_downwelling_shortwave_flux_in_air ~ ., fitted.data) } else if (dependent_vars[i] == "air_pressure") { - reg <- stats::lm(fitted.data$air_pressure ~.,fitted.data) + reg <- stats::lm(fitted.data$air_pressure ~ ., fitted.data) } else if (dependent_vars[i] == "eastward_wind") { - reg <- stats::lm(fitted.data$eastward_wind ~.,fitted.data) + reg <- stats::lm(fitted.data$eastward_wind ~ ., fitted.data) } else if (dependent_vars[i] == "northward_wind") { - reg <- stats::lm(fitted.data$northward_wind ~.,fitted.data) - }else if (dependent_vars[i] == "wind_speed") { - reg <- stats::lm(fitted.data$wind_speed ~.,fitted.data) + reg <- stats::lm(fitted.data$northward_wind ~ ., fitted.data) + } else if (dependent_vars[i] == "wind_speed") { + reg <- stats::lm(fitted.data$wind_speed ~ ., fitted.data) } - + prediction <- stats::predict(reg, fitted.data) - + # Update the values in the data frame for (j in 1:length(prediction)) { - if(is.na(fitted.data[[dependent_vars[i]]][j])) { + if (is.na(fitted.data[[dependent_vars[i]]][j])) { fitted.data[[dependent_vars[i]]][j] <- prediction[j] } } } - + # Extract ensemble information from file name ensemble <- regmatches(in.prefix, regexpr("NOAA_GEFS\\.[^.]*\\.[0-9]*", in.prefix)) ensemble <- regmatches(ensemble, regexpr("[0-9]+$", ensemble)) - + # Each ensemble gets its own folder to keep things organized out.data.file <- file.path(outfolder, paste0("NOAA_GEFS.", ensemble)) if (!dir.exists(out.data.file)) { - dir.create(out.data.file, recursive=TRUE, showWarnings = FALSE) + dir.create(out.data.file, recursive = TRUE, showWarnings = FALSE) } - + # The file names are the same, but the data is in a different directory. out.data.file <- file.path(out.data.file, in.prefix) - + # Write new, gapfilled file if (!file.exists(out.data.file) || overwrite) { # Setup netcdf dimensions and variables # All variables should be of the same length. 
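For reference, the two nested regular-expression calls above pull the ensemble member number out of the file prefix; with a hypothetical NOAA GEFS prefix the steps return the following:

    # Hypothetical prefix, to show what each regex step extracts
    in.prefix <- "NOAA_GEFS.US-WCr.4.2020-01-01T00.2020-01-16T00.nc"
    ensemble  <- regmatches(in.prefix, regexpr("NOAA_GEFS\\.[^.]*\\.[0-9]*", in.prefix))
    ensemble                                            # "NOAA_GEFS.US-WCr.4"
    regmatches(ensemble, regexpr("[0-9]+$", ensemble))  # "4"  (ensemble member)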
- time_dim = ncdf4::ncdim_def(name="time", - paste(units="hours since", start_date), - time, - create_dimvar = TRUE) + time_dim <- ncdf4::ncdim_def( + name = "time", + paste(units = "hours since", start_date), + time, + create_dimvar = TRUE + ) lat <- ncdf4::ncvar_get(nc = flptr, varid = "latitude") lon <- ncdf4::ncvar_get(nc = flptr, varid = "longitude") - lat_dim = ncdf4::ncdim_def("latitude", "degree_north", lat, create_dimvar = TRUE) - lon_dim = ncdf4::ncdim_def("longitude", "degree_east", lon, create_dimvar = TRUE) - - dimensions_list = list(time_dim, lat_dim, lon_dim) - - nc_var_list = list() + lat_dim <- ncdf4::ncdim_def("latitude", "degree_north", lat, create_dimvar = TRUE) + lon_dim <- ncdf4::ncdim_def("longitude", "degree_east", lon, create_dimvar = TRUE) + + dimensions_list <- list(time_dim, lat_dim, lon_dim) + + nc_var_list <- list() for (i in 1:length(cf_var_names)) { units <- flptr$var[[cf_var_names[i]]]$units - nc_var_list[[i]] = ncdf4::ncvar_def(cf_var_names[i], units, dimensions_list, missval=NaN) + nc_var_list[[i]] <- ncdf4::ncvar_def(cf_var_names[i], units, dimensions_list, missval = NaN) } - + # Open file - nc_flptr = ncdf4::nc_create(out.data.file, nc_var_list, verbose=verbose) - + nc_flptr <- ncdf4::nc_create(out.data.file, nc_var_list, verbose = verbose) + # Write data to file for (j in 1:length(cf_var_names)) { ncdf4::ncvar_put(nc_flptr, nc_var_list[[j]], fitted.data[[cf_var_names[j]]]) } - + # Close file ncdf4::nc_close(nc_flptr) } else { PEcAn.logger::logger.info(paste0("File ", out.data.file, " already exists. It was not overwritten.")) } - + # We no longer need the original file ncdf4::nc_close(flptr) - + # This table of results is used to insert the record of the file into the database. - results <- data.frame(file = out.data.file, # file name - host = PEcAn.remote::fqdn(), # machine where file is located - mimetype = "application/x-netcdf", # type of file - formatname = "CF (gapfilled)", # file format - startdate = start_date, # start date of file contents - enddate = end_date, # end date of file contents - dbfile.name = basename(out.data.file), # output file name - stringsAsFactors = FALSE) - + results <- data.frame( + file = out.data.file, # file name + host = PEcAn.remote::fqdn(), # machine where file is located + mimetype = "application/x-netcdf", # type of file + formatname = "CF (gapfilled)", # file format + startdate = start_date, # start date of file contents + enddate = end_date, # end date of file contents + dbfile.name = basename(out.data.file), # output file name + stringsAsFactors = FALSE + ) + return(results) - } # metgapfill.NOAA_GEFS diff --git a/modules/data.atmosphere/R/metgapfill.R b/modules/data.atmosphere/R/metgapfill.R index 9a4bc711df7..2d65a84f507 100644 --- a/modules/data.atmosphere/R/metgapfill.R +++ b/modules/data.atmosphere/R/metgapfill.R @@ -18,29 +18,29 @@ ##' @author Ankur Desai metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst = 0, overwrite = FALSE, verbose = FALSE, ...) 
{ - - - sEddyProc <- REddyProc::sEddyProc + sEddyProc <- REddyProc::sEddyProc fCalcVPDfromRHandTair <- REddyProc::fCalcVPDfromRHandTair # get start/end year code works on whole years only start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) + end_year <- lubridate::year(end_date) if (!file.exists(outfolder)) { dir.create(outfolder) } rows <- end_year - start_year + 1 - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = in.prefix, - stringsAsFactors = FALSE) + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = in.prefix, + stringsAsFactors = FALSE + ) for (year in start_year:end_year) { old.file <- file.path(in.path, paste(in.prefix, sprintf("%04d", year), "nc", sep = ".")) @@ -48,37 +48,39 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst # check if input exists if (!file.exists(old.file)) { - PEcAn.logger::logger.warn("Missing input file ", old.file, " for year", sprintf("%04d", year), - "in folder", in.path) + PEcAn.logger::logger.warn( + "Missing input file ", old.file, " for year", sprintf("%04d", year), + "in folder", in.path + ) next } # create array with results row <- year - start_year + 1 - results$file[row] <- new.file - results$host[row] <- PEcAn.remote::fqdn() - if(year == start_year & year != end_year){ - results$startdate[row] <- paste(start_date, "00:00:00") - results$enddate[row] <- sprintf("%04d-12-31 23:59:59", year) + results$file[row] <- new.file + results$host[row] <- PEcAn.remote::fqdn() + if (year == start_year & year != end_year) { + results$startdate[row] <- paste(start_date, "00:00:00") + results$enddate[row] <- sprintf("%04d-12-31 23:59:59", year) diy <- PEcAn.utils::days_in_year(year) - lubridate::yday(start_date) + 1 # can handle partial start-year - }else if(year != start_year & year == end_year){ - results$startdate[row] <- sprintf("%04d-01-01 00:00:00", year) - results$enddate[row] <- paste(end_date, "23:59:59") + } else if (year != start_year & year == end_year) { + results$startdate[row] <- sprintf("%04d-01-01 00:00:00", year) + results$enddate[row] <- paste(end_date, "23:59:59") diy <- lubridate::yday(end_date) # can handle partial end-year - }else{ - if(year == start_year & year == end_year){ - results$startdate[row] <- paste(start_date, "00:00:00") - results$enddate[row] <- paste(end_date, "23:59:59") + } else { + if (year == start_year & year == end_year) { + results$startdate[row] <- paste(start_date, "00:00:00") + results$enddate[row] <- paste(end_date, "23:59:59") diy <- lubridate::yday(end_date) - lubridate::yday(start_date) + 1 # can handle single partial year - }else{ + } else { # regular full year - results$startdate[row] <- sprintf("%04d-01-01 00:00:00", year) - results$enddate[row] <- sprintf("%04d-12-31 23:59:59", year) + results$startdate[row] <- sprintf("%04d-01-01 00:00:00", year) + results$enddate[row] <- sprintf("%04d-12-31 23:59:59", year) diy <- PEcAn.utils::days_in_year(year) # regular full (mid-)year } } - results$mimetype[row] <- "application/x-netcdf" + results$mimetype[row] <- "application/x-netcdf" results$formatname[row] <- "CF (gapfilled)" if (file.exists(new.file) && !overwrite) { @@ -96,8 +98,8 @@ metgapfill <- function(in.path, in.prefix, outfolder, 
start_date, end_date, lst # extract time, lat, lon time <- ncdf4::ncvar_get(nc = nc, varid = "time") - lat <- ncdf4::ncvar_get(nc = nc, varid = "latitude") - lon <- ncdf4::ncvar_get(nc = nc, varid = "longitude") + lat <- ncdf4::ncvar_get(nc = nc, varid = "latitude") + lon <- ncdf4::ncvar_get(nc = nc, varid = "longitude") ## create time lat lon dimensions for adding new variables x <- ncdf4::ncdim_def("longitude", "degrees_east", lon) @@ -106,19 +108,19 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst xytdim <- list(x, y, t) # extract elevation and timezone for radiation calculations - elev <- ncdf4::ncatt_get(nc = nc, varid = 0, "elevation") + elev <- ncdf4::ncatt_get(nc = nc, varid = 0, "elevation") tzone <- ncdf4::ncatt_get(nc = nc, varid = "time", "units") ## Future: query elevation from site.id if (elev$hasatt) { elevation <- as.numeric((unlist(strsplit(elev$value, " ")))[1]) } else { - elevation <- 0 #assume sea level by default + elevation <- 0 # assume sea level by default } if (tzone$hasatt) { tdimunit <- unlist(strsplit(tzone$value, " ")) tdimtz <- substr(tdimunit[length(tdimunit)], 1, 1) if ((tdimtz == "+") || (tdimtz == "-")) { - lst <- as.numeric(tdimunit[length(tdimunit)]) #extract timezone from file + lst <- as.numeric(tdimunit[length(tdimunit)]) # extract timezone from file } } @@ -140,20 +142,25 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst Rg <- try(ncdf4::ncvar_get(nc = nc, varid = "surface_downwelling_shortwave_flux_in_air"), silent = TRUE) if (!is.numeric(Rg)) { Rg <- missingarr - myvar <- ncdf4::ncvar_def(name = "surface_downwelling_shortwave_flux_in_air", - units = "W m-2", - dim = xytdim) + myvar <- ncdf4::ncvar_def( + name = "surface_downwelling_shortwave_flux_in_air", + units = "W m-2", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = myvar) ncdf4::ncvar_put(nc, varid = myvar, missingarr) } PAR <- try(ncdf4::ncvar_get(nc = nc, varid = "surface_downwelling_photosynthetic_photon_flux_in_air"), - silent = TRUE) + silent = TRUE + ) if (!is.numeric(PAR)) { PAR <- missingarr - myvar <- ncdf4::ncvar_def(name = "surface_downwelling_photosynthetic_photon_flux_in_air", - units = "mol m-2 s-1", - dim = xytdim) + myvar <- ncdf4::ncvar_def( + name = "surface_downwelling_photosynthetic_photon_flux_in_air", + units = "mol m-2 s-1", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = myvar) ncdf4::ncvar_put(nc, varid = myvar, missingarr) } @@ -167,11 +174,11 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst } ## Use Rg and PAR to gap fill - badPAR <- is.na(PAR) - badRg <- is.na(Rg) - PAR[badPAR] <- Rg[badPAR] * 2.1/1e+06 - Rg[badRg] <- 1e+06 * PAR[badRg]/2.1 - Rg[Rg < 0] <- 0 + badPAR <- is.na(PAR) + badRg <- is.na(Rg) + PAR[badPAR] <- Rg[badPAR] * 2.1 / 1e+06 + Rg[badRg] <- 1e+06 * PAR[badRg] / 2.1 + Rg[Rg < 0] <- 0 PAR[PAR < 0] <- 0 ## make night dark - based on met2model.ED2.R in models/ed/R First: calculate potential radiation @@ -183,12 +190,12 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst cosz <- PEcAn.data.atmosphere::cos_solar_zenith_angle(doy, lat, lon, dt, hr) - rpot <- 1366 * cosz #in UTC + rpot <- 1366 * cosz # in UTC tz <- as.numeric(lst) - if(is.na(tz)){ + if (is.na(tz)) { tz <- PEcAn.utils::timezone_hour(lst) } - toff <- tz * 3600/dt #timezone offset correction + toff <- tz * 3600 / dt # timezone offset correction if (toff < 0) { slen <- length(rpot) rpot <- c(rpot[(abs(toff) + 1):slen], rpot[1:abs(toff)]) @@ -207,9 
+214,11 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst co2 <- try(ncdf4::ncvar_get(nc = nc, varid = "mole_fraction_of_carbon_dioxide_in_air"), silent = TRUE) if (!is.numeric(co2)) { co2 <- missingarr - myvar <- ncdf4::ncvar_def(name = "mole_fraction_of_carbon_dioxide_in_air", - units = "mol mol-1", - dim = xytdim) + myvar <- ncdf4::ncvar_def( + name = "mole_fraction_of_carbon_dioxide_in_air", + units = "mol mol-1", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = myvar) ncdf4::ncvar_put(nc, varid = myvar, missingarr) } @@ -223,7 +232,7 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst } # default pressure (in Pascals) if no pressure observations are available (based on NOAA 1976 # equation for pressure altitude for WMO international standard atmosphere) - standard_pressure <- ((1 - ((3.28084 * elevation) / 145366.45)) ^ (1 / 0.190284)) * 101325 + standard_pressure <- ((1 - ((3.28084 * elevation) / 145366.45))^(1 / 0.190284)) * 101325 if (length(which(is.na(press))) == length(press)) { press[is.na(press)] <- standard_pressure } @@ -231,9 +240,11 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst Lw <- try(ncdf4::ncvar_get(nc = nc, varid = "surface_downwelling_longwave_flux_in_air"), silent = TRUE) if (!is.numeric(Lw)) { Lw <- missingarr - myvar <- ncdf4::ncvar_def(name = "surface_downwelling_longwave_flux_in_air", - units = "W m-2", - dim = xytdim) + myvar <- ncdf4::ncvar_def( + name = "surface_downwelling_longwave_flux_in_air", + units = "W m-2", + dim = xytdim + ) nc <- ncdf4::ncvar_add(nc = nc, v = myvar) ncdf4::ncvar_put(nc, varid = myvar, missingarr) } @@ -315,18 +326,18 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst VPD[badVPD] <- as.numeric(fCalcVPDfromRHandTair(rH[badVPD], Tair_degC[badVPD])) * 100 } - ##Once all are filled, do one more consistency check + ## Once all are filled, do one more consistency check es <- get.es(Tair_degC) * 100 rH[rH < 0] <- 0 rH[rH > 100] <- 100 VPD[VPD < 0] <- 0 - badVPD_es <- which(VPD > es) - VPD[badVPD_es] <- es[badVPD_es] + badVPD_es <- which(VPD > es) + VPD[badVPD_es] <- es[badVPD_es] sHum[sHum < 0] <- 0 - + ## one set of these must exist (either wind_speed or east+north wind) ws <- try(ncdf4::ncvar_get(nc = nc, varid = "wind_speed"), silent = TRUE) if (!is.numeric(ws)) { @@ -353,8 +364,10 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst # Rn <- ncdf4::ncvar_get(nc=nc,varid='Rn') Ts2 <-ncdf4::ncvar_get(nc=nc,varid='TS2') ## make a data frame, convert -9999 to NA, convert to degrees C - EddyData.F <- data.frame(Tair, Rg, rH, PAR, precip, sHum, Lw, Ts1, - VPD, ws, co2, press, east_wind, north_wind) + EddyData.F <- data.frame( + Tair, Rg, rH, PAR, precip, sHum, Lw, Ts1, + VPD, ws, co2, press, east_wind, north_wind + ) EddyData.F[["Tair"]] <- PEcAn.utils::ud_convert(EddyData.F[["Tair"]], "K", "degC") EddyData.F[["Tair"]] <- EddyData.F[["Tair"]] EddyData.F[["Ts1"]] <- PEcAn.utils::ud_convert(EddyData.F[["Ts1"]], "K", "degC") @@ -364,21 +377,21 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst ## Compute VPD EddyData.F <- cbind(EddyData.F,VPD=fCalcVPDfromRHandTair(EddyData.F$rH, EddyData.F$Tair)) ## Estimate number of good values, don't gap fill if no gaps or all gaps - n_Tair <- sum(is.na(EddyData.F[["Tair"]])) - n_Rg <- sum(is.na(EddyData.F[["Rg"]])) - n_rH <- sum(is.na(EddyData.F[["rH"]])) - n_PAR <- sum(is.na(EddyData.F[["PAR"]])) + n_Tair <- 
sum(is.na(EddyData.F[["Tair"]])) + n_Rg <- sum(is.na(EddyData.F[["Rg"]])) + n_rH <- sum(is.na(EddyData.F[["rH"]])) + n_PAR <- sum(is.na(EddyData.F[["PAR"]])) n_precip <- sum(is.na(EddyData.F[["precip"]])) # n_Rn <- sum(is.na(EddyData.F['Rn'])) - n_sHum <- sum(is.na(EddyData.F[["sHum"]])) - n_Lw <- sum(is.na(EddyData.F[["Lw"]])) - n_Ts1 <- sum(is.na(EddyData.F[["Ts1"]])) + n_sHum <- sum(is.na(EddyData.F[["sHum"]])) + n_Lw <- sum(is.na(EddyData.F[["Lw"]])) + n_Ts1 <- sum(is.na(EddyData.F[["Ts1"]])) # n_Ts2 <- sum(is.na(EddyData.F['Ts2'])) - n_VPD <- sum(is.na(EddyData.F[["VPD"]])) - n_ws <- sum(is.na(EddyData.F[["ws"]])) - n_co2 <- sum(is.na(EddyData.F[["co2"]])) - n_press <- sum(is.na(EddyData.F[["press"]])) - n_east_wind <- sum(is.na(EddyData.F[["east_wind"]])) + n_VPD <- sum(is.na(EddyData.F[["VPD"]])) + n_ws <- sum(is.na(EddyData.F[["ws"]])) + n_co2 <- sum(is.na(EddyData.F[["co2"]])) + n_press <- sum(is.na(EddyData.F[["press"]])) + n_east_wind <- sum(is.na(EddyData.F[["east_wind"]])) n_north_wind <- sum(is.na(EddyData.F[["north_wind"]])) # figure out datetime of nc file and convert to POSIX @@ -386,7 +399,8 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst tunit <- ncdf4::ncatt_get(nc = nc, varid = "time", attname = "units", verbose = verbose) origin <- "1900-01-01 00:00:00" time <- round(as.POSIXlt(PEcAn.utils::ud_convert(time, tunit$value, paste("seconds since", origin)), - origin = origin, tz = "UTC")) + origin = origin, tz = "UTC" + )) dtime <- as.numeric(diff(time), units = "mins") if (dtime[1] == 30) { DTS.n <- 48 @@ -403,30 +417,33 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst ## Create EddyProc object EddyProc.C <- sEddyProc$new("Site", - EddyData.F, - c("Tair", "Rg", "rH", "PAR", "precip", "sHum", - "Lw", "Ts1", "VPD", "ws", "co2", - "press", "east_wind", "north_wind"), - DTS.n = DTS.n) + EddyData.F, + c( + "Tair", "Rg", "rH", "PAR", "precip", "sHum", + "Lw", "Ts1", "VPD", "ws", "co2", + "press", "east_wind", "north_wind" + ), + DTS.n = DTS.n + ) maxbad <- nelem / 2 ## Gap fill with default (see below for examples of advanced options) Have to do Rg, Tair, VPD ## first ## First, define filled variable and do some simple gap filling where possible - Rg_f <- Rg - Tair_f <- Tair - VPD_f <- VPD - rH_f <- rH - PAR_f <- PAR + Rg_f <- Rg + Tair_f <- Tair + VPD_f <- VPD + rH_f <- rH + PAR_f <- PAR precip_f <- precip - sHum_f <- sHum - Lw_f <- Lw - Ts1_f <- Ts1 - ws_f <- ws + sHum_f <- sHum + Lw_f <- Lw + Ts1_f <- Ts1 + ws_f <- ws ws_f[is.na(ws_f)] <- mean(ws, na.rm = TRUE) ws_f[is.na(ws_f)] <- 1 - co2_f <- co2 + co2_f <- co2 press_f <- press press_f[is.na(press_f)] <- mean(press, na.rm = TRUE) east_wind_f <- east_wind @@ -435,60 +452,88 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst north_wind_f[is.na(north_wind_f)] <- ws_f[is.na(north_wind_f)] if (n_Rg > 0 && n_Rg < maxbad) { - EddyProc.C$sMDSGapFill("Rg", FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", - Verbose.b = verbose) + EddyProc.C$sMDSGapFill("Rg", + FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", + Verbose.b = verbose + ) } if (n_Tair > 0 && n_Tair < maxbad) { - EddyProc.C$sMDSGapFill("Tair", FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", - Verbose.b = verbose) + EddyProc.C$sMDSGapFill("Tair", + FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", + Verbose.b = verbose + ) } if (n_VPD > 0 && n_VPD < maxbad) { - EddyProc.C$sMDSGapFill("VPD", FillAll.b = FALSE, V1.s = "Rg", V2.s 
= "VPD", V3.s = "Tair", - Verbose.b = verbose) + EddyProc.C$sMDSGapFill("VPD", + FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", + Verbose.b = verbose + ) } if (n_rH > 0 && n_rH < maxbad) { - EddyProc.C$sMDSGapFill("rH", FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", - Verbose.b = verbose) + EddyProc.C$sMDSGapFill("rH", + FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", + Verbose.b = verbose + ) } if (n_PAR > 0 && n_PAR < maxbad) { - EddyProc.C$sMDSGapFill("PAR", FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", - Verbose.b = verbose) + EddyProc.C$sMDSGapFill("PAR", + FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", + Verbose.b = verbose + ) } if (n_precip > 0 && n_precip < maxbad) { - EddyProc.C$sMDSGapFill("precip", FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", - Verbose.b = verbose) + EddyProc.C$sMDSGapFill("precip", + FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", + Verbose.b = verbose + ) } if (n_sHum > 0 && n_sHum < maxbad) { - EddyProc.C$sMDSGapFill("sHum", FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", - Verbose.b = verbose) + EddyProc.C$sMDSGapFill("sHum", + FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", + Verbose.b = verbose + ) } if (n_Lw > 0 && n_Lw < maxbad) { - EddyProc.C$sMDSGapFill("Lw", FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", - Verbose.b = verbose) + EddyProc.C$sMDSGapFill("Lw", + FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", + Verbose.b = verbose + ) } if (n_Ts1 > 0 && n_Ts1 < maxbad) { - EddyProc.C$sMDSGapFill("Ts1", FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", - Verbose.b = verbose) + EddyProc.C$sMDSGapFill("Ts1", + FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", + Verbose.b = verbose + ) } if (n_ws > 0 && n_ws < maxbad) { - EddyProc.C$sMDSGapFill("ws", FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", - Verbose.b = verbose) + EddyProc.C$sMDSGapFill("ws", + FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", + Verbose.b = verbose + ) } if (n_co2 > 0 && n_co2 < maxbad) { - EddyProc.C$sMDSGapFill("co2", FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", - Verbose.b = verbose) + EddyProc.C$sMDSGapFill("co2", + FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", + Verbose.b = verbose + ) } if (n_press > 0 && n_press < maxbad) { - EddyProc.C$sMDSGapFill("press", FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", - Verbose.b = verbose) + EddyProc.C$sMDSGapFill("press", + FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", + Verbose.b = verbose + ) } if (n_east_wind > 0 && n_east_wind < maxbad) { - EddyProc.C$sMDSGapFill("east_wind", FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", - Verbose.b = verbose) + EddyProc.C$sMDSGapFill("east_wind", + FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", + Verbose.b = verbose + ) } if (n_north_wind > 0 && n_north_wind < maxbad) { - EddyProc.C$sMDSGapFill("north_wind", FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", - Verbose.b = verbose) + EddyProc.C$sMDSGapFill("north_wind", + FillAll.b = FALSE, V1.s = "Rg", V2.s = "VPD", V3.s = "Tair", + Verbose.b = verbose + ) } ## Extract filled variables into data frame print('Extracting dataframe elements and writing back @@ -583,9 +628,8 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst Tair_f_degC <- PEcAn.utils::ud_convert(Tair_f, "K", "degC") es <- get.es(Tair_f_degC) * 100 - badVPD_f <- 
which(VPD_f > es) - VPD_f[badVPD_f] <- es[badVPD_f] - + badVPD_f <- which(VPD_f > es) + VPD_f[badVPD_f] <- es[badVPD_f] } } if (length(which(is.na(VPD_f))) > 0) { @@ -636,7 +680,7 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst } ncdf4::ncvar_put(nc, varid = "northward_wind", vals = north_wind_f) - ws_f[is.na(ws_f)] <- sqrt(north_wind_f[is.na(ws_f)] ^ 2 + east_wind_f[is.na(ws_f)] ^ 2) + ws_f[is.na(ws_f)] <- sqrt(north_wind_f[is.na(ws_f)]^2 + east_wind_f[is.na(ws_f)]^2) if (length(which(is.na(ws_f))) > 0) { error <- c(error, "wind_speed") } @@ -645,13 +689,17 @@ metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst ncdf4::nc_close(nc) if (length(error) > 0) { - fail.file <- file.path(outfolder, - paste(in.prefix, sprintf("%04d", year), "failure", "nc", sep = ".")) + fail.file <- file.path( + outfolder, + paste(in.prefix, sprintf("%04d", year), "failure", "nc", sep = ".") + ) file.rename(from = new.file, to = fail.file) - PEcAn.logger::logger.severe("Could not do gapfill, results are in", fail.file, ".", - "The following variables have NA's:", paste(error, sep = ", ")) + PEcAn.logger::logger.severe( + "Could not do gapfill, results are in", fail.file, ".", + "The following variables have NA's:", paste(error, sep = ", ") + ) } - } # end loop + } # end loop #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # Extra: Examples of extended usage for advanced users diff --git a/modules/data.atmosphere/R/metgapfill.module.R b/modules/data.atmosphere/R/metgapfill.module.R index 4856f1900f7..dd7feebdef7 100644 --- a/modules/data.atmosphere/R/metgapfill.module.R +++ b/modules/data.atmosphere/R/metgapfill.module.R @@ -1,47 +1,49 @@ -.metgapfill.module <- function(cf.id, register, dir, met, str_ns, site, new.site, con, +.metgapfill.module <- function(cf.id, register, dir, met, str_ns, site, new.site, con, start_date, end_date, host, overwrite = FALSE, ensemble_name = NULL) { PEcAn.logger::logger.info("Gapfilling") - - input.id <- cf.id[1] - outfolder <- file.path(dir, paste0(met, "_CF_gapfill_site_", str_ns)) - pkg <- "PEcAn.data.atmosphere" - fcn <- "metgapfill" + + input.id <- cf.id[1] + outfolder <- file.path(dir, paste0(met, "_CF_gapfill_site_", str_ns)) + pkg <- "PEcAn.data.atmosphere" + fcn <- "metgapfill" # fcn <- register$gapfill formatname <- "CF Meteorology" - mimetype <- "application/x-netcdf" - lst <- site.lst(site.id=site$id, con=con) - + mimetype <- "application/x-netcdf" + lst <- site.lst(site.id = site$id, con = con) + if (!is.null(register$forecast)) { forecast <- isTRUE(as.logical(register$forecast)) } else { forecast <- FALSE } - + # met products requiring special gapfilling functions (incompatable with metgapfill) # Overrides default value of "fcn" if (met %in% c("NOAA_GEFS")) { fcn <- "metgapfill.NOAA_GEFS" } - - ready.id <- PEcAn.DB::convert_input(input.id = input.id, - outfolder = outfolder, - formatname = formatname, - mimetype = mimetype, - site.id = site$id, - start_date = start_date, end_date = end_date, - pkg = pkg, fcn = fcn, con = con, host = host, - write = TRUE, - lst = lst, - overwrite = overwrite, - exact.dates = FALSE, - forecast = forecast, - pattern = met, - ensemble = !is.null(register$ensemble) && as.logical(register$ensemble), - ensemble_name = ensemble_name) - + + ready.id <- PEcAn.DB::convert_input( + input.id = input.id, + outfolder = outfolder, + formatname = formatname, + mimetype = mimetype, + site.id = site$id, + start_date = start_date, end_date = end_date, + pkg = pkg, fcn = fcn, con = 
con, host = host, + write = TRUE, + lst = lst, + overwrite = overwrite, + exact.dates = FALSE, + forecast = forecast, + pattern = met, + ensemble = !is.null(register$ensemble) && as.logical(register$ensemble), + ensemble_name = ensemble_name + ) + print(ready.id) - + PEcAn.logger::logger.info("Finished Gapfilling Met") - + return(ready.id) } # .metgapfill.module diff --git a/modules/data.atmosphere/R/metutils.R b/modules/data.atmosphere/R/metutils.R index c5662e2db66..6cb13ed40a0 100644 --- a/modules/data.atmosphere/R/metutils.R +++ b/modules/data.atmosphere/R/metutils.R @@ -4,7 +4,7 @@ qcsolar <- function(x) ifelse(x < 0, 0, ifelse(abs(x) > 1300, mean(x[x < 1300]), qcwind <- function(x) ifelse(abs(x) > 102, mean(abs(x[x < 102])), x) qcprecip <- function(x) ifelse(x > 0.005 | x < 0, mean(x[x < 0.005 & x > 0]), x) qcrh <- function(x) { - return(ifelse(x > 100 | x < 0, mean(x[x < 100 & x > 0]), x)) # using logical range (0-100) rather than 'valid range (-25-125)' + return(ifelse(x > 100 | x < 0, mean(x[x < 100 & x > 0]), x)) # using logical range (0-100) rather than 'valid range (-25-125)' } # qcrh qcshum <- function(x) { @@ -69,7 +69,7 @@ get.vpd <- function(rh, temp) { ## calculate saturation vapor pressure es <- get.es(temp) ## calculate vapor pressure deficit - return(((100 - rh)/100) * es) + return(((100 - rh) / 100) * es) } # get.vpd ##' Calculate saturation vapor pressure @@ -83,16 +83,16 @@ get.vpd <- function(rh, temp) { ##' temp <- -30:30 ##' plot(temp, get.es(temp)) get.es <- function(temp) { - return(6.11 * exp((2500000/461) * (1/273 - 1/(273 + temp)))) + return(6.11 * exp((2500000 / 461) * (1 / 273 - 1 / (273 + temp)))) } # get.es ## TODO: merge SatVapPress with get.es; add option to choose method SatVapPres <- function(T) { # /estimates saturation vapor pressure (kPa) Goff-Gratch 1946 /input: T = absolute temperature - T_st <- 373.15 ##steam temperature (K) - e_st <- 1013.25 ##/saturation vapor pressure at steam temp (hPa) - return(0.1 * exp(-7.90298 * (T_st/T - 1) + 5.02808 * log(T_st/T) - 1.3816e-07 * (10^(11.344 * (1 - T/T_st)) - - 1) + 0.0081328 * (10^(-3.49149 * (T_st/T - 1)) - 1) + log(e_st))) + T_st <- 373.15 ## steam temperature (K) + e_st <- 1013.25 ## /saturation vapor pressure at steam temp (hPa) + return(0.1 * exp(-7.90298 * (T_st / T - 1) + 5.02808 * log(T_st / T) - 1.3816e-07 * (10^(11.344 * (1 - T / T_st)) - + 1) + 0.0081328 * (10^(-3.49149 * (T_st / T - 1)) - 1) + log(e_st))) } # SatVapPres @@ -103,9 +103,9 @@ SatVapPres <- function(T) { ##' A Simple Conversion and Applications. 
BAMS ##' https://doi.org/10.1175/BAMS-86-2-225 ##' R = 461.5 K-1 kg-1 gas constant H2O -##' L enthalpy of vaporization +##' L enthalpy of vaporization ##' linear dependence on T (p 226, following eq 9) -##' +##' ##' @title get RH ##' @param T air temperature, Kelvin ##' @param Td dewpoint, Kelvin @@ -113,11 +113,11 @@ SatVapPres <- function(T) { ##' @export ##' @author David LeBauer get.rh <- function(T, Td) { - if(Td >= T){ + if (Td >= T) { rh <- 100 } else { Rw <- 461.5 # gas constant for water vapor, J K-1 kg-1 - L <- 2.501e6 + (T-273.15) * (-2430) + L <- 2.501e6 + (T - 273.15) * (-2430) arg <- -L / (Rw * T * Td) * (T - Td) rh <- 100 * exp(arg) } @@ -157,7 +157,7 @@ wide2long <- function(data.wide, lat, lon, var) { ##' @return PPFD (umol / m2 / s) ##' @author David LeBauer par2ppfd <- function(watts) { - ppfd <- watts/(2.35 * 10^5) + ppfd <- watts / (2.35 * 10^5) return(PEcAn.utils::ud_convert(ppfd, "mol ", "umol")) } # par2ppfd @@ -222,7 +222,7 @@ solarMJ2ppfd <- function(solarMJ) { ##' @export ##' @author Mike Dietze exner <- function(pres) { - return(1004 * pres ^ (287 / 1004)) + return(1004 * pres^(287 / 1004)) } # exner ##' estimate air density from pressure, temperature, and humidity @@ -245,5 +245,5 @@ AirDens <- function(pres, T, rv) { ##' @return lV latent heat of vaporization (J kg-1) get.lv <- function(airtemp = 268.6465) { airtemp_C <- PEcAn.utils::ud_convert(airtemp, "K", "degC") - return((94.21 * (365 - airtemp_C) ^ 0.31249) * 4.183 * 1000) + return((94.21 * (365 - airtemp_C)^0.31249) * 4.183 * 1000) } # get.lv diff --git a/modules/data.atmosphere/R/nc_merge.R b/modules/data.atmosphere/R/nc_merge.R index 963ea9e998f..7de90c57a6c 100644 --- a/modules/data.atmosphere/R/nc_merge.R +++ b/modules/data.atmosphere/R/nc_merge.R @@ -8,8 +8,8 @@ ##' @author James Simkins, Christy Rollinson ##' @description This is the 1st function for the tdm (Temporally Downscale Meteorology) workflow. The nc2dat.train function ##' parses multiple netCDF files into one central training data file called 'dat.train_file'. This netCDF -##' file will be used to generate the subdaily models in the next step of the workflow, generate.subdaily.models(). -##' It is also called in tdm_predict_subdaily_met which is the final step of the tdm workflow. +##' file will be used to generate the subdaily models in the next step of the workflow, generate.subdaily.models(). +##' It is also called in tdm_predict_subdaily_met which is the final step of the tdm workflow. # ----------------------------------- # Parameters # ----------------------------------- @@ -30,178 +30,194 @@ #---------------------------------------------------------------------- # Begin Function #---------------------------------------------------------------------- -nc.merge <- function(outfolder, in.path, in.prefix, start_date, end_date, +nc.merge <- function( + outfolder, in.path, in.prefix, start_date, end_date, upscale = FALSE, overwrite = FALSE, verbose = FALSE, ...) 
{ - - start_date <- as.POSIXlt(start_date, tz = "UTC") - end_date <- as.POSIXlt(end_date, tz = "UTC") - - start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - - yr_seq <- seq(start_year, end_year) - - #------------ Read in the data - input_met <- list() - for (j in seq_along(yr_seq)) { - input_met[[j]] <- file.path(in.path, paste0(in.prefix, ".", yr_seq[j], - ".nc", sep = "")) + start_date <- as.POSIXlt(start_date, tz = "UTC") + end_date <- as.POSIXlt(end_date, tz = "UTC") + + start_year <- lubridate::year(start_date) + end_year <- lubridate::year(end_date) + + yr_seq <- seq(start_year, end_year) + + #------------ Read in the data + input_met <- list() + for (j in seq_along(yr_seq)) { + input_met[[j]] <- file.path(in.path, paste0(in.prefix, ".", yr_seq[j], + ".nc", + sep = "" + )) + } + + + vars.info <- data.frame(CF.name = c( + "air_temperature", "air_temperature_max", + "air_temperature_min", "surface_downwelling_longwave_flux_in_air", + "air_pressure", "surface_downwelling_shortwave_flux_in_air", "eastward_wind", + "northward_wind", "specific_humidity", "precipitation_flux" + ), units = c( + "Kelvin", + "Kelvin", "Kelvin", "W/m2", "Pascal", "W/m2", "m/s", "m/s", "g/g", + "kg/m2/s" + )) + + stepby <- list() # list of time stepbys just in case they vary + + # Have to do 1 go first to initialize the dataframe + raw_train_data <- list() + tem <- ncdf4::nc_open(input_met[[1]]) + dim <- tem$dim + for (j in seq_along(vars.info$CF.name)) { + if (exists(as.character(vars.info$CF.name[j]), tem$var) == FALSE) { + raw_train_data[[j]] <- NA + } else { + raw_train_data[[j]] <- ncdf4::ncvar_get(tem, as.character(vars.info$CF.name[j])) } - - - vars.info <- data.frame(CF.name = c("air_temperature", "air_temperature_max", - "air_temperature_min", "surface_downwelling_longwave_flux_in_air", - "air_pressure", "surface_downwelling_shortwave_flux_in_air", "eastward_wind", - "northward_wind", "specific_humidity", "precipitation_flux"), units = c("Kelvin", - "Kelvin", "Kelvin", "W/m2", "Pascal", "W/m2", "m/s", "m/s", "g/g", - "kg/m2/s")) - - stepby <- list() # list of time stepbys just in case they vary - - # Have to do 1 go first to initialize the dataframe - raw_train_data <- list() - tem <- ncdf4::nc_open(input_met[[1]]) - dim <- tem$dim - for (j in seq_along(vars.info$CF.name)) { - if (exists(as.character(vars.info$CF.name[j]), tem$var) == FALSE) { + } + names(raw_train_data) <- vars.info$CF.name + train_df <- data.frame(raw_train_data) + + # Figure out what temporal resolution the given data is in + # stepby[] helps us create the data sequence length + raw_train_data <- data.frame(raw_train_data) + if ((nrow(raw_train_data) == 17520) | (nrow(raw_train_data) == 17568)) { + stepby[1] <- 2 + } + if ((nrow(raw_train_data) == 8760) | (nrow(raw_train_data) == 8784)) { + stepby[1] <- 1 + } + if ((nrow(raw_train_data) == 1460) | (nrow(raw_train_data) == 1464)) { + stepby[1] <- (1 / 6) + } + # Add a time stamp + start_time <- as.POSIXlt(paste0(yr_seq[1], "-01-01"), tz = "UTC") + end_time <- as.POSIXlt(paste0(yr_seq[1] + 1, "-01-01"), tz = "UTC") + train_df$date <- seq.POSIXt( + from = start_time, by = 60 * 60 / stepby[[1]], + length.out = nrow(train_df) + ) + summary(train_df) + + lat_raw_train_data <- as.numeric(ncdf4::ncvar_get(tem, "latitude")) + lon_raw_train_data <- as.numeric(ncdf4::ncvar_get(tem, "longitude")) + ncdf4::nc_close(tem) + + # Loop through remaining files and format Just a safe guard in case we + # run this for a single year + if (length(input_met) > 1) { + for (i 
in 2:length(input_met)) { + if (file.exists(input_met[[i]])) { + raw_train_data <- list() + tem <- ncdf4::nc_open(input_met[[i]]) + dim <- tem$dim + for (j in seq_along(vars.info$CF.name)) { + if (exists(as.character(vars.info$CF.name[j]), tem$var) == + FALSE) { raw_train_data[[j]] <- NA - } else { + } else { raw_train_data[[j]] <- ncdf4::ncvar_get(tem, as.character(vars.info$CF.name[j])) + } } - } - names(raw_train_data) <- vars.info$CF.name - train_df <- data.frame(raw_train_data) - - # Figure out what temporal resolution the given data is in - # stepby[] helps us create the data sequence length - raw_train_data <- data.frame(raw_train_data) - if ((nrow(raw_train_data) == 17520) | (nrow(raw_train_data) == 17568)) { - stepby[1] <- 2 - } - if ((nrow(raw_train_data) == 8760) | (nrow(raw_train_data) == 8784)) { - stepby[1] <- 1 - } - if ((nrow(raw_train_data) == 1460) | (nrow(raw_train_data) == 1464)) { - stepby[1] <- (1/6) - } - # Add a time stamp - start_time <- as.POSIXlt(paste0(yr_seq[1], "-01-01"), tz = "UTC") - end_time <- as.POSIXlt(paste0(yr_seq[1] + 1, "-01-01"), tz = "UTC") - train_df$date <- seq.POSIXt(from = start_time, by = 60 * 60/stepby[[1]], - length.out = nrow(train_df)) - summary(train_df) - - lat_raw_train_data <- as.numeric(ncdf4::ncvar_get(tem, "latitude")) - lon_raw_train_data <- as.numeric(ncdf4::ncvar_get(tem, "longitude")) - ncdf4::nc_close(tem) - - # Loop through remaining files and format Just a safe guard in case we - # run this for a single year - if (length(input_met) > 1) { - for (i in 2:length(input_met)) { - if (file.exists(input_met[[i]])) { - raw_train_data <- list() - tem <- ncdf4::nc_open(input_met[[i]]) - dim <- tem$dim - for (j in seq_along(vars.info$CF.name)) { - if (exists(as.character(vars.info$CF.name[j]), tem$var) == - FALSE) { - raw_train_data[[j]] <- NA - } else { - raw_train_data[[j]] <- ncdf4::ncvar_get(tem, as.character(vars.info$CF.name[j])) - } - } - names(raw_train_data) <- vars.info$CF.name - raw_train_data <- data.frame(raw_train_data) - - # Figure out what temporal resolution the given data is in - # stepby[] helps us create the data sequence length - if ((nrow(raw_train_data) == 17520) | (nrow(raw_train_data) == - 17568)) { - stepby[i] <- 2 - } - if ((nrow(raw_train_data) == 8760) | (nrow(raw_train_data) == - 8784)) { - stepby[i] <- 1 - } - if ((nrow(raw_train_data) == 1460) | (nrow(raw_train_data) == 1464)) { - stepby[1] <- (1/6) - } - - # Add a time stamp - start_time <- as.POSIXlt(paste0(yr_seq[i], "-01-01"), tz = "UTC") - end_time <- as.POSIXlt(paste0(yr_seq[i] + 1, "-01-01"), - tz = "UTC") - raw_train_data$date <- seq.POSIXt(from = start_time, by = 60 * - 60/stepby[[i]], length.out = nrow(raw_train_data)) - summary(raw_train_data) - - train_df <- rbind(train_df, raw_train_data) - } else { - stepby[i] <- NA - } - ncdf4::nc_close(tem) - } # End year loop - } - - # Quick & dirty way of gap-filling any lingering NAs - for (i in 1:ncol(train_df)) { - train_df[is.na(train_df[, i]), i] <- mean(train_df[, i], na.rm = TRUE) - } + names(raw_train_data) <- vars.info$CF.name + raw_train_data <- data.frame(raw_train_data) - if (upscale == FALSE) { - dat.train <- train_df - } else { - # Need to create column of each of these for the aggregate function to work - time.vars <- c("year", "doy", "hour") - agg.ind <- which(time.vars==upscale) - time.vars <- time.vars[1:agg.ind] - train_df["year"] = lubridate::year(train_df$date) - train_df["doy"] = lubridate::yday(train_df$date) - train_df["hour"] = lubridate::hour(train_df$date) - # Figure out which 
temporal variables we're aggregating over - if (upscale == "year") { - upscale_timestep = 365 - } - if (upscale == "doy") { - upscale_timestep = 1 - } - if (upscale == "hour") { - upscale_timestep = 1/24 + # Figure out what temporal resolution the given data is in + # stepby[] helps us create the data sequence length + if ((nrow(raw_train_data) == 17520) | (nrow(raw_train_data) == + 17568)) { + stepby[i] <- 2 + } + if ((nrow(raw_train_data) == 8760) | (nrow(raw_train_data) == + 8784)) { + stepby[i] <- 1 + } + if ((nrow(raw_train_data) == 1460) | (nrow(raw_train_data) == 1464)) { + stepby[1] <- (1 / 6) + } + + # Add a time stamp + start_time <- as.POSIXlt(paste0(yr_seq[i], "-01-01"), tz = "UTC") + end_time <- as.POSIXlt(paste0(yr_seq[i] + 1, "-01-01"), + tz = "UTC" + ) + raw_train_data$date <- seq.POSIXt(from = start_time, by = 60 * + 60 / stepby[[i]], length.out = nrow(raw_train_data)) + summary(raw_train_data) + + train_df <- rbind(train_df, raw_train_data) + } else { + stepby[i] <- NA } - dat.train <- stats::aggregate(train_df[, names(train_df)[!names(train_df) %in% - c("year", "doy", "hour")]], by = train_df[time.vars], FUN = mean, - na.rm = FALSE) - dat.train <- dat.train[order(dat.train$date), ] + ncdf4::nc_close(tem) + } # End year loop + } + + # Quick & dirty way of gap-filling any lingering NAs + for (i in 1:ncol(train_df)) { + train_df[is.na(train_df[, i]), i] <- mean(train_df[, i], na.rm = TRUE) + } + + if (upscale == FALSE) { + dat.train <- train_df + } else { + # Need to create column of each of these for the aggregate function to work + time.vars <- c("year", "doy", "hour") + agg.ind <- which(time.vars == upscale) + time.vars <- time.vars[1:agg.ind] + train_df["year"] <- lubridate::year(train_df$date) + train_df["doy"] <- lubridate::yday(train_df$date) + train_df["hour"] <- lubridate::hour(train_df$date) + # Figure out which temporal variables we're aggregating over + if (upscale == "year") { + upscale_timestep <- 365 } - # --------------------------------- - - # Add dataset name - - dat.train$dataset <- paste0(in.prefix) - - # Create dimensions for NC file - ntime = nrow(dat.train) - days_elapsed <- (1:ntime) * upscale_timestep - .5*upscale_timestep - time <- ncdf4::ncdim_def(name = "time", units = paste0("days since ", start_year, "-01-01T00:00:00Z"), - vals = as.array(days_elapsed), create_dimvar = TRUE, unlim = TRUE) - dim$time = time - # Create var.list for the NC file - var.list <- list() - for (j in seq_along(vars.info$CF.name)) { - var.list[[j]] <- ncdf4::ncvar_def(name = as.character(vars.info$CF.name[j]), - units = as.character(vars.info$units[j]), dim = dim, missval = -9999, - verbose = verbose) + if (upscale == "doy") { + upscale_timestep <- 1 } - - # Create NC file - dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) - - loc.file <- file.path(outfolder, paste0(in.prefix, "_dat.train.nc")) - loc <- ncdf4::nc_create(filename = loc.file, vars = var.list, verbose = verbose) - - for (j in vars.info$CF.name) { - ncdf4::ncvar_put(nc = loc, varid = as.character(j), vals = dat.train[[j]][seq_len(nrow(dat.train))]) + if (upscale == "hour") { + upscale_timestep <- 1 / 24 } - ncdf4::nc_close(loc) + dat.train <- stats::aggregate( + train_df[, names(train_df)[!names(train_df) %in% + c("year", "doy", "hour")]], + by = train_df[time.vars], FUN = mean, + na.rm = FALSE + ) + dat.train <- dat.train[order(dat.train$date), ] + } + # --------------------------------- + + # Add dataset name + + dat.train$dataset <- paste0(in.prefix) + + # Create dimensions for NC file + ntime 
<- nrow(dat.train) + days_elapsed <- (1:ntime) * upscale_timestep - .5 * upscale_timestep + time <- ncdf4::ncdim_def( + name = "time", units = paste0("days since ", start_year, "-01-01T00:00:00Z"), + vals = as.array(days_elapsed), create_dimvar = TRUE, unlim = TRUE + ) + dim$time <- time + # Create var.list for the NC file + var.list <- list() + for (j in seq_along(vars.info$CF.name)) { + var.list[[j]] <- ncdf4::ncvar_def( + name = as.character(vars.info$CF.name[j]), + units = as.character(vars.info$units[j]), dim = dim, missval = -9999, + verbose = verbose + ) + } + + # Create NC file + dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) + + loc.file <- file.path(outfolder, paste0(in.prefix, "_dat.train.nc")) + loc <- ncdf4::nc_create(filename = loc.file, vars = var.list, verbose = verbose) + + for (j in vars.info$CF.name) { + ncdf4::ncvar_put(nc = loc, varid = as.character(j), vals = dat.train[[j]][seq_len(nrow(dat.train))]) + } + ncdf4::nc_close(loc) } diff --git a/modules/data.atmosphere/R/noaa_gefs_efi_helper.R b/modules/data.atmosphere/R/noaa_gefs_efi_helper.R index 99d7798d75e..c44870b24f5 100644 --- a/modules/data.atmosphere/R/noaa_gefs_efi_helper.R +++ b/modules/data.atmosphere/R/noaa_gefs_efi_helper.R @@ -1,7 +1,7 @@ -#code taken from https://github.com/eco4cast/neon4cast/blob/main/R/noaa_gefs.R +# code taken from https://github.com/eco4cast/neon4cast/blob/main/R/noaa_gefs.R #' noaa_stage2 #' -#' @param cycle Hour at which forecast was made, as character string +#' @param cycle Hour at which forecast was made, as character string #' (`"00"`, `"06"`, `"12"` or `"18"`). Only `"00"` (default) has 30 days horizon. #' @param version GEFS forecast version. Prior versions correspond to forecasts #' issued before 2020-09-25 which have different ensemble number and horizon, @@ -9,7 +9,7 @@ #' @param endpoint the EFI host address (leave as default) #' @param verbose logical, displays or hides messages #' @param start_date forecast start date yyyy-mm-dd format -#' +#' #' @export #' #' @author Alexis Helgeson (taken from neon4cast package) @@ -18,55 +18,58 @@ noaa_stage2 <- function(cycle = 0, endpoint = "data.ecoforecast.org", verbose = TRUE, start_date = "") { - noaa_gefs_stage(file.path("stage2/parquet",cycle, start_date), - partitioning = "start_date", - version = version, - endpoint = endpoint, - verbose = verbose, - start_date = start_date) - + noaa_gefs_stage(file.path("stage2/parquet", cycle, start_date), + partitioning = "start_date", + version = version, + endpoint = endpoint, + verbose = verbose, + start_date = start_date + ) } noaa_gefs_stage <- function(stage = "stage1", - partitioning = c("cycle","start_date"), + partitioning = c("cycle", "start_date"), cycle = 0, version = "v12", endpoint = "data.ecoforecast.org", verbose = getOption("verbose", TRUE), start_date = start_date) { - if(verbose) + if (verbose) { message(paste("establishing connection to", stage, "at", endpoint, "...")) + } s3 <- noaa_gefs(version, endpoint) if (!is.na(as.Date(start_date))) { ds <- arrow::open_dataset(s3$path(stage)) } else { ds <- arrow::open_dataset(s3$path(stage), partitioning = partitioning) } - if(verbose) - message(paste0("connected! Use dplyr functions to filter and summarise.\n", - "Then, use collect() to read result into R\n")) - ds + if (verbose) { + message(paste0( + "connected! 
Use dplyr functions to filter and summarise.\n", + "Then, use collect() to read result into R\n" + )) + } + ds } noaa_gefs <- function(version = "v12", endpoint = "data.ecoforecast.org") { - vars <- arrow_env_vars() gefs <- arrow::s3_bucket(paste0("neon4cast-drivers/noaa/gefs-", version), - endpoint_override = endpoint, - anonymous = TRUE) - #error is coming from this chunk Error: NotImplemented: Got S3 URI but Arrow compiled without S3 support + endpoint_override = endpoint, + anonymous = TRUE + ) + # error is coming from this chunk Error: NotImplemented: Got S3 URI but Arrow compiled without S3 support on.exit(unset_arrow_vars(vars)) gefs - } -arrow_env_vars <- function(){ +arrow_env_vars <- function() { user_region <- Sys.getenv("AWS_DEFAULT_REGION") user_meta <- Sys.getenv("AWS_EC2_METADATA_DISABLED") Sys.unsetenv("AWS_DEFAULT_REGION") - Sys.setenv(AWS_EC2_METADATA_DISABLED="TRUE") - - list(user_region=user_region, user_meta = user_meta) + Sys.setenv(AWS_EC2_METADATA_DISABLED = "TRUE") + + list(user_region = user_region, user_meta = user_meta) } unset_arrow_vars <- function(vars) { diff --git a/modules/data.atmosphere/R/pecan_standard_met_table.R b/modules/data.atmosphere/R/pecan_standard_met_table.R index 557e1322959..2525d61699e 100644 --- a/modules/data.atmosphere/R/pecan_standard_met_table.R +++ b/modules/data.atmosphere/R/pecan_standard_met_table.R @@ -2,23 +2,23 @@ #' #' @export pecan_standard_met_table <- tibble::tribble( - ~`cf_standard_name` , ~units , ~is_required, ~bety , ~isimip , ~cruncep , ~narr , ~ameriflux , - "air_temperature" , "K" , TRUE, "airT" , "tasAdjust" , "tair" , "air" , "TA (C)" , - "air_temperature_max" , "K" , FALSE, NA , "tasmaxAdjust" , NA , "tmax" , NA , - "air_temperature_min" , "K" , FALSE, NA , "tasminAdjust" , NA , "tmin" , NA , - "air_pressure" , "Pa" , TRUE, "air_pressure" , NA , NA , NA , "PRESS (KPa)" , - "mole_fraction_of_carbon_dioxide_in_air" , "mol/mol" , FALSE, NA , NA , NA , NA , "CO2" , - "moisture_content_of_soil_layer" , "kg m-2" , FALSE, NA , NA , NA , NA , NA , - "soil_temperature" , "K" , FALSE, "soilT" , NA , NA , NA , "TS1 *(NOT DONE)*" , - "relative_humidity" , "%" , FALSE, "relative_humidity" , "rhurs" , NA , "rhum" , "RH" , - "specific_humidity" , "1" , TRUE, "specific_humidity" , NA , "qair" , "shum" , "CALC(RH)" , - "water_vapor_saturation_deficit" , "Pa" , FALSE, "VPD" , NA , NA , NA , "VPD *(NOT DONE)*" , - "surface_downwelling_longwave_flux_in_air" , "W m-2" , TRUE, "same" , "rldsAdjust" , "lwdown" , "dlwrf" , "Rgl" , - "surface_downwelling_shortwave_flux_in_air" , "W m-2" , TRUE, "solar_radiation" , "rsdsAdjust" , "swdown" , "dswrf" , "Rg" , - "surface_downwelling_photosynthetic_photon_flux_in_air" , "mol m-2 s-1" , FALSE, "PAR" , NA , NA , NA , "PAR *(NOT DONE)*" , - "precipitation_flux" , "kg m-2 s-1" , TRUE, "cccc" , "prAdjust" , "rain" , "acpc" , "PREC (mm/s)" , - "wind_to_direction" , "degrees" , FALSE, "wind_direction" , NA , NA , NA , "WD" , - "wind_speed" , "m/s" , FALSE, "Wspd" , NA , NA , NA , "WS" , - "eastward_wind" , "m/s" , TRUE, "eastward_wind" , NA , NA , NA , "CALC(WS+WD)" , - "northward_wind" , "m/s" , TRUE, "northward_wind" , NA , NA , NA , "CALC(WS+WD)" + ~`cf_standard_name`, ~units, ~is_required, ~bety, ~isimip, ~cruncep, ~narr, ~ameriflux, + "air_temperature", "K", TRUE, "airT", "tasAdjust", "tair", "air", "TA (C)", + "air_temperature_max", "K", FALSE, NA, "tasmaxAdjust", NA, "tmax", NA, + "air_temperature_min", "K", FALSE, NA, "tasminAdjust", NA, "tmin", NA, + "air_pressure", "Pa", TRUE, 
"air_pressure", NA, NA, NA, "PRESS (KPa)", + "mole_fraction_of_carbon_dioxide_in_air", "mol/mol", FALSE, NA, NA, NA, NA, "CO2", + "moisture_content_of_soil_layer", "kg m-2", FALSE, NA, NA, NA, NA, NA, + "soil_temperature", "K", FALSE, "soilT", NA, NA, NA, "TS1 *(NOT DONE)*", + "relative_humidity", "%", FALSE, "relative_humidity", "rhurs", NA, "rhum", "RH", + "specific_humidity", "1", TRUE, "specific_humidity", NA, "qair", "shum", "CALC(RH)", + "water_vapor_saturation_deficit", "Pa", FALSE, "VPD", NA, NA, NA, "VPD *(NOT DONE)*", + "surface_downwelling_longwave_flux_in_air", "W m-2", TRUE, "same", "rldsAdjust", "lwdown", "dlwrf", "Rgl", + "surface_downwelling_shortwave_flux_in_air", "W m-2", TRUE, "solar_radiation", "rsdsAdjust", "swdown", "dswrf", "Rg", + "surface_downwelling_photosynthetic_photon_flux_in_air", "mol m-2 s-1", FALSE, "PAR", NA, NA, NA, "PAR *(NOT DONE)*", + "precipitation_flux", "kg m-2 s-1", TRUE, "cccc", "prAdjust", "rain", "acpc", "PREC (mm/s)", + "wind_to_direction", "degrees", FALSE, "wind_direction", NA, NA, NA, "WD", + "wind_speed", "m/s", FALSE, "Wspd", NA, NA, NA, "WS", + "eastward_wind", "m/s", TRUE, "eastward_wind", NA, NA, NA, "CALC(WS+WD)", + "northward_wind", "m/s", TRUE, "northward_wind", NA, NA, NA, "CALC(WS+WD)" ) diff --git a/modules/data.atmosphere/R/permute.nc.R b/modules/data.atmosphere/R/permute.nc.R index 24a3a96c117..6076272368b 100644 --- a/modules/data.atmosphere/R/permute.nc.R +++ b/modules/data.atmosphere/R/permute.nc.R @@ -13,55 +13,59 @@ ##' @param ... further arguments, currently ignored ##' ##' @author Elizabeth Cowdery, Rob Kooper -permute.nc <- function(in.path, in.prefix, outfolder, start_date, end_date, +permute.nc <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, ...) 
{ # get start/end year code works on whole years only start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) - + dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) - + rows <- end_year - start_year + 1 - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = in.prefix, - stringsAsFactors = FALSE) - + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = in.prefix, + stringsAsFactors = FALSE + ) + for (year in start_year:end_year) { old.file <- file.path(in.path, paste(in.prefix, year, "nc", sep = ".")) tmp.file <- file.path(outfolder, paste(in.prefix, "temp", year, "nc", sep = ".")) new.file <- file.path(outfolder, paste(in.prefix, year, "nc", sep = ".")) - + # create array with results row <- year - start_year + 1 - results$file[row] <- new.file - results$host[row] <- PEcAn.remote::fqdn() - results$startdate[row] <- paste0(year, "-01-01 00:00:00") - results$enddate[row] <- paste0(year, "-12-31 23:59:59") - results$mimetype[row] <- "application/x-netcdf" + results$file[row] <- new.file + results$host[row] <- PEcAn.remote::fqdn() + results$startdate[row] <- paste0(year, "-01-01 00:00:00") + results$enddate[row] <- paste0(year, "-12-31 23:59:59") + results$mimetype[row] <- "application/x-netcdf" results$formatname[row] <- "CF (permuted)" - + if (file.exists(new.file) && !overwrite) { PEcAn.logger::logger.debug("File '", new.file, "' already exists, skipping to next file.") next } - + if (verbose) { - print(paste(c("nccopy", list("-k", "3", "-u", "-c", "time/8,y/277,x/349", old.file, tmp.file)), - collapse = " ")) + print(paste(c("nccopy", list("-k", "3", "-u", "-c", "time/8,y/277,x/349", old.file, tmp.file)), + collapse = " " + )) } system2("nccopy", list("-k", "3", "-u", "-c", "time/8,y/277,x/349", old.file, tmp.file)) if (verbose) { - print(paste(c("ncpdq", list("--no_tmp_fl", "-h", "-O", "-a", "y,x,time", tmp.file, new.file)), - collapse = " ")) - } + print(paste(c("ncpdq", list("--no_tmp_fl", "-h", "-O", "-a", "y,x,time", tmp.file, new.file)), + collapse = " " + )) + } system2("ncpdq", list("--no_tmp_fl", "-h", "-O", "-a", "y,x,time", tmp.file, new.file)) unlink(tmp.file) } - + return(invisible(results)) } # permute.nc diff --git a/modules/data.atmosphere/R/read.register.R b/modules/data.atmosphere/R/read.register.R index 1fb851ddda3..babf2bc5b55 100644 --- a/modules/data.atmosphere/R/read.register.R +++ b/modules/data.atmosphere/R/read.register.R @@ -3,56 +3,54 @@ ##' @export ##' @param register.xml path of xml file ##' @param con betydb connection -##' +##' ##' @author Betsy Cowdery read.register <- function(register.xml, con) { - register <- XML::xmlToList(XML::xmlParse(register.xml)) PEcAn.logger::logger.debug(as.data.frame(register)) - + # check scale if (is.null(register$scale)) { - PEcAn.logger::logger.error("Scale is not defined") + PEcAn.logger::logger.error("Scale is not defined") } else { if (register$scale == "regional" & is.null(register$siteid)) { - PEcAn.logger::logger.warn("Region site id is not defined") + PEcAn.logger::logger.warn("Region site id is not defined") } } - + # check format format is not defined if (is.null(register$format)) { - PEcAn.logger::logger.error("Format is not defined") + 
PEcAn.logger::logger.error("Format is not defined") } else if (is.null(register$format$inputtype)) { - PEcAn.logger::logger.error("Input type is not defined") #Ultimatly can get this from the format table in betydb + PEcAn.logger::logger.error("Input type is not defined") # Ultimatly can get this from the format table in betydb } else { # format is defined - if ((is.null(register$format$id) & is.null(register$format$name) & is.null(register$format$mimetype)) - | - (is.null(register$format$id) & is.null(register$format$name)) - | - (is.null(register$format$id) & is.null(register$format$mimetype))) { - PEcAn.logger::logger.error("Not enough format info") - } else if ((!is.null(register$format$id) & is.null(register$format$name)) - | - (!is.null(register$format$id) & is.null(register$format$mimetype))) { + if ((is.null(register$format$id) & is.null(register$format$name) & is.null(register$format$mimetype)) | + (is.null(register$format$id) & is.null(register$format$name)) | + (is.null(register$format$id) & is.null(register$format$mimetype))) { + PEcAn.logger::logger.error("Not enough format info") + } else if ((!is.null(register$format$id) & is.null(register$format$name)) | + (!is.null(register$format$id) & is.null(register$format$mimetype))) { # Retrieve format name and mimetype from the database query.format.info <- PEcAn.DB::db.query( paste( "SELECT name, type_string AS mimetype", "FROM formats JOIN mimetypes ON formats.mimetype_id = mimetypes.id", - "WHERE formats.id = ", register$format$id), + "WHERE formats.id = ", register$format$id + ), con ) - + register$format$name <- query.format.info$name register$format$mimetype <- query.format.info$mimetype - } else if (is.null(register$format$id) & !is.null(register$format$name) & !is.null(register$format$mimetype)) { register$format$id <- PEcAn.DB::db.query( paste0( "SELECT formats.id FROM formats JOIN mimetypes ON formats.mimetype_id = mimetypes.id ", "WHERE name = '", register$format$name, - "' AND type_string = '", register$format$mimetype, "'"), con)[[1]] + "' AND type_string = '", register$format$mimetype, "'" + ), con + )[[1]] } } return(invisible(register)) diff --git a/modules/data.atmosphere/R/site.lst.R b/modules/data.atmosphere/R/site.lst.R index 69f80b7b3d3..76d66267f3e 100644 --- a/modules/data.atmosphere/R/site.lst.R +++ b/modules/data.atmosphere/R/site.lst.R @@ -10,8 +10,10 @@ site.lst <- function(site.id, con) { if (!is.na(time.zone) && !is.na(as.character(time.zone))) { lst <- PEcAn.utils::timezone_hour(time.zone) } else { - site <- PEcAn.DB::db.query(paste("SELECT ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat", - "FROM sites WHERE id =", site.id), con) + site <- PEcAn.DB::db.query(paste( + "SELECT ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat", + "FROM sites WHERE id =", site.id + ), con) if (is.null(getOption("geonamesUsername"))) { options(geonamesUsername = "carya") } diff --git a/modules/data.atmosphere/R/solar_angle.R b/modules/data.atmosphere/R/solar_angle.R index 306642ff0d3..e586c4d6fd7 100644 --- a/modules/data.atmosphere/R/solar_angle.R +++ b/modules/data.atmosphere/R/solar_angle.R @@ -1,6 +1,6 @@ #' Cosine of Solar Zenith Angle #' -#' Calculates the cosine of the solar zenith angle based on the given parameters. +#' Calculates the cosine of the solar zenith angle based on the given parameters. #' This angle is crucial in determining the amount of solar radiation reaching a point on Earth. 
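Since the arguments to cos_solar_zenith_angle() are easy to mix up, a minimal usage sketch may help; it simply calls the function as defined just below, and all numeric inputs (a rough Harvard Forest location, day-of-year 172, local noon, a 1800 s met timestep) are illustrative values, not taken from any PEcAn workflow.

# Illustrative call only: cosine of the solar zenith angle near 42.5 N, 72.2 W
# at local noon on day-of-year 172, for a half-hourly (1800 s) timestep.
cosz <- PEcAn.data.atmosphere::cos_solar_zenith_angle(
  doy = 172, lat = 42.5, lon = -72.2, dt = 1800, hr = 12
)
cosz # bounded to [0, 1]; values near 1 mean the sun is high in the sky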
#' #' For explanations of formulae, see https://web.archive.org/web/20180307133425/http://www.itacanet.org/the-sun-as-a-source-of-energy/part-3-calculating-solar-angles/ @@ -14,7 +14,7 @@ #' #' @return Numeric value representing the cosine of the solar zenith angle. #' -#' @references +#' @references #' "Understanding Solar Position and Solar Radiation" - RAMMB: [Link](https://rammb.cira.colostate.edu/wmovl/vrl/tutorials/euromet/courses/english/nwp/n5720/n5720005.htm) #' #' @examples @@ -22,18 +22,18 @@ #' #' @export cos_solar_zenith_angle <- function(doy, lat, lon, dt, hr) { - et <- equation_of_time(doy) - merid <- floor(lon / 15) * 15 - merid[merid < 0] <- merid[merid < 0] + 15 - lc <- (lon - merid) * -4/60 ## longitude correction - tz <- merid / 360 * 24 ## time zone - midbin <- 0.5 * dt / 86400 * 24 ## shift calc to middle of bin - t0 <- 12 + lc - et - tz - midbin ## solar time - h <- pi/12 * (hr - t0) ## solar hour - dec <- -23.45 * pi / 180 * cos(2 * pi * (doy + 10) / 365) ## declination - cosz <- sin(lat * pi / 180) * sin(dec) + cos(lat * pi / 180) * cos(dec) * cos(h) - cosz[cosz < 0] <- 0 - return(cosz) + et <- equation_of_time(doy) + merid <- floor(lon / 15) * 15 + merid[merid < 0] <- merid[merid < 0] + 15 + lc <- (lon - merid) * -4 / 60 ## longitude correction + tz <- merid / 360 * 24 ## time zone + midbin <- 0.5 * dt / 86400 * 24 ## shift calc to middle of bin + t0 <- 12 + lc - et - tz - midbin ## solar time + h <- pi / 12 * (hr - t0) ## solar hour + dec <- -23.45 * pi / 180 * cos(2 * pi * (doy + 10) / 365) ## declination + cosz <- sin(lat * pi / 180) * sin(dec) + cos(lat * pi / 180) * cos(dec) * cos(h) + cosz[cosz < 0] <- 0 + return(cosz) } #' Equation of time: Eccentricity and obliquity @@ -45,11 +45,11 @@ cos_solar_zenith_angle <- function(doy, lat, lon, dt, hr) { #' @return `numeric(1)` length of the solar day, in hours. #' @export equation_of_time <- function(doy) { - stopifnot(doy <= 367) #changed from 366 to 367 to account for leap years - - f <- pi / 180 * (279.5 + 0.9856 * doy) - et <- (-104.7 * sin(f) + 596.2 * sin(2 * f) + 4.3 * - sin(4 * f) - 429.3 * cos(f) - 2 * - cos(2 * f) + 19.3 * cos(3 * f)) / 3600 # equation of time -> eccentricity and obliquity + stopifnot(doy <= 367) # changed from 366 to 367 to account for leap years + + f <- pi / 180 * (279.5 + 0.9856 * doy) + et <- (-104.7 * sin(f) + 596.2 * sin(2 * f) + 4.3 * + sin(4 * f) - 429.3 * cos(f) - 2 * + cos(2 * f) + 19.3 * cos(3 * f)) / 3600 # equation of time -> eccentricity and obliquity return(et) } diff --git a/modules/data.atmosphere/R/spin.met.R b/modules/data.atmosphere/R/spin.met.R index 14bde6b6594..f733997d846 100644 --- a/modules/data.atmosphere/R/spin.met.R +++ b/modules/data.atmosphere/R/spin.met.R @@ -1,7 +1,7 @@ #' Spin-up meteorology #' #' @param in.path met input folder path -#' @param in.prefix met input file prefix (shared by all annual files, can be "") +#' @param in.prefix met input file prefix (shared by all annual files, can be "") #' @param start_date start of met #' @param end_date end of met #' @param nyear number of years of spin-up, default 1000 @@ -9,10 +9,10 @@ #' @param resample resample (TRUE, default) or cycle (FALSE) meteorology #' @param run_start_date date the run itself starts, which can be different than the start of met #' @param overwrite whether to replace previous resampling -#' -#' @details -#' spin.met works by creating symbolic links to the sampled met file, -#' rather than copying the whole file. 
Be aware that the internal dates in +#' +#' @details +#' spin.met works by creating symbolic links to the sampled met file, +#' rather than copying the whole file. Be aware that the internal dates in #' those files are not modified. Right now this is designed to be called within #' met2model.[MODEL] before the met is processed (it's designed to work with annual CF #' files not model-specific files) for example with models that process met @@ -23,86 +23,83 @@ #' #' @examples #' start_date <- "0850-01-01 00:00:00" -#' end_date <- "2010-12-31 23:59:59" -#' nyear <- 10 -#' nsample <- 50 -#' resample <- TRUE -#' +#' end_date <- "2010-12-31 23:59:59" +#' nyear <- 10 +#' nsample <- 50 +#' resample <- TRUE +#' #' \dontrun{ -#' if(!is.null(spin)){ -#' ## if spinning up, extend processed met by resampling or cycling met -#' start_date <- PEcAn.data.atmosphere::spin.met( -#' in.path, in.prefix, -#' start_date, end_date, -#' nyear, nsample, resample) +#' if (!is.null(spin)) { +#' ## if spinning up, extend processed met by resampling or cycling met +#' start_date <- PEcAn.data.atmosphere::spin.met( +#' in.path, in.prefix, +#' start_date, end_date, +#' nyear, nsample, resample +#' ) #' } #' } -spin.met <- function(in.path, in.prefix, start_date, end_date, nyear = 1000, nsample = 50, resample = TRUE, run_start_date = start_date, overwrite = TRUE){ - +spin.met <- function(in.path, in.prefix, start_date, end_date, nyear = 1000, nsample = 50, resample = TRUE, run_start_date = start_date, overwrite = TRUE) { ### input checking - + # paths - if(missing(in.path) | is.null(in.path)){ - in.path <- "./" ## if path is missing, assume current working directory + if (missing(in.path) | is.null(in.path)) { + in.path <- "./" ## if path is missing, assume current working directory } - if(missing(in.prefix) | is.null(in.prefix)){ - in.prefix <- "" ## if prefix is missing, assume blank (files just YYYY.nc) + if (missing(in.prefix) | is.null(in.prefix)) { + in.prefix <- "" ## if prefix is missing, assume blank (files just YYYY.nc) } # dates start_year <- lubridate::year(start_date) end_year <- lubridate::year(end_date) avail.years <- start_year:end_year - + # spin settings - if(missing(nyear)|is.null(nyear) | is.na(nyear)) nyear <- 1000 + if (missing(nyear) | is.null(nyear) | is.na(nyear)) nyear <- 1000 nyear <- as.numeric(nyear) - if(missing(nsample)|is.null(nsample) | is.na(nsample)) nsample <- 50 + if (missing(nsample) | is.null(nsample) | is.na(nsample)) nsample <- 50 nsample <- as.numeric(nsample) - nsample <- min(nsample,length(avail.years)) + nsample <- min(nsample, length(avail.years)) avail.years <- avail.years[seq_len(nsample)] - if(missing(resample) | is.null(resample)|is.na(resample)) resample <- TRUE + if (missing(resample) | is.null(resample) | is.na(resample)) resample <- TRUE resample <- as.logical(resample) - spin_start_date <- as.POSIXct(run_start_date,"UTC") - lubridate::years(nyear) - + spin_start_date <- as.POSIXct(run_start_date, "UTC") - lubridate::years(nyear) + ### define the met years to sample - new_year <- seq(lubridate::year(spin_start_date),by=1,length.out=nyear) + new_year <- seq(lubridate::year(spin_start_date), by = 1, length.out = nyear) is.leap <- lubridate::leap_year(avail.years) spin_year <- NA - if(resample){ - for(t in seq_along(new_year)){ - if(lubridate::leap_year(new_year[t])){ - spin_year[t] <- sample(avail.years[is.leap],size = 1) + if (resample) { + for (t in seq_along(new_year)) { + if (lubridate::leap_year(new_year[t])) { + spin_year[t] <- sample(avail.years[is.leap], size = 
1) } else { - spin_year[t] <- sample(avail.years[!is.leap],size = 1) + spin_year[t] <- sample(avail.years[!is.leap], size = 1) } } } else { - spin_year <- rep(avail.years,length.out=nyear) + spin_year <- rep(avail.years, length.out = nyear) } - + ## loop over spin-up years - for(t in seq_along(new_year)){ - + for (t in seq_along(new_year)) { spin_year_txt <- formatC(spin_year[t], width = 4, format = "d", flag = "0") - source.file <- file.path(in.path,paste0(in.prefix,spin_year_txt,".nc")) - + source.file <- file.path(in.path, paste0(in.prefix, spin_year_txt, ".nc")) + new_year_txt <- formatC(new_year[t], width = 4, format = "d", flag = "0") - target.file <- file.path(in.path,paste0(in.prefix,new_year_txt,".nc")) - - if(overwrite){ - system2("rm",target.file) + target.file <- file.path(in.path, paste0(in.prefix, new_year_txt, ".nc")) + + if (overwrite) { + system2("rm", target.file) } - + ## check if a met file already exists - if(!file.exists(target.file)){ + if (!file.exists(target.file)) { ## if not, create a symbolic link to the year to be sampled - file.symlink(from = source.file,target.file) + file.symlink(from = source.file, target.file) } - } - + ## return new start_date return(spin_start_date) - } diff --git a/modules/data.atmosphere/R/split_wind.R b/modules/data.atmosphere/R/split_wind.R index a43de879ec8..fba6b33bdbb 100644 --- a/modules/data.atmosphere/R/split_wind.R +++ b/modules/data.atmosphere/R/split_wind.R @@ -1,12 +1,12 @@ #' Split wind_speed into eastward_wind and northward_wind #' -#' Currently modifies the files IN PLACE rather than creating a new copy of the files an a new DB record. +#' Currently modifies the files IN PLACE rather than creating a new copy of the files an a new DB record. #' #' @param in.path path to original data #' @param in.prefix prefix of original data #' @param start_date,end_date date (or character in a standard date format). Only year component is used. -#' @param overwrite logical: replace output file if it already exists? -#' @param verbose logical: should \code{\link[ncdf4:ncdf4-package]{ncdf4}} functions print debugging information as they run? +#' @param overwrite logical: replace output file if it already exists? +#' @param verbose logical: should \code{\link[ncdf4:ncdf4-package]{ncdf4}} functions print debugging information as they run? #' @param ... other arguments, currently ignored #' #' @return nothing. TODO: Return data frame summarizing results @@ -15,87 +15,90 @@ #' #' @examples #' \dontrun{ -#' in.path <- "~/paleon/PalEONregional_CF_site_1-24047/" -#' in.prefix <- "" -#' outfolder <- "~/paleon/metTest/" +#' in.path <- "~/paleon/PalEONregional_CF_site_1-24047/" +#' in.prefix <- "" +#' outfolder <- "~/paleon/metTest/" #' start_date <- "0850-01-01" -#' end_date <- "2010-12-31" -#' overwrite <- FALSE -#' verbose <- TRUE -#' +#' end_date <- "2010-12-31" +#' overwrite <- FALSE +#' verbose <- TRUE +#' #' split_wind(in.path, in.prefix, start_date, end_date, merge.file, overwrite, verbose) #' } split_wind <- function(in.path, in.prefix, start_date, end_date, - overwrite = FALSE, verbose = FALSE, ...){ - + overwrite = FALSE, verbose = FALSE, ...) 
{ # get start/end year code works on whole years only start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - if(nchar(in.prefix)>0) in.prefix <- paste0(in.prefix, ".") - + end_year <- lubridate::year(end_date) + if (nchar(in.prefix) > 0) in.prefix <- paste0(in.prefix, ".") + ## prep data structure for results rows <- end_year - start_year + 1 - results <- data.frame(file = character(rows), - host = character(rows), - mimetype = character(rows), - formatname = character(rows), - startdate = character(rows), - enddate = character(rows), - dbfile.name = in.prefix, - stringsAsFactors = FALSE) - + results <- data.frame( + file = character(rows), + host = character(rows), + mimetype = character(rows), + formatname = character(rows), + startdate = character(rows), + enddate = character(rows), + dbfile.name = in.prefix, + stringsAsFactors = FALSE + ) + for (year in start_year:end_year) { year_txt <- formatC(year, width = 4, format = "d", flag = "0") old.file <- file.path(in.path, paste0(in.prefix, year_txt, ".nc")) -# new.file <- file.path(outfolder, paste(in.prefix, year, "nc", sep = ".")) - + # new.file <- file.path(outfolder, paste(in.prefix, year, "nc", sep = ".")) + ## open target file nc <- ncdf4::nc_open(old.file, write = TRUE) - - if("eastward_wind" %in% names(nc$var)) { + + if ("eastward_wind" %in% names(nc$var)) { PEcAn.logger::logger.info("eastward_wind already exists", year_txt) ncdf4::nc_close(nc) next } - if(!("wind_speed" %in% names(nc$var))) { + if (!("wind_speed" %in% names(nc$var))) { PEcAn.logger::logger.error("wind_speed does not exist", year_txt) ncdf4::nc_close(nc) next } - - ##extract wind_speed & direction + + ## extract wind_speed & direction wind_speed <- ncdf4::ncvar_get(nc, "wind_speed") wind_speed.attr <- ncdf4::ncatt_get(nc, "wind_speed") WD <- "wind_direction" %in% names(nc$var) - if(WD){ - wind_dir <- pi/2 - PEcAn.utils::ud_convert(ncdf4::ncvar_get(nc, "wind_direction"), wind_dir$units, "radians") + if (WD) { + wind_dir <- pi / 2 - PEcAn.utils::ud_convert(ncdf4::ncvar_get(nc, "wind_direction"), wind_dir$units, "radians") wind_dir.attr <- ncdf4::ncatt_get(nc, "wind_direction") - east <- wind_speed*cos(wind_dir) - north <- wind_speed*sin(wind_dir) + east <- wind_speed * cos(wind_dir) + north <- wind_speed * sin(wind_dir) } else { east <- wind_speed - north <- 0*wind_speed + north <- 0 * wind_speed } - + ## get dimensions [latitude, longitude, time] dims <- list(nc$dim$latitude, nc$dim$longitude, nc$dim$time) - + ## insert east - eastward <- ncdf4::ncvar_def(name = "eastward_wind", units = wind_speed.attr$units, dim = dims, - missval = wind_speed.attr$`_FillValue`, verbose = verbose) + eastward <- ncdf4::ncvar_def( + name = "eastward_wind", units = wind_speed.attr$units, dim = dims, + missval = wind_speed.attr$`_FillValue`, verbose = verbose + ) nc <- ncdf4::ncvar_add(nc = nc, v = eastward, verbose = verbose) ncdf4::ncvar_put(nc = nc, varid = "eastward_wind", vals = east) - + ## insert north - northward <- ncdf4::ncvar_def(name = "northward_wind", units = wind_speed.attr$units, dim = dims, - missval = wind_speed.attr$`_FillValue`, verbose = verbose) + northward <- ncdf4::ncvar_def( + name = "northward_wind", units = wind_speed.attr$units, dim = dims, + missval = wind_speed.attr$`_FillValue`, verbose = verbose + ) nc <- ncdf4::ncvar_add(nc = nc, v = northward, verbose = verbose) ncdf4::ncvar_put(nc = nc, varid = "northward_wind", vals = north) - + ## close file ncdf4::nc_close(nc) - } ## end loop over year - } diff --git 
a/modules/data.atmosphere/R/tdm_generate_subdaily_models.R b/modules/data.atmosphere/R/tdm_generate_subdaily_models.R index 87e45b405d2..0957048cdb3 100644 --- a/modules/data.atmosphere/R/tdm_generate_subdaily_models.R +++ b/modules/data.atmosphere/R/tdm_generate_subdaily_models.R @@ -15,174 +15,201 @@ #' #' @param outfolder - directory where models will be stored *** storage required varies by size of training dataset, but prepare for >10 GB #' @param path.train - path to CF/PEcAn style training data where each year is in a separate file. -#' @param yrs.train - which years of the training data should be used for to generate the model for +#' @param yrs.train - which years of the training data should be used for to generate the model for #' the subdaily cycle. If NULL, will default to all years #' @param direction.filter - Whether the model will be filtered backward or forward in time. options = c("backward", "forward") -#' (PalEON will go backward, anybody interested in the future will go forward) +#' (PalEON will go backward, anybody interested in the future will go forward) #' @param in.prefix not used #' @param n.beta - number of betas to save from linear regression model #' @param resids - logical stating whether to pass on residual data or not (this increases both memory & storage requirements) -#' @param parallel - logical stating whether to run temporal_downscale_functions.R in parallel +#' @param parallel - logical stating whether to run temporal_downscale_functions.R in parallel #' @param n.cores - deals with parallelization -#' @param day.window - integer specifying number of days around the day being modeled you want to use data from for that +#' @param day.window - integer specifying number of days around the day being modeled you want to use data from for that #' specific hours coefficients. Must be integer because we want statistics from the same time of day #' for each day surrounding the model day -#' @param seed - seed for randomization to allow for reproducible results +#' @param seed - seed for randomization to allow for reproducible results #' @param overwrite logical: replace output file if it already exists? #' @param verbose logical, currently ignored #' @param print.progress - print progress bar? 
(gets passed through) #' @export #' -gen.subdaily.models <- function(outfolder, path.train, yrs.train, direction.filter="forward", in.prefix, - n.beta, day.window, seed=Sys.time(), resids = FALSE, parallel = FALSE, n.cores = NULL, overwrite = TRUE, - verbose = FALSE, print.progress=FALSE) { - - # pb.index <- 1 - # pb <- utils::txtProgressBar(min = 1, max = 8, style = 3) - - # Just in case we have a capitalization or singular/plural issue - if(direction.filter %in% toupper( c("backward", "backwards"))) direction.filter="backward" - - # ----- 1.0 Read data & Make time stamps ---------- Load the data - - vars.info <- data.frame(CF.name = c("air_temperature", "precipitation_flux", "air_temperature_max", - "air_temperature_min", "surface_downwelling_shortwave_flux_in_air", - "surface_downwelling_longwave_flux_in_air", "air_pressure", "specific_humidity", - "eastward_wind", "northward_wind", "wind_speed")) - - - - # Getting a list of all the available files and then subsetting to just the ones we - # actually want to use - files.train <- dir(path.train, ".nc") - yrs.file <- strsplit(files.train, "[.]") - yrs.file <- matrix(unlist(yrs.file), ncol=length(yrs.file[[1]]), byrow=T) - yrs.file <- as.numeric(yrs.file[,ncol(yrs.file)-1]) # Assumes year is always last thing before the file extension - - if(!is.null(yrs.train)){ - files.train <- files.train[which(yrs.file %in% yrs.train)] - yrs.file <- yrs.file[which(yrs.file %in% yrs.train)] - } - - met.out <- align.met(train.path=path.train, source.path=path.train, - yrs.train=yrs.file, yrs.source=yrs.file[1], - n.ens=1, seed=seed, pair.mems = FALSE) - - dat.train <- data.frame(year = met.out$dat.train$time$Year, - doy = met.out$dat.train$time$DOY, - date = met.out$dat.train$time$Date, - hour = met.out$dat.train$time$Hour, - air_temperature = met.out$dat.train$air_temperature, - precipitation_flux = met.out$dat.train$precipitation_flux, - surface_downwelling_shortwave_flux_in_air = met.out$dat.train$surface_downwelling_shortwave_flux_in_air, - surface_downwelling_longwave_flux_in_air = met.out$dat.train$surface_downwelling_longwave_flux_in_air, - air_pressure = met.out$dat.train$air_pressure, - specific_humidity = met.out$dat.train$specific_humidity - ) - - if(!"wind_speed" %in% names(met.out$dat.train)){ - dat.train$wind_speed <- sqrt(met.out$dat.train$eastward_wind^2 + met.out$dat.train$northward_wind^2) - } else { - dat.train$wind_speed <- met.out$dat.train$wind_speed - } - - # these non-standard variables help us organize our modeling approach - # Reference everything off of the earliest date; avoiding 0s because that makes life difficult - dat.train$sim.hr <- trunc(as.numeric(difftime(dat.train$date, min(dat.train$date), tz = "GMT", units = "hour")))+1 - dat.train$sim.day <- trunc(as.numeric(difftime(dat.train$date, min(dat.train$date), tz = "GMT", units = "day")))+1 - # dat.train$time.day2 <- as.integer(dat.train$time.day + 1/(48 * 2)) + 1 # Offset by half a time step to get time stamps to line up - - # ----- 1.1 Coming up with the daily means that are what we can - # use as predictors ---------- - vars.use <- vars.info$CF.name[vars.info$CF.name %in% names(dat.train)] - - train.day <- stats::aggregate(dat.train[, c("air_temperature", "precipitation_flux", - "surface_downwelling_shortwave_flux_in_air", "surface_downwelling_longwave_flux_in_air", - "air_pressure", "specific_humidity", "wind_speed")], by = dat.train[, - c("year", "doy")], FUN = mean) - names(train.day)[3:9] <- c("air_temperature_mean.day", "precipitation_flux.day", - 
"surface_downwelling_shortwave_flux_in_air.day", "surface_downwelling_longwave_flux_in_air.day", - "air_pressure.day", "specific_humidity.day", "wind_speed.day") - train.day$air_temperature_max.day <- stats::aggregate(dat.train[, c("air_temperature")], - by = dat.train[, c("year", "doy")], FUN = max)$x - train.day$air_temperature_min.day <- stats::aggregate(dat.train[, c("air_temperature")], - by = dat.train[, c("year", "doy")], FUN = min)$x - - dat.train <- merge(dat.train[, ], train.day, all.x = T, all.y = T) - - # ----- 1.2 Setting up a 1-hour lag -- smooth transitions at - # midnight NOTE: because we're filtering from the present back through - # the past, -1 will associate the closest hour that we've already done - # (midnight) with the day we're currently working on ---------- - vars.hour <- c("air_temperature", "precipitation_flux", "surface_downwelling_shortwave_flux_in_air", - "surface_downwelling_longwave_flux_in_air", "air_pressure", "specific_humidity", - "wind_speed") - vars.lag <- c("lag.air_temperature", "lag.precipitation_flux", "lag.surface_downwelling_shortwave_flux_in_air", - "lag.surface_downwelling_longwave_flux_in_air", "lag.air_pressure", - "lag.specific_humidity", "lag.wind_speed") - - # Specifying what hour we want to lag - # Note: For forward filtering, we want to associate today with tomorrow (+1 day) using the last observation of today - # For backward filtering, we want to associate today with yesterday (-1 day) using the first obs of today - met.lag <- ifelse(direction.filter=="backward", -1, +1) - lag.time <- ifelse(direction.filter=="backward", min(dat.train$hour), max(dat.train$hour)) - - lag.day <- dat.train[dat.train$hour == lag.time, c("year", "doy", "sim.day", vars.hour)] - names(lag.day)[4:ncol(lag.day)] <- vars.lag - - lag.day$lag.air_temperature_min <- stats::aggregate(dat.train[, c("air_temperature")], - by = dat.train[, c("year", "doy", "sim.day")], FUN = min)[, "x"] # Add in a lag for the next day's min temp - lag.day$lag.air_temperature_max <- stats::aggregate(dat.train[, c("air_temperature")], - by = dat.train[, c("year", "doy", "sim.day")], FUN = max)[, "x"] # Add in a lag for the next day's min temp - lag.day$sim.day <- lag.day$sim.day + met.lag # - - - dat.train <- merge(dat.train, lag.day[, c("sim.day", vars.lag, "lag.air_temperature_min", - "lag.air_temperature_max")], all.x = T) - - # ----- 1.3 Setting up a variable to 'preview' the next day's mean - # to help get smoother transitions - # NOTE: If we're filtering forward in time, -1 will associate tomorrow with our downscaling - # for today - # ---------- - vars.day <- c("air_temperature_mean.day", "air_temperature_max.day", - "air_temperature_mean.day", "precipitation_flux.day", "surface_downwelling_shortwave_flux_in_air.day", - "surface_downwelling_longwave_flux_in_air.day", "air_pressure.day", - "specific_humidity.day", "wind_speed.day") - vars.next <- c("next.air_temperature_mean", "next.air_temperature_max", - "next.air_temperature_min", "next.precipitation_flux", "next.surface_downwelling_shortwave_flux_in_air", - "next.surface_downwelling_longwave_flux_in_air", "next.air_pressure", - "next.specific_humidity", "next.wind_speed") - - next.day <- dat.train[c("year", "doy", "sim.day", vars.day)] - names(next.day)[4:ncol(next.day)] <- vars.next - next.day <- stats::aggregate(next.day[, vars.next], by = next.day[, c("year", "doy", "sim.day")], FUN = mean) - next.day$sim.day <- next.day$sim.day - met.lag - - dat.train <- merge(dat.train, next.day[, c("sim.day", vars.next)], all.x = T) - - 
# Order the data just to make life easier - dat.train <- dat.train[order(dat.train$date),] - - # ----- 1.4 calculate air_temperature_min & air_temperature_max as - # departure from mean; order data ---------- Lookign at max & min as - # departure from mean - dat.train$max.dep <- dat.train$air_temperature_max.day - dat.train$air_temperature_mean.day - dat.train$min.dep <- dat.train$air_temperature_min.day - dat.train$air_temperature_mean.day - - # ----- 2.1 Generating all the daily models, save the output as - # .Rdata files, then clear memory Note: Could save Betas as .nc files - # that we pull from as needed to save memory; but for now just leaving - # it in the .Rdata file for eas Note: To avoid propogating too much - # wonkiness in hourly data, any co-variates are at the daily level - # Note: If mod.precipitation_flux.doy doesn't run, try increasing the - # day.window for this variable. The lack of non-zero values makes it - # difficult for the linear regression model to calculate coefficients - # sometimes --------- - - temporal.downscale.functions(dat.train = dat.train, n.beta = n.beta, day.window = day.window, - resids = resids, n.cores = n.cores, - seed = seed, outfolder = outfolder, print.progress=print.progress) +gen.subdaily.models <- function( + outfolder, path.train, yrs.train, direction.filter = "forward", in.prefix, + n.beta, day.window, seed = Sys.time(), resids = FALSE, parallel = FALSE, n.cores = NULL, overwrite = TRUE, + verbose = FALSE, print.progress = FALSE) { + # pb.index <- 1 + # pb <- utils::txtProgressBar(min = 1, max = 8, style = 3) + + # Just in case we have a capitalization or singular/plural issue + if (direction.filter %in% toupper(c("backward", "backwards"))) direction.filter <- "backward" + + # ----- 1.0 Read data & Make time stamps ---------- Load the data + + vars.info <- data.frame(CF.name = c( + "air_temperature", "precipitation_flux", "air_temperature_max", + "air_temperature_min", "surface_downwelling_shortwave_flux_in_air", + "surface_downwelling_longwave_flux_in_air", "air_pressure", "specific_humidity", + "eastward_wind", "northward_wind", "wind_speed" + )) + + + + # Getting a list of all the available files and then subsetting to just the ones we + # actually want to use + files.train <- dir(path.train, ".nc") + yrs.file <- strsplit(files.train, "[.]") + yrs.file <- matrix(unlist(yrs.file), ncol = length(yrs.file[[1]]), byrow = T) + yrs.file <- as.numeric(yrs.file[, ncol(yrs.file) - 1]) # Assumes year is always last thing before the file extension + + if (!is.null(yrs.train)) { + files.train <- files.train[which(yrs.file %in% yrs.train)] + yrs.file <- yrs.file[which(yrs.file %in% yrs.train)] + } + + met.out <- align.met( + train.path = path.train, source.path = path.train, + yrs.train = yrs.file, yrs.source = yrs.file[1], + n.ens = 1, seed = seed, pair.mems = FALSE + ) + + dat.train <- data.frame( + year = met.out$dat.train$time$Year, + doy = met.out$dat.train$time$DOY, + date = met.out$dat.train$time$Date, + hour = met.out$dat.train$time$Hour, + air_temperature = met.out$dat.train$air_temperature, + precipitation_flux = met.out$dat.train$precipitation_flux, + surface_downwelling_shortwave_flux_in_air = met.out$dat.train$surface_downwelling_shortwave_flux_in_air, + surface_downwelling_longwave_flux_in_air = met.out$dat.train$surface_downwelling_longwave_flux_in_air, + air_pressure = met.out$dat.train$air_pressure, + specific_humidity = met.out$dat.train$specific_humidity + ) + + if (!"wind_speed" %in% names(met.out$dat.train)) { + 
dat.train$wind_speed <- sqrt(met.out$dat.train$eastward_wind^2 + met.out$dat.train$northward_wind^2) + } else { + dat.train$wind_speed <- met.out$dat.train$wind_speed + } + + # these non-standard variables help us organize our modeling approach + # Reference everything off of the earliest date; avoiding 0s because that makes life difficult + dat.train$sim.hr <- trunc(as.numeric(difftime(dat.train$date, min(dat.train$date), tz = "GMT", units = "hour"))) + 1 + dat.train$sim.day <- trunc(as.numeric(difftime(dat.train$date, min(dat.train$date), tz = "GMT", units = "day"))) + 1 + # dat.train$time.day2 <- as.integer(dat.train$time.day + 1/(48 * 2)) + 1 # Offset by half a time step to get time stamps to line up + + # ----- 1.1 Coming up with the daily means that are what we can + # use as predictors ---------- + vars.use <- vars.info$CF.name[vars.info$CF.name %in% names(dat.train)] + + train.day <- stats::aggregate(dat.train[, c( + "air_temperature", "precipitation_flux", + "surface_downwelling_shortwave_flux_in_air", "surface_downwelling_longwave_flux_in_air", + "air_pressure", "specific_humidity", "wind_speed" + )], by = dat.train[ + , + c("year", "doy") + ], FUN = mean) + names(train.day)[3:9] <- c( + "air_temperature_mean.day", "precipitation_flux.day", + "surface_downwelling_shortwave_flux_in_air.day", "surface_downwelling_longwave_flux_in_air.day", + "air_pressure.day", "specific_humidity.day", "wind_speed.day" + ) + train.day$air_temperature_max.day <- stats::aggregate(dat.train[, c("air_temperature")], + by = dat.train[, c("year", "doy")], FUN = max + )$x + train.day$air_temperature_min.day <- stats::aggregate(dat.train[, c("air_temperature")], + by = dat.train[, c("year", "doy")], FUN = min + )$x + + dat.train <- merge(dat.train[, ], train.day, all.x = T, all.y = T) + + # ----- 1.2 Setting up a 1-hour lag -- smooth transitions at + # midnight NOTE: because we're filtering from the present back through + # the past, -1 will associate the closest hour that we've already done + # (midnight) with the day we're currently working on ---------- + vars.hour <- c( + "air_temperature", "precipitation_flux", "surface_downwelling_shortwave_flux_in_air", + "surface_downwelling_longwave_flux_in_air", "air_pressure", "specific_humidity", + "wind_speed" + ) + vars.lag <- c( + "lag.air_temperature", "lag.precipitation_flux", "lag.surface_downwelling_shortwave_flux_in_air", + "lag.surface_downwelling_longwave_flux_in_air", "lag.air_pressure", + "lag.specific_humidity", "lag.wind_speed" + ) + + # Specifying what hour we want to lag + # Note: For forward filtering, we want to associate today with tomorrow (+1 day) using the last observation of today + # For backward filtering, we want to associate today with yesterday (-1 day) using the first obs of today + met.lag <- ifelse(direction.filter == "backward", -1, +1) + lag.time <- ifelse(direction.filter == "backward", min(dat.train$hour), max(dat.train$hour)) + + lag.day <- dat.train[dat.train$hour == lag.time, c("year", "doy", "sim.day", vars.hour)] + names(lag.day)[4:ncol(lag.day)] <- vars.lag + + lag.day$lag.air_temperature_min <- stats::aggregate(dat.train[, c("air_temperature")], + by = dat.train[, c("year", "doy", "sim.day")], FUN = min + )[, "x"] # Add in a lag for the next day's min temp + lag.day$lag.air_temperature_max <- stats::aggregate(dat.train[, c("air_temperature")], + by = dat.train[, c("year", "doy", "sim.day")], FUN = max + )[, "x"] # Add in a lag for the next day's min temp + lag.day$sim.day <- lag.day$sim.day + met.lag # + + + dat.train 
<- merge(dat.train, lag.day[, c( + "sim.day", vars.lag, "lag.air_temperature_min", + "lag.air_temperature_max" + )], all.x = T) + + # ----- 1.3 Setting up a variable to 'preview' the next day's mean + # to help get smoother transitions + # NOTE: If we're filtering forward in time, -1 will associate tomorrow with our downscaling + # for today + # ---------- + vars.day <- c( + "air_temperature_mean.day", "air_temperature_max.day", + "air_temperature_mean.day", "precipitation_flux.day", "surface_downwelling_shortwave_flux_in_air.day", + "surface_downwelling_longwave_flux_in_air.day", "air_pressure.day", + "specific_humidity.day", "wind_speed.day" + ) + vars.next <- c( + "next.air_temperature_mean", "next.air_temperature_max", + "next.air_temperature_min", "next.precipitation_flux", "next.surface_downwelling_shortwave_flux_in_air", + "next.surface_downwelling_longwave_flux_in_air", "next.air_pressure", + "next.specific_humidity", "next.wind_speed" + ) + + next.day <- dat.train[c("year", "doy", "sim.day", vars.day)] + names(next.day)[4:ncol(next.day)] <- vars.next + next.day <- stats::aggregate(next.day[, vars.next], by = next.day[, c("year", "doy", "sim.day")], FUN = mean) + next.day$sim.day <- next.day$sim.day - met.lag + + dat.train <- merge(dat.train, next.day[, c("sim.day", vars.next)], all.x = T) + + # Order the data just to make life easier + dat.train <- dat.train[order(dat.train$date), ] + + # ----- 1.4 calculate air_temperature_min & air_temperature_max as + # departure from mean; order data ---------- Looking at max & min as + # departure from mean + dat.train$max.dep <- dat.train$air_temperature_max.day - dat.train$air_temperature_mean.day + dat.train$min.dep <- dat.train$air_temperature_min.day - dat.train$air_temperature_mean.day + + # ----- 2.1 Generating all the daily models, save the output as + # .Rdata files, then clear memory Note: Could save Betas as .nc files + # that we pull from as needed to save memory; but for now just leaving + # it in the .Rdata file for ease Note: To avoid propagating too much + # wonkiness in hourly data, any co-variates are at the daily level + # Note: If mod.precipitation_flux.doy doesn't run, try increasing the + # day.window for this variable.
The lack of non-zero values makes it + # difficult for the linear regression model to calculate coefficients + # sometimes --------- + + temporal.downscale.functions( + dat.train = dat.train, n.beta = n.beta, day.window = day.window, + resids = resids, n.cores = n.cores, + seed = seed, outfolder = outfolder, print.progress = print.progress + ) } # Helper function diff --git a/modules/data.atmosphere/R/tdm_lm_ensemble_sims.R b/modules/data.atmosphere/R/tdm_lm_ensemble_sims.R index 5b7a04e07e5..908c3ad7fc3 100644 --- a/modules/data.atmosphere/R/tdm_lm_ensemble_sims.R +++ b/modules/data.atmosphere/R/tdm_lm_ensemble_sims.R @@ -38,17 +38,16 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags.list = NULL, lags.init = NULL, dat.train, precip.distribution, - force.sanity=TRUE, sanity.tries=25, sanity.sd=6, - seed=Sys.time(), print.progress=FALSE) { - + force.sanity = TRUE, sanity.tries = 25, sanity.sd = 6, + seed = Sys.time(), print.progress = FALSE) { # Set our random seed set.seed(seed) # Just in case we have a capitalization or singular/plural issue - if(direction.filter %in% toupper( c("backward", "backwards"))) direction.filter="backward" + if (direction.filter %in% toupper(c("backward", "backwards"))) direction.filter <- "backward" # Setting our our time indexes - if(direction.filter=="backward"){ + if (direction.filter == "backward") { days.sim <- max(dat.mod$sim.day):min(dat.mod$sim.day) lag.time <- min(dat.mod$hour) } else { @@ -58,30 +57,34 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. # Declare the variables of interest that will be called in the # overarching loop - vars.list <- c("surface_downwelling_shortwave_flux_in_air", "air_temperature", - "precipitation_flux", "surface_downwelling_longwave_flux_in_air", - "air_pressure", "specific_humidity", "wind_speed") + vars.list <- c( + "surface_downwelling_shortwave_flux_in_air", "air_temperature", + "precipitation_flux", "surface_downwelling_longwave_flux_in_air", + "air_pressure", "specific_humidity", "wind_speed" + ) # Data info that will be used to help organize dataframe for # downscaling - dat.info <- c("sim.day", "year", "doy", "hour", "air_temperature_max.day", - "air_temperature_min.day", "precipitation_flux.day", "surface_downwelling_shortwave_flux_in_air.day", - "surface_downwelling_longwave_flux_in_air.day", "air_pressure.day", - "specific_humidity.day", "wind_speed.day", "next.air_temperature_max", - "next.air_temperature_min", "next.precipitation_flux", "next.surface_downwelling_shortwave_flux_in_air", - "next.surface_downwelling_longwave_flux_in_air", "next.air_pressure", - "next.specific_humidity", "next.wind_speed") + dat.info <- c( + "sim.day", "year", "doy", "hour", "air_temperature_max.day", + "air_temperature_min.day", "precipitation_flux.day", "surface_downwelling_shortwave_flux_in_air.day", + "surface_downwelling_longwave_flux_in_air.day", "air_pressure.day", + "specific_humidity.day", "wind_speed.day", "next.air_temperature_max", + "next.air_temperature_min", "next.precipitation_flux", "next.surface_downwelling_shortwave_flux_in_air", + "next.surface_downwelling_longwave_flux_in_air", "next.air_pressure", + "next.specific_humidity", "next.wind_speed" + ) # # Set progress bar - if(print.progress==TRUE){ + if (print.progress == TRUE) { pb.index <- 1 - pb <- utils::txtProgressBar(min = 1, max = length(vars.list)*length(days.sim), style = 3) + pb <- utils::txtProgressBar(min = 1, max = length(vars.list) * length(days.sim), style = 3) 
utils::setTxtProgressBar(pb, pb.index) } # Figure out if we need to extract the approrpiate if (is.null(lags.list) & is.null(lags.init)) { - PEcAn.logger::logger.error("lags.init & lags.list are NULL, this is a required argument") + PEcAn.logger::logger.error("lags.init & lags.list are NULL, this is a required argument") } if (is.null(lags.init)) { lags.init <- lags.list[[unique(dat.mod$ens.day)]] @@ -92,20 +95,20 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. # propogated dat.sim <- list() - # ------ Beginning of Downscaling For Loop + # ------ Beginning of Downscaling For Loop for (v in vars.list) { # Initalize our ouroutput - dat.sim[[v]] <- array(dim=c(nrow(dat.mod), n.ens)) + dat.sim[[v]] <- array(dim = c(nrow(dat.mod), n.ens)) # create column propagation list and betas progagation list - cols.list <- array(dim=c(length(days.sim), n.ens)) # An array with number of days x number of ensembles - rows.beta <- vector(length=n.ens) # A vector that ends up being the length of the number of our days + cols.list <- array(dim = c(length(days.sim), n.ens)) # An array with number of days x number of ensembles + rows.beta <- vector(length = n.ens) # A vector that ends up being the length of the number of our days # This gives us a for (i in seq_len(length(days.sim))) { cols.tem <- sample(1:n.ens, n.ens, replace = TRUE) - cols.list[i,] <- cols.tem + cols.list[i, ] <- cols.tem } # Read in the first linear regression model @@ -139,9 +142,11 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. thresh.swdown <- stats::quantile(dat.train$surface_downwelling_shortwave_flux_in_air[dat.train$surface_downwelling_shortwave_flux_in_air > 0], 0.05) - hrs.day <- unique(dat.train$time[dat.train$time$DOY == day.now & - dat.train$surface_downwelling_shortwave_flux_in_air > thresh.swdown, - "Hour"]) + hrs.day <- unique(dat.train$time[ + dat.train$time$DOY == day.now & + dat.train$surface_downwelling_shortwave_flux_in_air > thresh.swdown, + "Hour" + ]) rows.mod <- which(dat.mod$sim.day == days.sim[i] & dat.mod$hour %in% hrs.day) dat.temp <- dat.mod[rows.mod, dat.info] @@ -150,13 +155,11 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. if (i == 1) { sim.lag <- utils::stack(lags.init[[v]]) names(sim.lag) <- c(paste0("lag.", v), "ens") - } else { - sim.lag <- utils::stack(data.frame(array(0,dim = c(1, ncol(dat.sim[[v]]))))) + sim.lag <- utils::stack(data.frame(array(0, dim = c(1, ncol(dat.sim[[v]]))))) names(sim.lag) <- c(paste0("lag.", v), "ens") } dat.temp <- merge(dat.temp, sim.lag, all.x = TRUE) - } else if (v == "air_temperature") { dat.temp <- dat.mod[rows.now, dat.info] @@ -165,21 +168,23 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. 
sim.lag <- utils::stack(lags.init$air_temperature) names(sim.lag) <- c("lag.air_temperature", "ens") - sim.lag$lag.air_temperature_min <- utils::stack(lags.init$air_temperature_min)[,1] - sim.lag$lag.air_temperature_max <- utils::stack(lags.init$air_temperature_max)[,1] + sim.lag$lag.air_temperature_min <- utils::stack(lags.init$air_temperature_min)[, 1] + sim.lag$lag.air_temperature_max <- utils::stack(lags.init$air_temperature_max)[, 1] } else { - sim.lag <- utils::stack(data.frame(array(dat.sim[["air_temperature"]][dat.mod$sim.day == (days.sim[i-1]) & - dat.mod$hour == lag.time, ], - dim = c(1, ncol(dat.sim$air_temperature))))) + sim.lag <- utils::stack(data.frame(array( + dat.sim[["air_temperature"]][dat.mod$sim.day == (days.sim[i - 1]) & + dat.mod$hour == lag.time, ], + dim = c(1, ncol(dat.sim$air_temperature)) + ))) names(sim.lag) <- c("lag.air_temperature", "ens") - sim.lag$lag.air_temperature_min <- utils::stack(apply(data.frame(dat.sim[["air_temperature"]][dat.mod$sim.day == days.sim[i-1], ]), 2, min))[, 1] - sim.lag$lag.air_temperature_max <- utils::stack(apply(data.frame(dat.sim[["air_temperature"]][dat.mod$sim.day == days.sim[i-1], ]), 2, max))[, 1] + sim.lag$lag.air_temperature_min <- utils::stack(apply(data.frame(dat.sim[["air_temperature"]][dat.mod$sim.day == days.sim[i - 1], ]), 2, min))[, 1] + sim.lag$lag.air_temperature_max <- utils::stack(apply(data.frame(dat.sim[["air_temperature"]][dat.mod$sim.day == days.sim[i - 1], ]), 2, max))[, 1] } dat.temp <- merge(dat.temp, sim.lag, all.x = TRUE) } else if (v == "precipitation_flux") { dat.temp <- dat.mod[rows.now, dat.info] - dat.temp[,v] <- 99999 + dat.temp[, v] <- 99999 dat.temp$rain.prop <- 99999 day.now <- unique(dat.temp$doy) @@ -189,11 +194,12 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. if (i == 1) { sim.lag <- utils::stack(lags.init[[v]]) names(sim.lag) <- c(paste0("lag.", v), "ens") - } else { - sim.lag <- utils::stack(data.frame(array(dat.sim[[v]][dat.mod$sim.day == days.sim[i-1] & - dat.mod$hour == lag.time, ], - dim = c(1, ncol(dat.sim[[v]]))))) + sim.lag <- utils::stack(data.frame(array( + dat.sim[[v]][dat.mod$sim.day == days.sim[i - 1] & + dat.mod$hour == lag.time, ], + dim = c(1, ncol(dat.sim[[v]])) + ))) names(sim.lag) <- c(paste0("lag.", v), "ens") } dat.temp <- merge(dat.temp, sim.lag, all.x = TRUE) @@ -205,33 +211,36 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. 
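Throughout lm_ensemble_sims the per-ensemble lag values are reshaped with utils::stack() before being merged back onto dat.temp; a standalone sketch of that idiom, using made-up temperatures for three hypothetical ensemble members, shows the long format it produces.

# Hypothetical previous-hour values for three ensemble members (Kelvin)
lags.demo <- data.frame(X1 = 284.2, X2 = 285.0, X3 = 283.7)
sim.lag <- utils::stack(lags.demo) # long format: one row per ensemble member
names(sim.lag) <- c("lag.air_temperature", "ens")
sim.lag
#   lag.air_temperature ens
# 1               284.2  X1
# 2               285.0  X2
# 3               283.7  X3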
if (i == 1) { sim.lag <- utils::stack(lags.init[[v]]) names(sim.lag) <- c(paste0("lag.", v), "ens") - } else { - sim.lag <- utils::stack(data.frame(array(dat.sim[[v]][dat.mod$sim.day == days.sim[i-1] & - dat.mod$hour == lag.time, ], - dim = c(1, ncol(dat.sim[[v]]))))) + sim.lag <- utils::stack(data.frame(array( + dat.sim[[v]][dat.mod$sim.day == days.sim[i - 1] & + dat.mod$hour == lag.time, ], + dim = c(1, ncol(dat.sim[[v]])) + ))) names(sim.lag) <- c(paste0("lag.", v), "ens") } dat.temp <- merge(dat.temp, sim.lag, all.x = TRUE) } # End special formatting # Create dummy value - dat.temp[,v] <- 99999 + dat.temp[, v] <- 99999 # Creating some necessary dummy variable names vars.sqrt <- c("surface_downwelling_longwave_flux_in_air", "wind_speed") vars.log <- c("specific_humidity") if (v %in% vars.sqrt) { - dat.temp[,paste0("sqrt(",v,")")] <- sqrt(dat.temp[,v]) + dat.temp[, paste0("sqrt(", v, ")")] <- sqrt(dat.temp[, v]) } else if (v %in% vars.log) { - dat.temp[,paste0("log(",v,")")] <- log(dat.temp[,v]) + dat.temp[, paste0("log(", v, ")")] <- log(dat.temp[, v]) } # Load the saved model - model.file <- file.path(path.model, v, paste0("model_", v, "_", day.now, - ".Rdata")) - if(file.exists(model.file)) { - env = new.env() + model.file <- file.path(path.model, v, paste0( + "model_", v, "_", day.now, + ".Rdata" + )) + if (file.exists(model.file)) { + env <- new.env() load(model.file, envir = env) mod.save <- env$mod.save } @@ -239,32 +248,31 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. # Pull coefficients (betas) from our saved matrix # for (i in seq_len(length(days.sim))) { - # betas.tem <- sample(1:(n.beta-n.ens), 1, replace = TRUE) - # rows.beta[i] <- betas.tem + # betas.tem <- sample(1:(n.beta-n.ens), 1, replace = TRUE) + # rows.beta[i] <- betas.tem # } # rows.beta <- as.numeric(rows.beta) # n.new <- ifelse(n.ens==1, 10, n.ens) # If we're not creating an ensemble, we'll add a mean step to remove chance of odd values n.new <- n.ens cols.redo <- 1:n.new - sane.attempt=0 + sane.attempt <- 0 betas_nc <- ncdf4::nc_open(file.path(path.model, v, paste0("betas_", v, "_", day.now, ".nc"))) col.beta <- betas_nc$var[[1]]$dim[[2]]$len # number of coefficients - while(n.new>0 & sane.attempt <= sanity.tries){ - - if(n.ens==1){ - Rbeta <- matrix(mod.save$coef, ncol=col.beta) + while (n.new > 0 & sane.attempt <= sanity.tries) { + if (n.ens == 1) { + Rbeta <- matrix(mod.save$coef, ncol = col.beta) } else { - betas.tem <- sample(1:max((n.beta-n.new), 1), 1, replace = TRUE) - Rbeta <- matrix(ncdf4::ncvar_get(betas_nc, paste(day.now), c(betas.tem,1), c(n.new,col.beta)), ncol = col.beta) + betas.tem <- sample(1:max((n.beta - n.new), 1), 1, replace = TRUE) + Rbeta <- matrix(ncdf4::ncvar_get(betas_nc, paste(day.now), c(betas.tem, 1), c(n.new, col.beta)), ncol = col.beta) } - if(ncol(Rbeta)!=col.beta) Rbeta <- t(Rbeta) + if (ncol(Rbeta) != col.beta) Rbeta <- t(Rbeta) # If we're starting from scratch, set up the prediction matrix - if(sane.attempt==0){ - dat.pred <- matrix(nrow=nrow(dat.temp), ncol=n.ens) + if (sane.attempt == 0) { + dat.pred <- matrix(nrow = nrow(dat.temp), ncol = n.ens) } # if(n.ens==1){ @@ -273,32 +281,34 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. 
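The Rbeta assignment above relies on the start/count arguments of ncdf4::ncvar_get() to read a contiguous block of saved regression coefficients out of the betas_<v>_<day.now>.nc file. Below is a self-contained sketch of the same access pattern against a throwaway file; the variable name, dimension sizes, and random values are arbitrary stand-ins, not the structure the downscaling code actually writes.

library(ncdf4)
# Toy 2-D variable: 100 saved coefficient draws x 5 regression terms
dim.beta <- ncdim_def("beta", units = "", vals = 1:100)
dim.coef <- ncdim_def("coef", units = "", vals = 1:5)
def <- ncvar_def("betas", units = "", dim = list(dim.beta, dim.coef),
                 missval = -9999, prec = "double")
nc <- nc_create(tempfile(fileext = ".nc"), def)
ncvar_put(nc, def, matrix(stats::rnorm(500), nrow = 100, ncol = 5))
# Same idiom as Rbeta above: start at a sampled row, read n.new consecutive
# draws and every coefficient column
betas.tem <- 42
n.new <- 10
Rbeta <- matrix(ncvar_get(nc, "betas", start = c(betas.tem, 1), count = c(n.new, 5)),
                ncol = 5)
nc_close(nc)
dim(Rbeta) # 10 x 5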
# n.ens = n.new) # dat.pred[,1] <- apply(dat.dum, 1, mean) # } else { - dat.pred[,cols.redo] <- subdaily_pred(newdata = dat.temp, model.predict = mod.save, - Rbeta = Rbeta, resid.err = FALSE, model.resid = NULL, Rbeta.resid = NULL, - n.ens = n.new) + dat.pred[, cols.redo] <- subdaily_pred( + newdata = dat.temp, model.predict = mod.save, + Rbeta = Rbeta, resid.err = FALSE, model.resid = NULL, Rbeta.resid = NULL, + n.ens = n.new + ) # } # Occasionally specific humidty may go serioulsy off the rails - if(v=="specific_humidity" & (max(dat.pred)>log(40e-3) | min(dat.pred)log(40e-3)] <- log(40e-3) - dat.pred[dat.pred log(40e-3) | min(dat.pred) < log(1e-6))) { + dat.pred[dat.pred > log(40e-3)] <- log(40e-3) + dat.pred[dat.pred < log(1e-6)] <- log(1e-6) } # Precipitation Re-distribute negative probabilities -- add randomly to # make more peaky If there's no rain on this day, skip the # re-proportioning if (v == "precipitation_flux") { - # if(n.ens == 1) next cols.check <- cols.redo - if (max(dat.pred[,cols.check]) > 0) { - tmp <- 1:nrow(dat.pred) # A dummy vector of the + if (max(dat.pred[, cols.check]) > 0) { + tmp <- 1:nrow(dat.pred) # A dummy vector of the for (j in cols.check) { if (min(dat.pred[, j]) >= 0) next # skip if no negative rain to redistribute rows.neg <- which(dat.pred[, j] < 0) rows.add <- sample(tmp[!tmp %in% rows.neg], length(rows.neg), - replace = TRUE) + replace = TRUE + ) # Redistribute days with negative rain for (z in 1:length(rows.neg)) { @@ -316,7 +326,7 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. # Total Daily precip = precipitaiton_flux.day*24*60*60 # precip.day <- dat.temp$precipitation_flux.day[1]*nrow(dat.temp) precip.day <- dat.temp$precipitation_flux.day[1] - dat.pred[,cols.check] <- dat.pred[,cols.check] * precip.day + dat.pred[, cols.check] <- dat.pred[, cols.check] * precip.day } # End Precip re-propogation # ----- @@ -329,39 +339,38 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. 
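The sanity screens that follow all share one pattern: apply() over the ensemble columns of dat.pred, flagging any member that violates either a hard physical bound or a mean plus/minus sanity.sd * sd band, and collecting those column indices in cols.redo for re-simulation. A stripped-down sketch of that pattern with made-up numbers (the 184 K / 331 K limits are the air-temperature bounds used below):

set.seed(1)
# 24 hourly values x 5 fake ensemble members, roughly 290 K
dat.pred <- matrix(stats::rnorm(24 * 5, mean = 290, sd = 5), nrow = 24, ncol = 5)
dat.pred[, 3] <- dat.pred[, 3] + 60 # push one member out of bounds on purpose
filter.mean <- 290
filter.sd <- 5
sanity.sd <- 6
cols.redo <- which(apply(dat.pred, 2, function(x) {
  min(x) < 184 | max(x) > 331 |
    min(x) < filter.mean - sanity.sd * filter.sd |
    max(x) > filter.mean + sanity.sd * filter.sd
}))
cols.redo # members that would be redrawn; here only column 3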
# vars.log <- c("specific_humidity") # Determine which ensemble members fail sanity checks - #don't forget to check for transformed variables + # don't forget to check for transformed variables # vars.transform <- c("surface_downwelling_shortwave_flux_in_air", "specific_humidity", "surface_downwelling_longwave_flux_in_air", "wind_speed") # dat.sim[[v]][rows.now, j] - if(i>14){ - if(direction.filter=="backward"){ - rows.filter <- which(dat.mod$sim.day >= days.sim[i] & dat.mod$sim.day <= days.sim[i]+14) + if (i > 14) { + if (direction.filter == "backward") { + rows.filter <- which(dat.mod$sim.day >= days.sim[i] & dat.mod$sim.day <= days.sim[i] + 14) } else { - rows.filter <- which(dat.mod$sim.day <= days.sim[i] & dat.mod$sim.day >= days.sim[i]-14) + rows.filter <- which(dat.mod$sim.day <= days.sim[i] & dat.mod$sim.day >= days.sim[i] - 14) } - if(n.ens>1){ - dat.filter <- utils::stack(dat.sim[[v]][rows.filter,])[,1] + if (n.ens > 1) { + dat.filter <- utils::stack(dat.sim[[v]][rows.filter, ])[, 1] } else { - dat.filter <- dat.sim[[v]][rows.filter,] + dat.filter <- dat.sim[[v]][rows.filter, ] } - filter.mean <- mean(dat.filter, na.rm=T) - filter.sd <- stats::sd(dat.filter, na.rm=T) + filter.mean <- mean(dat.filter, na.rm = T) + filter.sd <- stats::sd(dat.filter, na.rm = T) } else { - - if(v %in% vars.sqrt){ - filter.mean <- mean(c(dat.pred^2, utils::stack(dat.sim[[v]])[,1]), na.rm=T) - filter.sd <- stats::sd(c(dat.pred^2, utils::stack(dat.sim[[v]])[,1]), na.rm=T) - } else if(v %in% vars.log){ - filter.mean <- mean(c(exp(dat.pred), utils::stack(dat.sim[[v]])[,1]), na.rm=T) - filter.sd <- stats::sd(c(exp(dat.pred), utils::stack(dat.sim[[v]])[,1]), na.rm=T) + if (v %in% vars.sqrt) { + filter.mean <- mean(c(dat.pred^2, utils::stack(dat.sim[[v]])[, 1]), na.rm = T) + filter.sd <- stats::sd(c(dat.pred^2, utils::stack(dat.sim[[v]])[, 1]), na.rm = T) + } else if (v %in% vars.log) { + filter.mean <- mean(c(exp(dat.pred), utils::stack(dat.sim[[v]])[, 1]), na.rm = T) + filter.sd <- stats::sd(c(exp(dat.pred), utils::stack(dat.sim[[v]])[, 1]), na.rm = T) } else { - filter.mean <- mean(c(dat.pred, utils::stack(dat.sim[[v]])[,1]), na.rm=T) - filter.sd <- stats::sd(c(dat.pred, utils::stack(dat.sim[[v]])[,1]), na.rm=T) + filter.mean <- mean(c(dat.pred, utils::stack(dat.sim[[v]])[, 1]), na.rm = T) + filter.sd <- stats::sd(c(dat.pred, utils::stack(dat.sim[[v]])[, 1]), na.rm = T) } } - if(v %in% c("air_temperature", "air_temperature_maximum", "air_temperature_minimum")){ + if (v %in% c("air_temperature", "air_temperature_maximum", "air_temperature_minimum")) { # max air temp = 70 C; hottest temperature from sattellite; very ridiculous # min air temp = -95 C; colder than coldest natural temperature recorded in Antarctica @@ -370,162 +379,159 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. # we'll allow some drift outside of what we have for our max/min, but not too much; # - right now general rule of thumb of 2 degrees leeway on the prescribed - cols.redo <- which(apply(dat.pred, 2, function(x) min(x) < 184 | max(x) > 331 | - # min(x) < tmin.ens-2 | max(x) > tmax.ens+2 | - min(x) < filter.mean-sanity.sd*filter.sd | max(x) > filter.mean+sanity.sd*filter.sd - )) + cols.redo <- which(apply(dat.pred, 2, function(x) { + min(x) < 184 | max(x) > 331 | + # min(x) < tmin.ens-2 | max(x) > tmax.ens+2 | + min(x) < filter.mean - sanity.sd * filter.sd | max(x) > filter.mean + sanity.sd * filter.sd + })) } - #"specific_humidity", - if(v == "specific_humidity"){ #LOG!! 
+ # "specific_humidity", + if (v == "specific_humidity") { # LOG!! # Based on google, it looks like values of 30 g/kg can occur in the tropics, so lets go above that # Also, the minimum humidity can't be 0 so lets just make it extremely dry; lets set this for 1 g/Mg - cols.redo <- which(apply(dat.pred, 2, function(x) min(exp(x)) < 1e-6 | max(exp(x)) > 3.2e-2 | - min(exp(x)) < filter.mean-sanity.sd*filter.sd | - max(exp(x)) > filter.mean+sanity.sd*filter.sd - ) ) + cols.redo <- which(apply(dat.pred, 2, function(x) { + min(exp(x)) < 1e-6 | max(exp(x)) > 3.2e-2 | + min(exp(x)) < filter.mean - sanity.sd * filter.sd | + max(exp(x)) > filter.mean + sanity.sd * filter.sd + })) } - #"surface_downwelling_shortwave_flux_in_air", - if(v == "surface_downwelling_shortwave_flux_in_air"){ + # "surface_downwelling_shortwave_flux_in_air", + if (v == "surface_downwelling_shortwave_flux_in_air") { # Based on something found from Columbia, average Radiative flux at ATM is 1360 W/m2, so for a daily average it should be less than this # Lets round 1360 and divide that by 2 (because it should be a daily average) and conservatively assume albedo of 20% (average value is more like 30) # Source http://eesc.columbia.edu/courses/ees/climate/lectures/radiation/ dat.pred[dat.pred < 0] <- 0 - cols.redo <- which(apply(dat.pred, 2, function(x) max(x) > 1500 | min(x) < filter.mean-sanity.sd*filter.sd | - max(x) > filter.mean+sanity.sd*filter.sd - )) + cols.redo <- which(apply(dat.pred, 2, function(x) { + max(x) > 1500 | min(x) < filter.mean - sanity.sd * filter.sd | + max(x) > filter.mean + sanity.sd * filter.sd + })) } - if(v == "air_pressure"){ + if (v == "air_pressure") { # According to wikipedia the highest barometric pressure ever recorded was 1085.7 hPa = 1085.7*100 Pa; Dead sea has average pressure of 1065 hPa # - Lets round up to 1100 hPA # Also according to Wikipedia, the lowest non-tornadic pressure ever measured was 870 hPA - cols.redo <- which(apply(dat.pred, 2, function(x) min(x) < 45000 | max(x) > 110000 | - min(x) < filter.mean-sanity.sd*filter.sd | - max(x) > filter.mean+sanity.sd*filter.sd - )) + cols.redo <- which(apply(dat.pred, 2, function(x) { + min(x) < 45000 | max(x) > 110000 | + min(x) < filter.mean - sanity.sd * filter.sd | + max(x) > filter.mean + sanity.sd * filter.sd + })) } - if(v == "surface_downwelling_longwave_flux_in_air"){ # SQRT + if (v == "surface_downwelling_longwave_flux_in_air") { # SQRT # A NASA presentation has values topping out ~300 and min ~0: https://ceres.larc.nasa.gov/documents/STM/2003-05/pdf/smith.pdf # A random journal article has 130 - 357.3: http://www.tandfonline.com/doi/full/10.1080/07055900.2012.760441 # Based on what what CRUNCEP did, lets assume these are annual averages, so we can do 50% above it and for the min, in case we run tropics, lets go 130/4 # ED2 sanity checks bound longwave at 40 & 600 - cols.redo <- which(apply(dat.pred, 2, function(x) min(x^2) < 40 | max(x^2) > 600 | - min(x^2) < filter.mean-sanity.sd*filter.sd | - max(x^2) > filter.mean+sanity.sd*filter.sd - )) - + cols.redo <- which(apply(dat.pred, 2, function(x) { + min(x^2) < 40 | max(x^2) > 600 | + min(x^2) < filter.mean - sanity.sd * filter.sd | + max(x^2) > filter.mean + sanity.sd * filter.sd + })) } - if(v == "wind_speed"){ + if (v == "wind_speed") { # According to wikipedia, the hgihest wind speed ever recorded is a gust of 113 m/s; the maximum 5-mind wind speed is 49 m/s - cols.redo <- which(apply(dat.pred, 2, function(x) max(x^2) > 85 | - min(x^2) < filter.mean-sanity.sd*filter.sd | - 
max(x^2) > filter.mean+sanity.sd*filter.sd - )) + cols.redo <- which(apply(dat.pred, 2, function(x) { + max(x^2) > 85 | + min(x^2) < filter.mean - sanity.sd * filter.sd | + max(x^2) > filter.mean + sanity.sd * filter.sd + })) } - if(v == "precipitation_flux"){ + if (v == "precipitation_flux") { # According to wunderground, ~16" in 1 hr is the max # https://www.wunderground.com/blog/weatherhistorian/what-is-the-most-rain-to-ever-fall-in-one-minute-or-one-hour.html # 16; x25.4 = inches to mm; /(60*60) = hr to sec # Updated to ED2 max: 400 mm/hr - cols.redo <- which(apply(dat.pred, 2, function(x) max(x) > 0.1111 - )) + cols.redo <- which(apply(dat.pred, 2, function(x) max(x) > 0.1111)) } - n.new = length(cols.redo) - if(force.sanity){ - sane.attempt = sane.attempt + 1 + n.new <- length(cols.redo) + if (force.sanity) { + sane.attempt <- sane.attempt + 1 } else { # If we're not forcing sanity, just stop now - sane.attempt=sanity.tries + 1 + sane.attempt <- sanity.tries + 1 } # ----- } # End while case # If we ran out of attempts, but want to foce sanity, do so now - if(force.sanity & n.new>0){ + if (force.sanity & n.new > 0) { # If we're still struggling, but we have at least some workable columns, lets just duplicate those: - if(n.new 273.15+70 warning(paste("Forcing Sanity:", v)) - if(min(dat.pred) < max(filter.mean-sanity.sd*filter.sd)){ - qtrim <- max(filter.mean-sanity.sd*filter.sd) + if (min(dat.pred) < max(filter.mean - sanity.sd * filter.sd)) { + qtrim <- max(filter.mean - sanity.sd * filter.sd) dat.pred[dat.pred < qtrim] <- qtrim } - if(max(dat.pred) > min(1360, filter.mean+sanity.sd*filter.sd)){ - qtrim <- min(1360, filter.mean+sanity.sd*filter.sd) + if (max(dat.pred) > min(1360, filter.mean + sanity.sd * filter.sd)) { + qtrim <- min(1360, filter.mean + sanity.sd * filter.sd) dat.pred[dat.pred > qtrim] <- qtrim } - - } else if(v=="air_temperature"){ + } else if (v == "air_temperature") { # Shouldn't be a huge problem, but it's not looking good # min(x) < 273.15-95 | max(x) > 273.15+70 warning(paste("Forcing Sanity:", v)) - if(min(dat.pred) < max(273.15-95, filter.mean-sanity.sd*filter.sd )){ - qtrim <- max(273.15-95, filter.mean-sanity.sd*filter.sd) + if (min(dat.pred) < max(273.15 - 95, filter.mean - sanity.sd * filter.sd)) { + qtrim <- max(273.15 - 95, filter.mean - sanity.sd * filter.sd) dat.pred[dat.pred < qtrim] <- qtrim } - if(max(dat.pred) > min(273.15+70, filter.mean+sanity.sd*filter.sd)){ - qtrim <- min(273.15+70, filter.mean+sanity.sd*filter.sd) + if (max(dat.pred) > min(273.15 + 70, filter.mean + sanity.sd * filter.sd)) { + qtrim <- min(273.15 + 70, filter.mean + sanity.sd * filter.sd) dat.pred[dat.pred > qtrim] <- qtrim } - - } else if(v=="air_pressure"){ + } else if (v == "air_pressure") { # A known problem child warning(paste("Forcing Sanity:", v)) - if(min(dat.pred) < max(870*100, filter.mean-sanity.sd*filter.sd )){ - qtrim <- max(870*100, filter.mean-sanity.sd*filter.sd) + if (min(dat.pred) < max(870 * 100, filter.mean - sanity.sd * filter.sd)) { + qtrim <- max(870 * 100, filter.mean - sanity.sd * filter.sd) dat.pred[dat.pred < qtrim] <- qtrim } - if(max(dat.pred) > min(1100*100, filter.mean+sanity.sd*filter.sd)){ - qtrim <- min(1100*100, filter.mean+sanity.sd*filter.sd) + if (max(dat.pred) > min(1100 * 100, filter.mean + sanity.sd * filter.sd)) { + qtrim <- min(1100 * 100, filter.mean + sanity.sd * filter.sd) dat.pred[dat.pred > qtrim] <- qtrim } - - } else if(v=="surface_downwelling_longwave_flux_in_air"){ + } else if (v == "surface_downwelling_longwave_flux_in_air") 
{ # A known problem child # ED2 sanity checks boudn longwave at 40 & 600 warning(paste("Forcing Sanity:", v)) - if(min(dat.pred^2) < max(40, filter.mean-sanity.sd*filter.sd )){ - qtrim <- max(40, filter.mean-sanity.sd*filter.sd) + if (min(dat.pred^2) < max(40, filter.mean - sanity.sd * filter.sd)) { + qtrim <- max(40, filter.mean - sanity.sd * filter.sd) dat.pred[dat.pred^2 < qtrim] <- sqrt(qtrim) } - if(max(dat.pred^2) > min(600, filter.mean+sanity.sd*filter.sd)){ - qtrim <- min(600, filter.mean+sanity.sd*filter.sd) + if (max(dat.pred^2) > min(600, filter.mean + sanity.sd * filter.sd)) { + qtrim <- min(600, filter.mean + sanity.sd * filter.sd) dat.pred[dat.pred^2 > qtrim] <- sqrt(qtrim) } - - } else if(v=="specific_humidity") { + } else if (v == "specific_humidity") { warning(paste("Forcing Sanity:", v)) - if(min(exp(dat.pred)) < max(1e-6, filter.mean-sanity.sd*filter.sd )){ - qtrim <- max(1e-6, filter.mean-sanity.sd*filter.sd) + if (min(exp(dat.pred)) < max(1e-6, filter.mean - sanity.sd * filter.sd)) { + qtrim <- max(1e-6, filter.mean - sanity.sd * filter.sd) dat.pred[exp(dat.pred) < qtrim] <- log(qtrim) } - if(max(exp(dat.pred)) > min(30e-3, filter.mean+sanity.sd*filter.sd)){ - qtrim <- min(40e-3, filter.mean+sanity.sd*filter.sd) + if (max(exp(dat.pred)) > min(30e-3, filter.mean + sanity.sd * filter.sd)) { + qtrim <- min(40e-3, filter.mean + sanity.sd * filter.sd) dat.pred[exp(dat.pred) > qtrim] <- log(qtrim) } - - } else if(v=="wind_speed"){ + } else if (v == "wind_speed") { # A known problem child warning(paste("Forcing Sanity:", v)) # if(min(dat.pred^2) < max(0, filter.mean-sanity.sd*filter.sd )){ # qtrim <- max(0, 1) # dat.pred[dat.pred < qtrim] <- qtrim # } - if(max(dat.pred^2) > min(50, filter.mean+sanity.sd*filter.sd)){ - qtrim <- min(50, filter.mean+sanity.sd*filter.sd) + if (max(dat.pred^2) > min(50, filter.mean + sanity.sd * filter.sd)) { + qtrim <- min(50, filter.mean + sanity.sd * filter.sd) dat.pred[dat.pred^2 > qtrim] <- sqrt(qtrim) } - } else { - stop(paste("Unable to produce a sane prediction:", v, "- day", day.now, "; problem child =", paste(cols.redo, collapse=" "))) + stop(paste("Unable to produce a sane prediction:", v, "- day", day.now, "; problem child =", paste(cols.redo, collapse = " "))) } - } } # End force sanity @@ -544,41 +550,41 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. 
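Each force-sanity branch above reduces to the same operation: clip the ensemble draws to the intersection of a hard physical range and a filter.mean +/- sanity.sd * filter.sd window, applied on the modelled scale (sqrt or log for the transformed variables). A generic sketch of that clamp, with hypothetical window statistics and the bound k standing in for sanity.sd:

# Sketch: clamp x to [max(lo.phys, mu - k * s), min(hi.phys, mu + k * s)]
clamp_sane <- function(x, lo.phys, hi.phys, mu, s, k) {
  lo <- max(lo.phys, mu - k * s)
  hi <- min(hi.phys, mu + k * s)
  pmin(pmax(x, lo), hi)
}

# e.g. air pressure in Pa, using the 870-1100 hPa limits quoted in the comments above
clamp_sane(c(80000, 120000, 99000), lo.phys = 870 * 100, hi.phys = 1100 * 100,
           mu = 98000, s = 1500, k = 8)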
# Re-distribute precip so we don't get the constant drizzle problem # -- this could go earlier, but I'm being lazy because I don't want to mess with cols.redo # ---------- - if(v == "precipitation_flux"){ + if (v == "precipitation_flux") { # Pick the number of hours to spread rain across from our observed distribution # in case we don't have a large distribution, use multiple days - if(day.now <=3) { - rain.ind <- c(1:(day.now+3), (length(precip.distribution$hrs.rain)-3+day.now):length(precip.distribution$hrs.rain)) - } else if (day.now >= length(precip.distribution$hrs.rain)-3 ){ - rain.ind <-c(day.now:length(precip.distribution$hrs.rain), 1:(length(precip.distribution$hrs.rain)-day.now+3)) + if (day.now <= 3) { + rain.ind <- c(1:(day.now + 3), (length(precip.distribution$hrs.rain) - 3 + day.now):length(precip.distribution$hrs.rain)) + } else if (day.now >= length(precip.distribution$hrs.rain) - 3) { + rain.ind <- c(day.now:length(precip.distribution$hrs.rain), 1:(length(precip.distribution$hrs.rain) - day.now + 3)) } else { - rain.ind <- (day.now-3):(day.now+3) + rain.ind <- (day.now - 3):(day.now + 3) } - hrs.rain <- sample(unlist(precip.distribution$hrs.rain[rain.ind]),1) + hrs.rain <- sample(unlist(precip.distribution$hrs.rain[rain.ind]), 1) # hr.max <- sample(precip.distribution$hrs.max[[day.now]],1) - for(j in 1:ncol(dat.pred)){ - obs.day <- nrow(dat.pred)/ncol(dat.pred) - start.ind <- seq(1, nrow(dat.pred), by=obs.day) - for(z in seq_along(start.ind)){ - rain.now <- dat.pred[start.ind[z]:(start.ind[z]+obs.day-1),j] - hrs.now <- which(rain.now>0) + for (j in 1:ncol(dat.pred)) { + obs.day <- nrow(dat.pred) / ncol(dat.pred) + start.ind <- seq(1, nrow(dat.pred), by = obs.day) + for (z in seq_along(start.ind)) { + rain.now <- dat.pred[start.ind[z]:(start.ind[z] + obs.day - 1), j] + hrs.now <- which(rain.now > 0) - if(length(hrs.now)<=hrs.rain) next # If we don't need to redistribute, skip what's next + if (length(hrs.now) <= hrs.rain) next # If we don't need to redistribute, skip what's next # Figure out when it's going to rain based on what normally has the most number of hours - hrs.add <- sample(unlist(precip.distribution$hrs.max[rain.ind]), hrs.rain, replace=T) + hrs.add <- sample(unlist(precip.distribution$hrs.max[rain.ind]), hrs.rain, replace = T) hrs.go <- hrs.now[!hrs.now %in% hrs.add] - hrs.wet <- sample(hrs.add, length(hrs.go), replace=T) + hrs.wet <- sample(hrs.add, length(hrs.go), replace = T) - for(dry in seq_along(hrs.go)){ + for (dry in seq_along(hrs.go)) { rain.now[hrs.wet[dry]] <- rain.now[hrs.wet[dry]] + rain.now[hrs.go[dry]] rain.now[hrs.go[dry]] <- 0 } # Put the rain back into place - dat.pred[start.ind[z]:(start.ind[z]+obs.day-1),j] <- rain.now + dat.pred[start.ind[z]:(start.ind[z] + obs.day - 1), j] <- rain.now } # End row loop } # End column loop } # End hour redistribution @@ -590,8 +596,8 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. if (v == "surface_downwelling_shortwave_flux_in_air") { # Randomly pick which values to save & propogate - if(ncol(dat.sim[[v]])>1){ - cols.prop <- as.integer(cols.list[i,]) + if (ncol(dat.sim[[v]]) > 1) { + cols.prop <- as.integer(cols.list[i, ]) for (j in 1:ncol(dat.sim[[v]])) { dat.sim[[v]][rows.mod, j] <- dat.pred[dat.temp$ens == paste0("X", j), cols.prop[j]] } @@ -602,9 +608,8 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. 
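The drizzle fix above picks a target number of wet hours from the training distribution and folds the remaining wet hours' rain into hours sampled from the climatologically wettest ones, leaving the daily total unchanged. A compact standalone illustration with made-up inputs (not the package routine):

# Sketch: consolidate a day's rain into roughly hrs.rain wet hours.
consolidate_rain <- function(rain.now, hrs.rain, hrs.climatology) {
  hrs.now <- which(rain.now > 0)
  if (length(hrs.now) <= hrs.rain) return(rain.now) # already peaky enough
  hrs.add <- sample(hrs.climatology, hrs.rain, replace = TRUE) # hours allowed to stay wet
  hrs.go <- hrs.now[!hrs.now %in% hrs.add]                     # hours to dry out
  hrs.wet <- sample(hrs.add, length(hrs.go), replace = TRUE)
  for (dry in seq_along(hrs.go)) {
    rain.now[hrs.wet[dry]] <- rain.now[hrs.wet[dry]] + rain.now[hrs.go[dry]]
    rain.now[hrs.go[dry]] <- 0
  }
  rain.now
}

set.seed(42)
drizzle <- rep(1e-5, 24) # constant drizzle, kg m-2 s-1
peaky <- consolidate_rain(drizzle, hrs.rain = 3, hrs.climatology = c(14, 15, 16, 17))
all.equal(sum(drizzle), sum(peaky)) # total precipitation is preserved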
dat.sim[[v]][rows.now[!rows.now %in% rows.mod], ] <- 0 } else { - - if(ncol(dat.sim[[v]])>1){ - cols.prop <- as.integer(cols.list[i,]) + if (ncol(dat.sim[[v]]) > 1) { + cols.prop <- as.integer(cols.list[i, ]) for (j in 1:ncol(dat.sim[[v]])) { dat.sim[[v]][rows.now, j] <- dat.pred[dat.temp$ens == paste0("X", j), cols.prop[j]] } @@ -612,9 +617,9 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. dat.sim[[v]][rows.now, 1] <- dat.pred } } - rm(mod.save) # Clear out the model to save memory + rm(mod.save) # Clear out the model to save memory - if(print.progress==TRUE){ + if (print.progress == TRUE) { utils::setTxtProgressBar(pb, pb.index) pb.index <- pb.index + 1 } @@ -622,8 +627,7 @@ lm_ensemble_sims <- function(dat.mod, n.ens, path.model, direction.filter, lags. rm(dat.temp, dat.pred) } # end day loop # -------------------------------- - - } # End vars.list + } # End vars.list # ---------- End of downscaling for loop return(dat.sim) } diff --git a/modules/data.atmosphere/R/tdm_model_train.R b/modules/data.atmosphere/R/tdm_model_train.R index 03a8060cd82..3e420922958 100644 --- a/modules/data.atmosphere/R/tdm_model_train.R +++ b/modules/data.atmosphere/R/tdm_model_train.R @@ -1,6 +1,6 @@ ##' TDM Model Train ##' Linear regression calculations for specific met variables -# ----------------------------------- +# ----------------------------------- # Description # ----------------------------------- ##' @title model.train @@ -10,7 +10,7 @@ ##' variables. This is used in conjunction with temporal.downscale.functions() ##' to generate linear regression statistics and save their output to be called ##' later in lm_ensemble_sims(). -# ----------------------------------- +# ----------------------------------- # Parameters # ----------------------------------- ##' @param dat.subset data.frame containing lags, next, and downscale period data @@ -27,162 +27,175 @@ # Begin Function #---------------------------------------------------------------------- model.train <- function(dat.subset, v, n.beta, resids = resids, threshold = NULL, ...) 
{ - dat.subset$year <- as.ordered(dat.subset$year) + dat.subset$year <- as.ordered(dat.subset$year) if (v == "air_temperature") { - - mod.doy <- stats::lm(air_temperature ~ as.ordered(hour) * air_temperature_max.day * - (lag.air_temperature + lag.air_temperature_min + air_temperature_min.day) + - as.ordered(hour) * air_temperature_min.day * next.air_temperature_max - - 1 - as.ordered(hour) - lag.air_temperature - lag.air_temperature_min - - next.air_temperature_max - air_temperature_max.day - - air_temperature_min.day, data = dat.subset) # + mod.doy <- stats::lm(air_temperature ~ as.ordered(hour) * air_temperature_max.day * + (lag.air_temperature + lag.air_temperature_min + air_temperature_min.day) + + as.ordered(hour) * air_temperature_min.day * next.air_temperature_max - + 1 - as.ordered(hour) - lag.air_temperature - lag.air_temperature_min - + next.air_temperature_max - air_temperature_max.day - + air_temperature_min.day, data = dat.subset) # } - + if (v == "surface_downwelling_shortwave_flux_in_air") { # Don't bother trying to fit hours that are completely or pretty darn # close to dark - hrs.day <- unique(dat.subset[dat.subset$surface_downwelling_shortwave_flux_in_air > - threshold, "hour"]) - - mod.doy <- stats::lm(surface_downwelling_shortwave_flux_in_air ~ - as.ordered(hour) * surface_downwelling_shortwave_flux_in_air.day - - 1 - surface_downwelling_shortwave_flux_in_air.day - - as.ordered(hour), data = dat.subset[dat.subset$hour %in% - hrs.day, ]) ### + hrs.day <- unique(dat.subset[dat.subset$surface_downwelling_shortwave_flux_in_air > + threshold, "hour"]) + + mod.doy <- stats::lm(surface_downwelling_shortwave_flux_in_air ~ + as.ordered(hour) * surface_downwelling_shortwave_flux_in_air.day - + 1 - surface_downwelling_shortwave_flux_in_air.day - + as.ordered(hour), data = dat.subset[dat.subset$hour %in% + hrs.day, ]) ### } - + if (v == "surface_downwelling_longwave_flux_in_air") { - mod.doy <- stats::lm(sqrt(surface_downwelling_longwave_flux_in_air) ~ - as.ordered(hour) * surface_downwelling_longwave_flux_in_air.day * - (lag.surface_downwelling_longwave_flux_in_air + next.surface_downwelling_longwave_flux_in_air) - - as.ordered(hour) - 1 - lag.surface_downwelling_longwave_flux_in_air - - next.surface_downwelling_longwave_flux_in_air - surface_downwelling_longwave_flux_in_air.day - - surface_downwelling_longwave_flux_in_air.day * lag.surface_downwelling_longwave_flux_in_air - - surface_downwelling_longwave_flux_in_air.day * next.surface_downwelling_longwave_flux_in_air, - data = dat.subset) ### - + mod.doy <- stats::lm( + sqrt(surface_downwelling_longwave_flux_in_air) ~ + as.ordered(hour) * surface_downwelling_longwave_flux_in_air.day * + (lag.surface_downwelling_longwave_flux_in_air + next.surface_downwelling_longwave_flux_in_air) - + as.ordered(hour) - 1 - lag.surface_downwelling_longwave_flux_in_air - + next.surface_downwelling_longwave_flux_in_air - surface_downwelling_longwave_flux_in_air.day - + surface_downwelling_longwave_flux_in_air.day * lag.surface_downwelling_longwave_flux_in_air - + surface_downwelling_longwave_flux_in_air.day * next.surface_downwelling_longwave_flux_in_air, + data = dat.subset + ) ### } - + if (v == "precipitation_flux") { - # Precip needs to be a bit different. 
We're going to calculate the # fraction of precip occuring in each hour we're going to estimate the # probability distribution of rain occuring in a given hour - dat.subset$rain.prop <- dat.subset$precipitation_flux/(dat.subset$precipitation_flux.day) - mod.doy <- stats::lm(rain.prop ~ as.ordered(hour) - 1 , data = dat.subset) + dat.subset$rain.prop <- dat.subset$precipitation_flux / (dat.subset$precipitation_flux.day) + mod.doy <- stats::lm(rain.prop ~ as.ordered(hour) - 1, data = dat.subset) } - + if (v == "air_pressure") { - mod.doy <- stats::lm(air_pressure ~ as.ordered(hour) * (air_pressure.day + - lag.air_pressure + next.air_pressure) - as.ordered(hour) - - 1 - air_pressure.day - lag.air_pressure - next.air_pressure, - data = dat.subset) + mod.doy <- stats::lm( + air_pressure ~ as.ordered(hour) * (air_pressure.day + + lag.air_pressure + next.air_pressure) - as.ordered(hour) - + 1 - air_pressure.day - lag.air_pressure - next.air_pressure, + data = dat.subset + ) } - + if (v == "specific_humidity") { - mod.doy <- stats::lm(log(specific_humidity) ~ as.ordered(hour) * - specific_humidity.day * (lag.specific_humidity + next.specific_humidity + - air_temperature_max.day) - as.ordered(hour) - 1 - air_temperature_max.day, - data = dat.subset) + mod.doy <- stats::lm( + log(specific_humidity) ~ as.ordered(hour) * + specific_humidity.day * (lag.specific_humidity + next.specific_humidity + + air_temperature_max.day) - as.ordered(hour) - 1 - air_temperature_max.day, + data = dat.subset + ) } - + if (v == "wind_speed") { - mod.doy <- stats::lm(sqrt(wind_speed) ~ as.ordered(hour) * wind_speed.day * - (lag.wind_speed + next.wind_speed) - as.ordered(hour) - - 1 - wind_speed.day - lag.wind_speed - next.wind_speed - - wind_speed.day * lag.wind_speed - wind_speed.day * next.wind_speed, - data = dat.subset) + mod.doy <- stats::lm( + sqrt(wind_speed) ~ as.ordered(hour) * wind_speed.day * + (lag.wind_speed + next.wind_speed) - as.ordered(hour) - + 1 - wind_speed.day - lag.wind_speed - next.wind_speed - + wind_speed.day * lag.wind_speed - wind_speed.day * next.wind_speed, + data = dat.subset + ) } - - - + + + # If we can't estimate the covariance matrix, stop & increase the # moving window if (is.na(summary(mod.doy)$adj.r.squared)) { - stop(paste0("Can not estimate covariance matrix for day of year: ", - unique(dat.subset$doy), "; Increase day.window and try again")) + stop(paste0( + "Can not estimate covariance matrix for day of year: ", + unique(dat.subset$doy), "; Increase day.window and try again" + )) } - + # ----- Each variable must do this Generate a bunch of random # coefficients that we can pull from without needing to do this step # every day - if(n.beta>1){ + if (n.beta > 1) { mod.coef <- stats::coef(mod.doy) mod.cov <- stats::vcov(mod.doy) piv <- as.numeric(which(!is.na(mod.coef))) - Rbeta <- MASS::mvrnorm(n = n.beta, mod.coef[piv], mod.cov[piv,piv]) + Rbeta <- MASS::mvrnorm(n = n.beta, mod.coef[piv], mod.cov[piv, piv]) } else { - Rbeta <- matrix(stats::coef(mod.doy), nrow=1) + Rbeta <- matrix(stats::coef(mod.doy), nrow = 1) colnames(Rbeta) <- names(stats::coef(mod.doy)) } - + list.out <- list(model = mod.doy, betas = Rbeta) - + # Model residuals as a function of hour so we can increase our # uncertainty if (resids == TRUE) { if (v == "air_temperature") { - dat.subset[!is.na(dat.subset$lag.air_temperature) & !is.na(dat.subset$next.air_temperature_max), - "resid"] <- stats::resid(mod.doy) - resid.model <- stats::lm(resid ~ as.ordered(hour) * (air_temperature_max.day * - air_temperature_min.day) - 
1, data = dat.subset[!is.na(dat.subset$lag.air_temperature), - ]) + dat.subset[ + !is.na(dat.subset$lag.air_temperature) & !is.na(dat.subset$next.air_temperature_max), + "resid" + ] <- stats::resid(mod.doy) + resid.model <- stats::lm(resid ~ as.ordered(hour) * (air_temperature_max.day * + air_temperature_min.day) - 1, data = dat.subset[!is.na(dat.subset$lag.air_temperature), ]) } - + if (v == "surface_downwelling_shortwave_flux_in_air") { dat.subset[dat.subset$hour %in% hrs.day, "resid"] <- stats::resid(mod.doy) - resid.model <- stats::lm(resid ~ as.ordered(hour) * surface_downwelling_shortwave_flux_in_air.day - - 1, data = dat.subset[dat.subset$hour %in% hrs.day, - ]) + resid.model <- stats::lm(resid ~ as.ordered(hour) * surface_downwelling_shortwave_flux_in_air.day - + 1, data = dat.subset[dat.subset$hour %in% hrs.day, ]) } - + if (v == "surface_downwelling_longwave_flux_in_air") { - dat.subset[!is.na(dat.subset$lag.surface_downwelling_longwave_flux_in_air) & - !is.na(dat.subset$next.surface_downwelling_longwave_flux_in_air), - "resid"] <- stats::resid(mod.doy) - resid.model <- stats::lm(resid ~ as.ordered(hour) * surface_downwelling_longwave_flux_in_air.day - - 1, data = dat.subset[, ]) + dat.subset[ + !is.na(dat.subset$lag.surface_downwelling_longwave_flux_in_air) & + !is.na(dat.subset$next.surface_downwelling_longwave_flux_in_air), + "resid" + ] <- stats::resid(mod.doy) + resid.model <- stats::lm(resid ~ as.ordered(hour) * surface_downwelling_longwave_flux_in_air.day - + 1, data = dat.subset[, ]) } - + if (v == "precipitation_flux") { dat.subset[, "resid"] <- stats::resid(mod.doy) - resid.model <- stats::lm(resid ~ as.ordered(hour) * precipitation_flux.day - - 1, data = dat.subset[, ]) + resid.model <- stats::lm(resid ~ as.ordered(hour) * precipitation_flux.day - + 1, data = dat.subset[, ]) } - + if (v == "air_pressure") { - dat.subset[!is.na(dat.subset$lag.air_pressure) & !is.na(dat.subset$next.air_pressure), - "resid"] <- stats::resid(mod.doy) - resid.model <- stats::lm(resid ~ as.ordered(hour) * air_pressure.day - - 1, data = dat.subset[, ]) + dat.subset[ + !is.na(dat.subset$lag.air_pressure) & !is.na(dat.subset$next.air_pressure), + "resid" + ] <- stats::resid(mod.doy) + resid.model <- stats::lm(resid ~ as.ordered(hour) * air_pressure.day - + 1, data = dat.subset[, ]) } - + if (v == "specific_humidity") { - dat.subset[!is.na(dat.subset$lag.specific_humidity) & - !is.na(dat.subset$next.specific_humidity), "resid"] <- stats::resid(mod.doy) - resid.model <- stats::lm(resid ~ as.ordered(hour) * specific_humidity.day - - 1, data = dat.subset[, ]) + dat.subset[!is.na(dat.subset$lag.specific_humidity) & + !is.na(dat.subset$next.specific_humidity), "resid"] <- stats::resid(mod.doy) + resid.model <- stats::lm(resid ~ as.ordered(hour) * specific_humidity.day - + 1, data = dat.subset[, ]) } - + if (v == "wind_speed") { - dat.subset[!is.na(dat.subset$lag.wind_speed) & !is.na(dat.subset$next.wind_speed), - "resid"] <- stats::resid(mod.doy) - resid.model <- stats::lm(resid ~ as.ordered(hour) * wind_speed.day - - 1, data = dat.subset[, ]) + dat.subset[ + !is.na(dat.subset$lag.wind_speed) & !is.na(dat.subset$next.wind_speed), + "resid" + ] <- stats::resid(mod.doy) + resid.model <- stats::lm(resid ~ as.ordered(hour) * wind_speed.day - + 1, data = dat.subset[, ]) } - - if(n.beta>1){ + + if (n.beta > 1) { res.coef <- stats::coef(resid.model) res.cov <- stats::vcov(resid.model) res.piv <- as.numeric(which(!is.na(res.coef))) - - beta.resid <- MASS::mvrnorm(n = n.beta, res.coef[res.piv], res.cov) + + 
beta.resid <- MASS::mvrnorm(n = n.beta, res.coef[res.piv], res.cov) } else { - beta.resid <- matrix(stats::coef(resid.model), nrow=1) + beta.resid <- matrix(stats::coef(resid.model), nrow = 1) colnames(beta.resid) <- names(stats::coef(mod.doy)) } - + list.out[["model.resid"]] <- resid.model list.out[["betas.resid"]] <- beta.resid } diff --git a/modules/data.atmosphere/R/tdm_predict_subdaily_met.R b/modules/data.atmosphere/R/tdm_predict_subdaily_met.R index 3e4526d8b50..e9744546850 100644 --- a/modules/data.atmosphere/R/tdm_predict_subdaily_met.R +++ b/modules/data.atmosphere/R/tdm_predict_subdaily_met.R @@ -18,20 +18,20 @@ # Parameters # ----------------------------------- ##' @param outfolder - directory where output file will be stored -##' @param in.path - base path to dataset you wish to temporally downscale; Note: in order for parallelization -##' to work, the in.prefix will need to be appended as the final level of the file structure. +##' @param in.path - base path to dataset you wish to temporally downscale; Note: in order for parallelization +##' to work, the in.prefix will need to be appended as the final level of the file structure. ##' For example, if prefix is GFDL.CM3.rcp45.r1i1p1, there should be a directory with that title in in.path. ##' @param in.prefix - prefix of model dataset, i.e. if file is GFDL.CM3.rcp45.r1i1p1.2006 the prefix is 'GFDL.CM3.rcp45.r1i1p1' ##' @param path.train - path to CF/PEcAn style training data where each year is in a separate file. ##' @param direction.filter - Whether the model will be filtered backward or forwards in time. options = c("backward", "forwards") -##' (default is forward; PalEON will go backward, anybody interested in the future will go forwards) +##' (default is forward; PalEON will go backward, anybody interested in the future will go forwards) ##' @param lm.models.base - path to linear regression model folders generated using gen.subdaily.models ##' @param yrs.predict - years for which you want to generate met. if NULL, all years in in.path will be done -##' @param ens.labs - vector containing the labels (suffixes) for each ensemble member; this allows you to add to your +##' @param ens.labs - vector containing the labels (suffixes) for each ensemble member; this allows you to add to your ##' ensemble rather than overwriting with a default naming scheme ##' @param resids - logical stating whether to pass on residual data or not ##' @param adjust.pr - adjustment factor fore precipitation when the extracted values seem off -##' @param force.sanity - (logical) do we force the data to meet sanity checks? +##' @param force.sanity - (logical) do we force the data to meet sanity checks? ##' @param sanity.tries - how many time should we try to predict a reasonable value before giving up? We don't want to end up in an infinite loop ##' @param overwrite logical: replace output file if it already exists? ##' @param verbose logical: should \code{\link[ncdf4:ncdf4-package]{ncdf4}} functions print debugging information as they run? @@ -57,193 +57,206 @@ # Begin Script #---------------------------------------------------------------------- -predict_subdaily_met <- function(outfolder, in.path, in.prefix, path.train, direction.filter="forward", lm.models.base, - yrs.predict=NULL, ens.labs = 1:3, resids = FALSE, adjust.pr=1, force.sanity=TRUE, sanity.tries=25, - overwrite = FALSE, verbose = FALSE, seed=format(Sys.time(), "%m%d"), print.progress=FALSE, ...) 
{ - - if(direction.filter %in% toupper( c("backward", "backwards"))) direction.filter="backward" - - if(!tolower(direction.filter) %in% c("backward", "forward", "backwards", "forwards")) PEcAn.logger::logger.severe("Invalid direction.filter") - - vars.hour <- c("air_temperature", "precipitation_flux", "surface_downwelling_shortwave_flux_in_air", - "surface_downwelling_longwave_flux_in_air", "air_pressure", "specific_humidity", - "wind_speed") - vars.lag <- c("lag.air_temperature", "lag.precipitation_flux", "lag.surface_downwelling_shortwave_flux_in_air", - "lag.surface_downwelling_longwave_flux_in_air", "lag.air_pressure", - "lag.specific_humidity", "lag.wind_speed") - +predict_subdaily_met <- function(outfolder, in.path, in.prefix, path.train, direction.filter = "forward", lm.models.base, + yrs.predict = NULL, ens.labs = 1:3, resids = FALSE, adjust.pr = 1, force.sanity = TRUE, sanity.tries = 25, + overwrite = FALSE, verbose = FALSE, seed = format(Sys.time(), "%m%d"), print.progress = FALSE, ...) { + if (direction.filter %in% toupper(c("backward", "backwards"))) direction.filter <- "backward" + + if (!tolower(direction.filter) %in% c("backward", "forward", "backwards", "forwards")) PEcAn.logger::logger.severe("Invalid direction.filter") + + vars.hour <- c( + "air_temperature", "precipitation_flux", "surface_downwelling_shortwave_flux_in_air", + "surface_downwelling_longwave_flux_in_air", "air_pressure", "specific_humidity", + "wind_speed" + ) + vars.lag <- c( + "lag.air_temperature", "lag.precipitation_flux", "lag.surface_downwelling_shortwave_flux_in_air", + "lag.surface_downwelling_longwave_flux_in_air", "lag.air_pressure", + "lag.specific_humidity", "lag.wind_speed" + ) + n.ens <- length(ens.labs) - + # Update in.path with our prefix (seems silly, but helps with parallelization) # in.path <- file.path(in.path, in.prefix) - + # Extract the lat/lon info from the first of the source files fnow <- dir(in.path, ".nc")[1] ncT <- ncdf4::nc_open(file.path(in.path, fnow)) lat.in <- ncdf4::ncvar_get(ncT, "latitude") lon.in <- ncdf4::ncvar_get(ncT, "longitude") ncdf4::nc_close(ncT) - + # Getting a list of all files/years we want to downscale files.tdm <- dir(in.path, ".nc") - + yrs.tdm <- strsplit(files.tdm, "[.]") - yrs.tdm <- matrix(unlist(yrs.tdm), ncol=length(yrs.tdm[[1]]), byrow=T) - yrs.tdm <- as.numeric(yrs.tdm[,ncol(yrs.tdm)-1]) # Assumes year is always last thing before the file extension - - if(!is.null(yrs.predict)){ + yrs.tdm <- matrix(unlist(yrs.tdm), ncol = length(yrs.tdm[[1]]), byrow = T) + yrs.tdm <- as.numeric(yrs.tdm[, ncol(yrs.tdm) - 1]) # Assumes year is always last thing before the file extension + + if (!is.null(yrs.predict)) { files.tdm <- files.tdm[which(yrs.tdm %in% yrs.predict)] yrs.tdm <- yrs.tdm[which(yrs.tdm %in% yrs.predict)] } - + # make sure files and years are ordered in the direction we want to go - if(direction.filter=="backward"){ + if (direction.filter == "backward") { yrs.tdm <- yrs.tdm[order(yrs.tdm, decreasing = T)] files.tdm <- files.tdm[order(files.tdm, decreasing = T)] } - met.lag <- ifelse(direction.filter=="backward", -1, +1) - + met.lag <- ifelse(direction.filter == "backward", -1, +1) + # Create wind speed variable if it doesn't exist # if (all(is.na(dat.train$wind_speed) == TRUE)) { - # dat.train$wind_speed <- sqrt(dat.train$eastward_wind^2 + dat.train$northward_wind^2) + # dat.train$wind_speed <- sqrt(dat.train$eastward_wind^2 + dat.train$northward_wind^2) # } # Defining variable names, longname & units - nc.info <- data.frame(CF.name = 
c("air_temperature", "precipitation_flux", - "surface_downwelling_shortwave_flux_in_air", "surface_downwelling_longwave_flux_in_air", - "air_pressure", "specific_humidity", "wind_speed"), longname = c("2 meter mean air temperature", - "cumulative precipitation (water equivalent)", "incident (downwelling) showtwave radiation", - "incident (downwelling) longwave radiation", "air_pressureure at the surface", - "Specific humidity measured at the lowest level of the atmosphere", - "Wind speed"), units = c("K", "kg m-2 s-1", "W m-2", "W m-2", "Pa", - "kg kg-1", "m s-1")) - - + nc.info <- data.frame(CF.name = c( + "air_temperature", "precipitation_flux", + "surface_downwelling_shortwave_flux_in_air", "surface_downwelling_longwave_flux_in_air", + "air_pressure", "specific_humidity", "wind_speed" + ), longname = c( + "2 meter mean air temperature", + "cumulative precipitation (water equivalent)", "incident (downwelling) showtwave radiation", + "incident (downwelling) longwave radiation", "air_pressureure at the surface", + "Specific humidity measured at the lowest level of the atmosphere", + "Wind speed" + ), units = c( + "K", "kg m-2 s-1", "W m-2", "W m-2", "Pa", + "kg kg-1", "m s-1" + )) + + # ---------------------------------- # Prep some info on precipitation distribution # ---------------------------------- # Read in the data and dupe it into the temporal resolution we want to end up with (based on our training data) files.train <- dir(path.train, ".nc") - + # Getting a list of years just to make things faster for align.met yrs.file <- strsplit(files.train, "[.]") - yrs.file <- matrix(unlist(yrs.file), ncol=length(yrs.file[[1]]), byrow=T) - yrs.file <- as.numeric(yrs.file[,ncol(yrs.file)-1]) # Assumes year is always last thing before the file extension - + yrs.file <- matrix(unlist(yrs.file), ncol = length(yrs.file[[1]]), byrow = T) + yrs.file <- as.numeric(yrs.file[, ncol(yrs.file) - 1]) # Assumes year is always last thing before the file extension + train.nl <- yrs.file[which(!lubridate::leap_year(yrs.file))[1]] train.leap <- yrs.file[which(lubridate::leap_year(yrs.file))[1]] - + # Set up our preciptiation distribution info precip.dist <- list() precip.dist$hrs.rain <- list() precip.dist$hrs.max <- list() - for(i in 1:366){ + for (i in 1:366) { precip.dist$hrs.rain[[i]] <- vector() precip.dist$hrs.max[[i]] <- vector() } - for(i in 1:length(files.train)){ + for (i in 1:length(files.train)) { nday <- ifelse(lubridate::leap_year(yrs.file[i]), 366, 365) - + ncT <- ncdf4::nc_open(file.path(path.train, files.train[i])) precip.hr <- ncdf4::ncvar_get(ncT, "precipitation_flux") ncdf4::nc_close(ncT) - - obs.day <- round(length(precip.hr)/nday) + + obs.day <- round(length(precip.hr) / nday) # Setting up precip as a list to make doing precip.temp <- list() - day.ind <- seq(1, length(precip.hr), by=obs.day) - for(j in seq_along(day.ind)){ - precip.temp[[j]] <- precip.hr[day.ind[j]:(day.ind[j]+23)] + day.ind <- seq(1, length(precip.hr), by = obs.day) + for (j in seq_along(day.ind)) { + precip.temp[[j]] <- precip.hr[day.ind[j]:(day.ind[j] + 23)] } # end j loop - for(j in 1:length(precip.temp)){ - if(j <= 7){ - rain.train <- c(unlist(precip.temp[1:(j+7)]), unlist(precip.temp[(length(precip.temp)-7+j):length(precip.temp)])) - } else if(j>=(length(precip.temp)-7)){ - rain.train <- c(unlist(precip.temp[j:length(precip.temp)]), unlist(precip.temp[1:(length(precip.temp)-j+6)])) + for (j in 1:length(precip.temp)) { + if (j <= 7) { + rain.train <- c(unlist(precip.temp[1:(j + 7)]), 
unlist(precip.temp[(length(precip.temp) - 7 + j):length(precip.temp)])) + } else if (j >= (length(precip.temp) - 7)) { + rain.train <- c(unlist(precip.temp[j:length(precip.temp)]), unlist(precip.temp[1:(length(precip.temp) - j + 6)])) } else { - rain.train <- unlist(precip.temp[(j-7):(j+7)]) + rain.train <- unlist(precip.temp[(j - 7):(j + 7)]) } - hrs.tmp <- max(round(length(which(rain.train>0))/length(rain.train)*obs.day), 1) # Getting the average number of obs of rain per day - hrs.tmp <- min(hrs.tmp, obs.day/2) # Just to prevent constant drizzle problem, it's can't rain in more than half the observations + hrs.tmp <- max(round(length(which(rain.train > 0)) / length(rain.train) * obs.day), 1) # Getting the average number of obs of rain per day + hrs.tmp <- min(hrs.tmp, obs.day / 2) # Just to prevent constant drizzle problem, it's can't rain in more than half the observations precip.dist$hrs.rain[[j]] <- c(precip.dist$hrs.rain[[j]], hrs.tmp) - - rain.train <- matrix(rain.train, ncol=length(rain.train)/obs.day, byrow = FALSE) + + rain.train <- matrix(rain.train, ncol = length(rain.train) / obs.day, byrow = FALSE) rain.tot <- apply(rain.train, 1, sum) - - precip.dist$hrs.max[[j]] <- c(precip.dist$hrs.max[[j]], which(rain.tot==max(rain.tot))[1]) - } # end j loop + + precip.dist$hrs.max[[j]] <- c(precip.dist$hrs.max[[j]], which(rain.tot == max(rain.tot))[1]) + } # end j loop } # end file loop # ---------------------------------- - - - + + + # ---------------------------------- # Set progress bar # pb.index <- 1 - if(print.progress==TRUE) pb <- utils::txtProgressBar(min = 0, max = length(yrs.tdm), style = 3) + if (print.progress == TRUE) pb <- utils::txtProgressBar(min = 0, max = length(yrs.tdm), style = 3) # utils::setTxtProgressBar(pb, pb.index) for (y in 1:length(yrs.tdm)) { yr.train <- ifelse(lubridate::leap_year(yrs.tdm[y]), train.leap, train.nl) - - met.out <- align.met(train.path=path.train, source.path=in.path, - yrs.train=yr.train, yrs.source=yrs.tdm[y], - n.ens=1, seed=201708, pair.mems = FALSE) - + + met.out <- align.met( + train.path = path.train, source.path = in.path, + yrs.train = yr.train, yrs.source = yrs.tdm[y], + n.ens = 1, seed = 201708, pair.mems = FALSE + ) + # Adjust the preciptiation for the source data if it can't be right (default = 1) - met.out$dat.source$precipitation_flux <- met.out$dat.source$precipitation_flux*adjust.pr - + met.out$dat.source$precipitation_flux <- met.out$dat.source$precipitation_flux * adjust.pr + # Create wind speed variable if it doesn't exist - if(!"wind_speed" %in% names(met.out$dat.train) & "eastward_wind" %in% names(met.out$dat.train)){ + if (!"wind_speed" %in% names(met.out$dat.train) & "eastward_wind" %in% names(met.out$dat.train)) { met.out$dat.train$wind_speed <- sqrt(met.out$dat.train$eastward_wind^2 + met.out$dat.train$northward_wind^2) - } - if(!"wind_speed" %in% names(met.out$dat.source) & "eastward_wind" %in% names(met.out$dat.source)){ + } + if (!"wind_speed" %in% names(met.out$dat.source) & "eastward_wind" %in% names(met.out$dat.source)) { met.out$dat.source$wind_speed <- sqrt(met.out$dat.source$eastward_wind^2 + met.out$dat.source$northward_wind^2) - } - - + } + + # Package the raw data into the dataframe that will get passed into the function - dat.ens <- data.frame(year = met.out$dat.source$time$Year, - doy = met.out$dat.source$time$DOY, - date = met.out$dat.source$time$Date, - hour = met.out$dat.source$time$Hour, - air_temperature_max.day = met.out$dat.source$air_temperature_maximum, - air_temperature_min.day = 
met.out$dat.source$air_temperature_minimum, - precipitation_flux.day = met.out$dat.source$precipitation_flux, - surface_downwelling_shortwave_flux_in_air.day = met.out$dat.source$surface_downwelling_shortwave_flux_in_air, - surface_downwelling_longwave_flux_in_air.day = met.out$dat.source$surface_downwelling_longwave_flux_in_air, - air_pressure.day = met.out$dat.source$air_pressure, - specific_humidity.day = met.out$dat.source$specific_humidity, - wind_speed.day = met.out$dat.source$wind_speed) - - + dat.ens <- data.frame( + year = met.out$dat.source$time$Year, + doy = met.out$dat.source$time$DOY, + date = met.out$dat.source$time$Date, + hour = met.out$dat.source$time$Hour, + air_temperature_max.day = met.out$dat.source$air_temperature_maximum, + air_temperature_min.day = met.out$dat.source$air_temperature_minimum, + precipitation_flux.day = met.out$dat.source$precipitation_flux, + surface_downwelling_shortwave_flux_in_air.day = met.out$dat.source$surface_downwelling_shortwave_flux_in_air, + surface_downwelling_longwave_flux_in_air.day = met.out$dat.source$surface_downwelling_longwave_flux_in_air, + air_pressure.day = met.out$dat.source$air_pressure, + specific_humidity.day = met.out$dat.source$specific_humidity, + wind_speed.day = met.out$dat.source$wind_speed + ) + + # Set up our simulation time variables; it *should* be okay that this resets each year since it's really only doy that matters - dat.ens$sim.hr <- trunc(as.numeric(difftime(dat.ens$date, min(dat.ens$date), tz = "GMT", units = "hour")))+1 - dat.ens$sim.day <- trunc(as.numeric(difftime(dat.ens$date, min(dat.ens$date), tz = "GMT", units = "day")))+1 + dat.ens$sim.hr <- trunc(as.numeric(difftime(dat.ens$date, min(dat.ens$date), tz = "GMT", units = "hour"))) + 1 + dat.ens$sim.day <- trunc(as.numeric(difftime(dat.ens$date, min(dat.ens$date), tz = "GMT", units = "day"))) + 1 # lag.time <- ifelse(direction.filter=="backward", min(dat.train$hour), max(dat.train$hour)) - + # ------------------------------ - # If this is our first time through, we need to initalize our lags; + # If this is our first time through, we need to initalize our lags; # we can do so with the data we extracted with met.out # ------------------------------ # Figure out whether we want to use the first or last value to initalize our lags - # Note: Data should be ordered Jan 1 -> Dec 31; If we're moving backward, we start with + # Note: Data should be ordered Jan 1 -> Dec 31; If we're moving backward, we start with # Dec 31 and we'll want to pull Jan 1. 
If we're going forward, we want the opposite - if(y == 1){ - lag.use <- ifelse(direction.filter=="backward", 1, nrow(met.out$dat.source$time)) + if (y == 1) { + lag.use <- ifelse(direction.filter == "backward", 1, nrow(met.out$dat.source$time)) lags.init <- list() - - lags.init[["air_temperature"]] <- data.frame(array(mean(met.out$dat.source$air_temperature_maximum[lag.use], met.out$dat.source$air_temperature_minimum[lag.use]), dim=c(1, n.ens))) - lags.init[["air_temperature_min"]] <- data.frame(array(met.out$dat.source$air_temperature_minimum[lag.use], dim=c(1, n.ens))) - lags.init[["air_temperature_max"]] <- data.frame(array(met.out$dat.source$air_temperature_maximum[lag.use], dim=c(1, n.ens))) - for(v in vars.hour[2:length(vars.hour)]){ - lags.init[[v]] <- data.frame(array(met.out$dat.source[[v]][lag.use], dim=c(1,n.ens))) + + lags.init[["air_temperature"]] <- data.frame(array(mean(met.out$dat.source$air_temperature_maximum[lag.use], met.out$dat.source$air_temperature_minimum[lag.use]), dim = c(1, n.ens))) + lags.init[["air_temperature_min"]] <- data.frame(array(met.out$dat.source$air_temperature_minimum[lag.use], dim = c(1, n.ens))) + lags.init[["air_temperature_max"]] <- data.frame(array(met.out$dat.source$air_temperature_maximum[lag.use], dim = c(1, n.ens))) + for (v in vars.hour[2:length(vars.hour)]) { + lags.init[[v]] <- data.frame(array(met.out$dat.source[[v]][lag.use], dim = c(1, n.ens))) } } # ------------------------------ - + # ------------------------------ # Set up the "next" values # Unless this is our last year one of the values should be pulled from the next year to process @@ -251,163 +264,183 @@ predict_subdaily_met <- function(outfolder, in.path, in.prefix, path.train, dire # ------------------------------ # As long as we're not at the end, we can use align.met to pull the appropriate files; temporal resolution doesn't really matter here # Note: This gets us everything at the native daily resolution - if(y < length(yrs.tdm)){ - met.nxt <- align.met(train.path=in.path, source.path=in.path, yrs.train=yrs.tdm[y], yrs.source=yrs.tdm[y+1], n.ens=1, seed=201708, pair.mems = FALSE) + if (y < length(yrs.tdm)) { + met.nxt <- align.met(train.path = in.path, source.path = in.path, yrs.train = yrs.tdm[y], yrs.source = yrs.tdm[y + 1], n.ens = 1, seed = 201708, pair.mems = FALSE) } else { # Yes, this is redundant, but it works and helps keep me sane - met.nxt <- align.met(train.path=in.path, source.path=in.path, yrs.train=yrs.tdm[y], yrs.source=yrs.tdm[y], n.ens=1, seed=201708, pair.mems = FALSE) + met.nxt <- align.met(train.path = in.path, source.path = in.path, yrs.train = yrs.tdm[y], yrs.source = yrs.tdm[y], n.ens = 1, seed = 201708, pair.mems = FALSE) } - + # Adjust precipitation rate for both "train" and "source" since both are for the data being downscaled - met.nxt$dat.train$precipitation_flux <- met.nxt$dat.train$precipitation_flux*adjust.pr - met.nxt$dat.source$precipitation_flux <- met.nxt$dat.source$precipitation_flux*adjust.pr - - if(!"wind_speed" %in% names(met.nxt$dat.train) & "eastward_wind" %in% names(met.nxt$dat.train)){ + met.nxt$dat.train$precipitation_flux <- met.nxt$dat.train$precipitation_flux * adjust.pr + met.nxt$dat.source$precipitation_flux <- met.nxt$dat.source$precipitation_flux * adjust.pr + + if (!"wind_speed" %in% names(met.nxt$dat.train) & "eastward_wind" %in% names(met.nxt$dat.train)) { met.nxt$dat.train$wind_speed <- sqrt(met.nxt$dat.train$eastward_wind^2 + met.nxt$dat.train$northward_wind^2) - } - if(!"wind_speed" %in% names(met.nxt$dat.source) & 
"eastward_wind" %in% names(met.nxt$dat.source)){ + } + if (!"wind_speed" %in% names(met.nxt$dat.source) & "eastward_wind" %in% names(met.nxt$dat.source)) { met.nxt$dat.source$wind_speed <- sqrt(met.nxt$dat.source$eastward_wind^2 + met.nxt$dat.source$northward_wind^2) - } - - dat.nxt <- data.frame(year = met.nxt$dat.train$time$Year, - doy = met.nxt$dat.train$time$DOY-met.lag, - next.air_temperature_max = met.nxt$dat.train$air_temperature_maximum, - next.air_temperature_min = met.nxt$dat.train$air_temperature_minimum, - next.precipitation_flux = met.nxt$dat.train$precipitation_flux, - next.surface_downwelling_shortwave_flux_in_air = met.nxt$dat.train$surface_downwelling_shortwave_flux_in_air, - next.surface_downwelling_longwave_flux_in_air = met.nxt$dat.train$surface_downwelling_longwave_flux_in_air, - next.air_pressure = met.nxt$dat.train$air_pressure, - next.specific_humidity = met.nxt$dat.train$specific_humidity, - next.wind_speed = met.nxt$dat.train$wind_speed) - - if(direction.filter=="backward"){ - # If we're filtering backward, and starting with Dec. 31 of yrs.tdm[1] the first "next" is Dec. 30 (doy - 1) - # Jan 1 then needs the "next" pulled from the LAST row of yrs.tdm[2] + } + + dat.nxt <- data.frame( + year = met.nxt$dat.train$time$Year, + doy = met.nxt$dat.train$time$DOY - met.lag, + next.air_temperature_max = met.nxt$dat.train$air_temperature_maximum, + next.air_temperature_min = met.nxt$dat.train$air_temperature_minimum, + next.precipitation_flux = met.nxt$dat.train$precipitation_flux, + next.surface_downwelling_shortwave_flux_in_air = met.nxt$dat.train$surface_downwelling_shortwave_flux_in_air, + next.surface_downwelling_longwave_flux_in_air = met.nxt$dat.train$surface_downwelling_longwave_flux_in_air, + next.air_pressure = met.nxt$dat.train$air_pressure, + next.specific_humidity = met.nxt$dat.train$specific_humidity, + next.wind_speed = met.nxt$dat.train$wind_speed + ) + + if (direction.filter == "backward") { + # If we're filtering backward, and starting with Dec. 31 of yrs.tdm[1] the first "next" is Dec. 
30 (doy - 1) + # Jan 1 then needs the "next" pulled from the LAST row of yrs.tdm[2] row.last <- nrow(met.nxt$dat.source$time) - dat.nxt2 <- data.frame(year = met.nxt$dat.train$time$Year[1], - doy = met.nxt$dat.train$time$DOY[1], - next.air_temperature_max = met.nxt$dat.source$air_temperature_maximum[row.last], - next.air_temperature_min = met.nxt$dat.source$air_temperature_minimum[row.last], - next.precipitation_flux = met.nxt$dat.source$precipitation_flux[row.last], - next.surface_downwelling_shortwave_flux_in_air = met.nxt$dat.source$surface_downwelling_shortwave_flux_in_air[row.last], - next.surface_downwelling_longwave_flux_in_air = met.nxt$dat.source$surface_downwelling_longwave_flux_in_air[row.last], - next.air_pressure = met.nxt$dat.source$air_pressure[row.last], - next.specific_humidity = met.nxt$dat.source$specific_humidity[row.last], - next.wind_speed = met.nxt$dat.source$wind_speed[row.last]) - dat.nxt <- rbind(dat.nxt2, dat.nxt[1:(nrow(dat.nxt)-1),]) + dat.nxt2 <- data.frame( + year = met.nxt$dat.train$time$Year[1], + doy = met.nxt$dat.train$time$DOY[1], + next.air_temperature_max = met.nxt$dat.source$air_temperature_maximum[row.last], + next.air_temperature_min = met.nxt$dat.source$air_temperature_minimum[row.last], + next.precipitation_flux = met.nxt$dat.source$precipitation_flux[row.last], + next.surface_downwelling_shortwave_flux_in_air = met.nxt$dat.source$surface_downwelling_shortwave_flux_in_air[row.last], + next.surface_downwelling_longwave_flux_in_air = met.nxt$dat.source$surface_downwelling_longwave_flux_in_air[row.last], + next.air_pressure = met.nxt$dat.source$air_pressure[row.last], + next.specific_humidity = met.nxt$dat.source$specific_humidity[row.last], + next.wind_speed = met.nxt$dat.source$wind_speed[row.last] + ) + dat.nxt <- rbind(dat.nxt2, dat.nxt[1:(nrow(dat.nxt) - 1), ]) } else { - # If we're filtering FORWRDS, and starting with Jan 1 of yrs.tdm[1] the first "next" is Jan 2 (doy + 1) - # Dec. 31 then needs the "next" pulled from the FIRST row of yrs.tdm[2] + # If we're filtering FORWRDS, and starting with Jan 1 of yrs.tdm[1] the first "next" is Jan 2 (doy + 1) + # Dec. 
31 then needs the "next" pulled from the FIRST row of yrs.tdm[2] row.last <- nrow(met.nxt$dat.train$time) - dat.nxt2 <- data.frame(year = met.nxt$dat.train$time$Year[row.last], - doy = met.nxt$dat.train$time$DOY[row.last], - next.air_temperature_max = met.nxt$dat.source$air_temperature_maximum[1], - next.air_temperature_min = met.nxt$dat.source$air_temperature_minimum[1], - next.precipitation_flux = met.nxt$dat.source$precipitation_flux[1], - next.surface_downwelling_shortwave_flux_in_air = met.nxt$dat.source$surface_downwelling_shortwave_flux_in_air[1], - next.surface_downwelling_longwave_flux_in_air = met.nxt$dat.source$surface_downwelling_longwave_flux_in_air[1], - next.air_pressure = met.nxt$dat.source$air_pressure[1], - next.specific_humidity = met.nxt$dat.source$specific_humidity[1], - next.wind_speed = met.nxt$dat.source$wind_speed[1]) - dat.nxt <- rbind(dat.nxt[2:nrow(dat.nxt),], dat.nxt2) + dat.nxt2 <- data.frame( + year = met.nxt$dat.train$time$Year[row.last], + doy = met.nxt$dat.train$time$DOY[row.last], + next.air_temperature_max = met.nxt$dat.source$air_temperature_maximum[1], + next.air_temperature_min = met.nxt$dat.source$air_temperature_minimum[1], + next.precipitation_flux = met.nxt$dat.source$precipitation_flux[1], + next.surface_downwelling_shortwave_flux_in_air = met.nxt$dat.source$surface_downwelling_shortwave_flux_in_air[1], + next.surface_downwelling_longwave_flux_in_air = met.nxt$dat.source$surface_downwelling_longwave_flux_in_air[1], + next.air_pressure = met.nxt$dat.source$air_pressure[1], + next.specific_humidity = met.nxt$dat.source$specific_humidity[1], + next.wind_speed = met.nxt$dat.source$wind_speed[1] + ) + dat.nxt <- rbind(dat.nxt[2:nrow(dat.nxt), ], dat.nxt2) } - + # Merging the next into our ensemble data - dat.ens <- merge(dat.ens, dat.nxt, all.x=T) - - dat.ens <- dat.ens[order(dat.ens$date),] + dat.ens <- merge(dat.ens, dat.nxt, all.x = T) + + dat.ens <- dat.ens[order(dat.ens$date), ] # ------------------------------ - - # ----------------------------------- - # 2. Predict met vars for each ensemble member + + # ----------------------------------- + # 2. 
Predict met vars for each ensemble member # ----------------------------------- - ens.sims <- lm_ensemble_sims(dat.mod = dat.ens, n.ens = n.ens, - path.model = file.path(lm.models.base), lags.list = NULL, - lags.init = lags.init, - direction.filter=direction.filter, - dat.train = met.out$dat.train, precip.distribution=precip.dist, - force.sanity=force.sanity, sanity.tries=sanity.tries, seed=seed, print.progress=F) - + ens.sims <- lm_ensemble_sims( + dat.mod = dat.ens, n.ens = n.ens, + path.model = file.path(lm.models.base), lags.list = NULL, + lags.init = lags.init, + direction.filter = direction.filter, + dat.train = met.out$dat.train, precip.distribution = precip.dist, + force.sanity = force.sanity, sanity.tries = sanity.tries, seed = seed, print.progress = F + ) + # ----------------------------------- # ----------------------------------- # Set up the lags for the next year # ----------------------------------- - for(v in names(ens.sims)) { - lag.use <- ifelse(direction.filter=="backward", 1, nrow(ens.sims[[v]])) - lags.init[[v]] <- data.frame(ens.sims[[v]][lag.use,]) + for (v in names(ens.sims)) { + lag.use <- ifelse(direction.filter == "backward", 1, nrow(ens.sims[[v]])) + lags.init[[v]] <- data.frame(ens.sims[[v]][lag.use, ]) } # ----------------------------------- - + # ----------------------------------- # Save as netcdf file # ----------------------------------- # Set up the time dimension for this year hrs.now <- as.numeric(difftime(dat.ens$date, paste0(yrs.tdm[y], "-01-01"), - tz = "GMT", units = "hour")) + tz = "GMT", units = "hour" + )) # Write each year for each ensemble member into its own .nc file - lat <- ncdf4::ncdim_def(name = "latitude", units = "degree_north", - vals = lat.in, create_dimvar = TRUE) - lon <- ncdf4::ncdim_def(name = "longitude", units = "degree_east", - vals = lon.in, create_dimvar = TRUE) + lat <- ncdf4::ncdim_def( + name = "latitude", units = "degree_north", + vals = lat.in, create_dimvar = TRUE + ) + lon <- ncdf4::ncdim_def( + name = "longitude", units = "degree_east", + vals = lon.in, create_dimvar = TRUE + ) ntime <- nrow(dat.ens) # diy <- PEcAn.utils::days_in_year(yrs.tdm[y]) diy <- ifelse(lubridate::leap_year(yrs.tdm[y]), 366, 365) days_elapsed <- (seq_len(ntime) * diy / ntime) - (0.5 * diy / ntime) - time <- ncdf4::ncdim_def(name = "time", units = paste0("days since ", - yrs.tdm[y], "-01-01T00:00:00Z"), vals = as.array(days_elapsed), create_dimvar = TRUE, - unlim = TRUE) + time <- ncdf4::ncdim_def( + name = "time", units = paste0( + "days since ", + yrs.tdm[y], "-01-01T00:00:00Z" + ), vals = as.array(days_elapsed), create_dimvar = TRUE, + unlim = TRUE + ) dim <- list(lat, lon, time) var.list <- list() for (j in seq_along(nc.info$CF.name)) { - var.list[[j]] <- ncdf4::ncvar_def(name = as.character(nc.info$CF.name[j]), - units = as.character(nc.info$units[j]), dim = dim, missval = -9999, - verbose = verbose) + var.list[[j]] <- ncdf4::ncvar_def( + name = as.character(nc.info$CF.name[j]), + units = as.character(nc.info$units[j]), dim = dim, missval = -9999, + verbose = verbose + ) } # End j loop for (i in seq_len(n.ens)) { - df <- data.frame(matrix(ncol = length(nc.info$CF.name), nrow = nrow(dat.ens))) - colnames(df) <- nc.info$CF.name - for (j in nc.info$CF.name) { - # ens.sims[[j]][["X1"]] - if(n.ens>1){ - e <- paste0("X", i) - df[,paste(j)] <- ens.sims[[j]][[e]] - } else { - df[,paste(j)] <- ens.sims[[j]] - } + df <- data.frame(matrix(ncol = length(nc.info$CF.name), nrow = nrow(dat.ens))) + colnames(df) <- nc.info$CF.name + for (j in 
nc.info$CF.name) { + # ens.sims[[j]][["X1"]] + if (n.ens > 1) { + e <- paste0("X", i) + df[, paste(j)] <- ens.sims[[j]][[e]] + } else { + df[, paste(j)] <- ens.sims[[j]] } + } - df <- df[, c("air_temperature", "precipitation_flux", "surface_downwelling_shortwave_flux_in_air", - "surface_downwelling_longwave_flux_in_air", "air_pressure", - "specific_humidity", "wind_speed")] - colnames(df) <- nc.info$CF.name + df <- df[, c( + "air_temperature", "precipitation_flux", "surface_downwelling_shortwave_flux_in_air", + "surface_downwelling_longwave_flux_in_air", "air_pressure", + "specific_humidity", "wind_speed" + )] + colnames(df) <- nc.info$CF.name - # Set up the home folder - out.ens <- file.path(outfolder, paste(in.prefix, ens.labs[i], sep=".")) - dir.create(out.ens, showWarnings = FALSE, recursive = TRUE) - - loc.file <- file.path(out.ens, paste(in.prefix, ens.labs[i], stringr::str_pad(yrs.tdm[y], 4, "left", pad="0"), "nc", sep=".")) - loc <- ncdf4::nc_create(filename = loc.file, vars = var.list, verbose = verbose) + # Set up the home folder + out.ens <- file.path(outfolder, paste(in.prefix, ens.labs[i], sep = ".")) + dir.create(out.ens, showWarnings = FALSE, recursive = TRUE) - for (j in nc.info$CF.name) { - ncdf4::ncvar_put(nc = loc, varid = as.character(j), vals = df[[j]][seq_len(nrow(df))]) - } - ncdf4::nc_close(loc) - } # End writing ensemble members - if(print.progress==TRUE) utils::setTxtProgressBar(pb, y) + loc.file <- file.path(out.ens, paste(in.prefix, ens.labs[i], stringr::str_pad(yrs.tdm[y], 4, "left", pad = "0"), "nc", sep = ".")) + loc <- ncdf4::nc_create(filename = loc.file, vars = var.list, verbose = verbose) + + for (j in nc.info$CF.name) { + ncdf4::ncvar_put(nc = loc, varid = as.character(j), vals = df[[j]][seq_len(nrow(df))]) + } + ncdf4::nc_close(loc) + } # End writing ensemble members + if (print.progress == TRUE) utils::setTxtProgressBar(pb, y) # print(paste0("finished year ", yrs.tdm[y])) # ----------------------------------- - + rm(met.out, dat.ens, met.nxt, dat.nxt, dat.nxt2, ens.sims) - } # End year loop - msg.done <- paste("Temporal Downscaling Complete:", in.prefix, min(yrs.tdm), "-", max(yrs.tdm), sep=" ") + msg.done <- paste("Temporal Downscaling Complete:", in.prefix, min(yrs.tdm), "-", max(yrs.tdm), sep = " ") return(msg.done) } # End function diff --git a/modules/data.atmosphere/R/tdm_save_betas.R b/modules/data.atmosphere/R/tdm_save_betas.R index d8054d7955f..8acc696fd51 100644 --- a/modules/data.atmosphere/R/tdm_save_betas.R +++ b/modules/data.atmosphere/R/tdm_save_betas.R @@ -1,6 +1,6 @@ ##' TDM Save Betas ##' Saves betas that are calculated during gen.subdaily.models() -# ----------------------------------- +# ----------------------------------- # Description # ----------------------------------- ##' @title save.betas @@ -8,7 +8,7 @@ ##' @author Christy Rollinson, James Simkins ##' @description Function to save betas as a .nc file. 
This is utilized in ##' gen.subdaily.models() when linear regression models are created -# ----------------------------------- +# ----------------------------------- # Parameters # ----------------------------------- ##' @param model.out list linear regression model output @@ -20,24 +20,27 @@ # Begin Function #---------------------------------------------------------------------- save.betas <- function(model.out, betas, outfile) { - var.list <- list() for (v in names(model.out)) { # Note: Need a separate list of coefficients for each variable - dimY <- ncdf4::ncdim_def(paste0("coeffs_", v), units = "unitless", - longname = "model.out coefficients", vals = 1:ncol(model.out[[v]][[betas]])) - dimX <- ncdf4::ncdim_def("random", units = "unitless", longname = "random betas", - vals = 1:nrow(model.out[[v]][[betas]])) - - var.list[[v]] <- ncdf4::ncvar_def(v, units = "coefficients", dim = list(dimX, - dimY), longname = paste0("day ", v, " model.out coefficients")) + dimY <- ncdf4::ncdim_def(paste0("coeffs_", v), + units = "unitless", + longname = "model.out coefficients", vals = 1:ncol(model.out[[v]][[betas]]) + ) + dimX <- ncdf4::ncdim_def("random", + units = "unitless", longname = "random betas", + vals = 1:nrow(model.out[[v]][[betas]]) + ) + + var.list[[v]] <- ncdf4::ncvar_def(v, units = "coefficients", dim = list( + dimX, + dimY + ), longname = paste0("day ", v, " model.out coefficients")) } - + nc <- ncdf4::nc_create(outfile, var.list) for (v in names(model.out)) { ncdf4::ncvar_put(nc, var.list[[v]], model.out[[v]][[betas]]) } ncdf4::nc_close(nc) - - -} \ No newline at end of file +} diff --git a/modules/data.atmosphere/R/tdm_save_model.R b/modules/data.atmosphere/R/tdm_save_model.R index 9435a8352d1..a0ab903f66e 100644 --- a/modules/data.atmosphere/R/tdm_save_model.R +++ b/modules/data.atmosphere/R/tdm_save_model.R @@ -1,6 +1,6 @@ ##' TDM Save Models ##' Saves models that are created during gen.subdaily.models() -# ----------------------------------- +# ----------------------------------- # Description # ----------------------------------- ##' @title save.model @@ -8,7 +8,7 @@ ##' @author Christy Rollinson, James Simkins ##' @description Function to save models as a .nc file. This is utilized in ##' gen.subdaily.models() when linear regression models are created -# ----------------------------------- +# ----------------------------------- # Parameters # ----------------------------------- ##' @param model.out list linear regression model output @@ -20,12 +20,10 @@ # Begin Function #---------------------------------------------------------------------- save.model <- function(model.out, model, outfile) { - mod.list <- list() for (v in names(model.out)) { mod.list[[v]] <- model.out[[v]][[model]] } - + save(mod.list, file = outfile) } - diff --git a/modules/data.atmosphere/R/tdm_subdaily_pred.R b/modules/data.atmosphere/R/tdm_subdaily_pred.R index a7bca5cb515..5fb12e5369c 100644 --- a/modules/data.atmosphere/R/tdm_subdaily_pred.R +++ b/modules/data.atmosphere/R/tdm_subdaily_pred.R @@ -1,6 +1,6 @@ ##' Subdaily Prediction ##' Pulls information from linear regression models to predict subdaily meteorology -# ----------------------------------- +# ----------------------------------- # Description # ----------------------------------- ##' @title subdaily_pred @@ -10,7 +10,7 @@ ##' and are used to predict subdaily meteorology. This function is called in ##' lm_ensemble_sims() to downscale a meteorology product. 
##' Linear regression models are created in gen.subdaily.models() -# ----------------------------------- +# ----------------------------------- # Parameters # ----------------------------------- ##' @param newdata dataframe with data to be downscaled @@ -25,60 +25,58 @@ #---------------------------------------------------------------------- # Begin Function #---------------------------------------------------------------------- -subdaily_pred <- function(newdata, model.predict, Rbeta, resid.err = FALSE, model.resid = NULL, +subdaily_pred <- function(newdata, model.predict, Rbeta, resid.err = FALSE, model.resid = NULL, Rbeta.resid = NULL, n.ens) { - - err.resid <- 0 # dummy residual error term; if we want to add residual error, we're modeling it by hour + err.resid <- 0 # dummy residual error term; if we want to add residual error, we're modeling it by hour df.hr <- data.frame(hour = model.predict$xlev[[1]]) - df.hr[,"as.ordered(hour)"] <- as.ordered(df.hr$hour) - + df.hr[, "as.ordered(hour)"] <- as.ordered(df.hr$hour) + piv <- as.numeric(which(!is.na(model.predict$coef))) - - model.predict$factors[model.predict$factors=="as.ordered(hour)"] <- "hour" - m <- newdata[,model.predict$factors] - m[,"as.ordered(hour)"] <- as.ordered(m$hour) + + model.predict$factors[model.predict$factors == "as.ordered(hour)"] <- "hour" + m <- newdata[, model.predict$factors] + m[, "as.ordered(hour)"] <- as.ordered(m$hour) m$hour <- as.numeric(m$hour) # Adding hours to make sure prediction works okay # Note: This really messes with the order of things! - if(length(unique(df.hr$hour))!= length(unique(m$hour))){ + if (length(unique(df.hr$hour)) != length(unique(m$hour))) { m$ens <- newdata$ens - - m <- merge(m, df.hr, all=T) - + + m <- merge(m, df.hr, all = T) + # Ordering the newdata in the same way as m (by hour) - m <- m[order(m$ens, m$hour),] + m <- m[order(m$ens, m$hour), ] # newdata <- newdata[order(newdata$hour),] - + # # Fixing the ordering so that it comes back looking like newdata # dat.sim <- dat.sim[order(dat.sim$ens, dat.sim$hour),] # newdata <- newdata[order(newdata$ens, newdata$hour),] - } - - Xp <- stats::model.matrix(eval(model.predict$formula), m, contrasts.arg=model.predict$contr) + } + + Xp <- stats::model.matrix(eval(model.predict$formula), m, contrasts.arg = model.predict$contr) if (resid.err == TRUE) { newdata$resid <- 99999 - resid.piv <- as.numeric(which(!is.na(model.resid$coef))) - - model.resid$factors[model.resid$factors=="as.ordered(hour)"] <- "hour" - resid.m <- newdata[,model.resid$factors] - resid.m[,"as.ordered(hour)"] <- resid.m$hour - if(length(df.hr$hour)!= length(resid.m$hour)) resid.m <- merge(resid.m, df.hr, all=T) - Xp.res <- stats::model.matrix(eval(model.resid$formula), resid.m, contrasts.arg=model.resid$contr) + resid.piv <- as.numeric(which(!is.na(model.resid$coef))) + + model.resid$factors[model.resid$factors == "as.ordered(hour)"] <- "hour" + resid.m <- newdata[, model.resid$factors] + resid.m[, "as.ordered(hour)"] <- resid.m$hour + if (length(df.hr$hour) != length(resid.m$hour)) resid.m <- merge(resid.m, df.hr, all = T) + Xp.res <- stats::model.matrix(eval(model.resid$formula), resid.m, contrasts.arg = model.resid$contr) err.resid <- Xp.res[, resid.piv] %*% t(Rbeta.resid) } # End residual error - - if(length(piv)==ncol(Rbeta)){ + + if (length(piv) == ncol(Rbeta)) { dat.sim <- Xp[, piv] %*% t(Rbeta) + err.resid } else { # dat.sim <- Xp[,piv] %*% t(Rbeta[,piv]) + err.resid - dat.sim <- Xp[,piv] %*% t(matrix(Rbeta[,piv], nrow=nrow(Rbeta))) + err.resid + dat.sim <- Xp[, 
piv] %*% t(matrix(Rbeta[, piv], nrow = nrow(Rbeta))) + err.resid } - - - + + + return(dat.sim) - -} \ No newline at end of file +} diff --git a/modules/data.atmosphere/R/tdm_temporal_downscale_functions.R b/modules/data.atmosphere/R/tdm_temporal_downscale_functions.R index 76331439a08..c2d525238b5 100644 --- a/modules/data.atmosphere/R/tdm_temporal_downscale_functions.R +++ b/modules/data.atmosphere/R/tdm_temporal_downscale_functions.R @@ -1,20 +1,20 @@ ##' Temporal Downscale Functions ##' Met variable functions that are called in gen.subdaily.models and predict.subdaily.workflow -# ----------------------------------- +# ----------------------------------- # Description # ----------------------------------- ##' @title temporal_downscale_functions ##' @family tdm - Temporally Downscale Meteorology ##' @author Christy Rollinson, James Simkins ##' @description This function contains the functions that do the heavy lifting in gen.subdaily.models() -##' and predict.subdaily.workflow(). Individual variable functions actually generate the models -##' and betas from the dat.train_file and save them in the output file. save.model() and -##' save.betas() are helper functions that save the linear regression model output to a -##' specific location. In the future, we should only save the data that we actually use from the +##' and predict.subdaily.workflow(). Individual variable functions actually generate the models +##' and betas from the dat.train_file and save them in the output file. save.model() and +##' save.betas() are helper functions that save the linear regression model output to a +##' specific location. In the future, we should only save the data that we actually use from the ##' linear regression model because this is a large file. predict.met() is called from -##' predict.subdaily.workflow() and references the linear regression model output to -##' predict the ensemble data. -# ----------------------------------- +##' predict.subdaily.workflow() and references the linear regression model output to +##' predict the ensemble data. +# ----------------------------------- # Parameters # ----------------------------------- ##' @param dat.train - training data generated by tdm_nc2dat.train.R @@ -35,210 +35,249 @@ #---------------------------------------------------------------------- # Begin Function #---------------------------------------------------------------------- -temporal.downscale.functions <- function(dat.train, n.beta, day.window, - resids = FALSE, parallel = FALSE, n.cores = NULL, seed = format(Sys.time(), "%m%d"), outfolder, print.progress=FALSE, ...) { - - if(print.progress==TRUE){ - pb.index <- 1 - pb <- utils::txtProgressBar(min = 1, max = 8, style = 3) - utils::setTxtProgressBar(pb, pb.index) +temporal.downscale.functions <- function( + dat.train, n.beta, day.window, + resids = FALSE, parallel = FALSE, n.cores = NULL, seed = format(Sys.time(), "%m%d"), outfolder, print.progress = FALSE, ...) 
{ + if (print.progress == TRUE) { + pb.index <- 1 + pb <- utils::txtProgressBar(min = 1, max = 8, style = 3) + utils::setTxtProgressBar(pb, pb.index) + } + # Declare the variables of interest that will be called in the + # overarching loop + vars.list <- c( + "surface_downwelling_shortwave_flux_in_air", "air_temperature", + "precipitation_flux", "surface_downwelling_longwave_flux_in_air", + "air_pressure", "specific_humidity", "wind_speed" + ) + + # Data info that will be used to help organize dataframe for + # downscaling + dat.info <- c( + "time.day", "year", "doy", "hour", "air_temperature_max.day", + "air_temperature_min.day", "precipitation_flux.day", "surface_downwelling_shortwave_flux_in_air.day", + "surface_downwelling_longwave_flux_in_air.day", "air_pressure.day", + "specific_humidity.day", "wind_speed.day", "next.air_temperature_max", + "next.air_temperature_min", "next.precipitation_flux", "next.surface_downwelling_shortwave_flux_in_air", + "next.surface_downwelling_longwave_flux_in_air", "next.air_pressure", + "next.specific_humidity", "next.wind_speed" + ) + + # ------ Beginning of Downscaling For Loop + + for (v in vars.list) { + # Define the path + path.out <- file.path(outfolder, v) + if (!dir.exists(path.out)) dir.create(path.out, recursive = T) + + # Set our seed + set.seed(seed) + + # Create empty lists + dat.list <- list() + mod.out <- list() + + # Utilize window days for insufficient datasets + for (i in unique(dat.train$doy)) { + if (i >= 365) { + # Lump leap day in with non-leap Dec 31 + dat.list[[paste(i)]] <- dat.train[dat.train$doy >= 365 - + day.window / 2 | dat.train$doy <= day.window / 2, ] + } else if (i == 1) { + dat.list[[paste(i)]] <- dat.train[dat.train$doy <= i + + day.window / 2 | dat.train$doy >= 365 - day.window / 2, ] + } else { + dat.list[[paste(i)]] <- dat.train[dat.train$doy >= i - + day.window / 2 & dat.train$doy <= i + day.window / 2, ] + } } - # Declare the variables of interest that will be called in the - # overarching loop - vars.list <- c("surface_downwelling_shortwave_flux_in_air", "air_temperature", - "precipitation_flux", "surface_downwelling_longwave_flux_in_air", - "air_pressure", "specific_humidity", "wind_speed") - - # Data info that will be used to help organize dataframe for - # downscaling - dat.info <- c("time.day", "year", "doy", "hour", "air_temperature_max.day", - "air_temperature_min.day", "precipitation_flux.day", "surface_downwelling_shortwave_flux_in_air.day", - "surface_downwelling_longwave_flux_in_air.day", "air_pressure.day", - "specific_humidity.day", "wind_speed.day", "next.air_temperature_max", - "next.air_temperature_min", "next.precipitation_flux", "next.surface_downwelling_shortwave_flux_in_air", - "next.surface_downwelling_longwave_flux_in_air", "next.air_pressure", - "next.specific_humidity", "next.wind_speed") - - # ------ Beginning of Downscaling For Loop - - for (v in vars.list) { - - # Define the path - path.out <- file.path(outfolder, v) - if (!dir.exists(path.out)) dir.create(path.out, recursive = T) - - # Set our seed - set.seed(seed) - - # Create empty lists - dat.list <- list() - mod.out <- list() - - # Utilize window days for insufficient datasets - for (i in unique(dat.train$doy)) { - if (i >= 365) { - # Lump leap day in with non-leap Dec 31 - dat.list[[paste(i)]] <- dat.train[dat.train$doy >= 365 - - day.window/2 | dat.train$doy <= day.window/2, ] - } else if (i == 1) { - dat.list[[paste(i)]] <- dat.train[dat.train$doy <= i + - day.window/2 | dat.train$doy >= 365 - day.window/2, ] - } else { - 
dat.list[[paste(i)]] <- dat.train[dat.train$doy >= i - - day.window/2 & dat.train$doy <= i + day.window/2, ] - } - } - - - # ----- generate the mod.out file - if (parallel) { - warning("Running model calculation in parallel. This will probably crash if you do not have access to a LOT of memory!") - if (! requireNamespace("parallel", quietly = TRUE)) { - PEcAn.logger::logger.severe( - "Cannot find package 'parallel', ", - "which is needed for parallel model calculations. ", - "Either set parallel = FALSE, ", - "or run 'install.packages(\"parallel\")' and try again.") - } - - if (v == "surface_downwelling_shortwave_flux_in_air") { - mod.out <- parallel::mclapply(dat.list, model.train, mc.cores = n.cores, - n.beta = n.beta, resids = resids, threshold = stats::quantile(dat.train[dat.train$surface_downwelling_shortwave_flux_in_air > - 0, "surface_downwelling_shortwave_flux_in_air"], 0.05)) - } else { - mod.out <- parallel::mclapply(dat.list, model.train, mc.cores = n.cores, - n.beta = n.beta, resids = resids) - } - - - - # Use a loop to save each day of year independently - for (i in names(mod.out)) { - # Save the betas as .nc - outfile <- file.path(path.out, paste0("betas_", v, "_", - i, ".nc")) - dimY <- ncdf4::ncdim_def(paste0("coeffs_", i), units = "unitless", - longname = "model.out coefficients", vals = 1:ncol(mod.out[[i]][["betas"]])) - dimX <- ncdf4::ncdim_def("random", units = "unitless", - longname = "random betas", vals = 1:nrow(mod.out[[i]][["betas"]])) - var.list <- ncdf4::ncvar_def(i, units = "coefficients", - dim = list(dimX, dimY), longname = paste0("day ", i, - " model.out coefficients")) - nc <- ncdf4::nc_create(outfile, var.list) - ncdf4::ncvar_put(nc, var.list, mod.out[[i]][["betas"]]) - ncdf4::nc_close(nc) - - # Save the model as a .Rdata - mod.save <- list() - mod.save$call <- mod.out[[i]]$model$call - mod.save$coef <- stats::coef(mod.out[[i]]$model) - mod.save$formula <- parse(text=mod.out[[i]]$model$call[[2]][c(1,3)]) - mod.save$factors <- rownames(attr(mod.out[[i]]$model$terms, "factors")) - mod.save$xlev <- mod.out[[i]]$model$xlevels - mod.save$contr <- mod.out[[i]]$model$contrasts - save(mod.save, file = file.path(path.out, paste0("model_", - v, "_", i, ".Rdata"))) - - if(resids) { - # Save the betas as .nc - outfile <- file.path(path.out, paste0("resids_betas_", v, "_", - i, ".nc")) - dimY <- ncdf4::ncdim_def(paste0("coeffs_", i), units = "unitless", - longname = "model.out coefficients", vals = 1:ncol(mod.out[[i]][["betas.resid"]])) - dimX <- ncdf4::ncdim_def("random", units = "unitless", - longname = "random betas", vals = 1:nrow(mod.out[[i]][["betas.resid"]])) - var.list <- ncdf4::ncvar_def(i, units = "coefficients", - dim = list(dimX, dimY), longname = paste0("day ", i, - "resid model.out coefficients")) - nc <- ncdf4::nc_create(outfile, var.list) - ncdf4::ncvar_put(nc, var.list, mod.out[[i]][["betas.resid"]]) - ncdf4::nc_close(nc) - - # Save the model as a .Rdata - mod.save <- list() - mod.save$call <- mod.out[[i]]$model.resid$call - mod.save$coef <- stats::coef(mod.out[[i]]$model.resid) - mod.save$formula <- parse(text=mod.out[[i]]$model.resid$call[[2]][c(1,3)]) - mod.save$factors <- rownames(attr(mod.out[[i]]$model.resid$terms, "factors")) - mod.save$xlev <- mod.out[[i]]$model.resid$xlevels - mod.save$contr <- mod.out[[i]]$model.resid$contrasts - save(mod.save, file = file.path(path.out, paste0("resids_model_", - v, "_", i, ".Rdata"))) - } # End resids case - } # End save loop - - } else { # Doing this is series - for (i in names(dat.list)) { - - if (v == 
"surface_downwelling_shortwave_flux_in_air") { - mod.out <- model.train(dat.subset = dat.list[[i]], n.beta = n.beta, v = v, - threshold = stats::quantile(dat.train[dat.train$surface_downwelling_shortwave_flux_in_air > 0, "surface_downwelling_shortwave_flux_in_air"], 0.05), - resids = resids) - } else { - mod.out <- model.train(dat.subset = dat.list[[i]], n.beta = n.beta, v = v, - resids = resids) - } - - - # Save the betas as .nc - outfile <- file.path(path.out, paste0("betas_", v, "_", i, ".nc")) - dimY <- ncdf4::ncdim_def(paste0("coeffs_", i), units = "unitless", - longname = "model.out coefficients", vals = 1:ncol(mod.out[["betas"]])) - dimX <- ncdf4::ncdim_def("random", units = "unitless", - longname = "random betas", vals = 1:nrow(mod.out[["betas"]])) - var.list <- ncdf4::ncvar_def(paste(i), units = "coefficients", dim = list(dimX, dimY), - longname = paste0("day ", i, " model.out coefficients")) - nc <- ncdf4::nc_create(outfile, var.list) - ncdf4::ncvar_put(nc, var.list, mod.out[["betas"]]) - ncdf4::nc_close(nc) - - # Save the model as a .Rdata with only the info we need to recreate it - # (saves a lot of space & memory) - mod.save <- list() - mod.save$call <- mod.out$model$call - mod.save$coef <- stats::coef(mod.out$model) - mod.save$formula <- parse(text=mod.out$model$call[[2]][c(1,3)]) - mod.save$factors <- rownames(attr(mod.out$model$terms, "factors")) - mod.save$xlev <- mod.out$model$xlevels - mod.save$contr <- mod.out$model$contrasts - save(mod.save, file = file.path(path.out, paste0("model_", v, "_", i, ".Rdata"))) - - if(resids) { - # Save the betas as .nc - outfile <- file.path(path.out, paste0("resids_betas_", v, "_", i, ".nc")) - dimY <- ncdf4::ncdim_def(paste0("coeffs_", i), units = "unitless", - longname = "model.out coefficients", vals = 1:ncol(mod.out[["betas.resid"]])) - dimX <- ncdf4::ncdim_def("random", units = "unitless", - longname = "random betas", vals = 1:nrow(mod.out[["betas.resid"]])) - var.list <- ncdf4::ncvar_def(i, units = "coefficients", dim = list(dimX, dimY), - longname = paste0("day ", i, "resid model.out coefficients")) - nc <- ncdf4::nc_create(outfile, var.list) - ncdf4::ncvar_put(nc, var.list, mod.out[["betas.resid"]]) - ncdf4::nc_close(nc) - - # Save the model as a .Rdata - mod.save <- list() - mod.save$call <- mod.out$model.resid$call - mod.save$coef <- stats::coef(mod.out$model.resid) - mod.save$formula <- parse(text=mod.out$model.resid$call[[2]][c(1,3)]) - mod.save$factors <- rownames(attr(mod.out$model.resid$terms, "factors")) - mod.save$xlev <- mod.out$model.resid$xlevels - mod.save$contr <- mod.out$model.resid$contrasts - save(mod.save, file = file.path(path.out, paste0("resids_model_", v, "_", i, ".Rdata"))) - } # End resids case - - } # End day loop - } # End if else case - - if(print.progress==TRUE){ - pb.index <- pb.index + 1 - utils::setTxtProgressBar(pb, pb.index) + + + # ----- generate the mod.out file + if (parallel) { + warning("Running model calculation in parallel. This will probably crash if you do not have access to a LOT of memory!") + if (!requireNamespace("parallel", quietly = TRUE)) { + PEcAn.logger::logger.severe( + "Cannot find package 'parallel', ", + "which is needed for parallel model calculations. ", + "Either set parallel = FALSE, ", + "or run 'install.packages(\"parallel\")' and try again." 
+ ) + } + + if (v == "surface_downwelling_shortwave_flux_in_air") { + mod.out <- parallel::mclapply(dat.list, model.train, + mc.cores = n.cores, + n.beta = n.beta, resids = resids, threshold = stats::quantile(dat.train[dat.train$surface_downwelling_shortwave_flux_in_air > + 0, "surface_downwelling_shortwave_flux_in_air"], 0.05) + ) + } else { + mod.out <- parallel::mclapply(dat.list, model.train, + mc.cores = n.cores, + n.beta = n.beta, resids = resids + ) + } + + + + # Use a loop to save each day of year independently + for (i in names(mod.out)) { + # Save the betas as .nc + outfile <- file.path(path.out, paste0( + "betas_", v, "_", + i, ".nc" + )) + dimY <- ncdf4::ncdim_def(paste0("coeffs_", i), + units = "unitless", + longname = "model.out coefficients", vals = 1:ncol(mod.out[[i]][["betas"]]) + ) + dimX <- ncdf4::ncdim_def("random", + units = "unitless", + longname = "random betas", vals = 1:nrow(mod.out[[i]][["betas"]]) + ) + var.list <- ncdf4::ncvar_def(i, + units = "coefficients", + dim = list(dimX, dimY), longname = paste0( + "day ", i, + " model.out coefficients" + ) + ) + nc <- ncdf4::nc_create(outfile, var.list) + ncdf4::ncvar_put(nc, var.list, mod.out[[i]][["betas"]]) + ncdf4::nc_close(nc) + + # Save the model as a .Rdata + mod.save <- list() + mod.save$call <- mod.out[[i]]$model$call + mod.save$coef <- stats::coef(mod.out[[i]]$model) + mod.save$formula <- parse(text = mod.out[[i]]$model$call[[2]][c(1, 3)]) + mod.save$factors <- rownames(attr(mod.out[[i]]$model$terms, "factors")) + mod.save$xlev <- mod.out[[i]]$model$xlevels + mod.save$contr <- mod.out[[i]]$model$contrasts + save(mod.save, file = file.path(path.out, paste0( + "model_", + v, "_", i, ".Rdata" + ))) + + if (resids) { + # Save the betas as .nc + outfile <- file.path(path.out, paste0( + "resids_betas_", v, "_", + i, ".nc" + )) + dimY <- ncdf4::ncdim_def(paste0("coeffs_", i), + units = "unitless", + longname = "model.out coefficients", vals = 1:ncol(mod.out[[i]][["betas.resid"]]) + ) + dimX <- ncdf4::ncdim_def("random", + units = "unitless", + longname = "random betas", vals = 1:nrow(mod.out[[i]][["betas.resid"]]) + ) + var.list <- ncdf4::ncvar_def(i, + units = "coefficients", + dim = list(dimX, dimY), longname = paste0( + "day ", i, + "resid model.out coefficients" + ) + ) + nc <- ncdf4::nc_create(outfile, var.list) + ncdf4::ncvar_put(nc, var.list, mod.out[[i]][["betas.resid"]]) + ncdf4::nc_close(nc) + + # Save the model as a .Rdata + mod.save <- list() + mod.save$call <- mod.out[[i]]$model.resid$call + mod.save$coef <- stats::coef(mod.out[[i]]$model.resid) + mod.save$formula <- parse(text = mod.out[[i]]$model.resid$call[[2]][c(1, 3)]) + mod.save$factors <- rownames(attr(mod.out[[i]]$model.resid$terms, "factors")) + mod.save$xlev <- mod.out[[i]]$model.resid$xlevels + mod.save$contr <- mod.out[[i]]$model.resid$contrasts + save(mod.save, file = file.path(path.out, paste0( + "resids_model_", + v, "_", i, ".Rdata" + ))) + } # End resids case + } # End save loop + } else { # Doing this is series + for (i in names(dat.list)) { + if (v == "surface_downwelling_shortwave_flux_in_air") { + mod.out <- model.train( + dat.subset = dat.list[[i]], n.beta = n.beta, v = v, + threshold = stats::quantile(dat.train[dat.train$surface_downwelling_shortwave_flux_in_air > 0, "surface_downwelling_shortwave_flux_in_air"], 0.05), + resids = resids + ) + } else { + mod.out <- model.train( + dat.subset = dat.list[[i]], n.beta = n.beta, v = v, + resids = resids + ) } - } # end of the variable for loop - -} # end of the function + # Save the betas 
as .nc + outfile <- file.path(path.out, paste0("betas_", v, "_", i, ".nc")) + dimY <- ncdf4::ncdim_def(paste0("coeffs_", i), + units = "unitless", + longname = "model.out coefficients", vals = 1:ncol(mod.out[["betas"]]) + ) + dimX <- ncdf4::ncdim_def("random", + units = "unitless", + longname = "random betas", vals = 1:nrow(mod.out[["betas"]]) + ) + var.list <- ncdf4::ncvar_def(paste(i), + units = "coefficients", dim = list(dimX, dimY), + longname = paste0("day ", i, " model.out coefficients") + ) + nc <- ncdf4::nc_create(outfile, var.list) + ncdf4::ncvar_put(nc, var.list, mod.out[["betas"]]) + ncdf4::nc_close(nc) + + # Save the model as a .Rdata with only the info we need to recreate it + # (saves a lot of space & memory) + mod.save <- list() + mod.save$call <- mod.out$model$call + mod.save$coef <- stats::coef(mod.out$model) + mod.save$formula <- parse(text = mod.out$model$call[[2]][c(1, 3)]) + mod.save$factors <- rownames(attr(mod.out$model$terms, "factors")) + mod.save$xlev <- mod.out$model$xlevels + mod.save$contr <- mod.out$model$contrasts + save(mod.save, file = file.path(path.out, paste0("model_", v, "_", i, ".Rdata"))) + if (resids) { + # Save the betas as .nc + outfile <- file.path(path.out, paste0("resids_betas_", v, "_", i, ".nc")) + dimY <- ncdf4::ncdim_def(paste0("coeffs_", i), + units = "unitless", + longname = "model.out coefficients", vals = 1:ncol(mod.out[["betas.resid"]]) + ) + dimX <- ncdf4::ncdim_def("random", + units = "unitless", + longname = "random betas", vals = 1:nrow(mod.out[["betas.resid"]]) + ) + var.list <- ncdf4::ncvar_def(i, + units = "coefficients", dim = list(dimX, dimY), + longname = paste0("day ", i, "resid model.out coefficients") + ) + nc <- ncdf4::nc_create(outfile, var.list) + ncdf4::ncvar_put(nc, var.list, mod.out[["betas.resid"]]) + ncdf4::nc_close(nc) + # Save the model as a .Rdata + mod.save <- list() + mod.save$call <- mod.out$model.resid$call + mod.save$coef <- stats::coef(mod.out$model.resid) + mod.save$formula <- parse(text = mod.out$model.resid$call[[2]][c(1, 3)]) + mod.save$factors <- rownames(attr(mod.out$model.resid$terms, "factors")) + mod.save$xlev <- mod.out$model.resid$xlevels + mod.save$contr <- mod.out$model.resid$contrasts + save(mod.save, file = file.path(path.out, paste0("resids_model_", v, "_", i, ".Rdata"))) + } # End resids case + } # End day loop + } # End if else case + if (print.progress == TRUE) { + pb.index <- pb.index + 1 + utils::setTxtProgressBar(pb, pb.index) + } + } # end of the variable for loop +} # end of the function diff --git a/modules/data.atmosphere/R/temporal.downscaling.R b/modules/data.atmosphere/R/temporal.downscaling.R index 21d10026dbb..5fd0ae6b132 100644 --- a/modules/data.atmosphere/R/temporal.downscaling.R +++ b/modules/data.atmosphere/R/temporal.downscaling.R @@ -1,4 +1,3 @@ - #' Temporal downscaling of daily or subdaily CF met data #' #' @param cfmet data frame with CF variables generated by \code{\link{load.cfmet}} @@ -11,25 +10,27 @@ #' @author David LeBauer cfmet.downscale.time <- function(cfmet, output.dt = 1, lat = lat, ...) 
{ ## time step - dt_hr <- as.numeric(round(difftime(cfmet$date[2], cfmet$date[1], units = "hours"))) + dt_hr <- as.numeric(round(difftime(cfmet$date[2], cfmet$date[1], units = "hours"))) if (dt_hr == output.dt) { downscaled.result <- cfmet } -# if("specific_humidity" %in% colnames(cfmet) & (!"relative_humidity" %in% colnames(cfmet))){ -# cfmet$relative_humidity <- cfmet[,list(qair2rh(qair = specific_humidity, -# temp = PEcAn.utils::ud_convert(air_temperature, "Kelvin", "Celsius"), -# press = PEcAn.utils::ud_convert(air_pressure, "Pa", "millibar"))] -# } + # if("specific_humidity" %in% colnames(cfmet) & (!"relative_humidity" %in% colnames(cfmet))){ + # cfmet$relative_humidity <- cfmet[,list(qair2rh(qair = specific_humidity, + # temp = PEcAn.utils::ud_convert(air_temperature, "Kelvin", "Celsius"), + # press = PEcAn.utils::ud_convert(air_pressure, "Pa", "millibar"))] + # } - if(dt_hr > output.dt & dt_hr <= 6) { + if (dt_hr > output.dt & dt_hr <= 6) { downscaled.result <- cfmet.downscale.subdaily(subdailymet = cfmet, output.dt = output.dt) } else if (dt_hr > 6 & dt_hr < 24) { # cfmet <- cfmet[,list(air_temperature_max = max(air_temperature), air_temperature_min = # min(air_temperature), ), by = 'year,doy']) dt_hr <- 24 - PEcAn.logger::logger.error("timestep of input met data is between 6 and 24 hours.\n", "PEcAn will automatically convert this to daily data\n", - "you should confirm validity of downscaling, in particular that min / max temperatures are realistic") + PEcAn.logger::logger.error( + "timestep of input met data is between 6 and 24 hours.\n", "PEcAn will automatically convert this to daily data\n", + "you should confirm validity of downscaling, in particular that min / max temperatures are realistic" + ) } if (dt_hr == 24) { @@ -41,7 +42,7 @@ cfmet.downscale.time <- function(cfmet, output.dt = 1, lat = lat, ...) 
{ } downscaled.result <- cfmet.downscale.daily(dailymet = cfmet, output.dt = output.dt, lat = lat) } else if (dt_hr > 24) { - PEcAn.logger::logger.error("only daily and sub-daily downscaling supported") + PEcAn.logger::logger.error("only daily and sub-daily downscaling supported") } return(downscaled.result) @@ -62,24 +63,24 @@ cfmet.downscale.subdaily <- function(subdailymet, output.dt = 1) { ## converting surface_downwelling_shortwave_flux_in_air from W/m2 avg to PPFD new.date <- subdailymet %>% dplyr::group_by(.data$year, .data$month, .data$day, .data$doy) %>% - dplyr::group_modify(~data.frame(hour = 0:(23 / output.dt) / output.dt)) + dplyr::group_modify(~ data.frame(hour = 0:(23 / output.dt) / output.dt)) new.date$date <- lubridate::ymd_h(paste(new.date$year, new.date$month, new.date$day, new.date$hour)) downscaled.result <- list() - tint <- nrow(new.date)/ nrow(subdailymet) - if(all(c("eastward_wind", "northward_wind") %in% colnames(subdailymet))){ - if(!"wind_speed" %in% colnames(subdailymet)){ + tint <- nrow(new.date) / nrow(subdailymet) + if (all(c("eastward_wind", "northward_wind") %in% colnames(subdailymet))) { + if (!"wind_speed" %in% colnames(subdailymet)) { subdailymet$wind_speed <- sqrt(subdailymet$northward_wind^2 + subdailymet$eastward_wind^2) } downscaled.result[["northward_wind"]] <- rep(subdailymet$northward_wind, each = tint) - downscaled.result[["eastward_wind"]] <- rep(subdailymet$eastward_wind, each = tint) - } else if (!'wind_speed' %in% colnames(subdailymet)){ - PEcAn.logger::logger.error("no wind speed data") + downscaled.result[["eastward_wind"]] <- rep(subdailymet$eastward_wind, each = tint) + } else if (!"wind_speed" %in% colnames(subdailymet)) { + PEcAn.logger::logger.error("no wind speed data") } downscaled.result[["wind_speed"]] <- rep(subdailymet$wind_speed, each = tint) - solarMJ <- PEcAn.utils::ud_convert(subdailymet$surface_downwelling_shortwave_flux_in_air, paste0("W ", tint, "h"), "MJ" ) + solarMJ <- PEcAn.utils::ud_convert(subdailymet$surface_downwelling_shortwave_flux_in_air, paste0("W ", tint, "h"), "MJ") PAR <- 0.486 * solarMJ ## Cambell and Norman 1998 p 151, ch 10 subdailymet$ppfd <- PEcAn.utils::ud_convert(PAR, "mol s", "micromol h") downscaled.result[["ppfd"]] <- subdailymet$ppfd @@ -87,13 +88,16 @@ cfmet.downscale.subdaily <- function(subdailymet, output.dt = 1) { downscaled.result[["surface_downwelling_shortwave_flux_in_air"]] <- subdailymet$surface_downwelling_shortwave_flux_in_air - for(var in c("air_pressure", "specific_humidity", - "precipitation_flux", "air_temperature", - "surface_downwelling_shortwave_flux_in_air", "ppfd", "relative_humidity")){ - if(var %in% colnames(subdailymet)){ + for (var in c( + "air_pressure", "specific_humidity", + "precipitation_flux", "air_temperature", + "surface_downwelling_shortwave_flux_in_air", "ppfd", "relative_humidity" + )) { + if (var %in% colnames(subdailymet)) { ## convert units from subdaily to hourly hrscale <- ifelse(var %in% c("surface_downwelling_shortwave_flux_in_air", "precipitation_flux"), - output.dt, 1) + output.dt, 1 + ) f <- stats::splinefun(as.double(subdailymet$date), (subdailymet[[var]] / hrscale), method = "monoH.FC") downscaled.result[[var]] <- f(as.double(new.date$date)) @@ -130,32 +134,36 @@ downscale_one_cfmet_day <- function(df, tseq, lat) { as.data.frame() %>% dplyr::mutate( Itot = .data$I.dir + .data$I.diff, - resC2 = (.data$Itot - min(.data$Itot)) / max(.data$Itot)) + resC2 = (.data$Itot - min(.data$Itot)) / max(.data$Itot) + ) rhscale <- (cos(2 * pi * (tseq - 10) / n) + 1) 
/ 2 data.frame( hour = tseq, solarR = rep( - df$surface_downwelling_shortwave_flux_in_air * 2.07 * 10^5 / 36000, - each = n) - * light$resC2, + df$surface_downwelling_shortwave_flux_in_air * 2.07 * 10^5 / 36000, + each = n + ) + * light$resC2, # Note: When dt >= 6, all times get the same T prediction Temp = df$tmin + (sin(2 * pi * (tseq - 10) / n) + 1) / - 2 * (df$tmax - df$tmin), + 2 * (df$tmax - df$tmin), relative_humidity = df$rhmin + rhscale * (df$rhmax - df$rhmin), # TODO: Why do we divide by n? # isn't precipitation_flux already an intensity? precipitation_flux = rep(df$precipitation_flux / n, each = n), - wind = rep(df$wind_speed, each = n)) %>% - dplyr::mutate( - # TODO: Computation of solarR above already multiplies by resC2. - # Is multiplying it again here really correct? - # That's how the old data.table version did it - # (once when computing `solarR` and again when computing `SolarR`), - # so keeping it until proven wrong. - downwelling_photosynthetic_photon_flux = .data$solarR * light$resC2) + wind = rep(df$wind_speed, each = n) + ) %>% + dplyr::mutate( + # TODO: Computation of solarR above already multiplies by resC2. + # Is multiplying it again here really correct? + # That's how the old data.table version did it + # (once when computing `solarR` and again when computing `SolarR`), + # so keeping it until proven wrong. + downwelling_photosynthetic_photon_flux = .data$solarR * light$resC2 + ) } @@ -176,8 +184,7 @@ downscale_one_cfmet_day <- function(df, tseq, lat) { ##' @return weather file with subdaily timesteps ##' @author David LeBauer cfmet.downscale.daily <- function(dailymet, output.dt = 1, lat) { - - tint <- 24/output.dt + tint <- 24 / output.dt tseq <- seq(from = 0, to = 23, by = output.dt) if (all(c("air_temperature_max", "air_temperature_min") %in% colnames(dailymet))) { @@ -187,11 +194,11 @@ cfmet.downscale.daily <- function(dailymet, output.dt = 1, lat) { colnames(dailymet) <- nm } - if (! 
"wind_speed" %in% colnames(dailymet)) { + if (!"wind_speed" %in% colnames(dailymet)) { if (all(c("eastward_wind", "northward_wind") %in% colnames(dailymet))) { dailymet$wind_speed <- sqrt(dailymet$northward_wind^2 + dailymet$eastward_wind^2) } else { - PEcAn.logger::logger.error("no wind_speed found in daily met dataset") + PEcAn.logger::logger.error("no wind_speed found in daily met dataset") } } @@ -201,19 +208,22 @@ cfmet.downscale.daily <- function(dailymet, output.dt = 1, lat) { qmax = rh2qair(rh = .data$relative_humidity / 100, T = .data$tmax), pressure = PEcAn.utils::ud_convert(.data$air_pressure, "Pa", "millibar"), rhmin = qair2rh(.data$qmin, .data$air_temperature, .data$pressure), - rhmax = qair2rh(.data$qmax, .data$air_temperature, .data$pressure)) %>% + rhmax = qair2rh(.data$qmax, .data$air_temperature, .data$pressure) + ) %>% dplyr::group_by(.data$year, .data$doy) %>% - dplyr::group_modify(~downscale_one_cfmet_day(.x, tseq, lat), .keep = TRUE) %>% + dplyr::group_modify(~ downscale_one_cfmet_day(.x, tseq, lat), .keep = TRUE) %>% dplyr::ungroup() %>% dplyr::mutate( - air_temperature = PEcAn.utils::ud_convert(.data$Temp, "kelvin", "celsius")) %>% + air_temperature = PEcAn.utils::ud_convert(.data$Temp, "kelvin", "celsius") + ) %>% dplyr::select( "year", "doy", "hour", "downwelling_photosynthetic_photon_flux", "air_temperature", "relative_humidity", "wind", - "precipitation_flux")) + "precipitation_flux" + )) } # cfmet.downscale.daily @@ -232,7 +242,6 @@ cfmet.downscale.daily <- function(dailymet, output.dt = 1, lat) { ##' @export ##' @author David Shaner LeBauer get.ncvector <- function(var, lati = lati, loni = loni, run.dates = run.dates, met.nc) { - start.idx <- c(latitude = lati, longitude = loni, time = run.dates$index[1]) count.idx <- c(latitude = 1, longitude = 1, time = nrow(run.dates)) dim.order <- sapply(met.nc$var$air_temperature$dim, function(x) x$name) diff --git a/modules/data.atmosphere/R/upscale_met.R b/modules/data.atmosphere/R/upscale_met.R index 6d7f3847c22..423e2c9cb50 100644 --- a/modules/data.atmosphere/R/upscale_met.R +++ b/modules/data.atmosphere/R/upscale_met.R @@ -3,7 +3,7 @@ ##' @param step integer step size ##' @return numeric of length length(x)/step ##' @details User should check that length(x) is an even multiple of step -step_means <- function(x, step){ +step_means <- function(x, step) { colMeans(matrix(x, nrow = step)) } @@ -20,12 +20,11 @@ step_means <- function(x, step){ ##' @param ... other arguments, currently ignored ##' @author James Simkins, Chris Black -upscale_met <- function(outfolder, input_met, resolution = 1/24, overwrite = FALSE, +upscale_met <- function(outfolder, input_met, resolution = 1 / 24, overwrite = FALSE, verbose = FALSE, ...) { - - loc.file = file.path(outfolder, paste("upscaled", basename(input_met), sep = ".")) - if (file.exists(loc.file) && !isTRUE(overwrite)){ - PEcAn.logger::logger.severe("Output file", loc.file, "already exists. To replace it, set overwrite = TRUE") + loc.file <- file.path(outfolder, paste("upscaled", basename(input_met), sep = ".")) + if (file.exists(loc.file) && !isTRUE(overwrite)) { + PEcAn.logger::logger.severe("Output file", loc.file, "already exists. 
To replace it, set overwrite = TRUE") } tem <- ncdf4::nc_open(input_met) @@ -43,13 +42,14 @@ upscale_met <- function(outfolder, input_met, resolution = 1/24, overwrite = FAL time_unit <- sub(" since.*", "", tem$dim$time$units) time_base <- lubridate::parse_date_time(sub(".*since ", "", tem$dim$time$units), - orders = c("ymdHMSz", "ymdHMS", "ymd")) + orders = c("ymdHMSz", "ymdHMS", "ymd") + ) time_data <- PEcAn.utils::ud_convert(tem$dim$time$vals, time_unit, "days") lat_data <- as.numeric(ncdf4::ncvar_get(tem, "latitude")) lon_data <- as.numeric(ncdf4::ncvar_get(tem, "longitude")) ncdf4::nc_close(tem) - + # Here's where the magic happens: find the stepsize that generates requested # output resolution, then take means of each variable in increments of stepsize. # N.B. Drops rows from the end of met_data if necessary to end at a full step. @@ -57,56 +57,68 @@ upscale_met <- function(outfolder, input_met, resolution = 1/24, overwrite = FAL stepsize <- round(nrow(met_data) / n_times, 0) rows_used <- nrow(met_data) - (nrow(met_data) %% stepsize) n_steps <- (rows_used %/% stepsize) - met_data <- met_data[seq_len(rows_used),] - upscaled_time = step_means(time_data[seq_len(rows_used)], step = stepsize) + met_data <- met_data[seq_len(rows_used), ] + upscaled_time <- step_means(time_data[seq_len(rows_used)], step = stepsize) upscale_data <- as.data.frame(lapply(met_data, step_means, step = stepsize)) - - if (!is.null(upscale_data$air_temperature) - && is.null(upscale_data$air_temperature_max) - && is.null(upscale_data$air_temperature_min)) { + + if (!is.null(upscale_data$air_temperature) && + is.null(upscale_data$air_temperature_max) && + is.null(upscale_data$air_temperature_min)) { for (step_i in seq_len(n_steps)) { upscale_data$air_temperature_max[step_i] <- max( - met_data$air_temperature[(step_i * stepsize - stepsize + 1):(step_i * stepsize)]) + met_data$air_temperature[(step_i * stepsize - stepsize + 1):(step_i * stepsize)] + ) upscale_data$air_temperature_min[step_i] <- min( - met_data$air_temperature[(step_i * stepsize - stepsize + 1):(step_i * stepsize)]) + met_data$air_temperature[(step_i * stepsize - stepsize + 1):(step_i * stepsize)] + ) } met_units$air_temperature_max <- met_units$air_temperature_min <- met_units$air_temperature } - lat <- ncdf4::ncdim_def(name = "latitude", units = "degree_north", vals = lat_data, - create_dimvar = TRUE) - lon <- ncdf4::ncdim_def(name = "longitude", units = "degree_east", vals = lon_data, - create_dimvar = TRUE) - time <- ncdf4::ncdim_def(name = "time", units = paste(time_unit, "since", time_base), - vals = PEcAn.utils::ud_convert(upscaled_time, "days", time_unit), - create_dimvar = TRUE, unlim = TRUE) + lat <- ncdf4::ncdim_def( + name = "latitude", units = "degree_north", vals = lat_data, + create_dimvar = TRUE + ) + lon <- ncdf4::ncdim_def( + name = "longitude", units = "degree_east", vals = lon_data, + create_dimvar = TRUE + ) + time <- ncdf4::ncdim_def( + name = "time", units = paste(time_unit, "since", time_base), + vals = PEcAn.utils::ud_convert(upscaled_time, "days", time_unit), + create_dimvar = TRUE, unlim = TRUE + ) dim <- list(lat, lon, time) - + upscale.list <- list() for (name in names(upscale_data)) { - upscale.list[[name]] <- ncdf4::ncvar_def(name = name, units = met_units[[name]], - dim = dim, missval = -999, verbose = verbose) + upscale.list[[name]] <- ncdf4::ncvar_def( + name = name, units = met_units[[name]], + dim = dim, missval = -999, verbose = verbose + ) } - + rows <- 1 dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) 
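For reference, a minimal sketch of the step-means aggregation this file relies on. The step_means() helper is copied from its definition at the top of upscale_met.R; the input vector and step size below are hypothetical, chosen only to show the behavior.

# step_means() reshapes the vector into a matrix with `step` rows and takes
# column means, so each output value is the mean of one block of `step` inputs.
step_means <- function(x, step) {
  colMeans(matrix(x, nrow = step))
}

hourly <- seq_len(48)            # two days of hourly data (hypothetical values)
step_means(hourly, step = 24)    # -> c(12.5, 36.5), one mean per day
# As the @details note warns, length(x) should be an even multiple of `step`;
# upscale_met() enforces this by trimming to `rows_used` before averaging.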
- results <- data.frame(file = character(rows), host = character(rows), mimetype = character(rows), - formatname = character(rows), startdate = character(rows), enddate = character(rows), - dbfile.name = paste("upscaled", sep = "."), stringsAsFactors = FALSE) - + results <- data.frame( + file = character(rows), host = character(rows), mimetype = character(rows), + formatname = character(rows), startdate = character(rows), enddate = character(rows), + dbfile.name = paste("upscaled", sep = "."), stringsAsFactors = FALSE + ) + loc <- ncdf4::nc_create(filename = loc.file, vars = upscale.list, verbose = verbose) - + for (name in names(upscale_data)) { ncdf4::ncvar_put(nc = loc, varid = name, vals = upscale_data[[name]]) } ncdf4::nc_close(loc) - + results$file <- loc.file results$host <- PEcAn.remote::fqdn() results$startdate <- time_base + PEcAn.utils::ud_convert(upscaled_time[[1]], "days", "sec") results$enddate <- time_base + PEcAn.utils::ud_convert(upscaled_time[[nrow(upscale_data)]], "days", "sec") results$mimetype <- "application/x-netcdf" results$formatname <- "CF Meteorology" - + return(invisible(results)) } diff --git a/modules/data.atmosphere/data/FLUXNET.sitemap.R b/modules/data.atmosphere/data/FLUXNET.sitemap.R index 823665ae51a..f9a80ac5523 100644 --- a/modules/data.atmosphere/data/FLUXNET.sitemap.R +++ b/modules/data.atmosphere/data/FLUXNET.sitemap.R @@ -1,5 +1,5 @@ - FLUXNET.sitemap <- utils::read.csv( - file = "FLUXNET.sitemap.csv", - colClasses = "character", - row.names = 1) + file = "FLUXNET.sitemap.csv", + colClasses = "character", + row.names = 1 +) diff --git a/modules/data.atmosphere/inst/ERA5/ERA5_NA_download.R b/modules/data.atmosphere/inst/ERA5/ERA5_NA_download.R index 700cdf88f1e..536ebed4434 100644 --- a/modules/data.atmosphere/inst/ERA5/ERA5_NA_download.R +++ b/modules/data.atmosphere/inst/ERA5/ERA5_NA_download.R @@ -8,46 +8,53 @@ if (future::supportsMulticore()) { } else { future::plan(future::multisession) } -options(timeout=360000) +options(timeout = 360000) c(2012:2021) %>% future_map(function(year) { - # you need to have an account for downloaing the files # Read the documantion for how to setup your account and settings before trying this # https://confluence.ecmwf.int/display/CKB/How+to+download+ERA5#HowtodownloadERA5-3-DownloadERA5datathroughtheCDSAPI - cdsapi <-import("cdsapi") + cdsapi <- import("cdsapi") c <- cdsapi$Client() - + c$retrieve( - 'reanalysis-era5-single-levels', + "reanalysis-era5-single-levels", list( - 'product_type' = 'ensemble_members', - 'format' = 'netcdf', - 'day' = list('01','02','03', - '04','05','06', - '07','08','09', - '10','11','12', - '13','14','15', - '16','17','18', - '19','20','21', - '22','23','24', - '25','26','27', - '28','29','30', - '31'), - 'time' = list('00:00','03:00','06:00', - '09:00','12:00','15:00', - '18:00','21:00'), - 'month' = list('01','02','03', - '04','05','06', - '07','08','09', - '10','11','12'), - 'year' = as.character(year), + "product_type" = "ensemble_members", + "format" = "netcdf", + "day" = list( + "01", "02", "03", + "04", "05", "06", + "07", "08", "09", + "10", "11", "12", + "13", "14", "15", + "16", "17", "18", + "19", "20", "21", + "22", "23", "24", + "25", "26", "27", + "28", "29", "30", + "31" + ), + "time" = list( + "00:00", "03:00", "06:00", + "09:00", "12:00", "15:00", + "18:00", "21:00" + ), + "month" = list( + "01", "02", "03", + "04", "05", "06", + "07", "08", "09", + "10", "11", "12" + ), + "year" = as.character(year), "area" = "84/-179/14/-52", - 'variable' = list( 
"2m_temperature","surface_pressure", - "2m_dewpoint_temperature","total_precipitation", - "10m_u_component_of_wind","10m_v_component_of_wind", - "surface_solar_radiation_downwards","surface_thermal_radiation_downwards") + "variable" = list( + "2m_temperature", "surface_pressure", + "2m_dewpoint_temperature", "total_precipitation", + "10m_u_component_of_wind", "10m_v_component_of_wind", + "surface_solar_radiation_downwards", "surface_thermal_radiation_downwards" + ) ), - paste0('ERA5_',year,'.nc') + paste0("ERA5_", year, ".nc") ) - },.progress = T ) + }, .progress = T) diff --git a/modules/data.atmosphere/inst/ERA5/ERA5_db_register.R b/modules/data.atmosphere/inst/ERA5/ERA5_db_register.R index cb0aa72c4ed..f9474d421d1 100644 --- a/modules/data.atmosphere/inst/ERA5/ERA5_db_register.R +++ b/modules/data.atmosphere/inst/ERA5/ERA5_db_register.R @@ -1,20 +1,20 @@ library(tidyverse) ERA5.files.path <- "/fs/data1/pecan.data/input/ERA5_ENS" -#read the settings -settings <-PEcAn.settings::read.settings(file.choose()) -#start the connection +# read the settings +settings <- PEcAn.settings::read.settings(file.choose()) +# start the connection con <- PEcAn.DB::db.open(settings$database$bety) -#adding a record to the input table. -added<-PEcAn.DB::dbfile.input.insert( - in.path=ERA5.files.path, - in.prefix='ERA5_', - siteid='1000026755', # This is site USA - startdate=as.Date(paste0(1986,"-01-01"), tz="UTC"), # look into the files and make sure you have all the files for this time period - enddate=as.Date(paste0(2018,"-12-31"), tz="UTC"), - mimetype="application/x-netcdf", - formatname="CF Meteorology", +# adding a record to the input table. +added <- PEcAn.DB::dbfile.input.insert( + in.path = ERA5.files.path, + in.prefix = "ERA5_", + siteid = "1000026755", # This is site USA + startdate = as.Date(paste0(1986, "-01-01"), tz = "UTC"), # look into the files and make sure you have all the files for this time period + enddate = as.Date(paste0(2018, "-12-31"), tz = "UTC"), + mimetype = "application/x-netcdf", + formatname = "CF Meteorology", parentid = NA, con, hostname = PEcAn.remote::fqdn(), diff --git a/modules/data.atmosphere/inst/integrationTests/test.download.AmerifluxLBL.R b/modules/data.atmosphere/inst/integrationTests/test.download.AmerifluxLBL.R index 8a28883fcfd..ca4f0925e29 100644 --- a/modules/data.atmosphere/inst/integrationTests/test.download.AmerifluxLBL.R +++ b/modules/data.atmosphere/inst/integrationTests/test.download.AmerifluxLBL.R @@ -8,8 +8,8 @@ test_download_AmerifluxLBL <- function(start_date, end_date, sitename, lat.in, l PEcAn.logger::logger.setLevel("DEBUG") # mocking functions - mockery::stub(convert_input, 'dbfile.input.check', data.frame()) - mockery::stub(convert_input, 'db.query', data.frame(id = 1)) + mockery::stub(convert_input, "dbfile.input.check", data.frame()) + mockery::stub(convert_input, "db.query", data.frame(id = 1)) withr::with_dir(tempdir(), { tmpdir <- getwd() @@ -22,8 +22,8 @@ test_download_AmerifluxLBL <- function(start_date, end_date, sitename, lat.in, l site.id = 1, start_date = start_date, end_date = end_date, - pkg = 'PEcAn.data.atmosphere', - fcn = 'download.AmerifluxLBL', + pkg = "PEcAn.data.atmosphere", + fcn = "download.AmerifluxLBL", con = NULL, host = data.frame(name = "localhost"), write = FALSE, @@ -40,10 +40,10 @@ test_download_AmerifluxLBL <- function(start_date, end_date, sitename, lat.in, l test_that("Downloaded data files have the right format", { firstline <- system(paste0("head -4 ", paste0(tmpdir, "/AMF_US-Akn_BASE_HH_6-5.csv")), intern = 
TRUE) lastline <- system(paste0("tail -1 ", paste0(tmpdir, "/AMF_US-Akn_BASE_HH_6-5.csv")), intern = TRUE) - + # checking if first line of CSV has the sitename expect_true(grepl(sitename, firstline[1])) - + # fourth and last row checked to contain non-alphabetical data since these are used to verify start and end dates expect_false(grepl("[A-Za-z]", firstline[4])) expect_false(grepl("[A-Za-z]", lastline[1])) @@ -54,7 +54,7 @@ test_download_AmerifluxLBL <- function(start_date, end_date, sitename, lat.in, l test_download_AmerifluxLBL( start_date = "2011-01-01", end_date = "2011-12-31", - sitename = 'US-Akn', + sitename = "US-Akn", lat.in = 40, lon.in = -88 -) \ No newline at end of file +) diff --git a/modules/data.atmosphere/inst/integrationTests/test.download.CRUNCEP.R b/modules/data.atmosphere/inst/integrationTests/test.download.CRUNCEP.R index 7188bfb4b5b..03b02392f20 100644 --- a/modules/data.atmosphere/inst/integrationTests/test.download.CRUNCEP.R +++ b/modules/data.atmosphere/inst/integrationTests/test.download.CRUNCEP.R @@ -9,8 +9,8 @@ test_download_CRUNCEP <- function(start_date, end_date, lat.in, lon.in, method, PEcAn.logger::logger.setLevel("DEBUG") # mocking functions - mockery::stub(convert_input, 'dbfile.input.check', data.frame()) - mockery::stub(convert_input, 'db.query', data.frame(id = 1)) + mockery::stub(convert_input, "dbfile.input.check", data.frame()) + mockery::stub(convert_input, "db.query", data.frame(id = 1)) withr::with_dir(tempdir(), { tmpdir <- getwd() @@ -22,8 +22,8 @@ test_download_CRUNCEP <- function(start_date, end_date, lat.in, lon.in, method, site.id = 1, start_date = start_date, end_date = end_date, - pkg = 'PEcAn.data.atmosphere', - fcn = 'download.CRUNCEP', + pkg = "PEcAn.data.atmosphere", + fcn = "download.CRUNCEP", con = NULL, host = data.frame(name = "localhost"), write = FALSE, @@ -37,24 +37,23 @@ test_download_CRUNCEP <- function(start_date, end_date, lat.in, lon.in, method, test_that("File exists at desired location", { # Set the desired file path file_path <- paste0(tmpdir, "/cruncep_landwater_mask.nc") - + # Check if file exists at desired location expect_true(file.exists(file_path)) }) test_that("NetCDF file contains lat and lon variables", { - mask_nc <- ncdf4::nc_open(paste0(tmpdir, "/cruncep_landwater_mask.nc")) on.exit(ncdf4::nc_close(mask_nc), add = TRUE) expect_true("land_water_mask" %in% names(mask_nc$var)) - + # Check the dimensions of "land_water_mask" variable expect_equal(mask_nc$var$land_water_mask$dim[[1]]$name, "lon") expect_equal(mask_nc$var$land_water_mask$dim[[2]]$name, "lat") }) test_that("All the required files are downloaded and stored at desired location", { - # Cached raw CRUNCEP files + # Cached raw CRUNCEP files expect_true(file.exists(paste0(tmpdir, "/cruncep-raw-2000-40--88-tair.nc"))) expect_true(file.exists(paste0(tmpdir, "/cruncep-raw-2000-40--88-lwdown.nc"))) expect_true(file.exists(paste0(tmpdir, "/cruncep-raw-2000-40--88-press.nc"))) @@ -63,7 +62,7 @@ test_download_CRUNCEP <- function(start_date, end_date, lat.in, lon.in, method, expect_true(file.exists(paste0(tmpdir, "/cruncep-raw-2000-40--88-vwind.nc"))) expect_true(file.exists(paste0(tmpdir, "/cruncep-raw-2000-40--88-qair.nc"))) expect_true(file.exists(paste0(tmpdir, "/cruncep-raw-2000-40--88-rain.nc"))) - + # CRUNCEP file expect_true(file.exists(paste0(tmpdir, "/CRUNCEP.2000.nc"))) }) @@ -126,4 +125,4 @@ test_download_CRUNCEP( method = "ncss", maxErrors = 10, sleep = 2 -) \ No newline at end of file +) diff --git 
a/modules/data.atmosphere/inst/integrationTests/test.download.ERA5.R b/modules/data.atmosphere/inst/integrationTests/test.download.ERA5.R index 9ee30b42573..2166d6ee20b 100644 --- a/modules/data.atmosphere/inst/integrationTests/test.download.ERA5.R +++ b/modules/data.atmosphere/inst/integrationTests/test.download.ERA5.R @@ -10,12 +10,12 @@ test_download_ERA5 <- function(start_date, end_date, lat.in, lon.in, product_typ # mocking functions - mockery::stub(convert_input, 'dbfile.input.check', data.frame()) - mockery::stub(convert_input, 'db.query', data.frame(id = 1)) + mockery::stub(convert_input, "dbfile.input.check", data.frame()) + mockery::stub(convert_input, "db.query", data.frame(id = 1)) # additional mocks needed since download.ERA5 does not return data as other download functions - mockery::stub(convert_input, 'length', 2) - mockery::stub(convert_input, 'purrr::map_dfr', data.frame(missing = c(FALSE), empty = c(FALSE))) + mockery::stub(convert_input, "length", 2) + mockery::stub(convert_input, "purrr::map_dfr", data.frame(missing = c(FALSE), empty = c(FALSE))) withr::with_dir(tempdir(), { tmpdir <- getwd() @@ -27,8 +27,8 @@ test_download_ERA5 <- function(start_date, end_date, lat.in, lon.in, product_typ site.id = 1, start_date = start_date, end_date = end_date, - pkg = 'PEcAn.data.atmosphere', - fcn = 'download.ERA5.old', + pkg = "PEcAn.data.atmosphere", + fcn = "download.ERA5.old", con = NULL, host = data.frame(name = "localhost"), write = FALSE, @@ -37,8 +37,8 @@ test_download_ERA5 <- function(start_date, end_date, lat.in, lon.in, product_typ product_types = product_types, reticulate_python = reticulate_python ) - - test_that("All the required files are downloaded and stored at desired location", { + + test_that("All the required files are downloaded and stored at desired location", { expect_true(file.exists(paste0(tmpdir, "/era5.2m_dewpoint_temperature.nc"))) expect_true(file.exists(paste0(tmpdir, "/era5.2m_temperature.nc"))) expect_true(file.exists(paste0(tmpdir, "/era5.10m_u_component_of_wind.nc"))) @@ -92,4 +92,4 @@ test_download_ERA5( lon.in = -84.6738, product_types = "all", reticulate_python = NULL -) \ No newline at end of file +) diff --git a/modules/data.atmosphere/inst/scripts/DUKE_FACE_MET.v2.R b/modules/data.atmosphere/inst/scripts/DUKE_FACE_MET.v2.R index dffb98323fa..30e92d45bc5 100644 --- a/modules/data.atmosphere/inst/scripts/DUKE_FACE_MET.v2.R +++ b/modules/data.atmosphere/inst/scripts/DUKE_FACE_MET.v2.R @@ -1,32 +1,39 @@ - -library(hdf5,lib.loc="~/lib/R/Rhdf") +library(hdf5, lib.loc = "~/lib/R/Rhdf") ### FUNCTIONS -dm <- c(0,32,60,91,121,152,182,213,244,274,305,335,366) -dl <- c(0,32,61,92,122,153,183,214,245,275,306,336,367) -month <- c("JAN","FEB","MAR","APR","MAY","JUN","JUL","AUG","SEP","OCT","NOV","DEC") -mon_num <- c("01","02","03","04","05","06","07","08","09","10","11","12") -day2mo <- function(year,day){ +dm <- c(0, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) +dl <- c(0, 32, 61, 92, 122, 153, 183, 214, 245, 275, 306, 336, 367) +month <- c("JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC") +mon_num <- c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12") +day2mo <- function(year, day) { leap <- lubridate::leap_year(year) - mo <- rep(NA,length(day)) - mo[leap] <- findInterval(day[leap],dl) - mo[!leap] <- findInterval(day[!leap],dm) + mo <- rep(NA, length(day)) + mo[leap] <- findInterval(day[leap], dl) + mo[!leap] <- findInterval(day[!leap], dm) return(mo) } -ndays <- c(31,28,31,30,31,30, 
31,31,30,31,30,31) -ndayl <- c(31,29,31,30,31,30, 31,31,30,31,30,31) -nday <- function(mo,year){ +ndays <- c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) +ndayl <- c(31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) +nday <- function(mo, year) { leap <- lubridate::leap_year(year) - if(leap) return(ndayl[mo]) + if (leap) { + return(ndayl[mo]) + } ndays[mo] } -num <- function(d){as.numeric(as.character(d))} -checkNA <- function(val,master){ - if(length(val) == 0) return(rep(master,each=6)) +num <- function(d) { + as.numeric(as.character(d)) +} +checkNA <- function(val, master) { + if (length(val) == 0) { + return(rep(master, each = 6)) + } sNA <- which(is.na(val)) - if(length(sNA) == 0) return(val) + if (length(sNA) == 0) { + return(val) + } ## translate NA's - mNA <- round(sNA*length(master)/length(val)) + mNA <- round(sNA * length(master) / length(val)) mNA[mNA < 1] <- 1 mNA[mNA > length(master)] <- length(master) val[sNA] <- master[mNA] @@ -34,185 +41,184 @@ checkNA <- function(val,master){ } -lat <- read.table("../../veg/northing",header=FALSE,skip=1) -minN <- num(substr(lat[,4],1,8)) -lon <- read.table("../../veg/easting",header=FALSE,skip=0) -minE <- num(substr(lon[,4],1,8)) -lat <- 35+58/60+mean(minN)/3600 -lon <- 79+05/50+mean(minE)/3600 +lat <- read.table("../../veg/northing", header = FALSE, skip = 1) +minN <- num(substr(lat[, 4], 1, 8)) +lon <- read.table("../../veg/easting", header = FALSE, skip = 0) +minE <- num(substr(lon[, 4], 1, 8)) +lat <- 35 + 58 / 60 + mean(minN) / 3600 +lon <- 79 + 05 / 50 + mean(minE) / 3600 #### for duke, files divided by VARIABLE not by TIME ## Air Temperature -Tair1 <- read.csv("AT97-03gap.csv",header=FALSE,skip=2) -Tair1 <- cbind(Tair1[,1:3],apply(Tair1[,4:11],1,mean,na.rm=TRUE)) -colnames(Tair1) <- c("JDT","DOY","Time","TempC") -Tair2 <- read.csv("AT04-08gap.csv",header=FALSE,skip=2) -Tair2 <- cbind(Tair2[,1:3],apply(Tair2[,c(4,8,10,12,16,18,20,21)],1,mean,na.rm=TRUE)) -colnames(Tair2) <- c("JDT","DOY","Time","TempC") -Tair <- rbind(Tair1,Tair2,deparse.level=0) -breaks <- c(1,which(Tair$DOY == 1 & Tair$Time == 30),nrow(Tair)+1) +Tair1 <- read.csv("AT97-03gap.csv", header = FALSE, skip = 2) +Tair1 <- cbind(Tair1[, 1:3], apply(Tair1[, 4:11], 1, mean, na.rm = TRUE)) +colnames(Tair1) <- c("JDT", "DOY", "Time", "TempC") +Tair2 <- read.csv("AT04-08gap.csv", header = FALSE, skip = 2) +Tair2 <- cbind(Tair2[, 1:3], apply(Tair2[, c(4, 8, 10, 12, 16, 18, 20, 21)], 1, mean, na.rm = TRUE)) +colnames(Tair2) <- c("JDT", "DOY", "Time", "TempC") +Tair <- rbind(Tair1, Tair2, deparse.level = 0) +breaks <- c(1, which(Tair$DOY == 1 & Tair$Time == 30), nrow(Tair) + 1) yrs <- 1997:2008 -yr <- rep(NA,nrow(Tair)) -for(y in 1:length(yrs)){ - yr[breaks[y]:(breaks[y+1]-1)] <- yrs[y] +yr <- rep(NA, nrow(Tair)) +for (y in 1:length(yrs)) { + yr[breaks[y]:(breaks[y + 1] - 1)] <- yrs[y] } -mo <- day2mo(yr,Tair$DOY) -TempK <- Tair$TempC+273.15 -Tair <- cbind(Tair,yr,mo) +mo <- day2mo(yr, Tair$DOY) +TempK <- Tair$TempC + 273.15 +Tair <- cbind(Tair, yr, mo) ## set partial year to NA's so it will be gapfilled -dfull <- rep(1:365,each=48) -mfull <- day2mo(1997,dfull) -sel <- 1:(length(mfull)-length(which(yr == 1997))) -Tadd <- cbind(rep(NA,length(sel)),dfull[sel],matrix(NA,length(sel),2),rep(1997,length(sel)),mfull[sel]) +dfull <- rep(1:365, each = 48) +mfull <- day2mo(1997, dfull) +sel <- 1:(length(mfull) - length(which(yr == 1997))) +Tadd <- cbind(rep(NA, length(sel)), dfull[sel], matrix(NA, length(sel), 2), rep(1997, length(sel)), mfull[sel]) colnames(Tadd) <- colnames(Tair) -Tair 
<- rbind(Tadd,Tair) +Tair <- rbind(Tadd, Tair) ## RH -RH1 <- read.csv("RH97-03gap.csv",header=FALSE,skip=2) -RH1 <- cbind(RH1[,1:3],apply(RH1[,4:11],1,mean,na.rm=TRUE)) -colnames(RH1) <- c("JDT","DOY","Time","RH") -RH2 <- read.csv("RH04-08gap.csv",header=FALSE,skip=2) -RH2 <- cbind(RH2[,1:3],apply(RH2[,c(4,8,10,12,16,18,20,21)],1,mean,na.rm=TRUE)) -colnames(RH2) <- c("JDT","DOY","Time","RH") -RH <- rbind(RH1,RH2,deparse.level=0) -breaks <- c(1,which(RH$DOY == 1 & RH$Time == 30),nrow(RH)+1) +RH1 <- read.csv("RH97-03gap.csv", header = FALSE, skip = 2) +RH1 <- cbind(RH1[, 1:3], apply(RH1[, 4:11], 1, mean, na.rm = TRUE)) +colnames(RH1) <- c("JDT", "DOY", "Time", "RH") +RH2 <- read.csv("RH04-08gap.csv", header = FALSE, skip = 2) +RH2 <- cbind(RH2[, 1:3], apply(RH2[, c(4, 8, 10, 12, 16, 18, 20, 21)], 1, mean, na.rm = TRUE)) +colnames(RH2) <- c("JDT", "DOY", "Time", "RH") +RH <- rbind(RH1, RH2, deparse.level = 0) +breaks <- c(1, which(RH$DOY == 1 & RH$Time == 30), nrow(RH) + 1) yrs <- 1997:2008 -yr <- rep(NA,nrow(RH)) -for(y in 1:length(yrs)){ - yr[breaks[y]:(breaks[y+1]-1)] <- yrs[y] +yr <- rep(NA, nrow(RH)) +for (y in 1:length(yrs)) { + yr[breaks[y]:(breaks[y + 1] - 1)] <- yrs[y] } -mo <- day2mo(yr,RH$DOY) -SH <- (RH$RH/100.)*2.541e6*exp(-5415./(TempK))*(18./29.) -RH <- cbind(RH,SH,yr,mo) +mo <- day2mo(yr, RH$DOY) +SH <- (RH$RH / 100.) * 2.541e6 * exp(-5415. / (TempK)) * (18. / 29.) +RH <- cbind(RH, SH, yr, mo) ## set partial year to NA's so it will be gapfilled -dfull <- rep(1:365,each=48) -mfull <- day2mo(1997,dfull) -sel <- 1:(length(mfull)-length(which(yr == 1997))) -Radd <- cbind(rep(NA,length(sel)),dfull[sel],matrix(NA,length(sel),3),rep(1997,length(sel)),mfull[sel]) +dfull <- rep(1:365, each = 48) +mfull <- day2mo(1997, dfull) +sel <- 1:(length(mfull) - length(which(yr == 1997))) +Radd <- cbind(rep(NA, length(sel)), dfull[sel], matrix(NA, length(sel), 3), rep(1997, length(sel)), mfull[sel]) colnames(Radd) <- colnames(RH) -RH <- rbind(Radd,RH) +RH <- rbind(Radd, RH) + +### PRETREATMENT +newdat <- read.csv("DUKE_TRH93-95.csv", header = FALSE, skip = 2) -### PRETREATMENT -newdat <- read.csv("DUKE_TRH93-95.csv",header=FALSE,skip=2) - ### Precip -PPT <- read.csv("PRECIPgap.csv",header=FALSE,skip=2) -colnames(PPT) <- c("JDT","DOY","Time","PPT") -breaks <- c(1,which(PPT$DOY == 1 & PPT$Time == 30),nrow(PPT)+1) +PPT <- read.csv("PRECIPgap.csv", header = FALSE, skip = 2) +colnames(PPT) <- c("JDT", "DOY", "Time", "PPT") +breaks <- c(1, which(PPT$DOY == 1 & PPT$Time == 30), nrow(PPT) + 1) yrs <- 1997:2007 -yr <- rep(NA,nrow(PPT)) -for(y in 1:length(yrs)){ - yr[breaks[y]:(breaks[y+1]-1)] <- yrs[y] +yr <- rep(NA, nrow(PPT)) +for (y in 1:length(yrs)) { + yr[breaks[y]:(breaks[y + 1] - 1)] <- yrs[y] } -mo <- day2mo(yr,PPT$DOY) -PPT$PPT <- PPT$PPT/1800. -PPT <- cbind(PPT,yr,mo) +mo <- day2mo(yr, PPT$DOY) +PPT$PPT <- PPT$PPT / 1800. 
+PPT <- cbind(PPT, yr, mo) sel <- which(PPT$JDT > 1000) -PPT <- PPT[sel,] +PPT <- PPT[sel, ] ## set partial year to NA's so it will be gapfilled -dfull <- rep(1:365,each=48) -mfull <- day2mo(1997,dfull) -sel <- 1:(length(mfull)-length(which(yr == 1997))+1) -Padd <- cbind(rep(NA,length(sel)),dfull[sel],matrix(NA,length(sel),2),rep(1997,length(sel)),mfull[sel]) +dfull <- rep(1:365, each = 48) +mfull <- day2mo(1997, dfull) +sel <- 1:(length(mfull) - length(which(yr == 1997)) + 1) +Padd <- cbind(rep(NA, length(sel)), dfull[sel], matrix(NA, length(sel), 2), rep(1997, length(sel)), mfull[sel]) colnames(Padd) <- colnames(PPT) -PPT <- rbind(Padd,PPT) +PPT <- rbind(Padd, PPT) ## RAD/WIND -TOW <- read.csv("RadWindGap.csv",header=FALSE,skip=2) -colnames(TOW) <- c("JDT","DOY","Time","Z","ind","WS","WD","PAR","Rn","SWup","SWdn","LWup","LWdn") -breaks <- c(which(TOW$DOY == 1 & TOW$Time == 30),nrow(TOW)+1) +TOW <- read.csv("RadWindGap.csv", header = FALSE, skip = 2) +colnames(TOW) <- c("JDT", "DOY", "Time", "Z", "ind", "WS", "WD", "PAR", "Rn", "SWup", "SWdn", "LWup", "LWdn") +breaks <- c(which(TOW$DOY == 1 & TOW$Time == 30), nrow(TOW) + 1) yrs <- 1998:2007 -yr <- rep(NA,nrow(TOW)) -for(y in 1:length(yrs)){ - yr[breaks[y]:(breaks[y+1]-1)] <- yrs[y] +yr <- rep(NA, nrow(TOW)) +for (y in 1:length(yrs)) { + yr[breaks[y]:(breaks[y + 1] - 1)] <- yrs[y] } -mo <- day2mo(yr,TOW$DOY) -SW <- TOW$PAR/(0.45*4.6) # umol/m2/s -> w/m2 +mo <- day2mo(yr, TOW$DOY) +SW <- TOW$PAR / (0.45 * 4.6) # umol/m2/s -> w/m2 sel <- (TOW$SWup == 0 & TOW$Z < 90) TOW$SWup[sel] <- SW[sel] -TOW$SWup[TOW$SWup<0] <- 0 -TOW <- cbind(TOW,yr,mo) +TOW$SWup[TOW$SWup < 0] <- 0 +TOW <- cbind(TOW, yr, mo) SW <- TOW$SWup - ### RADIATION CALCULATION - ##build time variables (year, month, day of year) - dt <- 1800 - yr <- TOW$yr - doy <- TOW$DOY - hr <- TOW$Time/100 - hr[hr %% 1 > 0] <- floor(hr[hr %% 1 > 0]) + 0.5 - mo <- day2mo(yr,doy) - - ## calculate potential radiation - ## in order to estimate diffuse/direct - f <- pi/180*(279.5+0.9856*doy) - et <- (-104.7*sin(f)+596.2*sin(2*f)+4.3*sin(4*f)-429.3*cos(f)-2.0*cos(2*f)+19.3*cos(3*f))/3600 #equation of time -> eccentricity and obliquity - merid <- floor(lon/15)*15 - if(merid<0) merid <- merid+15 - lc <- (lon-merid)*-4/60 ## longitude correction - tz <- merid/360*24 ## time zone - midbin <- 0.5*dt/86400*24 ## shift calc to middle of bin +### RADIATION CALCULATION +## build time variables (year, month, day of year) +dt <- 1800 +yr <- TOW$yr +doy <- TOW$DOY +hr <- TOW$Time / 100 +hr[hr %% 1 > 0] <- floor(hr[hr %% 1 > 0]) + 0.5 +mo <- day2mo(yr, doy) + +## calculate potential radiation +## in order to estimate diffuse/direct +f <- pi / 180 * (279.5 + 0.9856 * doy) +et <- (-104.7 * sin(f) + 596.2 * sin(2 * f) + 4.3 * sin(4 * f) - 429.3 * cos(f) - 2.0 * cos(2 * f) + 19.3 * cos(3 * f)) / 3600 # equation of time -> eccentricity and obliquity +merid <- floor(lon / 15) * 15 +if (merid < 0) merid <- merid + 15 +lc <- (lon - merid) * -4 / 60 ## longitude correction +tz <- merid / 360 * 24 ## time zone +midbin <- 0.5 * dt / 86400 * 24 ## shift calc to middle of bin ## t0 <- 12+lc-et-tz-midbin ## solar time -t0 <- lc-et-tz-midbin ## solar time - h <- pi/12*(hr-t0) ## solar hour - dec <- -23.45*pi/180*cos(2*pi*(doy+10)/365) ## declination - - cosz <- sin(lat*pi/180)*sin(dec)+cos(lat*pi/180)*cos(dec)*cos(h) - cosz[cosz<0] <- 0 - - rpot <- 1366*cosz - rpot <- rpot[1:length(SW)] - rpotL <-(rpot[c(13:length(SW),1:12)])#[1:length(SW)] -##rpotL <-(rpot[c(12:1,1:length(SW))])[1:length(SW)] - - - SW[rpotL < SW] <- 
rpotL[rpotL<SW] ## ensure radiation < max -### this causes trouble at twilight bc of missmatch btw bin avergage and bin midpoint - frac <- SW/rpot - frac[frac>0.9] <- 0.9 ## ensure some diffuse - frac[frac < 0.0] <- 0.0 - frac[is.na(frac)] <- 0.0 - frac[is.nan(frac)] <- 0.0 - SWd <- SW*(1-frac) ## Diffuse portion of total short wave rad - +t0 <- lc - et - tz - midbin ## solar time +h <- pi / 12 * (hr - t0) ## solar hour +dec <- -23.45 * pi / 180 * cos(2 * pi * (doy + 10) / 365) ## declination + +cosz <- sin(lat * pi / 180) * sin(dec) + cos(lat * pi / 180) * cos(dec) * cos(h) +cosz[cosz < 0] <- 0 + +rpot <- 1366 * cosz +rpot <- rpot[1:length(SW)] +rpotL <- (rpot[c(13:length(SW), 1:12)]) # [1:length(SW)] +## rpotL <-(rpot[c(12:1,1:length(SW))])[1:length(SW)] + + +SW[rpotL < SW] <- rpotL[rpotL < SW] ## ensure radiation < max +### this causes trouble at twilight bc of missmatch btw bin avergage and bin midpoint +frac <- SW / rpot +frac[frac > 0.9] <- 0.9 ## ensure some diffuse +frac[frac < 0.0] <- 0.0 +frac[is.na(frac)] <- 0.0 +frac[is.nan(frac)] <- 0.0 +SWd <- SW * (1 - frac) ## Diffuse portion of total short wave rad + ### convert to ED2.1 hdf met variables - nbdsfA <- (SW - SWd) * 0.57 # near IR beam downward solar radiation [W/m2] - nddsfA <- SWd * 0.48 # near IR diffuse downward solar radiation [W/m2] - vbdsfA <- (SW - SWd) * 0.43 # visible beam downward solar radiation [W/m2] - vddsfA <- SWd * 0.52 # visible diffuse downward solar radiation [W/m2] +nbdsfA <- (SW - SWd) * 0.57 # near IR beam downward solar radiation [W/m2] +nddsfA <- SWd * 0.48 # near IR diffuse downward solar radiation [W/m2] +vbdsfA <- (SW - SWd) * 0.43 # visible beam downward solar radiation [W/m2] +vddsfA <- SWd * 0.52 # visible diffuse downward solar radiation [W/m2] -###LOAD CO2 +### LOAD CO2 -CO2 <- read.csv("CO2gap.csv",header=FALSE,skip=2) -colnames(CO2) <- c("JDT","DOY","Time","R1","R2","R3","R4","R5","R6","R7","R8","RA") -breaks <- c(1,which(CO2$DOY == 1 & CO2$Time == 30),nrow(CO2)+1) +CO2 <- read.csv("CO2gap.csv", header = FALSE, skip = 2) +colnames(CO2) <- c("JDT", "DOY", "Time", "R1", "R2", "R3", "R4", "R5", "R6", "R7", "R8", "RA") +breaks <- c(1, which(CO2$DOY == 1 & CO2$Time == 30), nrow(CO2) + 1) yrs <- 1996:2007 -yr <- rep(NA,nrow(CO2)) -for(y in 1:length(yrs)){ - yr[breaks[y]:(breaks[y+1]-1)] <- yrs[y] +yr <- rep(NA, nrow(CO2)) +for (y in 1:length(yrs)) { + yr[breaks[y]:(breaks[y + 1] - 1)] <- yrs[y] } -mo <- day2mo(yr,CO2$DOY) -AMB <- (CO2$R1+CO2$R5+CO2$R6+CO2$R8)/4 -ELEV <- (CO2$R2+CO2$R3+CO2$R4+CO2$R7)/4 -CO2 <- cbind(CO2,AMB,ELEV,yr,mo) +mo <- day2mo(yr, CO2$DOY) +AMB <- (CO2$R1 + CO2$R5 + CO2$R6 + CO2$R8) / 4 +ELEV <- (CO2$R2 + CO2$R3 + CO2$R4 + CO2$R7) / 4 +CO2 <- cbind(CO2, AMB, ELEV, yr, mo) ## gap-fill 96 CO2 w/ 1997 -C96full <- CO2[CO2$yr == 1997,] -C96full <- rbind(C96full[1:48,],C96full) ## rep day 1 to account for leap year +C96full <- CO2[CO2$yr == 1997, ] +C96full <- rbind(C96full[1:48, ], C96full) ## rep day 1 to account for leap year C96full$mo[1:48] <- 2 ## set mo to feb (doesn't have to be in order) C96full$yr <- 1996 -CO2 <- rbind(C96full[1:(nrow(C96full)-sum(CO2$yr == 1996)),],CO2) +CO2 <- rbind(C96full[1:(nrow(C96full) - sum(CO2$yr == 1996)), ], CO2) ### loop over years to produce output yrs <- 1996:2007 -for(y in 1:length(yrs)){ - for(mo in 1:12){ - +for (y in 1:length(yrs)) { + for (mo in 1:12) { ## select month - Tsel <- which(Tair$yr == yrs[y] & Tair$mo == mo) - 12#also for RH + Tsel <- which(Tair$yr == yrs[y] & Tair$mo == mo) - 12 # also for RH Psel <- which(PPT$yr == yrs[y] & PPT$mo == mo) - 12 Wsel <- which(TOW$yr == yrs[y] & TOW$mo == mo) - 12 Csel <- which(CO2$yr == yrs[y] & CO2$mo == mo) - 12 @@ -220,65 +226,68 @@
for(y in 1:length(yrs)){ Psel[Psel < 1] <- 1 Wsel[Wsel < 1] <- 1 Csel[Csel < 1] <- 1 - + ## set dims - Tdims <- c(1,1,length(Tsel)) - if(length(Tsel) == 0) Tdims <- c(1,1,48*nday(mo,yrs[y])) - Pdims <- c(1,1,length(Psel)) - if(length(Psel) == 0) Pdims <- c(1,1,48*nday(mo,yrs[y])) - Wdims <- c(1,1,length(Wsel)) - if(length(Wsel) == 0) Wdims <- c(1,1,48*nday(mo,yrs[y])) - Cdims <- c(1,1,length(Csel)) - if(length(Csel) == 0) Cdims <- c(1,1,48*nday(mo,yrs[y])) - -# pres <- array(presA[selm],dim=dims) - + Tdims <- c(1, 1, length(Tsel)) + if (length(Tsel) == 0) Tdims <- c(1, 1, 48 * nday(mo, yrs[y])) + Pdims <- c(1, 1, length(Psel)) + if (length(Psel) == 0) Pdims <- c(1, 1, 48 * nday(mo, yrs[y])) + Wdims <- c(1, 1, length(Wsel)) + if (length(Wsel) == 0) Wdims <- c(1, 1, 48 * nday(mo, yrs[y])) + Cdims <- c(1, 1, length(Csel)) + if (length(Csel) == 0) Cdims <- c(1, 1, 48 * nday(mo, yrs[y])) + + # pres <- array(presA[selm],dim=dims) + ## define variables - nbdsf <- array(nbdsfA[Wsel],dim=Wdims) - nddsf <- array(nddsfA[Wsel],dim=Wdims) - vbdsf <- array(vbdsfA[Wsel],dim=Wdims) - vddsf <- array(vddsfA[Wsel],dim=Wdims) - prate <- array(PPT$PPT[Psel],dim=Pdims) - dlwrf <- array(TOW$LWup[Wsel],dim=Wdims) - - hgt <- array(50,dim=Wdims) - ugrd <- array(TOW$WS[Wsel],dim=Wdims) - vgrd <- array(0,dim=Wdims) - sh <- array(RH$SH[Tsel],dim=Tdims) - tmp <- array(TempK[Tsel],dim=Tdims) - - + nbdsf <- array(nbdsfA[Wsel], dim = Wdims) + nddsf <- array(nddsfA[Wsel], dim = Wdims) + vbdsf <- array(vbdsfA[Wsel], dim = Wdims) + vddsf <- array(vddsfA[Wsel], dim = Wdims) + prate <- array(PPT$PPT[Psel], dim = Pdims) + dlwrf <- array(TOW$LWup[Wsel], dim = Wdims) + + hgt <- array(50, dim = Wdims) + ugrd <- array(TOW$WS[Wsel], dim = Wdims) + vgrd <- array(0, dim = Wdims) + sh <- array(RH$SH[Tsel], dim = Tdims) + tmp <- array(TempK[Tsel], dim = Tdims) + + ## grab & fill in other vars - narr <- hdf5load(paste("NARR/duke_",yrs[y],month[mo],".h5",sep=""),load=FALSE) - ##pres <- 1e5*(rep(ncep[,6],each=6)/1004.)^(1004./287.) ## exner -> pres - pres <- narr$pres + narr <- hdf5load(paste("NARR/duke_", yrs[y], month[mo], ".h5", sep = ""), load = FALSE) + ## pres <- 1e5*(rep(ncep[,6],each=6)/1004.)^(1004./287.) 
## exner -> pres + pres <- narr$pres dlwrf[dlwrf < 50] <- NA ## fill missing - nbdsf <- checkNA(nbdsf,narr$nbdsf) - nddsf <- checkNA(nddsf,narr$nddsf) - vbdsf <- checkNA(vbdsf,narr$vbdsf) - vddsf <- checkNA(vddsf,narr$vddsf) - dlwrf <- checkNA(dlwrf,narr$dlwrf) - prate <- checkNA(prate,narr$prate) - hgt <- checkNA(hgt,narr$hgt) - ugrd <- checkNA(ugrd,sqrt(narr$ugrd^2+narr$vgrd^2)) - sh <- checkNA(sh,narr$sh) - tmp <- checkNA(tmp,narr$tmp) - -#### OUTPUT #### - - ##ambient - co2 <- array(CO2$AMB[Csel],dim=Cdims) - mout <- paste("AMB_",yrs[y],month[mo],".h5",sep="") - hdf5save(mout,"nbdsf","nddsf","vbdsf","vddsf","prate","dlwrf","pres","hgt" - ,"ugrd","vgrd","sh","tmp","co2") + nbdsf <- checkNA(nbdsf, narr$nbdsf) + nddsf <- checkNA(nddsf, narr$nddsf) + vbdsf <- checkNA(vbdsf, narr$vbdsf) + vddsf <- checkNA(vddsf, narr$vddsf) + dlwrf <- checkNA(dlwrf, narr$dlwrf) + prate <- checkNA(prate, narr$prate) + hgt <- checkNA(hgt, narr$hgt) + ugrd <- checkNA(ugrd, sqrt(narr$ugrd^2 + narr$vgrd^2)) + sh <- checkNA(sh, narr$sh) + tmp <- checkNA(tmp, narr$tmp) + + #### OUTPUT #### + + ## ambient + co2 <- array(CO2$AMB[Csel], dim = Cdims) + mout <- paste("AMB_", yrs[y], month[mo], ".h5", sep = "") + hdf5save( + mout, "nbdsf", "nddsf", "vbdsf", "vddsf", "prate", "dlwrf", "pres", "hgt", + "ugrd", "vgrd", "sh", "tmp", "co2" + ) ## elevated - co2 <- array(CO2$ELEV[Csel],dim=Cdims) - mout <- paste("ELEV_",yrs[y],month[mo],".h5",sep="") - hdf5save(mout,"nbdsf","nddsf","vbdsf","vddsf","prate","dlwrf","pres","hgt" - ,"ugrd","vgrd","sh","tmp","co2") - + co2 <- array(CO2$ELEV[Csel], dim = Cdims) + mout <- paste("ELEV_", yrs[y], month[mo], ".h5", sep = "") + hdf5save( + mout, "nbdsf", "nddsf", "vbdsf", "vddsf", "prate", "dlwrf", "pres", "hgt", + "ugrd", "vgrd", "sh", "tmp", "co2" + ) } } diff --git a/modules/data.atmosphere/inst/scripts/ExtractNOAAstation.R b/modules/data.atmosphere/inst/scripts/ExtractNOAAstation.R index 5bab783ea9c..ffe7ed5f5c4 100644 --- a/modules/data.atmosphere/inst/scripts/ExtractNOAAstation.R +++ b/modules/data.atmosphere/inst/scripts/ExtractNOAAstation.R @@ -1,11 +1,11 @@ -###define defaults for debugging -##elem <- NULL -##ID <- NULL -##DSETcheck <- NULL -##Flag1check <- c("M","S") -##Flag2check <- c("2","3","T","U") - -ExtractNOAAstation <- function(data,elem=NULL,ID=NULL,DSETcheck=NULL,Flag1check=c("M","S"),Flag2check=c("2","3","T","U")){ +### define defaults for debugging +## elem <- NULL +## ID <- NULL +## DSETcheck <- NULL +## Flag1check <- c("M","S") +## Flag2check <- c("2","3","T","U") + +ExtractNOAAstation <- function(data, elem = NULL, ID = NULL, DSETcheck = NULL, Flag1check = c("M", "S"), Flag2check = c("2", "3", "T", "U")) { ## function that converts/extracts NOAA met station data ## INPUTS: ## data - raw data table @@ -25,101 +25,100 @@ ExtractNOAAstation <- function(data,elem=NULL,ID=NULL,DSETcheck=NULL,Flag1check= ## list with a "date" table and a table for each elem (date X ID) ## date table contains year, month, day, decimal day (aka "julian") and decimal year ## values are converted to NA if they fail the checks - + sel <- which(!(as.character(data$DSET) == "----")) - data <- data[sel,] - + data <- data[sel, ] + ## fill in defaults & set up storage out <- list() - if(is.null(elem)) { ## set list of variables if not given + if (is.null(elem)) { ## set list of variables if not given elem <- as.character(unique(data$ELEM)) elem <- elem[which(!(elem == "----"))] } obsID <- as.character(unique(data$COOPID)) # get list of observed stations obsID <- obsID[which(!(obsID == 
"------"))] - if(is.null(ID)) ID <- obsID - ID <- ID[ID %in% obsID] ## exclude ID's not in data set + if (is.null(ID)) ID <- obsID + ID <- ID[ID %in% obsID] ## exclude ID's not in data set - nvar <- length(elem) # number of variables - nstation <- length(ID) # number of stations + nvar <- length(elem) # number of variables + nstation <- length(ID) # number of stations ## remove invalid DSET - if(!is.null(DSETcheck)){ + if (!is.null(DSETcheck)) { sel <- which(!(data$DSET %in% DSETcheck)) - data <- data[sel,] + data <- data[sel, ] } - ##create date + ## create date yrmo <- sort(as.character(unique(data$YEARMO))) - yr <- as.numeric(substr(as.character(data$YEARMO),1,4)) - mo <- as.numeric(substr(as.character(data$YEARMO),5,6)) - data <- cbind(data,yr,mo) + yr <- as.numeric(substr(as.character(data$YEARMO), 1, 4)) + mo <- as.numeric(substr(as.character(data$YEARMO), 5, 6)) + data <- cbind(data, yr, mo) days <- diff(monthday) ldays <- diff(leapmonthday) date <- NULL - for(i in 1:length(yrmo)){ - y <- as.numeric(substr(yrmo[i],1,4)) - m <- as.numeric(substr(yrmo[i],5,6)) + for (i in 1:length(yrmo)) { + y <- as.numeric(substr(yrmo[i], 1, 4)) + m <- as.numeric(substr(yrmo[i], 5, 6)) n <- days[m] - if(y %% 4 == 0) n <- ldays[m] - dtmp <- cbind(rep(y,n),rep(m,n),1:n,(1:n)+monthday[m]) - if(is.null(date)) { + if (y %% 4 == 0) n <- ldays[m] + dtmp <- cbind(rep(y, n), rep(m, n), 1:n, (1:n) + monthday[m]) + if (is.null(date)) { date <- dtmp - } else {date <- rbind(date,dtmp)} + } else { + date <- rbind(date, dtmp) + } } - date <- cbind(date,jday(date[,1],date[,2],date[,3])) - colnames(date) <- c("year","month","day","julian","decyr") + date <- cbind(date, jday(date[, 1], date[, 2], date[, 3])) + colnames(date) <- c("year", "month", "day", "julian", "decyr") out[[1]] <- date - ddate <- date[,1]*10000+date[,2]*100+date[,3] + ddate <- date[, 1] * 10000 + date[, 2] * 100 + date[, 3] ## create variables - for(i in 1:nvar){ - vtemp <- matrix(NA,nrow(date),nstation) + for (i in 1:nvar) { + vtemp <- matrix(NA, nrow(date), nstation) vsel <- which(as.character(data$ELEM) == elem[i]) - for(j in 1:nstation){ - - idsel <- vsel[which(as.character(data$COOPID[vsel]) == ID[j])] - - for(d in 1:31){ - ##find right column - coln <- which(colnames(data) == paste("DAY",cday[d],sep="")) - val <- as.numeric(as.character(data[idsel,coln])) - ## check flags - flag1 <- as.character(data[idsel,coln+1]) - flag2 <- as.character(data[idsel,coln+2]) - val[flag1 %in% Flag1check] <- NA - val[flag2 %in% Flag2check] <- NA - - ## define units conversion - if(elem[i] %in% c("PRCP","SNOW","SNWD","TMIN","TMAX","TOBS")){ - val <- switch(elem[i], - PRCP = 0.254*val, ## hundreth inches/day -> mm/day - SNOW = 0.245*val, ## tenth in/day ->cm/day - SNWD = 2.45*val, ## in ->cm - TMAX = (val-32)*5/9, ## F -> C - TMIN = (val-32)*5/9, ## F -> C - TOBS = (val-32)*5/9 ## F -> C - ) - } - - ## match data to correct date - mydate <- data$yr[idsel]*10000+data$mo[idsel]*100+d - mtch <- match(mydate,ddate) - vtemp[mtch[!is.na(mtch)],j] <- val[!is.na(mtch)] - - } ## loop over days - } ## loop over stations - - out[[i+1]] <- vtemp - + for (j in 1:nstation) { + idsel <- vsel[which(as.character(data$COOPID[vsel]) == ID[j])] + + for (d in 1:31) { + ## find right column + coln <- which(colnames(data) == paste("DAY", cday[d], sep = "")) + val <- as.numeric(as.character(data[idsel, coln])) + ## check flags + flag1 <- as.character(data[idsel, coln + 1]) + flag2 <- as.character(data[idsel, coln + 2]) + val[flag1 %in% Flag1check] <- NA + val[flag2 %in% Flag2check] <- 
NA + + ## define units conversion + if (elem[i] %in% c("PRCP", "SNOW", "SNWD", "TMIN", "TMAX", "TOBS")) { + val <- switch(elem[i], + PRCP = 0.254 * val, ## hundreth inches/day -> mm/day + SNOW = 0.245 * val, ## tenth in/day ->cm/day + SNWD = 2.45 * val, ## in ->cm + TMAX = (val - 32) * 5 / 9, ## F -> C + TMIN = (val - 32) * 5 / 9, ## F -> C + TOBS = (val - 32) * 5 / 9 ## F -> C + ) + } + + ## match data to correct date + mydate <- data$yr[idsel] * 10000 + data$mo[idsel] * 100 + d + mtch <- match(mydate, ddate) + vtemp[mtch[!is.na(mtch)], j] <- val[!is.na(mtch)] + } ## loop over days + } ## loop over stations + + out[[i + 1]] <- vtemp } ## loop over variables - names(out) <- c("date",elem) + names(out) <- c("date", elem) return(out) } -ExtractNOAAstationMonthly <- function(data,elem=NULL,ID=NULL,DSETcheck=NULL,Flag1check=c("M","S"),Flag2check=c("2","3","T","U")){ +ExtractNOAAstationMonthly <- function(data, elem = NULL, ID = NULL, DSETcheck = NULL, Flag1check = c("M", "S"), Flag2check = c("2", "3", "T", "U")) { ## function that converts/extracts NOAA met station data ## INPUTS: ## data - raw data table @@ -139,78 +138,75 @@ ExtractNOAAstationMonthly <- function(data,elem=NULL,ID=NULL,DSETcheck=NULL,Flag ## list with a "date" table and a table for each elem (date X ID) ## date table contains year, month, day, decimal day (aka "julian") and decimal year ## values are converted to NA if they fail the checks - + sel <- which(!(as.character(data$COOPID) == "------")) - data <- data[sel,] - + data <- data[sel, ] + ## fill in defaults & set up storage out <- list() - if(is.null(elem)) { ## set list of variables if not given + if (is.null(elem)) { ## set list of variables if not given elem <- as.character(unique(data$ELEM)) elem <- elem[which(!(elem == "----"))] } obsID <- as.character(unique(data$COOPID)) # get list of observed stations obsID <- obsID[which(!(obsID == "------"))] - if(is.null(ID)) ID <- obsID - ID <- ID[ID %in% obsID] ## exclude ID's not in data set + if (is.null(ID)) ID <- obsID + ID <- ID[ID %in% obsID] ## exclude ID's not in data set - nvar <- length(elem) # number of variables - nstation <- length(ID) # number of stations + nvar <- length(elem) # number of variables + nstation <- length(ID) # number of stations ## remove invalid DSET -## if(!is.null(DSETcheck)){ -## sel <- which(!(data$DSET %in% DSETcheck)) -## data <- data[sel,] -## } + ## if(!is.null(DSETcheck)){ + ## sel <- which(!(data$DSET %in% DSETcheck)) + ## data <- data[sel,] + ## } - ##create date + ## create date yr <- as.numeric(as.character(sort(unique(data$YEAR)))) - mo <- rep(1:12,length(yr)) - yr <- rep(yr,each=12) - out[[1]] <- date <- cbind(yr,mo,yr+(mo-0.5)/12) - + mo <- rep(1:12, length(yr)) + yr <- rep(yr, each = 12) + out[[1]] <- date <- cbind(yr, mo, yr + (mo - 0.5) / 12) + ## create variables - for(i in 1:nvar){ - vtemp <- matrix(NA,nrow(date),nstation) ## set storage - vsel <- which(as.character(data$ELEM) == elem[i]) ## select variable - for(j in 1:nstation){ - + for (i in 1:nvar) { + vtemp <- matrix(NA, nrow(date), nstation) ## set storage + vsel <- which(as.character(data$ELEM) == elem[i]) ## select variable + for (j in 1:nstation) { idsel <- vsel[which(as.character(data$COOPID[vsel]) == ID[j])] ## select station - - for(m in 1:12){ - ##find right column - coln <- 7+5*m #which(colnames(data) == paste("DAY",cday[d],sep="")) - val <- as.numeric(as.character(data[idsel,coln])) + + for (m in 1:12) { + ## find right column + coln <- 7 + 5 * m # which(colnames(data) == paste("DAY",cday[d],sep="")) + val 
<- as.numeric(as.character(data[idsel, coln])) ## check flags - flag1 <- as.character(data[idsel,coln+1]) - flag2 <- as.character(data[idsel,coln+2]) + flag1 <- as.character(data[idsel, coln + 1]) + flag2 <- as.character(data[idsel, coln + 2]) val[flag1 %in% Flag1check] <- NA val[flag2 %in% Flag2check] <- NA - + ## define units conversion - if(elem[i] %in% c("TPCP","TSNW","MNTM")){ + if (elem[i] %in% c("TPCP", "TSNW", "MNTM")) { val <- switch(elem[i], - TPCP = 0.254*val, ## hundreth inches/month -> mm/month - TSNW = 0.254*val, ## tenth in/month ->cm/month - MNTM = (val*0.1-32)*5/9, ## tenth F -> C - ) + TPCP = 0.254 * val, ## hundreth inches/month -> mm/month + TSNW = 0.254 * val, ## tenth in/month ->cm/month + MNTM = (val * 0.1 - 32) * 5 / 9, ## tenth F -> C + ) } - + ## match data to correct date - mydate <- as.numeric(as.character(data$YEAR[idsel]))+(m-0.5)/12 - mtch <- match(mydate,date[,3]) - vtemp[mtch[!is.na(mtch)],j] <- val[!is.na(mtch)] - + mydate <- as.numeric(as.character(data$YEAR[idsel])) + (m - 0.5) / 12 + mtch <- match(mydate, date[, 3]) + vtemp[mtch[!is.na(mtch)], j] <- val[!is.na(mtch)] } ## loop over months } ## loop over stations - - out[[i+1]] <- vtemp - + + out[[i + 1]] <- vtemp } ## loop over variables - out[[i+2]] <- ID - - names(out) <- c("date",elem,"ID") + out[[i + 2]] <- ID + + names(out) <- c("date", elem, "ID") return(out) } diff --git a/modules/data.atmosphere/inst/scripts/GetLSHRG.R b/modules/data.atmosphere/inst/scripts/GetLSHRG.R index d7fcd3c647b..871168dfb36 100644 --- a/modules/data.atmosphere/inst/scripts/GetLSHRG.R +++ b/modules/data.atmosphere/inst/scripts/GetLSHRG.R @@ -1,11 +1,10 @@ -variable = c("dlwrf","dswrf","prcp","pres","shum","tas","wind") +variable <- c("dlwrf", "dswrf", "prcp", "pres", "shum", "tas", "wind") -year = 1948:2008 +year <- 1948:2008 -for(v in variable){ - for(y in year){ - - fname = paste(v,"_3hourly_",y,"-",y,".nc",sep="") - system(paste("wget http://hydrology.princeton.edu/data/pgf/1.0deg/3hourly/",fname,sep=""),wait=TRUE) +for (v in variable) { + for (y in year) { + fname <- paste(v, "_3hourly_", y, "-", y, ".nc", sep = "") + system(paste("wget http://hydrology.princeton.edu/data/pgf/1.0deg/3hourly/", fname, sep = ""), wait = TRUE) } } diff --git a/modules/data.atmosphere/inst/scripts/MetMerge.R b/modules/data.atmosphere/inst/scripts/MetMerge.R index abe90ea4263..57bbba5a34f 100644 --- a/modules/data.atmosphere/inst/scripts/MetMerge.R +++ b/modules/data.atmosphere/inst/scripts/MetMerge.R @@ -21,161 +21,156 @@ ## outpath output path ## width width of smoothing window ## nsmooth number of measurements on each side of day to be smoothed -month <- c("JAN","FEB","MAR","APR","MAY","JUN","JUL","AUG","SEP","OCT","NOV","DEC") +month <- c("JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC") Kelvin <- 273.15 sfun <- combl(width) ## Loop over met/rad files -for(yr in metstart[2]:metstop[2]){ +for (yr in metstart[2]:metstop[2]) { fmo <- 1 lmo <- 12 - if(yr == metstart[2]) fmo <- metstart[1] - if(yr == metstop[2]) lmo <- metstop[1] - for(mo in fmo:lmo){ + if (yr == metstart[2]) fmo <- metstart[1] + if (yr == metstop[2]) lmo <- metstop[1] + for (mo in fmo:lmo) { + print(c(mo, yr)) - print(c(mo,yr)) - ## read met file met <- rad <- NULL - if(mettype == 0){ - met <- read.met(metbase,mo,yr) - rad <- read.rad(metbase,mo,yr) + if (mettype == 0) { + met <- read.met(metbase, mo, yr) + rad <- read.rad(metbase, mo, yr) } else { - if(mettype == 1){ - met <- read.met.region(metbase,mo,yr,loc) - rad <- 
read.rad.region(metbase,mo,yr,loc) + if (mettype == 1) { + met <- read.met.region(metbase, mo, yr, loc) + rad <- read.rad.region(metbase, mo, yr, loc) } else { ## met is hdf - met <- hdf5load(paste(metbase,yr,month[mo],".h5",sep=""),load=FALSE) + met <- hdf5load(paste(metbase, yr, month[mo], ".h5", sep = ""), load = FALSE) } } - + ## select matching daily - sel <- which(apply(cbind(daily.time[,1] == yr,daily.time[,2]==mo),1,sum)==2) + sel <- which(apply(cbind(daily.time[, 1] == yr, daily.time[, 2] == mo), 1, sum) == 2) dtemp <- daily.ppt[sel] - dtemp.days <- daily.time[sel,3] + dtemp.days <- daily.time[sel, 3] - sel <- which(apply(cbind(temp.time[,1] == yr,temp.time[,2]==mo),1,sum)==2) + sel <- which(apply(cbind(temp.time[, 1] == yr, temp.time[, 2] == mo), 1, sum) == 2) temp.lowres <- daily.temp[sel] - temp.lowres.days <- temp.time[sel,3] - + temp.lowres.days <- temp.time[sel, 3] + ## first establish wet/dry day status in both records, calculate daily ppt - wetThresh <- 2.45/86400 # if < .1 inch/day assume is dry (below detection) - days <- rep(1:31,each=metres)[1:nrow(met)] - raddays <- rep(1:31,each=96)[1:nrow(rad)] - wet <- tapply((met$conprr+met$pcpg),days,sum)/metres - stormlen <- tapply((met$conprr+met$pcpg)>0,days,sum) + wetThresh <- 2.45 / 86400 # if < .1 inch/day assume is dry (below detection) + days <- rep(1:31, each = metres)[1:nrow(met)] + raddays <- rep(1:31, each = 96)[1:nrow(rad)] + wet <- tapply((met$conprr + met$pcpg), days, sum) / metres + stormlen <- tapply((met$conprr + met$pcpg) > 0, days, sum) iswet <- wet > wetThresh all.days <- sort(unique(days)) - + ## create temperature record - temp.hires <- tapply((met$theta),days,mean) - + temp.hires <- tapply((met$theta), days, mean) + ## id miss-match days daymatch <- 1:length(all.days) - daymatch[dtemp.days[apply(cbind(iswet[dtemp.days],dtemp > 0),1,sum) == 1]] <- NA + daymatch[dtemp.days[apply(cbind(iswet[dtemp.days], dtemp > 0), 1, sum) == 1]] <- NA daymatch[is.na(dtemp)] <- which(is.na(dtemp)) ## set missing to "match" to keep original data - for(t in 1:length(daymatch)){ - dmatch <- match(t,dtemp.days) - if(is.na(daymatch[t])&&!is.na(dmatch)){ - ##if target is wet, find nearest wet day - if(dtemp[dmatch] > 0){ + for (t in 1:length(daymatch)) { + dmatch <- match(t, dtemp.days) + if (is.na(daymatch[t]) && !is.na(dmatch)) { + ## if target is wet, find nearest wet day + if (dtemp[dmatch] > 0) { j <- t - for(i in 1:length(all.days)){ - j <- max(1,t-i) - if(iswet[j]) break; - j <- min(length(all.days),t+i) - if(iswet[j]) break; + for (i in 1:length(all.days)) { + j <- max(1, t - i) + if (iswet[j]) break + j <- min(length(all.days), t + i) + if (iswet[j]) break } daymatch[t] <- j } - ##if target is dry, find nearest dry day - if(dtemp[dmatch] == 0){ + ## if target is dry, find nearest dry day + if (dtemp[dmatch] == 0) { j <- t - for(i in 1:length(all.days)){ - j <- max(1,t-i) - if(!iswet[j]) break; - j <- min(length(all.days),t+i) - if(!iswet[j]) break; + for (i in 1:length(all.days)) { + j <- max(1, t - i) + if (!iswet[j]) break + j <- min(length(all.days), t + i) + if (!iswet[j]) break } daymatch[t] <- j } } - } - #temp.bias <- mean(temp.hires-temp.lowres) - + } + # temp.bias <- mean(temp.hires-temp.lowres) + ## make new met & rad files metnew <- met radnew <- rad - for(i in 1:length(all.days)){ - dmatch <- match(i,dtemp.days) + for (i in 1:length(all.days)) { + dmatch <- match(i, dtemp.days) ## select individual measurements to swap sel1 <- which(days == i) sel2 <- which(days == daymatch[i]) rsel1 <- which(raddays == i) 
rsel2 <- which(raddays == daymatch[i]) nrep <- length(sel2) - length(sel1) - if(nrep > 0){ - sel1 <- c(sel1,rep(sel1[length(sel1)],nrep)) + if (nrep > 0) { + sel1 <- c(sel1, rep(sel1[length(sel1)], nrep)) } - if(nrep < 0){ + if (nrep < 0) { nrep <- -nrep - sel2 <- c(sel2,rep(sel2[length(sel2)],nrep)) + sel2 <- c(sel2, rep(sel2[length(sel2)], nrep)) } - ## splice in nearest match day (both met and rad) - metnew[sel1,] <- met[sel2,] - radnew[sel1,] <- rad[sel2,] + ## splice in nearest match day (both met and rad) + metnew[sel1, ] <- met[sel2, ] + radnew[sel1, ] <- rad[sel2, ] ## rescale ppt to preserve daily sum - if(!is.na(dmatch) && !is.na(dtemp[dmatch])){ - if(dtemp[dmatch] > 0){ - fac <- dtemp[dmatch]/wet[daymatch[i]] + if (!is.na(dmatch) && !is.na(dtemp[dmatch])) { + if (dtemp[dmatch] > 0) { + fac <- dtemp[dmatch] / wet[daymatch[i]] metnew$conprr[sel1] <- metnew$conprr[sel1] * fac - metnew$pcpg[sel1] <- metnew$pcpg[sel1] * fac - } else { ## make sure dry days stay dry + metnew$pcpg[sel1] <- metnew$pcpg[sel1] * fac + } else { ## make sure dry days stay dry metnew$conprr[sel1] <- 0.0 metnew$pcpg[sel1] <- 0.0 } } - + ## linearly rescale temperature - tmatch <- match(i,temp.lowres.days) - if(!is.na(tmatch) && !is.na(temp.lowres[tmatch])){ - airtemp <- met$theta[sel1]*met$pi0[sel1]/1004 - b <-mean(airtemp)-temp.lowres[tmatch] ##mean(metnew$theta[sel1]) - metnew$theta[sel1] <- metnew$theta[sel1] - b*1004/met$pi0[sel1] + tmatch <- match(i, temp.lowres.days) + if (!is.na(tmatch) && !is.na(temp.lowres[tmatch])) { + airtemp <- met$theta[sel1] * met$pi0[sel1] / 1004 + b <- mean(airtemp) - temp.lowres[tmatch] ## mean(metnew$theta[sel1]) + metnew$theta[sel1] <- metnew$theta[sel1] - b * 1004 / met$pi0[sel1] } - + ## smooth transition on spliced - if(i > 1 && (i != daymatch[i] || (i-1) != daymatch[i-1])){ - splice <- matrix(NA,2*nsmooth+1,12) - rsplice <- matrix(NA,2*nsmooth+1,ncol(rad)) + if (i > 1 && (i != daymatch[i] || (i - 1) != daymatch[i - 1])) { + splice <- matrix(NA, 2 * nsmooth + 1, 12) + rsplice <- matrix(NA, 2 * nsmooth + 1, ncol(rad)) ## loop over data to be smoothed - for(w in 1:(2*nsmooth + 1)){ - row <- w - nsmooth -1 + sel1[1] - window <- 1:length(sfun) + row-floor(length(sfun)/2) - 1 - ##print(c(row,window)) - splice[w,] <- apply(metnew[window,]*sfun,2,sum) - - rrow <- w - nsmooth -1 + rsel1[1] - rwindow <- 1:length(sfun) + rrow-floor(length(sfun)/2) - 1 - ##print(c(row,window)) - rsplice[w,] <- apply(radnew[rwindow,]*sfun,2,sum) + for (w in 1:(2 * nsmooth + 1)) { + row <- w - nsmooth - 1 + sel1[1] + window <- 1:length(sfun) + row - floor(length(sfun) / 2) - 1 + ## print(c(row,window)) + splice[w, ] <- apply(metnew[window, ] * sfun, 2, sum) + + rrow <- w - nsmooth - 1 + rsel1[1] + rwindow <- 1:length(sfun) + rrow - floor(length(sfun) / 2) - 1 + ## print(c(row,window)) + rsplice[w, ] <- apply(radnew[rwindow, ] * sfun, 2, sum) } - metnew[-nsmooth:nsmooth+sel1[1],] <- splice - radnew[-nsmooth:nsmooth+rsel1[1],] <- rsplice + metnew[-nsmooth:nsmooth + sel1[1], ] <- splice + radnew[-nsmooth:nsmooth + rsel1[1], ] <- rsplice } - } - - redowet <- tapply((metnew$conprr+metnew$pcpg),days,sum)/metres - - ## write out new file - write.met(metnew,outpath,mo,yr) - write.rad(radnew,outpath,mo,yr) - - } ### end MONTH -} ### end YEAR + redowet <- tapply((metnew$conprr + metnew$pcpg), days, sum) / metres + ## write out new file + write.met(metnew, outpath, mo, yr) + write.rad(radnew, outpath, mo, yr) + } ### end MONTH +} ### end YEAR diff --git a/modules/data.atmosphere/inst/scripts/NACP2DRIVER.R 
b/modules/data.atmosphere/inst/scripts/NACP2DRIVER.R index 3afd6a96784..1af1dbce75b 100644 --- a/modules/data.atmosphere/inst/scripts/NACP2DRIVER.R +++ b/modules/data.atmosphere/inst/scripts/NACP2DRIVER.R @@ -5,192 +5,191 @@ library(ncdf) library(hdf5) ### FUNCTIONS -dm <- c(0,32,60,91,121,152,182,213,244,274,305,335,366) -dl <- c(0,32,61,92,122,153,183,214,245,275,306,336,367) -month <- c("JAN","FEB","MAR","APR","MAY","JUN","JUL","AUG","SEP","OCT","NOV","DEC") -mon_num <- c("01","02","03","04","05","06","07","08","09","10","11","12") -day2mo <- function(year, day){ +dm <- c(0, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) +dl <- c(0, 32, 61, 92, 122, 153, 183, 214, 245, 275, 306, 336, 367) +month <- c("JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC") +mon_num <- c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12") +day2mo <- function(year, day) { leap <- lubridate::leap_year(year) - mo <- rep(NA,length(day)) - mo[leap] <- findInterval(day[leap],dl) - mo[!leap] <- findInterval(day[!leap],dm) + mo <- rep(NA, length(day)) + mo[leap] <- findInterval(day[leap], dl) + mo[!leap] <- findInterval(day[!leap], dm) return(mo) } ## get site file -site <- read.csv("site_location.csv",header=TRUE) +site <- read.csv("site_location.csv", header = TRUE) ## Get file list -fname <- dir(".",".nc") +fname <- dir(".", ".nc") ## loop over files -for(i in 33:34){#35:length(fname)){ +for (i in 33:34) { # 35:length(fname)){ ## extract file root name - froot <- substr(fname[i],1,6) - print(c(i,froot)) + froot <- substr(fname[i], 1, 6) + print(c(i, froot)) ## open netcdf nc <- open.ncdf(fname[i]) ## determine GMT adjustment lst <- site$LST_shift[which(site$acro == froot)] - + ## extract variables - lat <- get.var.ncdf(nc,"lat") - lon <- get.var.ncdf(nc,"lon") - sec <- nc$dim$t$vals - Tair <- get.var.ncdf(nc,"Tair") - Qair <- get.var.ncdf(nc,"Qair") #humidity (kg/kg) - Wind <- get.var.ncdf(nc,"Wind") - Rain <- get.var.ncdf(nc,"Rainf") - pres <- get.var.ncdf(nc,"Psurf") - SW <- get.var.ncdf(nc,"SWdown") - LW <- get.var.ncdf(nc,"LWdown") - CO2 <- get.var.ncdf(nc,"CO2air") - - dt <- sec[2]-sec[1] - toff <- -lst*3600/dt - - ##buffer to get to GMT + lat <- get.var.ncdf(nc, "lat") + lon <- get.var.ncdf(nc, "lon") + sec <- nc$dim$t$vals + Tair <- get.var.ncdf(nc, "Tair") + Qair <- get.var.ncdf(nc, "Qair") # humidity (kg/kg) + Wind <- get.var.ncdf(nc, "Wind") + Rain <- get.var.ncdf(nc, "Rainf") + pres <- get.var.ncdf(nc, "Psurf") + SW <- get.var.ncdf(nc, "SWdown") + LW <- get.var.ncdf(nc, "LWdown") + CO2 <- get.var.ncdf(nc, "CO2air") + + dt <- sec[2] - sec[1] + toff <- -lst * 3600 / dt + + ## buffer to get to GMT slen <- length(SW) - Tair <- c(rep(Tair[1],toff),Tair)[1:slen] - Qair <- c(rep(Qair[1],toff),Qair)[1:slen] - Wind <- c(rep(Wind[1],toff),Wind)[1:slen] - Rain <- c(rep(Rain[1],toff),Rain)[1:slen] - pres <- c(rep(pres[1],toff),pres)[1:slen] - SW <- c(rep(SW[1],toff),SW)[1:slen] - LW <- c(rep(LW[1],toff),LW)[1:slen] - CO2 <- c(rep(CO2[1],toff),CO2)[1:slen] - + Tair <- c(rep(Tair[1], toff), Tair)[1:slen] + Qair <- c(rep(Qair[1], toff), Qair)[1:slen] + Wind <- c(rep(Wind[1], toff), Wind)[1:slen] + Rain <- c(rep(Rain[1], toff), Rain)[1:slen] + pres <- c(rep(pres[1], toff), pres)[1:slen] + SW <- c(rep(SW[1], toff), SW)[1:slen] + LW <- c(rep(LW[1], toff), LW)[1:slen] + CO2 <- c(rep(CO2[1], toff), CO2)[1:slen] + ## determine starting year - base.time <- as.numeric(substr(nc$dim$t$units,15,18)) - if(is.na(base.time)){ - print(c("did not extract base time 
correctly",froot,i,nc$dim$t$units)) + base.time <- as.numeric(substr(nc$dim$t$units, 15, 18)) + if (is.na(base.time)) { + print(c("did not extract base time correctly", froot, i, nc$dim$t$units)) break } - - ##build time variables (year, month, day of year) - nyr <- floor(length(sec)/86400/365*dt) + + ## build time variables (year, month, day of year) + nyr <- floor(length(sec) / 86400 / 365 * dt) yr <- NULL doy <- NULL hr <- NULL asec <- sec - for(y in base.time+1:nyr-1){ - ytmp <- rep(y,365*86400/dt) - dtmp <- rep(1:365,each=86400/dt) - if(lubridate::leap_year(y)) { ## is leap - ytmp <- rep(y, 366*86400 / dt) - dtmp <- rep(1: 366, each=86400 / dt) + for (y in base.time + 1:nyr - 1) { + ytmp <- rep(y, 365 * 86400 / dt) + dtmp <- rep(1:365, each = 86400 / dt) + if (lubridate::leap_year(y)) { ## is leap + ytmp <- rep(y, 366 * 86400 / dt) + dtmp <- rep(1:366, each = 86400 / dt) } - if(is.null(yr)){ + if (is.null(yr)) { yr <- ytmp doy <- dtmp - hr <- rep(NA,length(dtmp)) + hr <- rep(NA, length(dtmp)) } else { - yr <- c(yr,ytmp) - doy <- c(doy,dtmp) - hr <- c(hr,rep(NA,length(dtmp))) + yr <- c(yr, ytmp) + doy <- c(doy, dtmp) + hr <- c(hr, rep(NA, length(dtmp))) } rng <- length(doy) - length(ytmp):1 + 1 asec[rng] <- asec[rng] - asec[rng[1]] - hr[rng] <- (asec[rng] - (dtmp-1)*86400)/86400*24 + hr[rng] <- (asec[rng] - (dtmp - 1) * 86400) / 86400 * 24 } - mo <- day2mo(yr,doy) - if(length(yr) < length(sec)){ - rng <- (length(yr)+1):length(sec) - yr[rng] <- rep(y+1,length(rng)) - doy[rng] <- rep(1:366,each=86400/dt)[1:length(rng)] - hr[rng] <- rep(seq(0,length=86400/dt,by=dt/86400*24),366)[1:length(rng)] + mo <- day2mo(yr, doy) + if (length(yr) < length(sec)) { + rng <- (length(yr) + 1):length(sec) + yr[rng] <- rep(y + 1, length(rng)) + doy[rng] <- rep(1:366, each = 86400 / dt)[1:length(rng)] + hr[rng] <- rep(seq(0, length = 86400 / dt, by = dt / 86400 * 24), 366)[1:length(rng)] } - + ## calculate potential radiation ## in order to estimate diffuse/direct - f <- pi/180*(279.5+0.9856*doy) - et <- (-104.7*sin(f)+596.2*sin(2*f)+4.3*sin(4*f)-429.3*cos(f)-2.0*cos(2*f)+19.3*cos(3*f))/3600 #equation of time -> eccentricity and obliquity - merid <- floor(lon/15)*15 - if(merid<0) merid <- merid+15 - lc <- (lon-merid)*-4/60 ## longitude correction - tz <- merid/360*24 ## time zone - midbin <- 0.5*dt/86400*24 ## shift calc to middle of bin - t0 <- 12+lc-et-tz-midbin ## solar time - h <- pi/12*(hr-t0) ## solar hour - dec <- -23.45*pi/180*cos(2*pi*(doy+10)/365) ## declination - - cosz <- sin(lat*pi/180)*sin(dec)+cos(lat*pi/180)*cos(dec)*cos(h) - cosz[cosz<0] <- 0 - - rpot <- 1366*cosz + f <- pi / 180 * (279.5 + 0.9856 * doy) + et <- (-104.7 * sin(f) + 596.2 * sin(2 * f) + 4.3 * sin(4 * f) - 429.3 * cos(f) - 2.0 * cos(2 * f) + 19.3 * cos(3 * f)) / 3600 # equation of time -> eccentricity and obliquity + merid <- floor(lon / 15) * 15 + if (merid < 0) merid <- merid + 15 + lc <- (lon - merid) * -4 / 60 ## longitude correction + tz <- merid / 360 * 24 ## time zone + midbin <- 0.5 * dt / 86400 * 24 ## shift calc to middle of bin + t0 <- 12 + lc - et - tz - midbin ## solar time + h <- pi / 12 * (hr - t0) ## solar hour + dec <- -23.45 * pi / 180 * cos(2 * pi * (doy + 10) / 365) ## declination + + cosz <- sin(lat * pi / 180) * sin(dec) + cos(lat * pi / 180) * cos(dec) * cos(h) + cosz[cosz < 0] <- 0 + + rpot <- 1366 * cosz rpot <- rpot[1:length(SW)] - - SW[rpot < SW] <- rpot[rpot<SW] ## ensure radiation < max -### this causes trouble at twilight bc of missmatch btw bin avergage and bin midpoint - frac <- SW/rpot - frac[frac>0.9] <- 0.9 ## ensure some diffuse + + SW[rpot < SW] <- rpot[rpot < SW] ## ensure radiation < max + ### this causes trouble at twilight bc of
missmatch btw bin avergage and bin midpoint + frac <- SW / rpot + frac[frac > 0.9] <- 0.9 ## ensure some diffuse frac[frac < 0.0] <- 0.0 frac[is.na(frac)] <- 0.0 frac[is.nan(frac)] <- 0.0 - SWd <- SW*(1-frac) ## Diffuse portion of total short wave rad - -### convert to ED2.1 hdf met variables - n <- length(Tair) + SWd <- SW * (1 - frac) ## Diffuse portion of total short wave rad + + ### convert to ED2.1 hdf met variables + n <- length(Tair) nbdsfA <- (SW - SWd) * 0.57 # near IR beam downward solar radiation [W/m2] - nddsfA <- SWd * 0.48 # near IR diffuse downward solar radiation [W/m2] + nddsfA <- SWd * 0.48 # near IR diffuse downward solar radiation [W/m2] vbdsfA <- (SW - SWd) * 0.43 # visible beam downward solar radiation [W/m2] - vddsfA <- SWd * 0.52 # visible diffuse downward solar radiation [W/m2] - prateA <- Rain # precipitation rate [kg_H2O/m2/s] - dlwrfA <- LW # downward long wave radiation [W/m2] - presA <- pres # pressure [Pa] - hgtA <- rep(50,n) # geopotential height [m] - ugrdA <- Wind # zonal wind [m/s] - vgrdA <- rep(0,n) # meridional wind [m/s] - shA <- Qair # specific humidity [kg_H2O/kg_air] - tmpA <- Tair # temperature [K] - co2A <- CO2 # surface co2 concentration [ppm] + vddsfA <- SWd * 0.52 # visible diffuse downward solar radiation [W/m2] + prateA <- Rain # precipitation rate [kg_H2O/m2/s] + dlwrfA <- LW # downward long wave radiation [W/m2] + presA <- pres # pressure [Pa] + hgtA <- rep(50, n) # geopotential height [m] + ugrdA <- Wind # zonal wind [m/s] + vgrdA <- rep(0, n) # meridional wind [m/s] + shA <- Qair # specific humidity [kg_H2O/kg_air] + tmpA <- Tair # temperature [K] + co2A <- CO2 # surface co2 concentration [ppm] ## create directory - if(system(paste("ls",froot),ignore.stderr=TRUE)>0) system(paste("mkdir",froot)) - + if (system(paste("ls", froot), ignore.stderr = TRUE) > 0) system(paste("mkdir", froot)) + ## write by year and month - for(y in base.time+1:nyr-1){ + for (y in base.time + 1:nyr - 1) { sely <- which(yr == y) - for(m in unique(mo[sely])){ + for (m in unique(mo[sely])) { selm <- sely[which(mo[sely] == m)] - mout <- paste(froot,"/",froot,"_",y,month[m],".h5",sep="") - dims <- c(1,1,length(selm)) - nbdsf <- array(nbdsfA[selm],dim=dims) - nddsf <- array(nddsfA[selm],dim=dims) - vbdsf <- array(vbdsfA[selm],dim=dims) - vddsf <- array(vddsfA[selm],dim=dims) - prate <- array(prateA[selm],dim=dims) - dlwrf <- array(dlwrfA[selm],dim=dims) - pres <- array(presA[selm],dim=dims) - hgt <- array(hgtA[selm],dim=dims) - ugrd <- array(ugrdA[selm],dim=dims) - vgrd <- array(vgrdA[selm],dim=dims) - sh <- array(shA[selm],dim=dims) - tmp <- array(tmpA[selm],dim=dims) - co2 <- array(co2A[selm],dim=dims) - hdf5save(mout,"nbdsf","nddsf","vbdsf","vddsf","prate","dlwrf","pres","hgt","ugrd","vgrd","sh","tmp","co2") + mout <- paste(froot, "/", froot, "_", y, month[m], ".h5", sep = "") + dims <- c(1, 1, length(selm)) + nbdsf <- array(nbdsfA[selm], dim = dims) + nddsf <- array(nddsfA[selm], dim = dims) + vbdsf <- array(vbdsfA[selm], dim = dims) + vddsf <- array(vddsfA[selm], dim = dims) + prate <- array(prateA[selm], dim = dims) + dlwrf <- array(dlwrfA[selm], dim = dims) + pres <- array(presA[selm], dim = dims) + hgt <- array(hgtA[selm], dim = dims) + ugrd <- array(ugrdA[selm], dim = dims) + vgrd <- array(vgrdA[selm], dim = dims) + sh <- array(shA[selm], dim = dims) + tmp <- array(tmpA[selm], dim = dims) + co2 <- array(co2A[selm], dim = dims) + hdf5save(mout, "nbdsf", "nddsf", "vbdsf", "vddsf", "prate", "dlwrf", "pres", "hgt", "ugrd", "vgrd", "sh", "tmp", "co2") } } ## write 
DRIVER file sites <- 1 - metfile <- paste(froot,"/ED_MET_DRIVER_HEADER",sep="") - metpath <- paste(getwd(),"/",froot,"/",froot,"_",sep="") - metgrid <- c(1,1,1,1,floor(lon),floor(lat)) - metvar <- c("nbdsf","nddsf","vbdsf","vddsf","prate","dlwrf","pres","hgt","ugrd","vgrd","sh","tmp","co2") + metfile <- paste(froot, "/ED_MET_DRIVER_HEADER", sep = "") + metpath <- paste(getwd(), "/", froot, "/", froot, "_", sep = "") + metgrid <- c(1, 1, 1, 1, floor(lon), floor(lat)) + metvar <- c("nbdsf", "nddsf", "vbdsf", "vddsf", "prate", "dlwrf", "pres", "hgt", "ugrd", "vgrd", "sh", "tmp", "co2") nmet <- length(metvar) - metfrq <- rep(dt,nmet) - metflag <- rep(1,nmet) - write.table("#header",metfile,row.names=FALSE,col.names=FALSE) - write.table(sites,metfile,row.names=FALSE,col.names=FALSE,append=TRUE) - write.table(metpath,metfile,row.names=FALSE,col.names=FALSE,append=TRUE,quote=FALSE) - write.table(matrix(metgrid,nrow=1),metfile,row.names=FALSE,col.names=FALSE,append=TRUE,quote=FALSE) - write.table(nmet,metfile,row.names=FALSE,col.names=FALSE,append=TRUE,quote=FALSE) - write.table(matrix(metvar,nrow=1),metfile,row.names=FALSE,col.names=FALSE,append=TRUE) - write.table(matrix(metfrq,nrow=1),metfile,row.names=FALSE,col.names=FALSE,append=TRUE,quote=FALSE) - write.table(matrix(metflag,nrow=1),metfile,row.names=FALSE,col.names=FALSE,append=TRUE,quote=FALSE) - + metfrq <- rep(dt, nmet) + metflag <- rep(1, nmet) + write.table("#header", metfile, row.names = FALSE, col.names = FALSE) + write.table(sites, metfile, row.names = FALSE, col.names = FALSE, append = TRUE) + write.table(metpath, metfile, row.names = FALSE, col.names = FALSE, append = TRUE, quote = FALSE) + write.table(matrix(metgrid, nrow = 1), metfile, row.names = FALSE, col.names = FALSE, append = TRUE, quote = FALSE) + write.table(nmet, metfile, row.names = FALSE, col.names = FALSE, append = TRUE, quote = FALSE) + write.table(matrix(metvar, nrow = 1), metfile, row.names = FALSE, col.names = FALSE, append = TRUE) + write.table(matrix(metfrq, nrow = 1), metfile, row.names = FALSE, col.names = FALSE, append = TRUE, quote = FALSE) + write.table(matrix(metflag, nrow = 1), metfile, row.names = FALSE, col.names = FALSE, append = TRUE, quote = FALSE) } ### end loop over met files diff --git a/modules/data.atmosphere/inst/scripts/NARRdriver.R b/modules/data.atmosphere/inst/scripts/NARRdriver.R index e8808a70132..82312013d8e 100644 --- a/modules/data.atmosphere/inst/scripts/NARRdriver.R +++ b/modules/data.atmosphere/inst/scripts/NARRdriver.R @@ -1,12 +1,12 @@ ## script to drive NARR download -#year <- 2008 -#mo <- 8 -moTXT <- c("01","02","03","04","05","06","07","08","09","10","11","12") +# year <- 2008 +# mo <- 8 +moTXT <- c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12") -for(year in seq(1984,1979,by=-1)){ - for(mo in c(1:12)){ - system(paste("./NARR.wget",year,moTXT[mo])) +for (year in seq(1984, 1979, by = -1)) { + for (mo in c(1:12)) { + system(paste("./NARR.wget", year, moTXT[mo])) } } diff --git a/modules/data.atmosphere/inst/scripts/NARRinventory.R b/modules/data.atmosphere/inst/scripts/NARRinventory.R index 400c820e172..225e9d5da25 100644 --- a/modules/data.atmosphere/inst/scripts/NARRinventory.R +++ b/modules/data.atmosphere/inst/scripts/NARRinventory.R @@ -1,49 +1,51 @@ ## code to find and download missing NARR files -##functions -num <- function(x){as.numeric(as.character(x))} -mchar <- c("01","02","03","04","05","06","07","08","09","10","11","12") -mlennorm <- c(31,28,31,30,31,30,31,31,30,31,30,31) -mlenleap <- 
c(31,29,31,30,31,30,31,31,30,31,30,31) +## functions +num <- function(x) { + as.numeric(as.character(x)) +} +mchar <- c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12") +mlennorm <- c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) +mlenleap <- c(31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) mlen <- function(m, y) { ifelse(lubridate::leap_year(y), mlenleap[m], mlennorm[m]) } -files <- dir(pattern="tar") -flxfiles <- files[grep("flx",files)] -sfcfiles <- files[grep("sfc",files)] +files <- dir(pattern = "tar") +flxfiles <- files[grep("flx", files)] +sfcfiles <- files[grep("sfc", files)] # authenticate system('wget --no-check-certificate -O /dev/null --save-cookies auth.dss_ucar_edu --post-data="email=mdietze@illinois.edu&passwd=fromgood&action=login" https://dss.ucar.edu/cgi-bin/login') ## process FLX files -yr <- num(substr(flxfiles,9,12)) -mo <- num(substr(flxfiles,13,14)) -t0 <- num(substr(flxfiles,16,17)) -tf <- num(substr(flxfiles,18,19)) +yr <- num(substr(flxfiles, 9, 12)) +mo <- num(substr(flxfiles, 13, 14)) +t0 <- num(substr(flxfiles, 16, 17)) +tf <- num(substr(flxfiles, 18, 19)) -for(y in 2008:2009){ +for (y in 2008:2009) { ycode <- "" -# if(y %in% 2003:2005) ycode <- "rerun" + # if(y %in% 2003:2005) ycode <- "rerun" ysel <- which(yr == y) - if(length(ysel) < 48){ - for(m in 1:12){ + if (length(ysel) < 48) { + for (m in 1:12) { msel <- which(mo[ysel] == m) - if(length(msel) < 4){ - if(!(1 %in% t0[msel])){ - system(paste("wget --no-check-certificate -N --load-cookies auth.dss_ucar_edu https://dss.ucar.edu/datazone/dsszone/ds608.0/NARR/3HRLY_TAR/",y,mchar[m],ycode,"/NARRflx_",y,mchar[m],"_0108.tar",sep="")) + if (length(msel) < 4) { + if (!(1 %in% t0[msel])) { + system(paste("wget --no-check-certificate -N --load-cookies auth.dss_ucar_edu https://dss.ucar.edu/datazone/dsszone/ds608.0/NARR/3HRLY_TAR/", y, mchar[m], ycode, "/NARRflx_", y, mchar[m], "_0108.tar", sep = "")) + } + if (!(9 %in% t0[msel])) { + system(paste("wget --no-check-certificate -N --load-cookies auth.dss_ucar_edu https://dss.ucar.edu/datazone/dsszone/ds608.0/NARR/3HRLY_TAR/", y, mchar[m], ycode, "/NARRflx_", y, mchar[m], "_0916.tar", sep = "")) } - if(!(9 %in% t0[msel])){ - system(paste("wget --no-check-certificate -N --load-cookies auth.dss_ucar_edu https://dss.ucar.edu/datazone/dsszone/ds608.0/NARR/3HRLY_TAR/",y,mchar[m],ycode,"/NARRflx_",y,mchar[m],"_0916.tar",sep="")) + if (!(17 %in% t0[msel])) { + system(paste("wget --no-check-certificate -N --load-cookies auth.dss_ucar_edu http://dss.ucar.edu/datazone/dsszone/ds608.0/NARR/3HRLY_TAR/", y, mchar[m], ycode, "/NARRflx_", y, mchar[m], "_1724.tar", sep = "")) } - if(!(17 %in% t0[msel])){ - system(paste("wget --no-check-certificate -N --load-cookies auth.dss_ucar_edu http://dss.ucar.edu/datazone/dsszone/ds608.0/NARR/3HRLY_TAR/",y,mchar[m],ycode,"/NARRflx_",y,mchar[m],"_1724.tar",sep="")) + if (!(25 %in% t0[msel])) { + system(paste("wget --no-check-certificate -N --load-cookies auth.dss_ucar_edu https://dss.ucar.edu/datazone/dsszone/ds608.0/NARR/3HRLY_TAR/", y, mchar[m], ycode, "/NARRflx_", y, mchar[m], "_25", mlen(m, y), ".tar", sep = "")) } - if(!(25 %in% t0[msel])){ - system(paste("wget --no-check-certificate -N --load-cookies auth.dss_ucar_edu https://dss.ucar.edu/datazone/dsszone/ds608.0/NARR/3HRLY_TAR/",y,mchar[m],ycode,"/NARRflx_",y,mchar[m],"_25",mlen(m,y),".tar",sep="")) - } } } } @@ -51,28 +53,28 @@ for(y in 2008:2009){ ## PROCESS SFC FILES -yr <- num(substr(sfcfiles,9,12)) -mo <- num(substr(sfcfiles,13,14)) -t0 <- 
num(substr(sfcfiles,16,17)) -tf <- num(substr(sfcfiles,18,19)) +yr <- num(substr(sfcfiles, 9, 12)) +mo <- num(substr(sfcfiles, 13, 14)) +t0 <- num(substr(sfcfiles, 16, 17)) +tf <- num(substr(sfcfiles, 18, 19)) -for(y in 1979:2009){ +for (y in 1979:2009) { ycode <- "" -# if(y %in% 2003:2005) ycode <- "rerun" + # if(y %in% 2003:2005) ycode <- "rerun" ysel <- which(yr == y) - if(length(ysel) < 36){ - for(m in 1:12){ + if (length(ysel) < 36) { + for (m in 1:12) { msel <- which(mo[ysel] == m) - if(length(msel) < 3){ - if(!(1 %in% t0[msel])){ - system(paste("wget --no-check-certificate -N --load-cookies auth.dss_ucar_edu https://dss.ucar.edu/datazone/dsszone/ds608.0/NARR/3HRLY_TAR/",y,mchar[m],ycode,"/NARRsfc_",y,mchar[m],"_0109.tar",sep="")) + if (length(msel) < 3) { + if (!(1 %in% t0[msel])) { + system(paste("wget --no-check-certificate -N --load-cookies auth.dss_ucar_edu https://dss.ucar.edu/datazone/dsszone/ds608.0/NARR/3HRLY_TAR/", y, mchar[m], ycode, "/NARRsfc_", y, mchar[m], "_0109.tar", sep = "")) + } + if (!(10 %in% t0[msel])) { + system(paste("wget --no-check-certificate -N --load-cookies auth.dss_ucar_edu https://dss.ucar.edu/datazone/dsszone/ds608.0/NARR/3HRLY_TAR/", y, mchar[m], ycode, "/NARRsfc_", y, mchar[m], "_1019.tar", sep = "")) } - if(!(10 %in% t0[msel])){ - system(paste("wget --no-check-certificate -N --load-cookies auth.dss_ucar_edu https://dss.ucar.edu/datazone/dsszone/ds608.0/NARR/3HRLY_TAR/",y,mchar[m],ycode,"/NARRsfc_",y,mchar[m],"_1019.tar",sep="")) + if (!(20 %in% t0[msel])) { + system(paste("wget --no-check-certificate -N --load-cookies auth.dss_ucar_edu https://dss.ucar.edu/datazone/dsszone/ds608.0/NARR/3HRLY_TAR/", y, mchar[m], ycode, "/NARRsfc_", y, mchar[m], "_20", mlen(m, y), ".tar", sep = "")) } - if(!(20 %in% t0[msel])){ - system(paste("wget --no-check-certificate -N --load-cookies auth.dss_ucar_edu https://dss.ucar.edu/datazone/dsszone/ds608.0/NARR/3HRLY_TAR/",y,mchar[m],ycode,"/NARRsfc_",y,mchar[m],"_20",mlen(m,y),".tar",sep="")) - } } } } diff --git a/modules/data.atmosphere/inst/scripts/ORNL_FACE_MET.v2.R b/modules/data.atmosphere/inst/scripts/ORNL_FACE_MET.v2.R index bde68b81d0a..86b78759923 100644 --- a/modules/data.atmosphere/inst/scripts/ORNL_FACE_MET.v2.R +++ b/modules/data.atmosphere/inst/scripts/ORNL_FACE_MET.v2.R @@ -1,25 +1,28 @@ - -library(hdf5,lib.loc="~/lib/R/Rhdf") +library(hdf5, lib.loc = "~/lib/R/Rhdf") ### FUNCTIONS -dm <- c(0,32,60,91,121,152,182,213,244,274,305,335,366) -dl <- c(0,32,61,92,122,153,183,214,245,275,306,336,367) -month <- c("JAN","FEB","MAR","APR","MAY","JUN","JUL","AUG","SEP","OCT","NOV","DEC") -mon_num <- c("01","02","03","04","05","06","07","08","09","10","11","12") -day2mo <- function(year,day){ +dm <- c(0, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) +dl <- c(0, 32, 61, 92, 122, 153, 183, 214, 245, 275, 306, 336, 367) +month <- c("JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC") +mon_num <- c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12") +day2mo <- function(year, day) { leap <- lubridate::leap_year(year) - mo <- rep(NA,length(day)) - mo[leap] <- findInterval(day[leap],dl) - mo[!leap] <- findInterval(day[!leap],dm) + mo <- rep(NA, length(day)) + mo[leap] <- findInterval(day[leap], dl) + mo[!leap] <- findInterval(day[!leap], dm) return(mo) } -num <- function(d){as.numeric(as.character(d))} +num <- function(d) { + as.numeric(as.character(d)) +} -checkNA <- function(val,master){ +checkNA <- function(val, master) { sNA <- which(is.na(val)) - 
if(length(sNA) == 0) return(val) + if (length(sNA) == 0) { + return(val) + } ## translate NA's - mNA <- round(sNA*length(master)/length(val)) + mNA <- round(sNA * length(master) / length(val)) val[sNA] <- master[mNA] return(val) } @@ -29,123 +32,125 @@ lat <- 35.90 lon <- -84.33 fname <- "NCDF/ORNL_met_halfhourly.txt" -dat <- read.table(fname,header=TRUE) - - n <- nrow(dat) - tmpA <- num(dat$Tair.) - ugrdA <- num(dat$wind.) - vgrdA <- rep(0.0,n) - prateA <- num(dat$Rainf.) - hgtA <- rep(50,n) # geopotential height [m] - SW <- num(dat$SWdown.) #w/m2 - shA <- num(dat$Qair.) - dlwrfA <- num(dat$LWdown.) # downward long wave radiation [W/m2] - presA <- num(dat$PSurf.) # pressure [Pa] - - ### RADIATION CALCULATION - ##build time variables (year, month, day of year) - dt <- 3600 - yr <- num(dat$year.) - doy <- num(dat$doy.) - hr <- num(dat$hod.) - mo <- day2mo(yr,doy) - - ## calculate potential radiation - ## in order to estimate diffuse/direct - cosz <- PEcAn.data.atmosphere::cos_solar_zenith_angle(doy, lat, lon, dt, hr) - - rpot <- 1366*cosz - rpot <- rpot[1:n] - rpotL <-(rpot[c(9:n,1:8)])#rad in local time - - SW[rpotL < SW] <- rpotL[rpotL < SW] ## ensure radiation < max -### this causes trouble at twilight bc of missmatch btw bin avergage and bin midpoint - frac <- SW/rpotL - frac[frac>0.9] <- 0.9 ## ensure some diffuse - frac[frac < 0.0] <- 0.0 - frac[is.na(frac)] <- 0.0 - frac[is.nan(frac)] <- 0.0 - SWd <- SW*(1-frac) ## Diffuse portion of total short wave rad +dat <- read.table(fname, header = TRUE) + +n <- nrow(dat) +tmpA <- num(dat$Tair.) +ugrdA <- num(dat$wind.) +vgrdA <- rep(0.0, n) +prateA <- num(dat$Rainf.) +hgtA <- rep(50, n) # geopotential height [m] +SW <- num(dat$SWdown.) # w/m2 +shA <- num(dat$Qair.) +dlwrfA <- num(dat$LWdown.) # downward long wave radiation [W/m2] +presA <- num(dat$PSurf.) # pressure [Pa] + +### RADIATION CALCULATION +## build time variables (year, month, day of year) +dt <- 3600 +yr <- num(dat$year.) +doy <- num(dat$doy.) +hr <- num(dat$hod.)
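For readers skimming this hunk: day2mo(), reformatted above, maps a day of year to its month by calling findInterval() against the cumulative month-start offsets (dl in leap years, dm otherwise), so no date parsing is needed. A quick usage sketch, assuming the dm/dl vectors and day2mo() from this script are loaded and lubridate is installed:

day2mo(2007, c(59, 60, 365)) # non-leap year -> 2, 3, 12 (day 60 is 1 Mar)
day2mo(2008, c(59, 60, 366)) # leap year -> 2, 2, 12 (day 60 is 29 Feb)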
+mo <- day2mo(yr, doy) + +## calculate potential radiation +## in order to estimate diffuse/direct +cosz <- PEcAn.data.atmosphere::cos_solar_zenith_angle(doy, lat, lon, dt, hr) + +rpot <- 1366 * cosz +rpot <- rpot[1:n] +rpotL <- (rpot[c(9:n, 1:8)]) # rad in local time + +SW[rpotL < SW] <- rpotL[rpotL < SW] ## ensure radiation < max +### this causes trouble at twilight bc of missmatch btw bin avergage and bin midpoint +frac <- SW / rpotL +frac[frac > 0.9] <- 0.9 ## ensure some diffuse +frac[frac < 0.0] <- 0.0 +frac[is.na(frac)] <- 0.0 +frac[is.nan(frac)] <- 0.0 +SWd <- SW * (1 - frac) ## Diffuse portion of total short wave rad ### convert to ED2.1 hdf met variables - nbdsfA <- (SW - SWd) * 0.57 # near IR beam downward solar radiation [W/m2] - nddsfA <- SWd * 0.48 # near IR diffuse downward solar radiation [W/m2] - vbdsfA <- (SW - SWd) * 0.43 # visible beam downward solar radiation [W/m2] - vddsfA <- SWd * 0.52 # visible diffuse downward solar radiation [W/m2] - -cname <- dir(".","co2_gap") -cname <- cname[grep("dat",cname)] -for(i in 1:length(cname)){ - -## Load CO2 file - cyr <- num(substr(cname,9,12))[i] - year = cyr - co2file <- read.table(cname[i],skip=9) - r1 <- co2file[co2file[,4] == 1,5] - r2 <- co2file[co2file[,4] == 2,5] - r4 <- co2file[co2file[,4] == 4,5] - r5 <- co2file[co2file[,4] == 5,5] - cdoy <- co2file[co2file[,4] == 1,2] - cmo <- day2mo(cyr,cdoy) - ELEV <- 0.5*(r1+r2) - AMB <- 0.5*(r4+r5) +nbdsfA <- (SW - SWd) * 0.57 # near IR beam downward solar radiation [W/m2] +nddsfA <- SWd * 0.48 # near IR diffuse downward solar radiation [W/m2] +vbdsfA <- (SW - SWd) * 0.43 # visible beam downward solar radiation [W/m2] +vddsfA <- SWd * 0.52 # visible diffuse downward solar radiation [W/m2] + +cname <- dir(".", "co2_gap") +cname <- cname[grep("dat", cname)] +for (i in 1:length(cname)) { + ## Load CO2 file + cyr <- num(substr(cname, 9, 12))[i] + year <- cyr + co2file <- read.table(cname[i], skip = 9) + r1 <- co2file[co2file[, 4] == 1, 5] + r2 <- co2file[co2file[, 4] == 2, 5] + r4 <- co2file[co2file[, 4] == 4, 5] + r5 <- co2file[co2file[, 4] == 5, 5] + cdoy <- co2file[co2file[, 4] == 1, 2] + cmo <- day2mo(cyr, cdoy) + ELEV <- 0.5 * (r1 + r2) + AMB <- 0.5 * (r4 + r5) ELEV[ELEV > 700] <- 550 AMB[AMB > 700] <- 360 - sely = which(yr == cyr) - for(m in unique(mo)){ - selm <- sely[which(mo[sely] == m)]-8 + sely <- which(yr == cyr) + for (m in unique(mo)) { + selm <- sely[which(mo[sely] == m)] - 8 selm[selm < 1] <- 1 - dims <- c(1,1,length(selm)) - nbdsf <- array(nbdsfA[selm],dim=dims) - nddsf <- array(nddsfA[selm],dim=dims) - vbdsf <- array(vbdsfA[selm],dim=dims) - vddsf <- array(vddsfA[selm],dim=dims) - prate <- array(prateA[selm],dim=dims) -# dlwrf <- array(dlwrfA[selm],dim=dims) -# pres <- array(presA[selm],dim=dims) - hgt <- array(hgtA[selm],dim=dims) - ugrd <- array(ugrdA[selm],dim=dims) - vgrd <- array(vgrdA[selm],dim=dims) - sh <- array(shA[selm],dim=dims) - tmp <- array(tmpA[selm],dim=dims) -# co2 <- array(co2A[selm],dim=dims) + dims <- c(1, 1, length(selm)) + nbdsf <- array(nbdsfA[selm], dim = dims) + nddsf <- array(nddsfA[selm], dim = dims) + vbdsf <- array(vbdsfA[selm], dim = dims) + vddsf <- array(vddsfA[selm], dim = dims) + prate <- array(prateA[selm], dim = dims) + # dlwrf <- array(dlwrfA[selm],dim=dims) + # pres <- array(presA[selm],dim=dims) + hgt <- array(hgtA[selm], dim = dims) + ugrd <- array(ugrdA[selm], dim = dims) + vgrd <- array(vgrdA[selm], dim = dims) + sh <- array(shA[selm], dim = dims) + tmp <- array(tmpA[selm], dim = dims) + # co2 <- array(co2A[selm],dim=dims) ## 
grab & fill in other vars -## ncep <- read.table(paste("ncep/",mon_num[m],year,".dat",sep="")) -## dlwrf <- rep(ncep[,11],each=6) -## pres <- 1e5*(rep(ncep[,6],each=6)/1004.)^(1004./287.) ## exner -> pres -## narr <- hdf5load(paste("NARR/ORNL_",year,month[m],".h5",sep=""),load=FALSE) -## dlwrf <- narr$dlwrf -## pres <- narr$pres - dlwrf <- array(dlwrfA[selm],dim=dims) - pres <- array(presA[selm],dim=dims) + ## ncep <- read.table(paste("ncep/",mon_num[m],year,".dat",sep="")) + ## dlwrf <- rep(ncep[,11],each=6) + ## pres <- 1e5*(rep(ncep[,6],each=6)/1004.)^(1004./287.) ## exner -> pres + ## narr <- hdf5load(paste("NARR/ORNL_",year,month[m],".h5",sep=""),load=FALSE) + ## dlwrf <- narr$dlwrf + ## pres <- narr$pres + dlwrf <- array(dlwrfA[selm], dim = dims) + pres <- array(presA[selm], dim = dims) ## fill missing - nbdsf <- checkNA(nbdsf,narr$nbdsf) - nddsf <- checkNA(nddsf,narr$nddsf) - vbdsf <- checkNA(vbdsf,narr$vbdsf) - vddsf <- checkNA(vddsf,narr$vddsf) - prate <- checkNA(prate,narr$prate) - hgt <- checkNA(hgt,narr$hgt) - ugrd <- checkNA(ugrd,sqrt(narr$ugrd^2+narr$vgrd^2)) - sh <- checkNA(sh,narr$sh) - tmp <- checkNA(tmp,narr$tmp) - - selcm <- which(cmo == m) -4 + nbdsf <- checkNA(nbdsf, narr$nbdsf) + nddsf <- checkNA(nddsf, narr$nddsf) + vbdsf <- checkNA(vbdsf, narr$vbdsf) + vddsf <- checkNA(vddsf, narr$vddsf) + prate <- checkNA(prate, narr$prate) + hgt <- checkNA(hgt, narr$hgt) + ugrd <- checkNA(ugrd, sqrt(narr$ugrd^2 + narr$vgrd^2)) + sh <- checkNA(sh, narr$sh) + tmp <- checkNA(tmp, narr$tmp) + + selcm <- which(cmo == m) - 4 selcm[selcm < 1] <- 1 - ##ambient - co2 <- array(AMB[selcm],dim=c(1,1,length(selcm))) - mout <- paste("NCDF/AMB_",year,month[m],".h5",sep="") - hdf5save(mout,"nbdsf","nddsf","vbdsf","vddsf","prate","dlwrf","pres","hgt" - ,"ugrd","vgrd","sh","tmp","co2") + ## ambient + co2 <- array(AMB[selcm], dim = c(1, 1, length(selcm))) + mout <- paste("NCDF/AMB_", year, month[m], ".h5", sep = "") + hdf5save( + mout, "nbdsf", "nddsf", "vbdsf", "vddsf", "prate", "dlwrf", "pres", "hgt", + "ugrd", "vgrd", "sh", "tmp", "co2" + ) ## elevated - co2 <- array(ELEV[selcm],dim=c(1,1,length(selcm))) - mout <- paste("NCDF/ELEV_",year,month[m],".h5",sep="") - hdf5save(mout,"nbdsf","nddsf","vbdsf","vddsf","prate","dlwrf","pres","hgt" - ,"ugrd","vgrd","sh","tmp","co2") - + co2 <- array(ELEV[selcm], dim = c(1, 1, length(selcm))) + mout <- paste("NCDF/ELEV_", year, month[m], ".h5", sep = "") + hdf5save( + mout, "nbdsf", "nddsf", "vbdsf", "vddsf", "prate", "dlwrf", "pres", "hgt", + "ugrd", "vgrd", "sh", "tmp", "co2" + ) } } diff --git a/modules/data.atmosphere/inst/scripts/PRISMlapse.R b/modules/data.atmosphere/inst/scripts/PRISMlapse.R index 2d9703f112b..585168c8d96 100644 --- a/modules/data.atmosphere/inst/scripts/PRISMlapse.R +++ b/modules/data.atmosphere/inst/scripts/PRISMlapse.R @@ -4,75 +4,83 @@ ## and PRISM corrections for other topographic effects (aspect, distance to coast, etc.) 
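Before moving on to the PRISM lapse-rate script, a note on the radiation handling restyled in the ORNL FACE hunks above: observed shortwave is capped at the top-of-atmosphere potential (1366 times the cosine of the solar zenith angle), a clearness fraction is estimated from the ratio of observed to potential radiation and clamped to at most 0.9, and the beam and diffuse parts are then split into the near-IR and visible bands that the ED2.1 HDF met format expects. A minimal standalone restatement of that logic (split_shortwave is an illustrative name, not a function in the script):

split_shortwave <- function(SW, rpot) {
  SW <- pmin(SW, rpot) # cap observed shortwave at the potential value
  frac <- SW / rpot # clearness index: observed / potential shortwave
  frac[frac > 0.9] <- 0.9 # keep some diffuse light even under clear skies
  frac[frac < 0 | is.na(frac)] <- 0
  SWd <- SW * (1 - frac) # diffuse portion of total shortwave
  data.frame(
    nbdsf = (SW - SWd) * 0.57, # near-IR beam [W/m2]
    nddsf = SWd * 0.48, # near-IR diffuse [W/m2]
    vbdsf = (SW - SWd) * 0.43, # visible beam [W/m2]
    vddsf = SWd * 0.52 # visible diffuse [W/m2]
  )
}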
## will hopefully eventually add other topo effects to clim corrections -`flip` <- function(x){t(x[nrow(x):1,])} - -`read.grid` <- function(file,list=FALSE){ - header <- scan(file=file,what=list(name="character",val="double"),nlines=6,quiet=TRUE) - header$val <- as(header$val,"numeric") - #[(charmatch(c("xll","yll"),substring(header$name,1,3)))] - grid <- flip(unname(as.matrix(read.table(file,skip=6,header=F,na.strings="-9999")))) - if (list) return(list(grid=grid,hname=header$name,hval=header$val)) +`flip` <- function(x) { + t(x[nrow(x):1, ]) +} + +`read.grid` <- function(file, list = FALSE) { + header <- scan(file = file, what = list(name = "character", val = "double"), nlines = 6, quiet = TRUE) + header$val <- as(header$val, "numeric") + # [(charmatch(c("xll","yll"),substring(header$name,1,3)))] + grid <- flip(unname(as.matrix(read.table(file, skip = 6, header = F, na.strings = "-9999")))) + if (list) { + return(list(grid = grid, hname = header$name, hval = header$val)) + } return(grid) } -plot.grid <- function(grid,xlim=NULL,ylim=NULL){ +plot.grid <- function(grid, xlim = NULL, ylim = NULL) { xmin <- grid$hval[grid$hname == "xllcorner"] ymin <- grid$hval[grid$hname == "yllcorner"] nrow <- grid$hval[grid$hname == "nrows"] ncol <- grid$hval[grid$hname == "ncols"] res <- grid$hval[grid$hname == "cellsize"] - if(is.null(xlim)) { xlim <- c(xmin,xmin+ncol*res)} - if(is.null(ylim)) { ylim <- c(ymin,ymin+nrow*res)} - xscale <- seq(xmin,xmin+ncol*res,length=ncol) - yscale <- seq(ymin,ymin+nrow*res,length=nrow) - cols <- findInterval(xlim,xscale) - rows <- findInterval(ylim,yscale) + if (is.null(xlim)) { + xlim <- c(xmin, xmin + ncol * res) + } + if (is.null(ylim)) { + ylim <- c(ymin, ymin + nrow * res) + } + xscale <- seq(xmin, xmin + ncol * res, length = ncol) + yscale <- seq(ymin, ymin + nrow * res, length = nrow) + cols <- findInterval(xlim, xscale) + rows <- findInterval(ylim, yscale) r <- rows[1]:rows[2] c <- cols[1]:cols[2] # print(list(xmin,ymin,nrow,ncol,res,xlim,ylim)) - image(xscale[c],yscale[r],grid$grid[c,r]) + image(xscale[c], yscale[r], grid$grid[c, r]) } -slm <- function(x,y){ ## simple linear model +slm <- function(x, y) { ## simple linear model - ##reduce data - valid <- apply(cbind(is.na(x),is.na(y)),1,sum) == 0 + ## reduce data + valid <- apply(cbind(is.na(x), is.na(y)), 1, sum) == 0 x <- x[valid] y <- y[valid] - + xbar <- mean(x) ybar <- mean(y) - ssx <- sum((x-xbar)^2) - spxy <- sum((x-xbar)*(y-ybar)) - b1 <- spxy/ssx - b0 <- ybar-b1*xbar - return(c(b0,b1)) + ssx <- sum((x - xbar)^2) + spxy <- sum((x - xbar) * (y - ybar)) + b1 <- spxy / ssx + b0 <- ybar - b1 * xbar + return(c(b0, b1)) } library(maps) ## load up PRISM dem (terrain map) at 800m resolution -dem <- read.grid("us_30s_dem.asc",TRUE) -xlim <- c(-98,-67) -ylim <- c(25,50) -plot.grid(dem,xlim=xlim,ylim=ylim) -map("state",add=TRUE) +dem <- read.grid("us_30s_dem.asc", TRUE) +xlim <- c(-98, -67) +ylim <- c(25, 50) +plot.grid(dem, xlim = xlim, ylim = ylim) +map("state", add = TRUE) ## load up mean climate map -annual <- read.grid("us_ppt_1971_2000.14",TRUE) -plot.grid(annual,xlim=xlim,ylim=ylim) -map("state",add=TRUE) +annual <- read.grid("us_ppt_1971_2000.14", TRUE) +plot.grid(annual, xlim = xlim, ylim = ylim) +map("state", add = TRUE) monthly <- list() -for(i in 1:12){ - fname <- paste("us_ppt_1971_2000.",i,sep="") +for (i in 1:12) { + fname <- paste("us_ppt_1971_2000.", i, sep = "") monthly[[i]] <- read.grid(fname) } ## define coarse grid -nx <- xlim[2]-xlim[1] -ny <- ylim[2]-ylim[1] -pbar <- matrix(NA,nx,ny) -pdiff 
<- NA*annual$grid +nx <- xlim[2] - xlim[1] +ny <- ylim[2] - ylim[1] +pbar <- matrix(NA, nx, ny) +pdiff <- NA * annual$grid ## calc cell average values xmin <- annual$hval[annual$hname == "xllcorner"] @@ -80,38 +88,38 @@ ymin <- annual$hval[annual$hname == "yllcorner"] nrow <- annual$hval[annual$hname == "nrows"] ncol <- annual$hval[annual$hname == "ncols"] res <- annual$hval[annual$hname == "cellsize"] -xscale <- seq(xmin,xmin+ncol*res,length=ncol) -yscale <- seq(ymin,ymin+nrow*res,length=nrow) -for(x in 1:nx){ - cols <- findInterval(c(xlim[1]+x-1,xlim[1]+x),xscale) - for(y in 1:ny){ - rows <- findInterval(c(ylim[1]+y-1,ylim[1]+y),yscale) - pbar[x,y] <- mean(annual$grid[cols[1]:cols[2],rows[1]:rows[2]]) - pdiff[cols[1]:cols[2],rows[1]:rows[2]] <- annual$grid[cols[1]:cols[2],rows[1]:rows[2]] - pbar[x,y] +xscale <- seq(xmin, xmin + ncol * res, length = ncol) +yscale <- seq(ymin, ymin + nrow * res, length = nrow) +for (x in 1:nx) { + cols <- findInterval(c(xlim[1] + x - 1, xlim[1] + x), xscale) + for (y in 1:ny) { + rows <- findInterval(c(ylim[1] + y - 1, ylim[1] + y), yscale) + pbar[x, y] <- mean(annual$grid[cols[1]:cols[2], rows[1]:rows[2]]) + pdiff[cols[1]:cols[2], rows[1]:rows[2]] <- annual$grid[cols[1]:cols[2], rows[1]:rows[2]] - pbar[x, y] } } -plot(dem$grid,pdiff,pch='.',xlim=c(0,2040)) -#lines(lowess(pdiff ~ dem$grid),col=2,lwd=2) -#fit1 <- lm(as.vector(pdiff) ~ as.vector(dem$grid)) -#fit1 <- slm(as.vector(dem$grid),as.vector(pdiff)) +plot(dem$grid, pdiff, pch = ".", xlim = c(0, 2040)) +# lines(lowess(pdiff ~ dem$grid),col=2,lwd=2) +# fit1 <- lm(as.vector(pdiff) ~ as.vector(dem$grid)) +# fit1 <- slm(as.vector(dem$grid),as.vector(pdiff)) x <- as.vector(dem$grid) y <- as.vector(pdiff) ### TOO SLOW, need to look at ways to pare down the data sets and/or look into external memory algorithms in R - valid <- apply(cbind(is.na(x),is.na(y)),1,sum) == 0 +valid <- apply(cbind(is.na(x), is.na(y)), 1, sum) == 0 - x <- x[valid] - y <- y[valid] - - xbar <- mean(x) - ybar <- mean(y) - ssx <- sum((x-xbar)^2) - spxy <- sum((x-xbar)*(y-ybar)) - b1 <- spxy/ssx - b0 <- ybar-b1*xbar +x <- x[valid] +y <- y[valid] + +xbar <- mean(x) +ybar <- mean(y) +ssx <- sum((x - xbar)^2) +spxy <- sum((x - xbar) * (y - ybar)) +b1 <- spxy / ssx +b0 <- ybar - b1 * xbar @@ -119,11 +127,11 @@ y <- as.vector(pdiff) #### #### EXTERNAL MEMORY APPROACH -`read.grid.header` <- function(con){ - rawheader <- readLines(con,6) +`read.grid.header` <- function(con) { + rawheader <- readLines(con, 6) header <- list() - for(i in 1:6){ - h <- strsplit(rawheader[i]," ")[[1]] + for (i in 1:6) { + h <- strsplit(rawheader[i], " ")[[1]] header[[i]] <- as.numeric(h[2]) names(header)[i] <- h[1] } @@ -132,85 +140,82 @@ y <- as.vector(pdiff) ## initialize -nline <- 10 ## number of lines to read at once +nline <- 10 ## number of lines to read at once dem.fname <- "us_30s_dem.asc" ppt.fname <- "us_ppt_1971_2000.14" -dem <- file(dem.fname,'r') -ppt <- file(ppt.fname,'r') -dem.head <- read.grid.header(dem) ## read ascii grid headers +dem <- file(dem.fname, "r") +ppt <- file(ppt.fname, "r") +dem.head <- read.grid.header(dem) ## read ascii grid headers ppt.head <- read.grid.header(ppt) ## define course grid -lat <- seq(dem.head$yllcorner,by=dem.head$cellsize,length=dem.head$nrows) -lon <- seq(dem.head$xllcorner,by=dem.head$cellsize,length=dem.head$ncols) +lat <- seq(dem.head$yllcorner, by = dem.head$cellsize, length = dem.head$nrows) +lon <- seq(dem.head$xllcorner, by = dem.head$cellsize, length = dem.head$ncols) xlim <- range(lon) xlim[1] <- 
floor(xlim[1]) xlim[2] <- ceiling(xlim[2]) ylim <- range(lat) ylim[1] <- floor(ylim[1]) ylim[2] <- ceiling(ylim[2]) -nx <- xlim[2]-xlim[1] -ny <- ylim[2]-ylim[1] -ssx <- spxy <- xbar <- ybar <- n <- matrix(0,ny,nx) -xcell <- floor(lon)-xlim[1]+1 +nx <- xlim[2] - xlim[1] +ny <- ylim[2] - ylim[1] +ssx <- spxy <- xbar <- ybar <- n <- matrix(0, ny, nx) +xcell <- floor(lon) - xlim[1] + 1 -##first pass, calculate means -for(i in 1:dem.head$nrows){ - +## first pass, calculate means +for (i in 1:dem.head$nrows) { ## define row - ycell <- floor(lat[i])-ylim[1] + 1 - ##read data - x <- as.numeric(strsplit(readLines(dem,1)," ")[[1]]) - y <- as.numeric(strsplit(readLines(ppt,1)," ")[[1]]) - ##set NA's + ycell <- floor(lat[i]) - ylim[1] + 1 + ## read data + x <- as.numeric(strsplit(readLines(dem, 1), " ")[[1]]) + y <- as.numeric(strsplit(readLines(ppt, 1), " ")[[1]]) + ## set NA's x[x == dem.head$NODATA_value] <- NA y[y == ppt.head$NODATA_value] <- NA - valid <- apply(cbind(is.na(x),is.na(y)),1,sum) + valid <- apply(cbind(is.na(x), is.na(y)), 1, sum) x[valid > 0] <- NA y[valid > 0] <- NA ## calc values - ntmp <- tapply(x*0+1,xcell,sum,na.rm=TRUE) - xtmp <- tapply(x,xcell,sum,na.rm=TRUE) - ytmp <- tapply(y,xcell,sum,na.rm=TRUE) - n[ycell,as.numeric(names(ntmp))] <- n[ycell,as.numeric(names(ntmp))] +ntmp - xbar[ycell,as.numeric(names(xtmp))] <- xbar[ycell,as.numeric(names(xtmp))]+xtmp - ybar[ycell,as.numeric(names(ytmp))] <- ybar[ycell,as.numeric(names(ytmp))]+ytmp + ntmp <- tapply(x * 0 + 1, xcell, sum, na.rm = TRUE) + xtmp <- tapply(x, xcell, sum, na.rm = TRUE) + ytmp <- tapply(y, xcell, sum, na.rm = TRUE) + n[ycell, as.numeric(names(ntmp))] <- n[ycell, as.numeric(names(ntmp))] + ntmp + xbar[ycell, as.numeric(names(xtmp))] <- xbar[ycell, as.numeric(names(xtmp))] + xtmp + ybar[ycell, as.numeric(names(ytmp))] <- ybar[ycell, as.numeric(names(ytmp))] + ytmp } close(dem) close(ppt) -xbar <- xbar/n -ybar <- ybar/n +xbar <- xbar / n +ybar <- ybar / n -##second pass, calculate second moments -dem <- file(dem.fname,'r') -ppt <- file(ppt.fname,'r') -dem.head <- read.grid.header(dem) ## read ascii grid headers +## second pass, calculate second moments +dem <- file(dem.fname, "r") +ppt <- file(ppt.fname, "r") +dem.head <- read.grid.header(dem) ## read ascii grid headers ppt.head <- read.grid.header(ppt) -for(i in 1:dem.head$nrows){ - +for (i in 1:dem.head$nrows) { ## define row - ycell <- floor(lat[i])-ylim[1] + 1 - ##read data - x <- as.numeric(strsplit(readLines(dem,1)," ")[[1]]) - y <- as.numeric(strsplit(readLines(ppt,1)," ")[[1]]) - ##set NA's + ycell <- floor(lat[i]) - ylim[1] + 1 + ## read data + x <- as.numeric(strsplit(readLines(dem, 1), " ")[[1]]) + y <- as.numeric(strsplit(readLines(ppt, 1), " ")[[1]]) + ## set NA's x[x == dem.head$NODATA_value] <- NA y[y == ppt.head$NODATA_value] <- NA - valid <- apply(cbind(is.na(x),is.na(y)),1,sum) + valid <- apply(cbind(is.na(x), is.na(y)), 1, sum) x[valid > 0] <- NA y[valid > 0] <- NA ## calc values - ssxtemp <- tapply((x-xbar[ycell,xcell])^2,xcell,sum,na.rm=TRUE) - spxytemp <- tapply((x-xbar[ycell,xcell])*(y-ybar[ycell,xcell]),xcell,sum,na.rm=TRUE) - ssx[ycell,as.numeric(names(ssxtemp))] <- ssx[ycell,as.numeric(names(ssxtemp))]+ssxtemp - spxy[ycell,as.numeric(names(spxytemp))] <- spxy[ycell,as.numeric(names(spxytemp))]+spxytemp + ssxtemp <- tapply((x - xbar[ycell, xcell])^2, xcell, sum, na.rm = TRUE) + spxytemp <- tapply((x - xbar[ycell, xcell]) * (y - ybar[ycell, xcell]), xcell, sum, na.rm = TRUE) + ssx[ycell, as.numeric(names(ssxtemp))] <- ssx[ycell, 
as.numeric(names(ssxtemp))] + ssxtemp + spxy[ycell, as.numeric(names(spxytemp))] <- spxy[ycell, as.numeric(names(spxytemp))] + spxytemp } close(dem) close(ppt) -b1 <- spxy/ssx -b0 <- ybar-b1*xbar +b1 <- spxy / ssx +b0 <- ybar - b1 * xbar ybar[is.nan(ybar)] <- NA -save(n,xbar,ybar,spxy,ssx,b1,b0,file="lapse.Rdata") - +save(n, xbar, ybar, spxy, ssx, b1, b0, file = "lapse.Rdata") diff --git a/modules/data.atmosphere/inst/scripts/WeathGenPPT.R b/modules/data.atmosphere/inst/scripts/WeathGenPPT.R index 245ee591e2b..1c6ff9924a6 100644 --- a/modules/data.atmosphere/inst/scripts/WeathGenPPT.R +++ b/modules/data.atmosphere/inst/scripts/WeathGenPPT.R @@ -20,54 +20,59 @@ ## end - c(mo,yr) ## process sample -samp <- read.met.ts(samplefolder,start.sample,end.sample,"pcpg",average=FALSE) -tod <- rep(1:freq,length=length(samp)) -day <- rep(1:(length(samp)),each=freq)[1:length(samp)] -rain <- as.integer(samp>0) -drain <- c(0,diff(rain)) +samp <- read.met.ts(samplefolder, start.sample, end.sample, "pcpg", average = FALSE) +tod <- rep(1:freq, length = length(samp)) +day <- rep(1:(length(samp)), each = freq)[1:length(samp)] +rain <- as.integer(samp > 0) +drain <- c(0, diff(rain)) rstart <- which(drain == 1) -rstop <- which(drain == -1) -lmin <- min(length(rstart),length(rstop)) -storm.len <- rstop[1:lmin]-rstart[1:lmin] +rstop <- which(drain == -1) +lmin <- min(length(rstart), length(rstop)) +storm.len <- rstop[1:lmin] - rstart[1:lmin] storm.len2 <- storm.len storm.len2[storm.len2 > freq] <- freq slen <- table(storm.len2) stod <- table(tod[rstart]) -slen <- slen/sum(slen) ## storm length frequency distribution -stod <- stod/sum(stod) ## storm start time of day frequency distribution - -for(y in start[2]:end[2]){ - mstart <- 1; if(y == start[2]) {mstart<- start[1]} - mend <- 12; if(y == end[2]) {mend <- end[1]} - for(m in mstart:mend){ +slen <- slen / sum(slen) ## storm length frequency distribution +stod <- stod / sum(stod) ## storm start time of day frequency distribution +for (y in start[2]:end[2]) { + mstart <- 1 + if (y == start[2]) { + mstart <- start[1] + } + mend <- 12 + if (y == end[2]) { + mend <- end[1] + } + for (m in mstart:mend) { ## read files - met <- read.met(infolder,m,y) + met <- read.met(infolder, m, y) ## set vars - tod <- rep(1:freq,length=nrow(met)) - day <- rep(1:(nrow(met)),each=freq)[1:nrow(met)] + tod <- rep(1:freq, length = nrow(met)) + day <- rep(1:(nrow(met)), each = freq)[1:nrow(met)] nday <- max(day) ppt <- met$conprr + met$pcpg - dppt <- tapply(ppt,day,sum) - + dppt <- tapply(ppt, day, sum) + ## draw start times - sstart <- findInterval(runif(nday),cumsum(stod)) - send <- sstart + findInterval(runif(nday),cumsum(slen)) - send[send>(freq-1)] <- (freq-1) - wt <- 1/((send-sstart)+1) + sstart <- findInterval(runif(nday), cumsum(stod)) + send <- sstart + findInterval(runif(nday), cumsum(slen)) + send[send > (freq - 1)] <- (freq - 1) + wt <- 1 / ((send - sstart) + 1) ## create pseudo-precip record - for(i in 1:nday){ + for (i in 1:nday) { d <- which(day == i) ppt[d] <- 0.0 - ppt[d[1]+(sstart[i]:send[i])] <- dppt[i]*wt[i] + ppt[d[1] + (sstart[i]:send[i])] <- dppt[i] * wt[i] } met$conprr <- 0.0 met$pcpg <- ppt - + ## write out new ppt - write.met(met,outfolder,m,y) + write.met(met, outfolder, m, y) } } diff --git a/modules/data.atmosphere/inst/scripts/checkMetHDF.R b/modules/data.atmosphere/inst/scripts/checkMetHDF.R index 98d6492b5ef..6e71fd0c3fd 100644 --- a/modules/data.atmosphere/inst/scripts/checkMetHDF.R +++ b/modules/data.atmosphere/inst/scripts/checkMetHDF.R @@ -3,9 +3,9 @@ 
library(hdf5) fname <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/snowcorHDF/ED_OL_2002JUN.h5" -met <- hdf5load(fname,load=FALSE) +met <- hdf5load(fname, load = FALSE) -par(mfrow=c(3,4)) -for(i in 1:12){ - plot(met[[i]],main=names(met)[i],type='l') +par(mfrow = c(3, 4)) +for (i in 1:12) { + plot(met[[i]], main = names(met)[i], type = "l") } diff --git a/modules/data.atmosphere/inst/scripts/extract2driver.R b/modules/data.atmosphere/inst/scripts/extract2driver.R index d850f91abb6..082a3f27e21 100644 --- a/modules/data.atmosphere/inst/scripts/extract2driver.R +++ b/modules/data.atmosphere/inst/scripts/extract2driver.R @@ -7,302 +7,327 @@ ################################## ## how to extract a specific cell -#GLOBAL CONSTANTS (pun intended) -daysInYear=365.25 -secsInHour=60*60 -secsInDay=secsInHour*24 -axialTilt=23.45 +# GLOBAL CONSTANTS (pun intended) +daysInYear <- 365.25 +secsInHour <- 60 * 60 +secsInDay <- secsInHour * 24 +axialTilt <- 23.45 -LAT <- 68.0+38.0/60.0 -LON <- 360-(149 +36.0/60.0) +LAT <- 68.0 + 38.0 / 60.0 +LON <- 360 - (149 + 36.0 / 60.0) # TIME TO GRAB startyear <- 2010 stopyear <- 2011 -timestep <- 3*secsInHour -timelag <- floor(LON/15)/24 # difference from GMT (fractional day) +timestep <- 3 * secsInHour +timelag <- floor(LON / 15) / 24 # difference from GMT (fractional day) narrdir <- "/home/scratch/dietze_lab/NARR/" nomaddir <- "/home/scratch/dietze_lab/NOMADS/" -tempdir <-paste(narrdir, '/toolik.obs.rad/', sep='') +tempdir <- paste(narrdir, "/toolik.obs.rad/", sep = "") outdir <- "toolik.obs.rad" -#IO OPERATIONS -readGrib<-function(indices, gribfile, tempfile=paste(tempdir,'GRIB.txt', sep='')){ - command<-paste(nomaddir,"/wgrib -s -d ",indices,' ', gribfile," -text -h -o ", tempfile, sep='') - system(command, intern=TRUE) - V <- read.table(paste(tempfile,sep=""),skip=1,header=FALSE)[[1]] - system(paste("rm ",tempfile,sep="")) - return(V) +# IO OPERATIONS +readGrib <- function(indices, gribfile, tempfile = paste(tempdir, "GRIB.txt", sep = "")) { + command <- paste(nomaddir, "/wgrib -s -d ", indices, " ", gribfile, " -text -h -o ", tempfile, sep = "") + system(command, intern = TRUE) + V <- read.table(paste(tempfile, sep = ""), skip = 1, header = FALSE)[[1]] + system(paste("rm ", tempfile, sep = "")) + return(V) } readMetGrib <- function(indices, gribfile, - nearestCells, weights){ - print(gribfile) - V<-readGrib(indices, gribfile) - if(length(V)> 0){ - return(sum(V[nearestCells]*weights,na.rm=TRUE)) - } - return(NA) + nearestCells, weights) { + print(gribfile) + V <- readGrib(indices, gribfile) + if (length(V) > 0) { + return(sum(V[nearestCells] * weights, na.rm = TRUE)) + } + return(NA) } -extractTar<-function(tarfile, sourcedir, targetdir){ - system(paste("tar -xf ", sourcedir, tarfile, ' -C ', targetdir, sep=''), intern=TRUE) +extractTar <- function(tarfile, sourcedir, targetdir) { + system(paste("tar -xf ", sourcedir, tarfile, " -C ", targetdir, sep = ""), intern = TRUE) } -readMetTar<-function(tarfile, indices, - nearestCells, weights, - sourcedir=narrdir, targetdir=tempdir){ - print(tarfile) - #returns meteorological data from a tar file - #as a 2D matrix of parameters and tar subfiles - - system(paste("rm ",targetdir,'/merged_AWIP32*',sep="")) # clean up - # copy file to temp space and open up - extractTar(tarfile, sourcedir, targetdir) - - # get list of sub-files; parse - subfiles <- dir(targetdir,"merged") - subfiles <- paste(targetdir, subfiles, sep='') - - ## LOOP OVER SMALL FILES - tarMetData <- matrix(NA,nrow=length(subfiles),ncol=length(indices)) - 
if (length(indices) > 0){ - for(i in 1:length(subfiles)){ - for(k in 1:length(indices)){ - tarMetData[i,k] <- readMetGrib(indices[k],subfiles[i],nearestCells, weights) - } - system(paste("rm ",subfiles[i],sep="")) # clean up - }# end loop within file - } - tarMetData +readMetTar <- function(tarfile, indices, + nearestCells, weights, + sourcedir = narrdir, targetdir = tempdir) { + print(tarfile) + # returns meteorological data from a tar file + # as a 2D matrix of parameters and tar subfiles + + system(paste("rm ", targetdir, "/merged_AWIP32*", sep = "")) # clean up + # copy file to temp space and open up + extractTar(tarfile, sourcedir, targetdir) + + # get list of sub-files; parse + subfiles <- dir(targetdir, "merged") + subfiles <- paste(targetdir, subfiles, sep = "") + + ## LOOP OVER SMALL FILES + tarMetData <- matrix(NA, nrow = length(subfiles), ncol = length(indices)) + if (length(indices) > 0) { + for (i in 1:length(subfiles)) { + for (k in 1:length(indices)) { + tarMetData[i, k] <- readMetGrib(indices[k], subfiles[i], nearestCells, weights) + } + system(paste("rm ", subfiles[i], sep = "")) # clean up + } # end loop within file + } + tarMetData } -readMetTars<-function(tarfiles, indices, - nearestCells, weights){ - # returns meteorological data from a list of tar files - # bound into a single 2 dimensional matrix representing 4 dimensions - # each column represents a parameter - # each row represents a lat/lon coordinate at a specific time - foo<-sapply(tarfiles, - function(tarfile){readMetTar(tarfile, indices, nearestCells, weights)}) - if(!is.list(foo)){print(foo);browser()} - print(tarfiles) - do.call(rbind, foo) +readMetTars <- function(tarfiles, indices, + nearestCells, weights) { + # returns meteorological data from a list of tar files + # bound into a single 2 dimensional matrix representing 4 dimensions + # each column represents a parameter + # each row represents a lat/lon coordinate at a specific time + foo <- sapply( + tarfiles, + function(tarfile) { + readMetTar(tarfile, indices, nearestCells, weights) + } + ) + if (!is.list(foo)) { + print(foo) + browser() + } + print(tarfiles) + do.call(rbind, foo) } -writeHdf5<-function(file, metdata, potential, downscale.radiation=function(x){x}) { - - as.met.array <- function(x) {array(x, dim=c(1,1,length(x)))} - - shortWave <- monthMetData[,17] - shortWave[potential < shortWave] <- potential[potential0) - bound <- bound[rin[bound] > 0 & rin[bound+1] >0] - for(i in bound){ - x1 = sum(k1*r[i + 1:4 -2 ]) - x2 = sum(k2*r[i + 1:4 -2 ]) - r[i] = x1 - r[i+1]=x2 - } - - return(r) +smoothedRadiation <- function(a, month, year, timelag, new.timestep, old.timestep) { ## radiation + k1 <- c(1 / 6, 1 / 2, 1 / 2, -1 / 6) + k2 <- c(-1 / 6, 1 / 2, 1 / 2, 1 / 6) + dat <- rep(a, each = old.timestep / new.timestep) + lab <- rep(seq(a), each = old.timestep / new.timestep) + startday <- firstday(month, year) + startday <- lastday(month, year) + rin <- potentialRadiation( + rep(startday:startday, each = secsInDay / new.timestep), + rep( + seq(new.timestep, secsInDay, new.timestep), + monthlength(mo, year) + ), LAT, timelag * 24 + ) + rbar <- rep(tapply(rin, lab, mean), each = old.timestep / new.timestep) + r <- apply(cbind(dat * rin / rbar, rep(0, length(dat))), 1, max) + r[rbar == 0] <- 0 + + ## filter + bound <- which(diff(lab) > 0) + bound <- bound[rin[bound] > 0 & rin[bound + 1] > 0] + for (i in bound) { + x1 <- sum(k1 * r[i + 1:4 - 2]) + x2 <- sum(k2 * r[i + 1:4 - 2]) + r[i] <- x1 + r[i + 1] <- x2 + } + + return(r) } -deg2rad<-function(degrees){ - 
pi/180 * degrees +deg2rad <- function(degrees) { + pi / 180 * degrees } -rad2deg<-function(radians){ - 180/pi * radians +rad2deg <- function(radians) { + 180 / pi * radians } -potentialRadiation <- function(day,time,LAT,timelag){ - #radiation as determined only by solar position - dayangle=2.0*pi*(day)/daysInYear - declination = 0.006918 - - 0.399912 * cos(dayangle)+ - 0.070257 * sin(dayangle)- - 0.006758 * cos(2.0*dayangle)+ - 0.000907 * sin(2.0*dayangle)- - 0.002697 * cos(3.0*dayangle)+ - 0.00148 * sin(3.0*dayangle) - eccentricity=1.00011 + - 0.034221 * cos(dayangle)+ - 0.00128 * sin(dayangle)+ - 0.000719 * cos(2.0*dayangle)+ - 0.000077 * sin(2.0*dayangle) - solartime=time/secsInHour-12.0+timelag - radiation = 1367.0 * - eccentricity * - (cos(declination) * - cos(deg2rad(LAT)) * - cos(deg2rad(15.0)*(solartime)) + - sin(declination) * - sin(deg2rad(LAT))) - radiation[radiation<0] <- 0 - radiation +potentialRadiation <- function(day, time, LAT, timelag) { + # radiation as determined only by solar position + dayangle <- 2.0 * pi * (day) / daysInYear + declination <- 0.006918 - + 0.399912 * cos(dayangle) + + 0.070257 * sin(dayangle) - + 0.006758 * cos(2.0 * dayangle) + + 0.000907 * sin(2.0 * dayangle) - + 0.002697 * cos(3.0 * dayangle) + + 0.00148 * sin(3.0 * dayangle) + eccentricity <- 1.00011 + + 0.034221 * cos(dayangle) + + 0.00128 * sin(dayangle) + + 0.000719 * cos(2.0 * dayangle) + + 0.000077 * sin(2.0 * dayangle) + solartime <- time / secsInHour - 12.0 + timelag + radiation <- 1367.0 * + eccentricity * + (cos(declination) * + cos(deg2rad(LAT)) * + cos(deg2rad(15.0) * (solartime)) + + sin(declination) * + sin(deg2rad(LAT))) + radiation[radiation < 0] <- 0 + radiation } -potentialRadiation2<-function(lat, lon, day, hours){ - if(lon>180) lon <- lon-360 - f <- deg2rad(279.5+0.9856*day) - eccentricity <- (-104.7*sin(f) - +596.2*sin(2*f) - +4.3*sin(4*f) - -429.3*cos(f) - -2.0*cos(2*f) - +19.3*cos(3*f))/3600 - #equation of time -> eccentricity and obliquity - meridian <- floor(lon/15)*15 - if(meridian<0) meridian <- meridian+15 - lonCorrection <- (lon-meridian)*-4/60 - timeZone <- meridian/360*24 - midbin <- 0.5*timestep/secsInHour # shift calc to middle of bin - solarTime <- 12+lonCorrection-eccentricity-timeZone-midbin - solarHour <- pi/12*(hours-solarTime) - dayangle<-2*pi*(day+10)/daysInYear - declenation <- -deg2rad(axialTilt) * cos(dayangle) - cosz <- sin(deg2rad(lat))*sin(declenation)+cos(deg2rad(lat))*cos(declenation)*cos(solarHour) - cosz[cosz<0] <- 0 - 1366*cosz +potentialRadiation2 <- function(lat, lon, day, hours) { + if (lon > 180) lon <- lon - 360 + f <- deg2rad(279.5 + 0.9856 * day) + eccentricity <- (-104.7 * sin(f) + + 596.2 * sin(2 * f) + + 4.3 * sin(4 * f) + - 429.3 * cos(f) + - 2.0 * cos(2 * f) + + 19.3 * cos(3 * f)) / 3600 + # equation of time -> eccentricity and obliquity + meridian <- floor(lon / 15) * 15 + if (meridian < 0) meridian <- meridian + 15 + lonCorrection <- (lon - meridian) * -4 / 60 + timeZone <- meridian / 360 * 24 + midbin <- 0.5 * timestep / secsInHour # shift calc to middle of bin + solarTime <- 12 + lonCorrection - eccentricity - timeZone - midbin + solarHour <- pi / 12 * (hours - solarTime) + dayangle <- 2 * pi * (day + 10) / daysInYear + declenation <- -deg2rad(axialTilt) * cos(dayangle) + cosz <- sin(deg2rad(lat)) * sin(declenation) + cos(deg2rad(lat)) * cos(declenation) * cos(solarHour) + cosz[cosz < 0] <- 0 + 1366 * cosz } -shortWaveDiffuseRad<-function(potentialRad, shortWaveRad){ - ## this causes trouble at twilight bc of missmatch btw bin 
avergage and bin midpoint - frac <- shortWaveRad/potentialRad - frac[frac>0.9] <- 0.9 # ensure some diffuse - frac[frac < 0.0] <- 0.0 - frac[is.na(frac)] <- 0.0 - frac[is.nan(frac)] <- 0.0 - shortWaveRad*(1-frac) # return diffuse portion of total short wave rad +shortWaveDiffuseRad <- function(potentialRad, shortWaveRad) { + ## this causes trouble at twilight bc of missmatch btw bin avergage and bin midpoint + frac <- shortWaveRad / potentialRad + frac[frac > 0.9] <- 0.9 # ensure some diffuse + frac[frac < 0.0] <- 0.0 + frac[is.na(frac)] <- 0.0 + frac[is.nan(frac)] <- 0.0 + shortWaveRad * (1 - frac) # return diffuse portion of total short wave rad } # check if GRIB files are there yet -tarfiles <- dir(narrdir,"NARR.*\\.tar$") -if(length(tarfiles) == 0) stop() -years<-substr(tarfiles, 9, 12) -tarfiles<-tarfiles[which(years>=startyear & years<=stopyear)] -vars =substr(tarfiles,5,7) -years=substr(tarfiles,9,12) -months=substr(tarfiles,13,14) +tarfiles <- dir(narrdir, "NARR.*\\.tar$") +if (length(tarfiles) == 0) stop() +years <- substr(tarfiles, 9, 12) +tarfiles <- tarfiles[which(years >= startyear & years <= stopyear)] +vars <- substr(tarfiles, 5, 7) +years <- substr(tarfiles, 9, 12) +months <- substr(tarfiles, 13, 14) # load maps -mapfile <- paste(narrdir, "rr-fixed.grb", sep='') +mapfile <- paste(narrdir, "rr-fixed.grb", sep = "") NLAT <- readGrib(20, mapfile) ELON <- readGrib(19, mapfile) isLand <- readGrib(16, mapfile) -NLAT[NLAT>1.0e20] <- NA -ELON[ELON>1.0e20] <- NA -isLand[isLand>1.0e20] <- NA +NLAT[NLAT > 1.0e20] <- NA +ELON[ELON > 1.0e20] <- NA +isLand[isLand > 1.0e20] <- NA -landCells <- which(isLand>0) +landCells <- which(isLand > 0) # Determine extraction location -distance <- (LAT-NLAT)^2 +(LON-ELON)^2 +distance <- (LAT - NLAT)^2 + (LON - ELON)^2 nearestCell <- which.min(distance) # Determine 4 nearest neighbors for interpolation nearestCells <- landCells[order(distance[landCells])[1:4]] nearestCells <- nearestCells[which(isLand[nearestCells] > 0)] -if(length(nearestCells) == 0) nearestCells = nearestCell -weights <- (1/sqrt(distance[nearestCells]))/sum(1/sqrt(distance[nearestCells])) -#print(weights) -#stop() +if (length(nearestCells) == 0) nearestCells <- nearestCell +weights <- (1 / sqrt(distance[nearestCells])) / sum(1 / sqrt(distance[nearestCells])) +# print(weights) +# stop() # loop over large files -#library(hdf5,lib.loc="/home/mdietze/lib/R/Rhdf") +# library(hdf5,lib.loc="/home/mdietze/lib/R/Rhdf") library(hdf5) -for(year in unique(years)){ - yearTars <- which(years == year) - yearnum<-as.numeric(year) - for(month in c('08','09','10','11','12')){ - browser() - monthTars <- yearTars[which(months[yearTars] == month)] - monthnum<-as.numeric(month) - print(paste(year,month)) - - surfaceTars <- monthTars[which(vars[monthTars] == "sfc")] - surfaceMetData<-readMetTars(tarfiles[surfaceTars], - list(cfrzr=33, cicep=32, crain=34, csnow=31, dlwrf=42, - dswrf=41, pres =3, prate=24, tmp =5), - nearestCells, weights) - - fluxTars <- monthTars[which(vars[monthTars] == "flx")] - fluxMetData<-readMetTars(tarfiles[fluxTars], - list(tmp10=38, tmp30=44, ugrd10=35, ugrd30=41, vgrd10=36, - vgrd30=42, spfh10=40, spfh30=46, pres10=39, pres30=45, - hgt1 =4), - nearestCells, weights) - monthMetData <- cbind(fluxMetData,surfaceMetData) - startday<-firstday(monthnum,yearnum)+1 - stopday<-lastday(monthnum,yearnum)+1 - days <- rep(startday:stopday, each = 24/3) - hours <- rep(seq(0,21,by=3),length=nrow(monthMetData)) - - potential<-potentialRadiation2(LAT,LON,days, hours) - # write as h5 - out.file <- 
paste(narrdir, outdir,"/", - outdir,"_", yearnum, - monthnames[monthnum],".h5",sep="") - print(out.file) - writeHdf5(out.file, monthMetData, potential, - downscale.radiation=function(x){x}) - #downscale.radiation=function(x){smoothedRadiation(x, monthnum, yearnum, timelag, new.timestep=900, old.timestep=timestep)}) - } # end month +for (year in unique(years)) { + yearTars <- which(years == year) + yearnum <- as.numeric(year) + for (month in c("08", "09", "10", "11", "12")) { + browser() + monthTars <- yearTars[which(months[yearTars] == month)] + monthnum <- as.numeric(month) + print(paste(year, month)) + + surfaceTars <- monthTars[which(vars[monthTars] == "sfc")] + surfaceMetData <- readMetTars( + tarfiles[surfaceTars], + list( + cfrzr = 33, cicep = 32, crain = 34, csnow = 31, dlwrf = 42, + dswrf = 41, pres = 3, prate = 24, tmp = 5 + ), + nearestCells, weights + ) + + fluxTars <- monthTars[which(vars[monthTars] == "flx")] + fluxMetData <- readMetTars( + tarfiles[fluxTars], + list( + tmp10 = 38, tmp30 = 44, ugrd10 = 35, ugrd30 = 41, vgrd10 = 36, + vgrd30 = 42, spfh10 = 40, spfh30 = 46, pres10 = 39, pres30 = 45, + hgt1 = 4 + ), + nearestCells, weights + ) + monthMetData <- cbind(fluxMetData, surfaceMetData) + startday <- firstday(monthnum, yearnum) + 1 + stopday <- lastday(monthnum, yearnum) + 1 + days <- rep(startday:stopday, each = 24 / 3) + hours <- rep(seq(0, 21, by = 3), length = nrow(monthMetData)) + + potential <- potentialRadiation2(LAT, LON, days, hours) + # write as h5 + out.file <- paste(narrdir, outdir, "/", + outdir, "_", yearnum, + monthnames[monthnum], ".h5", + sep = "" + ) + print(out.file) + writeHdf5(out.file, monthMetData, potential, + downscale.radiation = function(x) { + x + } + ) + # downscale.radiation=function(x){smoothedRadiation(x, monthnum, yearnum, timelag, new.timestep=900, old.timestep=timestep)}) + } # end month } # end year - - diff --git a/modules/data.atmosphere/inst/scripts/extract2driver.grid.R b/modules/data.atmosphere/inst/scripts/extract2driver.grid.R index a66935406a9..b881ae87fa7 100644 --- a/modules/data.atmosphere/inst/scripts/extract2driver.grid.R +++ b/modules/data.atmosphere/inst/scripts/extract2driver.grid.R @@ -2,25 +2,25 @@ ################################## ## ## -## GRID TIMESERIES ## +## GRID TIMESERIES ## ## ## ################################## ## how to extract a specific cell ## Central New England -LAT <- c(41.705,43.325) -LON <- 360+c(-73.428,-71.224) +LAT <- c(41.705, 43.325) +LON <- 360 + c(-73.428, -71.224) YRES <- 0.10 XRES <- 0.10 froot <- "PALEO" -XSEQ <- seq(LON[1]+XRES/2,LON[2],by=XRES) -YSEQ <- seq(LAT[1]+YRES/2,LAT[2],by=YRES) +XSEQ <- seq(LON[1] + XRES / 2, LON[2], by = XRES) +YSEQ <- seq(LAT[1] + YRES / 2, LAT[2], by = YRES) NX <- length(XSEQ) NY <- length(YSEQ) -LATgrid <- matrix(rep(YSEQ,length(XSEQ)),NY,NX) -LONgrid <- matrix(rep(XSEQ,each=length(YSEQ)),NY,NX) +LATgrid <- matrix(rep(YSEQ, length(XSEQ)), NY, NX) +LONgrid <- matrix(rep(XSEQ, each = length(YSEQ)), NY, NX) ## TIME TO GRAB yr0 <- 1979 @@ -29,25 +29,25 @@ yrf <- 1993 dirname <- "/home/scratch/dietze_lab/NARR" NDIR <- "/home/scratch/dietze_lab/NOMADS" -gribgrab <- function(srch,vnam){ - system(paste(NDIR,"/wgrib -d ",srch," ",dirname,"/tmp/",fname[it]," -text -h -o ",dirname,"/tmp/",vnam,".txt", sep="")) - V <- read.table(paste(dirname,"/tmp/",vnam,".txt",sep=""),skip=1,header=FALSE) +gribgrab <- function(srch, vnam) { + system(paste(NDIR, "/wgrib -d ", srch, " ", dirname, "/tmp/", fname[it], " -text -h -o ", dirname, "/tmp/", vnam, ".txt", sep = "")) + V <- 
read.table(paste(dirname, "/tmp/", vnam, ".txt", sep = ""), skip = 1, header = FALSE) V <- V[[1]] - system(paste("rm ",dirname,"/tmp/",vnam,".txt",sep="")) + system(paste("rm ", dirname, "/tmp/", vnam, ".txt", sep = "")) rval <- NA - if(length(V)> 0){ - rval <- tapply(V[ROWS]*WT,CELL,sum,na.rm=TRUE) + if (length(V) > 0) { + rval <- tapply(V[ROWS] * WT, CELL, sum, na.rm = TRUE) } rval } -dm <- c(1,32,60,91,121,152,182,213,244,274,305,335,366) -dl <- c(1,32,61,92,122,153,183,214,245,275,306,336,367) -month <- c("JAN","FEB","MAR","APR","MAY","JUN","JUL","AUG","SEP","OCT","NOV","DEC") -mon_num <- c("01","02","03","04","05","06","07","08","09","10","11","12") -m2d <- function(m,y){ +dm <- c(1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) +dl <- c(1, 32, 61, 92, 122, 153, 183, 214, 245, 275, 306, 336, 367) +month <- c("JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC") +mon_num <- c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12") +m2d <- function(m, y) { ## convert from month to day of year - if(y %% 4 == 0){ + if (y %% 4 == 0) { return(dl[m]) } return(dm[m]) @@ -55,16 +55,16 @@ m2d <- function(m,y){ ## check if GRIB files are there yet -gfiles <- dir(dirname,"tar") -if(length(gfiles) == 0) stop() -fsplit <- strsplit(gfiles, ".",fixed=TRUE) -var <- yr <- mo <- sday <- fday <- rep(NA,length(fsplit)) -for(i in 1:length(fsplit)){ - var[i] <- substr(fsplit[[i]][1],5,7) - yr[i] <- substr(fsplit[[i]][1],9,12) - mo[i] <- substr(fsplit[[i]][1],13,14) - sday[i] <- substr(fsplit[[i]][1],16,17) - fday[i] <- substr(fsplit[[i]][1],18,19) +gfiles <- dir(dirname, "tar") +if (length(gfiles) == 0) stop() +fsplit <- strsplit(gfiles, ".", fixed = TRUE) +var <- yr <- mo <- sday <- fday <- rep(NA, length(fsplit)) +for (i in 1:length(fsplit)) { + var[i] <- substr(fsplit[[i]][1], 5, 7) + yr[i] <- substr(fsplit[[i]][1], 9, 12) + mo[i] <- substr(fsplit[[i]][1], 13, 14) + sday[i] <- substr(fsplit[[i]][1], 16, 17) + fday[i] <- substr(fsplit[[i]][1], 18, 19) } ### SELECT TARGET TIME @@ -80,39 +80,39 @@ unique(yr) ## load maps map <- "rr-fixed.grb" -system(paste(NDIR,"/wgrib -d 20 ",map," -text -h -o ",dirname,"/NLAT.txt", sep="")) -system(paste(NDIR,"/wgrib -d 19 ",map," -text -h -o ",dirname,"/ELON.txt", sep="")) -system(paste(NDIR,"/wgrib -d 16 ",map," -text -h -o ",dirname,"/LAND.txt", sep="")) +system(paste(NDIR, "/wgrib -d 20 ", map, " -text -h -o ", dirname, "/NLAT.txt", sep = "")) +system(paste(NDIR, "/wgrib -d 19 ", map, " -text -h -o ", dirname, "/ELON.txt", sep = "")) +system(paste(NDIR, "/wgrib -d 16 ", map, " -text -h -o ", dirname, "/LAND.txt", sep = "")) -NLAT <- read.table(paste(dirname,"/NLAT.txt",sep=""),skip=1,header=FALSE)[,1] -ELON <- read.table(paste(dirname,"/ELON.txt",sep=""),skip=1,header=FALSE)[,1] -LAND <- read.table(paste(dirname,"/LAND.txt",sep=""),skip=1,header=FALSE)[,1] +NLAT <- read.table(paste(dirname, "/NLAT.txt", sep = ""), skip = 1, header = FALSE)[, 1] +ELON <- read.table(paste(dirname, "/ELON.txt", sep = ""), skip = 1, header = FALSE)[, 1] +LAND <- read.table(paste(dirname, "/LAND.txt", sep = ""), skip = 1, header = FALSE)[, 1] -NLAT[NLAT>1.0e20] <- NA -ELON[ELON>1.0e20] <- NA -LAND[LAND>1.0e20] <- NA +NLAT[NLAT > 1.0e20] <- NA +ELON[ELON > 1.0e20] <- NA +LAND[LAND > 1.0e20] <- NA -##plot(ELON-360,NLAT,col=LAND+1,pch="+",xlim=LON-360,ylim=LAT) -##abline(h=seq(LAT[1],LAT[2],YRES),lty=2,col=3) -##abline(v=seq(LON[1],LON[2],XRES)-360,lty=2,col=3) -##map("state",add=TRUE) +## 
plot(ELON-360,NLAT,col=LAND+1,pch="+",xlim=LON-360,ylim=LAT) +## abline(h=seq(LAT[1],LAT[2],YRES),lty=2,col=3) +## abline(v=seq(LON[1],LON[2],XRES)-360,lty=2,col=3) +## map("state",add=TRUE) -lsel <- which(LAND>0) +lsel <- which(LAND > 0) ## Determine extraction location ROW <- WTS <- list() cnt <- 1 -for(X in 1:NX){ - for(Y in 1:NY){ +for (X in 1:NX) { + for (Y in 1:NY) { ## Determine 4 nearest terrestrial neighbors for interpolation - dist <- (YSEQ[Y]-NLAT)^2 +(XSEQ[X]-ELON)^2 + dist <- (YSEQ[Y] - NLAT)^2 + (XSEQ[X] - ELON)^2 ROW[[cnt]] <- lsel[order(dist[lsel])[1:4]] - WTS[[cnt]] <- (1/sqrt(dist[ROW[[cnt]]]))/sum(1/sqrt(dist[ROW[[cnt]]])) + WTS[[cnt]] <- (1 / sqrt(dist[ROW[[cnt]]])) / sum(1 / sqrt(dist[ROW[[cnt]]])) cnt <- cnt + 1 } } -CELL <- ROWS <- WT <- rep(NA,4*length(ROW)) -for(i in 1:length(ROW)){ - j <- 1:4 +(i-1)*4 +CELL <- ROWS <- WT <- rep(NA, 4 * length(ROW)) +for (i in 1:length(ROW)) { + j <- 1:4 + (i - 1) * 4 ROWS[j] <- ROW[[i]] WT[j] <- WTS[[i]] CELL[j] <- i @@ -120,145 +120,141 @@ for(i in 1:length(ROW)){ METDATA <- list() ## loop over large files -library(hdf5,lib.loc="/home/mdietze/lib/R/Rhdf") +library(hdf5, lib.loc = "/home/mdietze/lib/R/Rhdf") dt <- 10800 -for(y in unique(yr)){ +for (y in unique(yr)) { ysel <- which(yr == y) - for(m in unique(mo[ysel])){ - msel <- ysel[which(mo[ysel] == m)] + for (m in unique(mo[ysel])) { + msel <- ysel[which(mo[ysel] == m)] ssel <- msel[which(var[msel] == "sfc")] fsel <- msel[which(var[msel] == "flx")] - for(i in msel){ + for (i in msel) { + ## METDATA <- list() -## METDATA <- list() - ## copy file to temp space and open up - system(paste("cp",gfiles[i],"tmp")) - system(paste("cd tmp; tar -xf",gfiles[i])) - system(paste("rm tmp/",gfiles[i],sep="")) - + system(paste("cp", gfiles[i], "tmp")) + system(paste("cd tmp; tar -xf", gfiles[i])) + system(paste("rm tmp/", gfiles[i], sep = "")) + ## get list of sub-files; parse - fname <- dir("tmp","merged") - day <- hr <- rep(NA,length(fname)) - for(j in 1:length(fname)){ - day[j] <- substr(fname[j],21,22) - hr[j] <- substr(fname[j],23,24) + fname <- dir("tmp", "merged") + day <- hr <- rep(NA, length(fname)) + for (j in 1:length(fname)) { + day[j] <- substr(fname[j], 21, 22) + hr[j] <- substr(fname[j], 23, 24) } - + ## LOOP OVER SMALL FILES met <- list() - for(it in 1:11) met[[it]] <- array(NA,c(NX,NY,length(fname))) - for(it in 1:length(fname)){ - -####################### -###### IF SFC ######## -####################### - if(var[i] == "sfc"){ - met[[1]][,,it] <- matrix(gribgrab(33,"CFRZR"),NX,NY,byrow=TRUE) - met[[2]][,,it] <- matrix(gribgrab(32,"CICEP"),NX,NY,byrow=TRUE) - met[[3]][,,it] <- matrix(gribgrab(34,"CRAIN"),NX,NY,byrow=TRUE) - met[[4]][,,it] <- matrix(gribgrab(31,"CSNOW"),NX,NY,byrow=TRUE) - met[[5]][,,it] <- matrix(gribgrab(42,"DLWRF"),NX,NY,byrow=TRUE) - met[[6]][,,it] <- matrix(gribgrab(41,"DSWRF"),NX,NY,byrow=TRUE) - met[[7]][,,it] <- matrix(gribgrab(3,"PRES"),NX,NY,byrow=TRUE) - met[[8]][,,it] <- matrix(gribgrab(24,"PRATE"),NX,NY,byrow=TRUE) - met[[9]][,,it] <- matrix(gribgrab(5,"TMP"),NX,NY,byrow=TRUE) - } else{ - if(var[i] == "flx"){ - met[[1]][,,it] <- matrix(gribgrab(38,"TMP10"),NX,NY,byrow=TRUE) - met[[2]][,,it] <- matrix(gribgrab(44,"TMP30"),NX,NY,byrow=TRUE) - met[[3]][,,it] <- matrix(gribgrab(35,"UGRD10"),NX,NY,byrow=TRUE) - met[[4]][,,it] <- matrix(gribgrab(41,"UGRD30"),NX,NY,byrow=TRUE) - met[[5]][,,it] <- matrix(gribgrab(36,"VGRD10"),NX,NY,byrow=TRUE) - met[[6]][,,it] <- matrix(gribgrab(42,"VGRD30"),NX,NY,byrow=TRUE) - met[[7]][,,it] <- 
matrix(gribgrab(40,"SPFH10"),NX,NY,byrow=TRUE) - met[[8]][,,it] <- matrix(gribgrab(46,"SPFH30"),NX,NY,byrow=TRUE) - met[[9]][,,it] <- matrix(gribgrab(39,"PRES10"),NX,NY,byrow=TRUE) - met[[10]][,,it] <- matrix(gribgrab(45,"PRES30"),NX,NY,byrow=TRUE) - met[[11]][,,it] <- matrix(gribgrab(4,"HGT1"),NX,NY,byrow=TRUE) - + for (it in 1:11) met[[it]] <- array(NA, c(NX, NY, length(fname))) + for (it in 1:length(fname)) { + ####################### + ###### IF SFC ######## + ####################### + if (var[i] == "sfc") { + met[[1]][, , it] <- matrix(gribgrab(33, "CFRZR"), NX, NY, byrow = TRUE) + met[[2]][, , it] <- matrix(gribgrab(32, "CICEP"), NX, NY, byrow = TRUE) + met[[3]][, , it] <- matrix(gribgrab(34, "CRAIN"), NX, NY, byrow = TRUE) + met[[4]][, , it] <- matrix(gribgrab(31, "CSNOW"), NX, NY, byrow = TRUE) + met[[5]][, , it] <- matrix(gribgrab(42, "DLWRF"), NX, NY, byrow = TRUE) + met[[6]][, , it] <- matrix(gribgrab(41, "DSWRF"), NX, NY, byrow = TRUE) + met[[7]][, , it] <- matrix(gribgrab(3, "PRES"), NX, NY, byrow = TRUE) + met[[8]][, , it] <- matrix(gribgrab(24, "PRATE"), NX, NY, byrow = TRUE) + met[[9]][, , it] <- matrix(gribgrab(5, "TMP"), NX, NY, byrow = TRUE) + } else { + if (var[i] == "flx") { + met[[1]][, , it] <- matrix(gribgrab(38, "TMP10"), NX, NY, byrow = TRUE) + met[[2]][, , it] <- matrix(gribgrab(44, "TMP30"), NX, NY, byrow = TRUE) + met[[3]][, , it] <- matrix(gribgrab(35, "UGRD10"), NX, NY, byrow = TRUE) + met[[4]][, , it] <- matrix(gribgrab(41, "UGRD30"), NX, NY, byrow = TRUE) + met[[5]][, , it] <- matrix(gribgrab(36, "VGRD10"), NX, NY, byrow = TRUE) + met[[6]][, , it] <- matrix(gribgrab(42, "VGRD30"), NX, NY, byrow = TRUE) + met[[7]][, , it] <- matrix(gribgrab(40, "SPFH10"), NX, NY, byrow = TRUE) + met[[8]][, , it] <- matrix(gribgrab(46, "SPFH30"), NX, NY, byrow = TRUE) + met[[9]][, , it] <- matrix(gribgrab(39, "PRES10"), NX, NY, byrow = TRUE) + met[[10]][, , it] <- matrix(gribgrab(45, "PRES30"), NX, NY, byrow = TRUE) + met[[11]][, , it] <- matrix(gribgrab(4, "HGT1"), NX, NY, byrow = TRUE) } else { - print(c("FILE TYPE UNKNOWN",gfiles[i])) + print(c("FILE TYPE UNKNOWN", gfiles[i])) } } - system(paste("rm ",dirname,"/tmp/",fname[it],sep="")) ## clean up - } ## end loop within file - + system(paste("rm ", dirname, "/tmp/", fname[it], sep = "")) ## clean up + } ## end loop within file + METDATA[[i]] <- met - } ## end loop over ensemble members + } ## end loop over ensemble members - print(c(y,m)) + print(c(y, m)) save.image("paleo.RData") - + ## calculate time variables - doy <- rep(m2d(as.numeric(m),as.numeric(y)):(m2d(as.numeric(m)+1,as.numeric(y))-1),each = 8) + doy <- rep(m2d(as.numeric(m), as.numeric(y)):(m2d(as.numeric(m) + 1, as.numeric(y)) - 1), each = 8) n <- length(doy) - hr <- rep(seq(0,21,by=3),length=n) - -#### MERGE CHUNKS INTO MONTH TIMESERIES + hr <- rep(seq(0, 21, by = 3), length = n) + + #### MERGE CHUNKS INTO MONTH TIMESERIES - SW <- prate <- dlwrf <- pres <- ugrd <- vgrd <- sh <- tmp <- array(NA,c(NX,NY,n)) - ##sfc + SW <- prate <- dlwrf <- pres <- ugrd <- vgrd <- sh <- tmp <- array(NA, c(NX, NY, n)) + ## sfc cnt <- 0 - for(j in ssel){ - rws <- cnt+1:(dim(METDATA[[j]][[1]])[3]) - SW[,,rws] <- METDATA[[j]][[6]] - prate[,,rws] <- METDATA[[j]][[8]] - dlwrf[,,rws] <- METDATA[[j]][[5]] + for (j in ssel) { + rws <- cnt + 1:(dim(METDATA[[j]][[1]])[3]) + SW[, , rws] <- METDATA[[j]][[6]] + prate[, , rws] <- METDATA[[j]][[8]] + dlwrf[, , rws] <- METDATA[[j]][[5]] cnt <- rev(rws)[1] } cnt <- 0 - for(j in fsel){ - rws <- cnt+1:(dim(METDATA[[j]][[1]])[3]) - pres[,,rws] <- 
METDATA[[j]][[9]] - ugrd[,,rws] <- METDATA[[j]][[3]] - vgrd[,,rws] <- METDATA[[j]][[5]] - sh[,,rws] <- METDATA[[j]][[7]] - tmp[,,rws] <- METDATA[[j]][[1]] + for (j in fsel) { + rws <- cnt + 1:(dim(METDATA[[j]][[1]])[3]) + pres[, , rws] <- METDATA[[j]][[9]] + ugrd[, , rws] <- METDATA[[j]][[3]] + vgrd[, , rws] <- METDATA[[j]][[5]] + sh[, , rws] <- METDATA[[j]][[7]] + tmp[, , rws] <- METDATA[[j]][[1]] cnt <- rev(rws)[1] - } - + } + ## calculate potential radiation ## in order to estimate diffuse/direct - f <- pi/180*(279.5+0.9856*doy) - et <- (-104.7*sin(f)+596.2*sin(2*f)+4.3*sin(4*f) - -429.3*cos(f)-2.0*cos(2*f)+19.3*cos(3*f))/3600 - ##equation of time -> eccentricity and obliquity - rpot <-array(NA,c(NX,NY,n)) - merid <- floor(LONgrid/15)*15 - #if(merid<0) - merid[merid>180] <- merid[merid>180]+15-360 - lc <- (LONgrid-merid)*-4/60 ## longitude correction - tz <- merid/360*24 ## time zone - midbin <- 0.5*dt/86400*24 ## shift calc to middle of bin - for(j in 1:n){ - t0 <- 12+lc-et[j]-tz-midbin ## solar time - h <- pi/12*(hr[j]-t0) ## solar hour - dec <- -23.45*pi/180*cos(2*pi*(doy[j]+10)/365) ## declination - cosz <- sin(LATgrid*pi/180)*sin(dec)+cos(LATgrid*pi/180)*cos(dec)*cos(h) - cosz[cosz<0] <- 0 - rpot[,,j] <- 1366*cosz + f <- pi / 180 * (279.5 + 0.9856 * doy) + et <- (-104.7 * sin(f) + 596.2 * sin(2 * f) + 4.3 * sin(4 * f) + - 429.3 * cos(f) - 2.0 * cos(2 * f) + 19.3 * cos(3 * f)) / 3600 + ## equation of time -> eccentricity and obliquity + rpot <- array(NA, c(NX, NY, n)) + merid <- floor(LONgrid / 15) * 15 + # if(merid<0) + merid[merid > 180] <- merid[merid > 180] + 15 - 360 + lc <- (LONgrid - merid) * -4 / 60 ## longitude correction + tz <- merid / 360 * 24 ## time zone + midbin <- 0.5 * dt / 86400 * 24 ## shift calc to middle of bin + for (j in 1:n) { + t0 <- 12 + lc - et[j] - tz - midbin ## solar time + h <- pi / 12 * (hr[j] - t0) ## solar hour + dec <- -23.45 * pi / 180 * cos(2 * pi * (doy[j] + 10) / 365) ## declination + cosz <- sin(LATgrid * pi / 180) * sin(dec) + cos(LATgrid * pi / 180) * cos(dec) * cos(h) + cosz[cosz < 0] <- 0 + rpot[, , j] <- 1366 * cosz } - SW[rpot < SW] <- rpot[rpot0.9] <- 0.9 ## ensure some diffuse + frac <- SW / rpot + frac[frac > 0.9] <- 0.9 ## ensure some diffuse frac[frac < 0.0] <- 0.0 frac[is.na(frac)] <- 0.0 frac[is.nan(frac)] <- 0.0 - SWd <- SW*(1-frac) ## Diffuse portion of total short wave rad + SWd <- SW * (1 - frac) ## Diffuse portion of total short wave rad + + nbdsf <- (SW - SWd) * 0.57 # near IR beam downward solar radiation [W/m2] + nddsf <- SWd * 0.48 # near IR diffuse downward solar radiation [W/m2] + vbdsf <- (SW - SWd) * 0.43 # visible beam downward solar radiation [W/m2] + vddsf <- SWd * 0.52 # visible diffuse downward solar radiation [W/m2] - nbdsf <- (SW - SWd) * 0.57# near IR beam downward solar radiation [W/m2] - nddsf <- SWd* 0.48 # near IR diffuse downward solar radiation [W/m2] - vbdsf <- (SW - SWd) * 0.43# visible beam downward solar radiation [W/m2] - vddsf <- SWd * 0.52 # visible diffuse downward solar radiation [W/m2] - ## write as h5 - mout <- paste(froot,"/",froot,"_",y,month[as.numeric(m)],".h5",sep="") - hdf5save(mout,"nbdsf","nddsf","vbdsf","vddsf","prate","dlwrf","pres","ugrd","vgrd","sh","tmp") - - } ## end month + mout <- paste(froot, "/", froot, "_", y, month[as.numeric(m)], ".h5", sep = "") + hdf5save(mout, "nbdsf", "nddsf", "vbdsf", "vddsf", "prate", "dlwrf", "pres", "ugrd", "vgrd", "sh", "tmp") + } ## end month } ## end year - + save.image("paleo.RData") diff --git 
a/modules/data.atmosphere/inst/scripts/flux2lsm.v2.R b/modules/data.atmosphere/inst/scripts/flux2lsm.v2.R index 5e82b696091..f7991f352bd 100644 --- a/modules/data.atmosphere/inst/scripts/flux2lsm.v2.R +++ b/modules/data.atmosphere/inst/scripts/flux2lsm.v2.R @@ -1,59 +1,58 @@ - ### recode of flux2lsm to run on new HDF5 files as reference input and output ### version for Ameriflux L3 files ### SETTINGS site <- "nc_pita" siteID <- "USNC2" -lat = 35.8031 -lon = -76.66791 -tlag <- ceiling(lon/15)/24 -tlag <- -5.0/24 +lat <- 35.8031 +lon <- -76.66791 +tlag <- ceiling(lon / 15) / 24 +tlag <- -5.0 / 24 -#site <- "mize" -#siteID <- "USSP2" -#lat <- 29.7648 -#lon <- -82.2448166667 -#tlag <- -5.0/24 +# site <- "mize" +# siteID <- "USSP2" +# lat <- 29.7648 +# lon <- -82.2448166667 +# tlag <- -5.0/24 site <- "donaldson" siteID <- "USSP3" -lat <- 29.7547666667 +lat <- 29.7547666667 lon <- -82.1632833333 -tlag <- -5.0/24 +tlag <- -5.0 / 24 site <- "oak_openings" siteID <- "USOho" lat <- 41.55454 lon <- -83.84376 -tlag <- -5.0/24 +tlag <- -5.0 / 24 -#yr0 <- 1987 +# yr0 <- 1987 yr0 <- 1998 yrf <- 2008 -tstep = 1800 -rad_step = 900 +tstep <- 1800 +rad_step <- 900 build.date <- Sys.Date() -ismissing <- rep(TRUE,12) -inpath <- paste("/home/mdietze/inputs/fluxnet/",site,"/",sep="") -#refpath <- "/home/scratch/dietze_lab/NARR/bartlett/bartlett_" +ismissing <- rep(TRUE, 12) +inpath <- paste("/home/mdietze/inputs/fluxnet/", site, "/", sep = "") +# refpath <- "/home/scratch/dietze_lab/NARR/bartlett/bartlett_" refpath <- "/home/scratch/dietze_lab/NARR/lower48/lower48_" -LAT <- c(26,49) -LON <- 360+c(-124.5,-67) +LAT <- c(26, 49) +LON <- 360 + c(-124.5, -67) YRES <- 0.50 XRES <- 0.50 -XSEQ <- seq(LON[1]+XRES/2,LON[2],by=XRES) -YSEQ <- seq(LAT[1]+YRES/2,LAT[2],by=YRES) +XSEQ <- seq(LON[1] + XRES / 2, LON[2], by = XRES) +YSEQ <- seq(LAT[1] + YRES / 2, LAT[2], by = YRES) NX <- length(XSEQ) NY <- length(YSEQ) -cellX <- findInterval(360+lon,XSEQ) -cellY <- findInterval(lat,YSEQ) +cellX <- findInterval(360 + lon, XSEQ) +cellY <- findInterval(lat, YSEQ) -outpath <- paste("/home/mdietze/inputs/fluxnet/",site,"/met",build.date,"/",site,"_",sep="") -system(paste("mkdir /home/mdietze/inputs/fluxnet/",site,"/met",build.date,"/",sep="")) +outpath <- paste("/home/mdietze/inputs/fluxnet/", site, "/met", build.date, "/", site, "_", sep = "") +system(paste("mkdir /home/mdietze/inputs/fluxnet/", site, "/met", build.date, "/", sep = "")) ## VARIABLES # 1 - hgt @@ -70,113 +69,111 @@ system(paste("mkdir /home/mdietze/inputs/fluxnet/",site,"/met",build.date,"/",se # 12 - vbdsf # 13 - vddsf # 14 - date - + ### LIBRARIES -library(XML,lib.loc="~/lib/R") -library(ed21,lib.loc="~/lib/R") -library(hdf5,lib.loc="~/lib/R/Rhdf") -setwd(paste("~/inputs/fluxnet/",site,sep="")) +library(XML, lib.loc = "~/lib/R") +library(ed21, lib.loc = "~/lib/R") +library(hdf5, lib.loc = "~/lib/R/Rhdf") +setwd(paste("~/inputs/fluxnet/", site, sep = "")) ### FUNCTIONS -T_st = 373.15 #steam temperature (K) -e_st = 1013.25 #saturation vapor pressure at steam temp (hPa) -radians = 3.14159/180.0 -Kelvin = 273.15 #Celsius->Kelvin -C_PARCONV = 1.0/(0.45*4.6) -WATT = 1/4.6 - -rh2rv <- function(rh, T){ - #/converts relative humidity to specific humidity - #/input: rh = relative humidity (proportion, not %) - #/input: T = absolute temperature - rh*2.541e6*exp(-5415.0/T)*18/29 +T_st <- 373.15 # steam temperature (K) +e_st <- 1013.25 # saturation vapor pressure at steam temp (hPa) +radians <- 3.14159 / 180.0 +Kelvin <- 273.15 # Celsius->Kelvin +C_PARCONV <- 1.0 / (0.45 * 4.6) 
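One detail of the flux2lsm settings above that is easy to miss: the flux site is matched to the 0.5-degree lower48 reference grid by counting, with findInterval(), how many cell-centre coordinates in XSEQ and YSEQ lie at or below the site longitude and latitude. A worked example using the oak_openings coordinates from the script (the resulting indices, 81 and 31, are my own arithmetic, shown only for illustration):

XSEQ <- seq(235.5 + 0.25, 293, by = 0.5) # cell centres 235.75, 236.25, ..., 292.75
YSEQ <- seq(26 + 0.25, 49, by = 0.5) # cell centres 26.25, 26.75, ..., 48.75
findInterval(360 + (-83.84376), XSEQ) # cellX = 81
findInterval(41.55454, YSEQ) # cellY = 31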
+WATT <- 1 / 4.6 + +rh2rv <- function(rh, T) { + # /converts relative humidity to specific humidity + # /input: rh = relative humidity (proportion, not %) + # /input: T = absolute temperature + rh * 2.541e6 * exp(-5415.0 / T) * 18 / 29 } -SatVapPres <- function(T){ - #/estimates saturation vapor pressure (kPa) Goff-Gratch 1946 - #/input: T = absolute temperature - 0.1*exp( -7.90298*(T_st/T-1) + 5.02808*log(T_st/T) - 1.3816e-7*(10^(11.344*(1-T/T_st))-1) + 8.1328e-3*(10^(-3.49149*(T_st/T-1))-1) + log(e_st)) +SatVapPres <- function(T) { + # /estimates saturation vapor pressure (kPa) Goff-Gratch 1946 + # /input: T = absolute temperature + 0.1 * exp(-7.90298 * (T_st / T - 1) + 5.02808 * log(T_st / T) - 1.3816e-7 * (10^(11.344 * (1 - T / T_st)) - 1) + 8.1328e-3 * (10^(-3.49149 * (T_st / T - 1)) - 1) + log(e_st)) } -exner <- function(pres){ - #/ estimated exner function - #/ input: pres = air pressure (Bar) - 1004.0*pres^(287.0/1004.0) +exner <- function(pres) { + # / estimated exner function + # / input: pres = air pressure (Bar) + 1004.0 * pres^(287.0 / 1004.0) } -AirDens <- function(pres, T, rv){ - #/ estimate air density from pressure, temperature, and humidity - #/ input: pres = air pressure (pascals) - #/ input: T = air temperature (Kelvin) - #/ input: rv = humidity - pres/(287.0*T*(1.0+0.61*rv)) +AirDens <- function(pres, T, rv) { + # / estimate air density from pressure, temperature, and humidity + # / input: pres = air pressure (pascals) + # / input: T = air temperature (Kelvin) + # / input: rv = humidity + pres / (287.0 * T * (1.0 + 0.61 * rv)) } -firstday <- function(mo,yr){ - ldoy = c(0,31,60,91,121,152,182,213,244,274,305,335,366) - doy = c(0,31,59,90,120,151,181,212,243,273,304,334,365) +firstday <- function(mo, yr) { + ldoy <- c(0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) + doy <- c(0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) ifelse(lubridate::leap_year(yr), ldoy[mo], doy[mo]) } -nday <- function(mo,yr){ - lnday = c(31,29,31,30,31,30,31,31,30,31,30,31) - nday = c(31,28,31,30,31,30,31,31,30,31,30,31) +nday <- function(mo, yr) { + lnday <- c(31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) + nday <- c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) ifelse(lubridate::leap_year(yr), lnday[mo], nday[mo]) } -smoo <- function(a,b,c){ ## smoother - comb <- c(a[-7:0 + length(a)],b,c[1:8]) - comb <- rep(comb,each=6) - comb <- filter(comb,c(0.1,0.2,0.4,0.2,0.1),"con",circular = TRUE) - comb <- comb[49:(length(comb)-48)] +smoo <- function(a, b, c) { ## smoother + comb <- c(a[-7:0 + length(a)], b, c[1:8]) + comb <- rep(comb, each = 6) + comb <- filter(comb, c(0.1, 0.2, 0.4, 0.2, 0.1), "con", circular = TRUE) + comb <- comb[49:(length(comb) - 48)] comb } -smoR <- function(a,mo,yr,tstep = 900,torig = 10800){ ## radiation - dat <- rep(a,each=torig/tstep) - lab <- rep(1:length(a),each=torig/tstep) - d0 <- firstday(mo,yr) - df <- firstday(mo+1,yr)-1 - rin <- rpot(rep(d0:df,each=86400/tstep),rep(seq(tstep,86400,tstep),nday(mo,yr)),lat,tlag*24) - rbar <- rep(tapply(rin,lab,mean),each=torig/tstep) - r <-apply(cbind(dat*rin/rbar,rep(0,length(dat))),1,max) +smoR <- function(a, mo, yr, tstep = 900, torig = 10800) { ## radiation + dat <- rep(a, each = torig / tstep) + lab <- rep(1:length(a), each = torig / tstep) + d0 <- firstday(mo, yr) + df <- firstday(mo + 1, yr) - 1 + rin <- rpot(rep(d0:df, each = 86400 / tstep), rep(seq(tstep, 86400, tstep), nday(mo, yr)), lat, tlag * 24) + rbar <- rep(tapply(rin, lab, mean), each = torig / tstep) + r <- apply(cbind(dat * rin / rbar, 
rep(0, length(dat))), 1, max) } -month <- c("JAN","FEB","MAR","APR","MAY","JUN","JUL","AUG","SEP","OCT","NOV","DEC") +month <- c("JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC") # calcDecDay( day, month, year){ # day-1 + firstday(month,year) -#} - -dirfrac <- function(jday,time,rshort,lat){ - - dayangle=2.0*3.14159*(jday)/365.25 - declination=0.006918-0.399912*cos(dayangle)+0.070257*sin(dayangle)-0.006758*cos(2.0*dayangle)+0.000907*sin(2.0*dayangle)-0.002697*cos(3.0*dayangle)+0.00148*sin(3.0*dayangle) - eccen=1.00011+0.034221*cos(dayangle)+0.00128*sin(dayangle)+0.000719*cos(2.0*dayangle)+0.000077*sin(2.0*dayangle) - solartime=time/3600.0-12.0-1.0 - ket=1367.0*eccen*(cos(declination)*cos(lat/360.0*2.0*3.14159)*cos(15.0/360.0*2.0*3.14159*(solartime))+sin(declination)*sin((lat)/360.0*2.0*3.14159)) - frac=rshort/ket - if (frac > 0.9) frac=0.9 - if (frac < 0.0) frac=0.0 +# } + +dirfrac <- function(jday, time, rshort, lat) { + dayangle <- 2.0 * 3.14159 * (jday) / 365.25 + declination <- 0.006918 - 0.399912 * cos(dayangle) + 0.070257 * sin(dayangle) - 0.006758 * cos(2.0 * dayangle) + 0.000907 * sin(2.0 * dayangle) - 0.002697 * cos(3.0 * dayangle) + 0.00148 * sin(3.0 * dayangle) + eccen <- 1.00011 + 0.034221 * cos(dayangle) + 0.00128 * sin(dayangle) + 0.000719 * cos(2.0 * dayangle) + 0.000077 * sin(2.0 * dayangle) + solartime <- time / 3600.0 - 12.0 - 1.0 + ket <- 1367.0 * eccen * (cos(declination) * cos(lat / 360.0 * 2.0 * 3.14159) * cos(15.0 / 360.0 * 2.0 * 3.14159 * (solartime)) + sin(declination) * sin((lat) / 360.0 * 2.0 * 3.14159)) + frac <- rshort / ket + if (frac > 0.9) frac <- 0.9 + if (frac < 0.0) frac <- 0.0 frac } -dirfrac2 <- function(rpotn,rshort){ - frac=rshort/rpotn - frac[frac > 0.9]=0.9 - frac[frac < 0.0]=0.0 +dirfrac2 <- function(rpotn, rshort) { + frac <- rshort / rpotn + frac[frac > 0.9] <- 0.9 + frac[frac < 0.0] <- 0.0 frac } -rpot <- function(jday,time,lat,tlag){ - - dayangle=2.0*3.14159*(jday)/365.25 - declination=0.006918-0.399912*cos(dayangle)+0.070257*sin(dayangle)-0.006758*cos(2.0*dayangle)+0.000907*sin(2.0*dayangle)-0.002697*cos(3.0*dayangle)+0.00148*sin(3.0*dayangle) - eccen=1.00011+0.034221*cos(dayangle)+0.00128*sin(dayangle)+0.000719*cos(2.0*dayangle)+0.000077*sin(2.0*dayangle) - solartime=time/3600.0-12.0+tlag - ket=1367.0*eccen*(cos(declination)*cos(lat/360.0*2.0*3.14159)*cos(15.0/360.0*2.0*3.14159*(solartime))+sin(declination)*sin((lat)/360.0*2.0*3.14159)) - ket[ket<0] <- 0 +rpot <- function(jday, time, lat, tlag) { + dayangle <- 2.0 * 3.14159 * (jday) / 365.25 + declination <- 0.006918 - 0.399912 * cos(dayangle) + 0.070257 * sin(dayangle) - 0.006758 * cos(2.0 * dayangle) + 0.000907 * sin(2.0 * dayangle) - 0.002697 * cos(3.0 * dayangle) + 0.00148 * sin(3.0 * dayangle) + eccen <- 1.00011 + 0.034221 * cos(dayangle) + 0.00128 * sin(dayangle) + 0.000719 * cos(2.0 * dayangle) + 0.000077 * sin(2.0 * dayangle) + solartime <- time / 3600.0 - 12.0 + tlag + ket <- 1367.0 * eccen * (cos(declination) * cos(lat / 360.0 * 2.0 * 3.14159) * cos(15.0 / 360.0 * 2.0 * 3.14159 * (solartime)) + sin(declination) * sin((lat) / 360.0 * 2.0 * 3.14159)) + ket[ket < 0] <- 0 ket } @@ -184,31 +181,31 @@ rpot <- function(jday,time,lat,tlag){ ### MAIN ## get list of met/flux files -mfile <- dir(inpath,"L3") -mfile <- mfile[grep("txt",mfile)] +mfile <- dir(inpath, "L3") +mfile <- mfile[grep("txt", mfile)] ## load site data -met <- 
list(hgt=NULL,prate=NULL,pres=NULL,sh=NULL,tmp=NULL,ugrd=NULL,vgrd=NULL,co2=NULL,dlwrf=NULL,nbdsf=NULL,nddsf=NULL,vbdsf=NULL,vddsf=NULL,date=NULL) -for(i in 1:length(mfile)){ +met <- list(hgt = NULL, prate = NULL, pres = NULL, sh = NULL, tmp = NULL, ugrd = NULL, vgrd = NULL, co2 = NULL, dlwrf = NULL, nbdsf = NULL, nddsf = NULL, vbdsf = NULL, vddsf = NULL, date = NULL) +for (i in 1:length(mfile)) { print(mfile[i]) ## read ameriflux annual file - rawmet <- read.csv(mfile[i],header=TRUE,na.strings=c("-9999.00","-9999")) - rawmet[rawmet==-9999] <- NA - year <- sub(siteID,"",mfile[i]) - year <- as.numeric(sub("_L3.txt","",year)) - if(!is.numeric(year)|is.na(year)) print(c("ODD YEAR",year,mfile[i])) - + rawmet <- read.csv(mfile[i], header = TRUE, na.strings = c("-9999.00", "-9999")) + rawmet[rawmet == -9999] <- NA + year <- sub(siteID, "", mfile[i]) + year <- as.numeric(sub("_L3.txt", "", year)) + if (!is.numeric(year) | is.na(year)) print(c("ODD YEAR", year, mfile[i])) + ## preprocess some met variables w <- rawmet$WS wdir <- rawmet$WD ## meteorology - met$prate <- c(met$prate , rawmet$Precip/1800) - met$tmp <- c(met$tmp , rawmet$Ta + Kelvin) - met$sh <- c(met$sh , rh2rv(rawmet$Rh/100,rawmet$Ta)) - met$ugrd <- c(met$ugrd , w*cos(wdir*radians)) - met$vgrd <- c(met$vgrd , w*sin(wdir*radians)) - met$date <- rbind(met$date,cbind(rep(year,nrow(rawmet)),rawmet[,1:4])) ## date + met$prate <- c(met$prate, rawmet$Precip / 1800) + met$tmp <- c(met$tmp, rawmet$Ta + Kelvin) + met$sh <- c(met$sh, rh2rv(rawmet$Rh / 100, rawmet$Ta)) + met$ugrd <- c(met$ugrd, w * cos(wdir * radians)) + met$vgrd <- c(met$vgrd, w * sin(wdir * radians)) + met$date <- rbind(met$date, cbind(rep(year, nrow(rawmet)), rawmet[, 1:4])) ## date ## should we set CO2 @@ -217,138 +214,136 @@ for(i in 1:length(mfile)){ vis <- rawmet$PPFD * WATT ## umol/m2/sec -> w/m2 vis[vis < 0] <- 0.0 miss <- which(is.na(rshort)) - rshort[miss] <- vis[miss]/0.45 + rshort[miss] <- vis[miss] / 0.45 rpotn <- rawmet$R_pot rdiff <- rawmet$Rd - fdiff <- rdiff/rshort - fdir <- 1-fdiff - fdir.pot <- dirfrac2(rpotn,rshort) + fdiff <- rdiff / rshort + fdir <- 1 - fdiff + fdir.pot <- dirfrac2(rpotn, rshort) miss <- which(is.na(fdir)) - fdir[miss] <-fdir.pot[miss] + fdir[miss] <- fdir.pot[miss] nir <- rshort - vis - - + + ## radiation - met$nbdsf <- c(met$nbdsf , nir*fdir) - met$vbdsf <- c(met$vbdsf , vis*fdir) - met$nddsf <- c(met$nddsf , nir*(1-fdir)) - met$vddsf <- c(met$vddsf , vis*(1-fdir)) - met$dlwrf <- c(met$dlwrf , rep(NA,length(vis))) - + met$nbdsf <- c(met$nbdsf, nir * fdir) + met$vbdsf <- c(met$vbdsf, vis * fdir) + met$nddsf <- c(met$nddsf, nir * (1 - fdir)) + met$vddsf <- c(met$vddsf, vis * (1 - fdir)) + met$dlwrf <- c(met$dlwrf, rep(NA, length(vis))) } -##shift tower data to GMT -for(i in 1:13){ - if(!is.null(met[[i]])){ - met[[i]] <- c(rep(mean(met[[i]][1:(-48*tlag)]),-48*tlag),met[[i]]) +## shift tower data to GMT +for (i in 1:13) { + if (!is.null(met[[i]])) { + met[[i]] <- c(rep(mean(met[[i]][1:(-48 * tlag)]), -48 * tlag), met[[i]]) } } ## interp rad to 15min -rad <- list(hgt=NULL,prate=NULL,pres=NULL,sh=NULL,tmp=NULL,ugrd=NULL,vgrd=NULL,co2=NULL,dlwrf=NULL,nbdsf=NULL,nddsf=NULL,vbdsf=NULL,vddsf=NULL,date=NULL) - -for(j in 9:13){ - for(yr in yr0:yrf){ - for(mo in 1:12){ - d0 <- firstday(mo,yr)+1 - df <- firstday(mo+1,yr) - sel <- which(met[[14]][,1] == yr) -# sel <- sel[which(met[[14]][sel,2] %in% d0:df)] - sel <- sel[which(met$date[sel,2] == mo)] - if(length(sel)>0){ -## rtemp <- smoR(met[[j]][sel],mo,yr,rad_step,1800) - rtemp <- 
rep(met[[j]][sel],each=1800/rad_step) -## if(!is.null(rtemp)){ - if(is.null(rad[[j]])){ - rad[[j]] <- rtemp - }else{ - rad[[j]] <- c(rad[[j]],rtemp) - } -## } +rad <- list(hgt = NULL, prate = NULL, pres = NULL, sh = NULL, tmp = NULL, ugrd = NULL, vgrd = NULL, co2 = NULL, dlwrf = NULL, nbdsf = NULL, nddsf = NULL, vbdsf = NULL, vddsf = NULL, date = NULL) + +for (j in 9:13) { + for (yr in yr0:yrf) { + for (mo in 1:12) { + d0 <- firstday(mo, yr) + 1 + df <- firstday(mo + 1, yr) + sel <- which(met[[14]][, 1] == yr) + # sel <- sel[which(met[[14]][sel,2] %in% d0:df)] + sel <- sel[which(met$date[sel, 2] == mo)] + if (length(sel) > 0) { + ## rtemp <- smoR(met[[j]][sel],mo,yr,rad_step,1800) + rtemp <- rep(met[[j]][sel], each = 1800 / rad_step) + ## if(!is.null(rtemp)){ + if (is.null(rad[[j]])) { + rad[[j]] <- rtemp + } else { + rad[[j]] <- c(rad[[j]], rtemp) + } + ## } } } } } -narr <- narr.next <- hdf5load(paste(refpath,yr0,month[1],".h5",sep=""),load=FALSE) +narr <- narr.next <- hdf5load(paste(refpath, yr0, month[1], ".h5", sep = ""), load = FALSE) ## FOR EACH NARR FILE -for(yr in yr0:yrf){ - for(mo in 1:12){ - - ##load NARR data +for (yr in yr0:yrf) { + for (mo in 1:12) { + ## load NARR data narr.last <- narr narr <- narr.next - if(mo < 12){ - narr.next <- hdf5load(paste(refpath,yr,month[mo+1],".h5",sep=""),load=FALSE) + if (mo < 12) { + narr.next <- hdf5load(paste(refpath, yr, month[mo + 1], ".h5", sep = ""), load = FALSE) } else { - if(yr < yrf){ - narr.next <- hdf5load(paste(refpath,yr+1,month[1],".h5",sep=""),load=FALSE) + if (yr < yrf) { + narr.next <- hdf5load(paste(refpath, yr + 1, month[1], ".h5", sep = ""), load = FALSE) } } - - ##interp to target res + + ## interp to target res rmet <- list() - for(j in c(1,4:8,11)){ - nj = which(names(met) == names(narr)[j]) - rmet[[j]] <- smoo(narr.last[[j]][cellY,cellX,],narr[[j]][cellY,cellX,],narr.next[[j]][cellY,cellX,]) + for (j in c(1, 4:8, 11)) { + nj <- which(names(met) == names(narr)[j]) + rmet[[j]] <- smoo(narr.last[[j]][cellY, cellX, ], narr[[j]][cellY, cellX, ], narr.next[[j]][cellY, cellX, ]) } - for(j in c(2,3,9,10)){ #not LW - rmet[[j]] <- smoR(narr[[j-1]][cellY,cellX,],mo,yr,rad_step) + for (j in c(2, 3, 9, 10)) { # not LW + rmet[[j]] <- smoR(narr[[j - 1]][cellY, cellX, ], mo, yr, rad_step) } - - ##align tower - d0 <- firstday(mo,yr)+1 - df <- firstday(mo+1,yr) - - sel <- which(met[[14]][,1] == yr) - sel <- sel[which(floor(met[[14]][sel,5]-0.001) %in% d0:df)] - - if(length(sel)>0){ - ##merge atm - for(j in c(1,4:8)){ - nj = which(names(met)==names(narr)[j]) - if(!is.null(met[[nj]])){ - if(j == 8) { ###precip - ##snowcor + + ## align tower + d0 <- firstday(mo, yr) + 1 + df <- firstday(mo + 1, yr) + + sel <- which(met[[14]][, 1] == yr) + sel <- sel[which(floor(met[[14]][sel, 5] - 0.001) %in% d0:df)] + + if (length(sel) > 0) { + ## merge atm + for (j in c(1, 4:8)) { + nj <- which(names(met) == names(narr)[j]) + if (!is.null(met[[nj]])) { + if (j == 8) { ### precip + ## snowcor snow <- which(rmet[[5]] < Kelvin) - met[[nj]][snow] <- rmet[[j]][snow] ##fill snow from NARR + met[[nj]][snow] <- rmet[[j]][snow] ## fill snow from NARR } nsel <- which(!is.na(met[[nj]][sel])) rmet[[j]][nsel] <- met[[nj]][sel[nsel]] ## fill NARR from tower } } ## merge radiation - ryr <- rep(met[[14]][,1],each=1800/rad_step) - rdoy <- rep(met[[14]][,5],each=1800/rad_step) + ryr <- rep(met[[14]][, 1], each = 1800 / rad_step) + rdoy <- rep(met[[14]][, 5], each = 1800 / rad_step) rsel <- which(ryr == yr) rsel <- which(rdoy[rsel] %in% d0:df) - for(j in c(2,3,9,10)){ - 
nj = which(names(met)==names(narr)[j]) - if(!is.null(met[[nj]])){ + for (j in c(2, 3, 9, 10)) { + nj <- which(names(met) == names(narr)[j]) + if (!is.null(met[[nj]])) { nsel <- which(!is.na(rad[[nj]][rsel])) rmet[[j]][nsel] <- rad[[nj]][rsel[nsel]] ## fill NARR from tower } - } + } } ## else just output downscaled NARR - - ##output - ofile <- paste(outpath,yr,month[mo],".h5",sep="") - Rfile <- paste(outpath,yr,month[mo],".RData",sep="") - vgrd <- array(rmet[[1]],c(1,1,length(rmet[[1]]))) - vddsf <- array(rmet[[2]],c(1,1,length(rmet[[2]]))) - vbdsf <- array(rmet[[3]],c(1,1,length(rmet[[3]]))) - ugrd <- array(rmet[[4]],c(1,1,length(rmet[[4]]))) - tmp <- array(rmet[[5]],c(1,1,length(rmet[[5]]))) - sh <- array(rmet[[6]],c(1,1,length(rmet[[6]]))) - pres <- array(rmet[[7]],c(1,1,length(rmet[[7]]))) - prate <- array(rmet[[8]],c(1,1,length(rmet[[8]]))) - nddsf <- array(rmet[[9]],c(1,1,length(rmet[[9]]))) - nbdsf <- array(rmet[[10]],c(1,1,length(rmet[[10]]))) -# hgt <- array(rmet[[11]],c(1,1,length(rmet[[11]]))) -# dlwrf <- array(rmet[[12]],c(1,1,length(rmet[[12]]))) - dlwrf <- array(rmet[[11]],c(1,1,length(rmet[[11]]))) - hgt = 50 - hdf5save(ofile,"vgrd","vddsf","vbdsf","ugrd","tmp","sh","pres","prate","nddsf","nbdsf","hgt","dlwrf") - save(vgrd,vddsf,vbdsf,ugrd,tmp,sh,pres,prate,nddsf,nbdsf,hgt,dlwrf,file=Rfile) + + ## output + ofile <- paste(outpath, yr, month[mo], ".h5", sep = "") + Rfile <- paste(outpath, yr, month[mo], ".RData", sep = "") + vgrd <- array(rmet[[1]], c(1, 1, length(rmet[[1]]))) + vddsf <- array(rmet[[2]], c(1, 1, length(rmet[[2]]))) + vbdsf <- array(rmet[[3]], c(1, 1, length(rmet[[3]]))) + ugrd <- array(rmet[[4]], c(1, 1, length(rmet[[4]]))) + tmp <- array(rmet[[5]], c(1, 1, length(rmet[[5]]))) + sh <- array(rmet[[6]], c(1, 1, length(rmet[[6]]))) + pres <- array(rmet[[7]], c(1, 1, length(rmet[[7]]))) + prate <- array(rmet[[8]], c(1, 1, length(rmet[[8]]))) + nddsf <- array(rmet[[9]], c(1, 1, length(rmet[[9]]))) + nbdsf <- array(rmet[[10]], c(1, 1, length(rmet[[10]]))) + # hgt <- array(rmet[[11]],c(1,1,length(rmet[[11]]))) + # dlwrf <- array(rmet[[12]],c(1,1,length(rmet[[12]]))) + dlwrf <- array(rmet[[11]], c(1, 1, length(rmet[[11]]))) + hgt <- 50 + hdf5save(ofile, "vgrd", "vddsf", "vbdsf", "ugrd", "tmp", "sh", "pres", "prate", "nddsf", "nbdsf", "hgt", "dlwrf") + save(vgrd, vddsf, vbdsf, ugrd, tmp, sh, pres, prate, nddsf, nbdsf, hgt, dlwrf, file = Rfile) } ## end month } ## end year diff --git a/modules/data.atmosphere/inst/scripts/lapse.R b/modules/data.atmosphere/inst/scripts/lapse.R index 3bef0e72be4..9f4969074ef 100644 --- a/modules/data.atmosphere/inst/scripts/lapse.R +++ b/modules/data.atmosphere/inst/scripts/lapse.R @@ -1,52 +1,52 @@ -library(ed21,lib.loc="~/lib/R") +library(ed21, lib.loc = "~/lib/R") ### Load data datadir <- "/home/mdietze/stats/lapse/" -stationfile <- dir(datadir,"dat.txt",full.names=TRUE) +stationfile <- dir(datadir, "dat.txt", full.names = TRUE) dat <- read.csv(stationfile) met <- ExtractNOAAstationMonthly(dat) ### load station locations -namefile <- dir(datadir,"stn.txt",full.names=TRUE) -stn <- read.fwf(namefile,c(7,8,31,33,7,32,18,10,11,10),header=FALSE,skip=2,na.strings=" ",as.is=TRUE) +namefile <- dir(datadir, "stn.txt", full.names = TRUE) +stn <- read.fwf(namefile, c(7, 8, 31, 33, 7, 32, 18, 10, 11, 10), header = FALSE, skip = 2, na.strings = " ", as.is = TRUE) keep <- which(!(1:nrow(stn) %in% c(15))) ## exclude stations -stnID <- as.numeric(as.character(stn[keep,1])) -elev <- as.numeric(as.character(stn[keep,10])) -#mch <- match(stnID,met$ID) 
+stnID <- as.numeric(as.character(stn[keep, 1])) +elev <- as.numeric(as.character(stn[keep, 10])) +# mch <- match(stnID,met$ID) -Temp <- apply(met$MNTM,2,mean,na.rm=TRUE)[keep] -ppt <- apply(met$TPCP,2,mean,na.rm=TRUE)[keep] -snow <- apply(met$TSNW,2,mean,na.rm=TRUE)[keep] +Temp <- apply(met$MNTM, 2, mean, na.rm = TRUE)[keep] +ppt <- apply(met$TPCP, 2, mean, na.rm = TRUE)[keep] +snow <- apply(met$TSNW, 2, mean, na.rm = TRUE)[keep] -par(mfrow=c(3,1)) +par(mfrow = c(3, 1)) -plot(elev,Temp) +plot(elev, Temp) tm <- lm(Temp ~ elev) -abline(a=coef(tm)[1],b=coef(tm)[2]) +abline(a = coef(tm)[1], b = coef(tm)[2]) -plot(elev,ppt) +plot(elev, ppt) pm <- lm(ppt ~ elev) -abline(a=coef(pm)[1],b=coef(pm)[2]) +abline(a = coef(pm)[1], b = coef(pm)[2]) -plot(elev,snow) +plot(elev, snow) sm <- lm(snow ~ elev) -abline(a=coef(sm)[1],b=coef(sm)[2]) +abline(a = coef(sm)[1], b = coef(sm)[2]) -print(c("Temp",coef(tm))) -print(c("ppt",coef(pm))) -print(c("snow",coef(sm))) +print(c("Temp", coef(tm))) +print(c("ppt", coef(pm))) +print(c("snow", coef(sm))) ## need to double check units!! -par(mfrow=c(1,2)) -plot(elev,Temp,xlab="Elevation (m)",ylab="Temperature (C)") +par(mfrow = c(1, 2)) +plot(elev, Temp, xlab = "Elevation (m)", ylab = "Temperature (C)") tm <- lm(Temp ~ elev) -abline(a=coef(tm)[1],b=coef(tm)[2]) +abline(a = coef(tm)[1], b = coef(tm)[2]) -plot(elev,ppt*12,xlab="Elevation (m)",ylab="Precipitation (mm/year)") -pm <- lm(ppt*12 ~ elev) -abline(a=coef(pm)[1],b=coef(pm)[2]) +plot(elev, ppt * 12, xlab = "Elevation (m)", ylab = "Precipitation (mm/year)") +pm <- lm(ppt * 12 ~ elev) +abline(a = coef(pm)[1], b = coef(pm)[2]) diff --git a/modules/data.atmosphere/inst/scripts/load.ameriflux.R b/modules/data.atmosphere/inst/scripts/load.ameriflux.R index f9fb0578551..22257a92b90 100644 --- a/modules/data.atmosphere/inst/scripts/load.ameriflux.R +++ b/modules/data.atmosphere/inst/scripts/load.ameriflux.R @@ -1,24 +1,25 @@ ## general code to load Ameriflux/Fluxnet "LaThuille" files ## -load.Ameriflux <- function(path,level=4,freq="h"){ - +load.Ameriflux <- function(path, level = 4, freq = "h") { post <- "3.txt" - if(level == 4){post <- paste(level,"_",freq,".txt",sep="")} - files <- dir(path,pattern=post,full.names=TRUE) + if (level == 4) { + post <- paste(level, "_", freq, ".txt", sep = "") + } + files <- dir(path, pattern = post, full.names = TRUE) dat <- list() - for( i in 1:length(files)){ - dat.tmp <- read.csv(files[i],header=TRUE) + for (i in 1:length(files)) { + dat.tmp <- read.csv(files[i], header = TRUE) dat.tmp[dat.tmp <= -9999.0] <- NA - if(length(dat) == 0){ - for(j in 1:ncol(dat.tmp)){ - dat[[j]] <- dat.tmp[,j] + if (length(dat) == 0) { + for (j in 1:ncol(dat.tmp)) { + dat[[j]] <- dat.tmp[, j] } names(dat) <- names(dat.tmp) } else { - for(j in 1:ncol(dat.tmp)){ - dat[[j]] <- c(dat[[j]],dat.tmp[,j]) + for (j in 1:ncol(dat.tmp)) { + dat[[j]] <- c(dat[[j]], dat.tmp[, j]) } } } @@ -31,5 +32,3 @@ load.Ameriflux <- function(path,level=4,freq="h"){ ## ## access values based on list notation or by name ## e.g. 
plot(flux[[23]]) or plot(flux$NEE_st_fMDS) - - diff --git a/modules/data.atmosphere/inst/scripts/lsmMerge.R b/modules/data.atmosphere/inst/scripts/lsmMerge.R index bc9b4379248..c85e3abeb15 100644 --- a/modules/data.atmosphere/inst/scripts/lsmMerge.R +++ b/modules/data.atmosphere/inst/scripts/lsmMerge.R @@ -5,217 +5,213 @@ ## and HB daily means ## settings -mettype <- 1 ## 0 = SOI Type of met file being READ - ## 1 = regional -loc <- c(43.93,-71.75) ## Bartlett +mettype <- 1 ## 0 = SOI Type of met file being READ +## 1 = regional +loc <- c(43.93, -71.75) ## Bartlett ## libraries and constants library(ed2) Kelvin <- 273.15 ## Primary hi-res file (in RAMS format) -#metbase <- "/home/mcd/inputs/fluxnet/bartlett/hires/lat43.5lon-71.5/" -#metbase <- "/home/mcd/inputs/fluxnet/bartlett/hourly/lat43.5lon-71.5/" -#metbase <- "/home/mcd/inputs/fluxnet/bartlett/lat43.5lon-71.5/" +# metbase <- "/home/mcd/inputs/fluxnet/bartlett/hires/lat43.5lon-71.5/" +# metbase <- "/home/mcd/inputs/fluxnet/bartlett/hourly/lat43.5lon-71.5/" +# metbase <- "/home/mcd/inputs/fluxnet/bartlett/lat43.5lon-71.5/" metbase <- "/n/Moorcroft_Lab/Users/mcd/inputs/reanalysis/ncep/" -#metstart <- c(1,2004) -#metstart <- c(1,2000) -#metstart <- c(8,2002) -metstart <- c(1,1974) -metstop <- c(12,2005) -#metres <- 96 ##divisions/day -#metres <- 24 ##divisions/day +# metstart <- c(1,2004) +# metstart <- c(1,2000) +# metstart <- c(8,2002) +metstart <- c(1, 1974) +metstop <- c(12, 2005) +# metres <- 96 ##divisions/day +# metres <- 24 ##divisions/day metres <- 4 ## Secondary daily file -##daily <- read.table("/n/Moorcroft_Lab/Users/mcd/hydro_test/hubbard/met/dailyPPTwatershed.txt",header=TRUE,na.strings="-99.0") -daily <- read.csv("/n/Moorcroft_Lab/Users/mcd/hydro_test/hubbard/met/pwd_all.txt",header=TRUE,na.strings="-99.0") +## daily <- read.table("/n/Moorcroft_Lab/Users/mcd/hydro_test/hubbard/met/dailyPPTwatershed.txt",header=TRUE,na.strings="-99.0") +daily <- read.csv("/n/Moorcroft_Lab/Users/mcd/hydro_test/hubbard/met/pwd_all.txt", header = TRUE, na.strings = "-99.0") daily.time <- string2day(daily$DATE) -daily.dd <- jday(daily.time[,1],daily.time[,2],daily.time[,3]) -daily.ppt <- daily$WS_6/86400.0 ## select coln and rescale to mm/s -temp <- read.csv("/n/Moorcroft_Lab/Users/mcd/hydro_test/hubbard/met/TempDailyMean.txt",header=TRUE,na.strings="-99.0") +daily.dd <- jday(daily.time[, 1], daily.time[, 2], daily.time[, 3]) +daily.ppt <- daily$WS_6 / 86400.0 ## select coln and rescale to mm/s +temp <- read.csv("/n/Moorcroft_Lab/Users/mcd/hydro_test/hubbard/met/TempDailyMean.txt", header = TRUE, na.strings = "-99.0") temp.time <- string2day(temp$DATE) -temp.dd <- jday(temp.time[,1],temp.time[,2],temp.time[,3]) +temp.dd <- jday(temp.time[, 1], temp.time[, 2], temp.time[, 3]) daily.temp <- temp$STA_6 + Kelvin ## select coln and rescale to Kelvin ### ALT Secondary: use a NOAA met station -stationfolder <-"/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/NorthConwayMet/" -stationfile <- dir(stationfolder,"dat.txt") -smet <- ExtractNOAAstation(read.csv(paste(stationfolder,stationfile,sep=""))) +stationfolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/NorthConwayMet/" +stationfile <- dir(stationfolder, "dat.txt") +smet <- ExtractNOAAstation(read.csv(paste(stationfolder, stationfile, sep = ""))) daily.time <- temp.time <- smet$date -daily.ppt <- smet$PRCP/86400 -daily.temp <- 0.5*(smet$TMIN+smet$TMAX) + Kelvin +daily.ppt <- smet$PRCP / 86400 +daily.temp <- 0.5 * (smet$TMIN + smet$TMAX) + Kelvin ## output path -##outpath <- 
"/home/mcd/hydro_test/hubbard/met/hires/lat43.5lon-71.5/" -##outpath <- "/home/mcd/hydro_test/hubbard/met/hourly/" -##outpath <- "/home/mcd/hydro_test/hubbard/met/lowresbartlett/lat43.5lon-71.5/" -##outpath <- "/n/Moorcroft_Lab/Users/mcd/hydro_test/hubbard/met/NCEP/" +## outpath <- "/home/mcd/hydro_test/hubbard/met/hires/lat43.5lon-71.5/" +## outpath <- "/home/mcd/hydro_test/hubbard/met/hourly/" +## outpath <- "/home/mcd/hydro_test/hubbard/met/lowresbartlett/lat43.5lon-71.5/" +## outpath <- "/n/Moorcroft_Lab/Users/mcd/hydro_test/hubbard/met/NCEP/" outpath <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/NorthConwayMet/" -##parameters for nocturnal smoothing on splice -##hires -#width <- 5 ## width of smoothing window -#nsmooth <- 4 ## number of measurements on each side of the day to be smoothed -##hourly -#width <- 2 ## width of smoothing window -#nsmooth <- 2 ## number of measurements on each side of the day to be smoothed -##6hr +## parameters for nocturnal smoothing on splice +## hires +# width <- 5 ## width of smoothing window +# nsmooth <- 4 ## number of measurements on each side of the day to be smoothed +## hourly +# width <- 2 ## width of smoothing window +# nsmooth <- 2 ## number of measurements on each side of the day to be smoothed +## 6hr width <- 0 ## width of smoothing window nsmooth <- 0 ## number of measurements on each side of the day to be smoothed sfun <- combl(width) ## Loop over met/rad files -for(yr in metstart[2]:metstop[2]){ +for (yr in metstart[2]:metstop[2]) { fmo <- 1 lmo <- 12 - if(yr == metstart[2]) fmo <- metstart[1] - if(yr == metstop[2]) lmo <- metstop[1] - for(mo in fmo:lmo){ + if (yr == metstart[2]) fmo <- metstart[1] + if (yr == metstop[2]) lmo <- metstop[1] + for (mo in fmo:lmo) { + print(c(mo, yr)) - print(c(mo,yr)) - ## read met file met <- rad <- NULL - if(mettype == 0){ - met <- read.met(metbase,mo,yr) - rad <- read.rad(metbase,mo,yr) + if (mettype == 0) { + met <- read.met(metbase, mo, yr) + rad <- read.rad(metbase, mo, yr) } else { - met <- read.met.region(metbase,mo,yr,loc) - rad <- read.rad.region(metbase,mo,yr,loc) + met <- read.met.region(metbase, mo, yr, loc) + rad <- read.rad.region(metbase, mo, yr, loc) } ## print(c(as.integer(yr),as.integer(mo),as.integer(nrow(met)),nrow(met)/96)) ## } - ##} - + ## } + ## select matching daily - sel <- which(apply(cbind(daily.time[,1] == yr,daily.time[,2]==mo),1,sum)==2) + sel <- which(apply(cbind(daily.time[, 1] == yr, daily.time[, 2] == mo), 1, sum) == 2) dtemp <- daily.ppt[sel] - dtemp.days <- daily.time[sel,3] + dtemp.days <- daily.time[sel, 3] - sel <- which(apply(cbind(temp.time[,1] == yr,temp.time[,2]==mo),1,sum)==2) + sel <- which(apply(cbind(temp.time[, 1] == yr, temp.time[, 2] == mo), 1, sum) == 2) temp.lowres <- daily.temp[sel] - temp.lowres.days <- temp.time[sel,3] - + temp.lowres.days <- temp.time[sel, 3] + ## first establish wet/dry day status in both records, calculate daily ppt - wetThresh <- 2.45/86400 # if < .1 inch/day assume is dry (below detection) - days <- rep(1:31,each=metres)[1:nrow(met)] - raddays <- rep(1:31,each=96)[1:nrow(rad)] - wet <- tapply((met$conprr+met$pcpg),days,sum)/metres - stormlen <- tapply((met$conprr+met$pcpg)>0,days,sum) + wetThresh <- 2.45 / 86400 # if < .1 inch/day assume is dry (below detection) + days <- rep(1:31, each = metres)[1:nrow(met)] + raddays <- rep(1:31, each = 96)[1:nrow(rad)] + wet <- tapply((met$conprr + met$pcpg), days, sum) / metres + stormlen <- tapply((met$conprr + met$pcpg) > 0, days, sum) iswet <- wet > wetThresh all.days <- 
sort(unique(days)) - + ## create temperature record - temp.hires <- tapply((met$theta),days,mean) - + temp.hires <- tapply((met$theta), days, mean) + ## id miss-match days daymatch <- 1:length(all.days) - daymatch[dtemp.days[apply(cbind(iswet[dtemp.days],dtemp > 0),1,sum) == 1]] <- NA + daymatch[dtemp.days[apply(cbind(iswet[dtemp.days], dtemp > 0), 1, sum) == 1]] <- NA daymatch[is.na(dtemp)] <- which(is.na(dtemp)) ## set missing to "match" to keep original data - for(t in 1:length(daymatch)){ - dmatch <- match(t,dtemp.days) - if(is.na(daymatch[t])&&!is.na(dmatch)){ - ##if target is wet, find nearest wet day - if(dtemp[dmatch] > 0){ + for (t in 1:length(daymatch)) { + dmatch <- match(t, dtemp.days) + if (is.na(daymatch[t]) && !is.na(dmatch)) { + ## if target is wet, find nearest wet day + if (dtemp[dmatch] > 0) { j <- t - for(i in 1:length(all.days)){ - j <- max(1,t-i) - if(iswet[j]) break; - j <- min(length(all.days),t+i) - if(iswet[j]) break; + for (i in 1:length(all.days)) { + j <- max(1, t - i) + if (iswet[j]) break + j <- min(length(all.days), t + i) + if (iswet[j]) break } daymatch[t] <- j } - ##if target is dry, find nearest dry day - if(dtemp[dmatch] == 0){ + ## if target is dry, find nearest dry day + if (dtemp[dmatch] == 0) { j <- t - for(i in 1:length(all.days)){ - j <- max(1,t-i) - if(!iswet[j]) break; - j <- min(length(all.days),t+i) - if(!iswet[j]) break; + for (i in 1:length(all.days)) { + j <- max(1, t - i) + if (!iswet[j]) break + j <- min(length(all.days), t + i) + if (!iswet[j]) break } daymatch[t] <- j } } - } - #temp.bias <- mean(temp.hires-temp.lowres) - + } + # temp.bias <- mean(temp.hires-temp.lowres) + ## make new met & rad files metnew <- met radnew <- rad - for(i in 1:length(all.days)){ - dmatch <- match(i,dtemp.days) + for (i in 1:length(all.days)) { + dmatch <- match(i, dtemp.days) ## select individual measurements to swap sel1 <- which(days == i) sel2 <- which(days == daymatch[i]) rsel1 <- which(raddays == i) rsel2 <- which(raddays == daymatch[i]) nrep <- length(sel2) - length(sel1) - if(nrep > 0){ - sel1 <- c(sel1,rep(sel1[length(sel1)],nrep)) + if (nrep > 0) { + sel1 <- c(sel1, rep(sel1[length(sel1)], nrep)) } - if(nrep < 0){ + if (nrep < 0) { nrep <- -nrep - sel2 <- c(sel2,rep(sel2[length(sel2)],nrep)) + sel2 <- c(sel2, rep(sel2[length(sel2)], nrep)) } - ## splice in nearest match day (both met and rad) - metnew[sel1,] <- met[sel2,] - radnew[sel1,] <- rad[sel2,] + ## splice in nearest match day (both met and rad) + metnew[sel1, ] <- met[sel2, ] + radnew[sel1, ] <- rad[sel2, ] ## rescale ppt to preserve daily sum - if(!is.na(dmatch) && !is.na(dtemp[dmatch])){ - if(dtemp[dmatch] > 0){ - fac <- dtemp[dmatch]/wet[daymatch[i]] + if (!is.na(dmatch) && !is.na(dtemp[dmatch])) { + if (dtemp[dmatch] > 0) { + fac <- dtemp[dmatch] / wet[daymatch[i]] metnew$conprr[sel1] <- metnew$conprr[sel1] * fac - metnew$pcpg[sel1] <- metnew$pcpg[sel1] * fac - } else { ## make sure dry days stay dry + metnew$pcpg[sel1] <- metnew$pcpg[sel1] * fac + } else { ## make sure dry days stay dry metnew$conprr[sel1] <- 0.0 metnew$pcpg[sel1] <- 0.0 } } - + ## linearly rescale temperature - tmatch <- match(i,temp.lowres.days) - if(!is.na(tmatch) && !is.na(temp.lowres[tmatch])){ - airtemp <- met$theta[sel1]*met$pi0[sel1]/1004 - b <-mean(airtemp)-temp.lowres[tmatch] ##mean(metnew$theta[sel1]) - metnew$theta[sel1] <- metnew$theta[sel1] - b*1004/met$pi0[sel1] + tmatch <- match(i, temp.lowres.days) + if (!is.na(tmatch) && !is.na(temp.lowres[tmatch])) { + airtemp <- met$theta[sel1] * met$pi0[sel1] / 
1004 + b <- mean(airtemp) - temp.lowres[tmatch] ## mean(metnew$theta[sel1]) + metnew$theta[sel1] <- metnew$theta[sel1] - b * 1004 / met$pi0[sel1] } - + ## smooth transition on spliced - if(i > 1 && (i != daymatch[i] || (i-1) != daymatch[i-1])){ - splice <- matrix(NA,2*nsmooth+1,12) - rsplice <- matrix(NA,2*nsmooth+1,ncol(rad)) + if (i > 1 && (i != daymatch[i] || (i - 1) != daymatch[i - 1])) { + splice <- matrix(NA, 2 * nsmooth + 1, 12) + rsplice <- matrix(NA, 2 * nsmooth + 1, ncol(rad)) ## loop over data to be smoothed - for(w in 1:(2*nsmooth + 1)){ - row <- w - nsmooth -1 + sel1[1] - window <- 1:length(sfun) + row-floor(length(sfun)/2) - 1 - ##print(c(row,window)) - splice[w,] <- apply(metnew[window,]*sfun,2,sum) - - rrow <- w - nsmooth -1 + rsel1[1] - rwindow <- 1:length(sfun) + rrow-floor(length(sfun)/2) - 1 - ##print(c(row,window)) - rsplice[w,] <- apply(radnew[rwindow,]*sfun,2,sum) + for (w in 1:(2 * nsmooth + 1)) { + row <- w - nsmooth - 1 + sel1[1] + window <- 1:length(sfun) + row - floor(length(sfun) / 2) - 1 + ## print(c(row,window)) + splice[w, ] <- apply(metnew[window, ] * sfun, 2, sum) + + rrow <- w - nsmooth - 1 + rsel1[1] + rwindow <- 1:length(sfun) + rrow - floor(length(sfun) / 2) - 1 + ## print(c(row,window)) + rsplice[w, ] <- apply(radnew[rwindow, ] * sfun, 2, sum) } - metnew[-nsmooth:nsmooth+sel1[1],] <- splice - radnew[-nsmooth:nsmooth+rsel1[1],] <- rsplice + metnew[-nsmooth:nsmooth + sel1[1], ] <- splice + radnew[-nsmooth:nsmooth + rsel1[1], ] <- rsplice } - } - - redowet <- tapply((metnew$conprr+metnew$pcpg),days,sum)/metres - - ## write out new file - write.met(metnew,outpath,mo,yr) - write.rad(radnew,outpath,mo,yr) - - } ### end MONTH + redowet <- tapply((metnew$conprr + metnew$pcpg), days, sum) / metres + + ## write out new file + write.met(metnew, outpath, mo, yr) + write.rad(radnew, outpath, mo, yr) + } ### end MONTH } ### end YEAR @@ -233,22 +229,21 @@ for(yr in metstart[2]:metstop[2]){ ## code currently assumes that metres is the same for both files origfolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/lat44.5lon-71.5/" -#NCEPfolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/temp/" +# NCEPfolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/temp/" NCEPfolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/NorthConwayMet/" -outfolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/snowcor3/lat44.5lon-71.5/" +outfolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/snowcor3/lat44.5lon-71.5/" -start <- c(1,2004) -end <- c(12,2005) - -for(y in start[2]:end[2]){ - for(m in 1:12){ +start <- c(1, 2004) +end <- c(12, 2005) +for (y in start[2]:end[2]) { + for (m in 1:12) { ## read files - met <- read.met(origfolder,m,y) - ncep <- read.met(NCEPfolder,m,y) + met <- read.met(origfolder, m, y) + ncep <- read.met(NCEPfolder, m, y) ## calc temp - temp <- met$theta*met$pi0/1004-273.15 + temp <- met$theta * met$pi0 / 1004 - 273.15 ## replace values sel <- temp < 0 @@ -256,7 +251,7 @@ for(y in start[2]:end[2]){ met$pcpg[sel] <- ncep$pcpg[sel] ## write new file - write.met(met,outfolder,m,y) + write.met(met, outfolder, m, y) } } @@ -268,43 +263,42 @@ for(y in start[2]:end[2]){ origfolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/lat44.5lon-71.5/" NCEPfolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/temp/" -outfolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/station/lat44.5lon-71.5/" -stationfolder <-"/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/NorthConwayMet/" +outfolder <- 
"/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/station/lat44.5lon-71.5/" +stationfolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/NorthConwayMet/" -stationfile <- dir(stationfolder,"dat.txt") -station <- read.csv(paste(stationfolder,stationfile,sep="")) +stationfile <- dir(stationfolder, "dat.txt") +station <- read.csv(paste(stationfolder, stationfile, sep = "")) smet <- ExtractNOAAstation(station) -start <- c(1,2004) -end <- c(12,2005) - -for(y in start[2]:end[2]){ - for(m in 1:12){ +start <- c(1, 2004) +end <- c(12, 2005) +for (y in start[2]:end[2]) { + for (m in 1:12) { ## read files - met <- read.met(origfolder,m,y) - ncep <- read.met(NCEPfolder,m,y) + met <- read.met(origfolder, m, y) + ncep <- read.met(NCEPfolder, m, y) ## calc temperatures - Ttower <- met$theta*met$pi0/1004-273.15 - Treg <- ncep$theta*ncep$pi0/1004-273.15 - ysel <- which(smet$date[,1] == y) - msel <- ysel[which(smet$date[ysel,2] == m)] - Ts <- rep(0.5*(smet$TMIN[msel]+smet$TMAX[msel]),each=4) + Ttower <- met$theta * met$pi0 / 1004 - 273.15 + Treg <- ncep$theta * ncep$pi0 / 1004 - 273.15 + ysel <- which(smet$date[, 1] == y) + msel <- ysel[which(smet$date[ysel, 2] == m)] + Ts <- rep(0.5 * (smet$TMIN[msel] + smet$TMAX[msel]), each = 4) ## calc PPT - Pt <- met$pcpg+met$conprr - Pr <- ncep$pcpg+ncep$conprr - Ps <- rep(smet$PRCP[msel]/8640,each=4) ## cm/day -> mm/sec - print(c(y,m,apply(cbind(Pt,Ps,Pr),2,sum))) - + Pt <- met$pcpg + met$conprr + Pr <- ncep$pcpg + ncep$conprr + Ps <- rep(smet$PRCP[msel] / 8640, each = 4) ## cm/day -> mm/sec + print(c(y, m, apply(cbind(Pt, Ps, Pr), 2, sum))) + ## replace values -# sel <- temp < 0 -# met$conprr[sel] <- ncep$conprr[sel] -# met$pcpg[sel] <- ncep$pcpg[sel] + # sel <- temp < 0 + # met$conprr[sel] <- ncep$conprr[sel] + # met$pcpg[sel] <- ncep$pcpg[sel] ## write new file -# write.met(met,outfolder,m,y) + # write.met(met,outfolder,m,y) } } @@ -320,68 +314,73 @@ for(y in start[2]:end[2]){ ## of storm lengths and time of day samplefolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/lat44.5lon-71.5/" -#infolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/snowcor2/lat44.5lon-71.5/" -infolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/NorthConwayMet/" -#outfolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/snowcor2/lat44.5lon-71.5/" +# infolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/snowcor2/lat44.5lon-71.5/" +infolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/NorthConwayMet/" +# outfolder <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/snowcor2/lat44.5lon-71.5/" outfolder <- infolder -start.sample <- c(6,1,2004) -end.sample <- c(12,31,2005) -freq <- 4 #obs/day +start.sample <- c(6, 1, 2004) +end.sample <- c(12, 31, 2005) +freq <- 4 # obs/day -start <- c(1,2000) -end <- c(12,2005) +start <- c(1, 2000) +end <- c(12, 2005) ## process sample -samp <- read.met.ts(samplefolder,start.sample,end.sample,"pcpg",average=FALSE) -tod <- rep(1:freq,length=length(samp)) -day <- rep(1:(length(samp)),each=freq)[1:length(samp)] -rain <- as.integer(samp>0) -drain <- c(0,diff(rain)) +samp <- read.met.ts(samplefolder, start.sample, end.sample, "pcpg", average = FALSE) +tod <- rep(1:freq, length = length(samp)) +day <- rep(1:(length(samp)), each = freq)[1:length(samp)] +rain <- as.integer(samp > 0) +drain <- c(0, diff(rain)) rstart <- which(drain == 1) -rstop <- which(drain == -1) -lmin <- min(length(rstart),length(rstop)) -storm.len <- rstop[1:lmin]-rstart[1:lmin] +rstop <- which(drain == -1) +lmin <- 
min(length(rstart), length(rstop)) +storm.len <- rstop[1:lmin] - rstart[1:lmin] storm.len2 <- storm.len storm.len2[storm.len2 > freq] <- freq slen <- table(storm.len2) stod <- table(tod[rstart]) -slen <- slen/sum(slen) ## storm length frequency distribution -stod <- stod/sum(stod) ## storm start time of day frequency distribution - -for(y in start[2]:end[2]){ - mstart <- 1; if(y == start[2]) {mstart<- start[1]} - mend <- 12; if(y == end[2]) {mend <- end[1]} - for(m in mstart:mend){ +slen <- slen / sum(slen) ## storm length frequency distribution +stod <- stod / sum(stod) ## storm start time of day frequency distribution +for (y in start[2]:end[2]) { + mstart <- 1 + if (y == start[2]) { + mstart <- start[1] + } + mend <- 12 + if (y == end[2]) { + mend <- end[1] + } + for (m in mstart:mend) { ## read files - met <- read.met(infolder,m,y) + met <- read.met(infolder, m, y) ## set vars - tod <- rep(1:freq,length=nrow(met)) - day <- rep(1:(nrow(met)),each=freq)[1:nrow(met)] + tod <- rep(1:freq, length = nrow(met)) + day <- rep(1:(nrow(met)), each = freq)[1:nrow(met)] nday <- max(day) ppt <- met$conprr + met$pcpg - dppt <- tapply(ppt,day,sum) - + dppt <- tapply(ppt, day, sum) + ## draw start times - sstart <- findInterval(runif(nday),cumsum(stod)) - send <- sstart + findInterval(runif(nday),cumsum(slen)) - send[send>(freq-1)] <- (freq-1) - wt <- 1/((send-sstart)+1) + sstart <- findInterval(runif(nday), cumsum(stod)) + send <- sstart + findInterval(runif(nday), cumsum(slen)) + send[send > (freq - 1)] <- (freq - 1) + wt <- 1 / ((send - sstart) + 1) ## create pseudo-precip record - for(i in 1:nday){ + for (i in 1:nday) { d <- which(day == i) ppt[d] <- 0.0 - ppt[d[1]+(sstart[i]:send[i])] <- dppt[i]*wt[i] + ppt[d[1] + (sstart[i]:send[i])] <- dppt[i] * wt[i] } met$conprr <- 0.0 met$pcpg <- ppt - + ## write out new ppt - write.met(met,outfolder,m,y) + write.met(met, outfolder, m, y) } } @@ -389,12 +388,10 @@ for(y in start[2]:end[2]){ ########### COMPARE AVERAGE TEMP AND PPT ACROSS MET FILES ############ orig <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/lat44.5lon-71.5/" -cor2<- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/snowcor2/lat44.5lon-71.5/" - -start <- c(1,1,2000) -end <- c(12,31,2003) - -Mo <- read.met.ts(orig,start,end,c("theta","conprr","pcpg")) -M2 <- read.met.ts(cor2,start,end,c("theta","conprr","pcpg")) +cor2 <- "/n/Moorcroft_Lab/Users/mcd/inputs/fluxnet/bartlett/snowcor2/lat44.5lon-71.5/" +start <- c(1, 1, 2000) +end <- c(12, 31, 2003) +Mo <- read.met.ts(orig, start, end, c("theta", "conprr", "pcpg")) +M2 <- read.met.ts(cor2, start, end, c("theta", "conprr", "pcpg")) diff --git a/modules/data.atmosphere/inst/scripts/met.workflow.NARR.R b/modules/data.atmosphere/inst/scripts/met.workflow.NARR.R index 49bb65ba19c..9556fd89ef1 100644 --- a/modules/data.atmosphere/inst/scripts/met.workflow.NARR.R +++ b/modules/data.atmosphere/inst/scripts/met.workflow.NARR.R @@ -6,40 +6,40 @@ library(RPostgreSQL) #--------------------------------------------------------------------------------------------------# # Clear old database connections for (i in dbListConnections(PostgreSQL())) { - db.close(i) + db.close(i) } -dbparms <- list(driver=driver, user=user, dbname=dbname, password=password, host=host) +dbparms <- list(driver = driver, user = user, dbname = dbname, password = password, host = host) #--------------------------------------------------------------------------------------------------# -# Download raw NARR from the internet - -if(raw){ - con <- db.open(dbparms) - outfolder <- 
paste0(dir,met,"/") - pkg <- "PEcAn.data.atmosphere" - NARR.host <- "geo.bu.edu" - fcn <- paste0("download.",met) - - args <- list(site.id, outfolder, start_year, end_year, overwrite=FALSE, verbose=FALSE, pkg,raw.host,dbparms,con=con) - - raw.id <- do.call(fcn,args) +# Download raw NARR from the internet + +if (raw) { + con <- db.open(dbparms) + outfolder <- paste0(dir, met, "/") + pkg <- "PEcAn.data.atmosphere" + NARR.host <- "geo.bu.edu" + fcn <- paste0("download.", met) + + args <- list(site.id, outfolder, start_year, end_year, overwrite = FALSE, verbose = FALSE, pkg, raw.host, dbparms, con = con) + + raw.id <- do.call(fcn, args) } #--------------------------------------------------------------------------------------------------# # Change to CF Standards -if (cf == TRUE){ - con <- db.open(dbparms) - input.id <- raw.id - outfolder <- paste0(dir,met,"_CF/") - pkg <- "PEcAn.data.atmosphere" - fcn <- paste0("met2CF.",met) - write <- TRUE - formatname <- 'CF Meteorology' - mimetype <- 'application/x-netcdf' - - cf.id <- convert_input(input.id,outfolder,formatname,mimetype,site.id,start_date,end_date,pkg,fcn,write,username,con=con,raw.host=raw.host) +if (cf == TRUE) { + con <- db.open(dbparms) + input.id <- raw.id + outfolder <- paste0(dir, met, "_CF/") + pkg <- "PEcAn.data.atmosphere" + fcn <- paste0("met2CF.", met) + write <- TRUE + formatname <- "CF Meteorology" + mimetype <- "application/x-netcdf" + + cf.id <- convert_input(input.id, outfolder, formatname, mimetype, site.id, start_date, end_date, pkg, fcn, write, username, con = con, raw.host = raw.host) } #--------------------------------------------------------------------------------------------------# @@ -54,52 +54,49 @@ if (cf == TRUE){ # write <- TRUE # formatname <- 'CF Meteorology' # mimetype <- 'application/x-netcdf' -# -# +# +# # perm.id <- convert_input(input.id,outfolder,formatname,mimetype,site.id,start_date,end_date,pkg,fcn,write,username,con=con) # } #--------------------------------------------------------------------------------------------------# # Extract for location - # perm.id (this isn't properly automated) +# perm.id (this isn't properly automated) # l <- list(raw.host=raw.host,newsite=newsite) -str_ns <- paste0(newsite %/% 1000000000, "-", newsite %% 1000000000) +str_ns <- paste0(newsite %/% 1000000000, "-", newsite %% 1000000000) -if (extract == TRUE){ +if (extract == TRUE) { input.id <- perm.id - con <- db.open(dbparms) - outfolder <- paste0(dir,met,"_CF_site_",str_ns,"/") - pkg <- "PEcAn.data.atmosphere" - fcn <- "extract.nc" - write <- TRUE - formatname <- 'CF Meteorology' - mimetype <- 'application/x-netcdf' - - extract.id <- convert_input(input.id,outfolder,formatname,mimetype,site.id,start_year,end_year,pkg,fcn,write,username,con=con,newsite = newsite,raw.host=raw.host,write=TRUE) + con <- db.open(dbparms) + outfolder <- paste0(dir, met, "_CF_site_", str_ns, "/") + pkg <- "PEcAn.data.atmosphere" + fcn <- "extract.nc" + write <- TRUE + formatname <- "CF Meteorology" + mimetype <- "application/x-netcdf" + + extract.id <- convert_input(input.id, outfolder, formatname, mimetype, site.id, start_year, end_year, pkg, fcn, write, username, con = con, newsite = newsite, raw.host = raw.host, write = TRUE) } #--------------------------------------------------------------------------------------------------# # Prepare for Model -if(nchar(model) >2){ - - con <- db.open(dbparms) - +if (nchar(model) > 2) { + con <- db.open(dbparms) + # Acquire lst (probably a better method, but this works for now) 
source("modules/data.atmosphere/R/site.lst.R") - lst <- site.lst(newsite,con) - + lst <- site.lst(newsite, con) + # Convert to model format - input.id <- extract.id - outfolder <- paste0(dir,met,"_",model,"_site_",str_ns,"/") - pkg <- paste0("PEcAn.",model) - fcn <- paste0("met2model.",model) - write <- TRUE + input.id <- extract.id + outfolder <- paste0(dir, met, "_", model, "_site_", str_ns, "/") + pkg <- paste0("PEcAn.", model) + fcn <- paste0("met2model.", model) + write <- TRUE overwrite <- "" - - model.id <- convert_input(input.id,outfolder,mod.formatname,mod.mimetype,newsite,start_year,end_year,pkg,fcn,write,username,con=con,lst=lst,overwrite=overwrite,raw.host=raw.host,write=TRUE) - + model.id <- convert_input(input.id, outfolder, mod.formatname, mod.mimetype, newsite, start_year, end_year, pkg, fcn, write, username, con = con, lst = lst, overwrite = overwrite, raw.host = raw.host, write = TRUE) } #--------------------------------------------------------------------------------------------------# diff --git a/modules/data.atmosphere/inst/scripts/met.workflow.R b/modules/data.atmosphere/inst/scripts/met.workflow.R index 7d25d432777..11e0a5c0aef 100644 --- a/modules/data.atmosphere/inst/scripts/met.workflow.R +++ b/modules/data.atmosphere/inst/scripts/met.workflow.R @@ -2,7 +2,7 @@ library(PEcAn.data.atmosphere) library(PEcAn.ED2) library(PEcAn.SIPNET) -options(warn=2) +options(warn = 2) site <- "US-Dk3" overwrite <- FALSE @@ -12,19 +12,18 @@ start_date <- as.POSIXlt("2001-01-01 00:00:00", tz = "UTC") end_date <- as.POSIXlt("2001-12-31 23:59:59", tz = "UTC") # download data -print(download.Ameriflux(site, "/tmp/met/ameriflux", start_date=start_date, end_date=end_date, overwrite=overwrite)) +print(download.Ameriflux(site, "/tmp/met/ameriflux", start_date = start_date, end_date = end_date, overwrite = overwrite)) # convert to CF -print(met2CF.Ameriflux("/tmp/met/ameriflux", site, "/tmp/met/cf", start_date=start_date, end_date=end_date, overwrite=overwrite)) +print(met2CF.Ameriflux("/tmp/met/ameriflux", site, "/tmp/met/cf", start_date = start_date, end_date = end_date, overwrite = overwrite)) # only execute if regional dataset -#print(permute.nc("/tmp/met/cf", site, "/tmp/met/permute")) -#print(extract.nc("/tmp/met/permute", site, "/tmp/met/extract", 45.2041, -68.7403)) +# print(permute.nc("/tmp/met/cf", site, "/tmp/met/permute")) +# print(extract.nc("/tmp/met/permute", site, "/tmp/met/extract", 45.2041, -68.7403)) # only execute if site level dataset -print(metgapfill("/tmp/met/cf", site, "/tmp/met/gapfill", start_date=start_date, end_date=end_date, overwrite=overwrite)) +print(metgapfill("/tmp/met/cf", site, "/tmp/met/gapfill", start_date = start_date, end_date = end_date, overwrite = overwrite)) # model specific -print(met2model.ED2("/tmp/met/gapfill", site, "/tmp/met/ed", start_date=start_date, end_date=end_date, overwrite=overwrite)) -print(met2model.SIPNET("/tmp/met/gapfill", site, "/tmp/met/sipnet", start_date=start_date, end_date=end_date, overwrite=overwrite)) - +print(met2model.ED2("/tmp/met/gapfill", site, "/tmp/met/ed", start_date = start_date, end_date = end_date, overwrite = overwrite)) +print(met2model.SIPNET("/tmp/met/gapfill", site, "/tmp/met/sipnet", start_date = start_date, end_date = end_date, overwrite = overwrite)) diff --git a/modules/data.atmosphere/inst/scripts/met.workflow.gen.R b/modules/data.atmosphere/inst/scripts/met.workflow.gen.R index 2f604906c3e..cccd752eabb 100644 --- a/modules/data.atmosphere/inst/scripts/met.workflow.gen.R +++ 
b/modules/data.atmosphere/inst/scripts/met.workflow.gen.R @@ -1,5 +1,5 @@ -# -# Generalized met workflow +# +# Generalized met workflow # Functional for downloading NARR, converting to CF, Rechunk/Permuting, extracting and prep for SIPNET #--------------------------------------------------------------------------------------------------# # Load libraries @@ -12,89 +12,86 @@ library(RPostgreSQL) # Setup database connection for (i in dbListConnections(PostgreSQL())) db.close(i) -dbparms <- list(driver=driver, user=user, dbname=dbname, password=password, host=host) +dbparms <- list(driver = driver, user = user, dbname = dbname, password = password, host = host) #--------------------------------------------------------------------------------------------------# -# Download raw data from the internet +# Download raw data from the internet -if (raw == TRUE){ -con <- db.open(dbparms) -outfolder <- paste0(dir,data.set,"/") -pkg <- "PEcAn.data.atmosphere" -fcn <- paste0("download.",fcn.data) +if (raw == TRUE) { + con <- db.open(dbparms) + outfolder <- paste0(dir, data.set, "/") + pkg <- "PEcAn.data.atmosphere" + fcn <- paste0("download.", fcn.data) -args <- list(data.set,outfolder,pkg,raw.host,start_year,end_year,site.id,dbparms,con) + args <- list(data.set, outfolder, pkg, raw.host, start_year, end_year, site.id, dbparms, con) -raw.id <- do.call(fcn,args) + raw.id <- do.call(fcn, args) } #--------------------------------------------------------------------------------------------------# # Change to CF Standards -if (cf == TRUE){ -con <- db.open(dbparms) -input.id <- raw.id -outfolder <- paste0(dir,data.set,"_CF/") -pkg <- "PEcAn.data.atmosphere" -fcn <- paste0("met2CF.",fcn.data) -write <- TRUE +if (cf == TRUE) { + con <- db.open(dbparms) + input.id <- raw.id + outfolder <- paste0(dir, data.set, "_CF/") + pkg <- "PEcAn.data.atmosphere" + fcn <- paste0("met2CF.", fcn.data) + write <- TRUE -cf.id <- convert_input(input.id,outfolder,pkg,fcn,write,username,con) # doesn't update existing record + cf.id <- convert_input(input.id, outfolder, pkg, fcn, write, username, con) # doesn't update existing record } #--------------------------------------------------------------------------------------------------# # Rechunk and Permute -if (perm == TRUE){ -con <- db.open(dbparms) -input.id <- cf.id -outfolder <- paste0(dir,data.set,"_CF_Permute/") -pkg <- "PEcAn.data.atmosphere" -fcn <- "permute.nc" -write <- TRUE +if (perm == TRUE) { + con <- db.open(dbparms) + input.id <- cf.id + outfolder <- paste0(dir, data.set, "_CF_Permute/") + pkg <- "PEcAn.data.atmosphere" + fcn <- "permute.nc" + write <- TRUE -perm.id <- convert_input(input.id,outfolder,pkg,fcn,write,username,con) + perm.id <- convert_input(input.id, outfolder, pkg, fcn, write, username, con) } #--------------------------------------------------------------------------------------------------# # Extract for location -if (extract == TRUE){ -con <- db.open(dbparms) -input.id <- perm.id -str_ns <- paste0(newsite %/% 1000000000, "-", newsite %% 1000000000) -outfolder <- paste0("/projectnb/dietzelab/pecan.data/input/",data.set,"_CF_site_",str_ns,"/") -pkg <- "PEcAn.data.atmosphere" -fcn <- "extract.nc" -write <- TRUE +if (extract == TRUE) { + con <- db.open(dbparms) + input.id <- perm.id + str_ns <- paste0(newsite %/% 1000000000, "-", newsite %% 1000000000) + outfolder <- paste0("/projectnb/dietzelab/pecan.data/input/", data.set, "_CF_site_", str_ns, "/") + pkg <- "PEcAn.data.atmosphere" + fcn <- "extract.nc" + write <- TRUE -extract.id <- 
convert_input(input.id,outfolder,pkg,fcn,write,username,con,newsite = newsite) + extract.id <- convert_input(input.id, outfolder, pkg, fcn, write, username, con, newsite = newsite) } #--------------------------------------------------------------------------------------------------# # Prepare for Model -if(nchar(model) >2){ +if (nchar(model) > 2) { + con <- db.open(dbparms) -con <- db.open(dbparms) + # Acquire lst (probably a better method, but this works for now) + lst <- site.lst(newsite, con) -# Acquire lst (probably a better method, but this works for now) -lst <- site.lst(newsite,con) + # Convert to model format + input.id <- extract.id + outfolder <- paste0(dir, data.set, "_", model, "_site_", str_ns, "/") + pkg <- paste0("PEcAn.", model) + fcn <- paste0("met2model.", model) + write <- TRUE + overwrite <- "" -# Convert to model format -input.id <- extract.id -outfolder <- paste0(dir,data.set,"_",model,"_site_",str_ns,"/") -pkg <- paste0("PEcAn.",model) -fcn <- paste0("met2model.",model) -write <- TRUE -overwrite <- "" - -model.id <- convert_input(input.id,outfolder,pkg,fcn,write,username,con,lst=lst,overwrite=overwrite) + model.id <- convert_input(input.id, outfolder, pkg, fcn, write, username, con, lst = lst, overwrite = overwrite) } #--------------------------------------------------------------------------------------------------# # Clear old database connections for (i in dbListConnections(PostgreSQL())) db.close(i) - - diff --git a/modules/data.atmosphere/inst/scripts/met.workflow.params.NARR.R b/modules/data.atmosphere/inst/scripts/met.workflow.params.NARR.R index 384f4c05c51..0af834b40be 100644 --- a/modules/data.atmosphere/inst/scripts/met.workflow.params.NARR.R +++ b/modules/data.atmosphere/inst/scripts/met.workflow.params.NARR.R @@ -1,10 +1,10 @@ # Database setup -driver <- "PostgreSQL" -user <- "bety" -dbname <- "bety" +driver <- "PostgreSQL" +user <- "bety" +dbname <- "bety" password <- "bety" -host <- "psql-pecan.bu.edu" +host <- "psql-pecan.bu.edu" # Select username, host and directory folder for data @@ -14,7 +14,7 @@ username <- "" # dir <- "/fs/data4/" raw.host <- "geo.bu.edu" -dir <- "/projectnb/dietzelab/pecan.data/input/" +dir <- "/projectnb/dietzelab/pecan.data/input/" # raw.host <- "pecan2.bu.edu" # dir <- "/home/ecowdery/input/" @@ -22,29 +22,29 @@ dir <- "/projectnb/dietzelab/pecan.data/input/" ####################################################### # # ####################################################### -# Choose meteorology data set. +# Choose meteorology data set. met <- "NARR" ####################################################### # Set start and end dates (when possible otherwise NA) -start_year <- 1979 -end_year <- 2013 +start_year <- 1979 +end_year <- 2013 ####################################################### # Location -# Set site id number when possible +# Set site id number when possible # RHIN: 1000000003 # ORNL: 854 # NARR: 1135 site.id <- 1135 # if(regional==TRUE){ -# site.id <- NA -# extract <- TRUE # +# site.id <- NA +# extract <- TRUE # # } @@ -59,21 +59,21 @@ extract.id <- 1000000129 # RHIN = 1000000059 # ORNL = 1000000060 -raw <- FALSE +raw <- FALSE raw.id <- 1000000127 ####################################################### # Change to CF standards? If not, specify cf.id # NARR cf.id on geo = 288 -cf <- FALSE +cf <- FALSE cf.id <- 1000000128 ####################################################### # Permute data? 
If not, specify perm.id # NARR perm.id on geo = 1000000023 -perm <- FALSE +perm <- FALSE perm.id <- 1000000023 ####################################################### @@ -81,25 +81,19 @@ perm.id <- 1000000023 model <- "DALEC" -if(model == "ED2"){ - mod.formatname <- 'ed.met_driver_header_files_format' - mod.mimetype <- 'text/plain' -}else if(model == "SIPNET"){ - mod.formatname <- 'Sipnet.climna' - mod.mimetype <- 'text/csv' -}else if(model == "BIOCRO"){ - mod.formatname <- 'biocromet' - mod.mimetype <- 'text/csv' -}else if(model == "DALEC"){ - mod.formatname <- 'DALEC meteorology' - mod.mimetype <- 'text/plain' +if (model == "ED2") { + mod.formatname <- "ed.met_driver_header_files_format" + mod.mimetype <- "text/plain" +} else if (model == "SIPNET") { + mod.formatname <- "Sipnet.climna" + mod.mimetype <- "text/csv" +} else if (model == "BIOCRO") { + mod.formatname <- "biocromet" + mod.mimetype <- "text/csv" +} else if (model == "DALEC") { + mod.formatname <- "DALEC meteorology" + mod.mimetype <- "text/plain" } ####################################################### # Run met workflow - - - - - - diff --git a/modules/data.atmosphere/inst/scripts/met.workflow.params.R b/modules/data.atmosphere/inst/scripts/met.workflow.params.R index 27445b88220..128bf51ac84 100644 --- a/modules/data.atmosphere/inst/scripts/met.workflow.params.R +++ b/modules/data.atmosphere/inst/scripts/met.workflow.params.R @@ -1,13 +1,13 @@ # Database setup -driver <- "PostgreSQL" -user <- "bety" -dbname <- "bety" +driver <- "PostgreSQL" +user <- "bety" +dbname <- "bety" password <- "bety" -host <- "psql-pecan.bu.edu" +host <- "psql-pecan.bu.edu" ####################################################### -# Choose data set. -# Current choices: +# Choose data set. +# Current choices: # NARR # Ameriflux # FACE_RHIN @@ -19,17 +19,17 @@ fcn.data <- "NARR" username <- "" raw.host <- "geo.bu.edu" -dir <- "/projectnb/dietzelab/pecan.data/input/" +dir <- "/projectnb/dietzelab/pecan.data/input/" # Set start and end dates (when possible otherwise NA) -start_year <- 1979 -end_year <- 2013 +start_year <- 1979 +end_year <- 2013 # Download raw data? If not, specify raw.id # NARR raw.id on geo = 285 -raw <- FALSE +raw <- FALSE raw.id <- 285 # Set site id number when possible (not possible for NARR) @@ -43,13 +43,13 @@ raw.id <- 285 # Change to CF standards? If not, specify cf.id # NARR cf.id on geo = 288 -cf <- FALSE +cf <- FALSE cf.id <- 288 # Permute data? 
If not, specify perm.id # NARR perm.id on geo = 1000000023 -perm <- FALSE +perm <- FALSE perm.id <- 1000000023 # Select extraction site @@ -63,4 +63,4 @@ model <- "SIPNET" # Run met workflow -source("~/pecan/modules/data.atmosphere/inst/scripts/met.workflow.gen.R") \ No newline at end of file +source("~/pecan/modules/data.atmosphere/inst/scripts/met.workflow.gen.R") diff --git a/modules/data.atmosphere/inst/scripts/met_ensemble_testing.R b/modules/data.atmosphere/inst/scripts/met_ensemble_testing.R index e4aa89b9f25..713fb89468b 100644 --- a/modules/data.atmosphere/inst/scripts/met_ensemble_testing.R +++ b/modules/data.atmosphere/inst/scripts/met_ensemble_testing.R @@ -1,11 +1,11 @@ ## met ensemble helper script -site <- list(id=1000000650) +site <- list(id = 1000000650) input_met <- PalEON_ENS start_date <- "01/01/1950" end_date <- "12/31/2015" model <- "SIPNET" -host = "localhost" -dbparms = list(user=bety,password = bety,host=localhost, dbname=bety,driver=PostgreSQL,write=TRUE) -dir = "/home/carya/outdir/dbfiles" -overwrite = FALSE \ No newline at end of file +host <- "localhost" +dbparms <- list(user = bety, password = bety, host = localhost, dbname = bety, driver = PostgreSQL, write = TRUE) +dir <- "/home/carya/outdir/dbfiles" +overwrite <- FALSE diff --git a/modules/data.atmosphere/inst/scripts/ncep/met2csv.R b/modules/data.atmosphere/inst/scripts/ncep/met2csv.R index f2c822e8c88..b872ee720df 100644 --- a/modules/data.atmosphere/inst/scripts/ncep/met2csv.R +++ b/modules/data.atmosphere/inst/scripts/ncep/met2csv.R @@ -8,14 +8,14 @@ lati <- as.numeric(args[1]) load("/home/dlebauer/met/ncep/latlon.RData") -qair2rh <- function(qair, temp, press = 1013.25){ - es <- 6.112 * exp((17.67 * temp)/(temp + 243.5)) - e <- qair * press / (0.378 * qair + 0.622) - rh <- e / es - rh[rh > 1] <- 1 - rh[rh < 0] <- 0 - - return(rh) +qair2rh <- function(qair, temp, press = 1013.25) { + es <- 6.112 * exp((17.67 * temp) / (temp + 243.5)) + e <- qair * press / (0.378 * qair + 0.622) + rh <- e / es + rh[rh > 1] <- 1 + rh[rh < 0] <- 0 + + return(rh) } @@ -35,86 +35,90 @@ qair2rh <- function(qair, temp, press = 1013.25){ ## } -for(loni in 1:192){ - - Tmin = 0# Minimum temperature initialization - - currentlat <- round(Lat[lati], 2) - currentlon <- round(Lon[loni], 2) - weather.dir <- file.path("/home/dlebauer/met/ncep/", - paste0(abs(currentlat), - ifelse(currentlat>0,"N", "S"), "x", - abs(currentlon), - ifelse(currentlon>0, "E", "W"))) - - weather <- data.table(read.csv(file.path(weather.dir, "rawweather.csv"))) - - ## unit conversion - Tmin <- min(weather$temp - 273.15, 0) - - ## qc functions restricting to "valid range" given in .nc meta-data - qctemp <- function(x) ifelse(x > 400 | x < 100, mean(x[x < 400 & x > 100]), x) - qcsolar <- function(x)ifelse(x<0, 0, ifelse(abs(x) > 1300, mean(x[x < 1300]), x)) - qcwind <- function(x) ifelse(abs(x) > 102, mean(abs(x[x < 102])), x) - qcprecip <- function(x) ifelse(x > 0.005 | x < 0 , mean(x[x < 0.005 & x >0], x)) - qcrh <- function(x) ifelse(x > 100 | x < 0, mean(x[x < 100 & x>0]), x) ## using logical range (0-100) rather than "valid range (-25-125)" - qcshum <- function(x) ifelse(x > 100 | x < 0, mean(x[x < 0.6553 & x > 0]), x) - ## shum:long_name = "mean Daily Specific Humidity at 2 m" ; - ## shum:units = "kg/kg" ; - ## rhum:long_name = "mean Daily relative humidity at sigma level 995" ; - ## rhum:units = "%" ; - ## prate:long_name = "mean Daily Precipitation Rate at surface" ; - ## prate:units = "Kg/m^2/s" ; - ## uwnd:long_name = "mean Daily u-wind at 10 m" ; - 
## uwnd:units = "m/s" ; - ## vwnd:long_name = "mean Daily v-wind at 10 m" ; - ## vwnd:units = "m/s" ; - ## air:long_name = "mean Daily Air temperature at 2 m" ; - ## air:units = "degK" ; - ## dswrf:long_name = "mean Daily Downward Solar Radiation Flux at surface" ; - ## dswrf:units = "W/m^2" ; - - x <- weather[,list(year, day, - Tavg = qctemp(temp), - Tmax = qctemp(tempmax), - Tmin = qctemp(tempmin), - solarR = PEcAn.utils::ud_convert(qcsolar(solar), - "watt day", - "megajoule"), - WS = sqrt(qcwind(uwind)^2 + qcwind(vwind)^2), - precip = PEcAn.utils::ud_convert(qcprecip(precip), "mm s-1", "mm day-1")), - qcshum(shum), - qcrh(rh)] - - - x$RHmax <- x[, qair2rh(shum, Tmin)] - x$RHmin <- x[, qair2rh(shum, Tmax)] - x$RHavg <- x[, (RHmax + RHmin) / 2] - - - - forweach <- x[, list(year, day, solarR, - Tmax, Tmin, Tavg, - RHmax, RHmin, RHavg, WS, - precip)] - ## fieldc=0.4 - ## wiltp=0.1 - ## mdep=2 - - ##call weachNEW - dat <- weachNEW(forweach, - lat = currentlat, - ts = 1, - temp.units = "Celsius", - rh.units = "fraction", - ws.units = "mps", - pp.units = "mm") - write.csv(dat, file.path(weather.dir, "weather.csv"), row.names = FALSE) +for (loni in 1:192) { + Tmin <- 0 # Minimum temperature initialization + + currentlat <- round(Lat[lati], 2) + currentlon <- round(Lon[loni], 2) + weather.dir <- file.path( + "/home/dlebauer/met/ncep/", + paste0( + abs(currentlat), + ifelse(currentlat > 0, "N", "S"), "x", + abs(currentlon), + ifelse(currentlon > 0, "E", "W") + ) + ) + + weather <- data.table(read.csv(file.path(weather.dir, "rawweather.csv"))) + + ## unit conversion + Tmin <- min(weather$temp - 273.15, 0) + + ## qc functions restricting to "valid range" given in .nc meta-data + qctemp <- function(x) ifelse(x > 400 | x < 100, mean(x[x < 400 & x > 100]), x) + qcsolar <- function(x) ifelse(x < 0, 0, ifelse(abs(x) > 1300, mean(x[x < 1300]), x)) + qcwind <- function(x) ifelse(abs(x) > 102, mean(abs(x[x < 102])), x) + qcprecip <- function(x) ifelse(x > 0.005 | x < 0, mean(x[x < 0.005 & x > 0], x)) + qcrh <- function(x) ifelse(x > 100 | x < 0, mean(x[x < 100 & x > 0]), x) ## using logical range (0-100) rather than "valid range (-25-125)" + qcshum <- function(x) ifelse(x > 100 | x < 0, mean(x[x < 0.6553 & x > 0]), x) + ## shum:long_name = "mean Daily Specific Humidity at 2 m" ; + ## shum:units = "kg/kg" ; + ## rhum:long_name = "mean Daily relative humidity at sigma level 995" ; + ## rhum:units = "%" ; + ## prate:long_name = "mean Daily Precipitation Rate at surface" ; + ## prate:units = "Kg/m^2/s" ; + ## uwnd:long_name = "mean Daily u-wind at 10 m" ; + ## uwnd:units = "m/s" ; + ## vwnd:long_name = "mean Daily v-wind at 10 m" ; + ## vwnd:units = "m/s" ; + ## air:long_name = "mean Daily Air temperature at 2 m" ; + ## air:units = "degK" ; + ## dswrf:long_name = "mean Daily Downward Solar Radiation Flux at surface" ; + ## dswrf:units = "W/m^2" ; + + x <- weather[ + , list(year, day, + Tavg = qctemp(temp), + Tmax = qctemp(tempmax), + Tmin = qctemp(tempmin), + solarR = PEcAn.utils::ud_convert( + qcsolar(solar), + "watt day", + "megajoule" + ), + WS = sqrt(qcwind(uwind)^2 + qcwind(vwind)^2), + precip = PEcAn.utils::ud_convert(qcprecip(precip), "mm s-1", "mm day-1") + ), + qcshum(shum), + qcrh(rh) + ] + + + x$RHmax <- x[, qair2rh(shum, Tmin)] + x$RHmin <- x[, qair2rh(shum, Tmax)] + x$RHavg <- x[, (RHmax + RHmin) / 2] + + + + forweach <- x[, list( + year, day, solarR, + Tmax, Tmin, Tavg, + RHmax, RHmin, RHavg, WS, + precip + )] + ## fieldc=0.4 + ## wiltp=0.1 + ## mdep=2 + + ## call weachNEW + dat <- 
weachNEW(forweach, + lat = currentlat, + ts = 1, + temp.units = "Celsius", + rh.units = "fraction", + ws.units = "mps", + pp.units = "mm" + ) + write.csv(dat, file.path(weather.dir, "weather.csv"), row.names = FALSE) } - - - - - - - diff --git a/modules/data.atmosphere/inst/scripts/runtest-download.NOAA_GEFS.R b/modules/data.atmosphere/inst/scripts/runtest-download.NOAA_GEFS.R index e38d1fe7191..411e27eee4c 100644 --- a/modules/data.atmosphere/inst/scripts/runtest-download.NOAA_GEFS.R +++ b/modules/data.atmosphere/inst/scripts/runtest-download.NOAA_GEFS.R @@ -1,11 +1,11 @@ -#This program is to test the R script donload.NOAA.R during development. -#It accepts a command line argument specifying the case test number to run, otherwise, it -#defaults to the first case. +# This program is to test the R script donload.NOAA.R during development. +# It accepts a command line argument specifying the case test number to run, otherwise, it +# defaults to the first case. -test_no = 1 -args = commandArgs(trailingOnly = TRUE) +test_no <- 1 +args <- commandArgs(trailingOnly = TRUE) if (length(args) > 0) { - test_no = as.integer(args[1]) + test_no <- as.integer(args[1]) } devtools::load_all("~/pecan/modules/data.atmosphere/") @@ -17,22 +17,18 @@ devtools::load_all("~/pecan/modules/data.atmosphere/") ##' @param end date # Other parameters optional -if (test_no == 1) { #Default case - should work - normal 16 day forecast - download.NOAA_GEFS("~/Working/results", lat.in= 45.805925, lon.in = -90.07961, "US-WCr") -} else if (test_no == 2) { #Should be an Error - date out of bounds - download.NOAA_GEFS("~/Working/results", lat.in= 45.805925, lon.in = -90.07961, "US-WCr", Sys.time() - lubridate::days(12), Sys.time(), verbose = FALSE) -} else if (test_no == 3) { #Should work - normal 16 day forecast - download.NOAA_GEFS("~/Working/results", lat.in= 45.805925, lon.in = -90.07961, "US-WCr", Sys.time() - lubridate::days(4), verbose = FALSE) -} else if (test_no == 4) { #Should work - 1 day's worth of data - download.NOAA_GEFS("~/Working/results", llat.in= 45.805925, lon.in = -90.07961, "US-WCr", Sys.time() - lubridate::days(8), Sys.time() - lubridate::days(7), verbose = FALSE) -} else if (test_no == 5) { #Should be an error - date out of bounds - download.NOAA_GEFS("~/Working/results", lat.in= 45.805925, lon.in = -90.07961, "US-WCr", Sys.Date() + lubridate::days(1), verbose = FALSE) -} else if (test_no == 6) { #Should crash - timespan not large enough - download.NOAA_GEFS("~/Working/results", lat.in= 45.805925, lon.in = -90.07961, "US-WCr", Sys.time(), Sys.time(), verbose = FALSE) -} else if (test_no == 7) { #Should work, but have the timespan shrunk by one day. Output should be identical to default case. 
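# Illustrative invocation sketch for the NOAA_GEFS test runner above (the script path is
# taken from this patch; the chosen case number is just an example). Because the case is
# read via commandArgs(trailingOnly = TRUE), a specific case can be selected from the
# shell, e.g. case 3 (the normal 16-day forecast):
#   Rscript modules/data.atmosphere/inst/scripts/runtest-download.NOAA_GEFS.R 3
# With no trailing argument the script falls back to test_no <- 1.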
- download.NOAA_GEFS("~/Working/results", lat.in= 45.805925, lon.in = -90.07961, "US-WCr", Sys.time(), Sys.time() + lubridate::days(17), verbose = FALSE) +if (test_no == 1) { # Default case - should work - normal 16 day forecast + download.NOAA_GEFS("~/Working/results", lat.in = 45.805925, lon.in = -90.07961, "US-WCr") +} else if (test_no == 2) { # Should be an Error - date out of bounds + download.NOAA_GEFS("~/Working/results", lat.in = 45.805925, lon.in = -90.07961, "US-WCr", Sys.time() - lubridate::days(12), Sys.time(), verbose = FALSE) +} else if (test_no == 3) { # Should work - normal 16 day forecast + download.NOAA_GEFS("~/Working/results", lat.in = 45.805925, lon.in = -90.07961, "US-WCr", Sys.time() - lubridate::days(4), verbose = FALSE) +} else if (test_no == 4) { # Should work - 1 day's worth of data + download.NOAA_GEFS("~/Working/results", llat.in = 45.805925, lon.in = -90.07961, "US-WCr", Sys.time() - lubridate::days(8), Sys.time() - lubridate::days(7), verbose = FALSE) +} else if (test_no == 5) { # Should be an error - date out of bounds + download.NOAA_GEFS("~/Working/results", lat.in = 45.805925, lon.in = -90.07961, "US-WCr", Sys.Date() + lubridate::days(1), verbose = FALSE) +} else if (test_no == 6) { # Should crash - timespan not large enough + download.NOAA_GEFS("~/Working/results", lat.in = 45.805925, lon.in = -90.07961, "US-WCr", Sys.time(), Sys.time(), verbose = FALSE) +} else if (test_no == 7) { # Should work, but have the timespan shrunk by one day. Output should be identical to default case. + download.NOAA_GEFS("~/Working/results", lat.in = 45.805925, lon.in = -90.07961, "US-WCr", Sys.time(), Sys.time() + lubridate::days(17), verbose = FALSE) } - - - - diff --git a/modules/data.atmosphere/tests/test.NARR.R b/modules/data.atmosphere/tests/test.NARR.R index 5cf00da8dc0..3fa81c0fff2 100644 --- a/modules/data.atmosphere/tests/test.NARR.R +++ b/modules/data.atmosphere/tests/test.NARR.R @@ -1,21 +1,20 @@ -if(FALSE){ ##### NOT RUN -# Currently works for: -# NARR sites that haven't previously been extracted (need to clean up database) -# ED2 +if (FALSE) { ##### NOT RUN + # Currently works for: + # NARR sites that haven't previously been extracted (need to clean up database) + # ED2 -rm(list = setdiff(ls(), lsf.str())) # clear variables but not sourced functions -for (i in dbListConnections(PostgreSQL())) db.close(i) #close any stray database connections + rm(list = setdiff(ls(), lsf.str())) # clear variables but not sourced functions + for (i in dbListConnections(PostgreSQL())) db.close(i) # close any stray database connections -site = 780 # Duplicate dbfiles are still a problem so for now need to choose sites that haven't been extracted yet -input = "NARR" -start_date = "1979-01-01 00:00:00" -end_date = "2013-12-31 23:59:00" -model = "ED2" -host = list(name = "geo.bu.edu") -bety = list(user = "bety", dbname = "bety", password="bety", host="psql-pecan.bu.edu") -dir ="/projectnb/dietzelab/pecan.data/input/" - -outfolder <- met.process(site, input, start_date, end_date, model, host, bety, dir) + site <- 780 # Duplicate dbfiles are still a problem so for now need to choose sites that haven't been extracted yet + input <- "NARR" + start_date <- "1979-01-01 00:00:00" + end_date <- "2013-12-31 23:59:00" + model <- "ED2" + host <- list(name = "geo.bu.edu") + bety <- list(user = "bety", dbname = "bety", password = "bety", host = "psql-pecan.bu.edu") + dir <- "/projectnb/dietzelab/pecan.data/input/" + outfolder <- met.process(site, input, start_date, end_date, model, host, bety, 
dir) } diff --git a/modules/data.atmosphere/tests/testthat.R b/modules/data.atmosphere/tests/testthat.R index af1aebc1cf4..6ecbda37ec7 100644 --- a/modules/data.atmosphere/tests/testthat.R +++ b/modules/data.atmosphere/tests/testthat.R @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html diff --git a/modules/data.atmosphere/tests/testthat/helper.R b/modules/data.atmosphere/tests/testthat/helper.R index deba7cbdd0c..9c78c410971 100644 --- a/modules/data.atmosphere/tests/testthat/helper.R +++ b/modules/data.atmosphere/tests/testthat/helper.R @@ -13,24 +13,30 @@ #' # Only messages on stderr are recognized #' expect_failure(expect_log("Hello", "Hello")) #' -expect_log <- function(object, regexp, ...){ - qobj <- rlang::enquo(object) - msg <- capture.output( - {val <- rlang::eval_tidy(qobj)}, - type = "message") - label = rlang::expr_label(rlang::get_expr(qobj)) +expect_log <- function(object, regexp, ...) { + qobj <- rlang::enquo(object) + msg <- capture.output( + { + val <- rlang::eval_tidy(qobj) + }, + type = "message" + ) + label <- rlang::expr_label(rlang::get_expr(qobj)) - expect( - length(msg) > 0, - sprintf("%s did not produce any log messages", label)) - msg = paste(msg, collapse = "\n") - expect( - grepl(regexp, msg, ...), - sprintf( - "%s does not match %s.\nActual value: \"%s\"", - label, - encodeString(regexp, quote = "\""), - encodeString(msg))) + expect( + length(msg) > 0, + sprintf("%s did not produce any log messages", label) + ) + msg <- paste(msg, collapse = "\n") + expect( + grepl(regexp, msg, ...), + sprintf( + "%s does not match %s.\nActual value: \"%s\"", + label, + encodeString(regexp, quote = "\""), + encodeString(msg) + ) + ) - invisible(val) + invisible(val) } diff --git a/modules/data.atmosphere/tests/testthat/test.cf-downscaling.R b/modules/data.atmosphere/tests/testthat/test.cf-downscaling.R index f2dce0296a3..5fc4b2b84ed 100644 --- a/modules/data.atmosphere/tests/testthat/test.cf-downscaling.R +++ b/modules/data.atmosphere/tests/testthat/test.cf-downscaling.R @@ -2,24 +2,29 @@ context("downscaling") daily.nc <- ncdf4::nc_open("data/urbana_daily_test.nc") on.exit(ncdf4::nc_close(daily.nc), add = TRUE) -daily.cf <- load.cfmet(met.nc = daily.nc, lat = 39.75, lon = -87.25, - start.date = "1951-01-02", end.date = "1951-05-31") +daily.cf <- load.cfmet( + met.nc = daily.nc, lat = 39.75, lon = -87.25, + start.date = "1951-01-02", end.date = "1951-05-31" +) test_that( - paste("cfmet.downscale.time works\n", - "these are capturing the current state of the downscale algorithms;\n", - "actual values will need to be revised if (when) algorithms change"), -{ - b <- cfmet.downscale.time(cfmet = daily.cf, lat = 40) - expect_equal(unique(b$year), 1951) - expect_equal(range(b$doy), c(2,151)) - expect_equal(unique(b$hour), 0:23) - expect_equal(round(range(b$downwelling_photosynthetic_photon_flux)), c(0, 2061)) - expect_equal(round(range(b$air_temperature)), c(-22, 31)) - # expect_equal(round(range(b$relative_humidity)), c(0.30569194491299, 1)) - expect_equal(signif(range(b$precipitation_flux), 3), c(0, 1.67e-05)) - expect_equal(signif(range(b$wind), 2), c(0.066, 6.60)) -}) + 
paste( + "cfmet.downscale.time works\n", + "these are capturing the current state of the downscale algorithms;\n", + "actual values will need to be revised if (when) algorithms change" + ), + { + b <- cfmet.downscale.time(cfmet = daily.cf, lat = 40) + expect_equal(unique(b$year), 1951) + expect_equal(range(b$doy), c(2, 151)) + expect_equal(unique(b$hour), 0:23) + expect_equal(round(range(b$downwelling_photosynthetic_photon_flux)), c(0, 2061)) + expect_equal(round(range(b$air_temperature)), c(-22, 31)) + # expect_equal(round(range(b$relative_humidity)), c(0.30569194491299, 1)) + expect_equal(signif(range(b$precipitation_flux), 3), c(0, 1.67e-05)) + expect_equal(signif(range(b$wind), 2), c(0.066, 6.60)) + } +) test_that("downscaling with timestep", { df <- data.frame( @@ -29,7 +34,8 @@ test_that("downscaling with timestep", { air_pressure = 1030, wind_speed = 0, relative_humidity = 0.5, - precipitation_flux = 2 / (60 * 60)) # units: mm/sec + precipitation_flux = 2 / (60 * 60) + ) # units: mm/sec r1 <- cfmet.downscale.daily(df, output.dt = 1, lat = 40) r6 <- cfmet.downscale.daily(df, output.dt = 6, lat = 40) @@ -39,13 +45,12 @@ test_that("downscaling with timestep", { expect_equal(nrow(r6), 4) expect_equal(nrow(r12), 2) - list(r1, r6,r12) %>% - purrr::walk(~{ - expect_equal(mean(.$air_temperature), (df$air_temperature - 273.15)) # input is K, output is C - expect_equal(sum(.$precipitation_flux), df$precipitation_flux) - expect_true(all(.$wind == df$wind_speed)) - }) - + list(r1, r6, r12) %>% + purrr::walk(~ { + expect_equal(mean(.$air_temperature), (df$air_temperature - 273.15)) # input is K, output is C + expect_equal(sum(.$precipitation_flux), df$precipitation_flux) + expect_true(all(.$wind == df$wind_speed)) + }) }) test_that("output for a given day not affected by adjacent days", { @@ -58,15 +63,16 @@ test_that("output for a given day not affected by adjacent days", { air_pressure = 1030, wind_speed = 0, relative_humidity = 0.5, - precipitation_flux = c(0, 2 / (60 * 60))) + precipitation_flux = c(0, 2 / (60 * 60)) + ) # print(cfmet.downscale.daily(df[2,], 6, 40)) # print(cfmet.downscale.daily(df, 6, 40)) - expect_equal(cfmet.downscale.daily(df[1,], 6, 40), cfmet.downscale.daily(df, 6, 40)[1:4,]) - expect_equal(cfmet.downscale.daily(df[2,], 6, 40), cfmet.downscale.daily(df, 6, 40)[5:8,]) + expect_equal(cfmet.downscale.daily(df[1, ], 6, 40), cfmet.downscale.daily(df, 6, 40)[1:4, ]) + expect_equal(cfmet.downscale.daily(df[2, ], 6, 40), cfmet.downscale.daily(df, 6, 40)[5:8, ]) }) -test_that("get.ncvector works",{ +test_that("get.ncvector works", { run.dates <- data.frame(index = 1:2, date = c(lubridate::ymd("1951-01-01 UTC"), lubridate::ymd("1951-01-02 UTC"))) res <- get.ncvector("air_temperature", lati = 1, loni = 1, run.dates, met.nc = daily.nc) expect_type(res, "double") diff --git a/modules/data.atmosphere/tests/testthat/test.check_met_file.R b/modules/data.atmosphere/tests/testthat/test.check_met_file.R index d03a73d874e..4be1457e0a7 100644 --- a/modules/data.atmosphere/tests/testthat/test.check_met_file.R +++ b/modules/data.atmosphere/tests/testthat/test.check_met_file.R @@ -3,22 +3,21 @@ context("Check met input file correctly detects errors") test_that( "Check met input correctly finds errors in bad met files", { - urbana_daily_results <- check_met_input_file("data/urbana_daily_test.nc") expect_s3_class(urbana_daily_results, "data.frame") expect_true( all(c("correct dimensions", "required variable present", "variable has correct units") %in% - urbana_daily_results[["test_type"]]) + 
urbana_daily_results[["test_type"]]) ) expect_true( all(urbana_daily_results %>% - dplyr::filter(test_type == "variable has correct units") %>% - dplyr::pull(test_passed)) + dplyr::filter(test_type == "variable has correct units") %>% + dplyr::pull(test_passed)) ) expect_false( all(urbana_daily_results %>% - dplyr::filter(target_variable %in% c("dimensions", "air_pressure", "eastward_wind")) %>% - dplyr::pull(test_passed)) + dplyr::filter(target_variable %in% c("dimensions", "air_pressure", "eastward_wind")) %>% + dplyr::pull(test_passed)) ) urbana_subdaily_results <- check_met_input_file("data/urbana_subdaily_test.nc") diff --git a/modules/data.atmosphere/tests/testthat/test.download.AmerifluxLBL.R b/modules/data.atmosphere/tests/testthat/test.download.AmerifluxLBL.R index 4ca10b43afe..27b0a2b49ac 100644 --- a/modules/data.atmosphere/tests/testthat/test.download.AmerifluxLBL.R +++ b/modules/data.atmosphere/tests/testthat/test.download.AmerifluxLBL.R @@ -1,76 +1,85 @@ - test_that("download respects overwrite argument", { - outdir <- withr::local_tempdir() - zippath <- file.path(outdir, "AMF_US-Akn_BASE-BADM_6-5.zip") - csvpath <- sub("-BADM(.*).zip", "_HH\\1.csv", zippath) - - # Mock out amerifluxr functions to test our code without network calls - local_mocked_bindings( - amf_download_base = \(...) { - tmp_csv <- basename(csvpath) - withr::with_tempdir({ - writeLines( - c( "# fake file", - "#", - "TIMESTAMP_START,TIMESTAMP_END", - "201101010000,201101010030", - "201112310000,201112310030"), - tmp_csv) - zip(zippath, tmp_csv, flags = "-qr0X")}) - zippath - }, - amf_var_info = \(...) data.frame( - Site_ID = "US-Akn", - BASE_Version = "6-5"), - .package = "amerifluxr" - ) - - # wrapper, just to skip retyping args - dl_akn <- function(...) download.AmerifluxLBL( - site = "US-Akn", - outfolder = outdir, - start_date = "2011-01-01", - end_date = "2011-10-01", - ...) + outdir <- withr::local_tempdir() + zippath <- file.path(outdir, "AMF_US-Akn_BASE-BADM_6-5.zip") + csvpath <- sub("-BADM(.*).zip", "_HH\\1.csv", zippath) + + # Mock out amerifluxr functions to test our code without network calls + local_mocked_bindings( + amf_download_base = \(...) { + tmp_csv <- basename(csvpath) + withr::with_tempdir({ + writeLines( + c( + "# fake file", + "#", + "TIMESTAMP_START,TIMESTAMP_END", + "201101010000,201101010030", + "201112310000,201112310030" + ), + tmp_csv + ) + zip(zippath, tmp_csv, flags = "-qr0X") + }) + zippath + }, + amf_var_info = \(...) data.frame( + Site_ID = "US-Akn", + BASE_Version = "6-5" + ), + .package = "amerifluxr" + ) + + # wrapper, just to skip retyping args + dl_akn <- function(...) { + download.AmerifluxLBL( + site = "US-Akn", + outfolder = outdir, + start_date = "2011-01-01", + end_date = "2011-10-01", + ... 
+ ) + } + + # Case 0: new download + expect_false(file.exists(zippath)) + expect_false(file.exists(csvpath)) + dl_akn() + expect_true(file.exists(zippath)) + expect_true(file.exists(csvpath)) - # Case 0: new download - expect_false(file.exists(zippath)) - expect_false(file.exists(csvpath)) - dl_akn() - expect_true(file.exists(zippath)) - expect_true(file.exists(csvpath)) - - # Case 1: reuse existing download - ziptime <- file.mtime(zippath) - csvtime <- file.mtime(csvpath) - expect_log( - dl_akn(overwrite = FALSE), - "skipping download.*skipping extraction") - expect_equal(file.mtime(zippath), ziptime) - expect_equal(file.mtime(csvpath), csvtime) + # Case 1: reuse existing download + ziptime <- file.mtime(zippath) + csvtime <- file.mtime(csvpath) + expect_log( + dl_akn(overwrite = FALSE), + "skipping download.*skipping extraction" + ) + expect_equal(file.mtime(zippath), ziptime) + expect_equal(file.mtime(csvpath), csvtime) - # Case 2: overwrite existing download - dl_akn(overwrite = TRUE) - expect_gt(file.mtime(zippath), ziptime) - expect_gt(file.mtime(csvpath), csvtime) + # Case 2: overwrite existing download + dl_akn(overwrite = TRUE) + expect_gt(file.mtime(zippath), ziptime) + expect_gt(file.mtime(csvpath), csvtime) - # Case 3: Freshen csv without clobbering zip - file.remove(csvpath) - ziptime <- file.mtime(zippath) - expect_log(dl_akn(overwrite = FALSE), "skipping download") - expect_true(file.exists(csvpath)) - expect_equal(file.mtime(zippath), ziptime) + # Case 3: Freshen csv without clobbering zip + file.remove(csvpath) + ziptime <- file.mtime(zippath) + expect_log(dl_akn(overwrite = FALSE), "skipping download") + expect_true(file.exists(csvpath)) + expect_equal(file.mtime(zippath), ziptime) - # Case 4: Re-download zip without clobbering CSV - # (Note: I'm not sure this is desirable! For consistency it may be better - # to overwrite the CSV so we know it matches the zip file. - # If you change the behavior, go ahead and update this test to match.) - file.remove(zippath) - csvtime <- file.mtime(csvpath) - expect_log( - dl_akn(overwrite = FALSE), - "skipping extraction") - expect_true(file.exists(zippath)) - expect_equal(file.mtime(csvpath), csvtime) + # Case 4: Re-download zip without clobbering CSV + # (Note: I'm not sure this is desirable! For consistency it may be better + # to overwrite the CSV so we know it matches the zip file. + # If you change the behavior, go ahead and update this test to match.) 
+ file.remove(zippath) + csvtime <- file.mtime(csvpath) + expect_log( + dl_akn(overwrite = FALSE), + "skipping extraction" + ) + expect_true(file.exists(zippath)) + expect_equal(file.mtime(csvpath), csvtime) }) diff --git a/modules/data.atmosphere/tests/testthat/test.download.CRUNCEP.R b/modules/data.atmosphere/tests/testthat/test.download.CRUNCEP.R index cf69aafeee9..96c6f58e050 100644 --- a/modules/data.atmosphere/tests/testthat/test.download.CRUNCEP.R +++ b/modules/data.atmosphere/tests/testthat/test.download.CRUNCEP.R @@ -11,11 +11,13 @@ test_that("download works and returns a valid CF file", { PEcAn.logger::logger.setLevel("WARN") - result <- download.CRUNCEP(outfolder = tmpdir, - start_date = "2000-01-01", - end_date = "2000-12-31", - lat.in = 40, - lon.in = -88) + result <- download.CRUNCEP( + outfolder = tmpdir, + start_date = "2000-01-01", + end_date = "2000-12-31", + lat.in = 40, + lon.in = -88 + ) cf <- ncdf4::nc_open(result$file) cf_units <- cf$dim$time$units ncdf4::nc_close(cf) @@ -30,6 +32,8 @@ test_that("download works and returns a valid CF file", { end_date = "2000-12-31", lat.in = 40, lon.in = -88, - overwrite = FALSE), - "already exists. Skipping") + overwrite = FALSE + ), + "already exists. Skipping" + ) }) diff --git a/modules/data.atmosphere/tests/testthat/test.download.GFDLR.R b/modules/data.atmosphere/tests/testthat/test.download.GFDLR.R index 30462d3a82a..6515b7d6aa1 100644 --- a/modules/data.atmosphere/tests/testthat/test.download.GFDLR.R +++ b/modules/data.atmosphere/tests/testthat/test.download.GFDLR.R @@ -8,10 +8,12 @@ test_that("GFDL server is reachable", { skip_on_ci() skip_if_offline() - test_url <- paste0("http://nomads.gfdl.noaa.gov:9192/opendap/", - "CMIP5/output1/NOAA-GFDL/GFDL-CM3/rcp45/3hr/", - "atmos/3hr/r1i1p1/v20110601/tas/", - "tas_3hr_GFDL-CM3_rcp45_r1i1p1_2006010100-2010123123.nc") + test_url <- paste0( + "http://nomads.gfdl.noaa.gov:9192/opendap/", + "CMIP5/output1/NOAA-GFDL/GFDL-CM3/rcp45/3hr/", + "atmos/3hr/r1i1p1/v20110601/tas/", + "tas_3hr_GFDL-CM3_rcp45_r1i1p1_2006010100-2010123123.nc" + ) test_nc <- tryCatch( ncdf4::nc_open(test_url, suppress_dimvals = TRUE), error = function(e) { @@ -47,7 +49,7 @@ test_that("GFDL server is reachable", { expect_equal(cf$dim$latitude$len, 1) expect_equal(cf$dim$longitude$len, 1) - expect_equal(cf$dim$time$len, 365*(24/3)) # one year at 3-hr interval, leap days ignored + expect_equal(cf$dim$time$len, 365 * (24 / 3)) # one year at 3-hr interval, leap days ignored expect_equal(cf$nvar, 8) # Expect that overwrite argument is respected @@ -59,7 +61,9 @@ test_that("GFDL server is reachable", { site_id = 753, lat.in = 40, lon.in = -88, - overwrite = FALSE), - "already exists. Skipping") + overwrite = FALSE + ), + "already exists. 
Skipping" + ) }) }) diff --git a/modules/data.atmosphere/tests/testthat/test.download.MERRA.R b/modules/data.atmosphere/tests/testthat/test.download.MERRA.R index 7a51c68fd03..6ff9e488d14 100644 --- a/modules/data.atmosphere/tests/testthat/test.download.MERRA.R +++ b/modules/data.atmosphere/tests/testthat/test.download.MERRA.R @@ -7,17 +7,19 @@ teardown(unlink(outdir, recursive = TRUE)) test_that("MERRA download works", { skip_on_ci() skip_if_offline() - + start_date <- "2009-06-01" end_date <- "2009-06-04" dat <- download.MERRA(outdir, start_date, end_date, - lat.in = 45.3, lon.in = -85.3, overwrite = TRUE) + lat.in = 45.3, lon.in = -85.3, overwrite = TRUE + ) expect_true(file.exists(dat$file[[1]])) nc <- ncdf4::nc_open(dat$file[[1]]) on.exit(ncdf4::nc_close(nc), add = TRUE) expect_timeseq <- seq(lubridate::as_datetime(start_date), - lubridate::as_datetime(paste(end_date, "23:59:59")), - by = "1 hour", tz = "UTC") + lubridate::as_datetime(paste(end_date, "23:59:59")), + by = "1 hour", tz = "UTC" + ) time <- lubridate::as_datetime("2009-01-01") + as.difftime(ncdf4::ncvar_get(nc, "time"), units = "days") expect_equal(time, expect_timeseq) diff --git a/modules/data.atmosphere/tests/testthat/test.download.NARR.R b/modules/data.atmosphere/tests/testthat/test.download.NARR.R index cb4bd4121fa..15b585cb4a7 100644 --- a/modules/data.atmosphere/tests/testthat/test.download.NARR.R +++ b/modules/data.atmosphere/tests/testthat/test.download.NARR.R @@ -21,7 +21,8 @@ test_that("NARR download works as expected", { r <- download.NARR_site( outfolder = getwd(), start_date, end_date, lat.in, lon.in, - progress = TRUE, parallel = TRUE, ncores = 2) + progress = TRUE, parallel = TRUE, ncores = 2 + ) expect_equal(nrow(r), 1) expect_true(file.exists(r$file[1])) nc <- ncdf4::nc_open(r$file) diff --git a/modules/data.atmosphere/tests/testthat/test.download.raw.met.module.R b/modules/data.atmosphere/tests/testthat/test.download.raw.met.module.R index 363a9b823d7..065f3820337 100644 --- a/modules/data.atmosphere/tests/testthat/test.download.raw.met.module.R +++ b/modules/data.atmosphere/tests/testthat/test.download.raw.met.module.R @@ -1,34 +1,35 @@ test_that("`.download.raw.met.module` throws an error if register$site is unknown", { - register <- list(scale = 'example') + register <- list(scale = "example") expect_error( - .download.raw.met.module(dir = NULL, met = NULL, register = register, str_ns = NULL), + .download.raw.met.module(dir = NULL, met = NULL, register = register, str_ns = NULL), "Unknown register\\$scale" ) }) test_that("`.download.raw.met.module` is able to call the right download function based on met value", { mocked_res <- mockery::mock(10) - mockery::stub(.download.raw.met.module, 'PEcAn.DB::convert_input', mocked_res) - register <- list(scale = 'site') + mockery::stub(.download.raw.met.module, "PEcAn.DB::convert_input", mocked_res) + register <- list(scale = "site") res <- .download.raw.met.module( - dir = NULL, - met = 'ERA5', - register = register, - machine = NULL, - start_date = '2010-01-01', - end_date = '2010-01-01', - str_ns = NULL, - con = NULL, - input_met = NULL, - site.id = 1, - lat.in = 1, - lon.in = 1, - host = NULL, - site = NULL, - username = NULL, - dbparms = NULL) + dir = NULL, + met = "ERA5", + register = register, + machine = NULL, + start_date = "2010-01-01", + end_date = "2010-01-01", + str_ns = NULL, + con = NULL, + input_met = NULL, + site.id = 1, + lat.in = 1, + lon.in = 1, + host = NULL, + site = NULL, + username = NULL, + dbparms = NULL + ) args <- 
mockery::mock_args(mocked_res) mockery::expect_called(mocked_res, 1) expect_equal(res, 10) - expect_equal(args[[1]]$fcn, 'download.ERA5') -}) \ No newline at end of file + expect_equal(args[[1]]$fcn, "download.ERA5") +}) diff --git a/modules/data.atmosphere/tests/testthat/test.load.cfmet.R b/modules/data.atmosphere/tests/testthat/test.load.cfmet.R index b0df59ff85b..c1f3c3de3a9 100644 --- a/modules/data.atmosphere/tests/testthat/test.load.cfmet.R +++ b/modules/data.atmosphere/tests/testthat/test.load.cfmet.R @@ -7,12 +7,14 @@ subdaily_file <- "data/urbana_subdaily_test.nc" daily.nc <- ncdf4::nc_open(daily_file) on.exit(ncdf4::nc_close(daily.nc), add = TRUE) -daily.cf <- load.cfmet(met.nc = daily.nc, lat = 39.75, lon = -87.25, - start.date = "1951-01-02", end.date = "1951-05-31") +daily.cf <- load.cfmet( + met.nc = daily.nc, lat = 39.75, lon = -87.25, + start.date = "1951-01-02", end.date = "1951-05-31" +) subdaily.nc <- ncdf4::nc_open(subdaily_file) on.exit(ncdf4::nc_close(subdaily.nc), add = TRUE) -test_that("data extracted from test pecan-cf met files is valid",{ +test_that("data extracted from test pecan-cf met files is valid", { expect_is(daily.cf, "data.frame") expect_is(daily.cf$date, "POSIXct") @@ -25,39 +27,65 @@ test_that("data extracted from test pecan-cf met files is valid",{ expect_true(all(daily.cf$year == 1951)) expect_true(all(daily.cf$day %in% 1:31)) expect_true(all(diff(daily.cf$doy) > 0)) ## test dataset is for 1/2 year so day of year should be increasing - expect_true(all(c("index", "date", "doy", "year", "month", "day", "hour", - "surface_downwelling_longwave_flux_in_air", "surface_downwelling_shortwave_flux_in_air", - "air_temperature", "air_temperature_max", "air_temperature_min", - "northward_wind", "eastward_wind", "relative_humidity") - %in% colnames(daily.cf))) + expect_true(all(c( + "index", "date", "doy", "year", "month", "day", "hour", + "surface_downwelling_longwave_flux_in_air", "surface_downwelling_shortwave_flux_in_air", + "air_temperature", "air_temperature_max", "air_temperature_min", + "northward_wind", "eastward_wind", "relative_humidity" + ) + %in% colnames(daily.cf))) }) -test_that("load.cfmet respects start/end date",{ +test_that("load.cfmet respects start/end date", { expect_equal(strftime(min(daily.cf$date), "%F", tz = "UTC"), "1951-01-02") expect_equal(strftime(max(daily.cf$date), "%F", tz = "UTC"), "1951-05-31") expect_equal(nrow(daily.cf), 150) }) -test_that("load.cfmet throws error if start/end date out of range",{ - expect_error(load.cfmet(met.nc = subdaily.nc, lat = 39, lon = -88, - start.date = "9999-01-01", end.date = "9999-02-02"), - "run end date .* after met data ends") - expect_error(load.cfmet(met.nc = subdaily.nc, lat = 39, lon = -88, - start.date = "0000-01-01", end.date = "0000-02-02"), - "run start date .* before met data starts") - expect_error(load.cfmet(met.nc = daily.nc, lat = 39, lon = -88, - start.date = "1950-12-31", end.date = "1951-12-31"), - "run start date .* before met data starts") - expect_error(load.cfmet(met.nc = daily.nc, lat = 39, lon = -88, - start.date = "1951-01-02", end.date = "1952-01-01"), - "run end date .* after met data ends") +test_that("load.cfmet throws error if start/end date out of range", { + expect_error( + load.cfmet( + met.nc = subdaily.nc, lat = 39, lon = -88, + start.date = "9999-01-01", end.date = "9999-02-02" + ), + "run end date .* after met data ends" + ) + expect_error( + load.cfmet( + met.nc = subdaily.nc, lat = 39, lon = -88, + start.date = "0000-01-01", end.date = "0000-02-02" + ), + 
"run start date .* before met data starts" + ) + expect_error( + load.cfmet( + met.nc = daily.nc, lat = 39, lon = -88, + start.date = "1950-12-31", end.date = "1951-12-31" + ), + "run start date .* before met data starts" + ) + expect_error( + load.cfmet( + met.nc = daily.nc, lat = 39, lon = -88, + start.date = "1951-01-02", end.date = "1952-01-01" + ), + "run end date .* after met data ends" + ) }) -test_that("load.cfmet enforces lat/lon matching",{ - expect_error(load.cfmet(met.nc = daily.nc, lat = 39, lon = 20, - start.date = "1951-01-01", end.date = "1951-01-07"), - "lat / lon .* outside range of met file") - expect_error(load.cfmet(met.nc = daily.nc, lat = 9, lon = -88, - start.date = "1951-01-01", end.date = "1951-01-07"), - "lat / lon .* outside range of met file") +test_that("load.cfmet enforces lat/lon matching", { + expect_error( + load.cfmet( + met.nc = daily.nc, lat = 39, lon = 20, + start.date = "1951-01-01", end.date = "1951-01-07" + ), + "lat / lon .* outside range of met file" + ) + expect_error( + load.cfmet( + met.nc = daily.nc, lat = 9, lon = -88, + start.date = "1951-01-01", end.date = "1951-01-07" + ), + "lat / lon .* outside range of met file" + ) }) diff --git a/modules/data.atmosphere/tests/testthat/test.met.process.R b/modules/data.atmosphere/tests/testthat/test.met.process.R index 01e819c0744..0afb3e2a048 100644 --- a/modules/data.atmosphere/tests/testthat/test.met.process.R +++ b/modules/data.atmosphere/tests/testthat/test.met.process.R @@ -1,26 +1,27 @@ test_that("`met.process` able to call .download.raw.met.module based on met process stage params", { - input_met <- list(source = 'CRUNCEP', id = '1') + input_met <- list(source = "CRUNCEP", id = "1") - mockery::stub(met.process, 'PEcAn.DB::db.open', 1) - mockery::stub(met.process, 'PEcAn.DB::db.close', 1) - mockery::stub(met.process, 'PEcAn.DB::db.query', list(file_path = '/test/path', file_name = 'test')) - mockery::stub(met.process, 'read.register', list()) - mockery::stub(met.process, 'PEcAn.DB::query.format.vars', list()) - mockery::stub(met.process, 'PEcAn.DB::dbfile.check', list(id = 1)) - mockery::stub(met.process, 'assign', 1) - mockery::stub(met.process, 'PEcAn.DB::query.site', list(lat = 0, lon = 0)) - mockery::stub(met.process, 'met.process.stage', list(download.raw = TRUE, met2cf = FALSE, standardize = FALSE, met2model = FALSE)) + mockery::stub(met.process, "PEcAn.DB::db.open", 1) + mockery::stub(met.process, "PEcAn.DB::db.close", 1) + mockery::stub(met.process, "PEcAn.DB::db.query", list(file_path = "/test/path", file_name = "test")) + mockery::stub(met.process, "read.register", list()) + mockery::stub(met.process, "PEcAn.DB::query.format.vars", list()) + mockery::stub(met.process, "PEcAn.DB::dbfile.check", list(id = 1)) + mockery::stub(met.process, "assign", 1) + mockery::stub(met.process, "PEcAn.DB::query.site", list(lat = 0, lon = 0)) + mockery::stub(met.process, "met.process.stage", list(download.raw = TRUE, met2cf = FALSE, standardize = FALSE, met2model = FALSE)) mocked_res <- mockery::mock(1) - mockery::stub(met.process, '.download.raw.met.module', mocked_res) + mockery::stub(met.process, ".download.raw.met.module", mocked_res) res <- met.process( - site = list(id = 1), + site = list(id = 1), input_met = input_met, - start_date = '2001-01-01', - end_date = '2003-01-01', - model = 'ED2', + start_date = "2001-01-01", + end_date = "2003-01-01", + model = "ED2", dbparms = list(), - dir = 'test') - mockery::expect_called(mocked_res, 1) - expect_equal(res$path$path1, '/test/path/test') + dir = 
"test" + ) + mockery::expect_called(mocked_res, 1) + expect_equal(res$path$path1, "/test/path/test") }) diff --git a/modules/data.atmosphere/tests/testthat/test.met2CF.ALMA.R b/modules/data.atmosphere/tests/testthat/test.met2CF.ALMA.R index 14c92080434..c651836020f 100644 --- a/modules/data.atmosphere/tests/testthat/test.met2CF.ALMA.R +++ b/modules/data.atmosphere/tests/testthat/test.met2CF.ALMA.R @@ -1,30 +1,30 @@ -if(FALSE){ +if (FALSE) { + ## Currently just code used for internal development + ## will eventually convert to a formal test + in.path <- "/fs/data4/phase1a_met_drivers_v4/PDL" + in.prefix <- "" + outfolder <- "/tmp/PaleonMet" + start_date <- "1030-01-01" + end_date <- "1030-12-31" + overwrite <- TRUE + verbose <- FALSE -## Currently just code used for internal development -## will eventually convert to a formal test -in.path = "/fs/data4/phase1a_met_drivers_v4/PDL" -in.prefix = "" -outfolder = "/tmp/PaleonMet" -start_date = "1030-01-01" -end_date = "1030-12-31" -overwrite = TRUE -verbose=FALSE + results <- met2CF.PalEON(in.path, in.prefix, outfolder, start_date, end_date, overwrite, verbose) -results = met2CF.PalEON(in.path,in.prefix,outfolder,start_date,end_date,overwrite,verbose) - -## full conversion: -in.path.base = "/fs/data4/phase1a_met_drivers_v4" -sites = dir(in.path.base) -in.prefix = "" -outfolder.base = "/fs/data1/pecan.data/input/PalEON.MIP." -start_date = "850-01-01" -end_date = "2010-12-31" -overwrite = FALSE -verbose = FALSE -results = list() -for(s in sites){ - results[[s]] = met2CF.PalEON(file.path(in.path.base,s),in.prefix, - paste0(outfolder.base,s),start_date,end_date,overwrite,verbose) + ## full conversion: + in.path.base <- "/fs/data4/phase1a_met_drivers_v4" + sites <- dir(in.path.base) + in.prefix <- "" + outfolder.base <- "/fs/data1/pecan.data/input/PalEON.MIP." 
+ start_date <- "850-01-01" + end_date <- "2010-12-31" + overwrite <- FALSE + verbose <- FALSE + results <- list() + for (s in sites) { + results[[s]] <- met2CF.PalEON( + file.path(in.path.base, s), in.prefix, + paste0(outfolder.base, s), start_date, end_date, overwrite, verbose + ) + } } - -} \ No newline at end of file diff --git a/modules/data.atmosphere/tests/testthat/test.met2CF.csv.R b/modules/data.atmosphere/tests/testthat/test.met2CF.csv.R index fb5d24d1cff..e53c0fcf1e8 100644 --- a/modules/data.atmosphere/tests/testthat/test.met2CF.csv.R +++ b/modules/data.atmosphere/tests/testthat/test.met2CF.csv.R @@ -1,31 +1,31 @@ context("testing csv import using met2CF.csv") format <- list( - header = 1, - time_zone = "GMT", - time.row = 1, - skip = 0, - unit.row = TRUE, - na.strings = NA, - vars = list( - input_name = c( - "Corrected Date/Time", "Solar Radiation (W/m2)", "Temp (C)", - "PAR_(umol_m-2_s-1)", "PAR_(mol_m-2_h-1)", "RH (%)", "Wind Speed (m/s)", - "VPD", "Rain (mm)" - ), - input_units = c( - NA, "W m-2", "celsius", - "umol m-2 s-1", "mol m-2 h-1", "%", "m s-1", "Pa", "mm h-1" - ), - bety_name = c( - "datetime", "solar_radiation", "airT", - "PAR", NA, "relative_humidity", "Wspd", NA, "precipitation_rate" - ), - storage_type = c( - "%m/%d/%y %H:%M", NA, NA, NA, NA, NA, NA, NA, NA - ) - ), - lat <- 42 + 47 / 60 + 30 / 6000, - lon <- 76 + 7 / 60 + 20 / 6000 + header = 1, + time_zone = "GMT", + time.row = 1, + skip = 0, + unit.row = TRUE, + na.strings = NA, + vars = list( + input_name = c( + "Corrected Date/Time", "Solar Radiation (W/m2)", "Temp (C)", + "PAR_(umol_m-2_s-1)", "PAR_(mol_m-2_h-1)", "RH (%)", "Wind Speed (m/s)", + "VPD", "Rain (mm)" + ), + input_units = c( + NA, "W m-2", "celsius", + "umol m-2 s-1", "mol m-2 h-1", "%", "m s-1", "Pa", "mm h-1" + ), + bety_name = c( + "datetime", "solar_radiation", "airT", + "PAR", NA, "relative_humidity", "Wspd", NA, "precipitation_rate" + ), + storage_type = c( + "%m/%d/%y %H:%M", NA, NA, NA, NA, NA, NA, NA, NA + ) + ), + lat <- 42 + 47 / 60 + 30 / 6000, + lon <- 76 + 7 / 60 + 20 / 6000 ) # met2CF.csv(in.path = "data", in.file = "met2CF.csv.csv", outfolder = tempdir(), # format = format, @@ -37,20 +37,20 @@ outfolder <- tempdir() # Initial test suite to test the met2CF.csv function test_that("met2CF.csv function works correctly", { - output <- PEcAn.data.atmosphere::met2CF.csv( - in.path = "data", - in.prefix = "test.met2CF.csv.csv", - outfolder = outfolder, - start_date = lubridate::ymd_hm("2013-03-01 18:00"), - end_date = lubridate::ymd_hm("2013-03-27 17:00"), - format = format, - lat = format$lat, - lon = format$lon, - overwrite = TRUE - ) - nc_files <- list.files(outfolder, pattern = "\\.nc$", full.names = TRUE) + output <- PEcAn.data.atmosphere::met2CF.csv( + in.path = "data", + in.prefix = "test.met2CF.csv.csv", + outfolder = outfolder, + start_date = lubridate::ymd_hm("2013-03-01 18:00"), + end_date = lubridate::ymd_hm("2013-03-27 17:00"), + format = format, + lat = format$lat, + lon = format$lon, + overwrite = TRUE + ) + nc_files <- list.files(outfolder, pattern = "\\.nc$", full.names = TRUE) - expect_true(file.exists(nc_files)) - expect_true(file.size(nc_files) > 0) - expect_equal(nc_files, file.path(outfolder, "test.met2CF.2013.nc")) + expect_true(file.exists(nc_files)) + expect_true(file.size(nc_files) > 0) + expect_equal(nc_files, file.path(outfolder, "test.met2CF.2013.nc")) }) diff --git a/modules/data.atmosphere/tests/testthat/test.metutils.R b/modules/data.atmosphere/tests/testthat/test.metutils.R index 
2e56e57e8db..ae2d36b0b8c 100644 --- a/modules/data.atmosphere/tests/testthat/test.metutils.R +++ b/modules/data.atmosphere/tests/testthat/test.metutils.R @@ -1,21 +1,23 @@ context("testing met utility functions") -test_that("sw2par, par2ppfd, sw2ppfd are consistent ",{ - expect_equal(sw2par(1000), 486) - expect_equal(par2ppfd(486), 2068.08510638298) - expect_equal(sw2ppfd(1000), par2ppfd(sw2par(1000))) - expect_equal(sw2ppfd(0:1000), par2ppfd(sw2par(0:1000))) +test_that("sw2par, par2ppfd, sw2ppfd are consistent ", { + expect_equal(sw2par(1000), 486) + expect_equal(par2ppfd(486), 2068.08510638298) + expect_equal(sw2ppfd(1000), par2ppfd(sw2par(1000))) + expect_equal(sw2ppfd(0:1000), par2ppfd(sw2par(0:1000))) }) -test_that("qair2rh is consistent",{ +test_that("qair2rh is consistent", { expect_equal(qair2rh(qair = 1, temp = 10, press = 1013.25), 1) }) -test_that("get.rh RH from dewpoint",{ +test_that("get.rh RH from dewpoint", { # air temp fixed at 15C - getrhtest <- function(T_Test, Td_test){ - testrh <- get.rh(T = 273.15 + T_Test, - Td = 273.15 + Td_test) + getrhtest <- function(T_Test, Td_test) { + testrh <- get.rh( + T = 273.15 + T_Test, + Td = 273.15 + Td_test + ) return(testrh) } # air T = dewpoint @@ -31,4 +33,3 @@ test_that("get.rh RH from dewpoint",{ expect_equal(getrhtest(25, 10), 38.82, tolerance = 0.2) expect_equal(getrhtest(0, -5), 69, tolerance = 0.2) }) - diff --git a/modules/data.atmosphere/tests/testthat/test.upscale_met.R b/modules/data.atmosphere/tests/testthat/test.upscale_met.R index 3b706893abc..0c7573b6b5e 100644 --- a/modules/data.atmosphere/tests/testthat/test.upscale_met.R +++ b/modules/data.atmosphere/tests/testthat/test.upscale_met.R @@ -8,10 +8,12 @@ tmpdir <- tempfile() setup(dir.create(tmpdir, showWarnings = FALSE)) teardown(unlink(tmpdir, recursive = TRUE)) -sc_result <- upscale_met(outfolder = tmpdir, - input_met = "data/urbana_subdaily_test.nc", - resolution = 6/24, - overwrite = TRUE) +sc_result <- upscale_met( + outfolder = tmpdir, + input_met = "data/urbana_subdaily_test.nc", + resolution = 6 / 24, + overwrite = TRUE +) scaled <- ncdf4::nc_open(sc_result$file) stime <- scaled$dim$time @@ -23,16 +25,17 @@ test_that("output is scaled correctly", { sc_dt_hours <- PEcAn.utils::ud_convert( sc_dt, stime$units, - sub("^.* since", "hours since", stime$units)) + sub("^.* since", "hours since", stime$units) + ) expect_equal(sc_dt_hours, 6) # change in file length should be proportional to change in timestep, - # but allow truncation of incomplete timesteps at end of output - expect_lt(stime$len*sc_dt - otime$len*orig_dt, sc_dt) + # but allow truncation of incomplete timesteps at end of output + expect_lt(stime$len * sc_dt - otime$len * orig_dt, sc_dt) # date ranges should match to within one upscaled timestep. 
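# Aside on upscale_met()'s resolution argument, inferred (not stated in this patch) from
# the 6 / 24 value and the 6-hour check above: if resolution is expressed in days, a
# coarser 12-hourly pass over the same test file would look like:
#   upscale_met(outfolder = tmpdir,
#               input_met = "data/urbana_subdaily_test.nc",
#               resolution = 12 / 24,  # assumed to mean half a day, i.e. 12-hourly output
#               overwrite = TRUE)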
- expect_lt(min(stime$vals) - min(otime$vals), sc_dt/orig_dt) - expect_lt(max(otime$vals) - max(stime$vals), sc_dt/orig_dt) + expect_lt(min(stime$vals) - min(otime$vals), sc_dt / orig_dt) + expect_lt(max(otime$vals) - max(stime$vals), sc_dt / orig_dt) }) test_that("units are preserved", { diff --git a/modules/data.land/R/DataONE_doi_download.R b/modules/data.land/R/DataONE_doi_download.R index 12ea27b707e..32fe78dd922 100644 --- a/modules/data.land/R/DataONE_doi_download.R +++ b/modules/data.land/R/DataONE_doi_download.R @@ -3,45 +3,45 @@ #' @param id the doi or other identifier linked to the package in DataONE #' #' @return returns the id in the proper format for querying the DataONE Federation (using solrQuery syntax) -#' @export +#' @export #' #' @author Liam P Burke, \email{lpburke@@bu.edu} -#' @description This function is for formatting purposes. It simply inserts the doi or id that the user wishes to query into Solr format so that it is compatible with the dataoneR query functionality in the PEcAn function -#' -#' -format_identifier = function(id){ +#' @description This function is for formatting purposes. It simply inserts the doi or id that the user wishes to query into Solr format so that it is compatible with the dataoneR query functionality in the PEcAn function +#' +#' +format_identifier <- function(id) { doi.template <- 'id:"_"' # solr format base::gsub("_", id, doi.template) # replace "_" with the doi or id } # end function # ----------------------------------------------------------------------------------------------------------------------------------------------------------------- -#' id_resolveable +#' id_resolveable #' -#' @param id the doi or other identifier linked to the package in DataONE +#' @param id the doi or other identifier linked to the package in DataONE #' @param CNode CNode="PROD" -#' @param return_result boolean that returns or suppresses result of query. defaults to TRUE. +#' @param return_result boolean that returns or suppresses result of query. defaults to TRUE. #' @description Uses dataone::query from dataoneR to query DataONE. Prints result if data exists -#' -#' @return returns message indicating wether or not the id resolves to data in the DataONE federation and information about said data. +#' +#' @return returns message indicating wether or not the id resolves to data in the DataONE federation and information about said data. #' @export #' -#' -id_resolveable = function(id, return_result = TRUE, CNode = "PROD"){ +#' +id_resolveable <- function(id, return_result = TRUE, CNode = "PROD") { doi1 <- format_identifier(id) # reformat the id in solr format - - cn <- dataone::CNode(CNode) - queryParams <- list(q=doi1, rows="5") + + cn <- dataone::CNode(CNode) + queryParams <- list(q = doi1, rows = "5") result <- dataone::query(cn, solrQuery = queryParams, as = "data.frame") # return query results as a data.frame - - if(return_result == TRUE){ # option that displays data.frame of query + + if (return_result == TRUE) { # option that displays data.frame of query print(result) } - - if(is.null(result[1,1])){ # if there is no data available, result[1,1] will return a NULL value + + if (is.null(result[1, 1])) { # if there is no data available, result[1,1] will return a NULL value return("doi does not resolve in the DataOne federation and therefore cannot be retrieved by doi. Either download this data locally and import using PEcAn's drag and drop feature, or search DataOne manually for another data identifier. 
Thank you for your patience.") - } else{ + } else { return("data can be found in D1 federation") } } # end function @@ -50,28 +50,28 @@ id_resolveable = function(id, return_result = TRUE, CNode = "PROD"){ #' get_resource_map #' -#' @param id the doi or other identifier linked to the package in DataONE +#' @param id the doi or other identifier linked to the package in DataONE #' @param CNode default is "PROD" #' @description Locates data in DataONE and returns the resource_map or a message indicating that there is no corresponding resource_map for the given id -#' +#' #' @return return the resource_map or a message indicating that there is no corresponding resource_map for the given id #' @export #' -#' -get_resource_map = function(id, CNode = "PROD"){ - cn <- dataone::CNode(CNode) - locations <- dataone::resolve(cn, pid = id) +#' +get_resource_map <- function(id, CNode = "PROD") { + cn <- dataone::CNode(CNode) + locations <- dataone::resolve(cn, pid = id) mnId <- locations$data[1, "nodeIdentifier"] mn <- dataone::getMNode(cn, mnId) - + doi1 <- format_identifier(id) # format the identifier in solr Query format - queryParamList <- list(q=doi1, fl="resourceMap") # custom query for the resourceMap - resource_map_df <- dataone::query(cn, solrQuery = queryParamList, as="data.frame") - resource_map <- resource_map_df[1,1] - - if (is.null(resource_map_df[1,1])){ # inform user if id/ doi has a corresponding resource_map or if this needs to be found manually + queryParamList <- list(q = doi1, fl = "resourceMap") # custom query for the resourceMap + resource_map_df <- dataone::query(cn, solrQuery = queryParamList, as = "data.frame") + resource_map <- resource_map_df[1, 1] + + if (is.null(resource_map_df[1, 1])) { # inform user if id/ doi has a corresponding resource_map or if this needs to be found manually print("doi does not resolve a resource_map. Please manually search for the resource_map in DataONE search: https://search.DataONE.org/#data") - } else{ + } else { print("Continue to next phase to complete download") return(resource_map) } @@ -87,26 +87,26 @@ get_resource_map = function(id, CNode = "PROD"){ #' @param overwrite_directory boolean that indicates whether or not the function should overwrite the directory #' @param directory location that download.packages places the data #' -#' @description Uses resource_map and dataone::getPackage to download the data into a BagItFile. Then utils::unzip unzips the data and stores in the user's directory. +#' @description Uses resource_map and dataone::getPackage to download the data into a BagItFile. Then utils::unzip unzips the data and stores in the user's directory. 
#' @return results of download #' @export #' -#' -download_package_rm = function(resource_map, directory, CNode = "PROD", download_format = "application/bagit-097", - overwrite_directory = TRUE){ +#' +download_package_rm <- function(resource_map, directory, CNode = "PROD", download_format = "application/bagit-097", + overwrite_directory = TRUE) { # Finding the mnId (query) - cn <- dataone::CNode(CNode) - locations <- dataone::resolve(cn, pid = resource_map) - mnId <- locations$data[1,"nodeIdentifier"] - - # download the bagitFile + cn <- dataone::CNode(CNode) + locations <- dataone::resolve(cn, pid = resource_map) + mnId <- locations$data[1, "nodeIdentifier"] + + # download the bagitFile mn <- dataone::getMNode(cn, mnId) bagitFile <- dataone::getPackage(mn, id = resource_map, format = download_format) - - zip_contents <- utils::unzip(bagitFile, files = NULL, list = FALSE, overwrite = overwrite_directory, # Unzip the bagitFile and store in directory specified under exdir - junkpaths = FALSE, exdir = directory, unzip = "internal", - setTimes = FALSE) + + zip_contents <- utils::unzip(bagitFile, + files = NULL, list = FALSE, overwrite = overwrite_directory, # Unzip the bagitFile and store in directory specified under exdir + junkpaths = FALSE, exdir = directory, unzip = "internal", + setTimes = FALSE + ) return(zip_contents) } # end function - - diff --git a/modules/data.land/R/IC_BADM_Utilities.R b/modules/data.land/R/IC_BADM_Utilities.R index 7c9ec829d1d..61ce87d3308 100644 --- a/modules/data.land/R/IC_BADM_Utilities.R +++ b/modules/data.land/R/IC_BADM_Utilities.R @@ -1,45 +1,44 @@ - #' Read.IC.info.BADM #' #' @param lat numeric latitude #' @param long numeric longitude #' #' @description This function returns a dataframe of plant biomass, root and soil carbon for a set of lat and long coordinates. -#' This function first finds the level1 and level2 ecoregions for the given coordinates, and then tries to filter BADM database for those eco-regions. +#' This function first finds the level1 and level2 ecoregions for the given coordinates, and then tries to filter BADM database for those eco-regions. #' If no data found in the BADM database for the given lat/longs eco-regions, then all the data in the database will be used to return the initial condition. -#' All the variables are also converted to kg/m^2. +#' All the variables are also converted to kg/m^2. #' @return a dataframe with 7 columns of Site, Variable, Date, Organ, AGB, soil_organic_carbon_content, litter_carbon_content. #' Variable in the return object refers to what this value was called inside BADM database. 
#' #' @export #' @examples #' \dontrun{ -#' badm_test <- Read.IC.info.BADM(45.805925,-90.07961) -#'} -Read.IC.info.BADM <-function(lat, long){ - cov.factor <-1 - #Reading in the DB +#' badm_test <- Read.IC.info.BADM(45.805925, -90.07961) +#' } +Read.IC.info.BADM <- function(lat, long) { + cov.factor <- 1 + # Reading in the DB # U.S.SB <- PEcAn.data.land::BADM - + Regions <- EPA_ecoregion_finder(lat, long) Code_Level <- Regions$L2 - + # Let's find the biomass/.soil and litter - #L2 + # L2 biomass.df <- U.S.SB %>% dplyr::filter( .data$NA_L2CODE == Code_Level, grepl("ROOT_|AG_BIOMASS|SOIL_STOCK|SOIL_CHEM", .data$VARIABLE) ) %>% dplyr::select("SITE_ID", "GROUP_ID", "VARIABLE_GROUP", "VARIABLE", "DATAVALUE") - + # if no data was found on L2, then let's check for L1 - if (nrow(biomass.df) < 3) { + if (nrow(biomass.df) < 3) { Code_Level <- Regions$L1 - + biomass.df <- U.S.SB %>% dplyr::filter( .data$NA_L1CODE == Code_Level, @@ -47,23 +46,22 @@ Read.IC.info.BADM <-function(lat, long){ ) %>% dplyr::select("SITE_ID", "GROUP_ID", "VARIABLE_GROUP", "VARIABLE", "DATAVALUE") } - + # if no data was found on L1 too, then let's use the whole db - if (nrow(biomass.df) < 3) { + if (nrow(biomass.df) < 3) { Code_Level <- "ALL" biomass.df <- U.S.SB %>% dplyr::filter(grepl("ROOT_|AG_BIOMASS|SOIL_STOCK|SOIL_CHEM", .data$VARIABLE)) %>% dplyr::select("SITE_ID", "GROUP_ID", "VARIABLE_GROUP", "VARIABLE", "DATAVALUE") } - + # for each entry entries <- biomass.df %>% split(.$GROUP_ID) %>% purrr::map_dfr( function(Gdf) { - # we keep them here PlantWoodIni <- NA SoilIni <- NA @@ -75,81 +73,86 @@ Read.IC.info.BADM <-function(lat, long){ # find what type of entry it is - biomass/soil or litter if (nrow(Gdf) > 0) { type <- - sapply(c( - "*LIT", - "*SOIL", - "*_BIOMASS", - "*_ROOT_BIOMASS", - "*_LIT_BIOMASS" - ), - grepl, - Gdf[1, 3]) + sapply( + c( + "*LIT", + "*SOIL", + "*_BIOMASS", + "*_ROOT_BIOMASS", + "*_LIT_BIOMASS" + ), + grepl, + Gdf[1, 3] + ) type <- names(type)[type] - } else{ + } else { return(NULL) } - - if (length(type) > 1) + + if (length(type) > 1) { type <- type[-which(type == "*_BIOMASS")] + } #----------------- Unit conversion unit.in <- Gdf %>% dplyr::filter(grepl("UNIT", .data$VARIABLE)) %>% - dplyr::pull(.data$DATAVALUE) %>% + dplyr::pull(.data$DATAVALUE) %>% as.character() - - - #Converting DM to C content - #Variations and determinants of carbon content in plants:a global synthesis - https://www.biogeosciences.net/15/693/2018/bg-15-693-2018.pdf - if (length(unit.in) > 0) - if (unit.in =="kgDM m-2") cov.factor <- cov.factor *0.48 - + + + # Converting DM to C content + # Variations and determinants of carbon content in plants:a global synthesis - https://www.biogeosciences.net/15/693/2018/bg-15-693-2018.pdf + if (length(unit.in) > 0) { + if (unit.in == "kgDM m-2") cov.factor <- cov.factor * 0.48 + } + unit.ready <- ifelse(unit.in == "gC m-2", - "g/m^2", - ifelse(unit.in == "kgDM m-2", "kg/m^2", - "kg/m^2")) - - if (length(unit.in) == 0) + "g/m^2", + ifelse(unit.in == "kgDM m-2", "kg/m^2", + "kg/m^2" + ) + ) + + if (length(unit.in) == 0) { unit.ready <- "kg/m^2" - + } + Date.in <- Gdf %>% dplyr::filter(grepl("DATE", .data$VARIABLE)) %>% dplyr::pull(.data$DATAVALUE) %>% as.Date() - - if (length(Date.in) == 0) + + if (length(Date.in) == 0) { Date.in <- NA + } #----------------collect - + # if it's biomass if (type == "*_BIOMASS") { Organ.in <- Gdf %>% dplyr::filter(grepl("ORGAN", .data$VARIABLE)) %>% - dplyr::pull(.data$DATAVALUE) - - + dplyr::pull(.data$DATAVALUE) + + PlantWoodIni <- - 
PEcAn.utils::ud_convert(Gdf$DATAVALUE[1]%>% - as.numeric()*cov.factor, unit.ready, "kg/m^2")#"AG_BIOMASS_CROP","AG_BIOMASS_SHRUB","AG_BIOMASS_TREE","AG_BIOMASS_OTHER" - + PEcAn.utils::ud_convert(Gdf$DATAVALUE[1] %>% + as.numeric() * cov.factor, unit.ready, "kg/m^2") # "AG_BIOMASS_CROP","AG_BIOMASS_SHRUB","AG_BIOMASS_TREE","AG_BIOMASS_OTHER" } else if (type == "*SOIL") { val <- Gdf %>% dplyr::filter(grepl("SOIL_STOCK_C_ORG", .data$VARIABLE)) %>% dplyr::pull(.data$DATAVALUE) %>% as.numeric() - - if (length(val) > 0) - SoilIni <- PEcAn.utils::ud_convert(val*cov.factor, "g/m^2", "kg/m^2") - + + if (length(val) > 0) { + SoilIni <- PEcAn.utils::ud_convert(val * cov.factor, "g/m^2", "kg/m^2") + } } else if (type == "*_LIT_BIOMASS") { litterIni <- PEcAn.utils::ud_convert(Gdf$DATAVALUE[1] %>% - as.numeric()*cov.factor, unit.ready, "kg/m^2") - + as.numeric() * cov.factor, unit.ready, "kg/m^2") } else if (type == "*_ROOT_BIOMASS") { Rootini <- - PEcAn.utils::ud_convert(Gdf$DATAVALUE[1]%>% - as.numeric()*cov.factor, unit.ready, "kg/m^2") - + PEcAn.utils::ud_convert(Gdf$DATAVALUE[1] %>% + as.numeric() * cov.factor, unit.ready, "kg/m^2") } return( data.frame( @@ -162,12 +165,13 @@ Read.IC.info.BADM <-function(lat, long){ litter_carbon_content = litterIni ) ) - }) - + } + ) + - #cleaning -ind <- apply(entries[,5:7], 1, function(x) all(is.na(x))) -entries <- entries[-which(ind),] + # cleaning + ind <- apply(entries[, 5:7], 1, function(x) all(is.na(x))) + entries <- entries[-which(ind), ] @@ -186,39 +190,44 @@ entries <- entries[-which(ind),] #' @return a dataframe with file, host, mimetype, formatname, startdate, enddate and dbfile.name columns #' @export #' -netcdf.writer.BADM <- function(lat, long, siteid, outdir, ens){ - - - #Reading in the BADM data - entries <- Read.IC.info.BADM (lat, long) - - #-- +netcdf.writer.BADM <- function(lat, long, siteid, outdir, ens) { + # Reading in the BADM data + entries <- Read.IC.info.BADM(lat, long) + + #-- input <- list() - dims <- list(lat = lat , - lon = long, - time = 1) - + dims <- list( + lat = lat, + lon = long, + time = 1 + ) + PWI <- entries$AGB[!is.na(entries$AGB)] LIn <- entries$litter_carbon_content[!is.na(entries$litter_carbon_content)] SIn <- entries$soil_organic_carbon_content[!is.na(entries$soil_organic_carbon_content)] - - - variables <- list(SoilMoistFrac = 1, - SWE = 0) - - if (length(PWI) > 0) + + + variables <- list( + SoilMoistFrac = 1, + SWE = 0 + ) + + if (length(PWI) > 0) { variables <- - c(variables, wood_carbon_content = sample(PWI, 1, replace = T)) - if (length(LIn) > 0) + c(variables, wood_carbon_content = sample(PWI, 1, replace = T)) + } + if (length(LIn) > 0) { variables <- - c(variables, litter_carbon_content = sample(LIn, 1, replace = T)) - if (length(SIn) > 0) + c(variables, litter_carbon_content = sample(LIn, 1, replace = T)) + } + if (length(SIn) > 0) { variables <- - c(variables, soil_organic_carbon_content = sample(SIn, 1, replace = T)) - + c(variables, soil_organic_carbon_content = sample(SIn, 1, replace = T)) + } + input$dims <- dims input$vals <- variables - + return(pool_ic_list2netcdf( input = input, @@ -239,31 +248,32 @@ netcdf.writer.BADM <- function(lat, long, siteid, outdir, ens){ #' @return a list of paths to generated and stored IC files. #' @export #' -BADM_IC_process <- function(settings, dir, overwrite=TRUE){ +BADM_IC_process <- function(settings, dir, overwrite = TRUE) { # create site info. 
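# The pipeline below collapses per-site settings into parallel id/lat/lon vectors.
# A standalone sketch of that reshaping (site ids and coordinates are hypothetical):
library(dplyr)
sites <- list(
  list(id = "1001", lat = "45.81", lon = "-90.08"),
  list(id = "1002", lat = "42.53", lon = "-72.19")
)
new.site <- sites %>%
  purrr::map(function(s) list(id = s$id, lat = as.numeric(s$lat), lon = as.numeric(s$lon))) %>%
  dplyr::bind_rows() %>%
  as.list()
# new.site$lat is c(45.81, 42.53); element i of each vector is then passed to netcdf.writer.BADM()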
- new.site <- - settings %>% - purrr::map(~.x[['run']] ) %>% - purrr::map('site')%>% - purrr::map(function(site.list){ - #conversion from string to number + new.site <- + settings %>% + purrr::map(~ .x[["run"]]) %>% + purrr::map("site") %>% + purrr::map(function(site.list) { + # conversion from string to number site.list$lat <- as.numeric(site.list$lat) site.list$lon <- as.numeric(site.list$lon) - list(id=site.list$id, lat=site.list$lat, lon=site.list$lon) - })%>% - dplyr::bind_rows() %>% + list(id = site.list$id, lat = site.list$lat, lon = site.list$lon) + }) %>% + dplyr::bind_rows() %>% as.list() out.ense <- seq_len(settings$ensemble$size) %>% purrr::map(~ netcdf.writer.BADM(new.site$lat, - new.site$lon, - new.site$id, - outdir=dir, - ens=.x)) - + new.site$lon, + new.site$id, + outdir = dir, + ens = .x + )) + out.ense <- out.ense %>% stats::setNames(rep("path", length(out.ense))) - + return(out.ense) } @@ -272,27 +282,29 @@ BADM_IC_process <- function(settings, dir, overwrite=TRUE){ #' @param Lat numeric latitude #' @param Lon numeric longitude #' @param folder.path path to the directory where you store the shape files of L1 and L2 ecoregion maps. -#' @description This function is designed to find the level1 and level2 code ecoregions for a given lat and long. +#' @description This function is designed to find the level1 and level2 code ecoregions for a given lat and long. #' You can learn more about ecoregions here: \url{https://www.epa.gov/eco-research/ecoregions}. #' #' @return a dataframe with codes corresponding to level1 and level2 codes as two columns #' @export #' -EPA_ecoregion_finder <- function(Lat, Lon, folder.path = NULL){ - #if we don't specify the folder path to the shapefiles. - #try eco-region map for just CONUS US. +EPA_ecoregion_finder <- function(Lat, Lon, folder.path = NULL) { + # if we don't specify the folder path to the shapefiles. + # try eco-region map for just CONUS US. if (is.null(folder.path)) { - file.paths <- system.file("extdata",c("eco-region.json", "eco-regionl2.json"), package = "PEcAn.data.land") + file.paths <- system.file("extdata", c("eco-region.json", "eco-regionl2.json"), package = "PEcAn.data.land") } else { - #if we have pre-downloaded shapefiles for the ecoregion map. + # if we have pre-downloaded shapefiles for the ecoregion map. file.paths <- file.path(folder.path, c("NA_CEC_Eco_Level1.shp", "NA_CEC_Eco_Level2.shp")) } - #lat long to spatial point + # lat long to spatial point U.S.SB.sp <- - data.frame(Lati = Lat %>% as.numeric(), - Long = Lon %>% as.numeric()) - + data.frame( + Lati = Lat %>% as.numeric(), + Long = Lon %>% as.numeric() + ) + sp::coordinates(U.S.SB.sp) <- ~ Long + Lati # L1 layer L1 <- @@ -301,10 +313,10 @@ EPA_ecoregion_finder <- function(Lat, Lon, folder.path = NULL){ "+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +a=6370997 +b=6370997 +units=m +no_defs" ) %>% sf::st_transform("+proj=longlat +datum=WGS84") - #reproject points based on projection of the eco-region map. + # reproject points based on projection of the eco-region map. 
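# A usage sketch of the ecoregion lookup described above (coordinates are illustrative
# and the codes shown in comments are examples, not checked values); with
# folder.path = NULL the GeoJSON maps bundled with PEcAn.data.land cover CONUS only:
eco <- PEcAn.data.land::EPA_ecoregion_finder(Lat = 45.81, Lon = -90.08)
eco$L1   # level-1 ecoregion code, e.g. "5"
eco$L2   # level-2 ecoregion code, e.g. "5.2"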
sp::proj4string(U.S.SB.sp) <- sp::proj4string(sf::as_Spatial(L1)) # finding the code for each site - over.out.L1 <- sp::over(U.S.SB.sp, sf::as_Spatial(L1)) + over.out.L1 <- sp::over(U.S.SB.sp, sf::as_Spatial(L1)) # L2 layer L2 <- sf::read_sf(file.paths[2]) %>% @@ -316,5 +328,3 @@ EPA_ecoregion_finder <- function(Lat, Lon, folder.path = NULL){ over.out.L2 <- sp::over(U.S.SB.sp, sf::as_Spatial(L2)) return(data.frame(L1 = over.out.L1$NA_L1CODE, L2 = over.out.L2$NA_L2CODE)) } - - diff --git a/modules/data.land/R/ISCN_extract.R b/modules/data.land/R/ISCN_extract.R index 88d621a10e4..c75c85e9dd8 100644 --- a/modules/data.land/R/ISCN_extract.R +++ b/modules/data.land/R/ISCN_extract.R @@ -6,14 +6,14 @@ #' #' @return A data frame containing sampled SOC, each row represent each site. #' @export -#' +#' #' @examples #' @author Dongchen Zhang #' @importFrom dplyr %>% IC_ISCN_SOC <- function(site_info, ens = 100, ecoregion.path = NULL) { iscn_soc <- PEcAn.data.land::iscn_soc site_eco <- PEcAn.data.land::EPA_ecoregion_finder(site_info$lat, site_info$lon, ecoregion.path) - soc <- iscn_soc[,which(colnames(iscn_soc) %in% site_eco$L2)] + soc <- iscn_soc[, which(colnames(iscn_soc) %in% site_eco$L2)] ic_sample_soc <- data.frame(matrix(NA, ens, length(site_info$site_id))) %>% `colnames<-`(site_info$site_id) for (i in seq_along(site_eco$L2)) { @@ -23,7 +23,7 @@ IC_ISCN_SOC <- function(site_info, ens = 100, ecoregion.path = NULL) { if (!site_eco$L2[i] %in% colnames(soc)) { next } - ic_sample_soc[,i] <- PEcAn.utils::ud_convert(sample(soc[,site_eco$L2[i]], ens, replace = T), "g cm-2", "kg m-2") + ic_sample_soc[, i] <- PEcAn.utils::ud_convert(sample(soc[, site_eco$L2[i]], ens, replace = T), "g cm-2", "kg m-2") } return(ic_sample_soc) -} \ No newline at end of file +} diff --git a/modules/data.land/R/InventoryGrowthFusion.R b/modules/data.land/R/InventoryGrowthFusion.R index 99b93299d8d..14e314d854e 100644 --- a/modules/data.land/R/InventoryGrowthFusion.R +++ b/modules/data.land/R/InventoryGrowthFusion.R @@ -3,7 +3,7 @@ #' this code fuses forest inventory data with tree growth data (tree ring or #' dendrometer band) for the same plots. 
Code is a rewrite of Clark et al 2007 #' Ecol Appl into JAGS -#' +#' #' @note Requires JAGS #' #' @param data list of data inputs @@ -27,8 +27,7 @@ #' #' @return an mcmc.list object #' @export -InventoryGrowthFusion <- function( - data, +InventoryGrowthFusion <- function(data, cov.data = NULL, time_data = NULL, n.iter = 5000, @@ -42,29 +41,30 @@ InventoryGrowthFusion <- function( z0 = NULL, save.state = TRUE, restart = NULL) { - # baseline variables to monitor burnin.variables <- c("tau_add", "tau_dbh", "tau_inc", "mu") # process variability, dbh and tree-ring observation error, intercept out.variables <- c("deviance", "tau_add", "tau_dbh", "tau_inc", "mu") # if(save.state) out.variables <- c(out.variables,"x") - if(!exists("model")) model = 0 - + if (!exists("model")) model <- 0 + ## restart - if(length(n.chunk)>1){ - k_restart = n.chunk[2] - n.chunk = n.chunk[1] + if (length(n.chunk) > 1) { + k_restart <- n.chunk[2] + n.chunk <- n.chunk[1] } else { - k_restart = 1 + k_restart <- 1 } - max.chunks <- ceiling(n.iter/n.chunk) - if(max.chunks < k_restart){ - PEcAn.logger::logger.warn("MCMC already complete",max.chunks,k_restart) + max.chunks <- ceiling(n.iter / n.chunk) + if (max.chunks < k_restart) { + PEcAn.logger::logger.warn("MCMC already complete", max.chunks, k_restart) return(NULL) } - avail.chunks <- k_restart:ceiling(n.iter/n.chunk) - - check.dup.data <- function(data,loc){ - if(any(duplicated(names(data)))){PEcAn.logger::logger.error("duplicated variable at",loc,names(data))} + avail.chunks <- k_restart:ceiling(n.iter / n.chunk) + + check.dup.data <- function(data, loc) { + if (any(duplicated(names(data)))) { + PEcAn.logger::logger.error("duplicated variable at", loc, names(data)) + } } # start text object that will be manipulated (to build different linear models, swap in/out covariates) TreeDataFusionMV <- " @@ -72,30 +72,30 @@ model{ ### Loop over all individuals for(i in 1:ni){ - + #### Data Model: DBH for(t in 1:nt){ z[i,t] ~ dnorm(x[i,t],tau_dbh) } - + #### Data Model: growth for(t in 2:nt){ inc[i,t] <- x[i,t]-x[i,t-1] y[i,t] ~ dnorm(inc[i,t],tau_inc) } - + #### Process Model for(t in 2:nt){ Dnew[i,t] <- x[i,t-1] + mu ##PROCESS x[i,t]~dnorm(Dnew[i,t],tau_add) } - + ## initial condition x[i,1] ~ dnorm(x_ic,tau_ic) } ## end loop over individuals ## RANDOM_EFFECTS - + #### Priors tau_dbh ~ dgamma(a_dbh,r_dbh) tau_inc ~ dgamma(a_inc,r_inc) @@ -106,7 +106,7 @@ model{ ## TIME VARYING BETAS ## RANDOM EFFECT TAUS }" - + Pformula <- NULL ######################################################################## ### @@ -117,70 +117,73 @@ model{ Rpriors <- NULL Reffects <- NULL ## parse random effects - r_vars <- gsub(" ","",unlist(strsplit(random,"+",fixed=TRUE))) ## split on +, remove whitespace - for(i in seq_along(r_vars)){ + r_vars <- gsub(" ", "", unlist(strsplit(random, "+", fixed = TRUE))) ## split on +, remove whitespace + for (i in seq_along(r_vars)) { ## special case: individidual - if(r_vars[i] == "i"){ - r_var <- "i" + if (r_vars[i] == "i") { + r_var <- "i" counter <- "" - index <- "i" + index <- "i" nr <- nrow(cov.data) - } else if(r_vars[i] == "t"){ - r_var <- "t" + } else if (r_vars[i] == "t") { + r_var <- "t" counter <- "" - index <- "t" + index <- "t" nr <- ncol(cov.data) } else { index <- counter <- nr <- NA - r_var <- gsub("(","",gsub(")","",r_vars[i],fixed = TRUE),fixed="TRUE") - r_var <- strsplit(r_var,"|",fixed=TRUE)[[1]] - fix <- r_var[1] + r_var <- gsub("(", "", gsub(")", "", r_vars[i], fixed = TRUE), fixed = "TRUE") + r_var <- strsplit(r_var, "|", fixed = 
TRUE)[[1]] + fix <- r_var[1] ## check for nested effects - r_var <- strsplit(gsub("\\",":",r_var[2],fixed=TRUE),":",fixed = TRUE)[[1]] - for(j in seq_along(length(r_var))){ - if(j>1)print("WARNING: not actually nesting random effects at this time") ## HACK: to get started, not actually nesting + r_var <- strsplit(gsub("\\", ":", r_var[2], fixed = TRUE), ":", fixed = TRUE)[[1]] + for (j in seq_along(length(r_var))) { + if (j > 1) print("WARNING: not actually nesting random effects at this time") ## HACK: to get started, not actually nesting ## parse - j_var <- strsplit(r_var[j],"[",fixed = TRUE)[[1]] - index[j] <- gsub("]","",j_var[2],fixed=TRUE) + j_var <- strsplit(r_var[j], "[", fixed = TRUE)[[1]] + index[j] <- gsub("]", "", j_var[2], fixed = TRUE) counter[j] <- j_var[1] - r_var[j] <- j_var[1] + r_var[j] <- j_var[1] ## add variable to data - if(!(r_var[j] %in% names(data))){ - data[[length(data)+1]] <- as.numeric(as.factor(as.character(cov.data[,r_var[j]]))) ## multiple conversions to eliminate gaps + if (!(r_var[j] %in% names(data))) { + data[[length(data) + 1]] <- as.numeric(as.factor(as.character(cov.data[, r_var[j]]))) ## multiple conversions to eliminate gaps names(data)[length(data)] <- r_var[j] } - check.dup.data(data,"r_var") + check.dup.data(data, "r_var") nr[j] <- max(as.numeric(data[[r_var[j]]])) } - index <- paste0("[",index,"]") + index <- paste0("[", index, "]") } ## create formula - Pformula <- paste(Pformula, - paste0("+ alpha_", r_var,"[",counter,index,"]")) + Pformula <- paste( + Pformula, + paste0("+ alpha_", r_var, "[", counter, index, "]") + ) ## create random effect - for(j in seq_along(nr)){ - Reffects <- paste(Reffects, - paste0("for(k in 1:",nr[j],"){\n"), - paste0(" alpha_",r_var[j],"[k] ~ dnorm(0,tau_",r_var[j],")\n}\n")) + for (j in seq_along(nr)) { + Reffects <- paste( + Reffects, + paste0("for(k in 1:", nr[j], "){\n"), + paste0(" alpha_", r_var[j], "[k] ~ dnorm(0,tau_", r_var[j], ")\n}\n") + ) } ## create priors - Rpriors <- paste(Rpriors,paste0("tau_",r_var," ~ dgamma(1,0.1)\n",collapse = " ")) + Rpriors <- paste(Rpriors, paste0("tau_", r_var, " ~ dgamma(1,0.1)\n", collapse = " ")) ## track burnin.variables <- c(burnin.variables, paste0("tau_", r_var)) - out.variables <- c(out.variables, paste0("tau_", r_var), paste0("alpha_",r_var)) - + out.variables <- c(out.variables, paste0("tau_", r_var), paste0("alpha_", r_var)) } ## Substitute into code TreeDataFusionMV <- sub(pattern = "## RANDOM EFFECT TAUS", Rpriors, TreeDataFusionMV) TreeDataFusionMV <- gsub(pattern = "## RANDOM_EFFECTS", Reffects, TreeDataFusionMV) - } ### END RANDOM EFFECTS - + } ### END RANDOM EFFECTS + ######################################################################## ### ### FIXED EFFECTS ### ######################################################################## - if(FALSE){ + if (FALSE) { ## DEV TESTING FOR X, polynomial X, and X interactions fixed <- "X + X^3 + X*bob + bob + dia + X*Tmin[t]" ## faux model, just for testing jags code } @@ -188,121 +191,119 @@ model{ if (is.null(fixed)) { Xf <- NULL } else { - ## check for covariate data (note: will falsely fail if only effect is X) if (is.null(cov.data)) { print("formula provided but covariate data is absent:", fixed) } else { cov.data <- as.data.frame(cov.data) } - + ## check if there's a tilda in the formula if (length(grep("~", fixed)) == 0) { fixed <- paste("~", fixed) } - - ### BEGIN adding in tree size (endogenous variable X) + + ### BEGIN adding in tree size (endogenous variable X) ## First deal with endogenous terms (X and 
X*cov interactions) - fixedX <- sub("~","",fixed, fixed=TRUE) - lm.terms <- gsub("[[:space:]]", "", strsplit(fixedX,split = "+",fixed=TRUE)[[1]]) ## split on + and remove whitespace - X.terms <- strsplit(lm.terms,split = c("^"),fixed = TRUE) - X.terms <- sapply(X.terms,function(str){unlist(strsplit(str,,split="*",fixed=TRUE))}) - X.terms <- which(sapply(X.terms,function(x){any(toupper(x) == "X")})) - if(length(X.terms) > 0){ + fixedX <- sub("~", "", fixed, fixed = TRUE) + lm.terms <- gsub("[[:space:]]", "", strsplit(fixedX, split = "+", fixed = TRUE)[[1]]) ## split on + and remove whitespace + X.terms <- strsplit(lm.terms, split = c("^"), fixed = TRUE) + X.terms <- sapply(X.terms, function(str) { + unlist(strsplit(str, , split = "*", fixed = TRUE)) + }) + X.terms <- which(sapply(X.terms, function(x) { + any(toupper(x) == "X") + })) + if (length(X.terms) > 0) { ## rebuild fixed without X.terms - fixed <- paste("~",paste(lm.terms[-X.terms],collapse = " + ")) - + fixed <- paste("~", paste(lm.terms[-X.terms], collapse = " + ")) + ## isolate terms with X X.terms <- lm.terms[X.terms] Xpriors <- NULL - for(i in seq_along(X.terms)){ - + for (i in seq_along(X.terms)) { myBeta <- NULL Xformula <- NULL - if(length(grep("*",X.terms[i],fixed = TRUE)) == 1){ ## INTERACTION - + if (length(grep("*", X.terms[i], fixed = TRUE)) == 1) { ## INTERACTION + myIndex <- "[i]" - covX <- strsplit(X.terms[i],"*",fixed=TRUE)[[1]] - covX <- covX[-which(toupper(covX)=="X")] ## remove X from terms - - ##is covariate fixed or time varying? - tvar <- length(grep("[t]",covX,fixed=TRUE)) > 0 - if(tvar){ - covX <- sub("[t]","",covX,fixed = TRUE) - if(!(covX %in% names(data))){ + covX <- strsplit(X.terms[i], "*", fixed = TRUE)[[1]] + covX <- covX[-which(toupper(covX) == "X")] ## remove X from terms + + ## is covariate fixed or time varying? 
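# Illustrative sketch of the string assembly in this branch (standalone; the covariate
# name "Tmin" is hypothetical): an interaction term such as "X*Tmin[t]" is turned into
# a beta name plus a JAGS expression that gets appended to Pformula.
demo_beta <- paste0("betaX_", "Tmin")
demo_term <- paste0(demo_beta, "*x[i,t-1]*", "Tmin", "[i,t]")
demo_term   # "betaX_Tmin*x[i,t-1]*Tmin[i,t]"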
+ tvar <- length(grep("[t]", covX, fixed = TRUE)) > 0 + if (tvar) { + covX <- sub("[t]", "", covX, fixed = TRUE) + if (!(covX %in% names(data))) { ## add cov variables to data object data[[covX]] <- time_data[[covX]] } - check.dup.data(data,"covX") + check.dup.data(data, "covX") myIndex <- "[i,t]" } else { ## variable is fixed - if(covX %in% colnames(cov.data)){ ## covariate present - if(!(covX %in% names(data))){ + if (covX %in% colnames(cov.data)) { ## covariate present + if (!(covX %in% names(data))) { ## add cov variables to data object - data[[covX]] <- cov.data[,covX] + data[[covX]] <- cov.data[, covX] } - check.dup.data(data,"covX2") - + check.dup.data(data, "covX2") } else { ## covariate absent print("covariate absent from covariate data:", covX) } - } ## end fixed or time varying - - myBeta <- paste0("betaX_",covX) - Xformula <- paste0(myBeta,"*x[i,t-1]*",covX,myIndex) - - } else if(length(grep("^",X.terms[i],fixed=TRUE))==1){ ## POLYNOMIAL - powX <- strsplit(X.terms[i],"^",fixed=TRUE)[[1]] - powX <- powX[-which(toupper(powX)=="X")] ## remove X from terms - myBeta <- paste0("betaX",powX) - Xformula <- paste0(myBeta,"*x[i,t-1]^",powX) - - } else { ## JUST X + + myBeta <- paste0("betaX_", covX) + Xformula <- paste0(myBeta, "*x[i,t-1]*", covX, myIndex) + } else if (length(grep("^", X.terms[i], fixed = TRUE)) == 1) { ## POLYNOMIAL + powX <- strsplit(X.terms[i], "^", fixed = TRUE)[[1]] + powX <- powX[-which(toupper(powX) == "X")] ## remove X from terms + myBeta <- paste0("betaX", powX) + Xformula <- paste0(myBeta, "*x[i,t-1]^", powX) + } else { ## JUST X myBeta <- "betaX" - Xformula <- paste0(myBeta,"*x[i,t-1]") + Xformula <- paste0(myBeta, "*x[i,t-1]") } - + ## add variables to Pformula - Pformula <- paste(Pformula,"+",Xformula) - + Pformula <- paste(Pformula, "+", Xformula) + ## add priors - Xpriors <- paste(Xpriors," ",myBeta,"~dnorm(0,0.001)\n") - + Xpriors <- paste(Xpriors, " ", myBeta, "~dnorm(0,0.001)\n") + ## add to out.variables out.variables <- c(out.variables, myBeta) - - } ## END LOOP OVER X TERMS - + } ## END LOOP OVER X TERMS + ## create priors TreeDataFusionMV <- sub(pattern = "## ENDOGENOUS BETAS", Xpriors, TreeDataFusionMV) - - } ## end processing of X terms - + } ## end processing of X terms + ## build design matrix from formula - Xf <- with(cov.data, model.matrix(formula(fixed))) + Xf <- with(cov.data, model.matrix(formula(fixed))) Xf.cols <- colnames(Xf) - Xf.cols <- sub(":","_",Xf.cols) ## for interaction terms, switch separator + Xf.cols <- sub(":", "_", Xf.cols) ## for interaction terms, switch separator colnames(Xf) <- Xf.cols Xf.cols <- Xf.cols[Xf.cols != "(Intercept)"] - Xf <- as.matrix(Xf[, Xf.cols]) + Xf <- as.matrix(Xf[, Xf.cols]) colnames(Xf) <- Xf.cols - ##Center the covariate data + ## Center the covariate data Xf.center <- apply(Xf, 2, mean, na.rm = TRUE) - Xf <- t(t(Xf) - Xf.center) - } ## end fixed effects parsing - + Xf <- t(t(Xf) - Xf.center) + } ## end fixed effects parsing + ## build formula in JAGS syntax if (!is.null(Xf)) { - Xf.names <- gsub(" ", "_", colnames(Xf)) ## JAGS doesn't like spaces in variable names + Xf.names <- gsub(" ", "_", colnames(Xf)) ## JAGS doesn't like spaces in variable names ## append to process model formula - Pformula <- paste(Pformula, - paste0("+ beta", Xf.names, "*Xf[rep[i],", seq_along(Xf.names), "]", collapse = " ")) + Pformula <- paste( + Pformula, + paste0("+ beta", Xf.names, "*Xf[rep[i],", seq_along(Xf.names), "]", collapse = " ") + ) ## create 'rep' variable if not defined - if(is.null(data$rep)){ + if 
(is.null(data$rep)) { data$rep <- seq_len(nrow(Xf)) } ## create priors @@ -312,195 +313,204 @@ model{ data[["Xf"]] <- Xf out.variables <- c(out.variables, paste0("beta", Xf.names)) } - - check.dup.data(data,"Xf") + + check.dup.data(data, "Xf") ######################################################################## ### ### TIME-VARYING ### ######################################################################## - - if(FALSE){ # always false...just for development + + if (FALSE) { # always false...just for development ## DEVEL TESTING FOR TIME VARYING - #time_varying <- "TminJuly + PrecipDec + TminJuly*PrecipDec" - time_varying <- "tmax_Jun + ppt_Dec + tmax_Jun*ppt_Dec" - time_data <- list(TminJuly = matrix(0,4,4),PrecipDec = matrix(1,4,4)) + # time_varying <- "TminJuly + PrecipDec + TminJuly*PrecipDec" + time_varying <- "tmax_Jun + ppt_Dec + tmax_Jun*ppt_Dec" + time_data <- list(TminJuly = matrix(0, 4, 4), PrecipDec = matrix(1, 4, 4)) } - - if(!is.null(time_varying)){ + + if (!is.null(time_varying)) { if (is.null(time_data)) { PEcAn.logger::logger.error("time_varying formula provided but time_data is absent:", time_varying) } Xt.priors <- "" - + ## parse equation into variable names - t_vars <- gsub(" ","",unlist(strsplit(time_varying,"+",fixed=TRUE))) ## split on +, remove whitespace + t_vars <- gsub(" ", "", unlist(strsplit(time_varying, "+", fixed = TRUE))) ## split on +, remove whitespace ## check for interaction terms - it_vars <- t_vars[grep(pattern = "*",x=t_vars,fixed = TRUE)] - if(length(it_vars) > 0){ + it_vars <- t_vars[grep(pattern = "*", x = t_vars, fixed = TRUE)] + if (length(it_vars) > 0) { t_vars <- t_vars[!(t_vars %in% it_vars)] - } - + } + ## INTERACTIONS WITH TIME-VARYING VARS ## TODO: deal with interactions with catagorical variables ## need to create new data matrices on the fly - for(i in seq_along(it_vars)){ - - ##is covariate fixed or time varying? - covX <- strsplit(it_vars[i],"*",fixed=TRUE)[[1]] - tvar <- length(grep("[t]",covX[1],fixed=TRUE)) > 0 - tvar[2] <- length(grep("[t]",covX[2],fixed=TRUE)) > 0 + for (i in seq_along(it_vars)) { + ## is covariate fixed or time varying? 
+ covX <- strsplit(it_vars[i], "*", fixed = TRUE)[[1]] + tvar <- length(grep("[t]", covX[1], fixed = TRUE)) > 0 + tvar[2] <- length(grep("[t]", covX[2], fixed = TRUE)) > 0 myBeta <- "beta" - for(j in 1:2){ - if(j == 2) myBeta <- paste0(myBeta,"_") - if(tvar[j]){ - covX[j] <- sub("[t]","",covX[j],fixed = TRUE) - if(!(covX[j] %in% names(data))){ + for (j in 1:2) { + if (j == 2) myBeta <- paste0(myBeta, "_") + if (tvar[j]) { + covX[j] <- sub("[t]", "", covX[j], fixed = TRUE) + if (!(covX[j] %in% names(data))) { ## add cov variables to data object data[[covX[j]]] <- time_data[[covX[j]]] } - myBeta <- paste0(myBeta,covX[j]) - covX[j] <- paste0(covX[j],"[i,t]") + myBeta <- paste0(myBeta, covX[j]) + covX[j] <- paste0(covX[j], "[i,t]") } else { ## variable is fixed - if(!(covX[j] %in% names(data))){ + if (!(covX[j] %in% names(data))) { ## add cov variables to data object - data[[covX[j]]] <- cov.data[,covX[j]] + data[[covX[j]]] <- cov.data[, covX[j]] } - myBeta <- paste0(myBeta,covX[j]) - covX[j] <- paste0(covX[j],"[i]") + myBeta <- paste0(myBeta, covX[j]) + covX[j] <- paste0(covX[j], "[i]") } ## end fixed or time varying - } ## end building beta - + ## append to process model formula - Pformula <- paste(Pformula, - paste0(" + ",myBeta,"*",covX[1],"*",covX[2])) - + Pformula <- paste( + Pformula, + paste0(" + ", myBeta, "*", covX[1], "*", covX[2]) + ) + ## priors - Xt.priors <- paste0(Xt.priors, - " ",myBeta,"~dnorm(0,0.001)\n") - + Xt.priors <- paste0( + Xt.priors, + " ", myBeta, "~dnorm(0,0.001)\n" + ) + ## add to list of varibles JAGS is tracking out.variables <- c(out.variables, myBeta) - - } ## end time-varying interaction terms - - + } ## end time-varying interaction terms + + ## loop over variables - for(j in seq_along(t_vars)){ + for (j in seq_along(t_vars)) { tvar <- t_vars[j] - - if(!(tvar %in% names(data))){ + + if (!(tvar %in% names(data))) { ## add cov variables to data object data[[tvar]] <- time_data[[tvar]] } - check.dup.data(data,"tvar") + check.dup.data(data, "tvar") ## append to process model formula - Pformula <- paste(Pformula, - paste0("+ beta", tvar, "*",tvar,"[i,t]")) - + Pformula <- paste( + Pformula, + paste0("+ beta", tvar, "*", tvar, "[i,t]") + ) + ## add to list of varibles JAGS is tracking out.variables <- c(out.variables, paste0("beta", tvar)) } ## build prior - Xt.priors <- paste0(Xt.priors, - paste0(" beta", t_vars, "~dnorm(0,0.001)", collapse = "\n") + Xt.priors <- paste0( + Xt.priors, + paste0(" beta", t_vars, "~dnorm(0,0.001)", collapse = "\n") ) TreeDataFusionMV <- sub(pattern = "## TIME VARYING BETAS", Xt.priors, TreeDataFusionMV) - } ## END time varying covariates - - + + ## insert process model into JAGS template if (!is.null(Pformula)) { TreeDataFusionMV <- sub(pattern = "##PROCESS", Pformula, TreeDataFusionMV) } - + ## Save script - if(!is.null(save.jags)){ - cat(TreeDataFusionMV,file=save.jags) + if (!is.null(save.jags)) { + cat(TreeDataFusionMV, file = save.jags) } - + ## state variable initial condition - if(is.null(z0)){ + if (is.null(z0)) { z0 <- t(apply(data$y, 1, function(y) { -rev(cumsum(rev(y))) })) + data$z[, ncol(data$z)] } - + ## JAGS initial conditions - init <- list() - if(coda::is.mcmc.list(restart)){ + init <- list() + if (coda::is.mcmc.list(restart)) { init <- PEcAn.utils::mcmc.list2init(restart) nchain <- length(init) } else { nchain <- 3 for (i in seq_len(nchain)) { y.samp <- sample(data$y, length(data$y), replace = TRUE) - init[[i]] <- list(x = z0, - tau_add = stats::runif(1, 1, 5) / stats::var(diff(y.samp), na.rm = TRUE), - tau_dbh = 1, 
- tau_inc = 1500, - tau_ind = 50, - tau_yr = 100, - betaX2 = 0, - ind = rep(0, data$ni), - year = rep(0, data$nt)) + init[[i]] <- list( + x = z0, + tau_add = stats::runif(1, 1, 5) / stats::var(diff(y.samp), na.rm = TRUE), + tau_dbh = 1, + tau_inc = 1500, + tau_ind = 50, + tau_yr = 100, + betaX2 = 0, + ind = rep(0, data$ni), + year = rep(0, data$nt) + ) } } - - + + PEcAn.logger::logger.info("COMPILE JAGS MODEL") j.model <- rjags::jags.model(file = textConnection(TreeDataFusionMV), data = data, inits = init, n.chains = 3) - - if(n.burn > 0){ + + if (n.burn > 0) { PEcAn.logger::logger.info("BURN IN") - jags.out <- rjags::coda.samples(model = j.model, - variable.names = burnin.variables, - n.iter = n.burn) + jags.out <- rjags::coda.samples( + model = j.model, + variable.names = burnin.variables, + n.iter = n.burn + ) if (burnin_plot) { plot(jags.out) } } - + PEcAn.logger::logger.info("RUN MCMC") rjags::load.module("dic") - for(k in avail.chunks){ - + for (k in avail.chunks) { ## determine whether to sample states - if(as.logical(save.state) & k%%as.numeric(save.state) == 0){ - vnames <- c("x",out.variables) ## save x periodically + if (as.logical(save.state) & k %% as.numeric(save.state) == 0) { + vnames <- c("x", out.variables) ## save x periodically } else { vnames <- out.variables } - + ## sample chunk jags.out <- rjags::coda.samples(model = j.model, variable.names = vnames, n.iter = n.chunk) - + ## save chunk - ofile <- paste("IGF",model,k,"RData",sep=".") + ofile <- paste("IGF", model, k, "RData", sep = ".") print(ofile) - save(jags.out,file=ofile) - + save(jags.out, file = ofile) + ## update restart - if(!is.null(restart) & ((is.logical(restart) && restart) || coda::is.mcmc.list(restart))){ - ofile <- paste("IGF",model,"RESTART.RData",sep=".") - jags.final <- rjags::coda.samples(model = j.model, variable.names = c("x",out.variables), n.iter = 1) - k_restart = k + 1 ## finished k, so would restart at k+1 - save(jags.final,k_restart,file=ofile) + if (!is.null(restart) & ((is.logical(restart) && restart) || coda::is.mcmc.list(restart))) { + ofile <- paste("IGF", model, "RESTART.RData", sep = ".") + jags.final <- rjags::coda.samples(model = j.model, variable.names = c("x", out.variables), n.iter = 1) + k_restart <- k + 1 ## finished k, so would restart at k+1 + save(jags.final, k_restart, file = ofile) } - + ## check for convergence and break from loop early - D <- coda::as.mcmc.list(lapply(jags.out,function(x){x[,'deviance']})) - gbr <- coda::gelman.diag(D)$psrf[1,1] - trend <- mean(sapply(D,function(x){stats::coef(stats::lm(x~seq_len(n.chunk)))[2]})) - if(gbr < 1.005 & abs(trend) < 0.5) break + D <- coda::as.mcmc.list(lapply(jags.out, function(x) { + x[, "deviance"] + })) + gbr <- coda::gelman.diag(D)$psrf[1, 1] + trend <- mean(sapply(D, function(x) { + stats::coef(stats::lm(x ~ seq_len(n.chunk)))[2] + })) + if (gbr < 1.005 & abs(trend) < 0.5) break } - + return(jags.out) } # InventoryGrowthFusion - diff --git a/modules/data.land/R/InventoryGrowthFusionDiagnostics.R b/modules/data.land/R/InventoryGrowthFusionDiagnostics.R index 21d4114c92e..e097e29b3d9 100644 --- a/modules/data.land/R/InventoryGrowthFusionDiagnostics.R +++ b/modules/data.land/R/InventoryGrowthFusionDiagnostics.R @@ -3,46 +3,49 @@ ##' @param jags.out output mcmc.list from InventoryGrowthFusion ##' @param combined data output from matchInventoryRings ##' @author Michael Dietze -##' @export -InventoryGrowthFusionDiagnostics <- function(jags.out, combined=NULL) { - - out <- as.matrix(jags.out) - x.cols <- 
which(substr(colnames(out), 1, 1) == "x") - if(length(x.cols) > 0){ - ci <- apply(out[, x.cols], 2, stats::quantile, c(0.025, 0.5, 0.975)) +##' @export +InventoryGrowthFusionDiagnostics <- function(jags.out, combined = NULL) { + out <- as.matrix(jags.out) + x.cols <- which(substr(colnames(out), 1, 1) == "x") + if (length(x.cols) > 0) { + ci <- apply(out[, x.cols], 2, stats::quantile, c(0.025, 0.5, 0.975)) ci.names <- parse.MatrixNames(colnames(ci), numeric = TRUE) - + ### DBH par(mfrow=c(3,2)) - if(length(x.cols) > 0){ + if (length(x.cols) > 0) { graphics::layout(matrix(1:8, 4, 2, byrow = TRUE)) - ci <- apply(out[, x.cols], 2, stats::quantile, c(0.025, 0.5, 0.975)) + ci <- apply(out[, x.cols], 2, stats::quantile, c(0.025, 0.5, 0.975)) ci.names <- parse.MatrixNames(colnames(ci), numeric = TRUE) - + smp <- sample.int(data$ni, min(8, data$ni)) for (i in smp) { sel <- which(ci.names$row == i) rng <- c(range(ci[, sel], na.rm = TRUE), range(data$z[i, ], na.rm = TRUE)) - - plot(data$time, ci[2, sel], type = "n", - ylim = range(rng), ylab = "DBH (cm)", main = i) + + plot(data$time, ci[2, sel], + type = "n", + ylim = range(rng), ylab = "DBH (cm)", main = i + ) PEcAn.visualization::ciEnvelope(data$time, ci[1, sel], ci[3, sel], col = "lightBlue") graphics::points(data$time, data$z[i, ], pch = "+", cex = 1.5) # lines(data$time,z0[i,],lty=2) - + ## growth - sel <- which(ci.names$row == i) + sel <- which(ci.names$row == i) inc.mcmc <- apply(out[, x.cols[sel]], 1, diff) - inc.ci <- apply(inc.mcmc, 1, stats::quantile, c(0.025, 0.5, 0.975)) * 5 + inc.ci <- apply(inc.mcmc, 1, stats::quantile, c(0.025, 0.5, 0.975)) * 5 # inc.names = parse.MatrixNames(colnames(ci),numeric=TRUE) - - plot(data$time[-1], inc.ci[2, ], type = "n", - ylim = range(inc.ci, na.rm = TRUE), ylab = "Ring Increment (mm)") + + plot(data$time[-1], inc.ci[2, ], + type = "n", + ylim = range(inc.ci, na.rm = TRUE), ylab = "Ring Increment (mm)" + ) PEcAn.visualization::ciEnvelope(data$time[-1], inc.ci[1, ], inc.ci[3, ], col = "lightBlue") graphics::points(data$time, data$y[i, ] * 5, pch = "+", cex = 1.5, type = "b", lty = 2) } } } - + if (FALSE) { ## check a DBH plot(out[, which(colnames(out) == "x[3,31]")]) @@ -50,54 +53,60 @@ InventoryGrowthFusionDiagnostics <- function(jags.out, combined=NULL) { graphics::hist(out[, which(colnames(out) == "x[3,31]")]) graphics::abline(v = z[3, 31], col = 2, lwd = 2) } - + ## process model - vars <- (1:ncol(out))[-c(which(substr(colnames(out), 1, 1) == "x"), - grep("tau", colnames(out)), - grep("year", colnames(out)), - grep("ind", colnames(out)), - grep("alpha",colnames(out)), - grep("deviance",colnames(out)))] - + vars <- (1:ncol(out))[-c( + which(substr(colnames(out), 1, 1) == "x"), + grep("tau", colnames(out)), + grep("year", colnames(out)), + grep("ind", colnames(out)), + grep("alpha", colnames(out)), + grep("deviance", colnames(out)) + )] + graphics::par(mfrow = c(1, 1)) for (i in vars) { graphics::hist(out[, i], main = colnames(out)[i]) - graphics::abline(v=0,lwd=3) + graphics::abline(v = 0, lwd = 3) } if (length(vars) > 1 && length(vars) < 10) { graphics::pairs(out[, vars]) } - - if("deviance" %in% colnames(out)){ - graphics::hist(out[,"deviance"]) - vars <- c(vars,which(colnames(out)=="deviance")) + + if ("deviance" %in% colnames(out)) { + graphics::hist(out[, "deviance"]) + vars <- c(vars, which(colnames(out) == "deviance")) } - - + + ## rebuild coda for just vars - var.out <- coda::as.mcmc.list(lapply(jags.out,function(x){ x[,vars]})) - + var.out <- coda::as.mcmc.list(lapply(jags.out, function(x) { 
+ x[, vars] + })) + ## convergence coda::gelman.diag(var.out) - + #### Diagnostic plots plot(var.out) - - if("deviance" %in% colnames(out)){ - graphics::hist(out[,"deviance"]) - vars <- c(vars,which(colnames(out)=="deviance")) + + if ("deviance" %in% colnames(out)) { + graphics::hist(out[, "deviance"]) + vars <- c(vars, which(colnames(out) == "deviance")) } - + ## rebuild coda for just vars - var.out <- coda::as.mcmc.list(lapply(jags.out,function(x){ x[,vars]})) - + var.out <- coda::as.mcmc.list(lapply(jags.out, function(x) { + x[, vars] + })) + ## convergence coda::gelman.diag(var.out) - + #### Diagnostic plots plot(var.out) - - + + ## Standard Deviations layout(matrix(c(1,2,3,3),2,2,byrow=TRUE)) graphics::par(mfrow = c(2, 3)) prec <- out[, grep("tau", colnames(out))] @@ -106,44 +115,50 @@ InventoryGrowthFusionDiagnostics <- function(jags.out, combined=NULL) { } stats::cor(prec) # pairs(prec) - + ### alpha graphics::par(mfrow = c(1, 1)) alpha.cols <- grep("alpha", colnames(out)) if (length(alpha.cols) > 0) { alpha.ord <- 1:length(alpha.cols) ci.alpha <- apply(out[, alpha.cols], 2, stats::quantile, c(0.025, 0.5, 0.975)) - plot(alpha.ord, ci.alpha[2, ], type = "n", - ylim = range(ci.alpha, na.rm = TRUE), ylab = "Random Effects") + plot(alpha.ord, ci.alpha[2, ], + type = "n", + ylim = range(ci.alpha, na.rm = TRUE), ylab = "Random Effects" + ) PEcAn.visualization::ciEnvelope(alpha.ord, ci.alpha[1, ], ci.alpha[3, ], col = "lightBlue") graphics::lines(alpha.ord, ci.alpha[2, ], lty = 1, lwd = 2) graphics::abline(h = 0, lty = 2) } - + graphics::par(mfrow = c(1, 1)) ### alpha alpha.cols <- grep("alpha", colnames(out)) if (length(alpha.cols) > 0) { alpha.ord <- 1:length(alpha.cols) ci.alpha <- apply(out[, alpha.cols], 2, stats::quantile, c(0.025, 0.5, 0.975)) - plot(alpha.ord, ci.alpha[2, ], type = "n", - ylim = range(ci.alpha, na.rm = TRUE), ylab = "Random Effects") + plot(alpha.ord, ci.alpha[2, ], + type = "n", + ylim = range(ci.alpha, na.rm = TRUE), ylab = "Random Effects" + ) PEcAn.visualization::ciEnvelope(alpha.ord, ci.alpha[1, ], ci.alpha[3, ], col = "lightBlue") graphics::lines(alpha.ord, ci.alpha[2, ], lty = 1, lwd = 2) graphics::abline(h = 0, lty = 2) } - + ### YEAR year.cols <- grep("year", colnames(out)) if (length(year.cols > 0)) { ci.yr <- apply(out[, year.cols], 2, stats::quantile, c(0.025, 0.5, 0.975)) - plot(data$time, ci.yr[2, ], type = "n", - ylim = range(ci.yr, na.rm = TRUE), ylab = "Year Effect") + plot(data$time, ci.yr[2, ], + type = "n", + ylim = range(ci.yr, na.rm = TRUE), ylab = "Year Effect" + ) PEcAn.visualization::ciEnvelope(data$time, ci.yr[1, ], ci.yr[3, ], col = "lightBlue") graphics::lines(data$time, ci.yr[2, ], lty = 1, lwd = 2) graphics::abline(h = 0, lty = 2) } - + ### INDIV ind.cols <- which(substr(colnames(out), 1, 3) == "ind") if (length(ind.cols) > 0 & !is.null(combined)) { @@ -151,7 +166,7 @@ InventoryGrowthFusionDiagnostics <- function(jags.out, combined=NULL) { graphics::abline(v = 0, lty = 2) tapply(apply(out[, ind.cols], 2, mean), combined$PLOT, mean) table(combined$PLOT) - + spp <- combined$SPP # boxplot(out[order(spp),ind.cols],horizontal=TRUE,outline=FALSE,col=spp[order(spp)]) graphics::boxplot(out[, ind.cols], horizontal = TRUE, outline = FALSE, col = spp) @@ -163,5 +178,5 @@ InventoryGrowthFusionDiagnostics <- function(jags.out, combined=NULL) { } # InventoryGrowthFusionDiagnostics ########### NEXT STEPS: ############ -#what explains the year effects? climate -#what explains the individual effects? 
size, species, canopy position, plot -> landscape +# what explains the year effects? climate +# what explains the individual effects? size, species, canopy position, plot -> landscape diff --git a/modules/data.land/R/Read_Tuscon.R b/modules/data.land/R/Read_Tuscon.R index f3eff647b96..6938c81a301 100644 --- a/modules/data.land/R/Read_Tuscon.R +++ b/modules/data.land/R/Read_Tuscon.R @@ -9,9 +9,9 @@ Clean_Tucson <- function(file) { split <- strsplit(lines, " ") tags <- NULL decade <- NULL - + for (i in seq_along(split)) { - tags[i] <- split[[i]][1] + tags[i] <- split[[i]][1] decade[i] <- split[[i]][2] } utags <- unique(tags) @@ -19,7 +19,7 @@ Clean_Tucson <- function(file) { if (file.exists(newfile)) { file.remove(newfile) } - + for (tag in utags) { rows <- rev(which(tags == tag)) keep <- 1 @@ -30,8 +30,8 @@ Clean_Tucson <- function(file) { break } } - keep <- min(keep, length(rows)) - rows <- rev(rows[1:keep]) + keep <- min(keep, length(rows)) + rows <- rev(rows[1:keep]) append <- file.exists(newfile) write(lines[rows], newfile, append = append) } @@ -40,8 +40,8 @@ Clean_Tucson <- function(file) { #' Read_Tucson #' -#' wrapper around read.tucson that loads a whole directory of tree ring files -#' and calls a 'clean' function that removes redundant records +#' wrapper around read.tucson that loads a whole directory of tree ring files +#' and calls a 'clean' function that removes redundant records #' (WinDendro can sometimes create duplicate records when editing) #' #' @param folder path to read files from. @@ -49,7 +49,6 @@ Clean_Tucson <- function(file) { #' #' @export Read_Tucson <- function(folder) { - filenames <- dir(folder, pattern = "TXT", full.names = TRUE) filenames <- c(filenames, dir(folder, pattern = "rwl", full.names = TRUE)) filenames <- c(filenames, dir(folder, pattern = "rw", full.names = TRUE)) @@ -62,7 +61,6 @@ Read_Tucson <- function(folder) { file <- Clean_Tucson(file) filedata[[file]] <- dplR::read.tucson(file, header = FALSE) } - + return(filedata) } # Read_Tucson - diff --git a/modules/data.land/R/Soilgrids_SoilC_prep.R b/modules/data.land/R/Soilgrids_SoilC_prep.R index 4fc88cca8c4..9fb4b10a6cf 100644 --- a/modules/data.land/R/Soilgrids_SoilC_prep.R +++ b/modules/data.land/R/Soilgrids_SoilC_prep.R @@ -12,37 +12,38 @@ #' #' @author Dongchen Zhang #' @importFrom magrittr %>% -Soilgrids_SoilC_prep <- function(site_info, start_date, end_date, time_points, - outdir = NULL, export_csv = FALSE){ - #if we export CSV but didn't provide any path - if(as.logical(export_csv) & is.null(outdir)){ +Soilgrids_SoilC_prep <- function(site_info, start_date, end_date, time_points, + outdir = NULL, export_csv = FALSE) { + # if we export CSV but didn't provide any path + if (as.logical(export_csv) & is.null(outdir)) { PEcAn.logger::logger.info("If you want to export CSV file, please ensure input the outdir!") return(0) - }else if(as.logical(export_csv) & !file.exists(file.path(outdir, "soilgrids_soilC_data.csv"))){ - #if we want to export the csv file for soilgrids data. + } else if (as.logical(export_csv) & !file.exists(file.path(outdir, "soilgrids_soilC_data.csv"))) { + # if we want to export the csv file for soilgrids data. Previous_CSV <- PEcAn.data.land::soilgrids_soilC_extract(site_info, outdir) - }else if(!as.logical(export_csv) & !file.exists(file.path(outdir, "soilgrids_soilC_data.csv"))){ - #if we don't want to export the csv file. + } else if (!as.logical(export_csv) & !file.exists(file.path(outdir, "soilgrids_soilC_data.csv"))) { + # if we don't want to export the csv file. 
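# A usage sketch (ids and coordinates are hypothetical): `site_info` carries parallel
# site_id / lat / lon vectors, and the returned SoilC_Output has one row per site with
# "<time>_TotSoilCarb" and "<time>_SD" columns for each requested time point.
demo_site_info <- list(site_id = c(1001, 1002), lat = c(45.8, 42.5), lon = c(-90.1, -72.2))
# prep <- Soilgrids_SoilC_prep(demo_site_info, start_date = "2012-01-01", end_date = "2012-12-31",
#                              time_points = "2012-07-15", outdir = tempdir(), export_csv = TRUE)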
Previous_CSV <- PEcAn.data.land::soilgrids_soilC_extract(site_info) - }else if(file.exists(file.path(outdir, "soilgrids_soilC_data.csv"))){ - #if we have previous extracted soilgrids csv file. + } else if (file.exists(file.path(outdir, "soilgrids_soilC_data.csv"))) { + # if we have previous extracted soilgrids csv file. Previous_CSV <- as.data.frame(utils::read.csv(file.path(outdir, "soilgrids_soilC_data.csv"))) - SoilC_Output <- matrix(NA, length(site_info$site_id), 2*length(time_points)+1) %>% - `colnames<-`(c("site_id", paste0(time_points, "_TotSoilCarb"), paste0(time_points, "_SD"))) %>% as.data.frame()#we need: site_id, agb, sd, target time point. + SoilC_Output <- matrix(NA, length(site_info$site_id), 2 * length(time_points) + 1) %>% + `colnames<-`(c("site_id", paste0(time_points, "_TotSoilCarb"), paste0(time_points, "_SD"))) %>% + as.data.frame() # we need: site_id, agb, sd, target time point. SoilC_Output$site_id <- site_info$site_id - #loop over time and site + # loop over time and site for (i in seq_along(time_points)) { t <- time_points[i] for (id in site_info$site_id) { - site_SoilC <- Previous_CSV[which(Previous_CSV$Site_ID == id),] + site_SoilC <- Previous_CSV[which(Previous_CSV$Site_ID == id), ] if (dim(site_SoilC)[1] == 0) { next } - SoilC_Output[which(SoilC_Output$site_id==id), paste0(t, "_TotSoilCarb")] <- site_SoilC$Total_soilC_0.200cm - SoilC_Output[which(SoilC_Output$site_id==id), paste0(t, "_SD")] <- site_SoilC$Std_soilC_0.200cm + SoilC_Output[which(SoilC_Output$site_id == id), paste0(t, "_TotSoilCarb")] <- site_SoilC$Total_soilC_0.200cm + SoilC_Output[which(SoilC_Output$site_id == id), paste0(t, "_SD")] <- site_SoilC$Std_soilC_0.200cm } } } PEcAn.logger::logger.info("Soilgrids SoilC Prep Completed!") list(SoilC_Output = SoilC_Output, time_points = time_points, var = "TotSoilCarb") -} \ No newline at end of file +} diff --git a/modules/data.land/R/buildJAGSdata_InventoryRings.R b/modules/data.land/R/buildJAGSdata_InventoryRings.R index 7b963b1ee9a..d6231d09415 100644 --- a/modules/data.land/R/buildJAGSdata_InventoryRings.R +++ b/modules/data.land/R/buildJAGSdata_InventoryRings.R @@ -8,29 +8,29 @@ ##' also sets all the priors ##' @export buildJAGSdata_InventoryRings <- function(combined, inc.unit.conv = 0.1) { - ## pull out growth to a matrix, convert to cm of diameter y <- as.matrix(combined[, !is.na(as.numeric(colnames(combined)))]) * inc.unit.conv * 2 time <- as.numeric(colnames(y)) - + ## pull out diameter to a matrix - DBH.cols <- grep("DBH", colnames(combined)) - DBH <- as.matrix(combined[, DBH.cols]) + DBH.cols <- grep("DBH", colnames(combined)) + DBH <- as.matrix(combined[, DBH.cols]) class(DBH) <- "numeric" - z <- matrix(NA, nrow(y), ncol(y)) - DBH.years <- as.numeric(sub("DBH", "", colnames(combined)[DBH.cols])) - DBH.years <- ifelse(DBH.years < 20, DBH.years + 2000, DBH.years + 1900) + z <- matrix(NA, nrow(y), ncol(y)) + DBH.years <- as.numeric(sub("DBH", "", colnames(combined)[DBH.cols])) + DBH.years <- ifelse(DBH.years < 20, DBH.years + 2000, DBH.years + 1900) z[, which(time %in% DBH.years)] <- DBH - + ## if present, pull out mortality and recruitment COND.cols <- grep("COND", colnames(combined)) if (length(COND.cols) > 0) { - COND <- as.matrix(combined[, COND.cols]) - w <- matrix(NA, nrow(y), ncol(y)) + COND <- as.matrix(combined[, COND.cols]) + w <- matrix(NA, nrow(y), ncol(y)) COND.years <- as.numeric(sub("COND", "", colnames(combined)[COND.cols])) - COND.years <- ifelse(COND.years < 20, - COND.years + 2000, - ifelse(COND.years < 100, COND.years + 1900, 
COND.years)) + COND.years <- ifelse(COND.years < 20, + COND.years + 2000, + ifelse(COND.years < 100, COND.years + 1900, COND.years) + ) w[, which(time %in% COND.years)] <- COND ## convert COND matrix to numeric 0/1 w[w == "L"] <- 1 @@ -39,7 +39,7 @@ buildJAGSdata_InventoryRings <- function(combined, inc.unit.conv = 0.1) { ## recruitment r <- which(w[i, ] %in% "R") if (length(r) > 0) { - w[i, seq_len(r - 1)] <- 0 ## should really set last census, not last year, to 0, and then put NAs in between **** + w[i, seq_len(r - 1)] <- 0 ## should really set last census, not last year, to 0, and then put NAs in between **** w[i, r] <- 1 } ## mortality @@ -50,24 +50,25 @@ buildJAGSdata_InventoryRings <- function(combined, inc.unit.conv = 0.1) { ## known to be alive l <- which(w[i, ] %in% "1") if (length(l) > 1) { - + } } class(w) <- "numeric" ## Fill in COND matrix - } else { w <- matrix(1, nrow(y), ncol(y)) } - + ## build data object for JAGS n <- nrow(y) - return(list(y = y[1:n, ], - z = z[1:n, ], - ni = n, nt = ncol(y), - x_ic = 1, tau_ic = 1e-04, - a_dbh = 16, r_dbh = 8, - a_inc = 0.001, r_inc = 1, - a_add = 1, r_add = 1, - time = time)) + return(list( + y = y[1:n, ], + z = z[1:n, ], + ni = n, nt = ncol(y), + x_ic = 1, tau_ic = 1e-04, + a_dbh = 16, r_dbh = 8, + a_inc = 0.001, r_inc = 1, + a_add = 1, r_add = 1, + time = time + )) } # buildJAGSdata_InventoryRings diff --git a/modules/data.land/R/cohort2pool.R b/modules/data.land/R/cohort2pool.R index 8a099fd36f9..7d7a9d289d2 100644 --- a/modules/data.land/R/cohort2pool.R +++ b/modules/data.land/R/cohort2pool.R @@ -1,5 +1,5 @@ ##' cohort2pool function -##'Calculates total biomass using veg cohort file. +##' Calculates total biomass using veg cohort file. ##' @name cohort2pool ##' @title cohort2pool ##' @description Converts .rds files into pool netcdf files. 
@@ -15,96 +15,97 @@ ##' veg_file <- "~/downloads/FFT_site_1-25665/FFT.2008.veg.rds" ##' cohort2pool(veg_File = veg_file, allom_param = NULL) ##' } -##' +##' -cohort2pool <- function(dat, allom_param = NULL, dbh_name="DBH") { - - #Grab plot size +cohort2pool <- function(dat, allom_param = NULL, dbh_name = "DBH") { + # Grab plot size herb_plot <- dat[[1]]$clipArea[1] - #Grab number of plots + # Grab number of plots herb_num <- length(unique(dat[[1]]$plot)) # - if(sum(!is.na(dat[[2]]))==0){ + if (sum(!is.na(dat[[2]])) == 0) { biomass <- 0 total_area <- 1 ratio <- 0 - }else{ + } else { ## Grab DBH - dbh <- dat[[2]][,dbh_name] - - #calculate total area - subplot_fullName <- paste(dat[[2]]$site_name,dat[[2]]$plot,dat[[2]]$Subplot) - unique_subplot_records <- dat[[2]][!duplicated(subplot_fullName),] + dbh <- dat[[2]][, dbh_name] + + # calculate total area + subplot_fullName <- paste(dat[[2]]$site_name, dat[[2]]$plot, dat[[2]]$Subplot) + unique_subplot_records <- dat[[2]][!duplicated(subplot_fullName), ] Unique_Plot <- unique(unique_subplot_records$plot) area <- c() for (i in 1:length(Unique_Plot)) { - subplot_IDs <- unique_subplot_records[which(unique_subplot_records$plot == Unique_Plot[i]),]$Subplot - if(sum(subplot_IDs %in% c(31, 32, 40, 41)) == length(subplot_IDs)){ + subplot_IDs <- unique_subplot_records[which(unique_subplot_records$plot == Unique_Plot[i]), ]$Subplot + if (sum(subplot_IDs %in% c(31, 32, 40, 41)) == length(subplot_IDs)) { # unique_subplot_records[which(unique_subplot_records$plot == Unique_Plot[i]),]$PlotSize <- 400 area <- c(area, rep(400, length(subplot_IDs))) - }else if(sum(subplot_IDs %in% c(21, 23, 39, 41)) == length(subplot_IDs)){ + } else if (sum(subplot_IDs %in% c(21, 23, 39, 41)) == length(subplot_IDs)) { # unique_subplot_records[which(unique_subplot_records$plot == Unique_Plot[i]),]$PlotSize <- 1600 area <- c(area, rep(1600, length(subplot_IDs))) } } - total_area <- sum(area)/4 - + total_area <- sum(area) / 4 + ## Grab allometry - if(is.null(allom_param)){ - a <- -2.0127 + if (is.null(allom_param)) { + a <- -2.0127 b <- 2.4342 - biomass = exp(a + b*log(dbh)) - #Hard code foliage equation from Jenkins paper + biomass <- exp(a + b * log(dbh)) + # Hard code foliage equation from Jenkins paper b0 <- -4.0813 b1 <- 5.8816 - ratio = ifelse(dbh>=2.5,exp(b0 + (b1/dbh)),exp(b0 + (b1/2.5))) + ratio <- ifelse(dbh >= 2.5, exp(b0 + (b1 / dbh)), exp(b0 + (b1 / 2.5))) } else { - #Predict AGB using allom.predit code taken from Allom.Vignette.Rmd + # Predict AGB using allom.predit code taken from Allom.Vignette.Rmd # allom.fit = #outputs from AllomAve function # stand = allom.predict(allom.fit,dbh = dbh,pft = "LH",component = 3,use = "Bg",interval = "prediction") # AGB = apply(stand,1,sum) # hist(AGB) AGB <- NULL print("user provided allometry parameters not yet supported") - #return(NULL) + # return(NULL) return(AGB) } } - #calculate total herbaceous biomass, already in kgC - tot_herb <- sum(dat[[1]][,"dryMass"])/(herb_plot*herb_num) - - #Calculate AGB + # calculate total herbaceous biomass, already in kgC + tot_herb <- sum(dat[[1]][, "dryMass"]) / (herb_plot * herb_num) + + # Calculate AGB biomass[is.na(biomass)] <- 0 - tot_biomass <- sum(biomass,na.rm = TRUE) - - #calculate total wood and leaf biomass + tot_biomass <- sum(biomass, na.rm = TRUE) + + # calculate total wood and leaf biomass ratio[is.na(ratio)] <- 0 - leaf <- ratio*biomass - tot_leaf <- sum(leaf,na.rm = TRUE) - - #Divide by plot area, divide by 2 to convert from kg to kgC - leaf_biomass = ((tot_leaf/(total_area))/2 + 
tot_herb)#in kg - - if(tot_biomass == 0){ + leaf <- ratio * biomass + tot_leaf <- sum(leaf, na.rm = TRUE) + + # Divide by plot area, divide by 2 to convert from kg to kgC + leaf_biomass <- ((tot_leaf / (total_area)) / 2 + tot_herb) # in kg + + if (tot_biomass == 0) { AGB <- leaf_biomass - }else{ - AGB <- ((tot_biomass/(total_area))/2 + tot_herb)#in kg + } else { + AGB <- ((tot_biomass / (total_area)) / 2 + tot_herb) # in kg } - wood_biomass = AGB - leaf_biomass - - #grab soil carbon info - if(sum(is.na(dat[[3]]))){ + wood_biomass <- AGB - leaf_biomass + + # grab soil carbon info + if (sum(is.na(dat[[3]]))) { soil_carbon <- NA - }else{ - soil_carbon <- mean(dat[[3]]$SoilCarbon) #conversion done in extract_NEON_veg (gC/m^2) + } else { + soil_carbon <- mean(dat[[3]]$SoilCarbon) # conversion done in extract_NEON_veg (gC/m^2) } - - #Prep Arguments for pool_ic function - dims <- list(time =1) #Time dimension may be irrelevant - variables <-list(AbvGrndWood = AGB, wood_carbon_content = wood_biomass, leaf_carbon_content = leaf_biomass, soil_organic_carbon_content = soil_carbon) - input <- list(dims = dims, - vals = variables) - - + + # Prep Arguments for pool_ic function + dims <- list(time = 1) # Time dimension may be irrelevant + variables <- list(AbvGrndWood = AGB, wood_carbon_content = wood_biomass, leaf_carbon_content = leaf_biomass, soil_organic_carbon_content = soil_carbon) + input <- list( + dims = dims, + vals = variables + ) + + return(input) -} \ No newline at end of file +} diff --git a/modules/data.land/R/data.R b/modules/data.land/R/data.R index 78fc30551e4..f9ab26c5b50 100644 --- a/modules/data.land/R/data.R +++ b/modules/data.land/R/data.R @@ -14,8 +14,8 @@ #' \item{VARIABLE_GROUP}{category, eg abovground biomass or soil chemistry} #' \item{VARIABLE, DATAVALUE}{key and value for each measured variable} #' \item{NA_L1CODE, NA_L1NAME, NA_L2CODE, NA_L2NAME}{ -#' numeric IDs and names for the Level 1 and level 2 ecoregions where -#' this site is located} +#' numeric IDs and names for the Level 1 and level 2 ecoregions where +#' this site is located} #' } #' @source Originally from Fluxnet , #' but the provenence and age of this specific file is not clear. @@ -43,31 +43,31 @@ #' A list with 26 entries: #' \describe{ #' \item{air.cond, h2o.cond, sand.cond, silt.cond, clay.cond}{ -#' thermal conductivity, W m^-1 K^-1} +#' thermal conductivity, W m^-1 K^-1} #' \item{air.hcap, sand.hcap, silt.hcap, clay.hcap}{heat capacity, -#' J m^-3 K^-1} +#' J m^-3 K^-1} #' \item{kair, ksand, ksilt, kclay}{relative conductivity factor} #' \item{fieldcp.K}{hydraulic conductance at field capacity, mm day^-1} #' \item{grav}{gravity acceleration, m s^-2} #' \item{soil.key}{Abbreviations for each of 18 soil texture classes, e.g. #' "SiL", "LSa"} #' \item{soil.name}{Names for 18 soil texture classes, e.g. 
"Sand", -#' "Silty clay"} +#' "Silty clay"} #' \item{soilcp.MPa}{soil water potential when air-dry, MPa} #' \item{soilld.MPa}{soil water potential at critical water content, MPa} #' \item{soilwp.MPa}{soil water potential at wilting point, MPa} #' \item{stext.lines}{list of 18 lists, each giving minimum and maximum -#' sand/silt/clay contents for a soil texture class} +#' sand/silt/clay contents for a soil texture class} #' \item{stext.polygon}{list of 18 lists, each giving corner points in the -#' soil texture triangle for a soil texture class} +#' soil texture triangle for a soil texture class} #' \item{texture}{data frame with 13 rows and 21 columns, giving default -#' parameter values for 13 named soil textures} +#' parameter values for 13 named soil textures} #' \item{theta.crit}{critical water content (fractional soil moisture at -#' which plants start dropping leaves), m^3 m^-3} +#' which plants start dropping leaves), m^3 m^-3} #' \item{xclay.def}{default volume fraction of sand in each of 18 soil -#' texture classes} +#' texture classes} #' \item{xsand.def}{default volume fraction of clay in each of 18 soil -#' texture classes} +#' texture classes} #' } #' @source #' The hydraulic parameters are derived from Cosby et al 1984, "A Statistical diff --git a/modules/data.land/R/dataone_download.R b/modules/data.land/R/dataone_download.R index 5339b5a5cab..a333356e83e 100644 --- a/modules/data.land/R/dataone_download.R +++ b/modules/data.land/R/dataone_download.R @@ -1,64 +1,62 @@ -#' DataONE download +#' DataONE download #' #' @param id "The identifier of a package, package metadata or other package member" -- dataone r #' @param filepath path to where files will be stored #' @param CNode character, passed to `dataone::CNode` -#' @param lazyLoad "A logical value. If TRUE, then only package member system metadata is downloaded and not data. The default is FALSE." -- dataone R +#' @param lazyLoad "A logical value. If TRUE, then only package member system metadata is downloaded and not data. The default is FALSE." -- dataone R #' @param quiet "A 'logical'. If TRUE (the default) then informational messages will not be printed." -- dataone R -#' +#' #' @author Liam P Burke, \email{lpburke@@bu.edu} -#' @description Adapts the dataone::getDataPackage workflow to allow users to download data from the DataONE federation by simply entering the doi or associated package id +#' @description Adapts the dataone::getDataPackage workflow to allow users to download data from the DataONE federation by simply entering the doi or associated package id #' #' @export #' -#' @examples +#' @examples #' \dontrun{ -#' dataone_download(id = "doi:10.6073/pasta/63ad7159306bc031520f09b2faefcf87", +#' dataone_download(id = "doi:10.6073/pasta/63ad7159306bc031520f09b2faefcf87", #' filepath = "/fs/data1/pecan.data/dbfiles") #' } -dataone_download = function(id, filepath = "/fs/data1/pecan.data/dbfiles", CNode = "PROD", lazyLoad = FALSE, quiet = FALSE){ +dataone_download <- function(id, filepath = "/fs/data1/pecan.data/dbfiles", CNode = "PROD", lazyLoad = FALSE, quiet = FALSE) { ### Check for wget functionality test <- try(system2("wget", "--version", stderr = TRUE)) if (inherits(test, "try-error")) { PEcAn.logger::logger.severe("wget system utility is not available on this system. 
Please install it to use this functionality.") } - if (!requireNamespace("dataone", quietly = TRUE) - || !requireNamespace("datapack", quietly = TRUE)) { + if (!requireNamespace("dataone", quietly = TRUE) || + !requireNamespace("datapack", quietly = TRUE)) { PEcAn.logger::logger.severe( "Could not find one or more of packages `dataone` and `datapack`,", "which are needed by `dataone_download()`.", - "Please install them to use this functionality.") + "Please install them to use this functionality." + ) } ### automatically retrieve mnId - cn <- dataone::CNode(CNode) - locations <- dataone::resolve(cn, pid = id) - PEcAn.logger::logger.info("Connecting to Member Node") - mnId <- locations$data[1,"nodeIdentifier"] - + cn <- dataone::CNode(CNode) + locations <- dataone::resolve(cn, pid = id) + PEcAn.logger::logger.info("Connecting to Member Node") + mnId <- locations$data[1, "nodeIdentifier"] + ### begin D1 download process d1c <- dataone::D1Client("PROD", mnId) - PEcAn.logger::logger.info("Resolving file locations. This may take a few minutes.") - pkg <- dataone::getDataPackage(d1c, id = id, lazyLoad = lazyLoad, quiet = quiet, limit = "1GB") - files <- datapack::getValue(pkg, name="sysmeta@formatId") + PEcAn.logger::logger.info("Resolving file locations. This may take a few minutes.") + pkg <- dataone::getDataPackage(d1c, id = id, lazyLoad = lazyLoad, quiet = quiet, limit = "1GB") + files <- datapack::getValue(pkg, name = "sysmeta@formatId") n <- length(files) # number of files - PEcAn.logger::logger.info("Files located.") + PEcAn.logger::logger.info("Files located.") ### make new directory within this directory newdir_D1 <- file.path(filepath, paste0("DataOne_", gsub("/", "-", id))) dir.create(newdir_D1) - - ### download the data with wget + + ### download the data with wget # '--header=' spoofs the user agent so that we avoid authentication errors. DataONE is now actively preventing web scraping. - for(i in 1:n){ + for (i in 1:n) { PEcAn.logger::logger.info(paste("Downloading", "file", i, "of", n, sep = " ")) - system(paste("cd", newdir_D1, "&&", "{", "wget", "--header='User-Agent: Mozilla/5.0 (Windows NT 5.1; rv:23.0) Gecko/20100101 Firefox/23.0'", "--content-disposition", names(files)[i], "; cd -; }")) # cd to newdir, download files with wget, cd back + system(paste("cd", newdir_D1, "&&", "{", "wget", "--header='User-Agent: Mozilla/5.0 (Windows NT 5.1; rv:23.0) Gecko/20100101 Firefox/23.0'", "--content-disposition", names(files)[i], "; cd -; }")) # cd to newdir, download files with wget, cd back } - PEcAn.logger::logger.info(paste(n, "files downloaded to", newdir_D1, sep = " ")) + PEcAn.logger::logger.info(paste(n, "files downloaded to", newdir_D1, sep = " ")) } - - - diff --git a/modules/data.land/R/download.SM_CDS.R b/modules/data.land/R/download.SM_CDS.R index 20e6433e1bf..7faf5e22dda 100644 --- a/modules/data.land/R/download.SM_CDS.R +++ b/modules/data.land/R/download.SM_CDS.R @@ -1,5 +1,5 @@ #' Download CDS soil moisture data for the SDA workflow. -#' @details +#' @details #' Introduction on how to play with the CDS python API #' to correctly build the python environment with the cdsapi installed, you need to follow those steps. #' 1. Install miniconda. @@ -38,43 +38,50 @@ #' #' @return A vector containing file paths to the downloaded files. #' @export -#' +#' #' @examples #' @author Dongchen Zhang #' @importFrom dplyr %>% download.SM_CDS <- function(outfolder, time.points, overwrite = FALSE, auto.create.key = FALSE) { - #load cdsapi from python environment. 
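# A minimal environment-setup sketch for the Python dependency described in the
# roxygen details above (the conda environment name "r-reticulate" is hypothetical;
# a ~/.cdsapirc file with your CDS url and key is still required):
reticulate::use_condaenv("r-reticulate", required = TRUE)
reticulate::py_install("cdsapi", pip = TRUE)
cdsapi <- reticulate::import("cdsapi")
cclient <- cdsapi$Client()   # reads credentials from ~/.cdsapirc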
- tryCatch({ - cdsapi <- reticulate::import("cdsapi") - }, error = function(e) { - PEcAn.logger::logger.severe( - "Failed to load `cdsapi` Python library. ", - "Please make sure it is installed to a location accessible to `reticulate`.", - "You should be able to install it with the following command: ", - "`pip install --user cdsapi`.", - "The following error was thrown by `reticulate::import(\"cdsapi\")`: ", - conditionMessage(e) - ) - }) - #define function for building credential file. - #maybe as a helper function. - getnetrc <- function (dl_dir) { + # load cdsapi from python environment. + tryCatch( + { + cdsapi <- reticulate::import("cdsapi") + }, + error = function(e) { + PEcAn.logger::logger.severe( + "Failed to load `cdsapi` Python library. ", + "Please make sure it is installed to a location accessible to `reticulate`.", + "You should be able to install it with the following command: ", + "`pip install --user cdsapi`.", + "The following error was thrown by `reticulate::import(\"cdsapi\")`: ", + conditionMessage(e) + ) + } + ) + # define function for building credential file. + # maybe as a helper function. + getnetrc <- function(dl_dir) { netrc <- file.path(dl_dir, ".cdsapirc") if (file.exists(netrc) == FALSE || - any(grepl("https://cds.climate.copernicus.eu/api/v2", - readLines(netrc))) == FALSE) { + any(grepl( + "https://cds.climate.copernicus.eu/api/v2", + readLines(netrc) + )) == FALSE) { netrc_conn <- file(netrc) - writeLines(c( - sprintf( - "url: %s", - getPass::getPass(msg = "Enter URL from the following link \n (https://cds.climate.copernicus.eu/api-how-to#install-the-cds-api-key):") + writeLines( + c( + sprintf( + "url: %s", + getPass::getPass(msg = "Enter URL from the following link \n (https://cds.climate.copernicus.eu/api-how-to#install-the-cds-api-key):") + ), + sprintf( + "key: %s", + getPass::getPass(msg = "Enter KEY from the following link \n (https://cds.climate.copernicus.eu/api-how-to#install-the-cds-api-key):") + ) ), - sprintf( - "key: %s", - getPass::getPass(msg = "Enter KEY from the following link \n (https://cds.climate.copernicus.eu/api-how-to#install-the-cds-api-key):") - ) - ), - netrc_conn) + netrc_conn + ) close(netrc_conn) message( "A netrc file with your CDS Login credentials was stored in the output directory " @@ -82,7 +89,7 @@ download.SM_CDS <- function(outfolder, time.points, overwrite = FALSE, auto.crea } return(netrc) } - #check if the token exists for the cdsapi. + # check if the token exists for the cdsapi. if (!file.exists(file.path(Sys.getenv("HOME"), ".cdsapirc")) & auto.create.key) { getnetrc(Sys.getenv("HOME")) } else if (!file.exists(file.path(Sys.getenv("HOME"), ".cdsapirc")) & !auto.create.key) { @@ -91,26 +98,29 @@ download.SM_CDS <- function(outfolder, time.points, overwrite = FALSE, auto.crea "https://cds.climate.copernicus.eu/api-how-to#install-the-cds-api-key ." ) } - #grab the client object. - tryCatch({ - cclient <- cdsapi$Client() - }, error = function(e) { - PEcAn.logger::logger.severe( - "The following error was thrown by `cdsapi$Client()`: ", - conditionMessage(e) - ) - }) - #loop over each time point. + # grab the client object. + tryCatch( + { + cclient <- cdsapi$Client() + }, + error = function(e) { + PEcAn.logger::logger.severe( + "The following error was thrown by `cdsapi$Client()`: ", + conditionMessage(e) + ) + } + ) + # loop over each time point. file.names <- c() - #setup progress bar. + # setup progress bar. 
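For reference, a non-interactive sketch of the credential file that `getnetrc()` builds above; the URL matches the one the function checks for, while the key value is only a placeholder for the UID:API-key string from the user's CDS profile page.
cdsapirc <- file.path(Sys.getenv("HOME"), ".cdsapirc")
if (!file.exists(cdsapirc)) {
  writeLines(
    c(
      "url: https://cds.climate.copernicus.eu/api/v2",
      "key: 12345:00000000-aaaa-bbbb-cccc-000000000000" # placeholder credentials
    ),
    cdsapirc
  )
}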
pb <- utils::txtProgressBar(min = 0, max = length(time.points), style = 3) for (i in seq_along(time.points)) { - #name file. - fname <- file.path(outfolder, paste('surface_soil_moisture', time.points[i], "nc", sep = ".")) + # name file. + fname <- file.path(outfolder, paste("surface_soil_moisture", time.points[i], "nc", sep = ".")) fname.zip <- gsub(".nc", ".zip", fname, fixed = T) - #add new extracted file into vector. + # add new extracted file into vector. file.names <- c(file.names, fname) - #if we have already downloaded this file. + # if we have already downloaded this file. if (file.exists(fname) && !overwrite) { PEcAn.logger::logger.warn(glue::glue( "File `{fname}` already exists, and `overwrite` is FALSE. ", @@ -118,35 +128,35 @@ download.SM_CDS <- function(outfolder, time.points, overwrite = FALSE, auto.crea )) next } - #prepare file through cds server. + # prepare file through cds server. while ("try-error" %in% class(try(do_next <- cclient$retrieve( - 'satellite-soil-moisture', + "satellite-soil-moisture", list( - 'variable'= 'surface_soil_moisture', - 'type_of_sensor'= 'active', - 'time_aggregation'= 'day_average', - 'year'= sprintf("%04d", lubridate::year(time.points[i])), - 'month'= sprintf("%02d", lubridate::month(time.points[i])), - 'day'= sprintf("%02d", lubridate::day(time.points[i])), - 'type_of_record'= 'cdr', - 'version'= 'v202212' + "variable" = "surface_soil_moisture", + "type_of_sensor" = "active", + "time_aggregation" = "day_average", + "year" = sprintf("%04d", lubridate::year(time.points[i])), + "month" = sprintf("%02d", lubridate::month(time.points[i])), + "day" = sprintf("%02d", lubridate::day(time.points[i])), + "type_of_record" = "cdr", + "version" = "v202212" ), - 'download.zip' + "download.zip" )))) { Sys.sleep(10) PEcAn.logger::logger.info("Encounter error! Will try download in 10 seconds.") } - #download file to local. + # download file to local. utils::download.file(do_next$reply$location, destfile = fname.zip) - #unzip file. + # unzip file. unzipPath <- utils::unzip(zipfile = fname.zip, exdir = outfolder) - #rename unziped file. + # rename unzipped file. base::file.rename(unzipPath, fname) - #remove zip file. + # remove zip file. base::file.remove(fname.zip) - #update progress bar + # update progress bar pbi <- i utils::setTxtProgressBar(pb, pbi) } file.names -} \ No newline at end of file +} diff --git a/modules/data.land/R/download_NEON_soilmoisture.R b/modules/data.land/R/download_NEON_soilmoisture.R index c84bb563c7b..7321c92e88c 100644 --- a/modules/data.land/R/download_NEON_soilmoisture.R +++ b/modules/data.land/R/download_NEON_soilmoisture.R @@ -1,20 +1,20 @@ ##' Download NEON Soil Water Content and Soil Salinity data by date and site name -##' +##' ##' @param site four letter NEON site code name(s). If no site is specified, it will download all of them (chr) (e.g "BART" or c("SRER", "KONA", "BART")) ##' @param avg averaging interval (minutes): 1, 30, or both ("all") . default returns both ##' @param var variable of interest: "SWC" (soil water content) or "SIC" (soil ion content) or both ("all") default returns both. ##' Both variables will be saved in outdir automatically (chr) ##' @param startdate start date as YYYY-mm. If left empty, all data available will be downloaded (chr) -##' @param enddate start date as YYYY-mm. If left empty, all data available will be downloaded (chr) +##' @param enddate end date as YYYY-mm. 
If left empty, all data available will be downloaded (chr) ##' @param outdir out directory to store the following data: -##' .rds list files of SWC and SIC data for each site and sensor position, -##' sensor positions .csv for each site, +##' .rds list files of SWC and SIC data for each site and sensor position, +##' sensor positions .csv for each site, ##' variable description .csv file, ##' readme .csv file ##' @return List of specified variable(s) AND prints the path to output folder -##' +##' ##' @author Juliette Bateman -##' +##' ##' @examples ##' \dontrun{ ##' test <- download_NEON_soilmoisture( @@ -26,59 +26,57 @@ ##' outdir = getwd())} ## Install NEON libs -#devtools::install_github("NEONScience/NEON-geolocation/geoNEON") -#devtools::install_github("NEONScience/NEON-utilities/neonUtilities", force = TRUE) -#install.packages("BiocManager") +# devtools::install_github("NEONScience/NEON-geolocation/geoNEON") +# devtools::install_github("NEONScience/NEON-utilities/neonUtilities", force = TRUE) +# install.packages("BiocManager") # BiocManager::install("rhdf5") download_NEON_soilmoist <- function(site, avg = "all", var = "all", startdate = NA, enddate = NA, outdir) { - - - #################### Data Download from NEON #################### - soil.raw = neonUtilities::loadByProduct(dpID = "DP1.00094.001", site = site, avg = avg, startdate = startdate, enddate = enddate, check.size = FALSE) - + #################### Data Download from NEON #################### + soil.raw <- neonUtilities::loadByProduct(dpID = "DP1.00094.001", site = site, avg = avg, startdate = startdate, enddate = enddate, check.size = FALSE) + # Export into new folder in outdir - dir = paste0(outdir, "/NEONSoilMoist", "_", startdate, "-", enddate) + dir <- paste0(outdir, "/NEONSoilMoist", "_", startdate, "-", enddate) dir.create(dir) - + #################### Clean-up Data Observations #################### - # Only select data from list and remove flagged observations + # Only select data from list and remove flagged observations if (avg == 30) { - data.raw = soil.raw$SWS_30_minute %>% stats::na.omit() + data.raw <- soil.raw$SWS_30_minute %>% stats::na.omit() } else if (avg == 1) { - data.raw = soil.raw$SWS_1_minute %>% stats::na.omit() + data.raw <- soil.raw$SWS_1_minute %>% stats::na.omit() } else { - data.raw = list(soil.raw$SWS_1_minute, soil.raw$SWS_30_minute) %>% stats::na.omit() + data.raw <- list(soil.raw$SWS_1_minute, soil.raw$SWS_30_minute) %>% stats::na.omit() } - + # Separate variables, omit flagged data obs - data.raw.SWC = (split(data.raw, data.raw$VSWCFinalQF))$'0' %>% + data.raw.SWC <- (split(data.raw, data.raw$VSWCFinalQF))$"0" %>% dplyr::select(c("domainID", "siteID", "horizontalPosition", "verticalPosition", "startDateTime", "endDateTime", "VSWCMean", "VSWCMinimum", "VSWCMaximum", "VSWCVariance", "VSWCNumPts", "VSWCExpUncert", "VSWCStdErMean")) - data.raw.SIC = (split(data.raw, data.raw$VSICFinalQF))$'0' %>% - dplyr::select(c("domainID", "siteID", "horizontalPosition", "verticalPosition", "startDateTime", "endDateTime","VSICMean", "VSICMinimum", "VSICMaximum", "VSICVariance", "VSICNumPts", "VSICExpUncert", "VSICStdErMean")) - - data.raw.both = list(data.raw.SWC, data.raw.SIC) + data.raw.SIC <- (split(data.raw, data.raw$VSICFinalQF))$"0" %>% + dplyr::select(c("domainID", "siteID", "horizontalPosition", "verticalPosition", "startDateTime", "endDateTime", "VSICMean", "VSICMinimum", "VSICMaximum", "VSICVariance", "VSICNumPts", "VSICExpUncert", "VSICStdErMean")) + + data.raw.both <- list(data.raw.SWC, 
data.raw.SIC) names(data.raw.both) <- c("SWC", "SIC") - data.split.both = lapply(data.raw.both, function(x) split(x, x$siteID)) - + data.split.both <- lapply(data.raw.both, function(x) split(x, x$siteID)) + # Separate dataframe into lists by site and sensor position - data.SWC.sites = split(data.raw.SWC, data.raw.SWC$siteID) - data.SIC.sites = split(data.raw.SIC, data.raw.SIC$siteID) - for (i in 1:length(data.SWC.sites)){ - data.SWC.sites[i]=lapply(data.SWC.sites[i], function(x) split(x, list(x$horizontalPosition, x$verticalPosition))) + data.SWC.sites <- split(data.raw.SWC, data.raw.SWC$siteID) + data.SIC.sites <- split(data.raw.SIC, data.raw.SIC$siteID) + for (i in 1:length(data.SWC.sites)) { + data.SWC.sites[i] <- lapply(data.SWC.sites[i], function(x) split(x, list(x$horizontalPosition, x$verticalPosition))) } - for (i in 1:length(data.SIC.sites)){ - data.SIC.sites[i]=lapply(data.SIC.sites[i], function(x) split(x, list(x$horizontalPosition, x$verticalPosition))) + for (i in 1:length(data.SIC.sites)) { + data.SIC.sites[i] <- lapply(data.SIC.sites[i], function(x) split(x, list(x$horizontalPosition, x$verticalPosition))) } - + #################### Save data into folders #################### - - # Saving metadata and site data lists as .rds files to outdir, organize into site specific folders - sensor.pos = split(soil.raw$sensor_positions_00094, soil.raw$sensor_positions_00094$siteID) - for (i in names(sensor.pos)){ + + # Saving metadata and site data lists as .rds files to outdir, organize into site specific folders + sensor.pos <- split(soil.raw$sensor_positions_00094, soil.raw$sensor_positions_00094$siteID) + for (i in names(sensor.pos)) { utils::write.csv(sensor.pos[[i]], file = paste0(dir, "/", i, "_sensor_positions.csv")) } for (i in names(data.SIC.sites)) { @@ -87,31 +85,30 @@ download_NEON_soilmoist <- function(site, avg = "all", var = "all", for (i in names(data.SWC.sites)) { saveRDS(data.SWC.sites[[i]], file = paste0(dir, "/", i, "_SWC_data.rds")) } - for (i in 1:length(site)){ - folders = paste0(dir, "/", site[1:i]) + for (i in 1:length(site)) { + folders <- paste0(dir, "/", site[1:i]) dir.create(folders[i]) fs::file_move(paste0(dir, "/", site[i], "_sensor_positions.csv"), folders[i]) fs::file_move(paste0(dir, "/", site[i], "_SIC_data.rds"), folders[i]) fs::file_move(paste0(dir, "/", site[i], "_SWC_data.rds"), folders[i]) } - - utils::write.csv(soil.raw$readme_00094, file = (paste0(dir,"/readme.csv"))) + + utils::write.csv(soil.raw$readme_00094, file = (paste0(dir, "/readme.csv"))) utils::write.csv(soil.raw$variables_00094, file = paste0(dir, "/variable_description.csv")) - - # Return file path to data and print lists of + + # Return file path to data and print lists of PEcAn.logger::logger.info("Done! 
NEON soil data has been downloaded and stored in ", paste0(dir), ".") if (var == "SWC") { - data.SWC = data.SWC.sites + data.SWC <- data.SWC.sites return(data.SWC) } else if (var == "SIC") { - data.SIC = data.SIC.sites - return(data.SIC) + data.SIC <- data.SIC.sites + return(data.SIC) } else if (var == "all") { data.SWC <- data.SWC.sites data.SIC <- data.SIC.sites - both.var = list(data.SWC, data.SIC) - names(both.var) = c("SWC", "SIC") + both.var <- list(data.SWC, data.SIC) + names(both.var) <- c("SWC", "SIC") return(both.var) } - } diff --git a/modules/data.land/R/ens.veg.module.R b/modules/data.land/R/ens.veg.module.R index d4c1c35282e..d2fbecb519a 100644 --- a/modules/data.land/R/ens.veg.module.R +++ b/modules/data.land/R/ens.veg.module.R @@ -10,56 +10,55 @@ ##' @param n.ensemble integer, ensemble member number ##' @param new_site data frame, id/lat/lon/name info about the site ##' @param host list, host info as in settings$host, host$name forced to be "localhost" upstream -##' +##' ##' @export ##' ##' @author Istem Fer ens_veg_module <- function(getveg.id, dbparms, - input_veg, - outfolder, - machine, - start_date, end_date, - n.ensemble, - new_site, - host){ - + input_veg, + outfolder, + machine, + start_date, end_date, + n.ensemble, + new_site, + host) { machine_host <- machine$hostname #--------------------------------------------------------------------------------------------------# # Write model specific IC files con <- PEcAn.DB::db.open(dbparms$bety) on.exit(PEcAn.DB::db.close(con), add = TRUE) - + PEcAn.logger::logger.info("Begin IC sampling, ensemble member: ", n.ensemble) - + spp.file <- PEcAn.DB::db.query(paste("SELECT * from dbfiles where container_id =", getveg.id), con) - - pkg <- "PEcAn.data.land" - fcn <- "sample_ic" - - ensveg.id <- PEcAn.DB::convert_input(input.id = getveg.id$input.id, - outfolder = paste0(outfolder, "/", input_veg$source, "_ens", n.ensemble, ".", lubridate::year(start_date)), - formatname = "spp.info", - mimetype = "application/rds", - site.id = new_site$id, - start_date = start_date, end_date = end_date, - pkg = pkg, fcn = fcn, - con = con, host = host, - write = TRUE, - overwrite = FALSE, - pattern = paste0(input_veg$source, "_ens", n.ensemble), - forecast = TRUE, - ensemble = 1, - # fcn specific args - in.path = spp.file$file_path, - in.name = spp.file$file_name, - n.ensemble = n.ensemble, - machine_host = machine_host, - source = input_veg$source) - - - return(ensveg.id) + pkg <- "PEcAn.data.land" + fcn <- "sample_ic" + + ensveg.id <- PEcAn.DB::convert_input( + input.id = getveg.id$input.id, + outfolder = paste0(outfolder, "/", input_veg$source, "_ens", n.ensemble, ".", lubridate::year(start_date)), + formatname = "spp.info", + mimetype = "application/rds", + site.id = new_site$id, + start_date = start_date, end_date = end_date, + pkg = pkg, fcn = fcn, + con = con, host = host, + write = TRUE, + overwrite = FALSE, + pattern = paste0(input_veg$source, "_ens", n.ensemble), + forecast = TRUE, + ensemble = 1, + # fcn specific args + in.path = spp.file$file_path, + in.name = spp.file$file_name, + n.ensemble = n.ensemble, + machine_host = machine_host, + source = input_veg$source + ) + + return(ensveg.id) } diff --git a/modules/data.land/R/extract_FIA.R b/modules/data.land/R/extract_FIA.R index bc91dcd3216..7feb9760a0a 100644 --- a/modules/data.land/R/extract_FIA.R +++ b/modules/data.land/R/extract_FIA.R @@ -1,4 +1,4 @@ -##' @param lon site longitude +##' @param lon site longitude ##' ##' @param lat site latitude ##' @param start_date "YYYY-MM-DD" @@ 
-11,101 +11,104 @@ ##' @title extract_FIA ##' @export ##' @author Istem Fer -extract_FIA <- function(lon, lat, start_date, end_date, gridres = 0.075, dbparms, ...){ - - #Set start_date and end_date as Date objects - start_date = as.Date(start_date) - end_date = as.Date(end_date) - +extract_FIA <- function(lon, lat, start_date, end_date, gridres = 0.075, dbparms, ...) { + # Set start_date and end_date as Date objects + start_date <- as.Date(start_date) + end_date <- as.Date(end_date) + veg_info <- list() - + fia.con <- PEcAn.DB::db.open(dbparms$fia) on.exit(PEcAn.DB::db.close(fia.con), add = TRUE) - - lonmin <- lon - gridres - lonmax <- lon + gridres - latmin <- lat - gridres - latmax <- lat + gridres - + + lonmin <- lon - gridres + lonmax <- lon + gridres + latmin <- lat - gridres + latmax <- lat + gridres + start_year <- lubridate::year(start_date) - end_year <- lubridate::year(end_date) - - min.year <- start_year - 5 - max.year <- start_year + 5 - + end_year <- lubridate::year(end_date) + + min.year <- start_year - 5 + max.year <- start_year + 5 + + - ################## ## ## ## PLOT ## ## ## ################## - + ## query to get plot info - query <- paste("SELECT p.cycle, p.statecd, p.measyear as time, p.cn as patch, ", - "MIN(2-c.stdorgcd) as trk, AVG(c.stdage) as age, p.lat, p.lon, p.prev_plt_cn ", - "FROM plot as p LEFT JOIN cond as c on p.cn=c.plt_cn ", - "WHERE p.lon >= ", lonmin, " AND p.lon <= ", lonmax, " AND p.lat >= ", latmin, - " AND p.lat <= ", latmax, " AND p.measyear >= ", min.year, - " AND p.measyear <= ", max.year, " GROUP BY p.cn") - + query <- paste( + "SELECT p.cycle, p.statecd, p.measyear as time, p.cn as patch, ", + "MIN(2-c.stdorgcd) as trk, AVG(c.stdage) as age, p.lat, p.lon, p.prev_plt_cn ", + "FROM plot as p LEFT JOIN cond as c on p.cn=c.plt_cn ", + "WHERE p.lon >= ", lonmin, " AND p.lon <= ", lonmax, " AND p.lat >= ", latmin, + " AND p.lat <= ", latmax, " AND p.measyear >= ", min.year, + " AND p.measyear <= ", max.year, " GROUP BY p.cn" + ) + plot.info <- PEcAn.DB::db.query(query, con = fia.con) if (nrow(plot.info) == 0) { PEcAn.logger::logger.severe("No plot data found on FIA.") } - + for (statecd in unique(plot.info$statecd)) { # Count up occurrences of each cycle cycle.count <- table(plot.info$cycle[plot.info$statecd == statecd]) - - # Find the best valid cycle, in terms of providing the most records. + + # Find the best valid cycle, in terms of providing the most records. 
# In case of ties, which.max will return the first one, which will be the earliest best.cycle <- as.numeric(names(cycle.count)[which.max(cycle.count)]) - + row.keep.ind <- (plot.info$statecd != statecd) | (plot.info$cycle == best.cycle) - + plot.info <- plot.info[row.keep.ind, ] } - + # as an extra precaution, remove any records that are explicitly remeasurments of the same plot plot.info <- plot.info[.select.unique.fia.plot.records(plot.info$patch, plot.info$prev_plt_cn, plot.info$time, start_year), ] - + if (nrow(plot.info) == 0) { PEcAn.logger::logger.severe("All plot data were invalid.") } - + plot.info$trk[is.na(plot.info$trk)] <- 1 plot.info$age[is.na(plot.info$age)] <- 0 - + # Dropping unneeded columns plot.info <- plot.info[, c("time", "patch", "trk", "age")] - + PEcAn.logger::logger.debug(paste0("Found ", nrow(plot.info), " patches for coordinates lat:", lat, " lon:", lon)) - + veg_info[[1]] <- plot.info - + ################## ## ## ## CSS ## ## ## ################## - - query <- paste0("SELECT p.measyear as time,p.cycle,p.statecd,p.cn as patch, - ", "CONCAT(CAST(t.subp AS CHAR),CAST(t.tree AS CHAR)) as cohort,t.dia*2.54 as dbh, ", - "t.spcd as spcd, t.tpa_unadj*0.0002471 as n FROM plot as p LEFT JOIN tree as t on p.cn=t.plt_cn ", - "WHERE p.lon >= ", lonmin, - " and p.lon < ", lonmax, - " and p.lat >= ", latmin, - " and p.lat < ", latmax) + + query <- paste0( + "SELECT p.measyear as time,p.cycle,p.statecd,p.cn as patch, + ", "CONCAT(CAST(t.subp AS CHAR),CAST(t.tree AS CHAR)) as cohort,t.dia*2.54 as dbh, ", + "t.spcd as spcd, t.tpa_unadj*0.0002471 as n FROM plot as p LEFT JOIN tree as t on p.cn=t.plt_cn ", + "WHERE p.lon >= ", lonmin, + " and p.lon < ", lonmax, + " and p.lat >= ", latmin, + " and p.lat < ", latmax + ) tree.info <- PEcAn.DB::db.query(query, con = fia.con) names(tree.info) <- tolower(names(tree.info)) - + if (nrow(tree.info) == 0) { PEcAn.logger::logger.severe("No FIA data found.") } else { PEcAn.logger::logger.debug(paste0(nrow(tree.info), " trees found initially")) } - + # Remove rows that don't map to any retained patch tree.info <- tree.info[which(tree.info$patch %in% plot.info$patch), ] if (nrow(tree.info) == 0) { @@ -113,7 +116,7 @@ extract_FIA <- function(lon, lat, start_date, end_date, gridres = 0.075, dbparms } else { PEcAn.logger::logger.debug(paste0(nrow(tree.info), " trees that map to previously selected patches.")) } - + ## Remove rows with no dbh, spcd, or n notree <- which(is.na(tree.info$dbh) & is.na(tree.info$spcd) & is.na(tree.info$n)) if (length(notree) > 0) { @@ -124,27 +127,24 @@ extract_FIA <- function(lon, lat, start_date, end_date, gridres = 0.075, dbparms } else { PEcAn.logger::logger.debug(paste0(nrow(tree.info), " trees remain after removing entries with no dbh, spcd, and/or n.")) } - + veg_info[[2]] <- tree.info return(veg_info) - - - } # extract_FIA # A function for identifying fia plot records that are remeasurements of one another, -# and upon finding them retaining only the one that is closest to some target year. +# and upon finding them retaining only the one that is closest to some target year. # Since fia.to.psscss currently selects plots from only a single cycle (for a given state) -# it shouldn't be getting remeasurements, and this probably isn't doing anything in the -# current code. But it could be useful for future updates. +# it shouldn't be getting remeasurements, and this probably isn't doing anything in the +# current code. But it could be useful for future updates. 
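As a worked illustration of the intended behaviour (hypothetical control numbers, and assuming the elided branches propagate the parent's plot id as the surrounding comments describe): record "B" is a remeasurement of "A", so only the visit closest to the target year survives, while the unrelated record "C" is always kept.
keep <- PEcAn.data.land:::.select.unique.fia.plot.records(
  plt_cn      = c("A", "B", "C"),
  prev_plt_cn = c(NA, "A", NA),
  measyear    = c(2003, 2009, 2004),
  target.year = 2005
)
# expected under those assumptions: keep == c(1, 3), i.e. the 2003 visit of the A/B pair plus C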
.select.unique.fia.plot.records <- function(plt_cn, prev_plt_cn, measyear, target.year) { if (length(plt_cn) != length(prev_plt_cn)) { PEcAn.logger::logger.error("Inputs must have same length!") return(NULL) } - + # Identify records that are part of the same remeasurement sequence prev_plt_cn[prev_plt_cn == ""] <- NA unique.plot.id <- rep(NA, length(plt_cn)) @@ -156,7 +156,7 @@ extract_FIA <- function(lon, lat, start_date, end_date, gridres = 0.075, dbparms # assign a new plot id unique.plot.id[i] <- i } - + # Check whether this record is a remeasurement of another one in the list if (!is.na(prev_plt_cn[i])) { parent.ind <- which(plt_cn == prev_plt_cn[i]) @@ -171,7 +171,7 @@ extract_FIA <- function(lon, lat, start_date, end_date, gridres = 0.075, dbparms next } } - + # Check whether any other record is a remeasurement of this one child.ind <- which(prev_plt_cn == plt_cn[i]) if (length(child.ind) > 0) { @@ -183,7 +183,7 @@ extract_FIA <- function(lon, lat, start_date, end_date, gridres = 0.075, dbparms } } } - + # For any identified remeasurement sequences, choose to keep the record that is closest to the target year ind.keep <- numeric(0) for (unique.id in unique(unique.plot.id)) { @@ -193,6 +193,6 @@ extract_FIA <- function(lon, lat, start_date, end_date, gridres = 0.075, dbparms } ind.keep <- c(ind.keep, ind.keep.i) } - + return(sort(ind.keep)) } # .select.unique.fia.plot.records diff --git a/modules/data.land/R/extract_NEON_veg.R b/modules/data.land/R/extract_NEON_veg.R index c66f1b6ddfd..3ab0e7ce270 100644 --- a/modules/data.land/R/extract_NEON_veg.R +++ b/modules/data.land/R/extract_NEON_veg.R @@ -9,95 +9,94 @@ #' @param store_dir location where you want to store downloaded NEON files #' @param neonsites prepared datasets table from NEON using neonstore::neon_sites(api = "https://data.neonscience.org/api/v0", .token = Sys.getenv("NEON_TOKEN")) #' @param ... Additional parameters -#' -#' +#' +#' #' @return veg_info object to be passed to extract_veg within ic_process #' @author Alexis Helgeson and Michael Dietze #' #' @export #' @importFrom rlang .data -#' @examples -#' start_date = as.Date("2020-01-01") -#' end_date = as.Date("2021-09-01") -extract_NEON_veg <- function(lon, lat, start_date, end_date, store_dir, neonsites = NULL, ...){ - - #Function to grab the first measurements for each plot between start and end date. - Grab_First_Measurements_of_Each_Plot <- function(temp_data){ +#' @examples +#' start_date <- as.Date("2020-01-01") +#' end_date <- as.Date("2021-09-01") +extract_NEON_veg <- function(lon, lat, start_date, end_date, store_dir, neonsites = NULL, ...) { + # Function to grab the first measurements for each plot between start and end date. 
+ Grab_First_Measurements_of_Each_Plot <- function(temp_data) { Plot_Year <- paste0(temp_data$plot, temp_data$year) unique_Year <- sort(unique(temp_data$year)) unique_Plot <- sort(unique(temp_data$plot)) Ind <- rep(NA, length(Plot_Year)) - + for (j in 1:length(unique_Plot)) { for (k in 1:length(unique_Year)) { - if(length(which(Plot_Year == paste0(unique_Plot[j], unique_Year[k])))>0){ + if (length(which(Plot_Year == paste0(unique_Plot[j], unique_Year[k]))) > 0) { Ind[which(Plot_Year == paste0(unique_Plot[j], unique_Year[k]))] <- 1 break } } } temp_data <- cbind(temp_data, Ind) - if(sum(is.na(temp_data$Ind))==0){ + if (sum(is.na(temp_data$Ind)) == 0) { temp_data <- temp_data - }else{ - temp_data <- temp_data[-which(is.na(temp_data$Ind)),] + } else { + temp_data <- temp_data[-which(is.na(temp_data$Ind)), ] } temp_data } - - #Find sitename from lon and lat params using distance - if(is.null(neonsites)){ + + # Find sitename from lon and lat params using distance + if (is.null(neonsites)) { neonsites <- neonstore::neon_sites(api = "https://data.neonscience.org/api/v0", .token = Sys.getenv("NEON_TOKEN")) } - neonsites <- dplyr::select(neonsites, "siteCode", "siteLatitude", "siteLongitude") #select for relevant columns + neonsites <- dplyr::select(neonsites, "siteCode", "siteLatitude", "siteLongitude") # select for relevant columns betyneondist <- swfscMisc::distance(lat1 = lat, lon1 = lon, lat2 = neonsites$siteLatitude, lon2 = neonsites$siteLongitude) mindist <- min(betyneondist) distloc <- match(mindist, betyneondist) lat <- neonsites$siteLatitude[distloc] lon <- neonsites$siteLongitude[distloc] site <- dplyr::filter(neonsites, .data$siteLatitude == lat & .data$siteLongitude == lon) - sitename = site$siteCode - #Load in NEON datasets - neonstore::neon_download("DP1.10098.001", dir = store_dir, table = NA, site = sitename, start_date = start_date, end_date = end_date, type = "basic",api = "https://data.neonscience.org/api/v0") + sitename <- site$siteCode + # Load in NEON datasets + neonstore::neon_download("DP1.10098.001", dir = store_dir, table = NA, site = sitename, start_date = start_date, end_date = end_date, type = "basic", api = "https://data.neonscience.org/api/v0") apparentindividual <- neonstore::neon_read(table = "apparentindividual", product = "DP1.10098.001", site = sitename, start_date = start_date, end_date = end_date, dir = store_dir) - if(is.null(apparentindividual)){ + if (is.null(apparentindividual)) { filter.date <- NA - }else{ + } else { mappingandtagging <- neonstore::neon_read(table = "mappingandtagging", product = "DP1.10098.001", site = sitename, start_date = start_date, end_date = end_date, dir = store_dir) joined.veg <- dplyr::left_join(mappingandtagging, apparentindividual, by = "individualID") - #Filter joined.veg for required information: DBH, tree height, and species + # Filter joined.veg for required information: DBH, tree height, and species filter.veg <- dplyr::select(joined.veg, "siteID.x", "plotID.x", "subplotID", "taxonID", "scientificName", "taxonRank", "date.y", "stemDiameter", "height") - #Filter for most recent record + # Filter for most recent record filter.date <- dplyr::filter(filter.veg, .data$date.y >= start_date) - filter.date <- filter.date[which(!is.na(filter.date$subplotID), !is.na(filter.date$stemDiameter)),] - #Create year column - filter.date$year <- format(as.Date(filter.date$date.y, format="%d/%m/%Y"),"%Y") - #Rename NEON column names to match pecan functions + filter.date <- filter.date[which(!is.na(filter.date$subplotID), 
!is.na(filter.date$stemDiameter)), ] + # Create year column + filter.date$year <- format(as.Date(filter.date$date.y, format = "%d/%m/%Y"), "%Y") + # Rename NEON column names to match pecan functions colnames(filter.date) <- c("site_name", "plot", "Subplot", "species_USDA_symbol", "species", "taxonRank", "date", "DBH", "height", "year") filter.date <- Grab_First_Measurements_of_Each_Plot(filter.date) } - - #herb AGB - neonstore::neon_download("DP1.10023.001", dir = store_dir, table = NA, site = sitename, start_date = start_date, end_date = end_date, type = "basic",api = "https://data.neonscience.org/api/v0") + + # herb AGB + neonstore::neon_download("DP1.10023.001", dir = store_dir, table = NA, site = sitename, start_date = start_date, end_date = end_date, type = "basic", api = "https://data.neonscience.org/api/v0") massdata <- neonstore::neon_read(table = "massdata", product = "DP1.10023.001", site = sitename, start_date = start_date, end_date = end_date, dir = store_dir) - if(is.null(massdata)){ + if (is.null(massdata)) { filter.herb <- NA - }else{ + } else { perbout <- neonstore::neon_read(table = "perbout", product = "DP1.10023.001", site = sitename, start_date = start_date, end_date = end_date, dir = store_dir) joined.herb <- dplyr::left_join(massdata, perbout, by = "sampleID") filter.herb <- dplyr::select(joined.herb, "siteID.y", "plotID.x", "subplotID", "plotType.x", "clipArea", "dryMass", "collectDate.y") - #Create year column - filter.herb$year <- format(as.Date(filter.herb$collectDate.y, format="%Y-%m-%d"),"%Y") - #Rename NEON column names to match pecan functions + # Create year column + filter.herb$year <- format(as.Date(filter.herb$collectDate.y, format = "%Y-%m-%d"), "%Y") + # Rename NEON column names to match pecan functions colnames(filter.herb) <- c("site_name", "plot", "Subplot", "plotType", "clipArea", "dryMass", "date", "year") - filter.herb$dryMass <- PEcAn.utils::ud_convert(filter.herb$dryMass, 'g m-2', 'kg m-2')#convert from g to kg. + filter.herb$dryMass <- PEcAn.utils::ud_convert(filter.herb$dryMass, "g m-2", "kg m-2") # convert from g to kg. 
filter.herb <- Grab_First_Measurements_of_Each_Plot(filter.herb) } - + # #species info # neonstore::neon_download("DP1.10058.001", dir = store_dir, table = NA, site = sitename, start_date = start_date, end_date = end_date, type = "basic",api = "https://data.neonscience.org/api/v0") # div_1m2 <- neonstore::neon_read(table = "div_1m2", product = "DP1.10058.001", site = sitename, start_date = start_date, end_date = end_date, dir = store_dir) - # + # # #check if species info is available for herb plots # herb.plot <- unique(filter.herb$plotID.x) # check.species <- herb.plot %in% filter.species$plotID @@ -108,93 +107,93 @@ extract_NEON_veg <- function(lon, lat, start_date, end_date, store_dir, neonsite # }else{ # PEcAn.logger::logger.info(paste0("No herbacious species info available for ", sitename)) # } - # + # # #remove NAs from species column only, next step species matching does not like NAs # filter.herb <- filter.herb[!is.na(filter.herb$scientificName),] # filter.date <- filter.date[!is.na(filter.date$scientificName),] - - #####calculate the site-specific fraction of 0-30cm soc to the whole soil profile soc based on Magpit data - neonstore::neon_download("DP1.00096.001", dir = store_dir, table = NA, site = sitename, start_date = as.Date("2012-01-01"), end_date = end_date, type = "basic",api = "https://data.neonscience.org/api/v0") + + ##### calculate the site-specific fraction of 0-30cm soc to the whole soil profile soc based on Magpit data + neonstore::neon_download("DP1.00096.001", dir = store_dir, table = NA, site = sitename, start_date = as.Date("2012-01-01"), end_date = end_date, type = "basic", api = "https://data.neonscience.org/api/v0") perbulksample <- neonstore::neon_read(table = "perbulksample", product = "DP1.00096.001", site = sitename, start_date = as.Date("2012-01-01"), end_date = end_date, dir = store_dir) perarchivesample <- neonstore::neon_read(table = "perarchivesample", product = "DP1.00096.001", site = sitename, start_date = as.Date("2012-01-01"), end_date = end_date, dir = store_dir) perbiogeosample <- neonstore::neon_read(table = "perbiogeosample", product = "DP1.00096.001", site = sitename, start_date = as.Date("2012-01-01"), end_date = end_date, dir = store_dir) -if(is.null(perbulksample) | is.null(perbiogeosample) | is.null(perarchivesample)){ - print("no Magpit soil carbon data found!") - joined.soilmg <- NA -}else{ - joined.soilmg <- dplyr::left_join(perarchivesample, perbiogeosample, by = "horizonID") - joined.soilmg <- dplyr::left_join(joined.soilmg, perbulksample, by = "horizonID") - joined.soilmg<-joined.soilmg[joined.soilmg$bulkDensSampleType=="Regular",] - joined.soilmg<-joined.soilmg[joined.soilmg$biogeoSampleType=="Regular",] - joined.soilmg<-joined.soilmg[!duplicated(joined.soilmg$biogeoTopDepth),] - joined.soilmg<-dplyr::select(joined.soilmg,"collectDate.x","siteID.x","biogeoTopDepth","biogeoBottomDepth","bulkDensExclCoarseFrag","carbonTot","bulkDensSampleType","biogeoSampleType") - joined.soilmg.top30 <- joined.soilmg[joined.soilmg$biogeoBottomDepth<=35,] #set maxi 35 here to allow more top layers to be considered, e.g. 
0-31cm - soilcarbon.top30 <- sum(joined.soilmg.top30$bulkDensExclCoarseFrag * joined.soilmg.top30$carbonTot * 0.001 * (joined.soilmg.top30$biogeoBottomDepth - joined.soilmg.top30$biogeoTopDepth) * 10000, na.rm=T)/1000 #convert from gram to kilogram - #remove NA from soil data - soilcarbon.whole <- sum(joined.soilmg$bulkDensExclCoarseFrag * joined.soilmg$carbonTot * 0.001 * (joined.soilmg$biogeoBottomDepth - joined.soilmg$biogeoTopDepth) * 10000, na.rm=T)/1000 #convert from gram to kilogram - frac_30 <- soilcarbon.top30/soilcarbon.whole -} + if (is.null(perbulksample) | is.null(perbiogeosample) | is.null(perarchivesample)) { + print("no Magpit soil carbon data found!") + joined.soilmg <- NA + } else { + joined.soilmg <- dplyr::left_join(perarchivesample, perbiogeosample, by = "horizonID") + joined.soilmg <- dplyr::left_join(joined.soilmg, perbulksample, by = "horizonID") + joined.soilmg <- joined.soilmg[joined.soilmg$bulkDensSampleType == "Regular", ] + joined.soilmg <- joined.soilmg[joined.soilmg$biogeoSampleType == "Regular", ] + joined.soilmg <- joined.soilmg[!duplicated(joined.soilmg$biogeoTopDepth), ] + joined.soilmg <- dplyr::select(joined.soilmg, "collectDate.x", "siteID.x", "biogeoTopDepth", "biogeoBottomDepth", "bulkDensExclCoarseFrag", "carbonTot", "bulkDensSampleType", "biogeoSampleType") + joined.soilmg.top30 <- joined.soilmg[joined.soilmg$biogeoBottomDepth <= 35, ] # set maxi 35 here to allow more top layers to be considered, e.g. 0-31cm + soilcarbon.top30 <- sum(joined.soilmg.top30$bulkDensExclCoarseFrag * joined.soilmg.top30$carbonTot * 0.001 * (joined.soilmg.top30$biogeoBottomDepth - joined.soilmg.top30$biogeoTopDepth) * 10000, na.rm = T) / 1000 # convert from gram to kilogram + # remove NA from soil data + soilcarbon.whole <- sum(joined.soilmg$bulkDensExclCoarseFrag * joined.soilmg$carbonTot * 0.001 * (joined.soilmg$biogeoBottomDepth - joined.soilmg$biogeoTopDepth) * 10000, na.rm = T) / 1000 # convert from gram to kilogram + frac_30 <- soilcarbon.top30 / soilcarbon.whole + } -####calculate soil C of 0-30cm depth from periodic soil core collections - if(is.null(perbulksample)){ + #### calculate soil C of 0-30cm depth from periodic soil core collections + if (is.null(perbulksample)) { print("no Magpit soil bulk density data found!") joined.soil <- NA - }else{ - #filter for regular type of bulk density measurements - perbulksample <- perbulksample[perbulksample$bulkDensSampleType=="Regular",] - - #remove duplicated and NA measurements for bulk density + } else { + # filter for regular type of bulk density measurements + perbulksample <- perbulksample[perbulksample$bulkDensSampleType == "Regular", ] + + # remove duplicated and NA measurements for bulk density bulkDensBottomDepth <- perbulksample$bulkDensBottomDepth bulkDensExclCoarseFrag <- perbulksample$bulkDensExclCoarseFrag - Ind <- (!duplicated(bulkDensBottomDepth))&(!is.na(bulkDensExclCoarseFrag)) + Ind <- (!duplicated(bulkDensBottomDepth)) & (!is.na(bulkDensExclCoarseFrag)) bulkDensBottomDepth <- bulkDensBottomDepth[Ind] bulkDensExclCoarseFrag <- bulkDensExclCoarseFrag[Ind] - - #calculate bulk density (need to do: more precise depth matching) - bulkDensity <- mean(bulkDensExclCoarseFrag[which(bulkDensBottomDepth <= 35)]) #set maxi 35 here to allow more bulk density samples to be considered, e.g. 23-33 cm (there might be a typo for 0-110 cm for JORN sites, so only bulk density for 22-33 cm is available) - - #if there is no bulk density measurements above 30cm. 
- if(is.na(bulkDensity)){ + + # calculate bulk density (need to do: more precise depth matching) + bulkDensity <- mean(bulkDensExclCoarseFrag[which(bulkDensBottomDepth <= 35)]) # set maxi 35 here to allow more bulk density samples to be considered, e.g. 23-33 cm (there might be a typo for 0-110 cm for JORN sites, so only bulk density for 22-33 cm is available) + + # if there is no bulk density measurements above 30cm. + if (is.na(bulkDensity)) { joined.soil <- NA - }else{ - #download periodic data and join tables - #so far we use end date of the the year 2021 - #because the "sls_soilCoreCollection" table will fail when we use more recent end date for some sites. - neonstore::neon_download("DP1.10086.001", dir = store_dir, table = NA, site = sitename, start_date = as.Date("2012-01-01"), end_date = as.Date("2021-01-01"), type = "basic",api = "https://data.neonscience.org/api/v0") + } else { + # download periodic data and join tables + # so far we use end date of the the year 2021 + # because the "sls_soilCoreCollection" table will fail when we use more recent end date for some sites. + neonstore::neon_download("DP1.10086.001", dir = store_dir, table = NA, site = sitename, start_date = as.Date("2012-01-01"), end_date = as.Date("2021-01-01"), type = "basic", api = "https://data.neonscience.org/api/v0") sls_soilChemistry <- neonstore::neon_read(table = "sls_soilChemistry", product = "DP1.10086.001", site = sitename, start_date = as.Date("2012-01-01"), end_date = as.Date("2021-01-01"), dir = store_dir) sls_soilCoreCollection <- neonstore::neon_read(table = "sls_soilCoreCollection", product = "DP1.10086.001", site = sitename, start_date = as.Date("2012-01-01"), end_date = as.Date("2021-01-01"), dir = store_dir) - - if(is.null(sls_soilChemistry) | is.null(sls_soilCoreCollection)){ + + if (is.null(sls_soilChemistry) | is.null(sls_soilCoreCollection)) { print("no periodic soil carbon data found!") joined.soil <- NA - }else{ + } else { joined.soil <- dplyr::left_join(sls_soilChemistry, sls_soilCoreCollection, by = "sampleID") - - #select columns + + # select columns joined.soil <- dplyr::select(joined.soil, "siteID.x", "plotID.x", "plotType.x", "organicCPercent", "collectDate.x", "sampleTopDepth", "sampleBottomDepth") joined.soil$year <- lubridate::year(joined.soil$collectDate.x) colnames(joined.soil) <- c("site_name", "plot", "plotType", "organicCPercent", "date", "top", "bottom", "year") - + joined.soil <- Grab_First_Measurements_of_Each_Plot(joined.soil) - - #remove NA values for organicCPercent data - joined.soil <- joined.soil[which(!is.na(joined.soil$organicCPercent)),] - - #calculate soil carbon + + # remove NA values for organicCPercent data + joined.soil <- joined.soil[which(!is.na(joined.soil$organicCPercent)), ] + + # calculate soil carbon joined.soil$bulkDensity <- bulkDensity joined.soil$frac_30 <- frac_30 - #here we multiply bulkdensity (in kg/m3) with soil depth (in m) to calculate the soil carbon (in kg/m2) at the top 30 cm depth of soil, and then divide it by frac_30 to get the value for whole profile. - #note that we have to divide by 100 because of percentage for the organicCPercent variable. - joined.soil$SoilCarbon <- joined.soil$organicCPercent/100 * PEcAn.utils::ud_convert(joined.soil$bulkDensity, "g cm-3", "kg m-3") * PEcAn.utils::ud_convert(30, "cm", "m")/joined.soil$frac_30 + # here we multiply bulkdensity (in kg/m3) with soil depth (in m) to calculate the soil carbon (in kg/m2) at the top 30 cm depth of soil, and then divide it by frac_30 to get the value for whole profile. 
+ # note that we have to divide by 100 because of percentage for the organicCPercent variable. + joined.soil$SoilCarbon <- joined.soil$organicCPercent / 100 * PEcAn.utils::ud_convert(joined.soil$bulkDensity, "g cm-3", "kg m-3") * PEcAn.utils::ud_convert(30, "cm", "m") / joined.soil$frac_30 } } } - - #Create veg_info object as a list + + # Create veg_info object as a list veg_info <- list() - #Set filter.date as veg_info[[2]] + # Set filter.date as veg_info[[2]] veg_info[[2]] <- filter.date - #Set plot size as veg_info[[1]] + # Set plot size as veg_info[[1]] veg_info[[1]] <- filter.herb veg_info[[3]] <- joined.soil diff --git a/modules/data.land/R/extract_SM_CDS.R b/modules/data.land/R/extract_SM_CDS.R index c2f71dde487..1aeeb88ae1b 100644 --- a/modules/data.land/R/extract_SM_CDS.R +++ b/modules/data.land/R/extract_SM_CDS.R @@ -9,33 +9,38 @@ #' #' @return A data frame containing soil moisture and sd for each site and each time step. #' @export -#' +#' #' @examples #' @author Dongchen Zhang #' @importFrom dplyr %>% extract_SM_CDS <- function(site_info, - time.points, - in.path, - out.path = NULL, + time.points, + in.path, + out.path = NULL, allow.download = TRUE, - search_window = 10){ - #find downloaded files and the corresponding dates. + search_window = 10) { + # find downloaded files and the corresponding dates. ncfiles <- base::list.files(in.path, full.names = T) - available.dates <- ncfiles %>% purrr::map(function(file){ - base::strsplit(x = file, split = ".", fixed = T)[[1]][2] - }) %>% unlist %>% as.Date - #loop over time points. + available.dates <- ncfiles %>% + purrr::map(function(file) { + base::strsplit(x = file, split = ".", fixed = T)[[1]][2] + }) %>% + unlist() %>% + as.Date() + # loop over time points. results <- data.frame() for (i in seq_along(time.points)) { - #generate dates within search window. + # generate dates within search window. dates <- c(seq(lubridate::date(time.points[i]), by = "1 day", length.out = search_window)) - dates.ind.download <- which(!dates%in%available.dates) - dates.ind.exist <- which(dates%in%available.dates) - #if there is no nc file downloaded or within the search window. + dates.ind.download <- which(!dates %in% available.dates) + dates.ind.exist <- which(dates %in% available.dates) + # if there is no nc file downloaded or within the search window. if (length(dates.ind.download) > 0) { - PEcAn.logger::logger.info(paste("The nc files for the following dates were not found.", - paste(dates[dates.ind.download], collapse = ", "))) - #try download if allowed. + PEcAn.logger::logger.info(paste( + "The nc files for the following dates were not found.", + paste(dates[dates.ind.download], collapse = ", ") + )) + # try download if allowed. if (allow.download) { PEcAn.logger::logger.info("Try download from cds server.") ncfile <- c(ncfiles[dates.ind.exist], PEcAn.data.land::download.SM_CDS(in.path, dates[dates.ind.download])) %>% sort() @@ -46,37 +51,46 @@ extract_SM_CDS <- function(site_info, } else { ncfile <- ncfiles[dates.ind.exist] %>% sort() } - #read nc files. + # read nc files. 
PEcAn.logger::logger.info(paste("Extracting nc files for date", time.points[i])) - sm <- ncfile %>% furrr::future_map(function(nc){ - list(sm = raster::brick(nc, varname = "sm") %>% - raster::extract(sp::SpatialPoints(cbind(site_info$lon, site_info$lat)), - method = 'simple') %>% as.vector(), - sm_uncertainty = raster::brick(nc, varname = "sm_uncertainty") %>% - raster::extract(sp::SpatialPoints(cbind(site_info$lon, site_info$lat)), - method = 'simple') %>% as.vector()) - }, .progress = T) %>% dplyr::bind_cols() %>% as.data.frame() - sm.mean <- sm[,seq(1, length(ncfile)*2, 2)] %>% `colnames<-`(rep("sm.mean", length(seq(1, length(ncfile)*2, 2)))) - sm.uncertainty <- sm[,seq(2, length(ncfile)*2, 2)] %>% `colnames<-`(rep("sm.uncertainty", ncol(sm.mean))) - #fill in results data frame. + sm <- ncfile %>% + furrr::future_map(function(nc) { + list( + sm = raster::brick(nc, varname = "sm") %>% + raster::extract(sp::SpatialPoints(cbind(site_info$lon, site_info$lat)), + method = "simple" + ) %>% as.vector(), + sm_uncertainty = raster::brick(nc, varname = "sm_uncertainty") %>% + raster::extract(sp::SpatialPoints(cbind(site_info$lon, site_info$lat)), + method = "simple" + ) %>% as.vector() + ) + }, .progress = T) %>% + dplyr::bind_cols() %>% + as.data.frame() + sm.mean <- sm[, seq(1, length(ncfile) * 2, 2)] %>% `colnames<-`(rep("sm.mean", length(seq(1, length(ncfile) * 2, 2)))) + sm.uncertainty <- sm[, seq(2, length(ncfile) * 2, 2)] %>% `colnames<-`(rep("sm.uncertainty", ncol(sm.mean))) + # fill in results data frame. for (j in seq_along(site_info$site_id)) { - nonNA.ind <- which.min(is.na(sm.mean[j,])) - results <- rbind(results, list(date = as.character(time.points[i]), - site.id = site_info$site_id[j], - lon = site_info$lon[j], - lat = site_info$lat[j], - sm.mean = sm.mean[j, nonNA.ind], - sm.uncertainty = sm.uncertainty[j, nonNA.ind])) + nonNA.ind <- which.min(is.na(sm.mean[j, ])) + results <- rbind(results, list( + date = as.character(time.points[i]), + site.id = site_info$site_id[j], + lon = site_info$lon[j], + lat = site_info$lat[j], + sm.mean = sm.mean[j, nonNA.ind], + sm.uncertainty = sm.uncertainty[j, nonNA.ind] + )) } } - #tweak results. - #if sm mean is NA, we remove the entire row. + # tweak results. + # if sm mean is NA, we remove the entire row. # results <- results[!is.na(results$sm.mean),] - #if sm uncertainty is NA, we set the uncertainty as the maximum uncertainty across sites. + # if sm uncertainty is NA, we set the uncertainty as the maximum uncertainty across sites. results$sm.uncertainty[which(is.na(results$sm.uncertainty))] <- max(results$sm.uncertainty, na.rm = T) - #write into csv file. + # write into csv file. 
if (!is.null(out.path)) { utils::write.csv(results, file = file.path(out.path, "sm.csv")) } results -} \ No newline at end of file +} diff --git a/modules/data.land/R/extract_soil_nc.R b/modules/data.land/R/extract_soil_nc.R index 974173370cc..949f5d7c815 100644 --- a/modules/data.land/R/extract_soil_nc.R +++ b/modules/data.land/R/extract_soil_nc.R @@ -1,7 +1,7 @@ #' Extract soil data from gssurgo #' #' @param outdir Output directory for writing down the netcdf file -#' @param lat Latitude +#' @param lat Latitude #' @param lon Longitude #' @param size Ensemble size #' @param radius radius in meters is used to take soil type samples around the site @@ -13,48 +13,50 @@ #' #' @examples #' \dontrun{ -#' outdir <- "~/paleon/envTest" -#' lat <- 40 -#' lon <- -80 -#' PEcAn.data.land::extract_soil_gssurgo(outdir, lat, lon) +#' outdir <- "~/paleon/envTest" +#' lat <- 40 +#' lon <- -80 +#' PEcAn.data.land::extract_soil_gssurgo(outdir, lat, lon) #' } #' @author Hamze Dokoohaki #' @export -#' -extract_soil_gssurgo<-function(outdir, lat, lon, size=1, radius=500, depths=c(0.15,0.30,0.60)){ - # I keep all the ensembles here - all.soil.ens <-list() - - # I ask the gSSURGO to find all the mukeys (loosely can be thought of soil type) within 500m of my site location. +#' +extract_soil_gssurgo <- function(outdir, lat, lon, size = 1, radius = 500, depths = c(0.15, 0.30, 0.60)) { + # I keep all the ensembles here + all.soil.ens <- list() + + # I ask the gSSURGO to find all the mukeys (loosely can be thought of soil type) within 500m of my site location. # Basically I think of this as me going around and taking soil samples within 500m of my site. - #https://sdmdataaccess.nrcs.usda.gov/SpatialFilterHelp.htm + # https://sdmdataaccess.nrcs.usda.gov/SpatialFilterHelp.htm mu.Path <- paste0( "https://sdmdataaccess.nrcs.usda.gov/Spatial/SDMWGS84Geographic.wfs?", "SERVICE=WFS", "&VERSION=1.1.0", "&REQUEST=GetFeature&TYPENAME=MapunitPoly", "&FILTER=", - "", - "", - "Geometry", - "", - "", lon, ",", lat, "", - "", - "", radius, "", - "", - "", + "", + "", + "Geometry", + "", + "", lon, ",", lat, "", + "", + "", radius, "", + "", + "", "&OUTPUTFORMAT=XMLMukeyList" ) - + xmll <- curl::curl_download( mu.Path, ssl.verifyhost = FALSE, - ssl.verifypeer = FALSE) + ssl.verifypeer = FALSE + ) mukey_str <- XML::xpathApply( doc = XML::xmlParse(xmll), path = "//MapUnitKeyList", - fun = XML::xmlValue) + fun = XML::xmlValue + ) mukeys <- strsplit(mukey_str, ",")[[1]] if (length(mukeys) == 0) { @@ -64,10 +66,13 @@ extract_soil_gssurgo<-function(outdir, lat, lon, size=1, radius=500, depths=c(0. # calling the query function sending the mapunit keys soilprop <- gSSURGO.Query( mukeys, - c("chorizon.sandtotal_r", + c( + "chorizon.sandtotal_r", "chorizon.silttotal_r", "chorizon.claytotal_r", - "chorizon.hzdept_r")) + "chorizon.hzdept_r" + ) + ) soilprop.new <- soilprop %>% dplyr::arrange(.data$hzdept_r) %>% @@ -76,135 +81,145 @@ extract_soil_gssurgo<-function(outdir, lat, lon, size=1, radius=500, depths=c(0. 
fraction_of_silt_in_soil = "silttotal_r", fraction_of_clay_in_soil = "claytotal_r", soil_depth = "hzdept_r", - mukey = "mukey") %>% + mukey = "mukey" + ) %>% dplyr::mutate(dplyr::across( - c(dplyr::starts_with("fraction_of"), - "soil_depth"), - function(x) x / 100)) + c( + dplyr::starts_with("fraction_of"), + "soil_depth" + ), + function(x) x / 100 + )) - soilprop.new <- soilprop.new[ stats::complete.cases(soilprop.new) , ] - #converting it to list + soilprop.new <- soilprop.new[stats::complete.cases(soilprop.new), ] + # converting it to list soil.data.gssurgo <- names(soilprop.new)[1:4] %>% purrr::map(function(var) { soilprop.new[, var] }) %>% stats::setNames(names(soilprop.new)[1:4]) - #This ensures that I have at least one soil ensemble in case the modeling part failed - all.soil.ens <-c(all.soil.ens, list(soil.data.gssurgo)) - - + # This ensures that I have at least one soil ensemble in case the modeling part failed + all.soil.ens <- c(all.soil.ens, list(soil.data.gssurgo)) + + # What I do here is that I put soil data into depth classes and then model each class speparatly #- see if we need to generate soil ensemble and add that to the list of all - tryCatch({ - # find the soil depth levels based on the depth argument - # if soil profile is deeper than what is specified in the argument then I go as deep as the soil profile. - if (max(soilprop.new$soil_depth) > max(depths)) depths <- sort (c(depths, max(max(soilprop.new$soil_depth)))) - - depth.levs<-findInterval(soilprop.new$soil_depth, depths) - depth.levs[depth.levs==0] <-1 - depth.levs[depth.levs>length(depths)] <-length(depths) - - soilprop.new.grouped<-soilprop.new %>% - dplyr::mutate(DepthL=depths[depth.levs]) - - # let's fit dirichlet for each depth level separately - simulated.soil.props<-soilprop.new.grouped %>% - split(list(soilprop.new.grouped$DepthL, soilprop.new.grouped$mukey)) %>% - purrr::map_df(function(DepthL.Data){ - tryCatch({ - # I model the soil properties for this depth - dir.model <-DepthL.Data[,c(1:3)]%>% - as.matrix() %>% - sirt::dirichlet.mle(.) - # Monte Carlo sampling based on my dirichlet model - alpha <- dir.model$alpha - alpha <- matrix(alpha, nrow= size, ncol=length(alpha), byrow=TRUE ) - simulated.soil <- sirt::dirichlet.simul(alpha) - # # using the simulated sand/silt/clay to generate soil ensemble - simulated.soil<-simulated.soil %>% - as.data.frame %>% - dplyr::mutate(DepthL=rep(DepthL.Data[1,6], size), - mukey=rep(DepthL.Data[1,5], size)) %>% - `colnames<-`(c("fraction_of_sand_in_soil", - "fraction_of_silt_in_soil", - "fraction_of_clay_in_soil", - "soil_depth", - "mukey")) - simulated.soil - }, - error = function(e) { - PEcAn.logger::logger.warn(conditionMessage(e)) - return(NULL) + tryCatch( + { + # find the soil depth levels based on the depth argument + # if soil profile is deeper than what is specified in the argument then I go as deep as the soil profile. 
+ if (max(soilprop.new$soil_depth) > max(depths)) depths <- sort(c(depths, max(max(soilprop.new$soil_depth)))) + + depth.levs <- findInterval(soilprop.new$soil_depth, depths) + depth.levs[depth.levs == 0] <- 1 + depth.levs[depth.levs > length(depths)] <- length(depths) + + soilprop.new.grouped <- soilprop.new %>% + dplyr::mutate(DepthL = depths[depth.levs]) + + # let's fit dirichlet for each depth level separately + simulated.soil.props <- soilprop.new.grouped %>% + split(list(soilprop.new.grouped$DepthL, soilprop.new.grouped$mukey)) %>% + purrr::map_df(function(DepthL.Data) { + tryCatch( + { + # I model the soil properties for this depth + dir.model <- DepthL.Data[, c(1:3)] %>% + as.matrix() %>% + sirt::dirichlet.mle(.) + # Monte Carlo sampling based on my dirichlet model + alpha <- dir.model$alpha + alpha <- matrix(alpha, nrow = size, ncol = length(alpha), byrow = TRUE) + simulated.soil <- sirt::dirichlet.simul(alpha) + # # using the simulated sand/silt/clay to generate soil ensemble + simulated.soil <- simulated.soil %>% + as.data.frame() %>% + dplyr::mutate( + DepthL = rep(DepthL.Data[1, 6], size), + mukey = rep(DepthL.Data[1, 5], size) + ) %>% + `colnames<-`(c( + "fraction_of_sand_in_soil", + "fraction_of_silt_in_soil", + "fraction_of_clay_in_soil", + "soil_depth", + "mukey" + )) + simulated.soil + }, + error = function(e) { + PEcAn.logger::logger.warn(conditionMessage(e)) + return(NULL) + } + ) }) - - }) - - # estimating the proportion of areas for those mukeys which are modeled - mukey_area <- mukey_area %>% - dplyr::filter(mukeys %in% simulated.soil.props$mukey) %>% - dplyr::mutate(Area=.data$Area/sum(.data$Area)) - - #--- Mixing the depths - soil.profiles<-simulated.soil.props %>% - split(.$mukey)%>% - purrr::map(function(soiltype.sim){ - sizein <- (mukey_area$Area[ mukey_area$mukey == soiltype.sim$mukey %>% unique()])*size - - 1:ceiling(sizein) %>% - purrr::map(function(x){ - soiltype.sim %>% - split(.$soil_depth)%>% - purrr::map_dfr(~.x[x,]) - }) - }) %>% - purrr::flatten() - - #- add them to the list of all the ensembles ready to be converted to .nc file - all.soil.ens<-soil.profiles %>% - purrr::map(function(SEns){ - names(SEns) %>% - purrr::map(function(var){ - SEns[,var] - })%>% - stats::setNames(names(SEns)) - })%>% - c(all.soil.ens,.) - - }, - error = function(e) { - PEcAn.logger::logger.warn(conditionMessage(e)) - }) - - + + # estimating the proportion of areas for those mukeys which are modeled + mukey_area <- mukey_area %>% + dplyr::filter(mukeys %in% simulated.soil.props$mukey) %>% + dplyr::mutate(Area = .data$Area / sum(.data$Area)) + + #--- Mixing the depths + soil.profiles <- simulated.soil.props %>% + split(.$mukey) %>% + purrr::map(function(soiltype.sim) { + sizein <- (mukey_area$Area[mukey_area$mukey == soiltype.sim$mukey %>% unique()]) * size + + 1:ceiling(sizein) %>% + purrr::map(function(x) { + soiltype.sim %>% + split(.$soil_depth) %>% + purrr::map_dfr(~ .x[x, ]) + }) + }) %>% + purrr::flatten() + + #- add them to the list of all the ensembles ready to be converted to .nc file + all.soil.ens <- soil.profiles %>% + purrr::map(function(SEns) { + names(SEns) %>% + purrr::map(function(var) { + SEns[, var] + }) %>% + stats::setNames(names(SEns)) + }) %>% + c(all.soil.ens, .) 
+ }, + error = function(e) { + PEcAn.logger::logger.warn(conditionMessage(e)) + } + ) + + #-- generating the .nc files for all the collected ensembles out.ense <- (1:length(all.soil.ens)) %>% purrr::map(function(i) { - - tryCatch({ - #browser() - # calc new filename - prefix <- paste0("gSSURGO_soil_", i) - new.file <- file.path(outdir, paste0(prefix, ".nc")) - #sending it to the func where some new params will be added and then it will be written down as nc file. - suppressWarnings({ - soil2netcdf(all.soil.ens[[i]][1:4], new.file) - }) - - new.file - }, - error = function(e) { - PEcAn.logger::logger.warn(conditionMessage(e)) - return(NULL) - }) - + tryCatch( + { + # browser() + # calc new filename + prefix <- paste0("gSSURGO_soil_", i) + new.file <- file.path(outdir, paste0(prefix, ".nc")) + # sending it to the func where some new params will be added and then it will be written down as nc file. + suppressWarnings({ + soil2netcdf(all.soil.ens[[i]][1:4], new.file) + }) + + new.file + }, + error = function(e) { + PEcAn.logger::logger.warn(conditionMessage(e)) + return(NULL) + } + ) }) # removing the nulls or the ones that throw exception in the above trycatch - out.ense<- out.ense %>% + out.ense <- out.ense %>% purrr::discard(is.null) - - out.ense<-out.ense%>% + + out.ense <- out.ense %>% stats::setNames(rep("path", length(out.ense))) - + return(out.ense) } @@ -227,93 +242,91 @@ extract_soil_gssurgo<-function(outdir, lat, lon, size=1, radius=500, depths=c(0. #' @examples #' \dontrun{ #' in.file <- "~/paleon/env_paleon/soil/paleon_soil.nc" -#' outdir <- "~/paleon/envTest" -#' lat <- 40 -#' lon <- -80 -#' PEcAn.data.land::extract_soil_nc(in.file,outdir,lat,lon) +#' outdir <- "~/paleon/envTest" +#' lat <- 40 +#' lon <- -80 +#' PEcAn.data.land::extract_soil_nc(in.file, outdir, lat, lon) #' } -extract_soil_nc <- function(in.file,outdir,lat,lon){ - +extract_soil_nc <- function(in.file, outdir, lat, lon) { ## open soils nc <- ncdf4::nc_open(in.file) - + ## extract lat/lon dims <- names(nc$dim) - lat.dim <- dims[grep("^lat",dims)] - lon.dim <- dims[grep("^lon",dims)] + lat.dim <- dims[grep("^lat", dims)] + lon.dim <- dims[grep("^lon", dims)] soil.lat <- ncdf4::ncvar_get(nc, lat.dim) soil.lon <- ncdf4::ncvar_get(nc, lon.dim) - + ## check in range dlat <- abs(stats::median(diff(soil.lat))) dlon <- abs(stats::median(diff(soil.lon))) - if(lat < (min(soil.lat)-dlat) | lat > (max(soil.lat)+dlat)){ - PEcAn.logger::logger.error("site lat out of bounds",lat,range(soil.lat)) + if (lat < (min(soil.lat) - dlat) | lat > (max(soil.lat) + dlat)) { + PEcAn.logger::logger.error("site lat out of bounds", lat, range(soil.lat)) } - if(lon < (min(soil.lon)-dlon) | lon > (max(soil.lon)+dlon)){ - PEcAn.logger::logger.error("site lon out of bounds",lon,range(soil.lon)) + if (lon < (min(soil.lon) - dlon) | lon > (max(soil.lon) + dlon)) { + PEcAn.logger::logger.error("site lon out of bounds", lon, range(soil.lon)) } - if(dims[1] == lat.dim){ - soil.row <- which.min(abs(lat-soil.lat)) - soil.col <- which.min(abs(lon-soil.lon)) - } else if(dims[1] == lon.dim){ - soil.col <- which.min(abs(lat-soil.lat)) - soil.row <- which.min(abs(lon-soil.lon)) + if (dims[1] == lat.dim) { + soil.row <- which.min(abs(lat - soil.lat)) + soil.col <- which.min(abs(lon - soil.lon)) + } else if (dims[1] == lon.dim) { + soil.col <- which.min(abs(lat - soil.lat)) + soil.row <- which.min(abs(lon - soil.lon)) } else { - PEcAn.logger::logger.error("could not determine lat/lon dimension order:: ",dims) + PEcAn.logger::logger.error("could not determine 
lat/lon dimension order:: ", dims) } - + ## extract raw soil data soil.data <- list() soil.vars <- names(nc$var) - for(i in seq_along(soil.vars)){ - if(length(dims) == 2){ - soil.data[[soil.vars[i]]] <- ncdf4::ncvar_get(nc,soil.vars[i])[soil.row,soil.col] + for (i in seq_along(soil.vars)) { + if (length(dims) == 2) { + soil.data[[soil.vars[i]]] <- ncdf4::ncvar_get(nc, soil.vars[i])[soil.row, soil.col] } else { ## assuming there's a 3rd dim of soil depth profile - soil.data[[soil.vars[i]]] <- ncdf4::ncvar_get(nc,soil.vars[i])[soil.row,soil.col,] + soil.data[[soil.vars[i]]] <- ncdf4::ncvar_get(nc, soil.vars[i])[soil.row, soil.col, ] } } ncdf4::nc_close(nc) - + ## PalEON / MSTMIP / UNASM hack # t_ variables are topsoil layer (0– 30 cm) and # s_ variables are subsoil layer (30–100 cm) - depth <- ncdf4::ncdim_def(name = "depth", units = "meters", vals = c(0.3,1.0), create_dimvar = TRUE) - dvars <- soil.vars[grep("t_",soil.vars,fixed=TRUE)] - for(i in seq_along(dvars)){ - svar <- sub("t_","s_",dvars[i]) - soil.data[[dvars[i]]] <- c(soil.data[[dvars[i]]],soil.data[[svar]]) ## combine different depths - soil.data[[svar]] <- NULL ## drop old variable - names(soil.data)[which(names(soil.data) == dvars[i])] <- sub("t_","",dvars[i]) ## rename original + depth <- ncdf4::ncdim_def(name = "depth", units = "meters", vals = c(0.3, 1.0), create_dimvar = TRUE) + dvars <- soil.vars[grep("t_", soil.vars, fixed = TRUE)] + for (i in seq_along(dvars)) { + svar <- sub("t_", "s_", dvars[i]) + soil.data[[dvars[i]]] <- c(soil.data[[dvars[i]]], soil.data[[svar]]) ## combine different depths + soil.data[[svar]] <- NULL ## drop old variable + names(soil.data)[which(names(soil.data) == dvars[i])] <- sub("t_", "", dvars[i]) ## rename original } - - - ## name/unit conversions - soil.data$sand <- soil.data$sand/100 - soil.data$silt <- soil.data$silt/100 - soil.data$clay <- soil.data$clay/100 - soil.data$oc <- soil.data$oc/100 - soil.data$gravel <- soil.data$gravel/100 - soil.data$ref_bulk <- PEcAn.utils::ud_convert(soil.data$ref_bulk,"g cm-3","kg m-3") + + + ## name/unit conversions + soil.data$sand <- soil.data$sand / 100 + soil.data$silt <- soil.data$silt / 100 + soil.data$clay <- soil.data$clay / 100 + soil.data$oc <- soil.data$oc / 100 + soil.data$gravel <- soil.data$gravel / 100 + soil.data$ref_bulk <- PEcAn.utils::ud_convert(soil.data$ref_bulk, "g cm-3", "kg m-3") names(soil.data)[which(names(soil.data) == "clay")] <- "fraction_of_clay_in_soil" names(soil.data)[which(names(soil.data) == "sand")] <- "fraction_of_sand_in_soil" names(soil.data)[which(names(soil.data) == "silt")] <- "fraction_of_silt_in_soil" names(soil.data)[which(names(soil.data) == "gravel")] <- "fraction_of_gravel_in_soil" names(soil.data)[which(names(soil.data) == "ref_bulk")] <- "soil_bulk_density" - names(soil.data)[which(names(soil.data) == "ph")] <- "soil_ph" - names(soil.data)[which(names(soil.data) == "cec")] <- "soil_cec" ## units = meq/100g - names(soil.data)[which(names(soil.data) == "oc")] <- "soilC" ## this is currently the BETY name, would like to change and make units SI - + names(soil.data)[which(names(soil.data) == "ph")] <- "soil_ph" + names(soil.data)[which(names(soil.data) == "cec")] <- "soil_cec" ## units = meq/100g + names(soil.data)[which(names(soil.data) == "oc")] <- "soilC" ## this is currently the BETY name, would like to change and make units SI + ## calc new filename prefix <- tools::file_path_sans_ext(basename(in.file)) - new.file <- file.path(outdir,paste0(prefix,".nc")) - + new.file <- file.path(outdir, paste0(prefix, 
".nc")) + ## Calculate soil parameters and export to netcdf - soil2netcdf(soil.data,new.file) - + soil2netcdf(soil.data, new.file) + return(new.file) - } @@ -357,47 +370,49 @@ extract_soil_nc <- function(in.file,outdir,lat,lon){ #' #' @examples #' soil.units("soil_albedo") -soil.units <- function(varname = NA){ - variables <- as.data.frame(matrix(c("soil_depth","m", - "soil_cec","meq/100g", - "fraction_of_clay_in_soil","1", - "fraction_of_sand_in_soil","1", - "fraction_of_silt_in_soil","1", - "fraction_of_gravel_in_soil","1", - "volume_fraction_of_water_in_soil_at_saturation","m3 m-3", - "volume_fraction_of_water_in_soil_at_field_capacity","m3 m-3", - "volume_fraction_of_condensed_water_in_dry_soil","m3 m-3", - "volume_fraction_of_condensed_water_in_soil_at_wilting_point","m3 m-3", - "soilC","percent", - "soil_ph","1", - "soil_bulk_density","kg m-3", - "soil_type","string", - "soil_hydraulic_b","1", - "soil_water_potential_at_saturation","m", - "soil_hydraulic_conductivity_at_saturation","m s-1", - "thcond0","W m-1 K-1", - "thcond1","W m-1 K-1", - "thcond2","1", - "thcond3","1", - "soil_thermal_conductivity","W m-1 K-1", - "soil_thermal_conductivity_at_saturation","W m-1 K-1", - "soil_thermal_capacity","J kg-1 K-1", - "soil_albedo","1" - ), - ncol=2,byrow = TRUE)) - colnames(variables) <- c('var','unit') - - unit = which(variables$var == varname) - - if(length(unit) == 0){ - if(is.na(varname)){ +soil.units <- function(varname = NA) { + variables <- as.data.frame(matrix( + c( + "soil_depth", "m", + "soil_cec", "meq/100g", + "fraction_of_clay_in_soil", "1", + "fraction_of_sand_in_soil", "1", + "fraction_of_silt_in_soil", "1", + "fraction_of_gravel_in_soil", "1", + "volume_fraction_of_water_in_soil_at_saturation", "m3 m-3", + "volume_fraction_of_water_in_soil_at_field_capacity", "m3 m-3", + "volume_fraction_of_condensed_water_in_dry_soil", "m3 m-3", + "volume_fraction_of_condensed_water_in_soil_at_wilting_point", "m3 m-3", + "soilC", "percent", + "soil_ph", "1", + "soil_bulk_density", "kg m-3", + "soil_type", "string", + "soil_hydraulic_b", "1", + "soil_water_potential_at_saturation", "m", + "soil_hydraulic_conductivity_at_saturation", "m s-1", + "thcond0", "W m-1 K-1", + "thcond1", "W m-1 K-1", + "thcond2", "1", + "thcond3", "1", + "soil_thermal_conductivity", "W m-1 K-1", + "soil_thermal_conductivity_at_saturation", "W m-1 K-1", + "soil_thermal_capacity", "J kg-1 K-1", + "soil_albedo", "1" + ), + ncol = 2, byrow = TRUE + )) + colnames(variables) <- c("var", "unit") + + unit <- which(variables$var == varname) + + if (length(unit) == 0) { + if (is.na(varname)) { return(variables) } else { return(NA) } - }else{ - unit = as.character(variables$unit[unit]) + } else { + unit <- as.character(variables$unit[unit]) return(unit) } - } diff --git a/modules/data.land/R/extract_veg.R b/modules/data.land/R/extract_veg.R index c88aac78229..83e25d5ba04 100644 --- a/modules/data.land/R/extract_veg.R +++ b/modules/data.land/R/extract_veg.R @@ -1,7 +1,7 @@ ##' Function queries a DB to extract veg info downstream ##' @name extract_veg ##' @title extract_veg -##' +##' ##' @param new_site new_site object passed from ic_process includes lat, lon, id, and name ##' @param start_date "YYYY-MM-DD" ##' @param end_date "YYYY-MM-DD" @@ -18,182 +18,182 @@ ##' @return results object to be passed back to get.veg.module ##' @export ##' @author Istem Fer and Alexis Helgeson -extract_veg <- function(new_site, start_date, end_date, - source, gridres, format_name = NULL, - machine_host, dbparms, outfolder, overwrite = FALSE, 
input_veg = input_veg, ...){ - #code taken from https://stackoverflow.com/questions/14183766/match-fun-provide-error-with-functions-defined-inside-functions - # fget <- function(name, env = parent.frame()) { - # if (identical(env, emptyenv())) { - # stop("Could not find function called ", name, call. = FALSE) - # } - # - # if (exists(name, env, inherits = FALSE) && is.function(env[[name]])) { - # env[[name]] - # } else { - # fget(name, parent.env(env)) - # } - # } - #--------------------------------------------------------------------------------------------------# - # Extract veg info - #set start and end date as date objects - start_date = as.Date(start_date) - end_date = as.Date(end_date) - #keep this code chunk future PR will integrate back ask Alexis - # fcnx <- paste0("extract_", source) # e.g. extract_FIA - # #Need a better way to check if the function exists - # if (!exists(fcnx)) { - # PEcAn.logger::logger.severe(paste(fcnx, "does not exist.")) - # }else{ - # fcn <- fget(fcnx) #Error cannot find the function - # } - # extract_* functions need to have standard args - lon <- as.numeric(new_site$lon) - lat <- as.numeric(new_site$lat) - #veg_info <- fcn(lon = lon, lat = lat, startdate = start_date, enddate = end_date, gridres, dbparms) - - if (source == "NEON_veg") { - veg_info <- extract_NEON_veg(lon = lon, lat = lat, startdate = start_date, enddate = end_date, gridres, dbparms) - } else if(source == "FIA"){ - veg_info <- extract_FIA(lon = lon, lat = lat, startdate = start_date, enddate = end_date, gridres, dbparms) - }else{ - PEcAn.logger::logger.debug("Only have extract functions for source = NEON_veg or FIA, please use load_veg") - - #grabs named function and returns error if function cannot be found - fget <- function(name, env = parent.frame()) { +extract_veg <- function(new_site, start_date, end_date, + source, gridres, format_name = NULL, + machine_host, dbparms, outfolder, overwrite = FALSE, input_veg = input_veg, ...) { + # code taken from https://stackoverflow.com/questions/14183766/match-fun-provide-error-with-functions-defined-inside-functions + # fget <- function(name, env = parent.frame()) { + # if (identical(env, emptyenv())) { + # stop("Could not find function called ", name, call. = FALSE) + # } + # + # if (exists(name, env, inherits = FALSE) && is.function(env[[name]])) { + # env[[name]] + # } else { + # fget(name, parent.env(env)) + # } + # } + #--------------------------------------------------------------------------------------------------# + # Extract veg info + # set start and end date as date objects + start_date <- as.Date(start_date) + end_date <- as.Date(end_date) + # keep this code chunk future PR will integrate back ask Alexis + # fcnx <- paste0("extract_", source) # e.g. 
extract_FIA + # #Need a better way to check if the function exists + # if (!exists(fcnx)) { + # PEcAn.logger::logger.severe(paste(fcnx, "does not exist.")) + # }else{ + # fcn <- fget(fcnx) #Error cannot find the function + # } + # extract_* functions need to have standard args + lon <- as.numeric(new_site$lon) + lat <- as.numeric(new_site$lat) + # veg_info <- fcn(lon = lon, lat = lat, startdate = start_date, enddate = end_date, gridres, dbparms) + + if (source == "NEON_veg") { + veg_info <- extract_NEON_veg(lon = lon, lat = lat, startdate = start_date, enddate = end_date, gridres, dbparms) + } else if (source == "FIA") { + veg_info <- extract_FIA(lon = lon, lat = lat, startdate = start_date, enddate = end_date, gridres, dbparms) + } else { + PEcAn.logger::logger.debug("Only have extract functions for source = NEON_veg or FIA, please use load_veg") + # grabs named function and returns error if function cannot be found + fget <- function(name, env = parent.frame()) { if (identical(env, emptyenv())) { - stop("Could not find function called ", name, call. = FALSE) + stop("Could not find function called ", name, call. = FALSE) } - + if (exists(name, env, inherits = FALSE) && is.function(env[[name]])) { - env[[name]] + env[[name]] } else { - fget(name, parent.env(env)) + fget(name, parent.env(env)) } - } - #--------------------------------------------------------------------------------------------------# - # Extract veg info - fcnx <- paste0("extract_", source) # e.g. extract_FIA - - fcn_exist <- try(fcn <- do.call("::", list(paste0("PEcAn.data.land"), paste0(fcnx)))) - - #detect if function exist - if(is.character(fcn_exist)){ - PEcAn.logger::logger.severe(paste(fcnx, "does not exist.")) - } - - #--------------------------------------------------------------------------------------------------# - # Match species - if (source == "NEON_veg") { - #skip species matching for now revisit later - # need check for overwrite - sppfilename <- write_veg(outfolder, start_date, veg_info = veg_info, source) - - # Build results dataframe for convert.input - results <- data.frame(file = sppfilename, - host = machine_host, - mimetype = "application/rds", - formatname = "spp.info", - startdate = start_date, - enddate = end_date, - dbfile.name = basename(sppfilename), - stringsAsFactors = FALSE) - - ### return for convert.inputs - return(invisible(results)) - }else{ - obs <- veg_info[[2]] - - # TODO: generalize this as we have more sources that have their own DB like FIA - if(is.null(format_name)){ - if(source == "FIA"){ - format_name <- "fia" - code_col <- "spcd" - }else{ - code_col <- "species_USDA_symbol" + } + #--------------------------------------------------------------------------------------------------# + # Extract veg info + fcnx <- paste0("extract_", source) # e.g. 
extract_FIA + + fcn_exist <- try(fcn <- do.call("::", list(paste0("PEcAn.data.land"), paste0(fcnx)))) + + # detect if function exist + if (is.character(fcn_exist)) { + PEcAn.logger::logger.severe(paste(fcnx, "does not exist.")) + } + + #--------------------------------------------------------------------------------------------------# + # Match species + if (source == "NEON_veg") { + # skip species matching for now revisit later + # need check for overwrite + sppfilename <- write_veg(outfolder, start_date, veg_info = veg_info, source) + + # Build results dataframe for convert.input + results <- data.frame( + file = sppfilename, + host = machine_host, + mimetype = "application/rds", + formatname = "spp.info", + startdate = start_date, + enddate = end_date, + dbfile.name = basename(sppfilename), + stringsAsFactors = FALSE + ) + + ### return for convert.inputs + return(invisible(results)) + } else { + obs <- veg_info[[2]] + + # TODO: generalize this as we have more sources that have their own DB like FIA + if (is.null(format_name)) { + if (source == "FIA") { + format_name <- "fia" + code_col <- "spcd" + } else { + code_col <- "species_USDA_symbol" format_name <- "usda" - obs[obs$species_USDA_symbol != "2PLANT", ] #removes the rows with 2PLANT, this is a NEON specific code that means they could not identify the species - } + obs[obs$species_USDA_symbol != "2PLANT", ] # removes the rows with 2PLANT, this is a NEON specific code that means they could not identify the species + } + } + + + # match code to species ID + spp.info <- match_species_id(input_codes = obs[[code_col]], format_name = format_name) + + # merge with data + tmp <- spp.info[, colnames(spp.info) != "input_code"] + + veg_info[[2]] <- cbind(obs, tmp) + + + #--------------------------------------------------------------------------------------------------# + # Write vegettion data as rds, return results to convert.input + + # need check for overwrite + sppfilename <- PEcAn.data.land::write_veg(outfolder, start_date, veg_info = veg_info, source) + + # Build results dataframe for convert.input + results <- data.frame( + file = sppfilename, + host = machine_host, + mimetype = "application/rds", + formatname = "spp.info", + startdate = start_date, + enddate = end_date, + dbfile.name = basename(sppfilename), + stringsAsFactors = FALSE + ) + + ### return for convert.inputs + return(invisible(results)) } - - - # match code to species ID - spp.info <- match_species_id(input_codes = obs[[code_col]], format_name = format_name) - - # merge with data - tmp <- spp.info[ , colnames(spp.info) != "input_code"] - - veg_info[[2]] <- cbind(obs, tmp) - - + #--------------------------------------------------------------------------------------------------# - # Write vegettion data as rds, return results to convert.input - - # need check for overwrite - sppfilename <- PEcAn.data.land::write_veg(outfolder, start_date, veg_info = veg_info, source) - - # Build results dataframe for convert.input - results <- data.frame(file = sppfilename, - host = machine_host, - mimetype = "application/rds", - formatname = "spp.info", - startdate = start_date, - enddate = end_date, - dbfile.name = basename(sppfilename), - stringsAsFactors = FALSE) - - ### return for convert.inputs - return(invisible(results)) + # Match species + obs <- veg_info[[2]] + + # TODO: generalize this as we have more sources that have their own DB like FIA + if (is.null(format_name)) { + if (source == "FIA") { + format_name <- "fia" + code_col <- "spcd" + } else { + code_col <- 
"species_USDA_symbol" + format_name <- "usda" + obs <- obs[obs$species_USDA_symbol != "2PLANT" & + obs$species_USDA_symbol != "2PLANT-H", ] # removes the rows with 2PLANT, this is a NEON specific code that means they could not identify the species + } + } } - + # match code to species ID + spp.info <- match_species_id(input_codes = obs[[code_col]], format_name = format_name) + + # merge with data + tmp <- spp.info[, colnames(spp.info) != "input_code"] + + veg_info[[2]] <- cbind(obs, tmp) + + #--------------------------------------------------------------------------------------------------# - # Match species - - obs <- veg_info[[2]] - - # TODO: generalize this as we have more sources that have their own DB like FIA - if(is.null(format_name)){ - if(source == "FIA"){ - format_name <- "fia" - code_col <- "spcd" - }else{ - code_col <- "species_USDA_symbol" - format_name <- "usda" - obs <- obs[obs$species_USDA_symbol != "2PLANT" & - obs$species_USDA_symbol != "2PLANT-H", ] #removes the rows with 2PLANT, this is a NEON specific code that means they could not identify the species - } - - } + # Write vegettion data as rds, return results to convert_input - } - # match code to species ID - spp.info <- match_species_id(input_codes = obs[[code_col]], format_name = format_name) - - # merge with data - tmp <- spp.info[ , colnames(spp.info) != "input_code"] - - veg_info[[2]] <- cbind(obs, tmp) - - - #--------------------------------------------------------------------------------------------------# - # Write vegettion data as rds, return results to convert_input - - # need check for overwrite - sppfilename <- write_veg(outfolder, start_date, veg_info = veg_info, source) - - # Build results dataframe for convert_input - results <- data.frame(file = sppfilename, - host = machine_host, - mimetype = "application/rds", - formatname = "spp.info", - startdate = start_date, - enddate = end_date, - dbfile.name = basename(sppfilename), - stringsAsFactors = FALSE) - - ### return for convert_inputs - return(invisible(results)) - - + # need check for overwrite + sppfilename <- write_veg(outfolder, start_date, veg_info = veg_info, source) + + # Build results dataframe for convert_input + results <- data.frame( + file = sppfilename, + host = machine_host, + mimetype = "application/rds", + formatname = "spp.info", + startdate = start_date, + enddate = end_date, + dbfile.name = basename(sppfilename), + stringsAsFactors = FALSE + ) + + ### return for convert_inputs + return(invisible(results)) } # extract_veg diff --git a/modules/data.land/R/fia2ED.R b/modules/data.land/R/fia2ED.R index 26e8e5aafb6..9f7931a423f 100644 --- a/modules/data.land/R/fia2ED.R +++ b/modules/data.land/R/fia2ED.R @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html @@ -20,33 +20,32 @@ #' @return modified settings, invisibly #' @export #' @author Mike Dietze, Rob Kooper, Ryan Kelly -fia.to.psscss <- function(settings, +fia.to.psscss <- function(settings, lat = as.numeric(settings$run$site$lat), lon = as.numeric(settings$run$site$lon), year = lubridate::year(settings$run$start.date), - gridres=0.075, + gridres = 0.075, min.year = year - 5, max.year = year + 5, - overwrite=FALSE) { - - mimetype <- "text/plain" - startdate <- lubridate::as_date(paste0(year, "-01-01")) - enddate <- lubridate::as_date(paste0(year, "-12-31")) + overwrite = FALSE) { + mimetype <- "text/plain" + startdate <- lubridate::as_date(paste0(year, "-01-01")) + enddate <- lubridate::as_date(paste0(year, "-12-31")) formatnames <- c("ED2.cohort", "ED2.patch", "ED2.site") - + latmax <- lat + gridres latmin <- lat - gridres lonmax <- lon + gridres lonmin <- lon - gridres - + ## connect to database con <- PEcAn.DB::db.open(settings$database$bety) on.exit(PEcAn.DB::db.close(con), add = TRUE) - + # Check whether inputs exist already - if(!overwrite) { + if (!overwrite) { existing.files <- list() - for(format in formatnames) { + for (format in formatnames) { existing.files[[format]] <- PEcAn.DB::dbfile.input.check( siteid = settings$run$site$id, startdate = startdate, @@ -58,35 +57,38 @@ fia.to.psscss <- function(settings, hostname = settings$host$name ) } - + if (all(sapply(existing.files, function(x) nrow(x) > 0))) { file.paths <- lapply(existing.files, function(x) file.path(x$file_path, x$file_name)) - - settings <- .add.ed2.file.paths.to.settings(settings, - css.path = file.paths$ED2.cohort, - pss.path = file.paths$ED2.patch, - site.path = file.paths$ED2.site) - + + settings <- .add.ed2.file.paths.to.settings(settings, + css.path = file.paths$ED2.cohort, + pss.path = file.paths$ED2.patch, + site.path = file.paths$ED2.site + ) + PEcAn.logger::logger.info("Using existing pss, css, and site files.") return(invisible(settings)) } else { PEcAn.logger::logger.info("No existing pss, css, and site files.") } } - + ### collect mapping from spcd to pftid query <- NULL for (pft in settings$pfts) { if (is.null(query)) { - query <- paste0("SELECT bp.name as pft, bs.spcd FROM pfts as bp INNER JOIN ", - "pfts_species AS bps ON bps.pft_id = bp.id INNER JOIN species AS bs ON bs.id = bps.specie_id WHERE ", - "bp.name = '", pft$name, "'") + query <- paste0( + "SELECT bp.name as pft, bs.spcd FROM pfts as bp INNER JOIN ", + "pfts_species AS bps ON bps.pft_id = bp.id INNER JOIN species AS bs ON bs.id = bps.specie_id WHERE ", + "bp.name = '", pft$name, "'" + ) } else { query <- paste0(query, " OR bp.name = '", pft$name, "'") } } pfts <- PEcAn.DB::db.query(query, con = con) - + # Convert PFT names to ED2 Numbers utils::data(pftmapping) for (pft.i in settings$pfts) { @@ -100,104 +102,110 @@ fia.to.psscss <- function(settings, } pfts$pft[pfts$pft == pft.i$name] <- pft.number } - - + + ## Check for NA and duplicate spcds in PFTs bad <- length(pfts$spcd %in% c(NA, "0")) if (bad > 0) { PEcAn.logger::logger.warn(sprintf("There are %d entries with no SPCD (NA or 0). 
They have been removed.", bad)) pfts <- pfts[!pfts$spcd %in% c(NA, 0), ] } - + bad <- pfts$spcd[duplicated(pfts$spcd)] if (length(bad) > 0) { # Coerce spcds back into species names using data from FIA manual. Makes a more readable warning. symbol.table <- PEcAn.DB::db.query("SELECT spcd, \"Symbol\" FROM species where spcd IS NOT NULL", con = con) names(symbol.table) <- tolower(names(symbol.table)) - + # grab the names where we have bad spcds in the symbol.table, exclude NAs name.list <- stats::na.omit(symbol.table$symbol[symbol.table$spcd %in% bad]) - - PEcAn.logger::logger.severe(paste0("The following species are found in multiple PFTs: ", - paste(name.list[1:min(10, length(name.list))], collapse = ", "), - ". Please remove overlapping PFTs.")) + + PEcAn.logger::logger.severe(paste0( + "The following species are found in multiple PFTs: ", + paste(name.list[1:min(10, length(name.list))], collapse = ", "), + ". Please remove overlapping PFTs." + )) } - + ## connect to database fia.con <- PEcAn.DB::db.open(settings$database$fia) on.exit(PEcAn.DB::db.close(fia.con), add = TRUE) - + ################## ## ## ## PSS ## ## ## ################## ## query to get PSS info - query <- paste("SELECT p.cycle, p.statecd, p.measyear as time, p.cn as patch, ", - "MIN(2-c.stdorgcd) as trk, AVG(c.stdage) as age, p.lat, p.lon, p.prev_plt_cn ", - "FROM plot as p LEFT JOIN cond as c on p.cn=c.plt_cn ", - "WHERE p.lon >= ", lonmin, " AND p.lon <= ", lonmax, " AND p.lat >= ", latmin, - " AND p.lat <= ", latmax, " AND p.measyear >= ", min.year, - " AND p.measyear <= ", max.year, " GROUP BY p.cn") - + query <- paste( + "SELECT p.cycle, p.statecd, p.measyear as time, p.cn as patch, ", + "MIN(2-c.stdorgcd) as trk, AVG(c.stdage) as age, p.lat, p.lon, p.prev_plt_cn ", + "FROM plot as p LEFT JOIN cond as c on p.cn=c.plt_cn ", + "WHERE p.lon >= ", lonmin, " AND p.lon <= ", lonmax, " AND p.lat >= ", latmin, + " AND p.lat <= ", latmax, " AND p.measyear >= ", min.year, + " AND p.measyear <= ", max.year, " GROUP BY p.cn" + ) + pss <- PEcAn.DB::db.query(query, con = fia.con) if (nrow(pss) == 0) { PEcAn.logger::logger.severe("No pss data found.") } - + for (statecd in unique(pss$statecd)) { # Count up occurrences of each cycle cycle.count <- table(pss$cycle[pss$statecd == statecd]) - - # Find the best valid cycle, in terms of providing the most records. + + # Find the best valid cycle, in terms of providing the most records. 
# In case of ties, which.max will return the first one, which will be the earliest best.cycle <- as.numeric(names(cycle.count)[which.max(cycle.count)]) - + row.keep.ind <- (pss$statecd != statecd) | (pss$cycle == best.cycle) - + pss <- pss[row.keep.ind, ] } - + # as an extra precaution, remove any records that are explicitly remeasurments of the same plot pss <- pss[.select.unique.fia.plot.records(pss$patch, pss$prev_plt_cn, pss$time, year), ] - + if (nrow(pss) == 0) { PEcAn.logger::logger.severe("All pss data were invalid.") } - + pss$trk[which(is.na(pss$trk))] <- 1 pss$age[which(is.na(pss$age))] <- 0 - + n.patch <- nrow(pss) - + ## fill missing data w/ defaults pss$site <- rep(1, n.patch) pss$area <- rep(1 / n.patch, n.patch) pss$water <- rep(0, n.patch) - + # Reorder columns, dropping unneeded ones pss <- pss[, c("site", "time", "patch", "trk", "age", "area", "water")] - + # Add soil data - soil <- c(1, 5, 5, 0.01, 0, 1, 1) #soil C & N pools (biogeochem) defaults (fsc,stsc,stsl,ssc,psc,msn,fsn)\t - soil.dat <- as.data.frame(matrix(soil, n.patch, 7, byrow = TRUE)) + soil <- c(1, 5, 5, 0.01, 0, 1, 1) # soil C & N pools (biogeochem) defaults (fsc,stsc,stsl,ssc,psc,msn,fsn)\t + soil.dat <- as.data.frame(matrix(soil, n.patch, 7, byrow = TRUE)) names(soil.dat) <- c("fsc", "stsc", "stsl", "ssc", "psc", "msn", "fsn") - pss <- cbind(pss, soil.dat) - + pss <- cbind(pss, soil.dat) + PEcAn.logger::logger.debug(paste0("Found ", nrow(pss), " patches for site ", settings$run$site$id)) - + ################## ## ## ## CSS ## ## ## ################## - query <- paste0("SELECT p.measyear as time,p.cycle,p.statecd,p.cn as patch, - ", "CONCAT(CAST(t.subp AS CHAR),CAST(t.tree AS CHAR)) as cohort,t.dia*2.54 as dbh, ", - "t.spcd as spcd, t.tpa_unadj*0.0002471 as n FROM plot as p LEFT JOIN tree as t on p.cn=t.plt_cn ", - "WHERE p.lon >= ", lonmin, - " and p.lon < ", lonmax, - " and p.lat >= ", latmin, - " and p.lat < ", latmax) + query <- paste0( + "SELECT p.measyear as time,p.cycle,p.statecd,p.cn as patch, + ", "CONCAT(CAST(t.subp AS CHAR),CAST(t.tree AS CHAR)) as cohort,t.dia*2.54 as dbh, ", + "t.spcd as spcd, t.tpa_unadj*0.0002471 as n FROM plot as p LEFT JOIN tree as t on p.cn=t.plt_cn ", + "WHERE p.lon >= ", lonmin, + " and p.lon < ", lonmax, + " and p.lat >= ", latmin, + " and p.lat < ", latmax + ) css <- PEcAn.DB::db.query(query, con = fia.con) names(css) <- tolower(names(css)) if (nrow(css) == 0) { @@ -205,7 +213,7 @@ fia.to.psscss <- function(settings, } else { PEcAn.logger::logger.debug(paste0(nrow(css), " trees found initially")) } - + # Remove rows that don't map to any retained patch css <- css[which(css$patch %in% pss$patch), ] if (nrow(css) == 0) { @@ -213,8 +221,8 @@ fia.to.psscss <- function(settings, } else { PEcAn.logger::logger.debug(paste0(nrow(css), " trees that map to previously selected patches.")) } - - + + ## Remove rows with no dbh, spcd, or n notree <- which(is.na(css$dbh) & is.na(css$spcd) & is.na(css$n)) if (length(notree) > 0) { @@ -225,29 +233,31 @@ fia.to.psscss <- function(settings, } else { PEcAn.logger::logger.debug(paste0(nrow(css), " trees remain after removing entries with no dbh, spcd, and/or n.")) } - + # --- Consistency tests between PFTs and FIA fia.species <- unique(css$spcd) - + # check for species in PFTs which the FIA db doesn't expect - pft.ind <- which(!(pfts$spcd %in% fia.species)) #vect shows pft's spcds that are confirmed by fia - pft.only <- pfts$spcd[pft.ind] #what were the spcds at those indices? 
- + pft.ind <- which(!(pfts$spcd %in% fia.species)) # vect shows pft's spcds that are confirmed by fia + pft.only <- pfts$spcd[pft.ind] # what were the spcds at those indices? + if (length(pft.only) > 0) { if (!exists("symbol.table")) { symbol.table <- PEcAn.DB::db.query("SELECT spcd, \"Symbol\" FROM species where spcd IS NOT NULL", con = con) names(symbol.table) <- tolower(names(symbol.table)) } name.list <- stats::na.omit(symbol.table$symbol[symbol.table$spcd %in% pft.only]) - PEcAn.logger::logger.warn(paste0("The selected PFTs contain the following species for which the FIA database ", - "contains no data at ", lat, " and ", lon, ": ", - paste(name.list[1:min(10, length(name.list))], collapse = ", "), ".")) + PEcAn.logger::logger.warn(paste0( + "The selected PFTs contain the following species for which the FIA database ", + "contains no data at ", lat, " and ", lon, ": ", + paste(name.list[1:min(10, length(name.list))], collapse = ", "), "." + )) } - + # check for species expected by FIA which the PFTs don't cover fia.ind <- which(!fia.species %in% pfts$spcd) fia.only <- fia.species[fia.ind] - + if (length(fia.only) > 0) { if (!exists("symbol.table")) { symbol.table <- PEcAn.DB::db.query("SELECT spcd, \"Symbol\" FROM species where spcd IS NOT NULL", con = con) @@ -256,45 +266,51 @@ fia.to.psscss <- function(settings, name.list <- stats::na.omit(symbol.table$symbol[symbol.table$spcd %in% fia.only]) name.list <- name.list[name.list != "DEAD"] if (length(name.list) > 0) { - PEcAn.logger::logger.warn(paste0("The FIA database expects the following species at ", lat, " and ", lon, - " but they are not described by the selected PFTs: ", - paste(name.list, collapse = ", "), - ". You should select additional pfts if you want to include these. ")) + PEcAn.logger::logger.warn(paste0( + "The FIA database expects the following species at ", lat, " and ", lon, + " but they are not described by the selected PFTs: ", + paste(name.list, collapse = ", "), + ". You should select additional pfts if you want to include these. " + )) } } - + css <- css[!(css$spcd %in% fia.only), ] if (nrow(css) == 0) { - PEcAn.logger::logger.severe(paste0("No trees remain for selected PFTs. ", - "Species that were in FIA data but didn't map to a selected PFT are: ", - paste(name.list, collapse = ", "), ".")) + PEcAn.logger::logger.severe(paste0( + "No trees remain for selected PFTs. ", + "Species that were in FIA data but didn't map to a selected PFT are: ", + paste(name.list, collapse = ", "), "." 
+ )) } else { PEcAn.logger::logger.debug(paste0(nrow(css), " trees remain for selected PFTs.")) } - + # --- Continue work formatting css now that we've checked for species problems - n.cohort <- nrow(css) - css$time[is.na(css$time)] <- 1 + n.cohort <- nrow(css) + css$time[is.na(css$time)] <- 1 css$cohort[is.na(css$cohort)] <- 1:sum(is.na(css$cohort)) - css$dbh[is.na(css$dbh)] <- 1 # assign nominal small dbh to missing - density.median <- stats::median(css$n[which(css$n > 0)]) - css$n[is.na(css$n) | css$n == 0] <- density.median + css$dbh[is.na(css$dbh)] <- 1 # assign nominal small dbh to missing + density.median <- stats::median(css$n[which(css$n > 0)]) + css$n[is.na(css$n) | css$n == 0] <- density.median css$hite <- css$bdead <- css$balive <- css$lai <- rep(0, n.cohort) - + ## map spcd to pft css <- merge(css, pfts, by = "spcd") css <- css[, c("time", "patch", "cohort", "dbh", "hite", "pft", "n", "bdead", "balive", "lai")] - + pfts.represented <- sapply(settings$pfts, function(x) x$constants$num) %in% css$pft - if (!all(pfts.represented)) + if (!all(pfts.represented)) { PEcAn.logger::logger.warn(paste0( "The following PFTs listed in settings are not represented in the FIA data: ", - paste(sapply(settings$pfts, function(x) x$name)[!pfts.represented], collapse = ", "))) - + paste(sapply(settings$pfts, function(x) x$name)[!pfts.represented], collapse = ", ") + )) + } + PEcAn.logger::logger.debug(paste0("Found ", nrow(css), " cohorts for site ", settings$run$site$id)) - + ################## ## ## ## SITE ## @@ -302,82 +318,90 @@ fia.to.psscss <- function(settings, ################## # Obviously, this is just a placeholder for now... site <- c( - "nsite 1 file_format 1", + "nsite 1 file_format 1", "sitenum area TCI elev slope aspect soil", "1 1.0 -7 100.0 0.0 0.0 3" ) - - # ----- Write files + + # ----- Write files # Write files locally - site.string <- paste0(as.numeric(settings$run$site$id)%/%1e+09, "-", - as.numeric(settings$run$site$id)%%1e+09) + site.string <- paste0( + as.numeric(settings$run$site$id) %/% 1e+09, "-", + as.numeric(settings$run$site$id) %% 1e+09 + ) if (settings$host$name == "localhost") { out.dir.local <- file.path(settings$database$dbfiles, paste0("FIA_ED2_site_", site.string)) } else { out.dir.local <- "/tmp" } - prefix.psscss <- paste0("siteid", settings$run$site$id, ".fia", year, ".radius", gridres, - get.ed.file.latlon.text(lat, lon, site.style = FALSE)) - prefix.site <- paste0("siteid", settings$run$site$id, ".fia", year, ".radius", gridres, - get.ed.file.latlon.text(lat, lon, site.style = TRUE)) + prefix.psscss <- paste0( + "siteid", settings$run$site$id, ".fia", year, ".radius", gridres, + get.ed.file.latlon.text(lat, lon, site.style = FALSE) + ) + prefix.site <- paste0( + "siteid", settings$run$site$id, ".fia", year, ".radius", gridres, + get.ed.file.latlon.text(lat, lon, site.style = TRUE) + ) pss.file.local <- file.path(out.dir.local, paste0(prefix.psscss, ".pss")) css.file.local <- file.path(out.dir.local, paste0(prefix.psscss, ".css")) site.file.local <- file.path(out.dir.local, paste0(prefix.site, ".site")) - + dir.create(out.dir.local, showWarnings = F, recursive = T) utils::write.table(pss, pss.file.local, quote = FALSE, row.names = FALSE) utils::write.table(css, css.file.local, quote = FALSE, row.names = FALSE) - + site.file.con <- file(site.file.local) writeLines(site, site.file.con) close(site.file.con) - + # Copy to remote if needed if (settings$host$name == "localhost") { files <- c(pss.file.local, css.file.local, site.file.local) } else { - 
out.dir.remote <- file.path(settings$host$folder, paste0("FIA_ED2_site_", site.string)) - pss.file.remote <- file.path(out.dir.remote, paste0(prefix.psscss, ".pss")) - css.file.remote <- file.path(out.dir.remote, paste0(prefix.psscss, ".css")) + out.dir.remote <- file.path(settings$host$folder, paste0("FIA_ED2_site_", site.string)) + pss.file.remote <- file.path(out.dir.remote, paste0(prefix.psscss, ".pss")) + css.file.remote <- file.path(out.dir.remote, paste0(prefix.psscss, ".css")) site.file.remote <- file.path(out.dir.remote, paste0(prefix.site, ".site")) - + PEcAn.remote::remote.execute.cmd(settings$host, "mkdir", c("-p", out.dir.remote)) PEcAn.remote::remote.copy.to(settings$host, pss.file.local, pss.file.remote) PEcAn.remote::remote.copy.to(settings$host, css.file.local, css.file.remote) PEcAn.remote::remote.copy.to(settings$host, site.file.local, site.file.remote) files <- c(pss.file.remote, css.file.remote, site.file.remote) } - + # Insert into DB - for(i in seq_along(files)) { + for (i in seq_along(files)) { PEcAn.DB::dbfile.input.insert( - in.path = dirname(files[i]), - in.prefix = basename(files[i]), - siteid = settings$run$site$id, - startdate = startdate, - enddate = enddate, - mimetype = mimetype, + in.path = dirname(files[i]), + in.prefix = basename(files[i]), + siteid = settings$run$site$id, + startdate = startdate, + enddate = enddate, + mimetype = mimetype, formatname = formatnames[i], - parentid = NA, - con = con, - hostname = settings$host$name, + parentid = NA, + con = con, + hostname = settings$host$name, allow.conflicting.dates = TRUE ) } - + # Add file paths to settings if (settings$host$name == "localhost") { settings <- .add.ed2.file.paths.to.settings(settings, - css.path = css.file.local, - pss.path = pss.file.local, - site.path = site.file.local) + css.path = css.file.local, + pss.path = pss.file.local, + site.path = site.file.local + ) } else { - settings <- .add.ed2.file.paths.to.settings(settings, - css.path = css.file.remote, - pss.path = pss.file.remote, - site.path = site.file.remote) + settings <- .add.ed2.file.paths.to.settings(settings, + css.path = css.file.remote, + pss.path = pss.file.remote, + site.path = site.file.remote + ) } - + return(invisible(settings)) } # fia.to.psscss @@ -401,16 +425,16 @@ get.ed.file.latlon.text <- function(lat, lon, site.style = FALSE, ed.res = 1) { # A function for identifying fia plot records that are remeasurements of one another, -# and upon finding them retaining only the one that is closest to some target year. +# and upon finding them retaining only the one that is closest to some target year. # Since fia.to.psscss currently selects plots from only a single cycle (for a given state) -# it shouldn't be getting remeasurements, and this probably isn't doing anything in the -# current code. But it could be useful for future updates. +# it shouldn't be getting remeasurements, and this probably isn't doing anything in the +# current code. But it could be useful for future updates. 
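# A hypothetical toy illustration of the helper defined just below (all inputs invented for clarity;
# shown as a comment only). With three plot records where "B" is a remeasurement of "A" and "C"
# stands alone,
#   .select.unique.fia.plot.records(plt_cn      = c("A", "B", "C"),
#                                   prev_plt_cn = c(NA,  "A", NA),
#                                   measyear    = c(2003, 2008, 2005),
#                                   target.year = 2006)
# should keep index 2 ("B", whose measurement year is closest to 2006) from the A/B sequence plus
# index 3 (the standalone "C"), assuming the remeasurement chaining behaves as described above.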
.select.unique.fia.plot.records <- function(plt_cn, prev_plt_cn, measyear, target.year) { if (length(plt_cn) != length(prev_plt_cn)) { PEcAn.logger::logger.error("Inputs must have same length!") return(NULL) } - + # Identify records that are part of the same remeasurement sequence prev_plt_cn[prev_plt_cn == ""] <- NA unique.plot.id <- rep(NA, length(plt_cn)) @@ -422,7 +446,7 @@ get.ed.file.latlon.text <- function(lat, lon, site.style = FALSE, ed.res = 1) { # assign a new plot id unique.plot.id[i] <- i } - + # Check whether this record is a remeasurement of another one in the list if (!is.na(prev_plt_cn[i])) { parent.ind <- which(plt_cn == prev_plt_cn[i]) @@ -437,7 +461,7 @@ get.ed.file.latlon.text <- function(lat, lon, site.style = FALSE, ed.res = 1) { next } } - + # Check whether any other record is a remeasurement of this one child.ind <- which(prev_plt_cn == plt_cn[i]) if (length(child.ind) > 0) { @@ -449,7 +473,7 @@ get.ed.file.latlon.text <- function(lat, lon, site.style = FALSE, ed.res = 1) { } } } - + # For any identified remeasurement sequences, choose to keep the record that is closest to the target year ind.keep <- numeric(0) for (unique.id in unique(unique.plot.id)) { @@ -459,6 +483,6 @@ get.ed.file.latlon.text <- function(lat, lon, site.style = FALSE, ed.res = 1) { } ind.keep <- c(ind.keep, ind.keep.i) } - + return(sort(ind.keep)) } # .select.unique.fia.plot.records diff --git a/modules/data.land/R/fuse_plot_treering.R b/modules/data.land/R/fuse_plot_treering.R index f0dae802878..78793560d2f 100644 --- a/modules/data.land/R/fuse_plot_treering.R +++ b/modules/data.land/R/fuse_plot_treering.R @@ -1,46 +1,45 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- fuse_plot_treering <- function(plot.data, inc.data, tuscon.data, inc.unit.conv = 0.1) { - plot.data <- as.data.frame(plot.data) - + ## separate veg data to lists by plot - plot.id <- unique(plot.data$plot) + plot.id <- unique(plot.data$plot) diameters <- list() - spp <- list() - depth <- list() - + spp <- list() + depth <- list() + for (i in seq_along(plot.id)) { - mch <- which(plot.data$plot == plot.id[i]) - diameters[[i]] <- plot.data$dbh[mch] + mch <- which(plot.data$plot == plot.id[i]) + diameters[[i]] <- plot.data$dbh[mch] names(diameters[[i]]) <- plot.data$tree[mch] - spp[[i]] <- plot.data$spp[mch] + spp[[i]] <- plot.data$spp[mch] } mplot <- length(plot.id) - + ## match increment data to plot and tree note: much of this parsing is currently specific to Sam's ## data increments <- list() - inc.names <- sub("T", "", sub(".txt", "", names(inc.data), fixed = TRUE)) - inc.ID <- as.numeric(substr(inc.names, 1, nchar(inc.names) - 1)) - inc.rep <- substr(inc.names, nchar(inc.names), nchar(inc.names)) + inc.names <- sub("T", "", sub(".txt", "", names(inc.data), fixed = TRUE)) + inc.ID <- as.numeric(substr(inc.names, 1, nchar(inc.names) - 1)) + inc.rep <- substr(inc.names, nchar(inc.names), nchar(inc.names)) inc.length <- sapply(inc.data, length) - nyr <- max(inc.length, na.rm = TRUE) - 1 - - tuscon <- list() - T.names <- sub("T", "", sub(".TXT", "", basename(names(tuscon.data)), fixed = TRUE)) - T.ID <- as.numeric(substr(T.names, 1, nchar(T.names) - 1)) - T.rep <- substr(T.names, nchar(T.names), nchar(T.names)) - T.length <- sapply(tuscon.data, nrow) - nyr <- max(c(nyr, T.length), na.rm = TRUE) - + nyr <- max(inc.length, na.rm = TRUE) - 1 + + tuscon <- list() + T.names <- sub("T", "", sub(".TXT", "", basename(names(tuscon.data)), fixed = TRUE)) + T.ID <- as.numeric(substr(T.names, 1, nchar(T.names) - 1)) + T.rep <- substr(T.names, nchar(T.names), nchar(T.names)) + T.length <- sapply(tuscon.data, nrow) + nyr <- max(c(nyr, T.length), na.rm = TRUE) + survival <- list() for (i in seq_len(mplot)) { if (length(diameters[[i]]) == 0) { @@ -48,25 +47,24 @@ fuse_plot_treering <- function(plot.data, inc.data, tuscon.data, inc.unit.conv = } survival[[i]] <- matrix(TRUE, length(diameters[[i]]), nyr + 1) } - + for (i in seq_along(plot.id)) { ## loop over plots ntree <- length(diameters[[i]]) increments[[i]] <- matrix(NA, ntree, nyr) depth[[i]] <- rep(NA, ntree) - + for (j in seq_len(ntree)) { ## loop over trees look for next tree in list tree.id <- as.numeric(names(diameters[[i]])[j]) - mch <- which(inc.ID == tree.id) - mchT <- which(T.ID == tree.id) - if (length(mch) > 0) - { + mch <- which(inc.ID == tree.id) + mchT <- which(T.ID == tree.id) + if (length(mch) > 0) { ## match to a velmex record if (length(mch) == 1) { ## only one record, use it. - y <- inc.data[[mch]] - maxy <- max(y) + y <- inc.data[[mch]] + maxy <- max(y) growth <- diff(y) increments[[i]][j, nyr:(nyr - length(growth) + 1)] <- growth * inc.unit.conv } else { @@ -81,7 +79,7 @@ fuse_plot_treering <- function(plot.data, inc.data, tuscon.data, inc.unit.conv = growth[is.nan(growth)] <- NA increments[[i]][j, ] <- rev(growth) * inc.unit.conv } - + ## did core get to pith?? 
radius <- diameters[[i]][j] / 2 depth[[i]][j] <- maxy * inc.unit.conv @@ -89,15 +87,13 @@ fuse_plot_treering <- function(plot.data, inc.data, tuscon.data, inc.unit.conv = ## if you're within 5% of the diameter, assume hit middle survival[[i]][j, which(is.na(increments[[i]][j, ]))] <- FALSE } - - } ## end mch > 0 - + } ## end mch > 0 + if (length(mchT) > 0) { - if (length(mchT) == 1) { ## only one record, use it. growth <- t(tuscon.data[[mchT]]) * 0.1 - maxy <- sum(growth) + maxy <- sum(growth) increments[[i]][j, (nyr - length(growth) + 1):nyr] <- growth } else { ## create mean increment record (eventually shift this to BAI eliptoid) @@ -106,12 +102,12 @@ fuse_plot_treering <- function(plot.data, inc.data, tuscon.data, inc.unit.conv = g <- t(tuscon.data[[mchT[k]]]) * 0.1 growth[k, (nyr - length(g) + 1):nyr] <- g } - maxy <- max(apply(growth, 1, sum, na.rm = TRUE)) + maxy <- max(apply(growth, 1, sum, na.rm = TRUE)) growth <- apply(growth, 2, mean, na.rm = TRUE) growth[is.nan(growth)] <- NA - increments[[i]][j, ] <- growth + increments[[i]][j, ] <- growth } - + ## did core get to pith?? radius <- diameters[[i]][j] / 2 depth[[i]][j] <- maxy @@ -120,21 +116,23 @@ fuse_plot_treering <- function(plot.data, inc.data, tuscon.data, inc.unit.conv = survival[[i]][j, which(is.na(increments[[i]][j, ]))] <- FALSE } } - } ## end loop over trees - } ## end loop over plots - + } ## end loop over trees + } ## end loop over plots + ## build diameter increment matrix - + ## expand diameter data matrix for (i in seq_along(diameters)) { - dtmp <- matrix(NA, length(diameters[[i]]), nyr + 1) + dtmp <- matrix(NA, length(diameters[[i]]), nyr + 1) dnames <- names(diameters[[i]]) dtmp[, nyr + 1] <- diameters[[i]] - diameters[[i]] <- dtmp - colnames(diameters[[i]]) <- 2012 - (nyr + 1):1 + 1 + diameters[[i]] <- dtmp + colnames(diameters[[i]]) <- 2012 - (nyr + 1):1 + 1 row.names(diameters[[i]]) <- dnames } - - return(list(diameters = diameters, increments = increments, survival = survival, - species = spp, depth = depth)) + + return(list( + diameters = diameters, increments = increments, survival = survival, + species = spp, depth = depth + )) } # fuse_plot_treering diff --git a/modules/data.land/R/gSSURGO_Query.R b/modules/data.land/R/gSSURGO_Query.R index eaf78b94c39..2c194b0e18b 100644 --- a/modules/data.land/R/gSSURGO_Query.R +++ b/modules/data.land/R/gSSURGO_Query.R @@ -6,54 +6,59 @@ #' #' @return a dataframe with soil properties. Units can be looked up from database documentation #' -#' @details +#' @details #' Full documention of available tables and their relationships can be found here \url{www.sdmdataaccess.nrcs.usda.gov/QueryHelp.aspx} #' There have been occasions where NRCS made some minor changes to the structure of the API which this code is where those changes need #' to be implemneted here. -#' Fields need to be defined with their associate tables. For example, sandtotal is a field in chorizon table which needs to be defined as chorizon.sandotal_(r/l/h), where +#' Fields need to be defined with their associate tables. For example, sandtotal is a field in chorizon table which needs to be defined as chorizon.sandotal_(r/l/h), where #' r stands for the representative value, l stands for low and h stands for high. At the moment fields from mapunit, component, muaggatt, and chorizon tables can be extracted. 
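#' For illustration of that convention (field names chosen only to show the r/l/h pattern, not an
#' exhaustive list):
#' \preformatted{
#' # representative, low and high estimates of the same chorizon property
#' fields <- c("chorizon.sandtotal_r", "chorizon.sandtotal_l", "chorizon.sandtotal_h")
#' }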
#' #' @examples #' \dontrun{ -#' PEcAn.data.land::gSSURGO.Query( -#' mukeys = 2747727, -#' fields = c( -#' "chorizon.cec7_r", "chorizon.sandtotal_r", -#' "chorizon.silttotal_r","chorizon.claytotal_r", -#' "chorizon.om_r","chorizon.hzdept_r","chorizon.frag3to10_r", -#' "chorizon.dbovendry_r","chorizon.ph1to1h2o_r", -#' "chorizon.cokey","chorizon.chkey")) +#' PEcAn.data.land::gSSURGO.Query( +#' mukeys = 2747727, +#' fields = c( +#' "chorizon.cec7_r", "chorizon.sandtotal_r", +#' "chorizon.silttotal_r", "chorizon.claytotal_r", +#' "chorizon.om_r", "chorizon.hzdept_r", "chorizon.frag3to10_r", +#' "chorizon.dbovendry_r", "chorizon.ph1to1h2o_r", +#' "chorizon.cokey", "chorizon.chkey" +#' ) +#' ) #' } #' @export #' gSSURGO.Query <- function(mukeys, - fields = c("chorizon.sandtotal_r", - "chorizon.silttotal_r", - "chorizon.claytotal_r")) { - + fields = c( + "chorizon.sandtotal_r", + "chorizon.silttotal_r", + "chorizon.claytotal_r" + )) { ######### Retrieve soil # Avoids duplicating fields that are always included in the query fixed_fields <- c("mapunit.mukey", "component.cokey", "component.comppct_r") qry_fields <- unique(fields[!(fields %in% fixed_fields)]) - - body <- paste(' + + body <- paste( + ' SELECT ', - paste(fixed_fields, collapse = ", "), - paste(qry_fields, collapse = ", "), - ' from mapunit + paste(fixed_fields, collapse = ", "), + paste(qry_fields, collapse = ", "), + " from mapunit join muaggatt on mapunit.mukey=muaggatt.mukey join component on mapunit.mukey=component.mukey join chorizon on component.cokey=chorizon.cokey - where mapunit.mukey in (', paste(mukeys,collapse = ", "),'); + where mapunit.mukey in (", paste(mukeys, collapse = ", "), "); - ') + " + ) out <- httr::POST( url = "https://SDMDataAccess.nrcs.usda.gov/Tabular/SDMTabularService.asmx", @@ -61,49 +66,53 @@ gSSURGO.Query <- function(mukeys, httr::accept("text/xml"), httr::accept("multipart/*"), httr::add_headers( - SOAPAction = "http://SDMDataAccess.nrcs.usda.gov/Tabular/SDMTabularService.asmx/RunQuery")), + SOAPAction = "http://SDMDataAccess.nrcs.usda.gov/Tabular/SDMTabularService.asmx/RunQuery" + ) + ), httr::content_type("text/xml; charset=utf-8"), # I expected this to belong inside `config`, but doesn't seem to work there... 
- encode="multipart", - body = body) + encode = "multipart", + body = body + ) httr::stop_for_status(out) result <- httr::content(out, "text") suppressWarnings( suppressMessages({ xml_doc <- XML::xmlTreeParse(result) - xmltop <- XML::xmlRoot(xml_doc) + xmltop <- XML::xmlRoot(xml_doc) tablesxml <- (xmltop[[1]]["RunQueryResponse"][[1]]["RunQueryResult"][[1]]["diffgram"][[1]]["NewDataSet"][[1]]) }) ) - - #parsing the table - tryCatch({ - suppressMessages( - suppressWarnings({ - tables <- XML::getNodeSet(tablesxml,"//Table") - - ##### All datatables below newdataset - dfs <- purrr::map_dfr( + + # parsing the table + tryCatch( + { + suppressMessages( + suppressWarnings({ + tables <- XML::getNodeSet(tablesxml, "//Table") + + ##### All datatables below newdataset + dfs <- purrr::map_dfr( tables, - function(tbl){ + function(tbl) { lst <- purrr::map( XML::xmlToList(tbl), - function(v)ifelse(is.null(v), NA, v)) #avoid dropping empty columns + function(v) ifelse(is.null(v), NA, v) + ) # avoid dropping empty columns - lst[names(lst) != ".attrs"]} + lst[names(lst) != ".attrs"] + } ) dfs <- dplyr::mutate(dfs, dplyr::across(dplyr::everything(), as.numeric)) - }) - ) - - - return(dfs) - }, - error=function(cond) { - print(cond) - return(NULL) - }) - -} + }) + ) + return(dfs) + }, + error = function(cond) { + print(cond) + return(NULL) + } + ) +} diff --git a/modules/data.land/R/get.soil.R b/modules/data.land/R/get.soil.R index 8ad20456393..1d728b313be 100644 --- a/modules/data.land/R/get.soil.R +++ b/modules/data.land/R/get.soil.R @@ -1,10 +1,10 @@ ##' Get Soil ##' -##' @title get.soil +##' @title get.soil ##' @param lat latitude ##' @param lon longitude -##' @param soil.nc netCDFe file with soil data -##' @return usda soil class +##' @param soil.nc netCDFe file with soil data +##' @return usda soil class ##' @export ##' @author David LeBauer ##' @importFrom ncdf4 ncvar_get @@ -12,13 +12,15 @@ get.soil <- function(lat, lon, soil.nc = soil.nc) { ## Lat and Lon Lat <- ncvar_get(soil.nc, "latitude") Lon <- ncvar_get(soil.nc, "longitude") - + lati <- which.min(abs(Lat - lat)) loni <- which.min(abs(Lon - lon)) - + ## topsoil usda_class <- ncvar_get(soil.nc, "t_usda_tex", start = c(loni, lati), count = c(1, 1)) - ref_depth <- PEcAn.utils::ud_convert(ncvar_get(soil.nc, "ref_depth", start = c(loni, lati), count = c(1, 1)), - "cm", "m") + ref_depth <- PEcAn.utils::ud_convert( + ncvar_get(soil.nc, "ref_depth", start = c(loni, lati), count = c(1, 1)), + "cm", "m" + ) return(list(usda_class = usda_class, ref_depth = ref_depth)) } # get.soil diff --git a/modules/data.land/R/get_veg_module.R b/modules/data.land/R/get_veg_module.R index ad1971d1ec7..7d9ec2821ec 100644 --- a/modules/data.land/R/get_veg_module.R +++ b/modules/data.land/R/get_veg_module.R @@ -9,7 +9,7 @@ ##' @param host list, host info as in settings$host, host$name forced to be "localhost" upstream ##' @param machine_host local machine hostname, e.g. 
"pecan2.bu.edu" ##' @param overwrite logical flag for convert_input -##' +##' ##' @export ##' ##' @author Istem Fer @@ -19,24 +19,22 @@ get_veg_module <- function(input_veg, dbparms, new_site, host, machine_host, - overwrite){ - + overwrite) { #--------------------------------------------------------------------------------------------------# # Extract/load data : this step requires DB connections # can be passed to convert_inputs now because process IC locally - lat <- new_site$lat - lon <- new_site$lon - site_id <- new_site$id + lat <- new_site$lat + lon <- new_site$lon + site_id <- new_site$id site_name <- new_site$name ## Prepare to call convert_inputs - pkg <- "PEcAn.data.land" + pkg <- "PEcAn.data.land" con <- PEcAn.DB::db.open(dbparms$bety) on.exit(PEcAn.DB::db.close(con), add = TRUE) - + # this check might change depending on what other sources that requires querying its own DB we will have - if(input_veg$source == "FIA" | input_veg$source == "NEON_veg"){ - + if (input_veg$source == "FIA" | input_veg$source == "NEON_veg") { fcn <- "extract_veg" getveg.id <- PEcAn.DB::convert_input( @@ -54,22 +52,21 @@ get_veg_module <- function(input_veg, new_site = new_site, gridres = input_veg$gridres, dbparms = dbparms, machine_host = machine_host, input_veg = input, - source = input_veg$source) - - + source = input_veg$source + ) + + return(getveg.id) - - }else{ - + } else { fcn <- "load_veg" - if(!is.null(input_veg$id)){ + if (!is.null(input_veg$id)) { source.id <- input_veg$id - }else{ + } else { PEcAn.logger::logger.error("Must specify input id") } getveg.id <- PEcAn.DB::convert_input( input.id = NA, - outfolder = outfolder, + outfolder = outfolder, formatname = "spp.info", mimetype = "application/rds", site.id = site_id, @@ -93,11 +90,9 @@ get_veg_module <- function(input_veg, ## 70 ## ## - icmeta = input_veg$metadata) + icmeta = input_veg$metadata + ) return(getveg.id) - } - - } # get.veg.module diff --git a/modules/data.land/R/gis.functions.R b/modules/data.land/R/gis.functions.R index 0fa83941f88..b0fc82385e1 100644 --- a/modules/data.land/R/gis.functions.R +++ b/modules/data.land/R/gis.functions.R @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html @@ -10,38 +10,37 @@ #--------------------------------------------------------------------------------------------------# ##' ##' Convert ESRI shapefile (*.shp) to keyhole markup language (KML) file format -##' +##' ##' @title Convert shapefile to KML -##' +##' ##' @param dir Directory of GIS shapefiles to convert to kml/kmz ##' @param ext File extension for files to convert to kml/kmz. Defaults to ESRI shapefile, ##' '.shp'. [Place holder for other potential vector files to conver to kml] ##' @param kmz TRUE/FALSE. Option to write out file as a compressed kml. Requires zip utility -##' @param proj4 OPTIONAL. Define output proj4 projection string. If set, input vector will be +##' @param proj4 OPTIONAL. Define output proj4 projection string. If set, input vector will be ##' reprojected to desired projection. Not yet implemented. ##' @param color OPTIONAL. Fill color for output kml/kmz file ##' @param NameField OPTIONAL. 
Define names for individual features in KML/KMZ file ##' @param out.dir OPTIONAL. Output directory for converted files ##' ##' @export -##' +##' ##' @examples ##' \dontrun{ ##' dir <- Sys.glob(file.path(R.home(), 'library', 'PEcAn.data.land','data')) ##' out.dir <- path.expand('~/temp') ##' shp2kml(dir,'.shp',kmz=FALSE,NameField='STATE',out.dir=out.dir) ##' system(paste('rm -r ',out.dir)) -##'} -##' +##' } +##' ##' @author Shawn P. Serbin shp2kml <- function(dir, ext, kmz = FALSE, proj4 = NULL, color = NULL, NameField = NULL, out.dir = NULL) { - # TODO: Enable compression of KML files using zip/gzip utility. Not quite figured this out yet # TODO: Allow assignment of output projection info by entering proj4 string # TODO: Allow for customization of output fill colors and line size # TODO: Allow for selection of taget attribute in output kml/kmz file(s) # TODO: Allow for setting out labels - + if (!is.null(out.dir)) { if (!file.exists(out.dir)) { dir.create(out.dir, recursive = TRUE) @@ -50,72 +49,75 @@ shp2kml <- function(dir, ext, kmz = FALSE, proj4 = NULL, color = NULL, NameField } else { output <- dir } - + # Get list of shapefiles in directory files <- list.files(path = dir, pattern = "*.shp", full.names = FALSE) remove <- grep("*xml", files) if (length(remove) > 0) { files <- files[-remove] } - + # loop here for (i in files) { print("") print(paste0("Converting : ** ", i, " ** to KML/KMZ file")) print("") print("") - + # Read in shapefile(s) & get coordinates/projection info shp.file <- # readShapeSpatial(file.path(dir,i),verbose=TRUE) coordinates(test) <- ~X+Y - + layers <- sf::st_layers(file.path(dir, i)) # shp.file <- readOGR(file.path(dir,i),layer=layers) # no need to read in file - + # Display vector info to the console print("") print(paste0("Input layers: ", layers$name)) print(paste0("Input projection info: ", layers$crs[[1]]$input)) print("") - + # Write out kml/kmz using plotKML package if (is.null(color)){ color <- 'grey70' } - + if (kmz == TRUE) { # NOT YET FULLY IMPLEMENTED in.file <- file.path(dir, i, fsep = .Platform$file.sep) out.file <- file.path(output, unlist(strsplit(i, "\\.")), fsep = .Platform$file.sep) - OGRstring <- paste0("ogr2ogr -progress -f KML", " ", - paste0(out.file, ".kmz"), - " ", in.file, " ", "-dsco NameField=", NameField) - system(OGRstring) # Run KML conversion - + OGRstring <- paste0( + "ogr2ogr -progress -f KML", " ", + paste0(out.file, ".kmz"), + " ", in.file, " ", "-dsco NameField=", NameField + ) + system(OGRstring) # Run KML conversion + # ADD COMPRESSION STEP HERE!!! - } else { # kml(shp.file,file=paste(output,'test.kml'),colour = 'grey70', alpha = 0.75, width=2, # balloon=FALSE) # writeOGR(shp.file['STATE'],'test2.kml',layer='statep010',NameField='STATE',driver='KML') - + # Using ogr2ogr external system utility. Works much better than R packages. 
- in.file <- file.path(dir, i, fsep = .Platform$file.sep) - out.file <- file.path(output, unlist(strsplit(i, "\\.")), fsep = .Platform$file.sep) - OGRstring <- paste0("ogr2ogr -progress -f KML", " ", paste0(out.file, ".kml"), - " ", in.file, " ", "-dsco NameField=", NameField) - system(OGRstring) # Run KML conversion + in.file <- file.path(dir, i, fsep = .Platform$file.sep) + out.file <- file.path(output, unlist(strsplit(i, "\\.")), fsep = .Platform$file.sep) + OGRstring <- paste0( + "ogr2ogr -progress -f KML", " ", paste0(out.file, ".kml"), + " ", in.file, " ", "-dsco NameField=", NameField + ) + system(OGRstring) # Run KML conversion } - } # End of loop + } # End of loop } # shp2kml #--------------------------------------------------------------------------------------------------# ##' ##' Function to extract attribute information from vector or raster data layer. -##' +##' ##' @title Retrieve attribute information from a vector or raster layer ##' @name get.attributes -##' +##' ##' @param file vector or raster layer ##' @param coords vector containin xmin,ymin,xmax,ymax defing the bounding box for subset -##' +##' ##' @export ##' ##' @examples @@ -129,11 +131,11 @@ shp2kml <- function(dir, ext, kmz = FALSE, proj4 = NULL, color = NULL, NameField get.attributes <- function(file, coords) { # ogr tools do not seem to function properly in R. Need to figure out a work around reading in # kml files drops important fields inside the layers. - - #library(fields) - #require(rgdal) - # note that OGR support is now provided by the sf and terra packages among others - + + # library(fields) + # require(rgdal) + # note that OGR support is now provided by the sf and terra packages among others + # print('NOT IMPLEMENTED YET') subset_layer(file,coords) } # get.attributes @@ -142,7 +144,7 @@ get.attributes <- function(file, coords) { ##' ##' Function to subset and clip a GIS vector or raster layer by a bounding box ##' or clip/subset layer (e.g. shapefile/KML) -##' +##' ##' @param file input file to be subset ##' @param coords vector with xmin,ymin,xmax,ymax defing the bounding box for subset ##' @param sub.layer Vector layer defining the subset region @@ -162,12 +164,11 @@ get.attributes <- function(file, coords) { ##' subset_layer(file=file,coords=c(-95,42,-84,47),out.dir=out.dir) ##' system(paste('rm -r',out.dir,sep='')) ##' } -##' +##' ##' @export subset_layer -##' +##' ##' @author Shawn P. 
Serbin subset_layer <- function(file, coords = NULL, sub.layer = NULL, clip = FALSE, out.dir = NULL, out.name = NULL) { - # Setup output directory for subset layer if (is.null(out.dir)) { out.dir <- dirname(file) @@ -179,32 +180,38 @@ subset_layer <- function(file, coords = NULL, sub.layer = NULL, clip = FALSE, ou if (!file.exists(out.dir)) { dir.create(out.dir, recursive = TRUE) } - + # Setup output file name for subset layer if (is.null(out.name)) { - out.name <- paste0(unlist(strsplit(basename(file), "\\."))[1], - ".sub.", unlist(strsplit(basename(file), "\\."))[2]) + out.name <- paste0( + unlist(strsplit(basename(file), "\\."))[1], + ".sub.", unlist(strsplit(basename(file), "\\."))[2] + ) } else { out.name <- out.name } - + print(paste0("Subsetting layer: ", out.name)) output <- file.path(out.dir, out.name, fsep = .Platform$file.sep) - + if (unlist(strsplit(basename(file), "\\."))[2] == "kml") { format <- "-f KML" } else { format <- paste0("-f ", "'ESRI Shapefile'") } - + if (clip) { - OGRstring <- paste0("ogr2ogr -spat", " ", coords[1], " ", coords[2], " ", coords[3], " ", coords[4], - " ", format, " ", output, " ", file, " ", "-clipsrc", " ", "spat_extent") + OGRstring <- paste0( + "ogr2ogr -spat", " ", coords[1], " ", coords[2], " ", coords[3], " ", coords[4], + " ", format, " ", output, " ", file, " ", "-clipsrc", " ", "spat_extent" + ) } else { - OGRstring <- paste0("ogr2ogr -spat", " ", coords[1], " ", coords[2], " ", coords[3], " ", coords[4], - " ", format, " ", output, " ", file) + OGRstring <- paste0( + "ogr2ogr -spat", " ", coords[1], " ", coords[2], " ", coords[3], " ", coords[4], + " ", format, " ", output, " ", file + ) } - + # Run subset command system(OGRstring) } # subset_layer diff --git a/modules/data.land/R/ic_process.R b/modules/data.land/R/ic_process.R index 2c402692835..02e004acb31 100644 --- a/modules/data.land/R/ic_process.R +++ b/modules/data.land/R/ic_process.R @@ -8,28 +8,26 @@ ##' @param overwrite Default = FALSE. 
whether to force ic_process to proceed ##' ##' @author Istem Fer, Hamze Dokoohaki -ic_process <- function(settings, input, dir, overwrite = FALSE){ - - +ic_process <- function(settings, input, dir, overwrite = FALSE) { #--------------------------------------------------------------------------------------------------# # Extract info from settings and setup - site <- settings$run$site + site <- settings$run$site model <- list() - model$type <- settings$model$type - model$id <- settings$model$id - host <- settings$host - dbparms <- settings$database + model$type <- settings$model$type + model$id <- settings$model$id + host <- settings$host + dbparms <- settings$database # Handle IC Workflow locally - if(host$name != "localhost"){ + if (host$name != "localhost") { host$name <- "localhost" - dir <- settings$database$dbfiles + dir <- settings$database$dbfiles } # If overwrite is a plain boolean, fill in defaults for each module if (!is.list(overwrite)) { if (overwrite) { - overwrite <- list(getveg = TRUE, ensveg = TRUE, putveg = TRUE) + overwrite <- list(getveg = TRUE, ensveg = TRUE, putveg = TRUE) } else { overwrite <- list(getveg = FALSE, ensveg = FALSE, putveg = FALSE) } @@ -48,13 +46,15 @@ ic_process <- function(settings, input, dir, overwrite = FALSE){ # set up bety connection con <- PEcAn.DB::db.open(dbparms$bety) on.exit(PEcAn.DB::db.close(con), add = TRUE) - - #grab site lat and lon info - latlon <- PEcAn.DB::query.site(site$id, con = con)[c("lat", "lon")] + + # grab site lat and lon info + latlon <- PEcAn.DB::query.site(site$id, con = con)[c("lat", "lon")] # setup site database number, lat, lon and name and copy for format.vars if new input - new.site <- data.frame(id = as.numeric(site$id), - lat = latlon$lat, - lon = latlon$lon) + new.site <- data.frame( + id = as.numeric(site$id), + lat = latlon$lat, + lon = latlon$lon + ) new.site$name <- settings$run$site$name @@ -71,49 +71,45 @@ ic_process <- function(settings, input, dir, overwrite = FALSE){ # this check might change depending on what other sources that requires querying its own DB we will have # probably something passed from settings - if(input$source == "FIA"){ - + if (input$source == "FIA") { start_date <- settings$run$start.date - end_date <- settings$run$end.date - }else if (input$source=="BADM"){ - + end_date <- settings$run$end.date + } else if (input$source == "BADM") { outfolder <- file.path(dir, paste0(input$source, "_site_", str_ns)) - if(!dir.exists(outfolder)) dir.create(outfolder) + if (!dir.exists(outfolder)) dir.create(outfolder) - #see if there is already files generated there - newfile <-list.files(outfolder, "*.nc$", full.names = TRUE) %>% + # see if there is already files generated there + newfile <- list.files(outfolder, "*.nc$", full.names = TRUE) %>% as.list() names(newfile) <- rep("path", length(newfile)) - if (length(newfile)==0){ - newfile <- PEcAn.data.land::BADM_IC_process(settings, dir=outfolder, overwrite=FALSE) + if (length(newfile) == 0) { + newfile <- PEcAn.data.land::BADM_IC_process(settings, dir = outfolder, overwrite = FALSE) } - settings$run$inputs[['poolinitcond']]$path <- newfile + settings$run$inputs[["poolinitcond"]]$path <- newfile return(settings) - }else if (input$source == "NEON_veg"){ - #For debugging purposes I am hard coding in the start and end dates, will revisit and adjust once extract_NEON_veg is working within ic_process - start_date = as.Date(input$startdate) - end_date = as.Date(input$enddate) + } else if (input$source == "NEON_veg") { + # For debugging purposes I am hard 
coding in the start and end dates, will revisit and adjust once extract_NEON_veg is working within ic_process + start_date <- as.Date(input$startdate) + end_date <- as.Date(input$enddate) # start_date = as.Date("2020-01-01") # end_date = as.Date("2021-09-01") - #Note the start and end dates for ICs are not the same as those for the forecast runs - #please check out NEON products DP1.10098.001 for your desired site to check data availability before setting start and end dates - }else{ - - query <- paste0("SELECT * FROM inputs where id = ", input$id) - input_file <- PEcAn.DB::db.query(query, con = con) - start_date <- input_file$start_date - end_date <- input_file$end_date - + # Note the start and end dates for ICs are not the same as those for the forecast runs + # please check out NEON products DP1.10098.001 for your desired site to check data availability before setting start and end dates + } else { + query <- paste0("SELECT * FROM inputs where id = ", input$id) + input_file <- PEcAn.DB::db.query(query, con = con) + start_date <- input_file$start_date + end_date <- input_file$end_date } # set up host information machine.host <- ifelse(host == "localhost" || host$name == "localhost", PEcAn.remote::fqdn(), host$name) machine <- PEcAn.DB::db.query(paste0("SELECT * from machines where hostname = '", machine.host, "'"), con) - + # retrieve model type info - if(is.null(model)){ + if (is.null(model)) { modeltype_id <- PEcAn.DB::db.query(paste0("SELECT modeltype_id FROM models where id = '", settings$model$id, "'"), con)[[1]] model <- PEcAn.DB::db.query(paste0("SELECT name FROM modeltypes where id = '", modeltype_id, "'"), con)[[1]] } @@ -129,84 +125,83 @@ ic_process <- function(settings, input, dir, overwrite = FALSE){ # then we'll need to pass pattern, ensemble etc to convert_input nsource <- ifelse(!is.null(input$ensemble), as.numeric(input$ensemble), 1) - -#--------------------------------------------------------------------------------------------------# + + #--------------------------------------------------------------------------------------------------# # Load/extract + match species module if (is.null(getveg.id) & is.null(putveg.id)) { - getveg.id <- list() - for(i in seq_len(nsource)){ - getveg.id[[i]] <- get_veg_module(input_veg = input, - outfolder = outfolder, - start_date = start_date, - end_date = end_date, - dbparms = dbparms, - new_site = new.site, - host = host, - machine_host = machine.host, - overwrite = overwrite$getveg) + for (i in seq_len(nsource)) { + getveg.id[[i]] <- get_veg_module( + input_veg = input, + outfolder = outfolder, + start_date = start_date, + end_date = end_date, + dbparms = dbparms, + new_site = new.site, + host = host, + machine_host = machine.host, + overwrite = overwrite$getveg + ) } - } -#--------------------------------------------------------------------------------------------------# + #--------------------------------------------------------------------------------------------------# # Sampling/ensemble module if (!is.null(getveg.id) & !is.null(input$ensemble) & is.null(putveg.id)) { - ctr <- 1 ensveg.id <- list() - for(i in seq_len(as.numeric(input$ensemble))){ - + for (i in seq_len(as.numeric(input$ensemble))) { ctr <- ifelse(nsource == 1, 1, i) - ensveg.id[[i]] <- ens_veg_module(getveg.id = getveg.id[[ctr]], - dbparms = dbparms, - input_veg = input, - outfolder = outfolder, - machine = machine, - start_date = start_date, - end_date = end_date, - n.ensemble = i, - new_site = new.site, - host = host) + ensveg.id[[i]] <- ens_veg_module( + 
getveg.id = getveg.id[[ctr]], + dbparms = dbparms, + input_veg = input, + outfolder = outfolder, + machine = machine, + start_date = start_date, + end_date = end_date, + n.ensemble = i, + new_site = new.site, + host = host + ) } getveg.id <- ensveg.id } -#--------------------------------------------------------------------------------------------------# + #--------------------------------------------------------------------------------------------------# # Match species to PFTs + veg2model module if (!is.null(getveg.id) & is.null(putveg.id)) { # probably need a more sophisticated check here putveg.id <- list() - for(i in seq_along(getveg.id)){ - putveg.id[[i]] <- put_veg_module(getveg.id = getveg.id[[i]], - dbparms = dbparms, - input_veg = input, - pfts = settings$pfts, - outfolder = outfolder, - n.ensemble = i, - dir = dir, - machine = machine, - model = model, - start_date = start_date, - end_date = end_date, - new_site = new.site, - host = host, - overwrite = overwrite$putveg) + for (i in seq_along(getveg.id)) { + putveg.id[[i]] <- put_veg_module( + getveg.id = getveg.id[[i]], + dbparms = dbparms, + input_veg = input, + pfts = settings$pfts, + outfolder = outfolder, + n.ensemble = i, + dir = dir, + machine = machine, + model = model, + start_date = start_date, + end_date = end_date, + new_site = new.site, + host = host, + overwrite = overwrite$putveg + ) } - - } #--------------------------------------------------------------------------------------------------# # Fill settings if (!is.null(putveg.id)) { - # extend the inputs list for ensemble members - #settings_inputs <- lapply(seq_along(settings$run$inputs), function(x) rep(settings$run$inputs[[x]], each = length((putveg.id)))) + # settings_inputs <- lapply(seq_along(settings$run$inputs), function(x) rep(settings$run$inputs[[x]], each = length((putveg.id)))) settings_inputs <- settings$run$inputs # make sure all sublists are grouped and renamed to have unique tags, e.g.: @@ -229,72 +224,76 @@ ic_process <- function(settings, input, dir, overwrite = FALSE){ # # if(is.null(tmp.list$path)) tmp.list$path <- list() # # return(tmp.list) # # }) - # + # # # names(settings_inputs) <- names(settings$run$inputs) - # - for(i in seq_along(putveg.id)){ - + # + for (i in seq_along(putveg.id)) { model_file <- PEcAn.DB::db.query(paste("SELECT * from dbfiles where container_id =", putveg.id[[i]], "and machine_id =", machine$id), con) - + # now that we don't have multipasses, convert_input only inserts 1st filename # do we want to change it in convert_inputs such that it loops over the dbfile.insert? 
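      # model_file is the dbfiles record returned for this ensemble member; its
      # file_path and file_name fields are joined below into the absolute path
      # that gets written into settings$run$inputs for the corresponding tag.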
path_to_settings <- file.path(model_file[["file_path"]], model_file[["file_name"]]) - settings_inputs[[input$output]][['path']][[paste0('path', i)]] <- path_to_settings + settings_inputs[[input$output]][["path"]][[paste0("path", i)]] <- path_to_settings # NOTE : THIS BIT IS SENSITIVE TO THE ORDER OF TAGS IN PECAN.XML # this took care of "css" only, others have the same prefix - if(input$output == "css"){ - settings_inputs[["pss"]][['path']][[paste0('path', i)]] <- gsub("css","pss", path_to_settings) - settings_inputs[["site"]][['path']][[paste0('path', i)]] <- gsub("css","site", path_to_settings) + if (input$output == "css") { + settings_inputs[["pss"]][["path"]][[paste0("path", i)]] <- gsub("css", "pss", path_to_settings) + settings_inputs[["site"]][["path"]][[paste0("path", i)]] <- gsub("css", "site", path_to_settings) # IF: For now IC workflow is only working for ED and it's the only case for copying to remote # but this copy to remote might need to go out of this if-block and change # Copy to remote, update DB and change paths if needed if (settings$host$name != "localhost") { - folder_dir <- paste0( input$source, "_site_", str_ns, "/", input$source, "_ens", i, ".", - lubridate::year(start_date)) + lubridate::year(start_date) + ) remote_dir <- file.path(settings$host$folder, folder_dir) # copies css - css_file <- settings_inputs[["css"]][['path']][[paste0('path', i)]] + css_file <- settings_inputs[["css"]][["path"]][[paste0("path", i)]] remote.copy.update( input_id = putveg.id[[i]], remote_dir = remote_dir, local_file_path = css_file, host = settings$host, - con = con) - settings_inputs[["css"]][['path']][[paste0('path', i)]] <- file.path( + con = con + ) + settings_inputs[["css"]][["path"]][[paste0("path", i)]] <- file.path( remote_dir, - basename(css_file)) + basename(css_file) + ) - # pss - pss_file <- settings_inputs[["pss"]][['path']][[paste0('path', i)]] + # pss + pss_file <- settings_inputs[["pss"]][["path"]][[paste0("path", i)]] remote.copy.update( input_id = putveg.id[[i]], remote_dir = remote_dir, local_file_path = pss_file, host = settings$host, - con = con) - settings_inputs[["pss"]][['path']][[paste0('path', i)]] <- file.path( + con = con + ) + settings_inputs[["pss"]][["path"]][[paste0("path", i)]] <- file.path( remote_dir, - basename(pss_file)) + basename(pss_file) + ) # site - site_file <- settings_inputs[["site"]][['path']][[paste0('path', i)]] + site_file <- settings_inputs[["site"]][["path"]][[paste0("path", i)]] remote.copy.update( input_id = putveg.id[[i]], remote_dir = remote_dir, local_file_path = site_file, host = settings$host, - con = con) - settings_inputs[["site"]][['path']][[paste0('path', i)]] <- file.path( + con = con + ) + settings_inputs[["site"]][["path"]][[paste0("path", i)]] <- file.path( remote_dir, - basename(site_file)) - + basename(site_file) + ) } } } diff --git a/modules/data.land/R/land.utils.R b/modules/data.land/R/land.utils.R index 0c079ac9f39..d4f8ee27ec9 100644 --- a/modules/data.land/R/land.utils.R +++ b/modules/data.land/R/land.utils.R @@ -1,10 +1,10 @@ get.elevation <- function(lat, lon) { # http://stackoverflow.com/a/8974308/199217 - url <- paste("http://www.earthtools.org/height", lat, lon, sep = "/") - + url <- paste("http://www.earthtools.org/height", lat, lon, sep = "/") + page <- paste0(readLines(curl::curl(url)), collapse = "\n") - ans <- XML::xmlTreeParse(page, useInternalNodes = TRUE) + ans <- XML::xmlTreeParse(page, useInternalNodes = TRUE) heightNode <- XML::xpathApply(ans, "//meters")[[1]] 
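  # xpathApply() extracts the first <meters> node from the parsed earthtools.org
  # response; its text content is the elevation in metres and is returned as a
  # numeric value below.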
return(as.numeric(XML::xmlValue(heightNode))) } # get.elevation @@ -12,8 +12,8 @@ get.elevation <- function(lat, lon) { is.land <- function(lat, lon) { ncvar_get <- ncdf4::ncvar_get - Lat <- ncvar_get(nc = met.nc, varid = "lat") - Lon <- ncvar_get(nc = met.nc, varid = "lon") + Lat <- ncvar_get(nc = met.nc, varid = "lat") + Lon <- ncvar_get(nc = met.nc, varid = "lon") lati <- which.min(abs(Lat - lat)) loni <- which.min(abs(Lon - lon)) mask <- ncvar_get(nc = met.nc, varid = "mask", start = c(loni, lati), count = c(1, 1)) diff --git a/modules/data.land/R/load_veg.R b/modules/data.land/R/load_veg.R index a4fc445d8d5..e0852bc9d69 100644 --- a/modules/data.land/R/load_veg.R +++ b/modules/data.land/R/load_veg.R @@ -18,47 +18,50 @@ #' @author Istem Fer load_veg <- function(new_site, start_date, end_date, source_id, source, icmeta = NULL, format_name = NULL, - machine_host, dbparms, outfolder, overwrite = FALSE, ...){ - + machine_host, dbparms, outfolder, overwrite = FALSE, ...) { con <- PEcAn.DB::db.open(dbparms$bety) on.exit(PEcAn.DB::db.close(con), add = TRUE) #--------------------------------------------------------------------------------------------------# # Load data : this step requires DB connections # get machine id - machine_id <- PEcAn.DB::get.id(table = "machines", colnames = "hostname", - values = machine_host, con = con) + machine_id <- PEcAn.DB::get.id( + table = "machines", colnames = "hostname", + values = machine_host, con = con + ) # query data.path from source id [input id in BETY] - query <- paste0("SELECT * FROM dbfiles where container_id = ", source_id, - "AND machine_id=", machine_id) + query <- paste0( + "SELECT * FROM dbfiles where container_id = ", source_id, + "AND machine_id=", machine_id + ) input_file <- PEcAn.DB::db.query(query, con = con) - data_path <- file.path(input_file[["file_path"]], input_file[["file_name"]]) #File path and file name of source file from bety + data_path <- file.path(input_file[["file_path"]], input_file[["file_name"]]) # File path and file name of source file from bety # query format info - format <- PEcAn.DB::query.format.vars(bety = con, input.id = source_id) + format <- PEcAn.DB::query.format.vars(bety = con, input.id = source_id) # load_data{benchmark} - obs <- PEcAn.benchmark::load_data(data.path = data_path, format, site = new_site) + obs <- PEcAn.benchmark::load_data(data.path = data_path, format, site = new_site) #--------------------------------------------------------------------------------------------------# # Match species : this step requires DB connections - if("species_USDA_symbol" %in% format$vars$bety_name){ - code.col <- "species_USDA_symbol" + if ("species_USDA_symbol" %in% format$vars$bety_name) { + code.col <- "species_USDA_symbol" format_name <- "usda" - }else if("latin_name" %in% format$vars$bety_name){ + } else if ("latin_name" %in% format$vars$bety_name) { # not encountered an actual case yet, put here as a reminder code.col <- "latin_name" format_name <- "latin_name" # might indicate a custom format, should be passed to function - if(is.null(format_name)){ + if (is.null(format_name)) { PEcAn.logger::logger.severe("Can't match code to species. Please provide 'match.format' via settings.") } - }else{ + } else { PEcAn.logger::logger.severe("Can't match code to species. 
No valid format found.") } # match code to species ID @@ -66,21 +69,21 @@ load_veg <- function(new_site, start_date, end_date, obs[[code.col]] <- toupper(obs[[code.col]]) spp.info <- match_species_id(input_codes = obs[[code.col]], format_name = format_name, bety = con) # merge with data - tmp <- spp.info[ , colnames(spp.info) != "input_code"] + tmp <- spp.info[, colnames(spp.info) != "input_code"] # a hack for now to have a similar structure as the FIA case - veg_info <- list() + veg_info <- list() - if(!is.null(icmeta)){ + if (!is.null(icmeta)) { # the first sublist can be for the metadata maybe? # to be handled by veg2model later veg_info[[1]] <- icmeta - if(is.null(icmeta$area)){ + if (is.null(icmeta$area)) { # this might not be needed for all models but let's put a warning here before it's too late PEcAn.logger::logger.warn("IMPORTANT : No area info passed via metadata, if your model needs plot area in IC calculations please provide it under 'settings$run$inputs$css$metadata$area'.") } - }else{ + } else { veg_info[[1]] <- NULL # this might not be needed for all models but let's put a warning here before it's too late PEcAn.logger::logger.warn("IMPORTANT : No area info passed via metadata, @@ -95,16 +98,16 @@ load_veg <- function(new_site, start_date, end_date, # need check for overwrite sppfilename <- write_veg(outfolder, start_date, veg_info = veg_info, source) # Build results dataframe for convert_input - results <- data.frame(file = sppfilename, - host = machine_host, - mimetype = "application/rds", - formatname = "spp.info", - startdate = start_date, - enddate = end_date, - dbfile.name = basename(sppfilename), - stringsAsFactors = FALSE) + results <- data.frame( + file = sppfilename, + host = machine_host, + mimetype = "application/rds", + formatname = "spp.info", + startdate = start_date, + enddate = end_date, + dbfile.name = basename(sppfilename), + stringsAsFactors = FALSE + ) ### return for convert_inputs return(invisible(results)) - - } # load_veg diff --git a/modules/data.land/R/matchInventoryRings.R b/modules/data.land/R/matchInventoryRings.R index 4a51e48ae3f..f777bbcdc89 100644 --- a/modules/data.land/R/matchInventoryRings.R +++ b/modules/data.land/R/matchInventoryRings.R @@ -8,32 +8,31 @@ #' ##' @export matchInventoryRings <- function(trees, rings, extractor = "TreeCode", nyears = 30, coredOnly = TRUE) { - ## build tree codes id.build <- function(x) { do.call(paste0("to.", extractor), x) } # id.build names(trees) <- toupper(names(trees)) - tree.ID <- id.build(list(SITE = trees$SITE, PLOT = trees$PLOT, SUB = trees$SUB, TAG = trees$TAG)) - + tree.ID <- id.build(list(SITE = trees$SITE, PLOT = trees$PLOT, SUB = trees$SUB, TAG = trees$TAG)) + ## build tree ring codes if (is.list(rings)) { ring.file <- rep(names(rings), times = sapply(rings, ncol)) - rings <- dplR::combine.rwl(rings) + rings <- dplR::combine.rwl(rings) } - ring.ID <- names(rings) + ring.ID <- names(rings) id.extract <- function(x) { do.call(paste0("from.", extractor), list(x = x)) } # id.extract ring.info <- id.extract(ring.ID) - + ## matching up data sets by tree - mch <- match(tree.ID, ring.ID) - cored <- apply(!is.na(trees[, grep("DATE_CORE_COLLECT", names(trees))]), 1, any) + mch <- match(tree.ID, ring.ID) + cored <- apply(!is.na(trees[, grep("DATE_CORE_COLLECT", names(trees))]), 1, any) unmatched <- which(cored & is.na(mch)) utils::write.table(tree.ID[unmatched], file = "unmatched.txt") - mch[duplicated(mch)] <- NA ## if there's multiple stems, match the first - + mch[duplicated(mch)] <- NA ## if there's 
multiple stems, match the first + ## combine data into one table combined <- cbind(trees, t(as.matrix(rings))[mch, -(nyears - 1):0 + nrow(rings)]) if (coredOnly) { diff --git a/modules/data.land/R/match_pft.R b/modules/data.land/R/match_pft.R index a14bd7cb3cc..6766bc8908c 100644 --- a/modules/data.land/R/match_pft.R +++ b/modules/data.land/R/match_pft.R @@ -1,106 +1,104 @@ -##' +##' ##' @name match_pft ##' @title match_pft ##' @description Matches BETYdb species IDs to model-specific PFTs -##' +##' ##' @param bety_species_id vector of BETYdb species IDs ##' @param pfts settings$pfts. List of pfts with database matching based on name ##' @param con database connection, if NULL use traits package ##' @param allow_missing flag to indicate that settings file does not need to match exactly ##' @param query Default is NULL. query to BETY db. ##' @param model Default is NULL. This is the BETY model ID for matching pfts to the correct model. -##' +##' ##' @author Mike Dietze, Istem Fer ##' @return table of BETYdb PFT IDs matched to species IDs -##' +##' ##' @export -match_pft <- function(bety_species_id, pfts, query = NULL, con = NULL, allow_missing = FALSE, model = NULL){ - +match_pft <- function(bety_species_id, pfts, query = NULL, con = NULL, allow_missing = FALSE, model = NULL) { ### get species to PFT mappting - if(!is.null(con)){ - + if (!is.null(con)) { for (pft in pfts) { if (is.null(query)) { - query <- paste0("SELECT bp.id as bety_pft_id, bp.name as pft, bs.id as bety_species_id, bs.scientificname as latin FROM pfts as bp INNER JOIN ", - "pfts_species AS bps ON bps.pft_id = bp.id INNER JOIN species AS bs ON bs.id = bps.specie_id WHERE ", - "bp.name = '", pft$name, "'") + query <- paste0( + "SELECT bp.id as bety_pft_id, bp.name as pft, bs.id as bety_species_id, bs.scientificname as latin FROM pfts as bp INNER JOIN ", + "pfts_species AS bps ON bps.pft_id = bp.id INNER JOIN species AS bs ON bs.id = bps.specie_id WHERE ", + "bp.name = '", pft$name, "'" + ) } else { query <- paste0(query, " OR bp.name = '", pft$name, "'") } } - if(!is.null(model)){ - modeltype <- PEcAn.DB::db.query(paste0("SELECT * from modeltypes WHERE name = '",model,"'"),con=con) - query <- paste0(query," AND bp.modeltype_id = ",modeltype$id) + if (!is.null(model)) { + modeltype <- PEcAn.DB::db.query(paste0("SELECT * from modeltypes WHERE name = '", model, "'"), con = con) + query <- paste0(query, " AND bp.modeltype_id = ", modeltype$id) } translation <- PEcAn.DB::db.query(query, con = con) - - - }else{ # use traits package - + } else { # use traits package + bety_list <- list() - + for (pft in pfts) { # query pft id - bety_pft <- traits::betydb_query(name = pft$name, modeltype_id = model$id, table = 'pfts', user = 'bety', pwd = 'bety') + bety_pft <- traits::betydb_query(name = pft$name, modeltype_id = model$id, table = "pfts", user = "bety", pwd = "bety") # query species id - bety_species <- traits::betydb_query(pft_id = bety_pft$id, table = 'pfts_species', user = 'bety', pwd = 'bety') + bety_species <- traits::betydb_query(pft_id = bety_pft$id, table = "pfts_species", user = "bety", pwd = "bety") bety_list[[pft$name]] <- bety_species$specie_id } - tmp <- lapply(seq_along(bety_list), function(x){ - data.frame(pft = rep(names(bety_list)[x], length(bety_list[[x]])), - bety_species_id = bety_list[[x]])}) - + tmp <- lapply(seq_along(bety_list), function(x) { + data.frame( + pft = rep(names(bety_list)[x], length(bety_list[[x]])), + bety_species_id = bety_list[[x]] + ) + }) + translation <- do.call("rbind", tmp) - } ## Check 
for duplicate bety_species_ids in PFTs - bad <- translation[duplicated(translation$bety_species_id),] + bad <- translation[duplicated(translation$bety_species_id), ] if (nrow(bad) > 0) { - for(i in seq_along(nrow(bad))){ - error.pft <- translation[translation$bety_species_id == bad$bety_species_id[i],] + for (i in seq_along(nrow(bad))) { + error.pft <- translation[translation$bety_species_id == bad$bety_species_id[i], ] PEcAn.logger::logger.warn(paste0("Duplicated species id: ", bad$bety_species_id[i], " under ", paste(error.pft$pft, collapse = ", "))) } } ## Check for unmatched bety_species_ids bad2 <- bety_species_id[!(bety_species_id %in% translation$bety_species_id)] - + # skip dead tree codes for 2TB, SNAG, DEAD dead_tree_ids <- c(1000020816, 1000020817, 1438) - bad2 <- bad2[!(bad2 %in% dead_tree_ids)] - + bad2 <- bad2[!(bad2 %in% dead_tree_ids)] + if (length(bad2) > 0) { ubad <- unique(bad2) - for(i in seq_along(ubad)){ + for (i in seq_along(ubad)) { # Coerce id back into species names. Makes a more readable warning. - if(!is.na(ubad[i])){ - if(!is.null(con)){ + if (!is.na(ubad[i])) { + if (!is.null(con)) { latin <- PEcAn.DB::db.query(paste("SELECT scientificname FROM species where id =", ubad[i]), con = con) - }else{ # use traits package - bety_latin <- traits::betydb_query(id = ubad[i], table = 'species', user = 'bety', pwd = 'bety') - latin <- bety_latin$scientificname + } else { # use traits package + bety_latin <- traits::betydb_query(id = ubad[i], table = "species", user = "bety", pwd = "bety") + latin <- bety_latin$scientificname } - - }else{ + } else { latin <- NA } - PEcAn.logger::logger.warn(paste0("Unmatched species: ", ubad[i]," ", latin)) + PEcAn.logger::logger.warn(paste0("Unmatched species: ", ubad[i], " ", latin)) } } - + ## stop after checking both errors if (nrow(bad) > 0) { PEcAn.logger::logger.severe("Within BETY PFT table, please address duplicated species and add unmatched species to PFTs.") } - - if(allow_missing == FALSE & length(bad2) > 0){ + + if (allow_missing == FALSE & length(bad2) > 0) { PEcAn.logger::logger.severe("Within BETY PFT table, please address duplicated species and add unmatched species to PFTs.") } ## Match - matchedpft <- dplyr::right_join(translation, as.data.frame(bety_species_id), type="right") + matchedpft <- dplyr::right_join(translation, as.data.frame(bety_species_id), type = "right") return(matchedpft) - } diff --git a/modules/data.land/R/match_species_id.R b/modules/data.land/R/match_species_id.R index 92c6d9ad5e1..ba72f121b61 100644 --- a/modules/data.land/R/match_species_id.R +++ b/modules/data.land/R/match_species_id.R @@ -27,106 +27,123 @@ #' \dontrun{ #' con <- PEcAn.DB::db.open(list( #' driver = "Postgres", -#' dbname = 'bety', -#' user = 'bety', -#' password = 'bety', -#' host = 'localhost') +#' dbname = "bety", +#' user = "bety", +#' password = "bety", +#' host = "localhost" +#' )) +#' input_codes <- c("ACRU", "PIMA", "TSCA") +#' format_name <- "usda" +#' match_species_id( +#' input_codes = input_codes, +#' format_name = format_name, +#' bety = con #' ) -#' input_codes <- c('ACRU', 'PIMA', 'TSCA') -#' format_name <- 'usda' -#' match_species_id(input_codes = input_codes, -#' format_name = format_name, -#' bety = con) #' } #' #' @importFrom magrittr %>% #' @export -match_species_id <- function(input_codes, format_name = 'custom', bety = NULL, translation_table = NULL, ...) { - +match_species_id <- function(input_codes, format_name = "custom", bety = NULL, translation_table = NULL, ...) 
{ # Relate format names to BETY columns - formats_dict <- c('usda' = 'Symbol', - 'fia' = 'spcd', - 'latin_name' = 'scientificname', - 'custom' = 'custom') + formats_dict <- c( + "usda" = "Symbol", + "fia" = "spcd", + "latin_name" = "scientificname", + "custom" = "custom" + ) if (!format_name %in% names(formats_dict)) { - PEcAn.logger::logger.severe('format_name "', format_name, '" not found. ', - 'Please use one of the following: ', - paste(names(formats_dict), collapse = ', ')) + PEcAn.logger::logger.severe( + 'format_name "', format_name, '" not found. ', + "Please use one of the following: ", + paste(names(formats_dict), collapse = ", ") + ) } if (!is.null(translation_table)) { - msg2 <- c('Found the following columns: ', - paste(colnames(translation_table), collapse = ', ')) - if (!'input_code' %in% colnames(translation_table)) { + msg2 <- c( + "Found the following columns: ", + paste(colnames(translation_table), collapse = ", ") + ) + if (!"input_code" %in% colnames(translation_table)) { PEcAn.logger::logger.severe('Custom translation table must have column "input_code". ', msg2) - } else if (!'bety_species_id' %in% colnames(translation_table)) { + } else if (!"bety_species_id" %in% colnames(translation_table)) { PEcAn.logger::logger.severe('Custom translation table must have column "bety_species_id". ', msg2) } else { - if (any(grepl('^(genus|species)$', colnames(translation_table)))) { - PEcAn.logger::logger.warn('"genus" or "species" columns found in translation table. ', - 'Because these also match the BETY table, ', - 'they will be ignored by the merge, but their names will ', - 'be appended with ".translation_table" for disambiguation') + if (any(grepl("^(genus|species)$", colnames(translation_table)))) { + PEcAn.logger::logger.warn( + '"genus" or "species" columns found in translation table. ', + "Because these also match the BETY table, ", + "they will be ignored by the merge, but their names will ", + 'be appended with ".translation_table" for disambiguation' + ) } - bety_species <- dplyr::tbl(bety, 'species') %>% - dplyr::filter(.data$id %in% !!translation_table[['bety_species_id']]) %>% + bety_species <- dplyr::tbl(bety, "species") %>% + dplyr::filter(.data$id %in% !!translation_table[["bety_species_id"]]) %>% dplyr::select(bety_species_id = "id", "genus", "species") %>% dplyr::collect() translation <- dplyr::left_join(translation_table, bety_species, - by = 'bety_species_id', - suffix = c('.translation_table', '')) + by = "bety_species_id", + suffix = c(".translation_table", "") + ) } } else { column <- formats_dict[[format_name]] if (!is.null(bety)) { # query BETY for species, id, genus, and latin name translation <- dplyr::tbl(bety, "species") %>% - dplyr::select(bety_species_id = "id", "genus", "species", - input_code = !!column) %>% + dplyr::select( + bety_species_id = "id", "genus", "species", + input_code = !!column + ) %>% dplyr::collect() translation <- dplyr::semi_join( translation, data.frame(input_code = input_codes, stringsAsFactors = FALSE), - by = "input_code") - }else{ + by = "input_code" + ) + } else { # use traits package # can call traits::betydb_query one at a time? 
# reduce the number of calls translation <- data.frame( - bety_species_id = rep(NA, length(unique(input_codes))), - genus = rep(NA, length(unique(input_codes))), - species = rep(NA, length(unique(input_codes))), + bety_species_id = rep(NA, length(unique(input_codes))), + genus = rep(NA, length(unique(input_codes))), + species = rep(NA, length(unique(input_codes))), input_code = unique(input_codes), - stringsAsFactors = FALSE) + stringsAsFactors = FALSE + ) for (i in seq_len(nrow(translation))) { foo <- eval(parse(text = paste0( "traits::betydb_query(", column, "='", translation$input_code[i], - "', table = 'species', user = 'bety', pwd = 'bety')"))) - if(length(foo) == 0){ + "', table = 'species', user = 'bety', pwd = 'bety')" + ))) + if (length(foo) == 0) { PEcAn.logger::logger.error(msg = "Match.species.id translation query returns empty for ", column, "='", translation$input_code[i]) } translation$bety_species_id[i] <- foo$id - translation$genus[i] <- foo$genus - translation$species[i] <- foo$species + translation$genus[i] <- foo$genus + translation$species[i] <- foo$species } } } input_table <- data.frame(input_code = input_codes, stringsAsFactors = FALSE) # preserving the order is important for downstream translation <- dplyr::select( - .data = translation, - "bety_species_id", - "genus", - "species", - "input_code", - dplyr::everything()) + .data = translation, + "bety_species_id", + "genus", + "species", + "input_code", + dplyr::everything() + ) merge_table <- dplyr::left_join(input_table, translation, by = "input_code") if (sum(is.na(merge_table$bety_species_id)) > 0) { bad <- unique(merge_table$input_code[is.na(merge_table$bety_species_id)]) PEcAn.logger::logger.error( "Species for the following code(s) not found : ", - paste(bad, collapse = ", ")) + paste(bad, collapse = ", ") + ) } return(merge_table) } # match_species_id diff --git a/modules/data.land/R/partition_roots.R b/modules/data.land/R/partition_roots.R index 29fc521bd12..ada3c667a65 100644 --- a/modules/data.land/R/partition_roots.R +++ b/modules/data.land/R/partition_roots.R @@ -7,27 +7,29 @@ ##' @param rtsize vector of lower bounds of root size class thresholds in m, length greater than one and equal to roots. Must contain threshold within .0005 m of .002 m ##' @return list containing summed fine root and coarse root carbon (2 values) ##' @author Anne Thomas -##' -partition_roots <- function(roots, rtsize){ - if(length(rtsize) > 1 && length(rtsize) == length(roots)){ +##' +partition_roots <- function(roots, rtsize) { + if (length(rtsize) > 1 && length(rtsize) == length(roots)) { threshold <- .002 epsilon <- .0005 - #find index of threshold in rtsize closest to .002 - rtsize_thresh_idx <- which.min(sapply(rtsize-threshold,abs)) + # find index of threshold in rtsize closest to .002 + rtsize_thresh_idx <- which.min(sapply(rtsize - threshold, abs)) rtsize_thresh <- rtsize[rtsize_thresh_idx] - if(abs(rtsize_thresh-threshold) > epsilon){ - PEcAn.logger::logger.error(paste("Closest rtsize to fine root threshold of", threshold, "m (", rtsize_thresh, - ") is greater than", epsilon, - "m off; fine roots can't be partitioned. Please improve rtsize dimensions.")) + if (abs(rtsize_thresh - threshold) > epsilon) { + PEcAn.logger::logger.error(paste( + "Closest rtsize to fine root threshold of", threshold, "m (", rtsize_thresh, + ") is greater than", epsilon, + "m off; fine roots can't be partitioned. Please improve rtsize dimensions." 
+ )) return(NULL) - } else{ - #sum fine roots from lowest group through group below threshold and coarse from group including threshold to the highest - fine.roots <- sum(roots[1:rtsize_thresh_idx-1]) + } else { + # sum fine roots from lowest group through group below threshold and coarse from group including threshold to the highest + fine.roots <- sum(roots[1:rtsize_thresh_idx - 1]) coarse.roots <- sum(roots) - fine.roots - if(fine.roots >= 0 && coarse.roots >= 0){ + if (fine.roots >= 0 && coarse.roots >= 0) { PEcAn.logger::logger.info("Using partitioned root values", fine.roots, "for fine and", coarse.roots, "for coarse.") return(list(fine.roots = fine.roots, coarse.roots = coarse.roots)) - } else{ + } else { PEcAn.logger::logger.error("Roots could not be partitioned (fine or coarse is less than 0).") return(NULL) } @@ -36,4 +38,4 @@ partition_roots <- function(roots, rtsize){ PEcAn.logger::logger.error("Inadequate or incorrect number of levels of rtsize associated with roots; please ensure roots and rtsize lengths match and are greater than 1.") return(NULL) } -} \ No newline at end of file +} diff --git a/modules/data.land/R/plot2AGB.R b/modules/data.land/R/plot2AGB.R index 71f18969511..1d07dd1f78b 100644 --- a/modules/data.land/R/plot2AGB.R +++ b/modules/data.land/R/plot2AGB.R @@ -1,97 +1,94 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- ##' convert composite ring & census data into AGB -##' +##' ##' @name plot2AGB ##' @title plot2AGB -##' +##' ##' @param combined data frame merging plot inventory and tree ring data ##' @param out MCMC samples for diameter (sample x tree) ##' @param outfolder output folder for graphs & data ##' @param allom.stats Allometry statistics computed by `AllomAve` ##' @param unit.conv area conversion from sum(kg/tree) to kg/area -##' +##' ##' @author Mike Dietze \email{dietze@@bu.edu} ##' @export plot2AGB <- function(combined, out, outfolder, allom.stats, unit.conv = 0.02) { - ## Jenkins: hemlock (kg) b0 <- -2.5384 b1 <- 2.4814 - + ## Allometric statistics - b0 <- allom.stats[[1]][[6]]$statistics["Bg0", "Mean"] - b1 <- allom.stats[[1]][[6]]$statistics["Bg1", "Mean"] - B <- allom.stats[[1]][[6]]$statistics[c("Bg0", "Bg1"), "Mean"] + b0 <- allom.stats[[1]][[6]]$statistics["Bg0", "Mean"] + b1 <- allom.stats[[1]][[6]]$statistics["Bg1", "Mean"] + B <- allom.stats[[1]][[6]]$statistics[c("Bg0", "Bg1"), "Mean"] Bcov <- allom.stats[[1]][[6]]$cov[c("Bg0", "Bg1"), c("Bg0", "Bg1")] - Bsd <- sqrt(allom.stats[[1]][[6]]$statistics["Sg", "Mean"]) - + Bsd <- sqrt(allom.stats[[1]][[6]]$statistics["Sg", "Mean"]) + ## prep data out[out < 0.1] <- 0.1 - nrep <- nrow(out) - ntree <- nrow(combined) - nt <- ncol(out) / ntree - mplot <- 1 ## later need to generalize to splitting up plots + nrep <- nrow(out) + ntree <- nrow(combined) + nt <- ncol(out) / ntree + mplot <- 1 ## later need to generalize to splitting up plots ijindex <- matrix(1, ntree, 1) - yrvec <- as.numeric(colnames(combined)) - yrvec <- yrvec[!is.na(yrvec)] - + yrvec <- as.numeric(colnames(combined)) + yrvec <- yrvec[!is.na(yrvec)] + ## set 
up storage NPP <- array(NA, c(mplot, nrep, nt - 1)) AGB <- array(NA, c(mplot, nrep, nt)) - biomass_tsca <- array(NA, c(mplot, nrep, nt)) + biomass_tsca <- array(NA, c(mplot, nrep, nt)) biomass_acsa3 <- array(NA, c(mplot, nrep, nt)) biomass_beal2 <- array(NA, c(mplot, nrep, nt)) biomass_thoc2 <- array(NA, c(mplot, nrep, nt)) - + ## sample over tree chronologies pb <- utils::txtProgressBar(min = 0, max = nrep, style = 3) for (g in seq_len(nrep)) { - ## Draw allometries b <- mvtnorm::rmvnorm(1, B, Bcov) - + ## convert tree diameter to biomass biomass <- matrix(exp(b[1] + b[2] * log(out[g, ])), ntree, nt) - + for (j in seq_len(mplot)) { - ## aggregate to stand AGB AGB[j, g, ] <- apply(biomass, 2, sum, na.rm = TRUE) * unit.conv # AGB[j,g,] <- apply(biomass[ijindex[,1]==j,],2,sum,na.rm=TRUE)*unit.conv - + biomass_tsca[j, g, ] <- apply(biomass[combined$SPP == "TSCA", ], 2, sum, na.rm = TRUE) * unit.conv biomass_acsa3[j, g, ] <- apply(biomass[combined$SPP == "ACSA3", ], 2, sum, na.rm = TRUE) * unit.conv biomass_beal2[j, g, ] <- apply(biomass[combined$SPP == "BEAL2", ], 2, sum, na.rm = TRUE) * unit.conv biomass_thoc2[j, g, ] <- apply(biomass[combined$SPP == "THOC2", ], 2, sum, na.rm = TRUE) * unit.conv - + ## diff to get NPP NPP[j, g, ] <- diff(AGB[j, g, ]) } utils::setTxtProgressBar(pb, g) } - + mAGB <- sAGB <- matrix(NA, mplot, nt) mNPP <- sNPP <- matrix(NA, mplot, nt - 1) - - mbiomass_tsca <- sbiomass_tsca <- matrix(NA, mplot, nt) + + mbiomass_tsca <- sbiomass_tsca <- matrix(NA, mplot, nt) mbiomass_acsa3 <- sbiomass_acsa3 <- matrix(NA, mplot, nt) mbiomass_beal2 <- sbiomass_beal2 <- matrix(NA, mplot, nt) mbiomass_thoc2 <- sbiomass_thoc2 <- matrix(NA, mplot, nt) - + for (i in seq_len(mplot)) { mNPP[i, ] <- apply(NPP[i, , ], 2, mean, na.rm = TRUE) sNPP[i, ] <- apply(NPP[i, , ], 2, stats::sd, na.rm = TRUE) mAGB[i, ] <- apply(AGB[i, , ], 2, mean, na.rm = TRUE) sAGB[i, ] <- apply(AGB[i, , ], 2, stats::sd, na.rm = TRUE) - - mbiomass_tsca[i, ] <- apply(biomass_tsca[i, , ], 2, mean, na.rm = TRUE) - sbiomass_tsca[i, ] <- apply(biomass_tsca[i, , ], 2, stats::sd, na.rm = TRUE) + + mbiomass_tsca[i, ] <- apply(biomass_tsca[i, , ], 2, mean, na.rm = TRUE) + sbiomass_tsca[i, ] <- apply(biomass_tsca[i, , ], 2, stats::sd, na.rm = TRUE) mbiomass_acsa3[i, ] <- apply(biomass_acsa3[i, , ], 2, mean, na.rm = TRUE) sbiomass_acsa3[i, ] <- apply(biomass_acsa3[i, , ], 2, stats::sd, na.rm = TRUE) mbiomass_beal2[i, ] <- apply(biomass_beal2[i, , ], 2, mean, na.rm = TRUE) @@ -99,28 +96,31 @@ plot2AGB <- function(combined, out, outfolder, allom.stats, unit.conv = 0.02) { mbiomass_thoc2[i, ] <- apply(biomass_thoc2[i, , ], 2, mean, na.rm = TRUE) sbiomass_thoc2[i, ] <- apply(biomass_thoc2[i, , ], 2, stats::sd, na.rm = TRUE) } - + grDevices::pdf(file.path(outfolder, "plot2AGB.pdf")) graphics::par(mfrow = c(2, 1)) for (i in seq_len(mplot)) { - up <- mNPP[i, ] + sNPP[i, ] * 1.96 + up <- mNPP[i, ] + sNPP[i, ] * 1.96 low <- mNPP[i, ] - sNPP[i, ] * 1.96 graphics::plot(yrvec[-1], mNPP[i, ], ylim = range(c(up, low)), ylab = "Mg/ha/yr", xlab = "year") graphics::lines(yrvec[-1], up) graphics::lines(yrvec[-1], low) - upA <- mAGB[i, ] + sAGB[i, ] * 1.96 + upA <- mAGB[i, ] + sAGB[i, ] * 1.96 lowA <- mAGB[i, ] - sAGB[i, ] * 1.96 graphics::plot(yrvec, mAGB[i, ], ylim = range(c(upA, lowA)), ylab = "Mg/ha", xlab = "year") graphics::lines(yrvec, upA) graphics::lines(yrvec, lowA) } grDevices::dev.off() - - save(AGB, NPP, mNPP, sNPP, mAGB, sAGB, yrvec, - mbiomass_tsca, sbiomass_tsca, mbiomass_acsa3, sbiomass_acsa3, - mbiomass_beal2, sbiomass_beal2, 
mbiomass_thoc2, sbiomass_thoc2, - file = file.path(outfolder, "plot2AGB.Rdata")) - return(list(AGB = AGB, NPP = NPP, - biomass_tsca = biomass_tsca, biomass_acsa3 = biomass_acsa3, - biomass_beal2 = biomass_beal2, biomass_thoc2 = biomass_thoc2)) + + save(AGB, NPP, mNPP, sNPP, mAGB, sAGB, yrvec, + mbiomass_tsca, sbiomass_tsca, mbiomass_acsa3, sbiomass_acsa3, + mbiomass_beal2, sbiomass_beal2, mbiomass_thoc2, sbiomass_thoc2, + file = file.path(outfolder, "plot2AGB.Rdata") + ) + return(list( + AGB = AGB, NPP = NPP, + biomass_tsca = biomass_tsca, biomass_acsa3 = biomass_acsa3, + biomass_beal2 = biomass_beal2, biomass_thoc2 = biomass_thoc2 + )) } # plot2AGB diff --git a/modules/data.land/R/pool_ic_list2netcdf.R b/modules/data.land/R/pool_ic_list2netcdf.R index 756c359e321..31c700d0f52 100644 --- a/modules/data.land/R/pool_ic_list2netcdf.R +++ b/modules/data.land/R/pool_ic_list2netcdf.R @@ -10,56 +10,58 @@ ##' ##' @author Anne Thomas -pool_ic_list2netcdf <- function(input, outdir, siteid, ens=NA){ - if(is.null(input$vals) || length(input$vals) == 0){ +pool_ic_list2netcdf <- function(input, outdir, siteid, ens = NA) { + if (is.null(input$vals) || length(input$vals) == 0) { PEcAn.logger::logger.severe("Please provide 'vals' list in input with variable names assigned to values") } - - if(is.null(input$dims) || length(input$dims) == 0){ - if (any(sapply(input$vals,length) > 1)){ + + if (is.null(input$dims) || length(input$dims) == 0) { + if (any(sapply(input$vals, length) > 1)) { PEcAn.logger::logger.severe("A variable has length > 1; please provide non-empty 'dims' list in input") } } - #to do: check - + # to do: check + dims <- list() - for(dimname in names(input$dims)){ + for (dimname in names(input$dims)) { vals <- input$dims[[which(names(input$dims) == dimname)]] - ncdim = PEcAn.utils::to_ncdim(dimname, vals) + ncdim <- PEcAn.utils::to_ncdim(dimname, vals) dims[[dimname]] <- ncdim } - + ncvars <- lapply(names(input$vals), PEcAn.utils::to_ncvar, dims) - - #create nc file + + # create nc file str_ns <- paste0(siteid %/% 1e+09, "-", siteid %% 1e+09) - if (is.na(ens)){ + if (is.na(ens)) { basefile <- paste0("IC_site_", str_ns) - } else{ - basefile <- paste0("IC_site_", str_ns,"_",ens) + } else { + basefile <- paste0("IC_site_", str_ns, "_", ens) } - outfile <- file.path(outdir, paste0(basefile,".nc")) - nc <- ncdf4::nc_create(outfile, ncvars) - - #put variables in nc file + outfile <- file.path(outdir, paste0(basefile, ".nc")) + nc <- ncdf4::nc_create(outfile, ncvars) + + # put variables in nc file for (i in seq(ncvars)) { varname <- ncvars[[i]]$name ncdf4::ncvar_put(nc, ncvars[[i]], input$vals[[varname]]) } - - #close file + + # close file ncdf4::nc_close(nc) - - #create results object - results <- data.frame(file = outfile, - host = PEcAn.remote::fqdn(), - mimetype = "application/x-netcdf", - formatname = "pool_initial_conditions", - startdate = NA, - enddate = NA, - dbfile.name = basefile, - stringsAsFactors = FALSE) - + + # create results object + results <- data.frame( + file = outfile, + host = PEcAn.remote::fqdn(), + mimetype = "application/x-netcdf", + formatname = "pool_initial_conditions", + startdate = NA, + enddate = NA, + dbfile.name = basefile, + stringsAsFactors = FALSE + ) + return(results) -} #pool_ic_list2netcdf +} # pool_ic_list2netcdf diff --git a/modules/data.land/R/pool_ic_netcdf2list.R b/modules/data.land/R/pool_ic_netcdf2list.R index d488b004d45..339cca98a30 100644 --- a/modules/data.land/R/pool_ic_netcdf2list.R +++ b/modules/data.land/R/pool_ic_netcdf2list.R @@ -6,24 +6,22 @@ 
##' @param nc.path path to netcdf file containing standard dimensions and variables ##' @return list with two elements: list of netcdf dimensions (dims, with named values) and list of variables (vals, with named values) ##' @author Anne Thomas -pool_ic_netcdf2list <- function(nc.path){ +pool_ic_netcdf2list <- function(nc.path) { IC.nc <- try(ncdf4::nc_open(nc.path)) - on.exit(ncdf4::nc_close(IC.nc), add = FALSE) - if(!inherits(IC.nc, "try-error")) { + on.exit(ncdf4::nc_close(IC.nc), add = FALSE) + if (!inherits(IC.nc, "try-error")) { dims <- vector(mode = "list", length = length(IC.nc$dim)) names(dims) <- names(IC.nc$dim) - for(i in seq(IC.nc$dim)){ + for (i in seq(IC.nc$dim)) { dims[[i]] <- IC.nc$dim[[i]]$vals } vals <- vector(mode = "list", length = length(IC.nc$var)) names(vals) <- names(IC.nc$var) - for(varname in names(vals)){ - vals[[varname]] <- ncdf4::ncvar_get(IC.nc,varname) + for (varname in names(vals)) { + vals[[varname]] <- ncdf4::ncvar_get(IC.nc, varname) } return(list(dims = dims, vals = vals)) - } - else{ + } else { PEcAn.logger::logger.severe("Could not read IC file.") } - } diff --git a/modules/data.land/R/prepare_pools.R b/modules/data.land/R/prepare_pools.R index ecb8c2f3b44..5a3365c37d9 100644 --- a/modules/data.land/R/prepare_pools.R +++ b/modules/data.land/R/prepare_pools.R @@ -7,17 +7,17 @@ ##' @param constants list of constants; must include SLA in m2 / kg C if providing LAI for leaf carbon ##' @return list of pool values in kg C / m2 with generic names ##' @author Anne Thomas -prepare_pools <- function(nc.path, constants = NULL){ - #function to check that var was loaded (numeric) and has a valid value (not NA or negative) - is.valid <- function(var){ - return(all(is.numeric(var) && !is.na(var) && var >= 0)) +prepare_pools <- function(nc.path, constants = NULL) { + # function to check that var was loaded (numeric) and has a valid value (not NA or negative) + is.valid <- function(var) { + return(all(is.numeric(var) && !is.na(var) && var >= 0)) } - + IC.params <- list() - + if (!is.null(nc.path)) { IC.list <- PEcAn.data.land::pool_ic_netcdf2list(nc.path) - if(!is.null(IC.list)){ + if (!is.null(IC.list)) { ### load biomass variables from IC list; will be NULL if not present (checked for later) TotLivBiom <- IC.list$vals$TotLivBiom leaf <- IC.list$vals$leaf_carbon_content @@ -27,89 +27,88 @@ prepare_pools <- function(nc.path, constants = NULL){ roots <- IC.list$vals$root_carbon_content fine.roots <- IC.list$vals$fine_root_carbon_content coarse.roots <- IC.list$vals$coarse_root_carbon_content - + ### load non-living variables litter <- IC.list$vals$litter_carbon_content soil <- IC.list$vals$soil_organic_carbon_content wood.debris <- IC.list$vals$wood_debris_carbon_content - + # check if total roots are partitionable # note: if roots are partitionable, they will override fine_ and/or coarse_root_carbon_content if loaded - if(is.valid(roots)){ - if("rtsize" %in% names(IC.list$dims)){ + if (is.valid(roots)) { + if ("rtsize" %in% names(IC.list$dims)) { PEcAn.logger::logger.info("prepare_pools: Attempting to partition root_carbon_content") rtsize <- IC.list$dims$rtsize part_roots <- PEcAn.data.land::partition_roots(roots, rtsize) - if(!is.null(part_roots)){ + if (!is.null(part_roots)) { fine.roots <- part_roots$fine.roots coarse.roots <- part_roots$coarse.roots - } else{ + } else { PEcAn.logger::logger.error("prepare_pools: could not partition roots; please provide fine_root_carbon_content and coarse_root_carbon_content in netcdf.") } - } else{ + } else { 
PEcAn.logger::logger.error("prepare_pools: Please provide rtsize dimension with root_carbon_content to allow partitioning or provide fine_root_carbon_content and coarse_root_carbon_content in netcdf.") } - } else{ + } else { # proceed without error message } - - + + ### Calculate pools from IC list - + # initial canopy foliar carbon (kgC/m2) if (is.valid(leaf)) { - IC.params[["leaf"]] <- leaf - } else if(is.valid(LAI)){ + IC.params[["leaf"]] <- leaf + } else if (is.valid(LAI)) { sla <- constants$sla - if(!is.null(sla)){ - leaf <- LAI * 1/sla + if (!is.null(sla)) { + leaf <- LAI * 1 / sla PEcAn.logger::logger.info(paste("using LAI", LAI, "and SLA", sla, "to get leaf", leaf)) IC.params[["leaf"]] <- leaf - } else{ + } else { PEcAn.logger::logger.error("Could not convert LAI to leaf carbon without SLA; please include 'constants' list with named element 'sla'") } - } else if(is.valid(TotLivBiom) && is.valid(AbvGrndWood) && - is.valid(fine.roots) && is.valid(coarse.roots)){ - leaf <- (TotLivBiom - AbvGrndWood - fine.roots - coarse.roots) - if(leaf >= 0){ + } else if (is.valid(TotLivBiom) && is.valid(AbvGrndWood) && + is.valid(fine.roots) && is.valid(coarse.roots)) { + leaf <- (TotLivBiom - AbvGrndWood - fine.roots - coarse.roots) + if (leaf >= 0) { IC.params[["leaf"]] <- leaf - } else{ + } else { PEcAn.logger::logger.error("TotLivBiom is less than sum of AbvGrndWood and roots; will use default for leaf biomass") } } - #Calculate LAI given leaf and sla + # Calculate LAI given leaf and sla sla <- constants$sla if (!is.null(sla) && is.valid(leaf)) { LAI <- leaf * sla IC.params[["LAI"]] <- LAI } - + # initial pool of woody carbon (kgC/m2) - if (is.valid(wood)){ + if (is.valid(wood)) { IC.params[["wood"]] <- wood } else if (is.valid(AbvGrndWood)) { - if(is.valid(coarse.roots)){ - IC.params[["wood"]] <- (AbvGrndWood + coarse.roots) - } else{ + if (is.valid(coarse.roots)) { + IC.params[["wood"]] <- (AbvGrndWood + coarse.roots) + } else { PEcAn.logger::logger.error("prepare_pools can't calculate total woody biomass with only AbvGrndWood; checking for total biomass.") } - } else if (is.valid(TotLivBiom) && is.valid(leaf) && is.valid(fine.roots)){ - wood <- (TotLivBiom - leaf - fine.roots) - if (wood >= 0){ + } else if (is.valid(TotLivBiom) && is.valid(leaf) && is.valid(fine.roots)) { + wood <- (TotLivBiom - leaf - fine.roots) + if (wood >= 0) { IC.params[["wood"]] <- wood - }else if ((leaf + fine.roots) < (TotLivBiom * 1.25)){ + } else if ((leaf + fine.roots) < (TotLivBiom * 1.25)) { PEcAn.logger::logger.error(paste("prepare_pools: Sum of leaf (", leaf, ") and fine roots(", fine.roots, ") is greater than TotLivBiom (", TotLivBiom, "); will reapportion with woody biomass.")) - #estimate new woody biomass and reapportion - if(is.valid(coarse.roots)){ - #expand wood by coarse root to stem fraction (currently applied to both woody and non-woody) - root.wood.frac <- 0.2 #deciduous forest, White et al.2000 + # estimate new woody biomass and reapportion + if (is.valid(coarse.roots)) { + # expand wood by coarse root to stem fraction (currently applied to both woody and non-woody) + root.wood.frac <- 0.2 # deciduous forest, White et al.2000 stem.wood.frac <- 0.8 - wood <- coarse.roots + ((stem.wood.frac * coarse.roots) / root.wood.frac) #cross multiply for stem wood and add - - }else{ + wood <- coarse.roots + ((stem.wood.frac * coarse.roots) / root.wood.frac) # cross multiply for stem wood and add + } else { wood <- 0 } - #reapportion wood, leaf and fine roots within TotLivBiom + # reapportion wood, leaf and fine 
roots within TotLivBiom leaf.new <- (leaf / (leaf + wood + fine.roots)) * TotLivBiom roots.new <- (fine.roots / (leaf + wood + fine.roots)) * TotLivBiom wood.new <- (wood / (leaf + wood + fine.roots)) * TotLivBiom @@ -117,56 +116,54 @@ prepare_pools <- function(nc.path, constants = NULL){ IC.params[["leaf"]] <- leaf.new IC.params[["fine.roots"]] <- roots.new PEcAn.logger::logger.info(paste("prepare_pools: Using", wood.new, "for wood, ", leaf.new, "for leaf,", roots.new, " for fine roots.")) - } else{ + } else { PEcAn.logger::logger.severe(paste("prepare_pools: Sum of leaf (", leaf, ") and fine roots(", fine.roots, ") is more than 25% greater than TotLivBiom (", TotLivBiom, "); please check IC inputs.")) } - } else{ + } else { PEcAn.logger::logger.error("prepare_pools could not calculate woody biomass; will use defaults. Please provide AbvGrndWood and coarse_root_carbon OR leaf_carbon_content/LAI, fine_root_carbon_content, and TotLivBiom in netcdf.") } - + # initial pool of fine root carbon (kgC/m2) if (is.valid(fine.roots)) { - IC.params[["fine.roots"]] <- fine.roots - } else if(is.valid(TotLivBiom) && is.valid(AbvGrndWood) && - is.valid(leaf) && is.valid(coarse.roots)){ - fine.roots <- (TotLivBiom - AbvGrndWood - leaf - coarse.roots) - if(fine.roots >= 0){ + IC.params[["fine.roots"]] <- fine.roots + } else if (is.valid(TotLivBiom) && is.valid(AbvGrndWood) && + is.valid(leaf) && is.valid(coarse.roots)) { + fine.roots <- (TotLivBiom - AbvGrndWood - leaf - coarse.roots) + if (fine.roots >= 0) { IC.params[["fine.roots"]] <- fine.roots - } else{ + } else { PEcAn.logger::logger.error("TotLivBiom is less than sum of AbvGrndWood, coarse roots, and leaf; will use default for fine.roots biomass") } } - - + + # initial pool of litter carbon (kgC/m2) if (is.valid(litter)) { - IC.params[["litter"]] <- litter + IC.params[["litter"]] <- litter } - + # initial pool of soil organic matter (kgC/m2) - if(is.valid(soil)){ + if (is.valid(soil)) { IC.params[["soil"]] <- soil } else { soil <- IC.list$vals$soil_carbon_content - if(is.valid(soil)){ - IC.params[["soil"]] <- soil - } + if (is.valid(soil)) { + IC.params[["soil"]] <- soil + } } - + # initial pool of woody debris (kgC/m2) - if(is.valid(wood.debris)){ - IC.params[["wood.debris"]] <-sum(wood.debris) + if (is.valid(wood.debris)) { + IC.params[["wood.debris"]] <- sum(wood.debris) } - + return(IC.params) - } - else{ + } else { PEcAn.logger::logger.severe("Could not load initial conditions: output list is null") return(NULL) } - } - else{ + } else { PEcAn.logger::logger.severe("Could not load initial conditions: filepath is null") return(NULL) } -} \ No newline at end of file +} diff --git a/modules/data.land/R/put_veg_module.R b/modules/data.land/R/put_veg_module.R index f4fd515d2b2..816251d08cf 100644 --- a/modules/data.land/R/put_veg_module.R +++ b/modules/data.land/R/put_veg_module.R @@ -14,69 +14,68 @@ ##' @param new_site data frame, id/lat/lon/name info about the site ##' @param host list, host info as in settings$host, host$name forced to be "localhost" upstream ##' @param overwrite logical flag for convert_input -##' +##' ##' @export -##' +##' ##' @author Istem Fer put_veg_module <- function(getveg.id, dbparms, - input_veg, pfts, - outfolder, n.ensemble, - dir, machine, model, - start_date, end_date, - new_site, - host, overwrite){ - - - + input_veg, pfts, + outfolder, n.ensemble, + dir, machine, model, + start_date, end_date, + new_site, + host, overwrite) { 
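  # Overall flow: look up the model-specific IC format and mimetype
  # (modeltypes -> modeltypes_formats -> formats -> mimetypes), then pass the
  # species file produced upstream by get_veg_module to PEcAn.DB::convert_input(),
  # which runs write_ic from PEcAn.data.land; the returned putveg.id is later
  # used by ic_process to fill settings$run$inputs.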
#--------------------------------------------------------------------------------------------------# # Write model specific IC files con <- PEcAn.DB::db.open(dbparms$bety) on.exit(PEcAn.DB::db.close(con), add = TRUE) - + # Determine IC file format name and mimetype if (!is.null(input_veg$output)) { - model_info <- PEcAn.DB::db.query(paste0("SELECT f.name, f.id, mt.type_string from modeltypes as m", " join modeltypes_formats as mf on m.id = mf.modeltype_id", - " join formats as f on mf.format_id = f.id", " join mimetypes as mt on f.mimetype_id = mt.id", - " where m.name = '", model, "' AND mf.tag='", input_veg$output,"'"), con) + model_info <- PEcAn.DB::db.query(paste0( + "SELECT f.name, f.id, mt.type_string from modeltypes as m", " join modeltypes_formats as mf on m.id = mf.modeltype_id", + " join formats as f on mf.format_id = f.id", " join mimetypes as mt on f.mimetype_id = mt.id", + " where m.name = '", model, "' AND mf.tag='", input_veg$output, "'" + ), con) formatname <- model_info[1] - mimetype <- model_info[3] - }else { + mimetype <- model_info[3] + } else { PEcAn.logger::logger.error("Missing required information input_veg$output, this should be the tag identifier from the modeltypes_formats table") } - + PEcAn.logger::logger.info("Begin Model Specific Conversion") - + # spp.file <- db.query(paste("SELECT * from dbfiles where container_id =", getveg.id), con) spp.file <- PEcAn.DB::db.query(paste0("SELECT * from dbfiles where id = ", getveg.id$dbfile.id), con) - pkg <- "PEcAn.data.land" - fcn <- "write_ic" - - host.inputargs = host - - putveg.id <- PEcAn.DB::convert_input(input.id = getveg.id$input.id, - outfolder = spp.file$file_path, - formatname = formatname, - mimetype = mimetype, - site.id = new_site$id, - start_date = start_date, end_date = end_date, - pkg = pkg, fcn = fcn, - con = con, host = host, - write = TRUE, - overwrite = overwrite, - # fcn specific args - in.path = spp.file$file_path, - in.name = spp.file$file_name, - model = model, - new_site = new_site, - pfts = pfts, - source = input_veg$source, - n.ensemble = n.ensemble, - host.inputargs = host.inputargs) - - + pkg <- "PEcAn.data.land" + fcn <- "write_ic" - return(putveg.id) + host.inputargs <- host + putveg.id <- PEcAn.DB::convert_input( + input.id = getveg.id$input.id, + outfolder = spp.file$file_path, + formatname = formatname, + mimetype = mimetype, + site.id = new_site$id, + start_date = start_date, end_date = end_date, + pkg = pkg, fcn = fcn, + con = con, host = host, + write = TRUE, + overwrite = overwrite, + # fcn specific args + in.path = spp.file$file_path, + in.name = spp.file$file_name, + model = model, + new_site = new_site, + pfts = pfts, + source = input_veg$source, + n.ensemble = n.ensemble, + host.inputargs = host.inputargs + ) + + + return(putveg.id) } diff --git a/modules/data.land/R/read.plot.R b/modules/data.land/R/read.plot.R index 56e8f274ac0..42e99e725d3 100644 --- a/modules/data.land/R/read.plot.R +++ b/modules/data.land/R/read.plot.R @@ -1,18 +1,18 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- read.plot <- function(file) { - dat <- utils::read.csv(file) + dat <- utils::read.csv(file) plot <- dat[, which(toupper(names(dat)) == "PLOT")] tree <- dat[, which(toupper(names(dat)) == "TREE")] - spp <- as.character(dat[, which(toupper(names(dat)) == "SPECIES")]) - dbh <- dat[, which(toupper(names(dat)) == "DBH")] - + spp <- as.character(dat[, which(toupper(names(dat)) == "SPECIES")]) + dbh <- dat[, which(toupper(names(dat)) == "DBH")] + return(data.frame(plot = plot, tree = tree, spp = spp, dbh = dbh)) } # read.plot diff --git a/modules/data.land/R/read.velmex.R b/modules/data.land/R/read.velmex.R index af030fc53c8..10d9b64b8ed 100644 --- a/modules/data.land/R/read.velmex.R +++ b/modules/data.land/R/read.velmex.R @@ -1,7 +1,6 @@ ## read velmex non-tuscon files read.velmex <- function(folder) { - filenames <- dir(folder, pattern = ".txt$", full.names = TRUE) filedata <- list() for (file in filenames) { @@ -13,6 +12,6 @@ read.velmex <- function(folder) { filedata[[basename(file)]] <- c(0, dat[1:(notes[1] - 1), 2]) } # names(filedata) <- basename(filenames) - + return(filedata) } # read.velmex diff --git a/modules/data.land/R/sample_ic.R b/modules/data.land/R/sample_ic.R index 765636d7ce3..e43fa777ed2 100644 --- a/modules/data.land/R/sample_ic.R +++ b/modules/data.land/R/sample_ic.R @@ -1,6 +1,6 @@ ##' @name sample_ic ##' @title sample_ic -##' +##' ##' @param in.path path to folder of the file to be sampled ##' @param in.name file name of the file to be sampled ##' @param start_date date in "YYYY-MM-DD" format @@ -13,188 +13,185 @@ ##' @param bin_size bin size for sampling, DEFAULT is 10 ##' @param bin_herb_soil if we want to use bin size for both herb and soil sampling ##' @param ... Other inputs -##' +##' ##' @export ##' @author Istem Fer sample_ic <- function(in.path, in.name, start_date, end_date, outfolder, - n.ensemble, machine_host, source, bin_var = "DBH", bin_size = 10, bin_herb_soil = TRUE, ...){ - - + n.ensemble, machine_host, source, bin_var = "DBH", bin_size = 10, bin_herb_soil = TRUE, ...) { #--------------------------------------------------------------------------------------------------# # Read rds_file <- file.path(in.path, in.name) - veg_info <- readRDS(rds_file) - - + veg_info <- readRDS(rds_file) + + #--------------------------------------------------------------------------------------------------# # Prepare for sampling # NOTE: This function might call different functions in the future, e.g. 
: sample_cohort, sample_pool, or both # Then, the rest of the script would change, this is cohort-based only - - #--------------------------------------------------------------------------------------------------# - # Sampling - #loop over ensemble members - sppfilename <- rep(NA, n.ensemble) - for (ens in 1:n.ensemble) { - - veg_ens <- veg_info - - #sample DBH - if("DBH" %in% bin_var){ - bin_Var <- "DBH" - obs <- as.data.frame(veg_info[[2]], stringsAsFactors = FALSE) - - year.start <- lubridate::year(start_date) - year.end <- lubridate::year(end_date) - - # subset samples for the year - samples <- obs[obs$year >= year.start & obs$year <= year.end, ] - - # remove rows with NAs (we don't want DBH to be NA but do we want to allow missing taxa?) - #samples <- samples[complete.cases(samples), ] - samples <- samples[!is.na(samples[bin_Var]), ] - - # if there are subplots, sample within each subplot instead of pooling all together, maybe pass down a flag if we want to pool anyway - if(!is.null(samples$Subplot)){ - n.subplot <- length(unique(samples$Subplot)) - subplot.n <- unique(samples$Subplot) - }else{ - n.subplot <- 1 - samples$Subplot <- 1 - subplot.n <- 1 - } - sub.list <- list() - - for(np in seq_len(n.subplot)){ - samples_sub <- samples[samples$Subplot == subplot.n[np],] - - # we can use Tree_number as the index for tapply and sample 1 from MCMC samples - if (!is.null(samples_sub$Tree_number)) { - samp_ind <- tapply(seq_along(samples_sub$Tree_number), samples_sub$Tree_number, sample, 1) - - } else { - #don't have MCMC samples, instead re-sample trees stratified by size - #not every dataset will call DBH DBH, and 10 should be a variable. Add parameter for bin_size and bin_var with defaults set to DBH and 10 - size <- ceiling(samples_sub[,which(colnames(samples_sub)==bin_Var)]/bin_size) - samp_ind <- unlist(tapply(seq_along(size), size, function(x){sample(x, length(x), replace = TRUE)}, simplify = TRUE)) - } - sub_samp <- samples_sub[samp_ind,] - - sub.list[[np]] <- sub_samp - } - veg_ens[[2]] <- do.call("rbind", sub.list) + + #--------------------------------------------------------------------------------------------------# + # Sampling + # loop over ensemble members + sppfilename <- rep(NA, n.ensemble) + for (ens in 1:n.ensemble) { + veg_ens <- veg_info + + # sample DBH + if ("DBH" %in% bin_var) { + bin_Var <- "DBH" + obs <- as.data.frame(veg_info[[2]], stringsAsFactors = FALSE) + + year.start <- lubridate::year(start_date) + year.end <- lubridate::year(end_date) + + # subset samples for the year + samples <- obs[obs$year >= year.start & obs$year <= year.end, ] + + # remove rows with NAs (we don't want DBH to be NA but do we want to allow missing taxa?) + # samples <- samples[complete.cases(samples), ] + samples <- samples[!is.na(samples[bin_Var]), ] + + # if there are subplots, sample within each subplot instead of pooling all together, maybe pass down a flag if we want to pool anyway + if (!is.null(samples$Subplot)) { + n.subplot <- length(unique(samples$Subplot)) + subplot.n <- unique(samples$Subplot) + } else { + n.subplot <- 1 + samples$Subplot <- 1 + subplot.n <- 1 } - - #sample Herb - if("dryMass" %in% bin_var){ - bin_Var <- "dryMass" - obs <- as.data.frame(veg_info[[1]], stringsAsFactors = FALSE) - - year.start <- lubridate::year(start_date) - year.end <- lubridate::year(end_date) - - # subset samples for the year - samples <- obs[obs$year >= year.start & obs$year <= year.end, ] - - # remove rows with NAs (we don't want DBH to be NA but do we want to allow missing taxa?) 
- #samples <- samples[complete.cases(samples), ] - samples <- samples[!is.na(samples[bin_Var]), ] - - # if there are subplots, sample within each subplot instead of pooling all together, maybe pass down a flag if we want to pool anyway - if(!is.null(samples$plot)){ - n.plot <- length(unique(samples$plot)) - plot.n <- unique(samples$plot) - }else{ - n.plot <- 1 - samples$plot <- 1 - plot.n <- 1 - } - - #if we are using bin_size to sample - if(bin_herb_soil){ - if(n.plot>bin_size){ - plot_bin_size <- bin_size - }else{ - plot_bin_size <- ceiling(n.plot/5) - } - }else{ - plot_bin_size <- 1 - } - - sub.list <- list() - sample_plots <- sample(plot.n, plot_bin_size) - for(np in 1:length(sample_plots)){ - samples_sub <- samples[samples$plot == sample_plots[np],] - sub.list[[np]] <- samples_sub + sub.list <- list() + + for (np in seq_len(n.subplot)) { + samples_sub <- samples[samples$Subplot == subplot.n[np], ] + + # we can use Tree_number as the index for tapply and sample 1 from MCMC samples + if (!is.null(samples_sub$Tree_number)) { + samp_ind <- tapply(seq_along(samples_sub$Tree_number), samples_sub$Tree_number, sample, 1) + } else { + # don't have MCMC samples, instead re-sample trees stratified by size + # not every dataset will call DBH DBH, and 10 should be a variable. Add parameter for bin_size and bin_var with defaults set to DBH and 10 + size <- ceiling(samples_sub[, which(colnames(samples_sub) == bin_Var)] / bin_size) + samp_ind <- unlist(tapply(seq_along(size), size, function(x) { + sample(x, length(x), replace = TRUE) + }, simplify = TRUE)) } - veg_ens[[1]] <- do.call("rbind", sub.list) + sub_samp <- samples_sub[samp_ind, ] + + sub.list[[np]] <- sub_samp } - - #sample Soil Carbon - if("SoilCarbon" %in% bin_var){ - bin_Var <- "SoilCarbon" - obs <- as.data.frame(veg_info[[3]], stringsAsFactors = FALSE) - - year.start <- lubridate::year(start_date) - year.end <- lubridate::year(end_date) - - # subset samples for the year - samples <- obs[obs$year >= year.start & obs$year <= year.end, ] - - # remove rows with NAs (we don't want DBH to be NA but do we want to allow missing taxa?) - #samples <- samples[complete.cases(samples), ] - samples <- samples[!is.na(samples[bin_Var]), ] - - # if there are subplots, sample within each subplot instead of pooling all together, maybe pass down a flag if we want to pool anyway - if(!is.null(samples$plot)){ - n.plot <- length(unique(samples$plot)) - plot.n <- unique(samples$plot) - }else{ - n.plot <- 1 - samples$plot <- 1 - plot.n <- 1 - } - - #if we are using bin_size to sample - if(bin_herb_soil){ - if(n.plot>bin_size){ - plot_bin_size <- bin_size - }else{ - plot_bin_size <- ceiling(n.plot/5) - } - }else{ - plot_bin_size <- 1 + veg_ens[[2]] <- do.call("rbind", sub.list) + } + + # sample Herb + if ("dryMass" %in% bin_var) { + bin_Var <- "dryMass" + obs <- as.data.frame(veg_info[[1]], stringsAsFactors = FALSE) + + year.start <- lubridate::year(start_date) + year.end <- lubridate::year(end_date) + + # subset samples for the year + samples <- obs[obs$year >= year.start & obs$year <= year.end, ] + + # remove rows with NAs (we don't want DBH to be NA but do we want to allow missing taxa?) 
+ # samples <- samples[complete.cases(samples), ] + samples <- samples[!is.na(samples[bin_Var]), ] + + # if there are subplots, sample within each subplot instead of pooling all together, maybe pass down a flag if we want to pool anyway + if (!is.null(samples$plot)) { + n.plot <- length(unique(samples$plot)) + plot.n <- unique(samples$plot) + } else { + n.plot <- 1 + samples$plot <- 1 + plot.n <- 1 + } + + # if we are using bin_size to sample + if (bin_herb_soil) { + if (n.plot > bin_size) { + plot_bin_size <- bin_size + } else { + plot_bin_size <- ceiling(n.plot / 5) } - - sub.list <- list() - sample_plots <- sample(plot.n, plot_bin_size) - for(np in 1:length(sample_plots)){ - samples_sub <- samples[samples$plot == sample_plots[np],] - sub.list[[np]] <- samples_sub + } else { + plot_bin_size <- 1 + } + + sub.list <- list() + sample_plots <- sample(plot.n, plot_bin_size) + for (np in 1:length(sample_plots)) { + samples_sub <- samples[samples$plot == sample_plots[np], ] + sub.list[[np]] <- samples_sub + } + veg_ens[[1]] <- do.call("rbind", sub.list) + } + + # sample Soil Carbon + if ("SoilCarbon" %in% bin_var) { + bin_Var <- "SoilCarbon" + obs <- as.data.frame(veg_info[[3]], stringsAsFactors = FALSE) + + year.start <- lubridate::year(start_date) + year.end <- lubridate::year(end_date) + + # subset samples for the year + samples <- obs[obs$year >= year.start & obs$year <= year.end, ] + + # remove rows with NAs (we don't want DBH to be NA but do we want to allow missing taxa?) + # samples <- samples[complete.cases(samples), ] + samples <- samples[!is.na(samples[bin_Var]), ] + + # if there are subplots, sample within each subplot instead of pooling all together, maybe pass down a flag if we want to pool anyway + if (!is.null(samples$plot)) { + n.plot <- length(unique(samples$plot)) + plot.n <- unique(samples$plot) + } else { + n.plot <- 1 + samples$plot <- 1 + plot.n <- 1 + } + + # if we are using bin_size to sample + if (bin_herb_soil) { + if (n.plot > bin_size) { + plot_bin_size <- bin_size + } else { + plot_bin_size <- ceiling(n.plot / 5) } - veg_ens[[3]] <- do.call("rbind", sub.list) + } else { + plot_bin_size <- 1 } - #--------------------------------------------------------------------------------------------------# - # Write vegetation data as rds, return results to convert_input - - # write with ensemble number - sppfilename[ens] <- write_veg(outfolder, start_date, veg_info = veg_ens, paste0(source, "_ens", ens)) - + + sub.list <- list() + sample_plots <- sample(plot.n, plot_bin_size) + for (np in 1:length(sample_plots)) { + samples_sub <- samples[samples$plot == sample_plots[np], ] + sub.list[[np]] <- samples_sub + } + veg_ens[[3]] <- do.call("rbind", sub.list) + } + #--------------------------------------------------------------------------------------------------# + # Write vegetation data as rds, return results to convert_input + + # write with ensemble number + sppfilename[ens] <- write_veg(outfolder, start_date, veg_info = veg_ens, paste0(source, "_ens", ens)) } # Build results dataframe for convert_input - results <- data.frame(file = sppfilename, - host = machine_host, - mimetype = "application/rds", - formatname = "spp.info", - startdate = start_date, - enddate = end_date, - dbfile.name = basename(sppfilename), - stringsAsFactors = FALSE) - + results <- data.frame( + file = sppfilename, + host = machine_host, + mimetype = "application/rds", + formatname = "spp.info", + startdate = start_date, + enddate = end_date, + dbfile.name = basename(sppfilename), + stringsAsFactors = 
FALSE + ) + ### return for convert_inputs - return(invisible(results)) - - -} # sample_ic \ No newline at end of file + return(invisible(results)) +} # sample_ic diff --git a/modules/data.land/R/soil2netcdf.R b/modules/data.land/R/soil2netcdf.R index 8f3afb1affc..7d1f0b833fa 100644 --- a/modules/data.land/R/soil2netcdf.R +++ b/modules/data.land/R/soil2netcdf.R @@ -2,13 +2,13 @@ #' #' A table of standard names and units can be displayed by running #' soil.units() without any arguements -#' +#' #' soil_params is called internally to estimate additional soil physical #' parameters from sand/silt/clay & bulk density. Will not overwrite any #' provided values #' #' Need to expand to alternatively take soil_type (texture class) as an input -#' +#' #' On output, soil_type named class is converted to a number because netCDF is a #' pain for storing strings. Conversion back can be done by #' load(system.file ("data/soil_class.RData",package = "PEcAn.data.land")) @@ -20,71 +20,79 @@ #' #' @return none #' @export -#' +#' #' @examples -#' \dontrun{ soil.data <- list(fraction_of_sand_in_soil = c -#' (0.3,0.4,0.5), fraction_of_clay_in_soil = c(0.3,0.3,0.3), soil_depth = c -#' (0.2,0.5,1.0)) -#' -#' soil2netcdf(soil.data,"soil.nc") } -soil2netcdf <- function(soil.data, new.file){ +#' \dontrun{ +#' soil.data <- list(fraction_of_sand_in_soil = c +#' (0.3, 0.4, 0.5), fraction_of_clay_in_soil = c(0.3, 0.3, 0.3), soil_depth = c +#' (0.2, 0.5, 1.0)) +#' +#' soil2netcdf(soil.data, "soil.nc") +#' } +soil2netcdf <- function(soil.data, new.file) { soil.data <- as.list(soil.data) ## convert soil type to parameters via look-up-table / equations - mysoil <- PEcAn.data.land::soil_params(sand=soil.data$fraction_of_sand_in_soil, - silt=soil.data$fraction_of_silt_in_soil, - clay=soil.data$fraction_of_clay_in_soil, - bulk=soil.data$soil_bulk_density, - soil_type=soil.data$soil_type) - + mysoil <- PEcAn.data.land::soil_params( + sand = soil.data$fraction_of_sand_in_soil, + silt = soil.data$fraction_of_silt_in_soil, + clay = soil.data$fraction_of_clay_in_soil, + bulk = soil.data$soil_bulk_density, + soil_type = soil.data$soil_type + ) + ## Merge in new variables - for(n in seq_along(mysoil)){ - if(!(names(mysoil)[n] %in% names(soil.data))){ + for (n in seq_along(mysoil)) { + if (!(names(mysoil)[n] %in% names(soil.data))) { soil.data[[names(mysoil)[n]]] <- mysoil[[n]] } } - + ## convert soil_type to number soil.data$soil_type <- soil.data$soil_n soil.data$soil_n <- NULL - + ## create depth dimension - depth <- ncdf4::ncdim_def(name = "depth", units = "meters", vals = soil.data$soil_depth, create_dimvar = TRUE) + depth <- ncdf4::ncdim_def(name = "depth", units = "meters", vals = soil.data$soil_depth, create_dimvar = TRUE) soil.data$soil_depth <- NULL ## deleting so don't ALSO write as a variable - + ## create netCDF variables ncvar <- list() good_vars <- 0 - for(n in seq_along(soil.data)){ - if(all(is.null(soil.data[[n]])) | all(is.na(soil.data[[n]]))) next + for (n in seq_along(soil.data)) { + if (all(is.null(soil.data[[n]])) | all(is.na(soil.data[[n]]))) next varname <- names(soil.data)[n] - if(length(soil.data[[n]])>1){ + if (length(soil.data[[n]]) > 1) { ## if vector, save by depth - good_vars <- c(good_vars,n) - ncvar[[n]] <- ncdf4::ncvar_def(name = varname, - units = soil.units(varname), - dim = depth) - }else { + good_vars <- c(good_vars, n) + ncvar[[n]] <- ncdf4::ncvar_def( + name = varname, + units = soil.units(varname), + dim = depth + ) + } else { ## else save as scalar - good_vars <- c(good_vars,n) - ncvar[[n]] <- 
ncdf4::ncvar_def(name = varname, - units = soil.units(varname), - dim=list()) + good_vars <- c(good_vars, n) + ncvar[[n]] <- ncdf4::ncvar_def( + name = varname, + units = soil.units(varname), + dim = list() + ) } } - if(length(good_vars)>1) { - good_vars <- good_vars[2:length(good_vars)] - ncvar <- ncvar[good_vars] - soil.data <- soil.data[good_vars] - ## create new file - nc <- ncdf4::nc_create(new.file, vars = ncvar) - - ## add data - for (i in seq_along(ncvar)) { - if(is.null(soil.data[[i]])|is.na(soil.data[[i]])) next - ncdf4::ncvar_put(nc, ncvar[[i]], soil.data[[i]]) - } - - ncdf4::nc_close(nc) + if (length(good_vars) > 1) { + good_vars <- good_vars[2:length(good_vars)] + ncvar <- ncvar[good_vars] + soil.data <- soil.data[good_vars] + ## create new file + nc <- ncdf4::nc_create(new.file, vars = ncvar) + + ## add data + for (i in seq_along(ncvar)) { + if (is.null(soil.data[[i]]) | is.na(soil.data[[i]])) next + ncdf4::ncvar_put(nc, ncvar[[i]], soil.data[[i]]) + } + + ncdf4::nc_close(nc) } } diff --git a/modules/data.land/R/soil_process.R b/modules/data.land/R/soil_process.R index 6ee57347b21..15e8be9b25d 100644 --- a/modules/data.land/R/soil_process.R +++ b/modules/data.land/R/soil_process.R @@ -10,103 +10,103 @@ #' @return path to soil file #' @export #' -#' -soil_process <- function(settings, input, dbfiles, overwrite = FALSE,run.local=TRUE){ - +#' +soil_process <- function(settings, input, dbfiles, overwrite = FALSE, run.local = TRUE) { # This tries to avoid the problem of having the soil tag under input but not having source in it. - if(is.null(input$source)){ - input$source <- "gSSURGO" ## temporarily hardcoding in the only source + if (is.null(input$source)) { + input$source <- "gSSURGO" ## temporarily hardcoding in the only source ## in the future this should throw an error - }else if(input$source=="PalEON_soil" && is.null(input$id)){ + } else if (input$source == "PalEON_soil" && is.null(input$id)) { PEcAn.logger::logger.severe("currently soil_process requires an input ID to be specified") return(NULL) } # Extract info from settings and setup - site <- settings$run$site - model <- settings$model$type - host <- settings$host - dbparms <- settings$database + site <- settings$run$site + model <- settings$model$type + host <- settings$host + dbparms <- settings$database # set up bety connection con <- PEcAn.DB::db.open(dbparms$bety) on.exit(PEcAn.DB::db.close(con), add = TRUE) # get site info latlon <- PEcAn.DB::query.site(site$id, con = con)[c("lat", "lon")] - new.site <- data.frame(id = as.numeric(site$id), - lat = latlon$lat, - lon = latlon$lon) + new.site <- data.frame( + id = as.numeric(site$id), + lat = latlon$lat, + lon = latlon$lon + ) str_ns <- paste0(new.site$id %/% 1e+09, "-", new.site$id %% 1e+09) outfolder <- file.path(dbfiles, paste0(input$source, "_site_", str_ns)) - if(!dir.exists(outfolder)) dir.create(outfolder) + if (!dir.exists(outfolder)) dir.create(outfolder) #--------------------------------------------------------------------------------------------------# # if we are reading from gSSURGO - if (input$source=="gSSURGO"){ - - #see if there is already files generated there - newfile <-list.files(outfolder, "*.nc$", full.names = TRUE) %>% + if (input$source == "gSSURGO") { + # see if there is already files generated there + newfile <- list.files(outfolder, "*.nc$", full.names = TRUE) %>% as.list() names(newfile) <- rep("path", length(newfile)) - if(length(newfile)==0){ + if (length(newfile) == 0) { radiusL <- ifelse(is.null(settings$run$input$soil$radius), 500, 
as.numeric(settings$run$input$soil$radius)) - newfile<-extract_soil_gssurgo(outfolder, lat = latlon$lat, lon=latlon$lon, radius = radiusL) + newfile <- extract_soil_gssurgo(outfolder, lat = latlon$lat, lon = latlon$lon, radius = radiusL) # register files in DB - for(i in 1:length(newfile)){ - in.path = paste0(dirname(newfile[i]$path), '/') - in.prefix = stringr::str_remove(basename(newfile[i]$path), ".nc") - - PEcAn.DB::dbfile.input.insert (in.path, - in.prefix, - new.site$id, - startdate = NULL, - enddate = NULL, - mimetype = "application/x-netcdf", - formatname = "pecan_soil_standard", - con = con, - ens=TRUE) - } - - + for (i in 1:length(newfile)) { + in.path <- paste0(dirname(newfile[i]$path), "/") + in.prefix <- stringr::str_remove(basename(newfile[i]$path), ".nc") + + PEcAn.DB::dbfile.input.insert(in.path, + in.prefix, + new.site$id, + startdate = NULL, + enddate = NULL, + mimetype = "application/x-netcdf", + formatname = "pecan_soil_standard", + con = con, + ens = TRUE + ) } + } return(newfile) } #--------------------------------------------------------------------------------------------------# # if we are reading PalEON_soil # get existing input info - source.input <- PEcAn.DB::db.query(paste0("SELECT * from Inputs where id =",input$id),con) - if(run.local){ - source.dbfiles <- PEcAn.DB::dbfile.check("Input",input$id,con,hostname='localhost') - }else{ - source.dbfiles <- PEcAn.DB::dbfile.check("Input",input$id,con,hostname=host$name) + source.input <- PEcAn.DB::db.query(paste0("SELECT * from Inputs where id =", input$id), con) + if (run.local) { + source.dbfiles <- PEcAn.DB::dbfile.check("Input", input$id, con, hostname = "localhost") + } else { + source.dbfiles <- PEcAn.DB::dbfile.check("Input", input$id, con, hostname = host$name) } - source.file <- file.path(source.dbfiles$file_path,source.dbfiles$file_name) - if(source.input$site_id == site$id){ + source.file <- file.path(source.dbfiles$file_path, source.dbfiles$file_name) + if (source.input$site_id == site$id) { ## Input is alreadly local - if(!is.null(input$path)){ + if (!is.null(input$path)) { return(input$path) ## path already exists, just return } else { ## path doesn't exist, see if we can find the relevant dbfile return(source.file) } - } ## otherwise continue to process soil + } ## otherwise continue to process soil - # set up host information + # set up host information machine.host <- ifelse(host == "localhost" || host$name == "localhost" || run.local, - PEcAn.remote::fqdn(), host$name) + PEcAn.remote::fqdn(), host$name + ) machine <- PEcAn.DB::db.query(paste0("SELECT * from machines where hostname = '", machine.host, "'"), con) # retrieve model type info - if(is.null(model)){ + if (is.null(model)) { modeltype_id <- PEcAn.DB::db.query(paste0("SELECT modeltype_id FROM models where id = '", settings$model$id, "'"), con)[[1]] model <- db.query(paste0("SELECT name FROM modeltypes where id = '", modeltype_id, "'"), con)[[1]] } - newfile <- PEcAn.data.land::extract_soil_nc(source.file,outfolder,lat = latlon$lat,lon=latlon$lon) + newfile <- PEcAn.data.land::extract_soil_nc(source.file, outfolder, lat = latlon$lat, lon = latlon$lon) return(newfile) } # ic_process diff --git a/modules/data.land/R/soil_utils.R b/modules/data.land/R/soil_utils.R index 9b9f3a83221..7438b0a0672 100644 --- a/modules/data.land/R/soil_utils.R +++ b/modules/data.land/R/soil_utils.R @@ -6,7 +6,7 @@ #' @param clay percent clay #' @param bulk soil bulk density (optional, kg m-3) #' -#' @details +#' @details #' * Specify _either_ soil_type or 
sand/silt/clay. soil_type will be ignored if sand/silt/clay is provided #' * If only 2 out of sand/silt/clay are provided, it will be assumed they sum to 100% #' * Valid soil class options: "Sand","Loamy sand","Sandy loam","Silt loam","Loam", @@ -15,16 +15,16 @@ #' "Silt","Heavy clay","Clayey sand","Clayey silt" #' * Based on ED2/R-utils/soilutils.r #' * Hydraulics based on Cosby et al 1984, using table 4 and equation 1 (which is incorrect it should be saturated moisture potential over moisture potential) -#' +#' #' #' @return list of soil hydraulic and thermal parameters #' @export #' @importFrom rlang %||% -#' @examples +#' @examples #' sand <- c(0.3, 0.4, 0.5) #' clay <- c(0.3, 0.3, 0.3) -#' soil_params(sand=sand,clay=clay) -soil_params <- function(soil_type=NULL, sand=NULL, silt=NULL, clay=NULL, bulk=NULL){ +#' soil_params(sand = sand, clay = clay) +soil_params <- function(soil_type = NULL, sand = NULL, silt = NULL, clay = NULL, bulk = NULL) { ## load soil parameters mysoil <- list() # 'soil_class' is package data, lazy-loaded here when needed @@ -50,7 +50,7 @@ soil_params <- function(soil_type=NULL, sand=NULL, silt=NULL, clay=NULL, bulk=NU texture <- PEcAn.data.land::soil_class$texture xclay.def <- PEcAn.data.land::soil_class$xclay.def xsand.def <- PEcAn.data.land::soil_class$xsand.def - + #---------------------------------------------------------------------------------------# # Find soil class and sand, silt, and clay fractions. # #---------------------------------------------------------------------------------------# @@ -59,7 +59,8 @@ soil_params <- function(soil_type=NULL, sand=NULL, silt=NULL, clay=NULL, bulk=NU if (is.null(soil_type)) { PEcAn.logger::logger.severe( "Insufficient arguments:", - "Must specify either soil_type or at least 2 of sand, silt, clay") + "Must specify either soil_type or at least 2 of sand, silt, clay" + ) } mysoil$soil_type <- soil_type mysoil$soil_n <- which(toupper(soil.name) == toupper(soil_type)) @@ -68,103 +69,116 @@ soil_params <- function(soil_type=NULL, sand=NULL, silt=NULL, clay=NULL, bulk=NU } else { if (any(c(sand, silt, clay) > 2)) { # assume values reported in % not proportion - sand <- if (is.null(sand)) { NULL } else { sand / 100 } - silt <- if (is.null(silt)) { NULL } else { silt / 100 } - clay <- if (is.null(clay)) { NULL } else { clay / 100 } + sand <- if (is.null(sand)) { + NULL + } else { + sand / 100 + } + silt <- if (is.null(silt)) { + NULL + } else { + silt / 100 + } + clay <- if (is.null(clay)) { + NULL + } else { + clay / 100 + } } # compute up to one missing value (>1 missing was handled above) # %||% is a null-filling operator: A %||% B == "B if A is null, A in all other cases" - sand <- sand %||% (1-silt-clay) - silt <- silt %||% (1-sand-clay) - clay <- clay %||% (1-sand-silt) - #normalize + sand <- sand %||% (1 - silt - clay) + silt <- silt %||% (1 - sand - clay) + clay <- clay %||% (1 - sand - silt) + # normalize stot <- sand + silt + clay sand <- sand / stot silt <- silt / stot clay <- clay / stot - mysoil$soil_n <- sclass(sand,clay) - mysoil$soil_type <- soil.name[mysoil$soil_n] + mysoil$soil_n <- sclass(sand, clay) + mysoil$soil_type <- soil.name[mysoil$soil_n] } # mysoil$key <- soil.key [mysoil$soil_n] # turning off these abreviations since they lack a CF equivalent mysoil$fraction_of_sand_in_soil <- sand mysoil$fraction_of_clay_in_soil <- clay mysoil$fraction_of_silt_in_soil <- 1. 
- mysoil$fraction_of_sand_in_soil - mysoil$fraction_of_clay_in_soil #---------------------------------------------------------------------------------------# - - if(!is.null(bulk)) mysoil$soil_bulk_density = bulk - + + if (!is.null(bulk)) mysoil$soil_bulk_density <- bulk + #---------------------------------------------------------------------------------------# # Set up primary properties. # #---------------------------------------------------------------------------------------# - for(z in which(mysoil$soil_n == 13)){ + for (z in which(mysoil$soil_n == 13)) { #----- Bedrock. Most things are zero, because it is an impermeable soil. -----------# - mysoil$soil_hydraulic_b[z] <- 0. - mysoil$soil_water_potential_at_saturation[z] <- 0. - mysoil$soil_hydraulic_conductivity_at_saturation[z] <- 0. - mysoil$volume_fraction_of_water_in_soil_at_saturation[z] <- 0. - mysoil$volume_fraction_of_water_in_soil_at_field_capacity[z] <- 0. - mysoil$volume_fraction_of_condensed_water_in_dry_soil[z] <- 0. - mysoil$volume_fraction_of_condensed_water_in_soil_at_wilting_point[z] <- 0. - mysoil$slcpd[z] <- 2130000. + mysoil$soil_hydraulic_b[z] <- 0. + mysoil$soil_water_potential_at_saturation[z] <- 0. + mysoil$soil_hydraulic_conductivity_at_saturation[z] <- 0. + mysoil$volume_fraction_of_water_in_soil_at_saturation[z] <- 0. + mysoil$volume_fraction_of_water_in_soil_at_field_capacity[z] <- 0. + mysoil$volume_fraction_of_condensed_water_in_dry_soil[z] <- 0. + mysoil$volume_fraction_of_condensed_water_in_soil_at_wilting_point[z] <- 0. + mysoil$slcpd[z] <- 2130000. #------------------------------------------------------------------------------------# } - for(z in which(mysoil$soil_n == 12)){ + for (z in which(mysoil$soil_n == 12)) { #------------------------------------------------------------------------------------# # Peat. High concentration of organic matter. Mineral soil equations don't # # apply here. # #------------------------------------------------------------------------------------# - mysoil$soil_hydraulic_b[z] <- 6.180000 - mysoil$soil_water_potential_at_saturation[z] <- -0.534564359 - mysoil$soil_hydraulic_conductivity_at_saturation[z] <- 2.357930e-6 - mysoil$volume_fraction_of_water_in_soil_at_saturation[z] <- 0.469200 - mysoil$volume_fraction_of_water_in_soil_at_field_capacity[z] <- 0.285709966 - mysoil$slcpd[z] <- 874000. + mysoil$soil_hydraulic_b[z] <- 6.180000 + mysoil$soil_water_potential_at_saturation[z] <- -0.534564359 + mysoil$soil_hydraulic_conductivity_at_saturation[z] <- 2.357930e-6 + mysoil$volume_fraction_of_water_in_soil_at_saturation[z] <- 0.469200 + mysoil$volume_fraction_of_water_in_soil_at_field_capacity[z] <- 0.285709966 + mysoil$slcpd[z] <- 874000. #------------------------------------------------------------------------------------# } - for(z in which(!(mysoil$soil_n %in% c(12,13)))){ + for (z in which(!(mysoil$soil_n %in% c(12, 13)))) { #------------------------------------------------------------------------------------# # Mineral soil. 
Use the standard Cosby et al 1984 eqns # #------------------------------------------------------------------------------------# ## TO-DO: Cosby Table 4 has equations for soil property STANDARD DEVIATIONS in addition to means ## in future, upgrade to return these and do ensemble sampling - + # B exponent [unitless] - mysoil$soil_hydraulic_b[z] <- 3.10 + 15.7*mysoil$fraction_of_clay_in_soil[z] - 0.3*mysoil$fraction_of_sand_in_soil[z] - + mysoil$soil_hydraulic_b[z] <- 3.10 + 15.7 * mysoil$fraction_of_clay_in_soil[z] - 0.3 * mysoil$fraction_of_sand_in_soil[z] + # Soil moisture potential at saturation [ m ] - mysoil$soil_water_potential_at_saturation[z] <- -0.01 * (10.^(2.17 - 0.63*mysoil$fraction_of_clay_in_soil[z] - 1.58*mysoil$fraction_of_sand_in_soil[z])) - + mysoil$soil_water_potential_at_saturation[z] <- -0.01 * (10.^(2.17 - 0.63 * mysoil$fraction_of_clay_in_soil[z] - 1.58 * mysoil$fraction_of_sand_in_soil[z])) + # Hydraulic conductivity at saturation [ m/s ] - mysoil$soil_hydraulic_conductivity_at_saturation[z] <- PEcAn.utils::ud_convert( - 10.^(-0.60 - + 1.26*mysoil$fraction_of_sand_in_soil[z] - - 0.64*mysoil$fraction_of_clay_in_soil[z]), + mysoil$soil_hydraulic_conductivity_at_saturation[z] <- PEcAn.utils::ud_convert( + 10.^(-0.60 + + 1.26 * mysoil$fraction_of_sand_in_soil[z] + - 0.64 * mysoil$fraction_of_clay_in_soil[z]), "inch/hour", - "meters/second") - + "meters/second" + ) + # Soil moisture at saturation [ m^3/m^3 ] - mysoil$volume_fraction_of_water_in_soil_at_saturation[z] <- (50.5 - 14.2*mysoil$fraction_of_sand_in_soil[z] - 3.7*mysoil$fraction_of_clay_in_soil[z]) / 100. - + mysoil$volume_fraction_of_water_in_soil_at_saturation[z] <- (50.5 - 14.2 * mysoil$fraction_of_sand_in_soil[z] - 3.7 * mysoil$fraction_of_clay_in_soil[z]) / 100. + # Soil field capacity[ m^3/m^3 ] - mysoil$volume_fraction_of_water_in_soil_at_field_capacity[z] <- mysoil$volume_fraction_of_water_in_soil_at_saturation[z] * ( fieldcp.K/mysoil$soil_hydraulic_conductivity_at_saturation[z])^ (1. / (2.*mysoil$soil_hydraulic_b[z]+3.)) + mysoil$volume_fraction_of_water_in_soil_at_field_capacity[z] <- mysoil$volume_fraction_of_water_in_soil_at_saturation[z] * (fieldcp.K / mysoil$soil_hydraulic_conductivity_at_saturation[z])^(1. / (2. * mysoil$soil_hydraulic_b[z] + 3.)) } ## end primary properties - + #---------------------------------------------------------------------------------------# # Calculate the derived properties in case this is not bedrock. # #---------------------------------------------------------------------------------------# - mysoil$slpotcp = mysoil$volume_fraction_of_condensed_water_in_dry_soil = mysoil$slpotwp = mysoil$volume_fraction_of_condensed_water_in_soil_at_wilting_point = 0.0 - for(z in which(!(mysoil$soil_n == 13))){ + mysoil$slpotcp <- mysoil$volume_fraction_of_condensed_water_in_dry_soil <- mysoil$slpotwp <- mysoil$volume_fraction_of_condensed_water_in_soil_at_wilting_point <- 0.0 + for (z in which(!(mysoil$soil_n == 13))) { # Dry soil capacity (at -3.1MPa) [ m^3/m^3 ] - mysoil$slpotcp[z] <- - soilcp.MPa * 1000. / grav - mysoil$volume_fraction_of_condensed_water_in_dry_soil[z] <- mpot2smoist(mysoil$slpotcp[z],mysoil$soil_water_potential_at_saturation[z],mysoil$soil_hydraulic_b[z],mysoil$volume_fraction_of_water_in_soil_at_saturation[z]) - + mysoil$slpotcp[z] <- -soilcp.MPa * 1000. 
/ grav + mysoil$volume_fraction_of_condensed_water_in_dry_soil[z] <- mpot2smoist(mysoil$slpotcp[z], mysoil$soil_water_potential_at_saturation[z], mysoil$soil_hydraulic_b[z], mysoil$volume_fraction_of_water_in_soil_at_saturation[z]) + # Wilting point capacity (at -1.5MPa) [ m^3/m^3 ] - mysoil$slpotwp[z] <- - soilwp.MPa * 1000. / grav - mysoil$volume_fraction_of_condensed_water_in_soil_at_wilting_point[z] <- mpot2smoist(mysoil$slpotwp[z], mysoil$soil_water_potential_at_saturation[z],mysoil$soil_hydraulic_b[z],mysoil$volume_fraction_of_water_in_soil_at_saturation[z]) - + mysoil$slpotwp[z] <- -soilwp.MPa * 1000. / grav + mysoil$volume_fraction_of_condensed_water_in_soil_at_wilting_point[z] <- mpot2smoist(mysoil$slpotwp[z], mysoil$soil_water_potential_at_saturation[z], mysoil$soil_hydraulic_b[z], mysoil$volume_fraction_of_water_in_soil_at_saturation[z]) + # Water potential for field capacity [ m] # mysoil$slpotfc <- smoist2mpot(mysoil$volume_fraction_of_water_in_soil_at_field_capacity, mysoil) - + #---------------------------------------------------------------------------------! # Specific heat of dry soil [ J/m3/K] ! # Here we take the volume average amongst silt, clay, and ! @@ -174,15 +188,14 @@ soil_params <- function(soil_type=NULL, sand=NULL, silt=NULL, clay=NULL, bulk=NU # air in case the soil moisture was halfway between dry air and saturated, so the ! # error is not too biased. ! #---------------------------------------------------------------------------------! - mysoil$slcpd[z] <- (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation[z]) * - ( mysoil$fraction_of_sand_in_soil[z] * sand.hcap + - mysoil$fraction_of_silt_in_soil[z] * silt.hcap + - mysoil$fraction_of_clay_in_soil[z] * clay.hcap ) + - 0.5 * (mysoil$volume_fraction_of_water_in_soil_at_saturation[z] - - mysoil$volume_fraction_of_condensed_water_in_dry_soil[z]) * air.hcap - + mysoil$slcpd[z] <- (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation[z]) * + (mysoil$fraction_of_sand_in_soil[z] * sand.hcap + + mysoil$fraction_of_silt_in_soil[z] * silt.hcap + + mysoil$fraction_of_clay_in_soil[z] * clay.hcap) + + 0.5 * (mysoil$volume_fraction_of_water_in_soil_at_saturation[z] - + mysoil$volume_fraction_of_condensed_water_in_dry_soil[z]) * air.hcap } - + #---------------------------------------------------------------------------------------# # Soil thermal conductivity. W/m/K # # # @@ -197,50 +210,50 @@ soil_params <- function(soil_type=NULL, sand=NULL, silt=NULL, clay=NULL, bulk=NU # Soil Till. Res., 47(1-2), 5-10. # # # #---------------------------------------------------------------------------------------# - mysoil$thcond0 <- ( ksand * mysoil$fraction_of_sand_in_soil * ( 1. - mysoil$volume_fraction_of_water_in_soil_at_saturation ) * sand.cond - + ksilt * mysoil$fraction_of_silt_in_soil * ( 1. - mysoil$volume_fraction_of_water_in_soil_at_saturation ) * silt.cond - + kclay * mysoil$fraction_of_clay_in_soil * ( 1. - mysoil$volume_fraction_of_water_in_soil_at_saturation ) * clay.cond - + kair * mysoil$volume_fraction_of_water_in_soil_at_saturation * air.cond ) - mysoil$thcond1 <- rep(h2o.cond - kair * air.cond,length=length(mysoil$thcond0)) - mysoil$thcond2 <- ( ksand * mysoil$fraction_of_sand_in_soil * ( 1. - mysoil$volume_fraction_of_water_in_soil_at_saturation ) - + ksilt * mysoil$fraction_of_silt_in_soil * ( 1. - mysoil$volume_fraction_of_water_in_soil_at_saturation ) - + kclay * mysoil$fraction_of_clay_in_soil * ( 1. 
- mysoil$volume_fraction_of_water_in_soil_at_saturation ) - + kair * mysoil$volume_fraction_of_water_in_soil_at_saturation ) - mysoil$thcond3 <- rep(1. - kair,length=length(mysoil$thcond0)) + mysoil$thcond0 <- (ksand * mysoil$fraction_of_sand_in_soil * (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation) * sand.cond + + ksilt * mysoil$fraction_of_silt_in_soil * (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation) * silt.cond + + kclay * mysoil$fraction_of_clay_in_soil * (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation) * clay.cond + + kair * mysoil$volume_fraction_of_water_in_soil_at_saturation * air.cond) + mysoil$thcond1 <- rep(h2o.cond - kair * air.cond, length = length(mysoil$thcond0)) + mysoil$thcond2 <- (ksand * mysoil$fraction_of_sand_in_soil * (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation) + + ksilt * mysoil$fraction_of_silt_in_soil * (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation) + + kclay * mysoil$fraction_of_clay_in_soil * (1. - mysoil$volume_fraction_of_water_in_soil_at_saturation) + + kair * mysoil$volume_fraction_of_water_in_soil_at_saturation) + mysoil$thcond3 <- rep(1. - kair, length = length(mysoil$thcond0)) ## default soil thermal conductivity = dry - mysoil$soil_thermal_conductivity <- ( mysoil$thcond0 + mysoil$thcond1 * mysoil$volume_fraction_of_condensed_water_in_dry_soil) / - ( mysoil$thcond2 + mysoil$thcond3 * mysoil$volume_fraction_of_condensed_water_in_dry_soil) - mysoil$soil_thermal_conductivity_at_saturation <- ( mysoil$thcond0 + mysoil$thcond1 * mysoil$volume_fraction_of_water_in_soil_at_saturation) / - ( mysoil$thcond2 + mysoil$thcond3 * mysoil$volume_fraction_of_water_in_soil_at_saturation) - + mysoil$soil_thermal_conductivity <- (mysoil$thcond0 + mysoil$thcond1 * mysoil$volume_fraction_of_condensed_water_in_dry_soil) / + (mysoil$thcond2 + mysoil$thcond3 * mysoil$volume_fraction_of_condensed_water_in_dry_soil) + mysoil$soil_thermal_conductivity_at_saturation <- (mysoil$thcond0 + mysoil$thcond1 * mysoil$volume_fraction_of_water_in_soil_at_saturation) / + (mysoil$thcond2 + mysoil$thcond3 * mysoil$volume_fraction_of_water_in_soil_at_saturation) + #---------------------------------------------------------------------------------------# - + ## final values to look up - for(z in which(!(mysoil$soil_n <= 13))){ + for (z in which(!(mysoil$soil_n <= 13))) { mysoil$soil_albedo[z] <- texture$albdry[mysoil$soil_n[z]] - if(is.null(bulk)) mysoil$soil_bulk_density[z] <- texture$xrobulk[mysoil$soil_n[z]] - mysoil$slden[z] <- texture$slden[mysoil$soil_n[z]] + if (is.null(bulk)) mysoil$soil_bulk_density[z] <- texture$xrobulk[mysoil$soil_n[z]] + mysoil$slden[z] <- texture$slden[mysoil$soil_n[z]] } - for(z in which(!(mysoil$soil_n > 13))){ + for (z in which(!(mysoil$soil_n > 13))) { ## if lack class-specific values, use across-soil average mysoil$soil_albedo[z] <- stats::median(texture$albdry) - if(is.null(bulk)) mysoil$soil_bulk_density[z] <- stats::median(texture$xrobulk) - mysoil$slden[z] <- stats::median(texture$slden) + if (is.null(bulk)) mysoil$soil_bulk_density[z] <- stats::median(texture$xrobulk) + mysoil$slden[z] <- stats::median(texture$slden) } - + ## Conversions to standard variables - mysoil$soil_thermal_capacity <- mysoil$slcpd / mysoil$soil_bulk_density ## J/m3/K / [kg m-3] -> J/kg/K - + mysoil$soil_thermal_capacity <- mysoil$slcpd / mysoil$soil_bulk_density ## J/m3/K / [kg m-3] -> J/kg/K + ## drop variables that are only meaningful internally - #mysoil$slpotcp <- NULL - #mysoil$slpotwp <- NULL - #mysoil$slden 
<- NULL ## not clear how this is is different from bulk density in the look-up-table - #mysoil$slcpd <- NULL - + # mysoil$slpotcp <- NULL + # mysoil$slpotwp <- NULL + # mysoil$slden <- NULL ## not clear how this is is different from bulk density in the look-up-table + # mysoil$slcpd <- NULL + return(mysoil) -}#end function -#==========================================================================================# -#==========================================================================================# +} # end function +# ==========================================================================================# +# ==========================================================================================# @@ -258,71 +271,69 @@ soil_params <- function(soil_type=NULL, sand=NULL, silt=NULL, clay=NULL, bulk=NU #' @export #' #' @examples -#' sclass(0.3,0.3) -sclass <- function(sandfrac,clayfrac){ - +#' sclass(0.3, 0.3) +sclass <- function(sandfrac, clayfrac) { #----- Define the percentage of sand, clay, and silt. ----------------------------------# sand <- 100. * sandfrac clay <- 100. * clayfrac silt <- 100. - sand - clay #---------------------------------------------------------------------------------------# - + #---------------------------------------------------------------------------------------# # Here there is not much we can do other than explore where in the triangle space # # we are. # #---------------------------------------------------------------------------------------# - - if (any(silt > 100.) | any(silt < 0.) | any(sand > 100.) | - any(sand < 0.) | any(clay > 100.) | any(clay < 0.) ) { + + if (any(silt > 100.) | any(silt < 0.) | any(sand > 100.) | + any(sand < 0.) | any(clay > 100.) | any(clay < 0.)) { PEcAn.logger::logger.warn(" At least one of your percentages is screwy...") - PEcAn.logger::logger.warn(paste("SAND <- ",sprintf("%.2f",sand),"%",sep="")) - PEcAn.logger::logger.warn(paste("CLAY <- ",sprintf("%.2f",clay),"%",sep="")) - PEcAn.logger::logger.warn(paste("SILT <- ",sprintf("%.2f",silt),"%",sep="")) + PEcAn.logger::logger.warn(paste("SAND <- ", sprintf("%.2f", sand), "%", sep = "")) + PEcAn.logger::logger.warn(paste("CLAY <- ", sprintf("%.2f", clay), "%", sep = "")) + PEcAn.logger::logger.warn(paste("SILT <- ", sprintf("%.2f", silt), "%", sep = "")) PEcAn.logger::logger.severe("This soil doesn''t fit into any category...") - } - nlayer = max(length(silt),length(clay),length(sand)) - mysoil = NA - for(z in seq_len(nlayer)){ - if(sand[z] > 85.0 + 0.5 * clay[z]) { - mysoil[z] <- 1 #----- Sand. ------------------------------------------------------------# - }else if(sand[z] > 70.0 + clay[z]) { - mysoil[z] <- 2 #----- Loamy sand. ------------------------------------------------------# - }else if((clay[z] <= 20.0 & sand[z] > 52.5) | (clay[z] <= 7.5 & silt[z] <= 50.0)) { - mysoil[z] <- 3 #----- Sandy loam. ------------------------------------------------------# - }else if((clay[z] <= 27.5 & silt[z] > 50.0 & silt[z] <= 80.0) | (silt[z] > 80.0 & clay[z] > 12.5)) { - mysoil[z] <- 4 #----- Silt loam. -------------------------------------------------------# - }else if(clay[z] > 7.5 & clay[z] <= 27.5 & silt[z] > 27.5 & silt[z] <= 50.0 & sand[z] <= 52.5) { - mysoil[z] <- 5 #----- Loam. ------------------------------------------------------------# - }else if(clay[z] > 20.0 & clay[z] <= 35.0 & silt[z] <= 27.5 & sand[z] > 45.0) { - mysoil[z] <- 6 #----- Sandy clay loam. 
-------------------------------------------------# - }else if(clay[z] > 27.5 & clay[z] <= 40.0 & sand[z] <= 20.0) { - mysoil[z] <- 7 #----- Silty clay loam. -------------------------------------------------# - }else if(clay[z] > 27.5 & clay[z] <= 40.0 & sand[z] > 20.0 & sand[z] <= 45.0) { - mysoil[z] <- 8 #----- Clayey loam. -----------------------------------------------------# - }else if(clay[z] > 35.0 & sand[z] > 45.0) { - mysoil[z] <- 9 #----- Sandy clay. ------------------------------------------------------# - }else if(clay[z] > 40.0 & silt[z] > 40.0) { + nlayer <- max(length(silt), length(clay), length(sand)) + mysoil <- NA + for (z in seq_len(nlayer)) { + if (sand[z] > 85.0 + 0.5 * clay[z]) { + mysoil[z] <- 1 #----- Sand. ------------------------------------------------------------# + } else if (sand[z] > 70.0 + clay[z]) { + mysoil[z] <- 2 #----- Loamy sand. ------------------------------------------------------# + } else if ((clay[z] <= 20.0 & sand[z] > 52.5) | (clay[z] <= 7.5 & silt[z] <= 50.0)) { + mysoil[z] <- 3 #----- Sandy loam. ------------------------------------------------------# + } else if ((clay[z] <= 27.5 & silt[z] > 50.0 & silt[z] <= 80.0) | (silt[z] > 80.0 & clay[z] > 12.5)) { + mysoil[z] <- 4 #----- Silt loam. -------------------------------------------------------# + } else if (clay[z] > 7.5 & clay[z] <= 27.5 & silt[z] > 27.5 & silt[z] <= 50.0 & sand[z] <= 52.5) { + mysoil[z] <- 5 #----- Loam. ------------------------------------------------------------# + } else if (clay[z] > 20.0 & clay[z] <= 35.0 & silt[z] <= 27.5 & sand[z] > 45.0) { + mysoil[z] <- 6 #----- Sandy clay loam. -------------------------------------------------# + } else if (clay[z] > 27.5 & clay[z] <= 40.0 & sand[z] <= 20.0) { + mysoil[z] <- 7 #----- Silty clay loam. -------------------------------------------------# + } else if (clay[z] > 27.5 & clay[z] <= 40.0 & sand[z] > 20.0 & sand[z] <= 45.0) { + mysoil[z] <- 8 #----- Clayey loam. -----------------------------------------------------# + } else if (clay[z] > 35.0 & sand[z] > 45.0) { + mysoil[z] <- 9 #----- Sandy clay. ------------------------------------------------------# + } else if (clay[z] > 40.0 & silt[z] > 40.0) { mysoil[z] <- 10 #----- Silty clay. ------------------------------------------------------# - }else if(clay[z] <= 70.0 & sand[z] <= 30.0 & silt[z] <= 30.0) { + } else if (clay[z] <= 70.0 & sand[z] <= 30.0 & silt[z] <= 30.0) { mysoil[z] <- 11 #----- Clay. ------------------------------------------------------------# - }else if( silt[z] > 80.0 & clay[z] <= 12.5) { + } else if (silt[z] > 80.0 & clay[z] <= 12.5) { mysoil[z] <- 14 #----- Silt. ------------------------------------------------------------# - }else if( clay[z] > 70.0) { + } else if (clay[z] > 70.0) { mysoil[z] <- 15 #----- Heavy clay. ------------------------------------------------------# - }else if( clay[z] > 40.0 & sand[z] > 30.0 & sand[z] <= 45.0) { + } else if (clay[z] > 40.0 & sand[z] > 30.0 & sand[z] <= 45.0) { mysoil[z] <- 16 #----- Clayey sand. -----------------------------------------------------# - }else if( clay[z] > 40.0 & silt[z] > 30.0 & silt[z] <= 40.0) { + } else if (clay[z] > 40.0 & silt[z] > 30.0 & silt[z] <= 40.0) { mysoil[z] <- 17 #----- Clayey silt. 
-----------------------------------------------------# - }else{ - PEcAn.logger::logger.warn(paste("SAND <- ",sprintf("%.2f",sand[z]),"%",sep="")) - PEcAn.logger::logger.warn(paste("CLAY <- ",sprintf("%.2f",clay[z]),"%",sep="")) - PEcAn.logger::logger.warn(paste("SILT <- ",sprintf("%.2f",silt[z]),"%",sep="")) - PEcAn.logger::logger.severe ("This soil doesn''t fit into any category...") - }#end if + } else { + PEcAn.logger::logger.warn(paste("SAND <- ", sprintf("%.2f", sand[z]), "%", sep = "")) + PEcAn.logger::logger.warn(paste("CLAY <- ", sprintf("%.2f", clay[z]), "%", sep = "")) + PEcAn.logger::logger.warn(paste("SILT <- ", sprintf("%.2f", silt[z]), "%", sep = "")) + PEcAn.logger::logger.severe("This soil doesn''t fit into any category...") + } # end if } return(mysoil) -}#end function +} # end function #' Convert a matric potential to a soil moisture @@ -335,9 +346,9 @@ sclass <- function(sandfrac,clayfrac){ #' @return volumetric soil water content #' @export #' -#' -mpot2smoist <- function(mpot,soil_water_potential_at_saturation,soil_hydraulic_b,volume_fraction_of_water_in_soil_at_saturation){ - smfrac = ( mpot / soil_water_potential_at_saturation) ^ (-1. / soil_hydraulic_b) - smoist = smfrac * volume_fraction_of_water_in_soil_at_saturation +#' +mpot2smoist <- function(mpot, soil_water_potential_at_saturation, soil_hydraulic_b, volume_fraction_of_water_in_soil_at_saturation) { + smfrac <- (mpot / soil_water_potential_at_saturation)^(-1. / soil_hydraulic_b) + smoist <- smfrac * volume_fraction_of_water_in_soil_at_saturation return(smoist) -}#end function \ No newline at end of file +} # end function diff --git a/modules/data.land/R/soilgrids_soc_extraction.R b/modules/data.land/R/soilgrids_soc_extraction.R index 138bf540a2e..44d89846cc9 100644 --- a/modules/data.land/R/soilgrids_soc_extraction.R +++ b/modules/data.land/R/soilgrids_soc_extraction.R @@ -1,90 +1,90 @@ ##' soilgrids_soilC_extract function -##' A function to extract total soil organic carbon for a single or group of -##' lat/long locationsbased on user-defined site location from SoilGrids250m +##' A function to extract total soil organic carbon for a single or group of +##' lat/long locationsbased on user-defined site location from SoilGrids250m ##' version 2.0 : https://soilgrids.org ##' @title soilgrids_soilC_extract ##' @name soilgrids_soilC_extract -##' -##' @param site_info A dataframe of site info containing the BETYdb site ID, -##' site name, latitude, and longitude, e.g. +##' +##' @param site_info A dataframe of site info containing the BETYdb site ID, +##' site name, latitude, and longitude, e.g. ##' (site_id, site_name, lat, lon) -##' @param outdir Optional. Provide the results as a CSV file +##' @param outdir Optional. Provide the results as a CSV file ##' (soilgrids_soilC_data.csv) ##' @param verbose Provide progress feedback to the terminal? 
TRUE/FALSE -##' +##' ##' @examples ##' \dontrun{ -##' +##' ##' # Example 1 - using the modex.bnl.gov BETYdb and site IDs to extract data ##' db <- 'betydb' ##' host_db <- 'modex.bnl.gov' ##' db_port <- '5432' ##' db_user <- 'bety' ##' db_password <- 'bety' -##' +##' ##' bety <- list(user='bety', password='bety', host=host_db, ##' dbname='betydb', driver=RPostgres::Postgres(),write=FALSE) -##' -##' con <- DBI::dbConnect(drv=bety$driver, dbname=bety$dbname, host=bety$host, +##' +##' con <- DBI::dbConnect(drv=bety$driver, dbname=bety$dbname, host=bety$host, ##' password=bety$password, user=bety$user) -##' +##' ##' suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ##' ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", ##' ids = c("676","622","678","766","764"), .con = con)) -##' +##' ##' suppressWarnings(qry_results.1 <- DBI::dbSendQuery(con,site_qry)) ##' suppressWarnings(qry_results.2 <- DBI::dbFetch(qry_results.1)) ##' DBI::dbClearResult(qry_results.1) ##' DBI::dbDisconnect(con) -##' +##' ##' site_info <- qry_results.2 ##' verbose <- TRUE -##' system.time(result_soc <- PEcAn.data.land::soilgrids_soilC_extract(site_info=site_info, +##' system.time(result_soc <- PEcAn.data.land::soilgrids_soilC_extract(site_info=site_info, ##' verbose=verbose)) ##' result_soc -##' +##' ##' } -##' @return a dataframe containing the total soil carbon values -##' and the corresponding standard deviation values (uncertainties) for each location +##' @return a dataframe containing the total soil carbon values +##' and the corresponding standard deviation values (uncertainties) for each location ##' Output column names are c("Site_ID","Site_Name","Latitude","Longitude", ##' "Total_soilC","Std_soilC") -##' +##' ##' @export ##' @author Qianyu Li, Shawn P. Serbin ##' @importFrom magrittr %>% -soilgrids_soilC_extract <- function (site_info, outdir=NULL, verbose=TRUE) { +soilgrids_soilC_extract <- function(site_info, outdir = NULL, verbose = TRUE) { if (future::supportsMulticore()) { future::plan(future::multicore) } else { future::plan(future::multisession) } if (is.null(site_info)) { - PEcAn.logger::logger.error("No site information found. Please provide a BETY DB site list containing at least the site id and PostGIS geometry\ + PEcAn.logger::logger.error("No site information found. 
Please provide a BETY DB site list containing at least the site id and PostGIS geometry\ as lon and lat") } - + # prepare site info for extraction - internal_site_info <- data.frame(site_info$site_id, site_info$site_name, site_info$lat,site_info$lon) - #create a variable to store mean and quantile of organic carbon density (ocd) for each soil depth - ocdquant <- matrix(NA, nrow = 6, ncol = length(internal_site_info$site_info.lon) * 4) #row represents soil depth, col represents mean, 5%, 50% and 95%-quantile of ocd for all sites + internal_site_info <- data.frame(site_info$site_id, site_info$site_name, site_info$lat, site_info$lon) + # create a variable to store mean and quantile of organic carbon density (ocd) for each soil depth + ocdquant <- matrix(NA, nrow = 6, ncol = length(internal_site_info$site_info.lon) * 4) # row represents soil depth, col represents mean, 5%, 50% and 95%-quantile of ocd for all sites lonlat <- cbind(internal_site_info$site_info.lon, internal_site_info$site_info.lat) base_data_url <- "/vsicurl?max_retry=30&retry_delay=60&list_dir=no&url=https://files.isric.org/soilgrids/latest/data/ocd/ocd_" depths <- c("0-5cm", "5-15cm", "15-30cm", "30-60cm", "60-100cm", "100-200cm") - layer_thick <- c(0.05,0.10,0.15,0.30,0.40,1.00) # in unit m - + layer_thick <- c(0.05, 0.10, 0.15, 0.30, 0.40, 1.00) # in unit m + # reproject locations to soilgrids projection - #Soilgrids data is using Homolosine projection https://www.isric.org/explore/soilgrids/faq-soilgrids + # Soilgrids data is using Homolosine projection https://www.isric.org/explore/soilgrids/faq-soilgrids p <- terra::vect(lonlat, crs = "+proj=longlat +datum=WGS84") # Users need to provide lon/lat - newcrs <- "+proj=igh +datum=WGS84 +no_defs +towgs84=0,0,0" + newcrs <- "+proj=igh +datum=WGS84 +no_defs +towgs84=0,0,0" p_reproj <- terra::project(p, newcrs) # Transform the point vector to data with Homolosine projection data_tag <- c("_mean.vrt", "_Q0.05.vrt", "_Q0.5.vrt", "_Q0.95.vrt") - name_tag <- expand.grid(depths, data_tag, stringsAsFactors = F)#find the combinations between data and depth tags. - L <- split(as.data.frame(name_tag), seq(nrow(as.data.frame(name_tag))))#convert tags into lists. - if ("try-error" %in% class(try(ocd_real <- L %>% furrr::future_map(function(l){ + name_tag <- expand.grid(depths, data_tag, stringsAsFactors = F) # find the combinations between data and depth tags. + L <- split(as.data.frame(name_tag), seq(nrow(as.data.frame(name_tag)))) # convert tags into lists. 
+ if ("try-error" %in% class(try(ocd_real <- L %>% furrr::future_map(function(l) { ocd_url <- paste0(base_data_url, l[[1]], l[[2]]) ocd_map <- terra::extract(terra::rast(ocd_url), p_reproj) - unlist(ocd_map[, -1])/10 + unlist(ocd_map[, -1]) / 10 }, .progress = T)))) { ocd_real <- vector("list", length = length(L)) pb <- utils::txtProgressBar(min = 0, max = length(L), style = 3) @@ -92,51 +92,53 @@ soilgrids_soilC_extract <- function (site_info, outdir=NULL, verbose=TRUE) { l <- L[[i]] ocd_url <- paste0(base_data_url, l[[1]], l[[2]]) ocd_map <- terra::extract(terra::rast(ocd_url), p_reproj) - ocd_real[[i]] <- unlist(ocd_map[, -1])/10 + ocd_real[[i]] <- unlist(ocd_map[, -1]) / 10 utils::setTxtProgressBar(pb, i) } } - + for (dep in seq_along(depths)) { dep.ind <- which(grepl(depths[dep], name_tag[, 1])) - ocdquant[dep, ] <- ocd_real[dep.ind] %>% unlist + ocdquant[dep, ] <- ocd_real[dep.ind] %>% unlist() } na.ind <- which(is.na(ocdquant[1, 1:length(site_info$site_id)])) - internal_site_info <- data.frame(site_info$site_id[-na.ind], - site_info$site_name[-na.ind], - site_info$lat[-na.ind], - site_info$lon[-na.ind]) %>% `colnames<-`(names(site_info)) - - # parse extracted data and prepare for output - quantile_name <-c(paste("Mean_",site_info$site_id,sep=""),paste("0.05_",site_info$site_id,sep=""),paste("0.5_",site_info$site_id,sep=""),paste("0.95_",site_info$site_id,sep="")) + internal_site_info <- data.frame( + site_info$site_id[-na.ind], + site_info$site_name[-na.ind], + site_info$lat[-na.ind], + site_info$lon[-na.ind] + ) %>% `colnames<-`(names(site_info)) + + # parse extracted data and prepare for output + quantile_name <- c(paste("Mean_", site_info$site_id, sep = ""), paste("0.05_", site_info$site_id, sep = ""), paste("0.5_", site_info$site_id, sep = ""), paste("0.95_", site_info$site_id, sep = "")) colnames(ocdquant) <- quantile_name - ocdquant_dep <- cbind(ocdquant,depths) - ocd_df <- tidyr::pivot_longer(as.data.frame(ocdquant_dep),cols=tidyselect::all_of(quantile_name),names_to=c("Quantile", "Siteid"),names_sep = "_") - #remove NA from ocd_df + ocdquant_dep <- cbind(ocdquant, depths) + ocd_df <- tidyr::pivot_longer(as.data.frame(ocdquant_dep), cols = tidyselect::all_of(quantile_name), names_to = c("Quantile", "Siteid"), names_sep = "_") + # remove NA from ocd_df ocd_df <- stats::na.omit(ocd_df) - colnames(ocd_df) <- c("Depth","Quantile", "Siteid","Value") - ocd_df$Value<-as.numeric(ocd_df$Value) - f1<-factor(ocd_df$Siteid,levels=unique(ocd_df$Siteid)) - f2<-factor(ocd_df$Depth,levels=unique(ocd_df$Depth)) - #split data by groups of sites and soil depth, while keeping the original order of each group - dat <- split(ocd_df, list(f1, f2)) - - #assume the ocd profile follows gamma distribution best + colnames(ocd_df) <- c("Depth", "Quantile", "Siteid", "Value") + ocd_df$Value <- as.numeric(ocd_df$Value) + f1 <- factor(ocd_df$Siteid, levels = unique(ocd_df$Siteid)) + f2 <- factor(ocd_df$Depth, levels = unique(ocd_df$Depth)) + # split data by groups of sites and soil depth, while keeping the original order of each group + dat <- split(ocd_df, list(f1, f2)) + + # assume the ocd profile follows gamma distribution best cgamma <- function(theta, val, stat) { pred <- rep(NA, 4) - names(pred) = stat + names(pred) <- stat if ("Mean" %in% stat) { pred["Mean"] <- theta[1] / theta[2] } qstat <- as.numeric(stat)[!is.na(as.numeric(stat))] pred[as.character(qstat)] <- stats::qgamma(qstat, theta[1], theta[2]) - return(sum((pred - val) ^ 2)) + return(sum((pred - val)^2)) } - + fitQ <- function(x) { - val = 
x$Value - stat = as.character(x$Quantile) - theta = c(10, 10) + val <- x$Value + stat <- as.character(x$Quantile) + theta <- c(10, 10) fit <- list(Gamma = stats::optim(theta, cgamma, val = val, stat = stat)) SS <- sapply(fit, function(f) { @@ -147,51 +149,58 @@ soilgrids_soilC_extract <- function (site_info, outdir=NULL, verbose=TRUE) { }) return(list(par = par, SS = SS)) } - + score <- suppressWarnings(lapply(dat, fitQ)) - bestPar <- sapply(score, function(f) { f$par }) - mean <- bestPar[1,] / bestPar[2,] - std <- sqrt(bestPar[1,] / bestPar[2,] ^ 2) + bestPar <- sapply(score, function(f) { + f$par + }) + mean <- bestPar[1, ] / bestPar[2, ] + std <- sqrt(bestPar[1, ] / bestPar[2, ]^2) mean_site <- matrix(mean, length(internal_site_info$lon), 6) rownames(mean_site) <- as.numeric(internal_site_info$site_id) colnames(mean_site) <- depths - mean_site.2 <- data.frame(site_id=internal_site_info$site_id, - lat=internal_site_info$lat, - lon=internal_site_info$lon, - mean_site) - colnames(mean_site.2)[4:9] <- depths + mean_site.2 <- data.frame( + site_id = internal_site_info$site_id, + lat = internal_site_info$lat, + lon = internal_site_info$lon, + mean_site + ) + colnames(mean_site.2)[4:9] <- depths std_site <- matrix(std, length(internal_site_info$lon), 6) rownames(std_site) <- as.numeric(internal_site_info$site_id) colnames(std_site) <- depths - std_site.2 <- data.frame(site_id=internal_site_info$site_id, - lat=internal_site_info$lat, - lon=internal_site_info$lon, - std_site) - colnames(std_site.2)[4:9] <- depths - #calculate organic carbon stock (ocs) as the sum of organic carbon density multiplied by layer thickness, the unit of ocs is kg/m2, based on Eq. (6)in paper https://www.sciencedirect.com/science/article/pii/S2215016122000462 - ocs_sum <- mean_site[,1]*layer_thick[1]+mean_site[,2]*layer_thick[2]+mean_site[,3]*layer_thick[3]+mean_site[,4]*layer_thick[4]+mean_site[,5]*layer_thick[5]+mean_site[,6]*layer_thick[6] - #calculate standard deviation of ocs as the square root of sum of variance of layer-specific ocs, the unit of ocs is kg/m2, based on Eq. (8) in paper https://www.sciencedirect.com/science/article/pii/S2215016122000462, except the correlation term due to the lack of information - ocs_std <- sqrt((std_site[,1]*layer_thick[1])^2+(std_site[,2]*layer_thick[2])^2+(std_site[,3]*layer_thick[3])^2+(std_site[,4]*layer_thick[4])^2+(std_site[,5]*layer_thick[5])^2+(std_site[,6]*layer_thick[6])^2) - ocs_sum_30cm <- mean_site[,1]*layer_thick[1]+mean_site[,2]*layer_thick[2]+mean_site[,3]*layer_thick[3] - ocs_std_30cm <- sqrt((std_site[,1]*layer_thick[1])^2+(std_site[,2]*layer_thick[2])^2+(std_site[,3]*layer_thick[3])^2) - soilgrids_soilC_data <- data.frame(internal_site_info$site_id, - internal_site_info$site_name, - internal_site_info$lat, - internal_site_info$lon, - ocs_sum,ocs_std, - ocs_sum_30cm, - ocs_std_30cm) - colnames(soilgrids_soilC_data)<- c("Site_ID","Site_Name","Latitude","Longitude","Total_soilC_0-200cm","Std_soilC_0-200cm","Total_soilC_0-30cm","Std_soilC_0-30cm") + std_site.2 <- data.frame( + site_id = internal_site_info$site_id, + lat = internal_site_info$lat, + lon = internal_site_info$lon, + std_site + ) + colnames(std_site.2)[4:9] <- depths + # calculate organic carbon stock (ocs) as the sum of organic carbon density multiplied by layer thickness, the unit of ocs is kg/m2, based on Eq. 
(6)in paper https://www.sciencedirect.com/science/article/pii/S2215016122000462 + ocs_sum <- mean_site[, 1] * layer_thick[1] + mean_site[, 2] * layer_thick[2] + mean_site[, 3] * layer_thick[3] + mean_site[, 4] * layer_thick[4] + mean_site[, 5] * layer_thick[5] + mean_site[, 6] * layer_thick[6] + # calculate standard deviation of ocs as the square root of sum of variance of layer-specific ocs, the unit of ocs is kg/m2, based on Eq. (8) in paper https://www.sciencedirect.com/science/article/pii/S2215016122000462, except the correlation term due to the lack of information + ocs_std <- sqrt((std_site[, 1] * layer_thick[1])^2 + (std_site[, 2] * layer_thick[2])^2 + (std_site[, 3] * layer_thick[3])^2 + (std_site[, 4] * layer_thick[4])^2 + (std_site[, 5] * layer_thick[5])^2 + (std_site[, 6] * layer_thick[6])^2) + ocs_sum_30cm <- mean_site[, 1] * layer_thick[1] + mean_site[, 2] * layer_thick[2] + mean_site[, 3] * layer_thick[3] + ocs_std_30cm <- sqrt((std_site[, 1] * layer_thick[1])^2 + (std_site[, 2] * layer_thick[2])^2 + (std_site[, 3] * layer_thick[3])^2) + soilgrids_soilC_data <- data.frame( + internal_site_info$site_id, + internal_site_info$site_name, + internal_site_info$lat, + internal_site_info$lon, + ocs_sum, ocs_std, + ocs_sum_30cm, + ocs_std_30cm + ) + colnames(soilgrids_soilC_data) <- c("Site_ID", "Site_Name", "Latitude", "Longitude", "Total_soilC_0-200cm", "Std_soilC_0-200cm", "Total_soilC_0-30cm", "Std_soilC_0-30cm") rownames(soilgrids_soilC_data) <- NULL if (!is.null(outdir)) { - PEcAn.logger::logger.info(paste0("Storing results in: ",file.path(outdir,"soilgrids_soilC_data.csv"))) - utils::write.csv(soilgrids_soilC_data,file=file.path(outdir,"soilgrids_soilC_data.csv"),row.names = FALSE) - } - else { + PEcAn.logger::logger.info(paste0("Storing results in: ", file.path(outdir, "soilgrids_soilC_data.csv"))) + utils::write.csv(soilgrids_soilC_data, file = file.path(outdir, "soilgrids_soilC_data.csv"), row.names = FALSE) + } else { PEcAn.logger::logger.error("No output directory found.") } # return the results to the terminal as well return(soilgrids_soilC_data) -} \ No newline at end of file +} diff --git a/modules/data.land/R/write_ic.R b/modules/data.land/R/write_ic.R index aa571472f6c..235a499b152 100644 --- a/modules/data.land/R/write_ic.R +++ b/modules/data.land/R/write_ic.R @@ -1,6 +1,6 @@ ##' @name write_ic ##' @title write_ic -##' +##' ##' @param in.path file path to rds file with IC data ##' @param in.name file name of IC data ##' @param start_date YYYY-MM-DD @@ -8,38 +8,36 @@ ##' @param outfolder Location to store function outputs ##' @param model BETY model ID ##' @param new_site Site info including lat, lon, and BETT site ID -##' @param pfts list settings$pfts. +##' @param pfts list settings$pfts. ##' @param source Data source as saved in the BETY db -##' @param overwrite DEfault is FALSE. Option to overwrite existing files. +##' @param overwrite DEfault is FALSE. Option to overwrite existing files. ##' @param n.ensemble number of ensemble members ##' @param ... Additional parameters -##' @param host.inputargs host info taken from settings object +##' @param host.inputargs host info taken from settings object ##' ##' @export -##' +##' ##' @author Istem Fer -write_ic <- function(in.path, in.name, start_date, end_date, +write_ic <- function(in.path, in.name, start_date, end_date, outfolder, model, new_site, pfts, - source = input_veg$source, overwrite = FALSE, n.ensemble, host.inputargs, ...){ - - + source = input_veg$source, overwrite = FALSE, n.ensemble, host.inputargs, ...) 
{ #--------------------------------------------------------------------------------------------------# # Read rds_file <- file.path(in.path, in.name) - veg_info <- readRDS(rds_file) - + veg_info <- readRDS(rds_file) + #--------------------------------------------------------------------------------------------------# # Match PFTs - #revisit later need to fix species matching first + # revisit later need to fix species matching first obs <- as.data.frame(veg_info[[2]], stringsAsFactors = FALSE) # NOTE : match_pft may return NAs for unmatched dead trees pft.info <- PEcAn.data.land::match_pft(bety_species_id = obs$bety_species_id, pfts = pfts, model = model, con = NULL) # merge with other stuff obs$pft <- pft.info$pft - + veg_info[[2]] <- obs - + #--------------------------------------------------------------------------------------------------# # veg2model # Set model-specific functions @@ -48,41 +46,38 @@ write_ic <- function(in.path, in.name, start_date, end_date, fcnx <- paste("veg2model.", model$type, sep = "") if (!exists(fcnx)) { PEcAn.logger::logger.severe(paste(fcnx, "does not exist.")) - }else{ + } else { fcn <- match.fun(fcnx) } # Cohort2Pool ------------------------------------------------------------- # read in registration xml for pool specific information register.xml <- system.file(paste0("register.", model$type, ".xml"), package = paste0("PEcAn.", model$type)) - if(file.exists(register.xml)){ - register <- XML::xmlToList(XML::xmlParse(register.xml)) - - }else{ + if (file.exists(register.xml)) { + register <- XML::xmlToList(XML::xmlParse(register.xml)) + } else { PEcAn.logger::logger.warn("No model register file found") } - #check if register,model.xml includes "POOL" + # check if register,model.xml includes "POOL" if (register$initcond == "POOL") { poolinfo <- cohort2pool(dat = veg_info, allom_param = NULL, dbh_name = "DBH") siteid <- as.numeric(new_site$id) out <- fcn(outfolder, poolinfo, siteid, ens = n.ensemble) - - } else{ + } else { out <- fcn(outfolder, veg_info, start_date, new_site, source, ens = n.ensemble) - } # Build results dataframe for convert_input - results <- data.frame(file = out$file, - host = host.inputargs$name, - mimetype = out$mimetype, - formatname = out$formatname, - startdate = start_date, - enddate = end_date, - dbfile.name = out$dbfile.name, - stringsAsFactors = FALSE) - - + results <- data.frame( + file = out$file, + host = host.inputargs$name, + mimetype = out$mimetype, + formatname = out$formatname, + startdate = start_date, + enddate = end_date, + dbfile.name = out$dbfile.name, + stringsAsFactors = FALSE + ) + + ### return for convert_inputs return(invisible(results)) - - -} # write_ic \ No newline at end of file +} # write_ic diff --git a/modules/data.land/R/write_veg.R b/modules/data.land/R/write_veg.R index 3615ead69f3..71e24e50250 100644 --- a/modules/data.land/R/write_veg.R +++ b/modules/data.land/R/write_veg.R @@ -1,5 +1,5 @@ ##' Function to save intermediate rds file -##' +##' ##' @name write_veg ##' @title write_veg ##' @param outfolder output folder @@ -7,18 +7,16 @@ ##' @param veg_info vegetation data to be saved ##' @param source name of data source (used in file naming) ##' @export -write_veg <- function(outfolder, start_date, veg_info, source){ - - start_year <- lubridate::year(start_date) - out_file <- paste(source, start_year, "veg", "rds", sep = ".") +write_veg <- function(outfolder, start_date, veg_info, source) { + start_year <- lubridate::year(start_date) + out_file <- paste(source, start_year, "veg", "rds", sep = ".") 
out_file_full <- file.path(outfolder, out_file) - + dir.create(outfolder, showWarnings = FALSE, recursive = TRUE) - + saveRDS(veg_info, file = out_file_full) - + return(out_file_full) - } # write_veg # Maybe a remove_dead_trees function diff --git a/modules/data.land/data-raw/build_soil_texture_variables.R b/modules/data.land/data-raw/build_soil_texture_variables.R index 6df7ed1ce2b..136fa265762 100644 --- a/modules/data.land/data-raw/build_soil_texture_variables.R +++ b/modules/data.land/data-raw/build_soil_texture_variables.R @@ -1,156 +1,181 @@ ### Build soil texture table variables ### Code from ED2/R-utils/soilutils.r -#==========================================================================================# -#==========================================================================================# +# ==========================================================================================# +# ==========================================================================================# # This variable has the "edges" of all soil types. # #------------------------------------------------------------------------------------------# -stext.lines = list() -stext.lines[[ 1]] = list(sand=c(0.900,0.850),clay=c(0.100,0.000)) -stext.lines[[ 2]] = list(sand=c(0.850,0.700),clay=c(0.150,0.000)) -stext.lines[[ 3]] = list(sand=c(0.800,0.525),clay=c(0.200,0.200)) -stext.lines[[ 4]] = list(sand=c(0.520,0.525),clay=c(0.200,0.075)) -stext.lines[[ 5]] = list(sand=c(0.425,0.525),clay=c(0.075,0.075)) -stext.lines[[ 6]] = list(sand=c(0.225,0.500),clay=c(0.275,0.000)) -stext.lines[[ 7]] = list(sand=c(0.200,0.075),clay=c(0.000,0.125)) -stext.lines[[ 8]] = list(sand=c(0.075,0.000),clay=c(0.125,0.125)) -stext.lines[[ 9]] = list(sand=c(0.525,0.450),clay=c(0.200,0.275)) -stext.lines[[10]] = list(sand=c(0.450,0.000),clay=c(0.275,0.275)) -stext.lines[[11]] = list(sand=c(0.200,0.200),clay=c(0.275,0.400)) -stext.lines[[12]] = list(sand=c(0.650,0.450),clay=c(0.350,0.350)) -stext.lines[[13]] = list(sand=c(0.450,0.450),clay=c(0.275,0.550)) -stext.lines[[14]] = list(sand=c(0.450,0.000),clay=c(0.400,0.400)) -stext.lines[[15]] = list(sand=c(0.200,0.000),clay=c(0.400,0.600)) -stext.lines[[16]] = list(sand=c(0.300,0.000),clay=c(0.400,0.700)) -stext.lines[[17]] = list(sand=c(0.300,0.300),clay=c(0.400,0.700)) -stext.lines[[18]] = list(sand=c(0.300,0.000),clay=c(0.700,0.700)) -nstext.lines = length(stext.lines) -for(n in 1:nstext.lines){ - stext.lines[[n]]$silt = pmax(0,pmin(1,1.-stext.lines[[n]]$sand-stext.lines[[n]]$clay)) -}#end for -#==========================================================================================# -#==========================================================================================# - - - - -#==========================================================================================# -#==========================================================================================# +stext.lines <- list() +stext.lines[[1]] <- list(sand = c(0.900, 0.850), clay = c(0.100, 0.000)) +stext.lines[[2]] <- list(sand = c(0.850, 0.700), clay = c(0.150, 0.000)) +stext.lines[[3]] <- list(sand = c(0.800, 0.525), clay = c(0.200, 0.200)) +stext.lines[[4]] <- list(sand = c(0.520, 0.525), clay = c(0.200, 0.075)) +stext.lines[[5]] <- list(sand = c(0.425, 0.525), clay = c(0.075, 0.075)) +stext.lines[[6]] <- list(sand = c(0.225, 0.500), clay = c(0.275, 0.000)) +stext.lines[[7]] <- list(sand = c(0.200, 0.075), clay = c(0.000, 0.125)) +stext.lines[[8]] <- list(sand = c(0.075, 0.000), clay = c(0.125, 0.125)) 
+stext.lines[[9]] <- list(sand = c(0.525, 0.450), clay = c(0.200, 0.275)) +stext.lines[[10]] <- list(sand = c(0.450, 0.000), clay = c(0.275, 0.275)) +stext.lines[[11]] <- list(sand = c(0.200, 0.200), clay = c(0.275, 0.400)) +stext.lines[[12]] <- list(sand = c(0.650, 0.450), clay = c(0.350, 0.350)) +stext.lines[[13]] <- list(sand = c(0.450, 0.450), clay = c(0.275, 0.550)) +stext.lines[[14]] <- list(sand = c(0.450, 0.000), clay = c(0.400, 0.400)) +stext.lines[[15]] <- list(sand = c(0.200, 0.000), clay = c(0.400, 0.600)) +stext.lines[[16]] <- list(sand = c(0.300, 0.000), clay = c(0.400, 0.700)) +stext.lines[[17]] <- list(sand = c(0.300, 0.300), clay = c(0.400, 0.700)) +stext.lines[[18]] <- list(sand = c(0.300, 0.000), clay = c(0.700, 0.700)) +nstext.lines <- length(stext.lines) +for (n in 1:nstext.lines) { + stext.lines[[n]]$silt <- pmax(0, pmin(1, 1. - stext.lines[[n]]$sand - stext.lines[[n]]$clay)) +} # end for +# ==========================================================================================# +# ==========================================================================================# + + + + +# ==========================================================================================# +# ==========================================================================================# # This variable has the "polygons" for all soil types. # #------------------------------------------------------------------------------------------# -stext.polygon = list() -stext.polygon[[ 1]] = list(sand = c(1.000,0.900,0.850) - ,clay = c(0.000,0.100,0.000) -)#end list -stext.polygon[[ 2]] = list(sand = c(0.900,0.850,0.700,0.850) - ,clay = c(0.100,0.150,0.000,0.000) -)#end list -stext.polygon[[ 3]] = list(sand = c(0.850,0.800,0.525,0.525,0.425,0.500,0.700) - ,clay = c(0.150,0.200,0.200,0.075,0.075,0.000,0.000) -)#end list -stext.polygon[[ 4]] = list(sand = c(0.500,0.225,0.000,0.000,0.075,0.200) - ,clay = c(0.000,0.275,0.275,0.125,0.125,0.000) -)#end list -stext.polygon[[ 5]] = list(sand = c(0.525,0.450,0.225,0.425,0.525) - ,clay = c(0.200,0.275,0.275,0.075,0.075) -)#end list -stext.polygon[[ 6]] = list(sand = c(0.800,0.650,0.450,0.450,0.525) - ,clay = c(0.200,0.350,0.350,0.275,0.200) -)#end list -stext.polygon[[ 7]] = list(sand = c(0.200,0.000,0.000,0.200) - ,clay = c(0.400,0.400,0.275,0.275) -)#end list -stext.polygon[[ 8]] = list(sand = c(0.450,0.200,0.200,0.450) - ,clay = c(0.400,0.400,0.275,0.275) -)#end list -stext.polygon[[ 9]] = list(sand = c(0.650,0.450,0.450) - ,clay = c(0.350,0.550,0.350) -)#end list -stext.polygon[[10]] = list(sand = c(0.200,0.000,0.000) - ,clay = c(0.400,0.600,0.400) -)#end list -stext.polygon[[11]] = list(sand = c(0.300,0.300,0.000) - ,clay = c(0.400,0.700,0.700) -)#end list -stext.polygon[[12]] = list(sand = c(NA,NA) - ,clay = c(NA,NA) -)#end list -stext.polygon[[13]] = list(sand = c(NA,NA) - ,clay = c(NA,NA) -)#end list -stext.polygon[[14]] = list(sand = c(0.200,0.075,0.000,0.000) - ,clay = c(0.000,0.125,0.125,0.000) -)#end list -stext.polygon[[15]] = list(sand = c(0.300,0.000,0.000) - ,clay = c(0.700,1.000,0.700) -)#end list -stext.polygon[[16]] = list(sand = c(0.450,0.300,0.300,0.450) - ,clay = c(0.550,0.700,0.400,0.400) -)#end list -stext.polygon[[17]] = list(sand = c(0.300,0.000,0.000,0.200) - ,clay = c(0.400,0.700,0.600,0.400) -)#end list -nstext.polygon = length(stext.polygon) - -for(n in 1:nstext.polygon){ - sand.now = stext.polygon[[n]]$sand - clay.now = stext.polygon[[n]]$clay - stext.polygon[[n]]$silt = pmax(0,pmin(1,1.-sand.now-clay.now)) -}#end for 
-#==========================================================================================# -#==========================================================================================# +stext.polygon <- list() +stext.polygon[[1]] <- list( + sand = c(1.000, 0.900, 0.850), + clay = c(0.000, 0.100, 0.000) +) # end list +stext.polygon[[2]] <- list( + sand = c(0.900, 0.850, 0.700, 0.850), + clay = c(0.100, 0.150, 0.000, 0.000) +) # end list +stext.polygon[[3]] <- list( + sand = c(0.850, 0.800, 0.525, 0.525, 0.425, 0.500, 0.700), + clay = c(0.150, 0.200, 0.200, 0.075, 0.075, 0.000, 0.000) +) # end list +stext.polygon[[4]] <- list( + sand = c(0.500, 0.225, 0.000, 0.000, 0.075, 0.200), + clay = c(0.000, 0.275, 0.275, 0.125, 0.125, 0.000) +) # end list +stext.polygon[[5]] <- list( + sand = c(0.525, 0.450, 0.225, 0.425, 0.525), + clay = c(0.200, 0.275, 0.275, 0.075, 0.075) +) # end list +stext.polygon[[6]] <- list( + sand = c(0.800, 0.650, 0.450, 0.450, 0.525), + clay = c(0.200, 0.350, 0.350, 0.275, 0.200) +) # end list +stext.polygon[[7]] <- list( + sand = c(0.200, 0.000, 0.000, 0.200), + clay = c(0.400, 0.400, 0.275, 0.275) +) # end list +stext.polygon[[8]] <- list( + sand = c(0.450, 0.200, 0.200, 0.450), + clay = c(0.400, 0.400, 0.275, 0.275) +) # end list +stext.polygon[[9]] <- list( + sand = c(0.650, 0.450, 0.450), + clay = c(0.350, 0.550, 0.350) +) # end list +stext.polygon[[10]] <- list( + sand = c(0.200, 0.000, 0.000), + clay = c(0.400, 0.600, 0.400) +) # end list +stext.polygon[[11]] <- list( + sand = c(0.300, 0.300, 0.000), + clay = c(0.400, 0.700, 0.700) +) # end list +stext.polygon[[12]] <- list( + sand = c(NA, NA), + clay = c(NA, NA) +) # end list +stext.polygon[[13]] <- list( + sand = c(NA, NA), + clay = c(NA, NA) +) # end list +stext.polygon[[14]] <- list( + sand = c(0.200, 0.075, 0.000, 0.000), + clay = c(0.000, 0.125, 0.125, 0.000) +) # end list +stext.polygon[[15]] <- list( + sand = c(0.300, 0.000, 0.000), + clay = c(0.700, 1.000, 0.700) +) # end list +stext.polygon[[16]] <- list( + sand = c(0.450, 0.300, 0.300, 0.450), + clay = c(0.550, 0.700, 0.400, 0.400) +) # end list +stext.polygon[[17]] <- list( + sand = c(0.300, 0.000, 0.000, 0.200), + clay = c(0.400, 0.700, 0.600, 0.400) +) # end list +nstext.polygon <- length(stext.polygon) + +for (n in 1:nstext.polygon) { + sand.now <- stext.polygon[[n]]$sand + clay.now <- stext.polygon[[n]]$clay + stext.polygon[[n]]$silt <- pmax(0, pmin(1, 1. - sand.now - clay.now)) +} # end for +# ==========================================================================================# +# ==========================================================================================# #----- Define some prescribed fractions. 
-----------------------------------------------# -xsand.def = c( 0.920, 0.825, 0.660, 0.200, 0.410, 0.590 - , 0.100, 0.320, 0.520, 0.060, 0.200, 0.200 - , 0.333, 0.075, 0.100, 0.375, 0.125) -xclay.def = c( 0.030, 0.060, 0.110, 0.160, 0.170, 0.270 - , 0.340, 0.340, 0.420, 0.470, 0.600, 0.200 - , 0.333, 0.050, 0.800, 0.525, 0.525) - -soil.name = c("Sand","Loamy sand","Sandy loam","Silt loam","Loam","Sandy clay loam" - ,"Silty clay loam","Clayey loam","Sandy clay","Silty clay","Clay" - ,"Peat","Bedrock","Silt","Heavy clay","Clayey sand","Clayey silt") - -texture <- read.csv("texture.csv",header=TRUE,stringsAsFactors = FALSE) - - - -soil.key = c("Sa","LSa","SaL","SiL","L","SaCL","SiCL","CL","SaC","SiC","C","P","BR" - ,"Si","CC","CSa","CSi") +xsand.def <- c( + 0.920, 0.825, 0.660, 0.200, 0.410, 0.590, + 0.100, 0.320, 0.520, 0.060, 0.200, 0.200, + 0.333, 0.075, 0.100, 0.375, 0.125 +) +xclay.def <- c( + 0.030, 0.060, 0.110, 0.160, 0.170, 0.270, + 0.340, 0.340, 0.420, 0.470, 0.600, 0.200, + 0.333, 0.050, 0.800, 0.525, 0.525 +) + +soil.name <- c( + "Sand", "Loamy sand", "Sandy loam", "Silt loam", "Loam", "Sandy clay loam", + "Silty clay loam", "Clayey loam", "Sandy clay", "Silty clay", "Clay", + "Peat", "Bedrock", "Silt", "Heavy clay", "Clayey sand", "Clayey silt" +) + +texture <- read.csv("texture.csv", header = TRUE, stringsAsFactors = FALSE) + + + +soil.key <- c( + "Sa", "LSa", "SaL", "SiL", "L", "SaCL", "SiCL", "CL", "SaC", "SiC", "C", "P", "BR", + "Si", "CC", "CSa", "CSi" +) #----- Define some constants. ----------------------------------------------------------# -fieldcp.K = PEcAn.utils::ud_convert(0.1,"mm/day","meters/second") - # hydraulic conduct. at field capacity [ mm/day] -soilcp.MPa = 3.1 # soil-water potential for air dry soil [ MPa] -soilwp.MPa = 1.5 # soil-water potential at wilting point [ MPa] -soilld.MPa = 0.75 # soil-water potential that plants start dropping leaves [ MPa] -theta.crit = 0.11 # fractional soil moisture that plants start dropping leaves [ m3/m3] -grav <- 9.80665 # Gravity acceleration [ m/s2] +fieldcp.K <- PEcAn.utils::ud_convert(0.1, "mm/day", "meters/second") +# hydraulic conduct. at field capacity [ mm/day] +soilcp.MPa <- 3.1 # soil-water potential for air dry soil [ MPa] +soilwp.MPa <- 1.5 # soil-water potential at wilting point [ MPa] +soilld.MPa <- 0.75 # soil-water potential that plants start dropping leaves [ MPa] +theta.crit <- 0.11 # fractional soil moisture that plants start dropping leaves [ m3/m3] +grav <- 9.80665 # Gravity acceleration [ m/s2] #---------------------------------------------------------------------------------------# #---------------------------------------------------------------------------------------# # Soil heat capacity. Didn't find silt values, using average between sand and clay # #---------------------------------------------------------------------------------------# -sand.hcap = 2.128e6 -clay.hcap = 2.385e6 -silt.hcap = .5 * (sand.hcap + clay.hcap) -air.hcap = 1212 +sand.hcap <- 2.128e6 +clay.hcap <- 2.385e6 +silt.hcap <- .5 * (sand.hcap + clay.hcap) +air.hcap <- 1212 #---------------------------------------------------------------------------------------# #---------------------------------------------------------------------------------------# # Soil heat capacity. 
Didn't find silt values, using average between sand and clay # #---------------------------------------------------------------------------------------# -sand.cond = 8.80 -clay.cond = 2.92 -silt.cond = .5 * (sand.cond + clay.cond) -air.cond = 0.025 -h2o.cond = 0.57 +sand.cond <- 8.80 +clay.cond <- 2.92 +silt.cond <- .5 * (sand.cond + clay.cond) +air.cond <- 0.025 +h2o.cond <- 0.57 -ksand <- 3. * h2o.cond / ( 2. * h2o.cond + sand.cond ) -ksilt <- 3. * h2o.cond / ( 2. * h2o.cond + silt.cond ) -kclay <- 3. * h2o.cond / ( 2. * h2o.cond + clay.cond ) -kair <- 3. * h2o.cond / ( 2. * h2o.cond + air.cond ) +ksand <- 3. * h2o.cond / (2. * h2o.cond + sand.cond) +ksilt <- 3. * h2o.cond / (2. * h2o.cond + silt.cond) +kclay <- 3. * h2o.cond / (2. * h2o.cond + clay.cond) +kair <- 3. * h2o.cond / (2. * h2o.cond + air.cond) # TODO may be more useful to collect related variables into sublists... soil_class <- list( @@ -179,6 +204,7 @@ soil_class <- list( texture = texture, theta.crit = theta.crit, xclay.def = xclay.def, - xsand.def = xsand.def) + xsand.def = xsand.def +) save(soil_class, file = "../data/soil_class.rda") diff --git a/modules/data.land/data-raw/load_BADM.R b/modules/data.land/data-raw/load_BADM.R index f5732a69e9b..b614a41b377 100644 --- a/modules/data.land/data-raw/load_BADM.R +++ b/modules/data.land/data-raw/load_BADM.R @@ -1,7 +1,7 @@ # Convert the BADM data to rda for ~30x compression! # # BADM = Biological, Ancillary, Disturbance and Metadata; -# part of the FluxNet protocol. +# part of the FluxNet protocol. # Exact provenance of this particular file is not clear, but it contains data # for 246 siteIDs and includes observations from 120 variable names. # Not all rows have dates, but those that do range from 1983 to 2018. @@ -11,7 +11,7 @@ # * Could probably reduce size further if desired by dropping variables not # used by the package # * Seems to be intended as an internal reference rather than a dataset -# intended to be exposed to users. Consider moving to sysdata.rda instead? +# intended to be exposed to users. Consider moving to sysdata.rda instead? 
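# A minimal sketch of the round trip this script sets up (the package name and the
# compress setting are assumptions, not part of this file): downstream code can reload
# the object with data(), and save()'s compress argument can shrink the .rda further.
if (FALSE) {
  data("BADM", package = "PEcAn.data.land") # reload the bundled copy
  save(BADM, file = "../data/BADM.rda", compress = "xz") # optional tighter compression
}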
BADM <- read.csv("BADM.csv") save(BADM, file = "../data/BADM.rda") diff --git a/modules/data.land/inst/BCI_AGB.R b/modules/data.land/inst/BCI_AGB.R index d2e120142a2..93d9f4c3bc5 100644 --- a/modules/data.land/inst/BCI_AGB.R +++ b/modules/data.land/inst/BCI_AGB.R @@ -1,54 +1,54 @@ ## Barro Colorado Island AGB timeseries library(dplyr) indir <- "~/Dropbox/Desktop/Projects/BCI/" -files <- dir(indir,pattern = "*.txt",full.names = TRUE) -files <- files[grep(pattern = "Census",files)] +files <- dir(indir, pattern = "*.txt", full.names = TRUE) +files <- files[grep(pattern = "Census", files)] nf <- length(files) -stats <- data.frame(date=rep(NA,nf),AGB=rep(NA,nf)) +stats <- data.frame(date = rep(NA, nf), AGB = rep(NA, nf)) area <- 50 ## ha -for(f in seq_along(files)){ - myfile = files[f] - +for (f in seq_along(files)) { + myfile <- files[f] + ## get census number sl <- nchar(myfile) - i <- as.numeric(substr(myfile,start = sl-4,stop = sl-4)) - print(c(f,i)) - + i <- as.numeric(substr(myfile, start = sl - 4, stop = sl - 4)) + print(c(f, i)) + ## read raw data - dat <- read.table(myfile,header = TRUE,as.is = TRUE,sep="\t") - + dat <- read.table(myfile, header = TRUE, as.is = TRUE, sep = "\t") + ## filter data - dat <- dplyr::filter(dat,Status == 'alive') - dbh <- dat$DBH/10 - + dat <- dplyr::filter(dat, Status == "alive") + dbh <- dat$DBH / 10 + ## allometric estimation of height and AGB - m = 0.64 - c = 0.37 - max_dbh = 68 - wood_density = 0.75 ## shameless hack -- should be set on a species basis - h = ifelse(dbh < max_dbh,10^(log10(dbh) * m + c),10^(log10(max_dbh)*m + c)) ## FATES allom, meters - bdead = 0.06896*(h^0.572)*(dbh^1.94)*wood_density^0.931 ## FATES allom, units = KgC?? - + m <- 0.64 + c <- 0.37 + max_dbh <- 68 + wood_density <- 0.75 ## shameless hack -- should be set on a species basis + h <- ifelse(dbh < max_dbh, 10^(log10(dbh) * m + c), 10^(log10(max_dbh) * m + c)) ## FATES allom, meters + bdead <- 0.06896 * (h^0.572) * (dbh^1.94) * wood_density^0.931 ## FATES allom, units = KgC?? 
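  # Quick illustrative check of the allometry above for a single stem (example numbers,
  # not census data; kept inside if (FALSE) so it never runs in the loop). With the same
  # coefficients, a 30 cm DBH stem and the hacked-in wood density of 0.75 give a height
  # of roughly 20.7 m and a structural biomass on the order of 220 (units uncertain in
  # the source, flagged "KgC??").
  if (FALSE) {
    dbh_ex <- 30 # cm, below the 68 cm cap so the uncapped branch applies
    h_ex <- 10^(log10(dbh_ex) * 0.64 + 0.37)
    bdead_ex <- 0.06896 * (h_ex^0.572) * (dbh_ex^1.94) * 0.75^0.931
  }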
+ ## conversion to plot-level mean - stats$AGB[i] <- PEcAn.utils::ud_convert(sum(bdead,na.rm=TRUE)/area,"kg ha-1","kg m-2") #AbvGrndWood kgC m-2 - + stats$AGB[i] <- PEcAn.utils::ud_convert(sum(bdead, na.rm = TRUE) / area, "kg ha-1", "kg m-2") # AbvGrndWood kgC m-2 + ## extract dates date <- as.Date(dat$Date) - + ## assign the plot the mean census date - stats$date[i] <- mean(date,na.rm=TRUE) - + stats$date[i] <- mean(date, na.rm = TRUE) + ## save in case there's a crash - saveRDS(stats,"BCI_AGB.RDS") + saveRDS(stats, "BCI_AGB.RDS") } ## sanity check -agb <- PEcAn.utils::ud_convert(stats$AGB,"kg m-2","Mg ha-1")/0.48 -plot(as.Date(stats$date,origin = "1970-01-01"),agb) +agb <- PEcAn.utils::ud_convert(stats$AGB, "kg m-2", "Mg ha-1") / 0.48 +plot(as.Date(stats$date, origin = "1970-01-01"), agb) stats$AGB[3] <- NA ## odd outlier, original value = 22.95379 ## save as csv -outstats <- data.frame(date=as.Date(stats$date,origin = "1970-01-01"),AGB=stats$AGB) -write.csv(outstats,file = "BCI_AGB.csv",quote = FALSE,row.names = FALSE) +outstats <- data.frame(date = as.Date(stats$date, origin = "1970-01-01"), AGB = stats$AGB) +write.csv(outstats, file = "BCI_AGB.csv", quote = FALSE, row.names = FALSE) diff --git a/modules/data.land/inst/CanopyCover2NLCD.R b/modules/data.land/inst/CanopyCover2NLCD.R index a4f4be2a4bd..969e769d052 100644 --- a/modules/data.land/inst/CanopyCover2NLCD.R +++ b/modules/data.land/inst/CanopyCover2NLCD.R @@ -1,32 +1,32 @@ ##' CanopyCover2NLCD ##' ##' It uses css file which is made by fia2ED module and provides the canopy cover area. -##' During development of cohort and patch level information from FIA database, -##' we started to compare the extracted data to RS data for same plots in order to see the differences -##' and also adjust the patch level information for ED input. We calculated the height based on DBH using -##' allometry. We know the area of each plot which is sum of 4 sub plot and plant density (from FIA). -##' We calculated tree height, based on DBH using allometry and then sorted them from tallest to shortest. -##' We then calculated the canopy area base on allometry and following that calculated crown to plot ratio. -##' Therefore we have canopy area of each tree, height and total area of each plot. -##' We sorted them from tallest to shortest and start adding canopy areas together to reach 100% land cover, -##' if adding the last tree causes to go beyond 100%, we dropped that. +##' During development of cohort and patch level information from FIA database, +##' we started to compare the extracted data to RS data for same plots in order to see the differences +##' and also adjust the patch level information for ED input. We calculated the height based on DBH using +##' allometry. We know the area of each plot which is sum of 4 sub plot and plant density (from FIA). +##' We calculated tree height, based on DBH using allometry and then sorted them from tallest to shortest. +##' We then calculated the canopy area base on allometry and following that calculated crown to plot ratio. +##' Therefore we have canopy area of each tree, height and total area of each plot. +##' We sorted them from tallest to shortest and start adding canopy areas together to reach 100% land cover, +##' if adding the last tree causes to go beyond 100%, we dropped that. 
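##'
##' A tiny sketch of that accumulation rule with made-up numbers (illustrative only,
##' not FIA data): crowns are taken tallest-first and kept while cumulative cover is
##' still below 100%; whatever remains is treated as grass.
##' crown_pct <- c(45, 30, 20, 15)      # crown/plot ratios, sorted tallest first
##' keep <- cumsum(crown_pct) < 100     # TRUE TRUE TRUE FALSE: the last crown would overshoot
##' grass <- 100 - sum(crown_pct[keep]) # 5% of the plot left as grass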
##' ##' @title CanopyCover2NLCD ##' @author Afshin Pourmokhtarian & Michael Dietze -library (data.table) +library(data.table) cls <- c("numeric", "character", rep("numeric", 8)) -filelist = list.files("/home/apourmok/My scripts/",pattern = "sitefiles.radius_0.075.*.css", full.names = TRUE) -#filelist = list.files(".",pattern = "sitefiles.radius_0.075.*.css", full.names = TRUE) +filelist <- list.files("/home/apourmok/My scripts/", pattern = "sitefiles.radius_0.075.*.css", full.names = TRUE) +# filelist = list.files(".",pattern = "sitefiles.radius_0.075.*.css", full.names = TRUE) -#loop over all files -#filelist = list.files("/home/apourmok/pecan/modules/data.land/R/~/",pattern = "sitefiles.radius_0.075.*.css", full.names = TRUE) -#for (k in 1:20){ - #mydata = fread(filelist[i], header=TRUE, colClasses = cls) -#} +# loop over all files +# filelist = list.files("/home/apourmok/pecan/modules/data.land/R/~/",pattern = "sitefiles.radius_0.075.*.css", full.names = TRUE) +# for (k in 1:20){ +# mydata = fread(filelist[i], header=TRUE, colClasses = cls) +# } -mydata = fread(filelist, header=TRUE, colClasses = cls) +mydata <- fread(filelist, header = TRUE, colClasses = cls) # Canopy Area allometry from Dietze and Clark (2008) ==> Allometry.f90 @@ -37,84 +37,86 @@ mydata = fread(filelist, header=TRUE, colClasses = cls) # b1Ca(5:13) = 2.490154 # !----- Slope. -------------------------------------------------------------------------! # b2Ca(5:13) = 0.8068806 -dbh2ca = 2.490154 * mydata$dbh ** 0.8068806 -#writing calculated crown size based on allometry back to the file -mydata [,c("Crown") := dbh2ca] -#Temperate PFT allometry -#dbh2h = hgt_ref(ipft) + b1Ht(ipft) * (1.0 - exp(b2Ht(ipft) * dbh)) +dbh2ca <- 2.490154 * mydata$dbh**0.8068806 +# writing calculated crown size based on allometry back to the file +mydata[, c("Crown") := dbh2ca] +# Temperate PFT allometry +# dbh2h = hgt_ref(ipft) + b1Ht(ipft) * (1.0 - exp(b2Ht(ipft) * dbh)) if (mydata$pft == 7) { - dbh2h = 0.3 + 27.14 * (1.0 - exp((-0.03884) * mydata$dbh)) -}else if (mydata$pft == 8){ - dbh2h = 0.3 + 22.79 * (1.0 - exp((-0.04445) * mydata$dbh)) -}else if (mydata$pft == 9){ - dbh2h = 0.3 + 22.6799 * (1.0 - exp((-0.06534) * mydata$dbh)) -}else if (mydata$pft == 10){ - dbh2h = 0.3 + 25.18 * (1.0 - exp((-0.04964) * mydata$dbh)) -}else if (mydata$pft == 11){ - dbh2h = 0.3 + 23.3874 * (1.0 - exp((-0.05404) * mydata$dbh)) -}else if (mydata$pft == 18){ - dbh2h = 0.3 + 25.18 * (1.0 - exp((-0.04964) * mydata$dbh)) -}else if (mydata$pft == 19){ - dbh2h = 0.3 + 25.18 * (1.0 - exp((-0.04964) * mydata$dbh)) -}else { - dbh2h = 0.3 + 25.18 * (1.0 - exp((-0.04964) * mydata$dbh)) + dbh2h <- 0.3 + 27.14 * (1.0 - exp((-0.03884) * mydata$dbh)) +} else if (mydata$pft == 8) { + dbh2h <- 0.3 + 22.79 * (1.0 - exp((-0.04445) * mydata$dbh)) +} else if (mydata$pft == 9) { + dbh2h <- 0.3 + 22.6799 * (1.0 - exp((-0.06534) * mydata$dbh)) +} else if (mydata$pft == 10) { + dbh2h <- 0.3 + 25.18 * (1.0 - exp((-0.04964) * mydata$dbh)) +} else if (mydata$pft == 11) { + dbh2h <- 0.3 + 23.3874 * (1.0 - exp((-0.05404) * mydata$dbh)) +} else if (mydata$pft == 18) { + dbh2h <- 0.3 + 25.18 * (1.0 - exp((-0.04964) * mydata$dbh)) +} else if (mydata$pft == 19) { + dbh2h <- 0.3 + 25.18 * (1.0 - exp((-0.04964) * mydata$dbh)) +} else { + dbh2h <- 0.3 + 25.18 * (1.0 - exp((-0.04964) * mydata$dbh)) } -#writing calculated height based on allometry back to the file -mydata [,c("Height") := dbh2h] - -#Each subplot radius is 7.32m, area is 168.33 m2, 4 subplot in each plot, therefore total area is 
673.32 m2 -crown_plot_ratio = dbh2ca*mydata$n*100 -mydata [,c("Area") := crown_plot_ratio] - -#sorting trees from tallest to shortest starting for each patch -md = mydata[order(patch,-rank(Height),decreasing=FALSE)] - -#summing up the crown area of trees from tallest to shortest -patches = unique(mydata$patch) -pfts = unique(mydata$pft) -LandCover = matrix(numeric(1),length(patches),max(pfts)) - -Canopy = numeric(nrow(md)) -md = cbind(md,Canopy) -grass = numeric(length(patches)) -for(i in seq_along(patches)){ - sel = which(md$patch == patches[i]) - if(length(sel)>0){ - CAsum = cumsum(md$Area[sel]) - md$Canopy[sel] = CAsum < 100 - patchCov = tapply(md$Area[sel]*md$Canopy[sel],md$pft[sel],sum) - LandCover[i,as.numeric(names(patchCov))]=patchCov - if(CAsum[length(CAsum)]<100){grass[i]=100-CAsum[length(CAsum)]} - } +# writing calculated height based on allometry back to the file +mydata[, c("Height") := dbh2h] + +# Each subplot radius is 7.32m, area is 168.33 m2, 4 subplot in each plot, therefore total area is 673.32 m2 +crown_plot_ratio <- dbh2ca * mydata$n * 100 +mydata[, c("Area") := crown_plot_ratio] + +# sorting trees from tallest to shortest starting for each patch +md <- mydata[order(patch, -rank(Height), decreasing = FALSE)] + +# summing up the crown area of trees from tallest to shortest +patches <- unique(mydata$patch) +pfts <- unique(mydata$pft) +LandCover <- matrix(numeric(1), length(patches), max(pfts)) + +Canopy <- numeric(nrow(md)) +md <- cbind(md, Canopy) +grass <- numeric(length(patches)) +for (i in seq_along(patches)) { + sel <- which(md$patch == patches[i]) + if (length(sel) > 0) { + CAsum <- cumsum(md$Area[sel]) + md$Canopy[sel] <- CAsum < 100 + patchCov <- tapply(md$Area[sel] * md$Canopy[sel], md$pft[sel], sum) + LandCover[i, as.numeric(names(patchCov))] <- patchCov + if (CAsum[length(CAsum)] < 100) { + grass[i] <- 100 - CAsum[length(CAsum)] + } + } } -LandCover = cbind(LandCover,grass) +LandCover <- cbind(LandCover, grass) LandCover <- data.table(LandCover) -row.names(LandCover) = patches +row.names(LandCover) <- patches LandCover -#write out the output -setnames(LandCover,c(1:20,"grass")) -write.csv(LandCover,row.names=TRUE,file =paste0(filelist,".csv")) +# write out the output +setnames(LandCover, c(1:20, "grass")) +write.csv(LandCover, row.names = TRUE, file = paste0(filelist, ".csv")) -apply(LandCover,2,mean) -#patchnumbers<-mydata[, count := (unique(patch)] +apply(LandCover, 2, mean) +# patchnumbers<-mydata[, count := (unique(patch)] -#writing out the crown and height back to the same file -write.table(mydata, filelist, row.names=FALSE, quote=FALSE) +# writing out the crown and height back to the same file +write.table(mydata, filelist, row.names = FALSE, quote = FALSE) -#b1Ht(7) = 27.14 -#b1Ht(8) = 22.79 -#b1Ht(9) = 22.6799 -#b1Ht(10) = 25.18 -#b1Ht(11) = 23.3874 +# b1Ht(7) = 27.14 +# b1Ht(8) = 22.79 +# b1Ht(9) = 22.6799 +# b1Ht(10) = 25.18 +# b1Ht(11) = 23.3874 -#b2Ht(7) = -0.03884 -#b2Ht(8) = -0.04445 -#b2Ht(9) = -0.06534 -#b2Ht(10) = -0.04964 -#b2Ht(11) = -0.05404 +# b2Ht(7) = -0.03884 +# b2Ht(8) = -0.04445 +# b2Ht(9) = -0.06534 +# b2Ht(10) = -0.04964 +# b2Ht(11) = -0.05404 -#hgt_ref(6:11) = 0.3 +# hgt_ref(6:11) = 0.3 diff --git a/modules/data.land/inst/LoadFLUXNETsites.R b/modules/data.land/inst/LoadFLUXNETsites.R index dc5eb7490f2..69dd226cb1b 100644 --- a/modules/data.land/inst/LoadFLUXNETsites.R +++ b/modules/data.land/inst/LoadFLUXNETsites.R @@ -1,23 +1,25 @@ ## Script to load AmeriFLUX and FLUXNET sites into the BETY database ## Mike Dietze ## -## - 
expected behaviour is to ignore existing records where the FLUXNET code +## - expected behaviour is to ignore existing records where the FLUXNET code ## is in the site.sitename ## - not all site attributes are available ## - additional info (e.g. IGBP code) is put in notes -user.id = 1000000001 +user.id <- 1000000001 library("RCurl") library("XML") library(PEcAn.DB) -nu <- function(x){as.numeric(as.character(x))} ## simple function to convert data to numeric +nu <- function(x) { + as.numeric(as.character(x)) +} ## simple function to convert data to numeric ############### OPEN PSQL PORT ON VM ####### # -# This was tested by running the code on my laptop and writing the entries +# This was tested by running the code on my laptop and writing the entries # to my local VM. Here's how I opened that port. # # 0) port forwarding: open 5432 as 3232 @@ -28,180 +30,178 @@ nu <- function(x){as.numeric(as.character(x))} ## simple function to convert da # 2) sudo nano /etc/postgresql/9.3/main/pg_hba.conf # added: ## IPv4 local connections: -#host all all 127.0.0.1/32 md5 -#host all all 192.168.1.0/24 md5 -#host all all 10.0.2.2/32 md5 +# host all all 127.0.0.1/32 md5 +# host all all 192.168.1.0/24 md5 +# host all all 10.0.2.2/32 md5 # # 3) sudo service postgresql restart -site.map <- data.frame(FLUX.id=rep(NA,2000),site.id=rep(NA,2000)) +site.map <- data.frame(FLUX.id = rep(NA, 2000), site.id = rep(NA, 2000)) ########### AMERIFLUX # Turns out the AMERIFLUX website is dynamics so the following code doesn't work # One currently has to download the html from the browser by hand # and then work with the copy below -#AMERIFLUX_html <- getURL("http://ameriflux.lbl.gov/sites/site-list-and-pages/") ## grab raw html -#AMERIFLUX_table = readHTMLTable(AMERIFLUX_html)[[1]] ##grab first table on the webpage +# AMERIFLUX_html <- getURL("http://ameriflux.lbl.gov/sites/site-list-and-pages/") ## grab raw html +# AMERIFLUX_table = readHTMLTable(AMERIFLUX_html)[[1]] ##grab first table on the webpage ## GET LIST OF AMERIFLUX SITES AMERIFLUX_html <- getURL("file:///home/dietze/ListofAmeriFluxSites.html") -AMERIFLUX_table = readHTMLTable(AMERIFLUX_html)[[1]] ##grab first table on the webpage +AMERIFLUX_table <- readHTMLTable(AMERIFLUX_html)[[1]] ## grab first table on the webpage ################### PROCESS DOWNLOADED FILE ###################### ## open database connection library(RPostgreSQL) -driver <- "PostgreSQL" -user <- "bety" -dbname <- "bety" +driver <- "PostgreSQL" +user <- "bety" +dbname <- "bety" password <- "bety" -host <- "localhost" -port <- "3232" -dbparms <- list(driver=driver, user=user, dbname=dbname, password=password, host=host,port=port) -con <- db.open(dbparms) +host <- "localhost" +port <- "3232" +dbparms <- list(driver = driver, user = user, dbname = dbname, password = password, host = host, port = port) +con <- db.open(dbparms) ## GET LIST OF PEcAn site names -pecan.sites = db.query("SELECT * from sites",con) +pecan.sites <- db.query("SELECT * from sites", con) -nsite = nrow(AMERIFLUX_table) -for(s in 1:nsite){ - +nsite <- nrow(AMERIFLUX_table) +for (s in 1:nsite) { ## check whether site exists in BETYdb - code = as.character(AMERIFLUX_table$SITE_ID[s]) - id = grep(code,pecan.sites$sitename) - site.id = pecan.sites$id[id] - - site.map[which(is.na(site.map[,1]))[1],] <- c(code,site.id) - - - print(c(s,code,id)) - + code <- as.character(AMERIFLUX_table$SITE_ID[s]) + id <- grep(code, pecan.sites$sitename) + site.id <- pecan.sites$id[id] + + site.map[which(is.na(site.map[, 1]))[1], ] <- c(code, site.id) 
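  # Side note (sketch only, not part of this script): the INSERT assembled below with
  # paste0() could also be built with glue::glue_sql(), which quotes text values itself
  # and avoids the manual sub("'", "", ...) cleanup; the geometry term is omitted here
  # for brevity. Assumes `con` is the connection opened above with db.open().
  if (FALSE) {
    insert_sql <- glue::glue_sql(
      "INSERT INTO sites (sitename, country, notes, user_id)
       VALUES ({sitename}, {country}, {notes}, {user.id})",
      .con = con
    )
    db.query(insert_sql, con)
  }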
+ + + print(c(s, code, id)) + ## if site does not exist, insert a new site - if(length(id) == 0){ - longname = as.character(AMERIFLUX_table$SITE_NAME[s]) - longname = sub("'","",longname) # drop single quotes from name - sitename = paste0(longname," (",code,")") - country = substr(code,1,2) - mat = nu(AMERIFLUX_table$MAT[s]) - if(is.na(mat)) mat = "NULL" - map = as.integer(nu(AMERIFLUX_table$MAP[s])) - if(is.na(map)) map = "NULL" - lat = nu(AMERIFLUX_table$LOCATION_LAT[s]) - lon = nu(AMERIFLUX_table$LOCATION_LONG[s]) - elev = nu(AMERIFLUX_table$LOCATION_ELEV[s]) - if(is.na(elev)) elev = 0 - notes = paste("IGBP =",as.character(AMERIFLUX_table$IGBP[s]), - " CLIMATE_KOEPPEN =",as.character(AMERIFLUX_table$CLIMATE_KOEPPEN[s]), - " TOWER_BEGAN =",as.character(AMERIFLUX_table$TOWER_BEGAN[s]), - " TOWER_END =",as.character(AMERIFLUX_table$TOWER_END[s]) - ) - InsertString = paste0("INSERT INTO sites(sitename,country,mat,map,notes,geometry,user_id) VALUES(", - "'",sitename,"', ", - "'",country,"', ", - mat,", ", - map,", ", - "'",notes,"', ", - "ST_GeomFromText('POINT(",lon," ",lat," ",elev,")', 4326), ", - user.id, - ");") - db.query(InsertString,con) + if (length(id) == 0) { + longname <- as.character(AMERIFLUX_table$SITE_NAME[s]) + longname <- sub("'", "", longname) # drop single quotes from name + sitename <- paste0(longname, " (", code, ")") + country <- substr(code, 1, 2) + mat <- nu(AMERIFLUX_table$MAT[s]) + if (is.na(mat)) mat <- "NULL" + map <- as.integer(nu(AMERIFLUX_table$MAP[s])) + if (is.na(map)) map <- "NULL" + lat <- nu(AMERIFLUX_table$LOCATION_LAT[s]) + lon <- nu(AMERIFLUX_table$LOCATION_LONG[s]) + elev <- nu(AMERIFLUX_table$LOCATION_ELEV[s]) + if (is.na(elev)) elev <- 0 + notes <- paste( + "IGBP =", as.character(AMERIFLUX_table$IGBP[s]), + " CLIMATE_KOEPPEN =", as.character(AMERIFLUX_table$CLIMATE_KOEPPEN[s]), + " TOWER_BEGAN =", as.character(AMERIFLUX_table$TOWER_BEGAN[s]), + " TOWER_END =", as.character(AMERIFLUX_table$TOWER_END[s]) + ) + InsertString <- paste0( + "INSERT INTO sites(sitename,country,mat,map,notes,geometry,user_id) VALUES(", + "'", sitename, "', ", + "'", country, "', ", + mat, ", ", + map, ", ", + "'", notes, "', ", + "ST_GeomFromText('POINT(", lon, " ", lat, " ", elev, ")', 4326), ", + user.id, + ");" + ) + db.query(InsertString, con) } - } # FLUXNET ---------------------------------------------------------------- -FLUXNET_html <- getURL("http://fluxnet.ornl.gov/site_status") ## grab raw html -FLUXNET_table = readHTMLTable(FLUXNET_html)[[1]] ##grab first table on the webpage -FLUXNET_siteURL = "http://fluxnet.ornl.gov/site/" +FLUXNET_html <- getURL("http://fluxnet.ornl.gov/site_status") ## grab raw html +FLUXNET_table <- readHTMLTable(FLUXNET_html)[[1]] ## grab first table on the webpage +FLUXNET_siteURL <- "http://fluxnet.ornl.gov/site/" ## preprocess raw html -raw.rows = strsplit(FLUXNET_html,"")[[1]] - raw.site = raw.site[grep("href",raw.site)] - raw.site = sub("")[[1]] + raw.site <- raw.site[grep("href", raw.site)] + raw.site <- sub("0) + return(length(grep("Elevation", paste(y))) > 0) }) - y[1] = FALSE # if "Elevation" is in the description, skip - if(sum(y)>0){ - site.tab = which(as.logical(y))[1] - site.char = apply(site_table[[site.tab]],2,as.character) - site.char = paste(site.char[,1],site.char[,2],collapse="; ") - erow = grep("Elevation",as.character(site_table[[site.tab]][,1])) - elev = as.numeric(sub("m","",as.character(site_table[[site.tab]][erow,2]))) - } else{ - elev = -9999 - site.char = NULL + y[1] <- FALSE # if "Elevation" is in the 
description, skip + if (sum(y) > 0) { + site.tab <- which(as.logical(y))[1] + site.char <- apply(site_table[[site.tab]], 2, as.character) + site.char <- paste(site.char[, 1], site.char[, 2], collapse = "; ") + erow <- grep("Elevation", as.character(site_table[[site.tab]][, 1])) + elev <- as.numeric(sub("m", "", as.character(site_table[[site.tab]][erow, 2]))) + } else { + elev <- -9999 + site.char <- NULL } - - notes = paste0("PI: ",PI,"; ",site.char,"; FLUXNET DESCRIPTION: ",description) - notes = gsub("'","",notes) # drop single quotes from notes - - InsertString = paste0("INSERT INTO sites(sitename,country,notes,geometry,user_id) VALUES(", - "'",sitename,"', ", - "'",country,"', ", - "'",notes,"', ", - "ST_GeomFromText('POINT(",lon," ",lat," ",elev,")', 4326), ", - user.id, - ");") - db.query(InsertString,con) - + + notes <- paste0("PI: ", PI, "; ", site.char, "; FLUXNET DESCRIPTION: ", description) + notes <- gsub("'", "", notes) # drop single quotes from notes + + InsertString <- paste0( + "INSERT INTO sites(sitename,country,notes,geometry,user_id) VALUES(", + "'", sitename, "', ", + "'", country, "', ", + "'", notes, "', ", + "ST_GeomFromText('POINT(", lon, " ", lat, " ", elev, ")', 4326), ", + user.id, + ");" + ) + db.query(InsertString, con) } ## end IF new site - -} ## end loop over sites +} ## end loop over sites db.close(con) ## remove duplicates & save -site.map = na.omit(site.map) -site.map = unique(site.map) -write.csv(site.map,"FLUXNET.sitemap.csv") - +site.map <- na.omit(site.map) +site.map <- unique(site.map) +write.csv(site.map, "FLUXNET.sitemap.csv") diff --git a/modules/data.land/inst/LoadPalEONsites.R b/modules/data.land/inst/LoadPalEONsites.R index 3c010bc7ccc..5be98aba9f2 100644 --- a/modules/data.land/inst/LoadPalEONsites.R +++ b/modules/data.land/inst/LoadPalEONsites.R @@ -2,90 +2,90 @@ ## Mike Dietze ## -user.id = 1000000001 ## replace with your own personal user.id +user.id <- 1000000001 ## replace with your own personal user.id library("RCurl") library("XML") library(PEcAn.DB) -nu <- function(x){as.numeric(as.character(x))} ## simple function to convert data to numeric +nu <- function(x) { + as.numeric(as.character(x)) +} ## simple function to convert data to numeric ## load up PalEON site files -paleon <- read.csv("~/paleon/Paleon_MIP_Phase2_ED_Order_Status.csv",stringsAsFactors = FALSE) +paleon <- read.csv("~/paleon/Paleon_MIP_Phase2_ED_Order_Status.csv", stringsAsFactors = FALSE) priority <- read.csv("~/paleon/new.ed.mat.csv") paleon$notes[paleon$num %in% priority$num] <- "priority" -paleon$notes[1:6] <- "site" ## mark the original 6 Site MIP grid cells +paleon$notes[1:6] <- "site" ## mark the original 6 Site MIP grid cells paleon.sitegroups <- unique(paleon$notes) - paleon.sitegroups <- paleon.sitegroups[-which(paleon.sitegroups=="site")] +paleon.sitegroups <- paleon.sitegroups[-which(paleon.sitegroups == "site")] # note: code currently assumes sitegroups were created in BETY with the pattern "PalEON_" - - + + ## open database connection library(RPostgreSQL) -driver <- "PostgreSQL" -user <- "bety" -dbname <- "bety" +driver <- "PostgreSQL" +user <- "bety" +dbname <- "bety" password <- "bety" -host <- "psql-pecan.bu.edu" -dbparms <- list(driver=driver, user=user, dbname=dbname, password=password, host=host) -con <- db.open(dbparms) +host <- "psql-pecan.bu.edu" +dbparms <- list(driver = driver, user = user, dbname = dbname, password = password, host = host) +con <- db.open(dbparms) ############# -for(i in seq_along(paleon.sitegroups)){ - - 
print(paste("************",paleon.sitegroups[i],"*************")) - +for (i in seq_along(paleon.sitegroups)) { + print(paste("************", paleon.sitegroups[i], "*************")) + ## subset data - group <- paleon[which(paleon$notes == paleon.sitegroups[i]),] - + group <- paleon[which(paleon$notes == paleon.sitegroups[i]), ] + ## query existing site group and sites - pecan.sites = db.query("SELECT * from sites",con) - pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_",paleon.sitegroups[i],"'"),con) - if(nrow(pecan.sitegroup) == 0){ + pecan.sites <- db.query("SELECT * from sites", con) + pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_", paleon.sitegroups[i], "'"), con) + if (nrow(pecan.sitegroup) == 0) { print("SITEGROUP not found") break() } - pecan.sgs <- db.query(paste("SELECT * from sitegroups_sites where sitegroup_id =",pecan.sitegroup$id),con) - - ## loop over new sites - for(j in seq_len(nrow(group))){ + pecan.sgs <- db.query(paste("SELECT * from sitegroups_sites where sitegroup_id =", pecan.sitegroup$id), con) + ## loop over new sites + for (j in seq_len(nrow(group))) { ## detect if site exists or not - code <- paste0("PalEON_",group$num[j]) - id = grep(code,pecan.sites$sitename) - site.id = pecan.sites$id[id] - print(paste(code,id,site.id)) - + code <- paste0("PalEON_", group$num[j]) + id <- grep(code, pecan.sites$sitename) + site.id <- pecan.sites$id[id] + print(paste(code, id, site.id)) + ## if new site add - if(length(id) == 0){ - sitename = code - country = "US" - lat = nu(group$lat[j]) - lon = nu(group$lon[j]) - elev = 0 - InsertString = paste0("INSERT INTO sites(sitename,country,geometry,user_id) VALUES(", - "'",sitename,"', ", - "'",country,"', ", - "ST_GeomFromText('POINT(",lon," ",lat," ",elev,")', 4326), ", - user.id,") returning id ;") - new.id = db.query(InsertString,con) - + if (length(id) == 0) { + sitename <- code + country <- "US" + lat <- nu(group$lat[j]) + lon <- nu(group$lon[j]) + elev <- 0 + InsertString <- paste0( + "INSERT INTO sites(sitename,country,geometry,user_id) VALUES(", + "'", sitename, "', ", + "'", country, "', ", + "ST_GeomFromText('POINT(", lon, " ", lat, " ", elev, ")', 4326), ", + user.id, ") returning id ;" + ) + new.id <- db.query(InsertString, con) + ## add to sitegroup - if(new.id %in% pecan.sgs$site_id){ + if (new.id %in% pecan.sgs$site_id) { print("SITE already in SITEGROUP") } else { - InsertSite = paste0("INSERT INTO sitegroups_sites(sitegroup_id,site_id) VALUES(", - pecan.sitegroup$id,", ", - new.id,")") - db.query(InsertSite,con) + InsertSite <- paste0( + "INSERT INTO sitegroups_sites(sitegroup_id,site_id) VALUES(", + pecan.sitegroup$id, ", ", + new.id, ")" + ) + db.query(InsertSite, con) } - } ## end new site insert - } ## end site loop - - } ## end sitegroup loop db.close(con) @@ -95,7 +95,7 @@ db.close(con) in.path <- "/fs/data4/PalEON_Regional" in.prefix <- "" -outfolder <- paste0(in.path,"_nc") +outfolder <- paste0(in.path, "_nc") start_date <- "850-01-01" end_date <- "2010-12-31" overwrite <- FALSE @@ -104,74 +104,72 @@ verbose <- FALSE ## After create site and Input records citation_id <- 1000000012 # Kumar et al 2012 -#INSERT INTO sites (sitename,user_id,geometry) VALUES ('PalEON Regional',1000000001,ST_Geomfromtext('POLYGON((-100.0 35 0, -100 50 0, -60 50 0, -60 35 0, -100 35 0))', 4326)) RETURNING id; +# INSERT INTO sites (sitename,user_id,geometry) VALUES ('PalEON Regional',1000000001,ST_Geomfromtext('POLYGON((-100.0 35 0, -100 50 0, -60 50 0, -60 35 0, 
-100 35 0))', 4326)) RETURNING id; site_id <- 1000025661 -#INSERT INTO inputs (site_id,start_date,end_date,name,user_id,format_id) VALUES (1000025661,'850-01-01 00:00:00','2010-12-31 23:59:59','PalEON Regional Met',1000000001,33) RETURNING id; +# INSERT INTO inputs (site_id,start_date,end_date,name,user_id,format_id) VALUES (1000025661,'850-01-01 00:00:00','2010-12-31 23:59:59','PalEON Regional Met',1000000001,33) RETURNING id; input_id <- 1000011261 ################################ ## Recode PalEON site names -for(i in seq_along(paleon.sitegroups)){ - - print(paste("************",paleon.sitegroups[i],"*************")) - pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_",paleon.sitegroups[i],"'"),con) - pecan.sgs <- db.query(paste("SELECT * from sitegroups_sites where sitegroup_id =",pecan.sitegroup$id),con) - +for (i in seq_along(paleon.sitegroups)) { + print(paste("************", paleon.sitegroups[i], "*************")) + pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_", paleon.sitegroups[i], "'"), con) + pecan.sgs <- db.query(paste("SELECT * from sitegroups_sites where sitegroup_id =", pecan.sitegroup$id), con) + ## loop over new sites - for(j in seq_len(nrow(pecan.sgs))){ - sitename <- db.query(paste0("SELECT sitename from sites where id =",pecan.sgs$site_id[j]),con) - if(length(grep("PEcAn",sitename))>0){ - sitename <- sub("PEcAn","PalEON",sitename) - db.query(paste0("UPDATE sites set sitename = '",sitename,"' where id =",pecan.sgs$site_id[j]),con) + for (j in seq_len(nrow(pecan.sgs))) { + sitename <- db.query(paste0("SELECT sitename from sites where id =", pecan.sgs$site_id[j]), con) + if (length(grep("PEcAn", sitename)) > 0) { + sitename <- sub("PEcAn", "PalEON", sitename) + db.query(paste0("UPDATE sites set sitename = '", sitename, "' where id =", pecan.sgs$site_id[j]), con) } - } + } } ### Create Multisettings -for(i in c(1,6,2,3,4)){ - - print(paste("************",paleon.sitegroups[i],"*************")) - pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_",paleon.sitegroups[i],"'"),con) - +for (i in c(1, 6, 2, 3, 4)) { + print(paste("************", paleon.sitegroups[i], "*************")) + pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_", paleon.sitegroups[i], "'"), con) + template <- PEcAn.settings::read.settings("pecan_DALEC_priority.xml") - template$run$site <- NULL ## make sure to zero out template site - + template$run$site <- NULL ## make sure to zero out template site + multiRunSettings <- createSitegroupMultiSettings(template, sitegroupId = pecan.sitegroup$id) - PEcAn.settings::write.settings(multiRunSettings, outputfile = paste0("pecan.",paleon.sitegroups[i],".xml")) - + PEcAn.settings::write.settings(multiRunSettings, outputfile = paste0("pecan.", paleon.sitegroups[i], ".xml")) } ## set paths for remote -##geo -#settings$outdir <- "/projectnb/dietzelab/pecan.data/output" +## geo +# settings$outdir <- "/projectnb/dietzelab/pecan.data/output" ##################################### ## Set up to run extract.nc on cluster -for(i in seq_along(paleon.sitegroups)){ - - print(paste("************",paleon.sitegroups[i],"*************")) - pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_",paleon.sitegroups[i],"'"),con) - pecan.sgs <- db.query(paste("SELECT * from sitegroups_sites where sitegroup_id =",pecan.sitegroup$id),con) - +for (i in seq_along(paleon.sitegroups)) { + print(paste("************", 
paleon.sitegroups[i], "*************")) + pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_", paleon.sitegroups[i], "'"), con) + pecan.sgs <- db.query(paste("SELECT * from sitegroups_sites where sitegroup_id =", pecan.sitegroup$id), con) + ## loop over sites site.info <- NULL - - for(j in seq_len(nrow(pecan.sgs))){ + + for (j in seq_len(nrow(pecan.sgs))) { site_id <- as.numeric(pecan.sgs$site_id[j]) - sitename <- db.query(paste0("SELECT sitename from sites where id =",site_id),con) + sitename <- db.query(paste0("SELECT sitename from sites where id =", site_id), con) str_ns <- paste0(site_id %/% 1e+09, "-", site_id %% 1e+09) outfile <- paste0("PalEONregional_CF_site_", str_ns) - latlon <- PEcAn.DB::query.site(site$id, con = con)[c("lat", "lon")] - site.info <- rbind(site.info,data.frame(id = site_id, - lat = latlon$lat, - lon = latlon$lon, - str_ns = str_ns, - outfile = outfile)) - } - save(site.info,file=paste0("PalEON_siteInfo_",paleon.sitegroups[i],".RData")) + latlon <- PEcAn.DB::query.site(site$id, con = con)[c("lat", "lon")] + site.info <- rbind(site.info, data.frame( + id = site_id, + lat = latlon$lat, + lon = latlon$lon, + str_ns = str_ns, + outfile = outfile + )) + } + save(site.info, file = paste0("PalEON_siteInfo_", paleon.sitegroups[i], ".RData")) } @@ -182,111 +180,112 @@ for(i in seq_along(paleon.sitegroups)){ ## establish remote tunnel library(getPass) -host <- list(name="geo.bu.edu",tunnel="~/.pecan/tunnel/") -is.open <- PEcAn.remote::open_tunnel(host$name,host$tunnel) -if(!is.open){ +host <- list(name = "geo.bu.edu", tunnel = "~/.pecan/tunnel/") +is.open <- PEcAn.remote::open_tunnel(host$name, host$tunnel) +if (!is.open) { print("Could not open remote tunnel") } else { - host$tunnel <- file.path(host$tunnel,"tunnel") + host$tunnel <- file.path(host$tunnel, "tunnel") } ## get db entry of parent met -parent.input <- db.query("SELECT * from inputs where id = 1000011261",con) +parent.input <- db.query("SELECT * from inputs where id = 1000011261", con) local.prefix <- "/fs/data1/pecan.data/dbfiles/PalEONregional_CF_site_" remote.prefix <- "/projectnb/dietzelab/pecan.data/input/PalEONregional_CF_site_" ## remote parent: /projectnb/dietzelab/pecan.data/input/PalEON_Regional_nc/ ## local parent: /fs/data4/PalEON_Regional_nc -start_date <- lubridate::force_tz(lubridate::as_date("0850-01-01 00:00:00"), 'UTC') -end_date <- lubridate::force_tz(lubridate::as_date("2010-12-31 23:59:59"), 'UTC') +start_date <- lubridate::force_tz(lubridate::as_date("0850-01-01 00:00:00"), "UTC") +end_date <- lubridate::force_tz(lubridate::as_date("2010-12-31 23:59:59"), "UTC") years <- 850:2010 format_id <- 33 paleon.site.errors <- list() -for(i in c(1:5,7)){ +for (i in c(1:5, 7)) { paleon.site.errors[[i]] <- list() - - print(paste("************",paleon.sitegroups[i],"*************")) - pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_",paleon.sitegroups[i],"'"),con) - pecan.sgs <- db.query(paste("SELECT * from sitegroups_sites where sitegroup_id =",pecan.sitegroup$id),con) - + + print(paste("************", paleon.sitegroups[i], "*************")) + pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_", paleon.sitegroups[i], "'"), con) + pecan.sgs <- db.query(paste("SELECT * from sitegroups_sites where sitegroup_id =", pecan.sitegroup$id), con) + ## load site info - load(paste0("PalEON_siteInfo_",paleon.sitegroups[i],".RData")) - - - for(j in seq_len(nrow(pecan.sgs))){ - print(c(i,j)) - + 
load(paste0("PalEON_siteInfo_", paleon.sitegroups[i], ".RData")) + + + for (j in seq_len(nrow(pecan.sgs))) { + print(c(i, j)) + ## make local folder - local.dir <- paste0(local.prefix,site.info$str_ns[j],"/") - remote.dir <- paste0(remote.prefix,site.info$str_ns[j],"/") + local.dir <- paste0(local.prefix, site.info$str_ns[j], "/") + remote.dir <- paste0(remote.prefix, site.info$str_ns[j], "/") dir.create(local.dir) - + ## copy from remote to local - PEcAn.utils::remote.copy.from(host,remote.dir,local.dir) - #rsync -avz -e 'ssh -o ControlPath="~/.pecan/tunnel/tunnel"' geo.bu.edu:/projectnb/dietzelab/pecan.data/input/PalEONregional_CF_site_1-24043/ /fs/data1/pecan.data/dbfiles/PalEONregional_CF_site_1-24043/ - + PEcAn.utils::remote.copy.from(host, remote.dir, local.dir) + # rsync -avz -e 'ssh -o ControlPath="~/.pecan/tunnel/tunnel"' geo.bu.edu:/projectnb/dietzelab/pecan.data/input/PalEONregional_CF_site_1-24043/ /fs/data1/pecan.data/dbfiles/PalEONregional_CF_site_1-24043/ + ## check if all files exist - local.files <- dir(local.dir,".nc") - local.years <- as.numeric(sub(".nc","",local.files,fixed = TRUE)) - if(all(years %in% local.years)){ + local.files <- dir(local.dir, ".nc") + local.years <- as.numeric(sub(".nc", "", local.files, fixed = TRUE)) + if (all(years %in% local.years)) { ## check for input - input.check <- db.query(paste0("SELECT * FROM inputs where site_id = ",site.info$id[j]," AND format_id = 33"),con) - if(nrow(input.check) == 0 | length(grep("PalEON",input.check$name))==0){ + input.check <- db.query(paste0("SELECT * FROM inputs where site_id = ", site.info$id[j], " AND format_id = 33"), con) + if (nrow(input.check) == 0 | length(grep("PalEON", input.check$name)) == 0) { ## create new input - cmd <- paste0("INSERT INTO inputs (site_id, format_id, start_date, end_date, name, parent_id) VALUES (", - site.info$id[j], ", ", format_id, ", '",start_date, "', '", end_date,"','", site.info$outfile[j], "',",parent.input$id,") RETURNING id") + cmd <- paste0( + "INSERT INTO inputs (site_id, format_id, start_date, end_date, name, parent_id) VALUES (", + site.info$id[j], ", ", format_id, ", '", start_date, "', '", end_date, "','", site.info$outfile[j], "',", parent.input$id, ") RETURNING id" + ) site.input.id <- db.query(cmd, con) } else { ## use existing input - sel <- grep("PalEON",input.check$name) + sel <- grep("PalEON", input.check$name) site.input.id <- input.check$id[sel] } - + ## create remote dbfile - dbfile.insert(in.path = remote.dir,in.prefix = "",type = "Input",id = site.input.id, - con = con,hostname = host$name) - + dbfile.insert( + in.path = remote.dir, in.prefix = "", type = "Input", id = site.input.id, + con = con, hostname = host$name + ) + ## create local dbfile - dbfile.insert(in.path = local.dir,in.prefix = "",type = "Input",id = site.input.id, con = con) + dbfile.insert(in.path = local.dir, in.prefix = "", type = "Input", id = site.input.id, con = con) } else { - ## else add to error list - k = length(paleon.site.errors[[i]]) + 1 - paleon.site.errors[[i]] <- rbind(paleon.site.errors[[i]],cbind(site.info,min(local.years),max(local.years))) - save(paleon.site.errors,file="PalEON_siteInfo_errors.RData") + ## else add to error list + k <- length(paleon.site.errors[[i]]) + 1 + paleon.site.errors[[i]] <- rbind(paleon.site.errors[[i]], cbind(site.info, min(local.years), max(local.years))) + save(paleon.site.errors, file = "PalEON_siteInfo_errors.RData") } - - } - save(paleon.site.errors,file="PalEON_siteInfo_errors.RData") - + } + save(paleon.site.errors, file = 
"PalEON_siteInfo_errors.RData") } -PEcAn.remote::kill.tunnel(list(host=host)) +PEcAn.remote::kill.tunnel(list(host = host)) ################################## ### merge in CO2 into met data merge.file <- "~/paleon/paleon_monthly_co2.nc" start_date <- "0850-01-01" -end_date <- "2010-12-31" +end_date <- "2010-12-31" local.prefix <- "/fs/data1/pecan.data/dbfiles/PalEONregional_CF_site_" -for(i in seq_along(paleon.sitegroups)){ - - print(paste("************",paleon.sitegroups[i],"*************")) - pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_",paleon.sitegroups[i],"'"),con) - pecan.sgs <- db.query(paste("SELECT * from sitegroups_sites where sitegroup_id =",pecan.sitegroup$id),con) - load(paste0("PalEON_siteInfo_",paleon.sitegroups[i],".RData")) - - for(j in seq_len(nrow(pecan.sgs))){ - print(c(i,j)) - +for (i in seq_along(paleon.sitegroups)) { + print(paste("************", paleon.sitegroups[i], "*************")) + pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_", paleon.sitegroups[i], "'"), con) + pecan.sgs <- db.query(paste("SELECT * from sitegroups_sites where sitegroup_id =", pecan.sitegroup$id), con) + load(paste0("PalEON_siteInfo_", paleon.sitegroups[i], ".RData")) + + for (j in seq_len(nrow(pecan.sgs))) { + print(c(i, j)) + ## local folder - local.dir <- paste0(local.prefix,site.info$str_ns[j],"/") - if(!file.exists(local.dir) | length(dir(local.dir))==0) next - - merge_met_variable(local.dir,in.prefix,start_date,end_date,merge.file) + local.dir <- paste0(local.prefix, site.info$str_ns[j], "/") + if (!file.exists(local.dir) | length(dir(local.dir)) == 0) next + + merge_met_variable(local.dir, in.prefix, start_date, end_date, merge.file) } } @@ -297,34 +296,33 @@ for(i in seq_along(paleon.sitegroups)){ ## need to have files numbered 0850.nc not 850.nc for JULES ## moving forward, has been fixed in extract.nc start_year <- 850 -end_year <- 999 +end_year <- 999 in.prefix <- "" local.prefix <- "/fs/data1/pecan.data/dbfiles/PalEONregional_CF_site_" -for(i in seq_along(paleon.sitegroups)){ - - print(paste("************",paleon.sitegroups[i],"*************")) - pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_",paleon.sitegroups[i],"'"),con) - pecan.sgs <- db.query(paste("SELECT * from sitegroups_sites where sitegroup_id =",pecan.sitegroup$id),con) - load(paste0("PalEON_siteInfo_",paleon.sitegroups[i],".RData")) - - for(j in seq_len(nrow(pecan.sgs))){ - print(c(i,j)) - +for (i in seq_along(paleon.sitegroups)) { + print(paste("************", paleon.sitegroups[i], "*************")) + pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_", paleon.sitegroups[i], "'"), con) + pecan.sgs <- db.query(paste("SELECT * from sitegroups_sites where sitegroup_id =", pecan.sitegroup$id), con) + load(paste0("PalEON_siteInfo_", paleon.sitegroups[i], ".RData")) + + for (j in seq_len(nrow(pecan.sgs))) { + print(c(i, j)) + ## local folder - local.dir <- paste0(local.prefix,site.info$str_ns[j],"/") - if(!file.exists(local.dir) | length(dir(local.dir))==0) next + local.dir <- paste0(local.prefix, site.info$str_ns[j], "/") + if (!file.exists(local.dir) | length(dir(local.dir)) == 0) next for (year in start_year:end_year) { year_txt <- formatC(year, width = 4, format = "d", flag = "0") infile <- file.path(local.dir, paste0(in.prefix, year, ".nc")) outfile <- file.path(local.dir, paste0(in.prefix, year_txt, ".nc")) - if(file.exists(infile)) file.rename(infile,outfile) - } - for(year in 
-150:849){ + if (file.exists(infile)) file.rename(infile, outfile) + } + for (year in -150:849) { ## remove symbolic links year_txt <- formatC(year, width = 4, format = "d", flag = "0") infile <- file.path(local.dir, paste0(in.prefix, year, ".nc")) - system2("rm",infile) + system2("rm", infile) } } } @@ -332,29 +330,22 @@ for(i in seq_along(paleon.sitegroups)){ ### Add eastward_wind and northward_wind ## need to go back to met2CF.PalEONregional and fix start_date <- "0850-01-01" -end_date <- "2010-12-31" +end_date <- "2010-12-31" local.prefix <- "/fs/data1/pecan.data/dbfiles/PalEONregional_CF_site_" in.prefix <- "" -for(i in seq_along(paleon.sitegroups)){ - - print(paste("************",paleon.sitegroups[i],"*************")) - pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_",paleon.sitegroups[i],"'"),con) - pecan.sgs <- db.query(paste("SELECT * from sitegroups_sites where sitegroup_id =",pecan.sitegroup$id),con) - load(paste0("PalEON_siteInfo_",paleon.sitegroups[i],".RData")) - - for(j in seq_len(nrow(pecan.sgs))){ - print(c(i,j)) - - ## local folder - local.dir <- paste0(local.prefix,site.info$str_ns[j],"/") - if(!file.exists(local.dir) | length(dir(local.dir))==0) next - - PEcAn.data.atmosphere::split_wind(local.dir,in.prefix,start_date,end_date) - } -} - - - +for (i in seq_along(paleon.sitegroups)) { + print(paste("************", paleon.sitegroups[i], "*************")) + pecan.sitegroup <- db.query(paste0("SELECT * from sitegroups where name = 'PalEON_", paleon.sitegroups[i], "'"), con) + pecan.sgs <- db.query(paste("SELECT * from sitegroups_sites where sitegroup_id =", pecan.sitegroup$id), con) + load(paste0("PalEON_siteInfo_", paleon.sitegroups[i], ".RData")) + for (j in seq_len(nrow(pecan.sgs))) { + print(c(i, j)) + ## local folder + local.dir <- paste0(local.prefix, site.info$str_ns[j], "/") + if (!file.exists(local.dir) | length(dir(local.dir)) == 0) next + PEcAn.data.atmosphere::split_wind(local.dir, in.prefix, start_date, end_date) + } +} diff --git a/modules/data.land/inst/Multi_Site_IC_Process_Script.R b/modules/data.land/inst/Multi_Site_IC_Process_Script.R index 85dc55293c9..e2c08d0c2e9 100644 --- a/modules/data.land/inst/Multi_Site_IC_Process_Script.R +++ b/modules/data.land/inst/Multi_Site_IC_Process_Script.R @@ -1,80 +1,91 @@ -#reading settings and setting up environments for later processes +# reading settings and setting up environments for later processes settings <- read.settings("/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/pecan.xml") -input_veg <- list(storedir='/projectnb/dietzelab/dongchen/test_IC/download') +input_veg <- list(storedir = "/projectnb/dietzelab/dongchen/test_IC/download") outdir <- "/projectnb/dietzelab/dongchen/test_IC/test/" start_date <- as.Date(settings$state.data.assimilation$start.date) end_date <- Sys.Date() -source='NEON_veg' +source <- "NEON_veg" machine_host <- "test-pecan.bu.edu" n_ens <- settings$ensemble$size log_file <- c() neonsites <- neonstore::neon_sites(api = "https://data.neonscience.org/api/v0", .token = Sys.getenv("NEON_TOKEN")) -site.IDs <- settings %>% map(~.x[['run']] ) %>% map('site') %>% map('id') %>% unlist() %>% as.character() +site.IDs <- settings %>% + map(~ .x[["run"]]) %>% + map("site") %>% + map("id") %>% + unlist() %>% + as.character() Drop <- TRUE Write_into_settings <- TRUE -#loop over different sites +# loop over different sites for (i in 1:length(settings)) { temp_settings <- settings[[i]] print(temp_settings$run$site$id) print(i) - temp_outdir <- 
file.path(outdir,as.character(temp_settings$run$site$id)) - if(dir.exists(temp_outdir)){ + temp_outdir <- file.path(outdir, as.character(temp_settings$run$site$id)) + if (dir.exists(temp_outdir)) { next - }else{ + } else { dir.create(temp_outdir) } - - #extract veg function - veg_info <- try(extract_NEON_veg(lat = as.numeric(temp_settings$run$site$lat), - lon = as.numeric(temp_settings$run$site$lon), - start_date = start_date, - end_date = end_date, - neonsites = neonsites, - store_dir = input_veg$storedir)) - #checks + + # extract veg function + veg_info <- try(extract_NEON_veg( + lat = as.numeric(temp_settings$run$site$lat), + lon = as.numeric(temp_settings$run$site$lon), + start_date = start_date, + end_date = end_date, + neonsites = neonsites, + store_dir = input_veg$storedir + )) + # checks Var <- c("dryMass", "DBH", "SoilCarbon") Ind <- !is.na(veg_info) bin_var <- Var[Ind] - - #write into log file - if(sum(Ind) != 3){ - log_file <- c(log_file, paste0("Site: ", temp_settings$run$site$id, ". No ", paste(Var[which(!(Var %in% bin_var))]), - " data found in NEON for this site! Herbacious downloaded!")) - }else if(sum(Ind) == 0){ - log_file <- c(log_file, paste0("No data for site: ", temp_settings$run$site$id),". Jump to the next site.") + + # write into log file + if (sum(Ind) != 3) { + log_file <- c(log_file, paste0( + "Site: ", temp_settings$run$site$id, ". No ", paste(Var[which(!(Var %in% bin_var))]), + " data found in NEON for this site! Herbacious downloaded!" + )) + } else if (sum(Ind) == 0) { + log_file <- c(log_file, paste0("No data for site: ", temp_settings$run$site$id), ". Jump to the next site.") unlink(temp_outdir, recursive = T) next } - - #check if we have the situation that only subplot ID 41 observed in one plot. if so, we might need to drop this record when Drop == TRUE. - if(Ind[2]){ + + # check if we have the situation that only subplot ID 41 observed in one plot. if so, we might need to drop this record when Drop == TRUE. 
+ if (Ind[2]) { for (PLOT in unique(veg_info[[2]]$plot)) { - temp <- veg_info[[2]][which(veg_info[[2]]$plot==PLOT),]$Subplot - if(length(unique(temp)) == 1){ - if(unique(temp) == 41){ + temp <- veg_info[[2]][which(veg_info[[2]]$plot == PLOT), ]$Subplot + if (length(unique(temp)) == 1) { + if (unique(temp) == 41) { print(paste0("Only subplot 41 observed in plot: ", PLOT)) log_file <- c(log_file, paste0("Only subplot 41 observed in plot: ", PLOT)) - if(Drop){ - veg_info[[2]] <- veg_info[[2]][-which(veg_info[[2]]$plot==PLOT),] + if (Drop) { + veg_info[[2]] <- veg_info[[2]][-which(veg_info[[2]]$plot == PLOT), ] } } } } } - + sppfilename <- write_veg(temp_outdir, start_date, veg_info = veg_info, source) - - #sample_ic function - sample_ic_results <- PEcAn.data.land::sample_ic(in.path = gsub(basename(sppfilename), "", sppfilename), - in.name = basename(sppfilename), - start_date = start_date, - end_date = end_date, - outfolder = temp_outdir, - n.ensemble = n_ens, - machine_host = machine_host, - bin_var = bin_var, - source = source) + + # sample_ic function + sample_ic_results <- PEcAn.data.land::sample_ic( + in.path = gsub(basename(sppfilename), "", sppfilename), + in.name = basename(sppfilename), + start_date = start_date, + end_date = end_date, + outfolder = temp_outdir, + n.ensemble = n_ens, + machine_host = machine_host, + bin_var = bin_var, + source = source + ) # Calculate poolinfo and Write into NC file paths <- c() for (j in 1:n_ens) { @@ -82,10 +93,10 @@ for (i in 1:length(settings)) { out <- PEcAn.SIPNET::veg2model.SIPNET(temp_outdir, poolinfo, as.numeric(temp_settings$run$site$id), ens = j) paths <- c(out$file, paths) } - - if(Write_into_settings){ - #populated IC file paths into settings - Create_mult_list <- function(list.names, paths){ + + if (Write_into_settings) { + # populated IC file paths into settings + Create_mult_list <- function(list.names, paths) { out <- as.list(paths) names(out) <- list.names out @@ -96,11 +107,11 @@ for (i in 1:length(settings)) { settings[[i]]$run$inputs$poolinitcond$path <- Create_mult_list(rep("path", n_ens), paths) } } -if(Write_into_settings){ +if (Write_into_settings) { write.settings(settings, outputdir = file.path(outdir), outputfile = "pecan.xml") } -#write log file. -fileConn<-file(paste0(outdir, "/log.txt")) +# write log file. +fileConn <- file(paste0(outdir, "/log.txt")) writeLines(log_file, fileConn) -close(fileConn) \ No newline at end of file +close(fileConn) diff --git a/modules/data.land/inst/StatsModel_FIA.R b/modules/data.land/inst/StatsModel_FIA.R index a9ab11bae20..8b4bae06702 100644 --- a/modules/data.land/inst/StatsModel_FIA.R +++ b/modules/data.land/inst/StatsModel_FIA.R @@ -1,88 +1,84 @@ -#This code reads in output of CanopyCover2NLCD.r and using the thresholds of NLCD legens to convert them to NLCD classes. +# This code reads in output of CanopyCover2NLCD.r and using the thresholds of NLCD legens to convert them to NLCD classes. library(rjags) -#Reading in files -PSS <- read.csv(file="/sitefiles.radius_0.075.lat_29.8848_lon_-83.3542.pss.csv", head=TRUE, sep=",") +# Reading in files +PSS <- read.csv(file = "/sitefiles.radius_0.075.lat_29.8848_lon_-83.3542.pss.csv", head = TRUE, sep = ",") -#Using NLCD thresholds to change PSS file to NLCD landcover. 
-#Since all MANDIFORE points have at least 50% forest cover, I do not check for minimum vegetation cover -landcover <- data.frame (matrix(ncol=4, nrow=length(PSS$X))) -colnames(landcover) = c("Deciduous_Forest", "Evergreen_Forest", "Mixed_Forest", "Woody_Wetlands") -rownames(landcover) = c(PSS$X) +# Using NLCD thresholds to change PSS file to NLCD landcover. +# Since all MANDIFORE points have at least 50% forest cover, I do not check for minimum vegetation cover +landcover <- data.frame(matrix(ncol = 4, nrow = length(PSS$X))) +colnames(landcover) <- c("Deciduous_Forest", "Evergreen_Forest", "Mixed_Forest", "Woody_Wetlands") +rownames(landcover) <- c(PSS$X) - -for (i in 1:nrow(PSS)){ - if (sum (PSS$X9[i], PSS$X10[i], PSS$X11[i], PSS$X18[i]) > 75){ - landcover$"Deciduous_Forest"[i] = 1 - landcover$"Evergreen_Forest"[i] = 0 - landcover$"Mixed_Forest"[i] = 0 - landcover$"Woody_Wetlands"[i] = 0 +for (i in 1:nrow(PSS)) { + if (sum(PSS$X9[i], PSS$X10[i], PSS$X11[i], PSS$X18[i]) > 75) { + landcover$"Deciduous_Forest"[i] <- 1 + landcover$"Evergreen_Forest"[i] <- 0 + landcover$"Mixed_Forest"[i] <- 0 + landcover$"Woody_Wetlands"[i] <- 0 } - if (sum (PSS$X6[i], PSS$X7[i], PSS$X8[i], PSS$X20[i]) > 75){ - landcover$"Deciduous_Forest"[i] = 0 - landcover$"Evergreen_Forest"[i] = 1 - landcover$"Mixed_Forest"[i] = 0 - landcover$"Woody_Wetlands"[i] = 0 + if (sum(PSS$X6[i], PSS$X7[i], PSS$X8[i], PSS$X20[i]) > 75) { + landcover$"Deciduous_Forest"[i] <- 0 + landcover$"Evergreen_Forest"[i] <- 1 + landcover$"Mixed_Forest"[i] <- 0 + landcover$"Woody_Wetlands"[i] <- 0 } - if ((sum (PSS$X9[i], PSS$X10[i], PSS$X11[i], PSS$X18[i]) < 75) & (sum (PSS$X6[i], PSS$X7[i], PSS$X8[i], PSS$X20[i]) < 75)){ - landcover$"Deciduous_Forest"[i] = 0 - landcover$"Evergreen_Forest"[i] = 0 - landcover$"Mixed_Forest"[i] = 1 - landcover$"Woody_Wetlands"[i] = 0 - } - if (PSS$X19[i] > 20){ - landcover$"Deciduous_Forest"[i] = 0 - landcover$"Evergreen_Forest"[i] = 0 - landcover$"Mixed_Forest"[i] = 0 - landcover$"Woody_Wetlands"[i] = 1 + if ((sum(PSS$X9[i], PSS$X10[i], PSS$X11[i], PSS$X18[i]) < 75) & (sum(PSS$X6[i], PSS$X7[i], PSS$X8[i], PSS$X20[i]) < 75)) { + landcover$"Deciduous_Forest"[i] <- 0 + landcover$"Evergreen_Forest"[i] <- 0 + landcover$"Mixed_Forest"[i] <- 1 + landcover$"Woody_Wetlands"[i] <- 0 + } + if (PSS$X19[i] > 20) { + landcover$"Deciduous_Forest"[i] <- 0 + landcover$"Evergreen_Forest"[i] <- 0 + landcover$"Mixed_Forest"[i] <- 0 + landcover$"Woody_Wetlands"[i] <- 1 } } -#Counting each cover class inside the whole grid -landcover["Total",] <- colSums(landcover) -X1 <- landcover[nrow(landcover),"Deciduous_Forest"] -X2 <- landcover[nrow(landcover),"Evergreen_Forest"] -X3 <- landcover[nrow(landcover),"Mixed_Forest"] -X4 <- landcover[nrow(landcover),"Woody_Wetlands"] +# Counting each cover class inside the whole grid +landcover["Total", ] <- colSums(landcover) +X1 <- landcover[nrow(landcover), "Deciduous_Forest"] +X2 <- landcover[nrow(landcover), "Evergreen_Forest"] +X3 <- landcover[nrow(landcover), "Mixed_Forest"] +X4 <- landcover[nrow(landcover), "Woody_Wetlands"] X <- c(X1, X2, X3, X4) n <- sum(X) -a <- rep (1, ncol(landcover)) -loop <- (nrow(landcover)-1) #Could be deleted since it is not in the data model. -class <- (ncol(landcover)) #Could be deleted since it is not in the data model. +a <- rep(1, ncol(landcover)) +loop <- (nrow(landcover) - 1) # Could be deleted since it is not in the data model. +class <- (ncol(landcover)) # Could be deleted since it is not in the data model. 
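For reference, the multinomial data model that follows is conjugate: with a Dirichlet(a) prior on the class proportions P and X ~ Multinomial(P, n), the posterior is Dirichlet(a + X), so the JAGS estimates can be cross-checked directly. A minimal sketch in base R, using a hypothetical count vector X_demo in place of the X assembled above:

## Closed-form check for the Dirichlet-multinomial model fitted below.
## X_demo is an illustrative stand-in for X = c(X1, X2, X3, X4); a_demo mirrors a = rep(1, 4).
X_demo <- c(120, 45, 30, 5)
a_demo <- rep(1, length(X_demo))
post <- a_demo + X_demo # Dirichlet(a + X) posterior parameters
post / sum(post) # posterior means of P, comparable to summary(j.out)
## Monte Carlo draws via gamma normalization (one row = one Dirichlet draw)
draws <- matrix(rgamma(1000 * length(post), shape = rep(post, each = 1000)), ncol = length(post))
draws <- draws / rowSums(draws)
apply(draws, 2, mean)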
-#Data model (multinomial) +# Data model (multinomial) -FIAmodel = " +FIAmodel <- " model{ P ~ ddirch(a) #prior X ~ dmulti(P, n) # for(i in 1:loop){ # X[i, 1:class] ~ dmulti(P, n) -# } +# } } " -#JAGS stuff +# JAGS stuff -#data = list(n=n, class=class, loop=loop, a=a, X=X) -data = list(n=n, a=a, X=X) -init = NULL -j.model <- jags.model (file = textConnection(FIAmodel), data = data, inits = init, n.chains = 3) -update(j.model, n.iter=1000) -j.out <- coda.samples (model = j.model,variable.names= c("P"), n.iter = 10000) +# data = list(n=n, class=class, loop=loop, a=a, X=X) +data <- list(n = n, a = a, X = X) +init <- NULL +j.model <- jags.model(file = textConnection(FIAmodel), data = data, inits = init, n.chains = 3) +update(j.model, n.iter = 1000) +j.out <- coda.samples(model = j.model, variable.names = c("P"), n.iter = 10000) summary(j.out) output <- as.data.frame(as.matrix(j.out)) colnames(output) <- colnames(landcover) -par(mfrow = c(2,1)) -x <- seq(0,1,length = 100) -for(i in 1:ncol(output)){ - plot(output[,i], type="l", main = colnames(output)[i]) - plot(x,dbeta(x,a[i],sum(a)-a[i]), type="l", col="red", ylim = c(0,5)) - lines(density(output[,i])) +par(mfrow = c(2, 1)) +x <- seq(0, 1, length = 100) +for (i in 1:ncol(output)) { + plot(output[, i], type = "l", main = colnames(output)[i]) + plot(x, dbeta(x, a[i], sum(a) - a[i]), type = "l", col = "red", ylim = c(0, 5)) + lines(density(output[, i])) } - - - diff --git a/modules/data.land/inst/diametergrow.R b/modules/data.land/inst/diametergrow.R index f9f99ba359c..0d5d89eeff5 100644 --- a/modules/data.land/inst/diametergrow.R +++ b/modules/data.land/inst/diametergrow.R @@ -6,7 +6,6 @@ ## See InventoryGrowthFusion for newer implementation diametergrow <- function(diameters, increment, survival = NULL) { - #### data structures: ## ## diameters <- list of tree X year DBH census data matrices by site @@ -15,156 +14,156 @@ diametergrow <- function(diameters, increment, survival = NULL) { ## both matrices have lots of NAs -- includes ALL years and trees, not just measured ## ## - - plotend <- function(fname) { grDevices::dev.off() } - plotstart <- function(fname) { grDevices::pdf(fname) } - - ##################################################################################### + + plotend <- function(fname) { + grDevices::dev.off() + } + plotstart <- function(fname) { + grDevices::pdf(fname) + } + + ##################################################################################### tnorm <- function(n, lo, hi, mu, sig) { # normal truncated lo and hi - + if (length(lo) == 1 & length(mu) > 1) { lo <- rep(lo, length(mu)) } if (length(hi) == 1 & length(mu) > 1) { hi <- rep(hi, length(mu)) } - + z <- stats::runif(n, stats::pnorm(lo, mu, sig), stats::pnorm(hi, mu, sig)) z <- stats::qnorm(z, mu, sig) z[z == Inf] <- lo[z == Inf] z[z == -Inf] <- hi[z == -Inf] z } # tnorm - ########################################################### - + ########################################################### + diamint <- function() { # initialize diameters - - diamt <- matrix(NA, n, nt) #true values, initialized here - d0 <- matrix(0, n, 2) #range of values in yr 1 - first <- rep(0, n) #first observation yr - + + diamt <- matrix(NA, n, nt) # true values, initialized here + d0 <- matrix(0, n, 2) # range of values in yr 1 + first <- rep(0, n) # first observation yr + bginc <- mean(sapply(increment, mean, na.rm = TRUE), na.rm = TRUE) - + for (i in seq_len(n)) { - wf <- min(c(1:nt)[is.finite(surv[i, ]) & surv[i, ] > 0], na.rm = TRUE) wl <- max(c(1:nt)[is.finite(surv[i, ]) 
& surv[i, ] > 0], na.rm = TRUE) first[i] <- wf - + wi <- which(is.finite(dcens[i, ]), arr.ind = TRUE) - xi <- time[wi] - wf + 1 # recenter to first year + xi <- time[wi] - wf + 1 # recenter to first year yi <- dcens[i, wi] intercept <- mean(yi) - mean(xi) * (stats::cov(xi, yi) / stats::var(xi)) - + ## modification: if only one census, assume mean increment if (length(xi) == 1) { intercept <- yi - bginc * xi } - + if (!is.finite(intercept)) { intercept <- min(yi, na.rm = TRUE) } if (intercept < 1) { - intercept <- 0.001 #max(.1,(min(yi) - 5) ) + intercept <- 0.001 # max(.1,(min(yi) - 5) ) } - slope <- (mean(xi * yi) - intercept * mean(xi))/mean(xi^2) + slope <- (mean(xi * yi) - intercept * mean(xi)) / mean(xi^2) if (slope < 0.001) { slope <- 0.001 intercept <- mean(yi) - slope * mean(xi) } - + dfit <- intercept + slope * (time - wf + 1) diamt[i, wf:wl] <- dfit[wf:wl] d0[i, ] <- c((diamt[i, wf] - 2), (diamt[i, wf] + 2)) } - + d0[d0 < 0.1] <- 0.1 - + list(d0 = d0, diamt = diamt, firstyri = first) } # diamint - ############################################# - + ############################################# + f.update <- function() { # sample fixed effects - - alpha <- numeric(0) + + alpha <- numeric(0) allvars <- sig + sigd + sigp - nn <- max(nyr) - 1 + nn <- max(nyr) - 1 # ky <- c(c(1:max(nn))[ntt == 0],nn) #this sets last year effect to zero - ky <- c(1:nn)[ntt == 0] - + ky <- c(1:nn)[ntt == 0] + betaMat <- matrix(0, ny, (nt - 1)) - muVec <- rep(0, ny) - + muVec <- rep(0, ny) + if (COVARIATES) { - v <- crossprod(X, (dgrow[aincr] - teffect[aincr])) / allvars + prior.IVm %*% prior.mu V <- solve(crossprod(X) / allvars + prior.IVm) alpha <- matrix(mvtnorm::rmvnorm(1, V %*% v, V), (ncovars + 1), 1) - + mumat[aincr] <- X %*% alpha - - v <- apply((dgrow - mumat), 2, sum, na.rm = TRUE) / allvars - V <- 1 / (ntt / allvars + 1 / prior.Vmu) - beta <- stats::rnorm(length(v), (V * v), sqrt(V)) - mb <- mean(beta[ntt > 0]) #extract mean + + v <- apply((dgrow - mumat), 2, sum, na.rm = TRUE) / allvars + V <- 1 / (ntt / allvars + 1 / prior.Vmu) + beta <- stats::rnorm(length(v), (V * v), sqrt(V)) + mb <- mean(beta[ntt > 0]) # extract mean beta.t <- beta - mb beta.t[ntt == 0] <- 0 - mu <- mumat + mu <- mumat } - + if (!COVARIATES) { - for (m in seq_len(ny)) { im <- which(nindex == m) - + if (length(im) == 1) { nmm <- dgrow[im, ] * 0 + 1 nmm[!is.finite(nmm)] <- 0 v <- dgrow[im, ] / allvars + prior.mu / prior.Vmu v[!is.finite(v)] <- 0 - V <- 1 / (nmm/allvars + 1 / prior.Vmu) + V <- 1 / (nmm / allvars + 1 / prior.Vmu) } if (length(im) > 1) { nmm <- t(apply((dgrow[im, ] * 0 + 1), 2, sum, na.rm = TRUE)) - v <- t(apply((dgrow[im, ]), 2, sum, na.rm = TRUE)) / allvars + prior.mu / prior.Vmu - V <- 1 / (nmm / allvars + 1 / prior.Vmu) + v <- t(apply((dgrow[im, ]), 2, sum, na.rm = TRUE)) / allvars + prior.mu / prior.Vmu + V <- 1 / (nmm / allvars + 1 / prior.Vmu) } if (length(im) >= 1) { - bt <- tnorm(length(v), 0, 4, (V * v), sqrt(V)) - bt[nmm == 0] <- 0 - mu <- mean(bt) - beta.t <- bt - mu + bt <- tnorm(length(v), 0, 4, (V * v), sqrt(V)) + bt[nmm == 0] <- 0 + mu <- mean(bt) + beta.t <- bt - mu beta.t[nmm == 0] <- 0 if (length(ky) > 0) { - mu <- mean(beta.t[-ky]) #extract mean + mu <- mean(beta.t[-ky]) # extract mean beta.t[-ky] <- beta.t[-ky] - mu } } - beta.t[ky] <- 0 + beta.t[ky] <- 0 betaMat[m, ] <- beta.t - muVec[m] <- mu + muVec[m] <- mu } - } - + list(mu = mu, betaMat = betaMat, alpha = alpha, muVec = muVec) } # f.update - + in.update <- function() { # sample individual effects - + v <- t(apply((dgrow - teffect - 
peffect - mumat), 1, sum, na.rm = TRUE)) / sig V <- 1 / (nti / sig + 1 / sigd) ind <- tnorm(n, -1, 1, V * v, sqrt(V)) ind[!is.finite(ind)] <- 0 ind } # in.update - + p.update <- function() { # sample individual effects - + p <- apply((dgrow - teffect - ieffect - mumat), 1, sum, na.rm = TRUE) pmat[pindex] <- p v <- apply(pmat, 2, sum, na.rm = TRUE) / sig @@ -173,192 +172,192 @@ diametergrow <- function(diameters, increment, survival = NULL) { p[nm == 0] <- 0 p } # p.update - + di.update_new <- function() { # sample diameters - + # first yr - - dtmp <- dgrow + + dtmp <- dgrow dtmp[is.na(dtmp)] <- 0 - - delta <- dgrow + + delta <- dgrow delta[is.na(delta)] <- 0 - delta <- t(apply(delta, 1, cumsum)) - delta <- cbind(rep(0, n), delta) - - dmu <- delta * 0 - dmu[dobs] <- dcens[dobs] - delta[dobs] - dmu[dmu == 0] <- NA - nid <- dmu - nid[nid == 0] <- NA + delta <- t(apply(delta, 1, cumsum)) + delta <- cbind(rep(0, n), delta) + + dmu <- delta * 0 + dmu[dobs] <- dcens[dobs] - delta[dobs] + dmu[dmu == 0] <- NA + nid <- dmu + nid[nid == 0] <- NA nid <- apply((nid * 0 + 1), 1, sum, na.rm = TRUE) dmu <- apply(dmu, 1, mean, na.rm = TRUE) - + d00 <- tnorm(n, d0[, 1], d0[, 2], dmu, sqrt(w.error / nid)) - + diam.t <- cbind(rep(0, n), dgrow) diam.t[cbind(seq_len(n), firstyr.i)] <- d00 - + diam.t[!is.finite(diam.t)] <- 0 diam.t <- t(apply(diam.t, 1, cumsum)) - dgrow <- t(apply(diam.t, 1, diff, na.rm = TRUE)) + dgrow <- t(apply(diam.t, 1, diff, na.rm = TRUE)) dgrow[, firstyr.i] <- 0 # diam.t[surv != 1] <- NA - + # direct sample increments - + ddobs <- dcens[, -1] - diam.t[, -nt] - - lreg <- mu + ieffect + teffect + peffect #regression + + lreg <- mu + ieffect + teffect + peffect # regression lreg[is.na(dgrow)] <- NA - + V <- dgrow * 0 + 1 / sig v <- dgrow * 0 + lreg / sig - + if (length(iobs) > 0) { V[iobs] <- V[iobs] + 1 / v.error v[iobs] <- v[iobs] + dincr[iobs] / v.error } - + V[is.finite(ddobs)] <- V[is.finite(ddobs)] + 1 / w.error v[is.finite(ddobs)] <- v[is.finite(ddobs)] + ddobs[is.finite(ddobs)] / w.error - + V <- 1 / V - + dgrow[aincr] <- tnorm(nrow(aincr), mindinc[aincr], maxdinc[aincr], (V * v)[aincr], sqrt(V[aincr])) - + diam.t <- cbind(rep(0, n), dgrow) diam.t[cbind(seq_len(n), firstyr.i)] <- d00 diam.t[!is.finite(diam.t)] <- 0 diam.t <- t(apply(diam.t, 1, cumsum)) - + # errors: - - ss <- sum((dcens[dobs] - diam.t[dobs]) ^ 2, na.rm = TRUE) #diameter error + + ss <- sum((dcens[dobs] - diam.t[dobs])^2, na.rm = TRUE) # diameter error sw <- 1 / (stats::rgamma(1, (w1 + ndobs / 2), (w2 + 0.5 * ss))) - + sv <- 0 if (length(iobs) > 0) { - ss <- sum((dgrow[iobs] - dincr[iobs]) ^ 2, na.rm = TRUE) #growth error + ss <- sum((dgrow[iobs] - dincr[iobs])^2, na.rm = TRUE) # growth error sv <- 1 / (stats::rgamma(1, (v11 + 0.5 * niobs), (v22 + 0.5 * ss))) } - + list(diam.t = diam.t, sw = sw, sv = sv, ad = ad, aa = aa) } # di.update_new - + di.update <- function() { # sample diameters - + djump <- 0.02 - diam.t[is.na(surv) | surv < 1] <- NA # yr not in data set are NA + diam.t[is.na(surv) | surv < 1] <- NA # yr not in data set are NA dgrow <- t(apply(diam.t, 1, diff, na.rm = TRUE)) - dnew <- matrix(0, n, (nt - 1)) - aa <- rep(0, n) #acceptance counter - + dnew <- matrix(0, n, (nt - 1)) + aa <- rep(0, n) # acceptance counter + # propose diameters and increments - dstart <- tnorm(n, d0[, 1], d0[, 2], diam.t[cbind(seq_len(n), firstyr.i)], 0.05) + dstart <- tnorm(n, d0[, 1], d0[, 2], diam.t[cbind(seq_len(n), firstyr.i)], 0.05) dnew[aincr] <- tnorm(nrow(aincr), mindinc[aincr], maxdinc[aincr], dgrow[aincr], djump) - + 
diamnew <- cbind(rep(0, n), dnew) diamnew[cbind(seq_len(n), firstyr.i)] <- dstart - diamnew <- t(apply(diamnew, 1, cumsum)) # proposed diameters - + diamnew <- t(apply(diamnew, 1, cumsum)) # proposed diameters + dnew[is.na(surv[, -nt]) | surv[, -nt] < 1] <- NA diamnew[is.na(surv) | surv < 1] <- NA - - lreg <- mumat + ieffect + teffect + peffect # regression + + lreg <- mumat + ieffect + teffect + peffect # regression lreg[is.na(dgrow)] <- NA - + pnow <- diam.t * 0 pnew <- pnow - + # diameter data pnow[dobs] <- pnow[dobs] + stats::dnorm(dcens[dobs], diam.t[dobs], sqrt(w.error), log = TRUE) pnew[dobs] <- pnew[dobs] + stats::dnorm(dcens[dobs], diamnew[dobs], sqrt(w.error), log = TRUE) - + # regression pnow[, -1] <- pnow[, -1] + stats::dnorm(dgrow, lreg, sqrt(sig), log = TRUE) pnew[, -1] <- pnew[, -1] + stats::dnorm(dnew, lreg, sqrt(sig), log = TRUE) - + # increment data if (length(iobs) > 0) { pnow[iobs] <- pnow[iobs] + stats::dnorm(dincr[iobs], dgrow[iobs], sqrt(v.error), log = TRUE) pnew[iobs] <- pnew[iobs] + stats::dnorm(dincr[iobs], dnew[iobs], sqrt(v.error), log = TRUE) } - + pnow <- apply(pnow, 1, sum, na.rm = TRUE) pnew <- apply(pnew, 1, sum, na.rm = TRUE) - + a <- exp(pnew - pnow) z <- runif(n, 0, 1) - cindex <- which(z < a, arr.ind = TRUE) #accept tree-by-tree + cindex <- which(z < a, arr.ind = TRUE) # accept tree-by-tree ad <- sum(diam.t[cindex, ] * 0 + 1, na.rm = TRUE) aa[cindex] <- aa[cindex] + 1 diam.t[cindex, ] <- diamnew[cindex, ] dgrow[cindex, ] <- dnew[cindex, ] - + # errors: - - ss <- sum((dcens[dobs] - diam.t[dobs]) ^ 2, na.rm = TRUE) #diameter error + + ss <- sum((dcens[dobs] - diam.t[dobs])^2, na.rm = TRUE) # diameter error sw <- 1 / (stats::rgamma(1, (w1 + ndobs / 2), (w2 + 0.5 * ss))) - - ss <- sum((dgrow[iobs] - dincr[iobs])^2, na.rm = TRUE) #growth error + + ss <- sum((dgrow[iobs] - dincr[iobs])^2, na.rm = TRUE) # growth error sv <- 1 / (stats::rgamma(1, (v11 + 0.5 * niobs), (v22 + 0.5 * ss))) if (length(iobs) == 0) { sv <- 0 } - + list(diam.t = diam.t, sw = sw, sv = sv, ad = ad, aa = aa) } # di.update - + sd.update <- function() { # variance on random effects - 1 / stats::rgamma(1, (vi1 + n/2), (vi2 + 0.5 * sum(beta.i ^ 2))) + 1 / stats::rgamma(1, (vi1 + n / 2), (vi2 + 0.5 * sum(beta.i^2))) } # sd.update - + sp.update <- function() { # variance on random plot effects - 1 / stats::rgamma(1, (pi1 + mplot / 2), (pi2 + 0.5 * sum(beta.p ^ 2))) + 1 / stats::rgamma(1, (pi1 + mplot / 2), (pi2 + 0.5 * sum(beta.p^2))) } # sp.update - + se.update <- function() { # process error - ss <- sum((dgrow - mumat - ieffect - teffect - peffect) ^ 2, na.rm = TRUE) + ss <- sum((dgrow - mumat - ieffect - teffect - peffect)^2, na.rm = TRUE) 1 / (stats::rgamma(1, (s1 + 0.5 * sum(nti)), (s2 + 0.5 * ss))) } # se.update - - + + ##################################################################################### - + ############################################### ### ### ### initialization ### ### ### ############################################### - + ### set up input/output folders - REMOTE <- FALSE ## if true produced graphics; if false, assumes running on node w/o graphics - INCREMENTS <- FALSE ## if true, plots increment data - outfolder <- settings$outdir ## output folder for saving files - + REMOTE <- FALSE ## if true produced graphics; if false, assumes running on node w/o graphics + INCREMENTS <- FALSE ## if true, plots increment data + outfolder <- settings$outdir ## output folder for saving files + ### set up years - yrvec <- as.numeric(colnames(diameters[[1]])) - nyr <- length(yrvec) 
+ yrvec <- as.numeric(colnames(diameters[[1]])) + nyr <- length(yrvec) beginyr <- yrvec[1] - endyr <- yrvec[nyr] - - ierr <- 0.1 #width (in cm) of error window for increments - mind <- 0.005 - maxd <- 3 - + endyr <- yrvec[nyr] + + ierr <- 0.1 # width (in cm) of error window for increments + mind <- 0.005 + maxd <- 3 + # all plots - mplot <- length(diameters) ## number of sites - dcens <- numeric(0) ## combined diameter data - dincr <- numeric(0) ## combined increment data - surv <- numeric(0) ## combined survival data - ijindex <- numeric(0) ## indexing for combined data - + mplot <- length(diameters) ## number of sites + dcens <- numeric(0) ## combined diameter data + dincr <- numeric(0) ## combined increment data + surv <- numeric(0) ## combined survival data + ijindex <- numeric(0) ## indexing for combined data + ## if not provided, assume all trees survive if (is.null(survival)) { survival <- list() @@ -369,73 +368,73 @@ diametergrow <- function(diameters, increment, survival = NULL) { survival[[i]] <- matrix(TRUE, nrow(diameters[[i]]), nyr) } } - + # stack data from all plots for (j in seq_len(mplot)) { if (length(survival[[j]]) == 0) { next } - + wc <- match(colnames(diameters[[j]]), yrvec) nr <- nrow(diameters[[j]]) - + dc <- matrix(NA, nr, length(yrvec)) dc[, wc] <- diameters[[j]] dcens <- rbind(dcens, dc) - + di <- matrix(NA, nr, (length(yrvec) - 1)) di[, wc[-length(wc)]] <- increment[[j]] dincr <- rbind(dincr, di) - + sv <- matrix(NA, nr, length(yrvec)) sv[, wc] <- survival[[j]] surv <- rbind(surv, sv) - + ijindex <- rbind(ijindex, cbind(rep(j, nr), c(1:nr))) } dincr[dincr < 0] <- NA mtree <- sapply(diameters, nrow) - - n <- nrow(dcens) ## number of trees - nt <- ncol(dcens) ## number of years - dobs <- which(is.finite(dcens), arr.ind = TRUE) #diameter obs - nod <- which(!is.finite(dcens) & surv == 1, arr.ind = TRUE) #no diam obs + + n <- nrow(dcens) ## number of trees + nt <- ncol(dcens) ## number of years + dobs <- which(is.finite(dcens), arr.ind = TRUE) # diameter obs + nod <- which(!is.finite(dcens) & surv == 1, arr.ind = TRUE) # no diam obs ndobs <- nrow(dobs) - iobs <- which(is.finite(dincr), arr.ind = TRUE) #incr obs - noi <- which(!is.finite(dincr) & surv[, -nt] == 1, arr.ind = TRUE) #no diam obs + iobs <- which(is.finite(dincr), arr.ind = TRUE) # incr obs + noi <- which(!is.finite(dincr) & surv[, -nt] == 1, arr.ind = TRUE) # no diam obs niobs <- nrow(iobs) if (length(niobs) == 0) { niobs <- 0 } - + time <- seq_along(yrvec) - tindex <- time[-nt] #time index for plot 2 - - mindinc <- matrix(mind, nrow(dincr), ncol(dincr)) ## minimum incerment - maxdinc <- maxd + mindinc * 0 ## maximum increment - mindinc[iobs] <- dincr[iobs] - ierr - maxdinc[iobs] <- dincr[iobs] + ierr + tindex <- time[-nt] # time index for plot 2 + + mindinc <- matrix(mind, nrow(dincr), ncol(dincr)) ## minimum incerment + maxdinc <- maxd + mindinc * 0 ## maximum increment + mindinc[iobs] <- dincr[iobs] - ierr + maxdinc[iobs] <- dincr[iobs] + ierr mindinc[mindinc < mind] <- mind maxdinc[maxdinc < mindinc] <- maxd - - dtmp <- diamint() - diam.t <- dtmp$diamt - d0 <- dtmp$d0 + + dtmp <- diamint() + diam.t <- dtmp$diamt + d0 <- dtmp$d0 firstyr.i <- dtmp$firstyri - dgrow <- t(apply(diam.t, 1, diff, na.rm = TRUE)) #diameter increment - - dplot <- matrix(rep(ijindex[, 1], each = (nt - 1)), n, (nt - 1), byrow = TRUE) + dgrow <- t(apply(diam.t, 1, diff, na.rm = TRUE)) # diameter increment + + dplot <- matrix(rep(ijindex[, 1], each = (nt - 1)), n, (nt - 1), byrow = TRUE) dplot[is.na(dgrow)] <- NA - ntt <- 
t(apply((dgrow * 0 + 1), 2, sum, na.rm = TRUE)) #values per yr - nti <- t(apply((dgrow * 0 + 1), 1, sum, na.rm = TRUE)) #values per individual - ntmp <- table(dplot, deparse.level = 0) #values per plot - ntp <- rep(0, mplot) - pii <- match(as.numeric(unlist(dimnames(ntmp))), c(seq_len(mplot))) + ntt <- t(apply((dgrow * 0 + 1), 2, sum, na.rm = TRUE)) # values per yr + nti <- t(apply((dgrow * 0 + 1), 1, sum, na.rm = TRUE)) # values per individual + ntmp <- table(dplot, deparse.level = 0) # values per plot + ntp <- rep(0, mplot) + pii <- match(as.numeric(unlist(dimnames(ntmp))), c(seq_len(mplot))) ntp[pii] <- ntmp - - nm <- table(cut(ijindex[, 1], c(0:mplot))) #trees per plot - - pmat <- matrix(0, max(nm), mplot) #matrix to hope plot values + + nm <- table(cut(ijindex[, 1], c(0:mplot))) # trees per plot + + pmat <- matrix(0, max(nm), mplot) # matrix to hope plot values pindex <- numeric(0) for (j in seq_len(mplot)) { if (nm[j] == 0) { @@ -444,133 +443,132 @@ diametergrow <- function(diameters, increment, survival = NULL) { jj <- rep(j, nm[j]) pindex <- rbind(pindex, cbind(c(1:nm[j]), jj)) } - - aincr <- which(is.finite(dgrow) & is.finite(surv[, -nt]) & surv[, -nt] == 1, arr.ind = TRUE) #intervals that individual is alive - + + aincr <- which(is.finite(dgrow) & is.finite(surv[, -nt]) & surv[, -nt] == 1, arr.ind = TRUE) # intervals that individual is alive + plotyear <- rep(1, mplot) # plotyear[plotnames %in% c('218','318','427')] <- 2 plotyear[plotnames == 527] <- 3 # plotyear[plotnames %in% c('LG','UG')] <- 4 plotyear[plotnames %in% c('MHP','MHF')] <- 5 # plotyear[plotnames %in% c('BW','HW','EW')] <- 6 - + ny <- max(plotyear) nindex <- rep(plotyear, times = nm) - + ################## COVARIATES ###################### - - COVARIATES <- FALSE # T if there are covariates + + COVARIATES <- FALSE # T if there are covariates ncovars <- 0 # covariates if (COVARIATES) { - ncovars <- 1 #number of covariates - X <- matrix(1, nrow(aincr), (ncovars + 1)) - nx <- nrow(X) - X[, 2] <- stats::rnorm(nx, (dgrow[aincr] * 0.5 + 1), 0.1) #simulated data - prior.mu <- rep(0, (1 + ncovars)) - prior.Vmu <- rep(10, (1 + ncovars)) - prior.IVm <- solve(diag(prior.Vmu)) - - tmat <- matrix(0, nrow(X), (nt - 1)) #matrix for year effects - tvector <- matrix(c(1:(nt - 1)), nrow(dgrow), (nt - 1), byrow = TRUE)[aincr] - tind <- cbind(c(1:nx), tvector) + ncovars <- 1 # number of covariates + X <- matrix(1, nrow(aincr), (ncovars + 1)) + nx <- nrow(X) + X[, 2] <- stats::rnorm(nx, (dgrow[aincr] * 0.5 + 1), 0.1) # simulated data + prior.mu <- rep(0, (1 + ncovars)) + prior.Vmu <- rep(10, (1 + ncovars)) + prior.IVm <- solve(diag(prior.Vmu)) + + tmat <- matrix(0, nrow(X), (nt - 1)) # matrix for year effects + tvector <- matrix(c(1:(nt - 1)), nrow(dgrow), (nt - 1), byrow = TRUE)[aincr] + tind <- cbind(c(1:nx), tvector) tmat[tind] <- tvector } - + ############ PRIORS ################# - prior.mu <- 0.3 #prior mean variance for mean growth rate + prior.mu <- 0.3 # prior mean variance for mean growth rate prior.Vmu <- 10 - + # lo.t <- rep(-4,(nyr-1)) hi.t <- lo.t*0 + 4 lo.t[nyr-1] <- -.0001 hi.t[nyr-1] <- .0001 - - mui <- 0.4 #individual effects var + + mui <- 0.4 # individual effects var vi1 <- 2 vi2 <- mui * (vi1 - 1) - - mup <- 0.4 #plot effects var + + mup <- 0.4 # plot effects var pi1 <- 2 pi2 <- mup * (pi1 - 1) - - mus <- 0.1^2 #process error + + mus <- 0.1^2 # process error s1 <- 10 s2 <- mus * (s1 - 1) - - muw <- 0.2^2 #diameter error + + muw <- 0.2^2 # diameter error w1 <- ndobs * 2 w2 <- muw * (w1 - 1) - - muv <- 0.02^2 
#increment error + + muv <- 0.02^2 # increment error v11 <- niobs v22 <- muv * (v11 - 1) if (niobs == 0) { v11 <- 0 v22 <- 0 } - + ############# initial values ################ - mu <- tnorm(1, 0, 1, prior.mu, 1) - sig <- 1 / stats::rgamma(1, s1, s2) + mu <- tnorm(1, 0, 1, prior.mu, 1) + sig <- 1 / stats::rgamma(1, s1, s2) sigd <- 1 / stats::rgamma(1, vi1, vi2) sigp <- 1 / stats::rgamma(1, pi1, pi2) w.error <- 1 / stats::rgamma(1, w1, w2) v.error <- 1 / stats::rgamma(1, vi1, vi2) - beta.i <- rep(0, n) #individual random effects - beta.p <- rep(0, mplot) #plot random effects - beta.t <- rep(0, (nt - 1)) #fixed year effects + beta.i <- rep(0, n) # individual random effects + beta.p <- rep(0, mplot) # plot random effects + beta.t <- rep(0, (nt - 1)) # fixed year effects peffect <- matrix(rep(beta.p, times = (nm * (nt - 1))), nrow = n, byrow = TRUE) - mumat <- peffect * 0 + mumat <- peffect * 0 teffect <- matrix(rep(beta.t, each = n), nrow = n, byrow = FALSE) - - ng <- 5000 ## length of MCMC - nrep <- 100 - burnin <- 400 ## length of burn-in + + ng <- 5000 ## length of MCMC + nrep <- 100 + burnin <- 400 ## length of burn-in printseq <- seq(10, ng, by = 20) - + ####### Storage ######### if (COVARIATES) { agibbs <- matrix(NA, ng, (ncovars + 1)) colnames(agibbs) <- paste("a", c(1:(ncovars + 1)), sep = "-") } - mgibbs <- rep(0, ng) - bigibbs <- rep(0, n) - bpgibbs <- rep(0, mplot) + mgibbs <- rep(0, ng) + bigibbs <- rep(0, n) + bpgibbs <- rep(0, mplot) bp2gibbs <- rep(0, mplot) - ingibbs <- matrix(0, ng, 4) #a sample of individuals - ins <- sort(sample(seq_len(n), 4)) - sgibbs <- matrix(0, ng, 5) + ingibbs <- matrix(0, ng, 4) # a sample of individuals + ins <- sort(sample(seq_len(n), 4)) + sgibbs <- matrix(0, ng, 5) colnames(sgibbs) <- c("proc", "ind", "plot", "diam", "incr") - tgibbs <- matrix(0, ng, (nt - 1)) + tgibbs <- matrix(0, ng, (nt - 1)) colnames(tgibbs) <- yrvec[-nt] - dgibbs <- diam.t * 0 #sums and sums of squares, diam - d2gibbs <- diam.t * 0 - ggibbs <- diam.t[, -nt] * 0 #sums and sums of squares, growth - g2gibbs <- diam.t[, -nt] * 0 - ad <- rep(0, ng) #acceptance counter by step - ai <- rep(0, n) #acceptance counter by tree - + dgibbs <- diam.t * 0 # sums and sums of squares, diam + d2gibbs <- diam.t * 0 + ggibbs <- diam.t[, -nt] * 0 # sums and sums of squares, growth + g2gibbs <- diam.t[, -nt] * 0 + ad <- rep(0, ng) # acceptance counter by step + ai <- rep(0, n) # acceptance counter by tree + # log scale - ldgibbs <- diam.t * 0 #sums and sums of squares, diam + ldgibbs <- diam.t * 0 # sums and sums of squares, diam ld2gibbs <- diam.t * 0 - lggibbs <- diam.t[, -nt] * 0 #sums and sums of squares, growth + lggibbs <- diam.t[, -nt] * 0 # sums and sums of squares, growth lg2gibbs <- diam.t[, -nt] * 0 - + printseq <- seq(10, ng, by = 100) - + ## full data - to.save <- floor(seq(burnin, ng, length = nrep)) + to.save <- floor(seq(burnin, ng, length = nrep)) full.dia <- list() - + ###################################################### #### MCMC #### #### for (g in seq_len(ng)) { - - ftmp <- f.update() - mu <- ftmp$mu + ftmp <- f.update() + mu <- ftmp$mu # beta.t <- ftmp$beta.t alpha <- ftmp$alpha - - muVec <- ftmp$muVec + + muVec <- ftmp$muVec betaMat <- ftmp$betaMat - + teffect <- betaMat[nindex, ] - + mumat <- mu if (length(mu) == 1) { mumat <- matrix(mu, n, (nt - 1)) @@ -578,61 +576,61 @@ diametergrow <- function(diameters, increment, survival = NULL) { if (!COVARIATES) { mumat <- matrix(muVec[nindex], n, (nt - 1)) } - + beta.i <- in.update() ieffect <- matrix(rep(beta.i, each = (nt - 
1)), nrow = n, byrow = TRUE) - + if (mplot > 1) { beta.p <- p.update() - sigp <- sp.update() #plots + sigp <- sp.update() # plots } if (mplot == 1) { beta.p <- 0 - sigp <- 0 + sigp <- 0 } peffect <- matrix(rep(beta.p, times = (nm * (nt - 1))), nrow = n, byrow = TRUE) - - sig <- se.update() #proc - sigd <- sd.update() #individuals - - dtmp <- di.update() - diam.t <- dtmp$diam.t - w.error <- dtmp$sw #error diam - v.error <- dtmp$sv #error growth - + + sig <- se.update() # proc + sigd <- sd.update() # individuals + + dtmp <- di.update() + diam.t <- dtmp$diam.t + w.error <- dtmp$sw # error diam + v.error <- dtmp$sv # error growth + if (g < 100) { v.error <- muv - } - + } + ad[g] <- dtmp$ad - ai <- ai + dtmp$aa - dtmp <- diam.t + ai <- ai + dtmp$aa + dtmp <- diam.t dtmp[is.na(surv) | surv != 1] <- NA # dtmp[cbind(seq_len(n),firstyr.i)] <- 0 - dgrow <- t(apply(dtmp, 1, diff, na.rm = TRUE)) #diameter increment - + dgrow <- t(apply(dtmp, 1, diff, na.rm = TRUE)) # diameter increment + if (!COVARIATES) { mgibbs[g] <- mu } if (COVARIATES) { agibbs[g, ] <- alpha } - + ingibbs[g, ] <- beta.i[ins] tgibbs[g, ] <- beta.t sgibbs[g, ] <- c(sig, sigd, sigp, w.error, v.error) if (g > burnin) { - bigibbs <- bigibbs + beta.i - bpgibbs <- bpgibbs + beta.p - bp2gibbs <- bp2gibbs + beta.p ^ 2 - dgibbs <- dgibbs + diam.t - d2gibbs <- d2gibbs + diam.t ^ 2 - ggibbs <- ggibbs + dgrow - g2gibbs <- g2gibbs + dgrow ^ 2 - ldgibbs <- ldgibbs + log(diam.t) - ld2gibbs <- ld2gibbs + log(diam.t) ^ 2 - lggibbs <- lggibbs + log(dgrow) - lg2gibbs <- lg2gibbs + log(dgrow) ^ 2 + bigibbs <- bigibbs + beta.i + bpgibbs <- bpgibbs + beta.p + bp2gibbs <- bp2gibbs + beta.p^2 + dgibbs <- dgibbs + diam.t + d2gibbs <- d2gibbs + diam.t^2 + ggibbs <- ggibbs + dgrow + g2gibbs <- g2gibbs + dgrow^2 + ldgibbs <- ldgibbs + log(diam.t) + ld2gibbs <- ld2gibbs + log(diam.t)^2 + lggibbs <- lggibbs + log(dgrow) + lg2gibbs <- lg2gibbs + log(dgrow)^2 } if (g %in% printseq) { # print(g) print(betaMat)#[5:6,]) @@ -644,83 +642,83 @@ diametergrow <- function(diameters, increment, survival = NULL) { } } ########### END MCMC ########## - + print("after diameter gibbs") - + keep <- c(burnin:g) nk <- length(keep) - + print("after diameter gibbs") - + mdiam <- dgibbs / nk sdiam <- sqrt(d2gibbs / nk - mdiam^2) mgrow <- ggibbs / nk sgrow <- sqrt(g2gibbs / nk - mgrow^2) - peff <- bpgibbs / nk + peff <- bpgibbs / nk # names(peff) <- plotnames - sdp <- sqrt(bp2gibbs / nk - peff ^ 2) + sdp <- sqrt(bp2gibbs / nk - peff^2) pci <- cbind((peff - 1.96 * sdp), (peff + 1.96 * sdp)) # rownames(pci) <- plotnames pci[mtree == 0, ] <- NA - + mldiam <- ldgibbs / nk - sldiam <- sqrt(ld2gibbs / nk - mldiam ^ 2) + sldiam <- sqrt(ld2gibbs / nk - mldiam^2) mlgrow <- lggibbs / nk - slgrow <- sqrt(lg2gibbs / nk - mlgrow ^ 2) - + slgrow <- sqrt(lg2gibbs / nk - mlgrow^2) + # priors and posteriors - + nall <- sum(nti) vp <- matrix(c(s1, s2, mus, vi1, vi2, mui, pi1, pi2, mup, w1, w2, muw, v11, v22, muv), 5, 3, byrow = TRUE) rownames(vp) <- c("process", "ind effect", "plot effect", "diameter", "growth") - + if (COVARIATES) { mgibbs <- agibbs } - + estimate <- c(apply(cbind(mgibbs, sgibbs, tgibbs)[keep, ], 2, mean), peff) - std_err <- c(apply(cbind(mgibbs, sgibbs, tgibbs)[keep, ], 2, sd), sdp) + std_err <- c(apply(cbind(mgibbs, sgibbs, tgibbs)[keep, ], 2, sd), sdp) p3 <- t(apply(cbind(mgibbs, sgibbs, tgibbs)[keep, ], 2, stats::quantile, c(0.025, 0.975))) p3 <- rbind(p3, pci) nn <- c(rep(nall, (ncovars + 2)), n, mplot, ndobs, niobs, ntt, ntp) p3 <- cbind(nn, estimate, std_err, p3) - + pvals <- 
cbind(prior.mu, prior.Vmu, prior.mu) pvals <- rbind(pvals, vp) - + diampars <- matrix(NA, nrow(p3), (ncol(p3) + ncol(pvals))) diampars[, 1:5] <- p3 diampars[1:nrow(pvals), 6:8] <- pvals rownames(diampars) <- names(estimate) colnames(diampars) <- c(colnames(p3), "par1", "par2", "prior mean") - + outfile <- file.path(outfolder, "diampars.txt") utils::write.table(signif(diampars, 3), outfile, row.names = TRUE, col.names = TRUE, quote = FALSE) - + # determine posterior means and sd's for diameter, growth, and other columns in treemat - + treeindex <- cumsum(mtree) treeindex <- cbind(c(1, (1 + treeindex[1:(mplot - 1)])), treeindex) treeindex[mtree == 0, 2] <- treeindex[mtree == 0, 1] if (mplot == 1) { treeindex <- matrix(treeindex[1, ], 1, 2) } - + # tre - meandiam <- numeric(0) #log diameter - sddiam <- numeric(0) + meandiam <- numeric(0) # log diameter + sddiam <- numeric(0) meangrow <- numeric(0) - sdgrow <- numeric(0) - mg <- numeric(0) - lgdiam <- numeric(0) - lgdinc <- numeric(0) - lgdisd <- numeric(0) - icol <- numeric(0) - jcol <- numeric(0) - tcol <- numeric(0) - ticol <- numeric(0) - surv <- numeric(0) - + sdgrow <- numeric(0) + mg <- numeric(0) + lgdiam <- numeric(0) + lgdinc <- numeric(0) + lgdisd <- numeric(0) + icol <- numeric(0) + jcol <- numeric(0) + tcol <- numeric(0) + ticol <- numeric(0) + surv <- numeric(0) + if (length(beginyr) < mplot) { beginyr <- rep(beginyr, mplot) } @@ -730,13 +728,13 @@ diametergrow <- function(diameters, increment, survival = NULL) { if (length(nyr) < mplot) { nyr <- rep(nyr, mplot) } - + for (j in seq_len(mplot)) { if (mtree[j] == 0) { meandiam <- append(meandiam, list(numeric(0))) - sddiam <- append(sddiam, list(numeric(0))) + sddiam <- append(sddiam, list(numeric(0))) meangrow <- append(meangrow, list(numeric(0))) - sdgrow <- append(sdgrow, list(numeric(0))) + sdgrow <- append(sdgrow, list(numeric(0))) next } yj <- c(beginyr[j]:endyr[j]) @@ -753,15 +751,15 @@ diametergrow <- function(diameters, increment, survival = NULL) { colnames(sd) <- yjvec colnames(mg) <- yjvec[-length(yjvec)] colnames(sg) <- yjvec[-length(yjvec)] - + # remove NA's, extra column for growth rates mgna <- which(is.na(mg), arr.ind = TRUE) if (length(mgna) > 0) { - meanmg <- apply(mg, 1, mean, na.rm = TRUE) + meanmg <- apply(mg, 1, mean, na.rm = TRUE) mg[mgna] <- meanmg[mgna[, 1]] } mg <- cbind(mg, mg[, (nyr[j] - 1)]) - + growmat <- exp(mg) cumgrow <- t(apply(growmat, 1, cumsum)) wna <- which(is.na(md), arr.ind = TRUE) @@ -769,12 +767,12 @@ diametergrow <- function(diameters, increment, survival = NULL) { wna <- sort(unique(wna[, 1])) } yjvec <- c(1:nyr[j]) - + for (w in wna) { lfit <- stats::lm(md[w, ] ~ yjvec) newvals <- stats::predict.lm(lfit, newdata = data.frame(yjvec)) md[w, is.na(md[w, ])] <- newvals[is.na(md[w, ])] - + check <- diff(md[w, ]) if (min(check) < 0) { check[check < 0] <- 1e-05 @@ -782,22 +780,22 @@ diametergrow <- function(diameters, increment, survival = NULL) { md[w, ] <- dnew } } - + sdna <- which(is.na(sd), arr.ind = TRUE) sd[sdna] <- 1 sgna <- which(is.na(sg), arr.ind = TRUE) sg[sgna] <- 1 sg <- cbind(sg, rep(1, nrow(sg))) - + meandiam <- append(meandiam, list(md)) - sddiam <- append(sddiam, list(sd)) + sddiam <- append(sddiam, list(sd)) meangrow <- append(meangrow, list(mg)) - sdgrow <- append(sdgrow, list(sg)) - + sdgrow <- append(sdgrow, list(sg)) + lgdiam <- c(lgdiam, as.vector(t(md))) lgdinc <- c(lgdinc, as.vector(t(mg))) lgdisd <- c(lgdisd, as.vector(t(sg))) - + yb <- match(beginyr[j], yrvec) ye <- match(endyr[j], yrvec) tcol <- c(tcol, 
rep(c(yb:ye), mtree[j])) @@ -806,32 +804,30 @@ diametergrow <- function(diameters, increment, survival = NULL) { ticol <- c(ticol, rep(c(treeindex[j, 1]:treeindex[j, 2]), each = (ye - yb + 1))) surv <- c(surv, as.vector(t(survival[[j]]))) } - + ## plot treerings - + if (INCREMENTS) { - plotfile <- file.path(outfolder, "incrementdata.ps") plotstart(plotfile) - + graphics::par(mfrow = c(6, 2), mar = c(1, 1, 2, 1), bty = "n") for (j in seq_len(mplot)) { - if (mtree[j] == 0) { next } yindex <- c(1:nyr[j]) tindex <- yindex + beginyr[j] - yrvec[1] - - yjvec <- yrvec[tindex] - rframe <- NULL #intreering(j,yjvec[1],yjvec[nyr[j]],order[[j]]) + + yjvec <- yrvec[tindex] + rframe <- NULL # intreering(j,yjvec[1],yjvec[nyr[j]],order[[j]]) if (length(rframe) == 0) { next } if (nrow(rframe) == 0) { next } - + ordring <- sort(unique(rframe[, "order"])) r1 <- rframe[rframe[, "order"] == ordring[1], ] plot(r1[, "yr"], r1[, "cm"], type = "l", xlim = c(1990, 2010), ylim = c(0, 2)) @@ -841,13 +837,13 @@ diametergrow <- function(diameters, increment, survival = NULL) { } # title(plotnames[j]) } - + plotend(plotfile) } - - #plotfile <- paste(outfolder,spgroup,'diamchains.ps',sep='') - #plotstart(plotfile) - + + # plotfile <- paste(outfolder,spgroup,'diamchains.ps',sep='') + # plotstart(plotfile) + # par(mfrow=c(3,3)) # for(j in 1:5){ # plot(sgibbs[,j],type='l') @@ -860,17 +856,17 @@ diametergrow <- function(diameters, increment, survival = NULL) { # aii <- ai/g # wlo <- which(aii < .3,arr.ind=T) #which trees have low acceptance rates? # hist(aii,probability=T,main='acceptance by tree') - - #plotend(plotfile) - + + # plotend(plotfile) + plotfile <- file.path(outfolder, "diampost.ps") plotstart(plotfile) - + vs <- seq(1e-05, 1.2, length = 200) - - prior.mu <- 0.3 #prior mean variance for mean growth rate + + prior.mu <- 0.3 # prior mean variance for mean growth rate prior.Vmu <- 10 - + graphics::par(mfrow = c(3, 2)) for (j in 1:5) { mj <- vp[j, 2] / (vp[j, 1] - 1) @@ -882,111 +878,114 @@ diametergrow <- function(diameters, increment, survival = NULL) { xl <- 0.5 * min(dj$x) vss <- seq(xl, xm, length = 200) plot(dj$x, dj$y, type = "l", lwd = 2, xlim = c(xl, xm), xlab = "Parameter value", ylab = "Density") - lines(vss, vss ^ (-2) * dgamma(1 / vss, vp[j, 1], vp[j, 2]), col = "darkgreen", lwd = 2) #include Jacobian + lines(vss, vss^(-2) * dgamma(1 / vss, vp[j, 1], vp[j, 2]), col = "darkgreen", lwd = 2) # include Jacobian title(colnames(sgibbs)[j]) } vt <- seq(-0.3, 0.3, length = 100) - plot(vt, stats::dnorm(vt, 0, sqrt(prior.Vmu)), - col = "darkgreen", type = "l", lwd = 2, ylim = c(0, 60), - xlab = "Parameter value", ylab = "Density") + plot(vt, stats::dnorm(vt, 0, sqrt(prior.Vmu)), + col = "darkgreen", type = "l", lwd = 2, ylim = c(0, 60), + xlab = "Parameter value", ylab = "Density" + ) title("yr effects") for (j in seq_len(nt - 1)) { dj <- density(tgibbs[keep, j]) lines(dj$x, dj$y, type = "l", lwd = 2) } - + plotend(plotfile) - + plotfile <- file.path(outfolder, "diamvars.ps") plotstart(plotfile) - + # var comparison sdi <- apply(tgibbs, 1, sd) - + graphics::par(mfrow = c(2, 1)) meanyr <- apply(tgibbs[keep, ], 2, mean) - + plot(yrvec[-nt], log10(mgrow[1, ]), ylim = c(-2, 0.5), type = "l", xlab = "Year", ylab = "Diameter increment (log cm)") for (i in seq_len(n)) { lines(yrvec[-nt], log10(mgrow[i, ])) } - - #dj <- density(log10(sdi)) - #plot(dj$x,dj$y,type='l',xlim=c(-2,0),ylim=c(0,240), + + # dj <- density(log10(sdi)) + # plot(dj$x,dj$y,type='l',xlim=c(-2,0),ylim=c(0,240), # ylab='Density',xlab="Standard deviation 
(log scale)") # text(mean(log10(sdi)),(20+max(dj$y)),'yr') # abline(h=0) - + dj <- density(log10(sqrt(sgibbs[keep, "ind"] + sgibbs[keep, "plot"]))) plot(dj$x, dj$y, type = "l", ylab = "Density", xlab = "Standard deviation (log scale)") text(mean(log10(sdi)), (20 + max(dj$y)), "yr") abline(h = 0) # lines(dj$x,dj$y,lwd=2,col='red') - text(mean(log10(sqrt(sgibbs[keep, "ind"] + sgibbs[keep, "plot"]))), - (50 + max(dj$y)), - "population", - col = "red") - + text(mean(log10(sqrt(sgibbs[keep, "ind"] + sgibbs[keep, "plot"]))), + (50 + max(dj$y)), + "population", + col = "red" + ) + for (j in 1:5) { if (min(sgibbs[keep, j]) == max(sgibbs[keep, j])) { next } dj <- density(log10(sqrt(sgibbs[keep, j]))) lines(dj$x, dj$y) - text(mean(log10(sqrt(sgibbs[keep, j]))), - (20 + max(dj$y)), - colnames(sgibbs)[j]) + text( + mean(log10(sqrt(sgibbs[keep, j]))), + (20 + max(dj$y)), + colnames(sgibbs)[j] + ) } - + plotend(plotfile) - - #plotfile <- paste(outfolder,spgroup,'diamyr.ps',sep='') - #plotstart(plotfile) - - #par(mfrow=c(4,4)) - #for(j in 1:(nt-1)){ + + # plotfile <- paste(outfolder,spgroup,'diamyr.ps',sep='') + # plotstart(plotfile) + + # par(mfrow=c(4,4)) + # for(j in 1:(nt-1)){ # plot(tgibbs[keep,j],type='l',ylim=c(-.4,.4)) # title(colnames(tgibbs)[j]) # abline(h=0,lty=2) - #} - - #plotend(plotfile) - - #plotfile <- paste(outfolder,spgroup,'diam_ind.ps',sep='') - #plotstart(plotfile) - - #par(mfrow=c(2,2)) - #for(j in 1:4){ + # } + + # plotend(plotfile) + + # plotfile <- paste(outfolder,spgroup,'diam_ind.ps',sep='') + # plotstart(plotfile) + + # par(mfrow=c(2,2)) + # for(j in 1:4){ # plot(ingibbs[keep,j],type='l',ylim=c(-.3,.3)) # title(ins[j]) # abline(h=0,lty=2) - #} - - #plotend(plotfile) - + # } + + # plotend(plotfile) + if (COVARIATES) { - plotfile <- file.path(outfolder, "diam_ind.ps") plotstart(plotfile) - + graphics::par(mfrow = c(2, 2)) for (j in seq_along(mgibbs)) { plot(mgibbs[keep, j], type = "l") # title(ins[j]) abline(h = 0, lty = 2) } - + plotend(plotfile) } - - plotfile <- file.path(outfolder, "diam_fit.ps") #large file + + plotfile <- file.path(outfolder, "diam_fit.ps") # large file plotstart(plotfile) - + # diameters and growth rates - + ## par(mfrow=c(2,2)) graphics::par(mfrow = c(1, 1)) - + if (length(iobs) > 0) { lo <- mgrow[iobs] - 1.96 * sgrow[iobs] hi <- mgrow[iobs] + 1.96 * sgrow[iobs] @@ -998,7 +997,7 @@ diametergrow <- function(diameters, increment, survival = NULL) { lines(c(xx[i], xx[i]), c(lo[i], hi[i])) } } - + lo <- mdiam[dobs] - 1.96 * sdiam[dobs] hi <- mdiam[dobs] + 1.96 * sdiam[dobs] xx <- dcens[dobs] @@ -1011,23 +1010,22 @@ diametergrow <- function(diameters, increment, survival = NULL) { } lines(c(xx[i], xx[i]), c(lo[i], hi[i]), col = coll) } - + plotend(plotfile) - + if (!REMOTE) { - jj <- 1 - + jj <- jj + 1 iplot <- sort(sample(seq_len(n), 5)) graphics::par(mfrow = c(5, 2)) graphics::par(mar = c(3, 2, 2, 1)) - + for (j in 1:5) { - md <- exp(mldiam[iplot[j], ]) + md <- exp(mldiam[iplot[j], ]) lsd <- exp(mldiam[iplot[j], ] - 1.96 * sldiam[iplot[j], ]) hsd <- exp(mldiam[iplot[j], ] + 1.96 * sldiam[iplot[j], ]) - mg <- exp(mlgrow[iplot[j], ]) + mg <- exp(mlgrow[iplot[j], ]) lsg <- exp(mlgrow[iplot[j], ] - 1.96 * slgrow[iplot[j], ]) hsg <- exp(mlgrow[iplot[j], ] + 1.96 * slgrow[iplot[j], ]) # y1 <- max(0,(mean(md,na.rm=T) - 6)) y2 <- y1 + 12 @@ -1035,51 +1033,56 @@ diametergrow <- function(diameters, increment, survival = NULL) { y2 <- max(hsd, na.rm = TRUE) id <- paste(ijindex[iplot[j], 1], ijindex[iplot[j], 2], iplot[j], sep = ", ") id <- ijindex[iplot[j], 2] - - 
plot(yrvec, md, type = "l", - ylim = range(c(y1, y2, dcens[iplot[j], ]), na.rm = TRUE), - xlab = " ", ylab = " ") + + plot(yrvec, md, + type = "l", + ylim = range(c(y1, y2, dcens[iplot[j], ]), na.rm = TRUE), + xlab = " ", ylab = " " + ) lines(yrvec, lsd, lty = 2) lines(yrvec, hsd, lty = 2) lines(yrvec, mdiam[iplot[j], ], col = 3) points(yrvec, dcens[iplot[j], ]) title(id) - plot(yrvec[-nt], mg, type = "l", - ylim = c(0, max(hsg, na.rm = TRUE)), - xlab = " ", ylab = " ") + plot(yrvec[-nt], mg, + type = "l", + ylim = c(0, max(hsg, na.rm = TRUE)), + xlab = " ", ylab = " " + ) lines(yrvec[-nt], lsg, lty = 2) lines(yrvec[-nt], hsg, lty = 2) lines(yrvec[-nt], mgrow[iplot[j], ], col = 3) points(yrvec[-nt], dincr[iplot[j], ]) } - + # outfile <- paste(outfolder,spgroup,'diam_pred',jj,'.ps',sep='') # dev.print(device=postscript,file=outfile,width=6, height=8, horizontal=FALSE) } - + # columns for treemat - + diam <- signif(exp(lgdiam), 4) - + # we now have the following columns for treemat: tindex,j,i,t,surv,diam,lgdiam,lgdinc,lgdisd treematindex <- cbind(ticol, jcol, icol, tcol) colnames(treematindex) <- c("tindex", "j", "i", "t") - + # if( length(grep('ordvec',objects())) == 0 )order <- tindex # if( length(grep('ordvec',objects())) > 0 )order <- ordvec # ordlist <- order - + print("after diameter analysis") - + ################# FINAL OUTPUT ################## save.image(file.path(outfolder, "DBH.Rdata")) - save(yrvec, mdiam, sdiam, mgrow, sgrow, full.dia, ijindex, mplot, - file = file.path(outfolder, "DBH_summary.Rdata")) + save(yrvec, mdiam, sdiam, mgrow, sgrow, full.dia, ijindex, mplot, + file = file.path(outfolder, "DBH_summary.Rdata") + ) ## mdiam -- modeled diameter mean ## sdiam -- modeled diameter s.d. ## mgrow -- modeled growth mean ## sgrow -- modeled growth s.d. 
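## A minimal usage sketch (illustrative only): read back the summary saved
## above and plot a 95% interval for one tree's modeled diameter. Object
## names follow the save() call; the load path is an assumption.
# load(file.path(outfolder, "DBH_summary.Rdata"))
# lo <- mdiam[1, ] - 1.96 * sdiam[1, ]
# hi <- mdiam[1, ] + 1.96 * sdiam[1, ]
# plot(yrvec, mdiam[1, ], type = "l", ylim = range(lo, hi, na.rm = TRUE),
#   xlab = "Year", ylab = "Diameter")
# lines(yrvec, lo, lty = 2)
# lines(yrvec, hi, lty = 2)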
- + ## equivalents exist for log-transformed values as well print("after save") } # diametergrow diff --git a/modules/data.land/inst/download_vswc.R b/modules/data.land/inst/download_vswc.R index bce7a29a691..e082e461e53 100644 --- a/modules/data.land/inst/download_vswc.R +++ b/modules/data.land/inst/download_vswc.R @@ -7,55 +7,57 @@ #' @return a ncdf file with the soil moisture value for the most recent available day #' @author Christina Francis and Alexis Helgeson #' -#' @examples -#' outdir <- "/projectnb/dietzelab/cfranci1/soilmoist" +#' @examples +#' outdir <- "/projectnb/dietzelab/cfranci1/soilmoist" #' site_ID <- "BART" - download_vswc <- function(outdir, site_ID) { + # reading in terrestrial targets data at daily level + PEcAn.utils::download_file( + "https://data.ecoforecast.org/targets/terrestrial/terrestrial_daily-targets.csv.gz", + "terrestrial_daily-targets.csv.gz" + ) -# reading in terrestrial targets data at daily level -PEcAn.utils::download_file("https://data.ecoforecast.org/targets/terrestrial/terrestrial_daily-targets.csv.gz", - "terrestrial_daily-targets.csv.gz") - -terr_daily <- read.csv(file.path(outdir, "terrestrial_daily-targets.csv.gz"), - col_types = cols( - time = col_datetime(format = ""), - siteID = col_character(), - nee = col_double(), - le = col_double(), - vswc = col_double(), - vswc_sd = col_double() -)) - + terr_daily <- read.csv(file.path(outdir, "terrestrial_daily-targets.csv.gz"), + col_types = cols( + time = col_datetime(format = ""), + siteID = col_character(), + nee = col_double(), + le = col_double(), + vswc = col_double(), + vswc_sd = col_double() + ) + ) -## subset out rows that contain vswc data - non_na_daily <- terr_daily[stats::complete.cases(terr_daily$vswc), ] %>% - dplyr::mutate(time = as.Date(time)) -## find the most recent date with vswc data for each site at the daily target level -non_na_daily <- non_na_daily %>% - dplyr::filter(siteID == site_ID) + ## subset out rows that contain vswc data + non_na_daily <- terr_daily[stats::complete.cases(terr_daily$vswc), ] %>% + dplyr::mutate(time = as.Date(time)) -max_time <- max(non_na_daily$time) + ## find the most recent date with vswc data for each site at the daily target level + non_na_daily <- non_na_daily %>% + dplyr::filter(siteID == site_ID) + max_time <- max(non_na_daily$time) -# extracting a VSWC value based on date and siteID columns -filter.date.daily <- dplyr::filter(non_na_daily, time == max_time & siteID == site_ID) -print(filter.date.daily) -# save the lastest vswc value into object and turn this into a netCDF -depth <- ncdf4::ncdim_def(name = "depth", units = "meters" , vals = filter.date.daily$vswc) -time <- ncdf4::ncdim_def(name = "time", units = "yyyymmdd" , longname = "date", vals = as.numeric(gsub("-","", filter.date.daily$time))) + # extracting a VSWC value based on date and siteID columns + filter.date.daily <- dplyr::filter(non_na_daily, time == max_time & siteID == site_ID) + print(filter.date.daily) -VSWC <- ncdf4::ncvar_def(name = "mass_fraction_of_unfrozen_water_in_soil_moisture", units = "volumetric soil water content", - list(depth, time), missval = -999, longname = paste0(site_ID, "_", "VSWC"), prec = "float") + # save the lastest vswc value into object and turn this into a netCDF + depth <- ncdf4::ncdim_def(name = "depth", units = "meters", vals = filter.date.daily$vswc) + time <- ncdf4::ncdim_def(name = "time", units = "yyyymmdd", longname = "date", vals = as.numeric(gsub("-", "", filter.date.daily$time))) -soilmoisture.nc <- ncdf4::nc_create(paste0(site_ID, 
"_", "VSWC.nc"), VSWC) + VSWC <- ncdf4::ncvar_def( + name = "mass_fraction_of_unfrozen_water_in_soil_moisture", units = "volumetric soil water content", + list(depth, time), missval = -999, longname = paste0(site_ID, "_", "VSWC"), prec = "float" + ) -ncdf4::ncvar_put(nc=soilmoisture.nc, "mass_fraction_of_unfrozen_water_in_soil_moisture", vals = filter.date.daily$vswc) + soilmoisture.nc <- ncdf4::nc_create(paste0(site_ID, "_", "VSWC.nc"), VSWC) -print(soilmoisture.nc) + ncdf4::ncvar_put(nc = soilmoisture.nc, "mass_fraction_of_unfrozen_water_in_soil_moisture", vals = filter.date.daily$vswc) -ncdf4::nc_close(soilmoisture.nc) + print(soilmoisture.nc) + ncdf4::nc_close(soilmoisture.nc) } diff --git a/modules/data.land/inst/meta2format.EML.R b/modules/data.land/inst/meta2format.EML.R index 7a91a613e48..671624f2995 100644 --- a/modules/data.land/inst/meta2format.EML.R +++ b/modules/data.land/inst/meta2format.EML.R @@ -1,8 +1,8 @@ library(devtools) -devtools::install_github("ropensci/EML") #install EML package +devtools::install_github("ropensci/EML") # install EML package library(EML) -#Steps for meta2format.EML: +# Steps for meta2format.EML: # 1) Extract the required metadata from the file # 2) Insert metadata into the database. # Brown Dog will extract the required metadata and @@ -12,19 +12,19 @@ library(EML) # outside of meta2format create a generic function for inserting # database formats and formats variables into Bety -#meta2format.EML <- function(){} -###try on file I gave to Gene -#f3 <- "/Users/josh/Downloads/doi_10.5063_AA_nceas.980.2-METADATA(1).xml" +# meta2format.EML <- function(){} +### try on file I gave to Gene +# f3 <- "/Users/josh/Downloads/doi_10.5063_AA_nceas.980.2-METADATA(1).xml" f3 <- "/fs/data3/jam2767/EML/doi_10.5063_AA_nceas.980.2-METADATA.xml" eml3 <- read_eml(f3) eml_validate(eml3) dt3 <- eml_get(eml3, "dataTable") -length(dt3) #5 data tables +length(dt3) # 5 data tables entities3 <- sapply(dt3, eml_get, "entityName") urls3 <- sapply(dt3, eml_get, "url") -#urls3 <- sapply(dt3, function(x) x@physical[[1]]@distribution[[1]]@online@url) -attrs3 <- eml_get(dt3, "attributeName") #multiple ways to access attributes +# urls3 <- sapply(dt3, function(x) x@physical[[1]]@distribution[[1]]@online@url) +attrs3 <- eml_get(dt3, "attributeName") # multiple ways to access attributes length(attrs3) -#get_attributes(eml3@dataset@dataTable[[1]]@attributeList) -attr3 <- get_attributes(eml3@dataset@dataTable[[1]]@attributeList) #only getting attributes for 1st table -attr3$attributes$attributeName #look at attribute names +# get_attributes(eml3@dataset@dataTable[[1]]@attributeList) +attr3 <- get_attributes(eml3@dataset@dataTable[[1]]@attributeList) # only getting attributes for 1st table +attr3$attributes$attributeName # look at attribute names diff --git a/modules/data.land/inst/treecore_StateSpace.R b/modules/data.land/inst/treecore_StateSpace.R index 6178523a98f..08549e3429a 100644 --- a/modules/data.land/inst/treecore_StateSpace.R +++ b/modules/data.land/inst/treecore_StateSpace.R @@ -8,12 +8,12 @@ trees <- read.csv("H 2012 Adult Field Data.csv") ## Read tree ring data rings <- Read_Tuscon("Revised 2/") -combined <- matchInventoryRings(trees,rings,nyears=10) +combined <- matchInventoryRings(trees, rings, nyears = 10) data <- buildJAGSdata_InventoryRings(combined) -jags.out = InventoryGrowthFusion(data,n.iter=2000) +jags.out <- InventoryGrowthFusion(data, n.iter = 2000) pdf("HF_JAM_treerings.pdf") -InventoryGrowthFusionDiagnostics(jags.out,combined) -dev.off() \ No newline at end of 
file +InventoryGrowthFusionDiagnostics(jags.out, combined) +dev.off() diff --git a/modules/data.land/tests/testthat.R b/modules/data.land/tests/testthat.R index b0c9f33bba9..092712f1276 100644 --- a/modules/data.land/tests/testthat.R +++ b/modules/data.land/tests/testthat.R @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html diff --git a/modules/data.land/tests/testthat/test-PFT_consistency.R b/modules/data.land/tests/testthat/test-PFT_consistency.R index e2c3d677de4..5514d708b38 100644 --- a/modules/data.land/tests/testthat/test-PFT_consistency.R +++ b/modules/data.land/tests/testthat/test-PFT_consistency.R @@ -1,6 +1,6 @@ # Various tests to ensure that selected PFTs are internally-consistent and consistent with the FIA database # (recreates bugs in issue #775) -# +# # @author Anthony Cohen # ---------------------------------------------------------------------------------------------------------- @@ -10,35 +10,30 @@ betyparms <- PEcAn.DB::get_postgres_envvars( user = "bety", password = "bety", driver = "Postgres", - write = FALSE) + write = FALSE +) fiaparms <- betyparms fiaparms$dbname <- "fia5data" if (PEcAn.DB::db.exists(params = betyparms) && PEcAn.DB::db.exists(fiaparms)) { - - test_that("PFTs don't overlap species", { - - overlapping.pfts <- PEcAn.settings::read.settings("dup_species.xml") #settings list - #expect_output(fia.to.psscss(overlapping.pfts), "ERROR \\[.*\\] : There are [0123456789]+ spcd entries that are duplicated. Please remove overlapping PFTs.") - expect_error(fia.to.psscss(overlapping.pfts)) - + overlapping.pfts <- PEcAn.settings::read.settings("dup_species.xml") # settings list + # expect_output(fia.to.psscss(overlapping.pfts), "ERROR \\[.*\\] : There are [0123456789]+ spcd entries that are duplicated. Please remove overlapping PFTs.") + expect_error(fia.to.psscss(overlapping.pfts)) }) - - + + test_that("User is warned if PFTs have extra species not suggested by FIA", { - extra.pft <- PEcAn.settings::read.settings("wrong_pft.xml") - expect_error(fia.to.psscss(extra.pft)) + expect_error(fia.to.psscss(extra.pft)) }) - - + + test_that("PFTs encompass all species suggested by FIA", { - insufficient.pft <- PEcAn.settings::read.settings("wrong_pft.xml") expect_error(fia.to.psscss(insufficient.pft)) }) - - # Regex notes: 1) the timestamp that goes into the console is not read in the regex! + + # Regex notes: 1) the timestamp that goes into the console is not read in the regex! # 2) [:digit:] does not work without changing locale. Neither does \d # 3) logger does not interpret whitespace requests like \n } diff --git a/modules/data.land/tests/testthat/test-match_species_id.R b/modules/data.land/tests/testthat/test-match_species_id.R index ae3ba6dafb3..9f930fbe96b 100644 --- a/modules/data.land/tests/testthat/test-match_species_id.R +++ b/modules/data.land/tests/testthat/test-match_species_id.R @@ -1,15 +1,15 @@ test_that("Species matching works", { - test_merge <- function(input_codes, format_name, bety, ...) { dat_merge <- match_species_id( input_codes = input_codes, format_name = format_name, bety = bety, - ...) 
- expect_equal(dat_merge[1, 'genus'], 'Acer') - expect_equal(dat_merge[1, 'species'], 'rubrum') - expect_equal(dat_merge[2, 'genus'], 'Tsuga') - expect_equal(dat_merge[2, 'species'], 'canadensis') + ... + ) + expect_equal(dat_merge[1, "genus"], "Acer") + expect_equal(dat_merge[1, "species"], "rubrum") + expect_equal(dat_merge[2, "genus"], "Tsuga") + expect_equal(dat_merge[2, "species"], "canadensis") expect_equal(nrow(dat_merge), length(input_codes)) expect_false(any(is.na(dat_merge$bety_species_id))) expect_false(any(duplicated(dat_merge))) @@ -22,20 +22,23 @@ test_that("Species matching works", { user = "bety", password = "bety", host = "localhost", - driver = "Postgres") + driver = "Postgres" + ) con <- PEcAn.DB::db.open(db_params) - test_merge(c('ACRU', 'TSCA'), 'usda', con) - test_merge(c(316L, 261L), 'fia', con) - test_merge(c('Acer rubrum', 'Tsuga canadensis'), 'latin_name', con) + test_merge(c("ACRU", "TSCA"), "usda", con) + test_merge(c(316L, 261L), "fia", con) + test_merge(c("Acer rubrum", "Tsuga canadensis"), "latin_name", con) test_table <- data.frame( bety_species_id = c(30L, 1419L), - input_code = c('AceRub', 'TsuCan')) + input_code = c("AceRub", "TsuCan") + ) test_merge( input_codes = test_table$input_code, - format_name = 'custom', + format_name = "custom", bety = con, - translation_table = test_table) + translation_table = test_table + ) }) diff --git a/modules/data.land/tests/testthat/test-soil_params.R b/modules/data.land/tests/testthat/test-soil_params.R index 331112c1815..8d50958f0ff 100644 --- a/modules/data.land/tests/testthat/test-soil_params.R +++ b/modules/data.land/tests/testthat/test-soil_params.R @@ -1,23 +1,22 @@ - test_that("accepts 2-component textures", { - sand <- c(0.3, 0.4, 0.5) - clay <- c(0.2, 0.3, 0.3) - res <- soil_params(sand = sand, clay = clay) + sand <- c(0.3, 0.4, 0.5) + clay <- c(0.2, 0.3, 0.3) + res <- soil_params(sand = sand, clay = clay) - expect_equal(res$fraction_of_sand_in_soil, sand) - expect_equal(res$fraction_of_clay_in_soil, clay) - expect_equal(res$fraction_of_silt_in_soil, 1 - sand - clay) - expect_equal(res$soil_type, c("Loam", "Clayey loam", "Sandy clay loam")) + expect_equal(res$fraction_of_sand_in_soil, sand) + expect_equal(res$fraction_of_clay_in_soil, clay) + expect_equal(res$fraction_of_silt_in_soil, 1 - sand - clay) + expect_equal(res$soil_type, c("Loam", "Clayey loam", "Sandy clay loam")) }) test_that("converts percent to proportion and normalizes to sum of fractions", { - res_prop <- soil_params(sand = 0.1, silt = 0.6, clay = 0.3) - res_pct <- soil_params(sand = 10, silt = 60, clay = 30) - res_halfagain <- soil_params(sand = 0.15, silt = 0.9, clay = 0.45) - res_150pct <- soil_params(sand = 15, silt = 90, clay = 45) + res_prop <- soil_params(sand = 0.1, silt = 0.6, clay = 0.3) + res_pct <- soil_params(sand = 10, silt = 60, clay = 30) + res_halfagain <- soil_params(sand = 0.15, silt = 0.9, clay = 0.45) + res_150pct <- soil_params(sand = 15, silt = 90, clay = 45) - expect_equal(res_prop, res_pct) - expect_equal(res_halfagain, res_150pct) - expect_equal(res_prop, res_150pct) - expect_equal(res_prop$soil_type, "Silty clay loam") + expect_equal(res_prop, res_pct) + expect_equal(res_halfagain, res_150pct) + expect_equal(res_prop, res_150pct) + expect_equal(res_prop$soil_type, "Silty clay loam") }) diff --git a/modules/data.mining/inst/ModelResidualSpectra.R b/modules/data.mining/inst/ModelResidualSpectra.R index 32ac5b99c30..914a946e90a 100644 --- a/modules/data.mining/inst/ModelResidualSpectra.R +++ 
b/modules/data.mining/inst/ModelResidualSpectra.R @@ -1,6 +1,6 @@ ## Script to analyse the spectral signal of model error ## -## Michael Dietze, Boston University +## Michael Dietze, Boston University ## ## read functions @@ -10,10 +10,12 @@ source("ResidSpectra.R") model.dir <- "NEEm" ## set of models to analyze -model.set <- c(sort(c("BEPS", "CNCLASS", "ISOLSM", "TECO", "ecosys", "SiBCASA", "SiB", "DLEM", "ED2", - "LoTEC_DA", "AgroIBIS", "DNDC", "SiBcrop", "can.ibis", "EDCM", "ORCHIDEE", "LPJ", - "BIOME_BGC", "SSiB2", "TRIPLEX", "EPIC")), "MEAN") -Nmodel <- length(model.set) ## number of models +model.set <- c(sort(c( + "BEPS", "CNCLASS", "ISOLSM", "TECO", "ecosys", "SiBCASA", "SiB", "DLEM", "ED2", + "LoTEC_DA", "AgroIBIS", "DNDC", "SiBcrop", "can.ibis", "EDCM", "ORCHIDEE", "LPJ", + "BIOME_BGC", "SSiB2", "TRIPLEX", "EPIC" +)), "MEAN") +Nmodel <- length(model.set) ## number of models ## listing of available 'site' files Files should contain a time column, ## an observed flux column, and a column for each model @@ -24,65 +26,65 @@ yset <- 1990:2010 ######### LOOP OVER SITES ########## for (i in seq_along(site.files)) { - ## load site data table - dat <- read.table(paste(model.dir, site.files[i], sep = "/"), - header = TRUE, na.string = "-999.000") + dat <- read.table(paste(model.dir, site.files[i], sep = "/"), + header = TRUE, na.string = "-999.000" + ) ysel <- which(dat$X.YEAR %in% yset) dat <- dat[ysel, ] - - m2c <- match(model.set, names(dat)) ## match model names to data table columns - day <- 1 / diff(dat$FDOY[1:2]) ## number of observations per day - Nperiod <- 1 + 4 * log2(nrow(dat)) ## number of periods (# of col's in Power matrix) - + + m2c <- match(model.set, names(dat)) ## match model names to data table columns + day <- 1 / diff(dat$FDOY[1:2]) ## number of observations per day + Nperiod <- 1 + 4 * log2(nrow(dat)) ## number of periods (# of col's in Power matrix) + ######### LOOP OVER MODELS ########## for (j in seq_len(Nmodel)) { - - k <- m2c[j] ## desired column in data table for specified model - + k <- m2c[j] ## desired column in data table for specified model + wv <- ResidSpectra(data = dat$NEE_FILLED, model = dat[, k], obsPerDay == day, case = 3) - + ## save the spectra for each model and each site to a separate file save(wv, Power, day, file = paste0("spec", i, ".", j, ".Rdata")) - + print(c(i, j)) - } ## end model loop - -} ## loop over sites + } ## end model loop +} ## loop over sites if (FALSE) { ## Useful Diagnostics - + ## overall power spectrum Power <- wv$Power period <- wv$period Pglobe <- apply(Power, 2, mean) plot(period, Pglobe, log = "xy", xlab = "Period (days)", ylab = "Power") - + ## divide up spectra into bins - + ## daily peak day.mid <- findInterval(1, period) day.bin <- day.mid + c(-4, 4) abline(v = period[day.bin]) - + ## annual peak year.mid <- findInterval(365.25, period) year.bin <- year.mid + c(-4, 4) abline(v = period[year.bin]) - - synop.bin <- (max(day.bin) + 1):(min(year.bin) - 1) ## period between daily and annual - subday.bin <- 1:(min(day.bin) - 1) ## period < daily - inter.bin <- (max(year.bin) + 1):length(period) ## interannual + + synop.bin <- (max(day.bin) + 1):(min(year.bin) - 1) ## period between daily and annual + subday.bin <- 1:(min(day.bin) - 1) ## period < daily + inter.bin <- (max(year.bin) + 1):length(period) ## interannual if (length(period) <= max(year.bin)) { inter.bin <- NA } - + ## bin power by periods - pow.bin <- c(sum(Pglobe[subday.bin]), sum(Pglobe[day.bin]), sum(Pglobe[synop.bin]), - sum(Pglobe[year.bin]), - 
sum(Pglobe[inter.bin])) + pow.bin <- c( + sum(Pglobe[subday.bin]), sum(Pglobe[day.bin]), sum(Pglobe[synop.bin]), + sum(Pglobe[year.bin]), + sum(Pglobe[inter.bin]) + ) names(pow.bin) <- c("subdaily", "daily", "synoptic", "annual", "interannual") - format(pow.bin / sum(pow.bin, na.rm = TRUE) * 100, digits = 3, scientific = FALSE) ## % of spectral power in each bin + format(pow.bin / sum(pow.bin, na.rm = TRUE) * 100, digits = 3, scientific = FALSE) ## % of spectral power in each bin } diff --git a/modules/data.mining/inst/NACPspecPost.R b/modules/data.mining/inst/NACPspecPost.R index a92797c33ac..b3a091086c5 100644 --- a/modules/data.mining/inst/NACPspecPost.R +++ b/modules/data.mining/inst/NACPspecPost.R @@ -7,127 +7,139 @@ ## ## libraries and functions -library(dplR, lib.loc = "~/lib/R") ## Andy Bunn's Dendrochronology package +library(dplR, lib.loc = "~/lib/R") ## Andy Bunn's Dendrochronology package ## directory of spectra spec.dir <- "Rdata" ## Top ten sites -sites <- c(1, ##CA-Ca1 - 5, ##CA-Let - 7, ##CA-Mer - 8, ##CA-Oas - 9, ##CA-Obs - 25,##US-Ha1 - 26,##US-Ho1 - 30,##US-Me2 - 38,##US-Ne3 - 45) ##US-UMB - -site.name <- c("CA-Ca1", - "CA-Let", - "CA-Mer", - "CA-Oas", - "CA-Obs", - "US-Ha1", - "US-Ho1", - "US-Me2", - "US-Ne3", - "US-UMB") - -model.set <- sort(c("BEPS", "CNCLASS", "ISOLSM", "TECO", "ecosys", "SiBCASA", "SiB", "DLEM", "ED2", - "LoTEC_DA", "AgroIBIS", "DNDC", "SiBcrop", "can.ibis", "EDCM", "ORCHIDEE", "LPJ", "BIOME_BGC", - "SSiB2", "TRIPLEX", "EPIC")) -Nmodel <- length(model.set) ## number of models +sites <- c( + 1, ## CA-Ca1 + 5, ## CA-Let + 7, ## CA-Mer + 8, ## CA-Oas + 9, ## CA-Obs + 25, ## US-Ha1 + 26, ## US-Ho1 + 30, ## US-Me2 + 38, ## US-Ne3 + 45 +) ## US-UMB + +site.name <- c( + "CA-Ca1", + "CA-Let", + "CA-Mer", + "CA-Oas", + "CA-Obs", + "US-Ha1", + "US-Ho1", + "US-Me2", + "US-Ne3", + "US-UMB" +) + +model.set <- sort(c( + "BEPS", "CNCLASS", "ISOLSM", "TECO", "ecosys", "SiBCASA", "SiB", "DLEM", "ED2", + "LoTEC_DA", "AgroIBIS", "DNDC", "SiBcrop", "can.ibis", "EDCM", "ORCHIDEE", "LPJ", "BIOME_BGC", + "SSiB2", "TRIPLEX", "EPIC" +)) +Nmodel <- length(model.set) ## number of models cross.model <- list() cross.site <- list() for (s in sites) { - if (s == 30) { next } - + # cross.site[[s]] <- list() cross.model[[s]] <- list() - + avg.spec <- norm.spec <- geom.spec <- NULL mcount <- 0 - + load(paste0(sub("-", "", site.name[which(sites == s)]), ".specCI.Rdata")) - + for (m in seq_len(Nmodel)) { - print(c(s, m)) - + load(paste0(spec.dir, "/NACPspecNORMpre2.", s, ".", m, ".Rdata")) - period <- wv$period/day - + period <- wv$period / day + if (length(period) != length(Period)) { print(c("PERIOD MISSMATCH", length(period), length(Period))) next } - + ## to calculate: ## - global spectra for each model, graph multi-site and median ## - composite spectra for each site, avg across models ## - relative power in different bands: ## - overall summary ## - stat model of site & model effects - + if (sum(Power, na.rm = TRUE) > 0) { ### if model was run for site - - + + mcount <- mcount + 1 - + ## divide up spectra Pglobe <- apply(Power, 2, sum, na.rm = TRUE) - Pnorm <- Pglobe/pCI[3, ] - + Pnorm <- Pglobe / pCI[3, ] + if (FALSE) { - plot(period, Pglobe, log = "xy", main = "US-Ho1 ED2", - ylim = c(10000, max(Pglobe)), - ylab = "Power", type = "l", lwd = 2, - cex.lab = 1.5, cex.main = 1.5, cex.axis = 1.2) + plot(period, Pglobe, + log = "xy", main = "US-Ho1 ED2", + ylim = c(10000, max(Pglobe)), + ylab = "Power", type = "l", lwd = 2, + cex.lab = 1.5, cex.main = 1.5, cex.axis = 1.2 + ) 
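## The overlay below adds the null 95% threshold (pCI[3, ]); periods where
## Pnorm = Pglobe / pCI[3, ] exceeds 1 carry more model-error power than the
## null expects. A small sketch of pulling out those periods (illustrative,
## using the objects defined above):
# sig.period <- period[Pnorm > 1]
# range(sig.period, na.rm = TRUE)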
lines(Period, pCI[3, ], col = 2, lty = 2) - + ## plot(period,Pglobe/pCI[3,],log='x') - plot(period, Pglobe/pCI[3, ], log = "xy", - ylab = "Power/(Null 95%)", type = "l", lwd = 2, - cex.lab = 1.5, cex.main = 1.5, cex.axis = 1.2, - main = "US-Ho1 ED2") + plot(period, Pglobe / pCI[3, ], + log = "xy", + ylab = "Power/(Null 95%)", type = "l", lwd = 2, + cex.lab = 1.5, cex.main = 1.5, cex.axis = 1.2, + main = "US-Ho1 ED2" + ) abline(v = c(1, 365.25), lty = 3, col = 6, lwd = 2) abline(h = 1, lty = 2, lwd = 2) } - + day.mid <- findInterval(1, period) day.bin <- day.mid + c(-4:4) ## abline(v=period[day.bin]) - + year.mid <- findInterval(365.25, period) year.bin <- year.mid + c(-4:4) ## abline(v=period[year.bin]) - + synop.bin <- (max(day.bin) + 1):(min(year.bin) - 1) subday.bin <- 1:(min(day.bin) - 1) inter.bin <- (max(year.bin) + 1):length(period) if (length(period) <= max(year.bin)) { inter.bin <- NA } - - pow.bin <- c(sum(Pglobe[subday.bin]), sum(Pglobe[day.bin]), sum(Pglobe[synop.bin]), - sum(Pglobe[year.bin]), - sum(Pglobe[inter.bin])) + + pow.bin <- c( + sum(Pglobe[subday.bin]), sum(Pglobe[day.bin]), sum(Pglobe[synop.bin]), + sum(Pglobe[year.bin]), + sum(Pglobe[inter.bin]) + ) pow.bin <- pow.bin / sum(pow.bin) - - pow.binN <- c(sum(Pnorm[subday.bin]), sum(Pnorm[day.bin]), sum(Pnorm[synop.bin]), - sum(Pnorm[year.bin]), - sum(Pnorm[inter.bin])) + + pow.binN <- c( + sum(Pnorm[subday.bin]), sum(Pnorm[day.bin]), sum(Pnorm[synop.bin]), + sum(Pnorm[year.bin]), + sum(Pnorm[inter.bin]) + ) ## pow.binN <- pow.binN / sum(pow.binN) - - + + if (is.null(avg.spec)) { avg.spec <- Power norm.spec <- Power / sum(as.vector(Power), na.rm = TRUE) @@ -137,24 +149,27 @@ for (s in sites) { norm.spec <- norm.spec + Power / sum(as.vector(Power), na.rm = TRUE) geom.spec <- geom.spec + log(Power) } - - - cross.model[[s]][[m]] <- list(Pglobe = Pglobe, - pow.bin = pow.bin, - Pnorm = Pnorm, - pow.binN = pow.binN, - period = period, - day = day) - + + + cross.model[[s]][[m]] <- list( + Pglobe = Pglobe, + pow.bin = pow.bin, + Pnorm = Pnorm, + pow.binN = pow.binN, + period = period, + day = day + ) } else { ## end if model run cross.model[[s]][[m]] <- NULL } - } ## end loop over models - - cross.site[[s]] <- list(avg = avg.spec/mcount, norm = norm.spec/mcount, geom = exp(geom.spec/mcount), - period = period, day = day) - + } ## end loop over models + + cross.site[[s]] <- list( + avg = avg.spec / mcount, norm = norm.spec / mcount, geom = exp(geom.spec / mcount), + period = period, day = day + ) + save(cross.site, cross.model, file = "CrossSpec.v3.Rdata") } @@ -183,7 +198,6 @@ for (m in seq_len(Nmodel)) { } for (m in seq_len(Nmodel)) { - ## calc ranges maxlen <- 0 nsite <- 0 @@ -195,10 +209,10 @@ for (m in seq_len(Nmodel)) { periods[i, seq_along(cross.site[[s]]$period)] <- cross.site[[s]]$period } } - - + + Period <- sort(unique(as.vector(periods))) - sel <- c(1, 1 + which(diff(log(Period)) > 0.001)) + sel <- c(1, 1 + which(diff(log(Period)) > 0.001)) Period <- Period[sel] maxlen <- length(Period) if (nsite > 0) { @@ -231,39 +245,40 @@ for (m in seq_len(Nmodel)) { } } } - + ## calc means - P.cnt <- apply(!is.na(Power), 2, sum) - P.bar <- apply(Power, 2, mean, na.rm = TRUE) - P.geom <- exp(apply(log(Power), 2, mean, na.rm = TRUE)) - Psum <- apply(Power, 1, sum, na.rm = TRUE) - P.norm <- apply(Power/Psum, 2, mean, na.rm = TRUE) + P.cnt <- apply(!is.na(Power), 2, sum) + P.bar <- apply(Power, 2, mean, na.rm = TRUE) + P.geom <- exp(apply(log(Power), 2, mean, na.rm = TRUE)) + Psum <- apply(Power, 1, sum, na.rm = TRUE) + P.norm <- 
apply(Power / Psum, 2, mean, na.rm = TRUE) P.gnorm <- exp(apply(log(Power / Psum), 2, mean, na.rm = TRUE)) - P.med <- apply(Power, 2, median, na.rm = TRUE) + P.med <- apply(Power, 2, median, na.rm = TRUE) P.norm.med <- apply(Power / Psum, 2, median, na.rm = TRUE) - - PN.cnt <- apply(!is.na(PowerNorm), 2, sum) - PN.bar <- apply(PowerNorm, 2, mean, na.rm = TRUE) - PN.geom <- exp(apply(log(PowerNorm), 2, mean, na.rm = TRUE)) - PNsum <- apply(PowerNorm, 1, sum, na.rm = TRUE) - PN.norm <- apply(PowerNorm/Psum, 2, mean, na.rm = TRUE) + + PN.cnt <- apply(!is.na(PowerNorm), 2, sum) + PN.bar <- apply(PowerNorm, 2, mean, na.rm = TRUE) + PN.geom <- exp(apply(log(PowerNorm), 2, mean, na.rm = TRUE)) + PNsum <- apply(PowerNorm, 1, sum, na.rm = TRUE) + PN.norm <- apply(PowerNorm / Psum, 2, mean, na.rm = TRUE) PN.gnorm <- exp(apply(log(PowerNorm / Psum), 2, mean, na.rm = TRUE)) - PN.med <- apply(PowerNorm, 2, median, na.rm = TRUE) + PN.med <- apply(PowerNorm, 2, median, na.rm = TRUE) PN.norm.med <- apply(PowerNorm / Psum, 2, median, na.rm = TRUE) - + day.mid <- findInterval(1, Period) day.bin <- day.mid + c(-4:4) year.mid <- findInterval(365.25, Period) year.bin <- year.mid + c(-4:4) breaks <- c(range(day.bin), range(year.bin)) - + ModelMean[[m]] <- list(Period = Period, P.norm.med = P.norm.med, PN.med = PN.med) - + if (FALSE) { - ### Raw power - arithmetic mean, geometirc mean, and median - plot(Period, P.bar, ylim = c(1, maxP), log = "xy", type = "n", ylab = "Power", main = paste("RAW", - model.set[m]), xlab = "period (days)") + plot(Period, P.bar, ylim = c(1, maxP), log = "xy", type = "n", ylab = "Power", main = paste( + "RAW", + model.set[m] + ), xlab = "period (days)") for (i in seq_along(sites)) { # lines(cross.model[[s]][[m]]$Pglobe) lines(Period, Power[i, ]) @@ -273,10 +288,12 @@ for (m in seq_len(Nmodel)) { lines(Period, P.med, ylim = c(1, maxP), type = "l", lwd = 3, col = 4) abline(v = Period[breaks]) } - + ### Normalized Spectra - plot(Period, P.norm, ylim = range(1e-07, 1), log = "xy", type = "n", ylab = "Power", main = paste("NORMALISED", - model.set[m]), xlab = "period (days)") + plot(Period, P.norm, ylim = range(1e-07, 1), log = "xy", type = "n", ylab = "Power", main = paste( + "NORMALISED", + model.set[m] + ), xlab = "period (days)") for (i in seq_along(sites)) { # lines(cross.model[[s]][[m]]$Pglobe) lines(Period, Power[i, ] / Psum[i]) @@ -285,12 +302,14 @@ for (m in seq_len(Nmodel)) { lines(Period, P.gnorm, ylim = c(1, maxP), type = "l", lwd = 3, col = 3) lines(Period, P.norm.med, ylim = c(1, maxP), type = "l", lwd = 3, col = 4) abline(v = Period[breaks]) - + ### CI Raw power - arithmetic mean, geometirc mean, and median - plot(Period, PN.bar, ylim = c(0.1, maxPN), log = "xy", type = "n", - ylab = "Power/(95% NULL)", - main = paste("TEST RAW", model.set[m]), - xlab = "period (days)") + plot(Period, PN.bar, + ylim = c(0.1, maxPN), log = "xy", type = "n", + ylab = "Power/(95% NULL)", + main = paste("TEST RAW", model.set[m]), + xlab = "period (days)" + ) for (i in seq_along(sites)) { # lines(cross.model[[s]][[m]]$Pglobe) lines(Period, PowerNorm[i, ]) @@ -300,16 +319,18 @@ for (m in seq_len(Nmodel)) { lines(Period, PN.med, ylim = c(1, maxP), type = "l", lwd = 3, col = 4) abline(v = Period[breaks], lty = 3, col = 6) abline(h = 1, lty = 2) - + if (FALSE) { ## Nothing new learned here and loose signif. 
threshold CI Normalized Spectra - plot(Period, PN.norm, ylim = range(1e-07, 1), log = "xy", type = "n", - ylab = "Power", - main = paste("NORMALISED", model.set[m]), - xlab = "period (days)") + plot(Period, PN.norm, + ylim = range(1e-07, 1), log = "xy", type = "n", + ylab = "Power", + main = paste("NORMALISED", model.set[m]), + xlab = "period (days)" + ) for (i in seq_along(sites)) { # lines(cross.model[[s]][[m]]$Pglobe) - lines(Period, PowerNorm[i, ]/PNsum[i]) + lines(Period, PowerNorm[i, ] / PNsum[i]) } lines(Period, PN.norm, ylim = c(1, maxP), type = "l", lwd = 3, col = 2) lines(Period, PN.gnorm, ylim = c(1, maxP), type = "l", lwd = 3, col = 3) @@ -317,26 +338,28 @@ for (m in seq_len(Nmodel)) { abline(v = Period[breaks]) } } -} ### end BY MODEL +} ### end BY MODEL ### Normalized Spectra i <- 9 -plot(ModelMean[[i]]$Period, ModelMean[[i]]$P.norm.med, - ylim = range(1e-07, 1), log = "xy", type = "n", - ylab = "Power", - main = paste("ALL MODELS - NORMALISED"), - xlab = "period (days)") +plot(ModelMean[[i]]$Period, ModelMean[[i]]$P.norm.med, + ylim = range(1e-07, 1), log = "xy", type = "n", + ylab = "Power", + main = paste("ALL MODELS - NORMALISED"), + xlab = "period (days)" +) for (i in seq_along(ModelMean)) { lines(ModelMean[[i]]$Period, ModelMean[[i]]$P.norm.med) } abline(v = c(1, 365.25), lty = 3, col = 6) ### CI Raw power i <- 9 -plot(ModelMean[[i]]$Period, ModelMean[[i]]$PN.med, - ylim = c(0.1, maxPN), log = "xy", type = "n", - ylab = "Power/(95% NULL)", - main = paste("ALL MODELS - SIGNIFICANCE"), - xlab = "period (days)") +plot(ModelMean[[i]]$Period, ModelMean[[i]]$PN.med, + ylim = c(0.1, maxPN), log = "xy", type = "n", + ylab = "Power/(95% NULL)", + main = paste("ALL MODELS - SIGNIFICANCE"), + xlab = "period (days)" +) for (i in seq_along(ModelMean)) { lines(ModelMean[[i]]$Period, ModelMean[[i]]$PN.med) } @@ -351,46 +374,50 @@ library(fields) for (i in sitesel) { print(i) s <- sites[i] - + pdf(paste0("bySite.", s, ".pdf"), width = 6.5, height = 11) par(mfrow = c(3, 1)) - + ## spec.bar <- spec.gmean <- spec.norm <- spec.gnorm <- matrix(0, cross.site[[s]] <- ## list(avg=avg.spec,norm=norm.spec,geom=exp(geom.spec),period=period,day=day) nbreak <- 10 mylevels <- quantile(cross.site[[s]]$avg, seq(0, 1, length = (nbreak + 1)), na.rm = TRUE) - image.plot((seq_len(nrow(cross.site[[s]]$avg))) / cross.site[[s]]$day, - cross.site[[s]]$period, cross.site[[s]]$avg, - ylab = "Period (days)", log = "y", - main = paste(site.name[i], "AVG"), - breaks = mylevels, - axis.args = list(at = log(mylevels), labels = formatC(mylevels, digits = 2)), - nlevel = nbreak, - xlab = "Time (days)") + image.plot((seq_len(nrow(cross.site[[s]]$avg))) / cross.site[[s]]$day, + cross.site[[s]]$period, cross.site[[s]]$avg, + ylab = "Period (days)", log = "y", + main = paste(site.name[i], "AVG"), + breaks = mylevels, + axis.args = list(at = log(mylevels), labels = formatC(mylevels, digits = 2)), + nlevel = nbreak, + xlab = "Time (days)" + ) # lab.breaks=formatC(mylevels,digits = 2), # breaks=exp(seq(min(log(cross.site[[s]]$avg),na.rm=TRUE), # max(log(cross.site[[s]]$avg),na.rm=TRUE),length=65)) mylevels <- quantile(cross.site[[s]]$geom, seq(0, 1, length = (nbreak + 1)), na.rm = TRUE) - image.plot((seq_len(nrow(cross.site[[s]]$geom))) / cross.site[[s]]$day, - cross.site[[s]]$period, - cross.site[[s]]$geom, ylab = "Period (days)", - log = "y", main = paste(site.name[i], "GEOM"), - breaks = mylevels, - axis.args = list(at = log(mylevels), labels = formatC(mylevels, digits = 2)), - nlevel = nbreak, - xlab = "Time (days)") + 
image.plot((seq_len(nrow(cross.site[[s]]$geom))) / cross.site[[s]]$day, + cross.site[[s]]$period, + cross.site[[s]]$geom, + ylab = "Period (days)", + log = "y", main = paste(site.name[i], "GEOM"), + breaks = mylevels, + axis.args = list(at = log(mylevels), labels = formatC(mylevels, digits = 2)), + nlevel = nbreak, + xlab = "Time (days)" + ) # lab.breaks=formatC(mylevels,digits = 2), # breaks=exp(seq(min(log(cross.site[[s]]$geom),na.rm=TRUE), # max(log(cross.site[[s]]$geom),na.rm=TRUE),length=65)) mylevels <- quantile(cross.site[[s]]$norm, seq(0, 1, length = (nbreak + 1)), na.rm = TRUE) - image.plot((seq_len(nrow(cross.site[[s]]$norm))) / cross.site[[s]]$day, - cross.site[[s]]$period, - cross.site[[s]]$norm, - ylab = "Period (days)", log = "y", main = paste(site.name[i], "NORM"), - breaks = mylevels, - axis.args = list(at = log(mylevels), labels = formatC(mylevels, digits = 2)), - nlevel = nbreak, - xlab = "Time (days)") + image.plot((seq_len(nrow(cross.site[[s]]$norm))) / cross.site[[s]]$day, + cross.site[[s]]$period, + cross.site[[s]]$norm, + ylab = "Period (days)", log = "y", main = paste(site.name[i], "NORM"), + breaks = mylevels, + axis.args = list(at = log(mylevels), labels = formatC(mylevels, digits = 2)), + nlevel = nbreak, + xlab = "Time (days)" + ) # lab.breaks=formatC(mylevels,digits = 2), # breaks=exp(seq(min(log(cross.site[[s]]$norm),na.rm=TRUE), # max(log(cross.site[[s]]$norm),na.rm=TRUE),length=65)) @@ -419,7 +446,7 @@ smid <- tapply(ptable[, 5], ptable[, 1], mean) syear <- tapply(ptable[, 6], ptable[, 1], mean) sinter <- tapply(ptable[, 7], ptable[, 1], mean) -barplot(rbind(ssub, sday, smid, syear, sinter), col = 1:5, names.arg = site.name[sitesel]) #, +barplot(rbind(ssub, sday, smid, syear, sinter), col = 1:5, names.arg = site.name[sitesel]) # , ## legend.text=c('subdaily','diurnal', 'intermediate','annual','interannual') ) msub <- tapply(ptable[, 3], ptable[, 2], mean) @@ -431,6 +458,6 @@ mbar <- rbind(msub, mday, mmid, myear, minter) colnames(mbar) <- model.set par(las = 3, mar = c(7, 4, 4, 2)) -bp <- barplot(mbar, col = 1:5, space = 0.1, ) #, +bp <- barplot(mbar, col = 1:5, space = 0.1, ) # , ## legend.text=c('subdaily','diurnal', 'intermediate','annual','interannual') ) ## text(bp,-2,model.set,pos=2,srt=45) diff --git a/modules/data.mining/inst/NACPspectral.Keenan.R b/modules/data.mining/inst/NACPspectral.Keenan.R index a9e490d39a2..90374bea294 100644 --- a/modules/data.mining/inst/NACPspectral.Keenan.R +++ b/modules/data.mining/inst/NACPspectral.Keenan.R @@ -7,7 +7,7 @@ ## ### Specify required functions -library(dplR, lib.loc = "~/lib/R") ## Andy Bunn's Dendrochronology package +library(dplR, lib.loc = "~/lib/R") ## Andy Bunn's Dendrochronology package WAVE <- function(crn.vec, yr.vec, p2 = NULL, dj = 0.25, siglvl = 0.99, ...) { ## simple function based on Bunn's wavelet.plot fcn that returns wavelet info if (is.null(p2)) { @@ -16,7 +16,7 @@ WAVE <- function(crn.vec, yr.vec, p2 = NULL, dj = 0.25, siglvl = 0.99, ...) { n <- length(crn.vec) Dt <- 1 s0 <- 1 - j1 <- p2/dj + j1 <- p2 / dj mother <- "morlet" crn.vec.ac <- acf(crn.vec, lag.max = 2, plot = FALSE) lag1 <- (crn.vec.ac$acf[2] + sqrt(crn.vec.ac$acf[3])) / 2 @@ -24,28 +24,28 @@ WAVE <- function(crn.vec, yr.vec, p2 = NULL, dj = 0.25, siglvl = 0.99, ...) 
{ } # WAVE if (FALSE) { - ## set of models to analyze - model.set <- c(sort(c("BEPS", "CNCLASS", "ISOLSM", "TECO", "ecosys", "SiBCASA", "SiB", "DLEM", - "ED2", "LoTEC_DA", "AgroIBIS", "DNDC", "SiBcrop", "can.ibis", "EDCM", "ORCHIDEE", - "LPJ", "BIOME_BGC", "SSiB2", "TRIPLEX", "EPIC")), "MEAN") - - Nmodel <- length(model.set) ## number of models - + model.set <- c(sort(c( + "BEPS", "CNCLASS", "ISOLSM", "TECO", "ecosys", "SiBCASA", "SiB", "DLEM", + "ED2", "LoTEC_DA", "AgroIBIS", "DNDC", "SiBcrop", "can.ibis", "EDCM", "ORCHIDEE", + "LPJ", "BIOME_BGC", "SSiB2", "TRIPLEX", "EPIC" + )), "MEAN") + + Nmodel <- length(model.set) ## number of models + vars <- c("NEEm", "GPPm", "REm") - + ## directory to find model files for (model.dir in vars) { - ## listing of available site files site.files <- dir(model.dir, "txt") - + ######### LOOP OVER SITES ########## for (i in seq_along(site.files)) { ## for(i in c(5,25,38,45)){ for(i in c(1,5,7,8,9,25,26,30,38,45)){ - + site <- sub(".txt", "", site.files[i]) - + yset <- 1990:2010 if (i == 5) { yset <- 1999:2007 @@ -56,51 +56,50 @@ if (FALSE) { } else if (i == 45) { yset <- 1999:2003 } - + ## load site data table - dat <- read.table(paste(model.dir, site.files[i], sep = "/"), header = TRUE, na.string = "-999.000") + dat <- read.table(paste(model.dir, site.files[i], sep = "/"), header = TRUE, na.string = "-999.000") ysel <- which(dat$X.YEAR %in% yset) - dat <- dat[ysel, ] - - day <- 1 / diff(dat$FDOY[1:2]) ## number of observations per day + dat <- dat[ysel, ] + + day <- 1 / diff(dat$FDOY[1:2]) ## number of observations per day Nperiod <- 1 + 4 * log2(nrow(dat)) - + POWER <- NULL - + ######### LOOP OVER MODELS ########## for (j in 3:ncol(dat)) { - ## option 1 - absolute fluxes - y <- dat[, j] ### Model error fcn - y[is.na(y)] <- 0 ## need to fill in missing values - - wv <- WAVE(y) #,p2=17) ## Calculate wavelet spectrum - period <- wv$period/day ## wavelet periods - Power <- (abs(wv$wave))^2 ## wavelet power + y <- dat[, j] ### Model error fcn + y[is.na(y)] <- 0 ## need to fill in missing values + + wv <- WAVE(y) # ,p2=17) ## Calculate wavelet spectrum + period <- wv$period / day ## wavelet periods + Power <- (abs(wv$wave))^2 ## wavelet power for (t in seq_along(wv$Scale)) { ### bias correction - Power[, t] <- Power[, t]/wv$Scale[t] + Power[, t] <- Power[, t] / wv$Scale[t] } ## Crop out cone of influence - coi <- wv$coi ## cone of influence (valid if below value) + coi <- wv$coi ## cone of influence (valid if below value) for (t in seq_along(coi)) { sel <- which(period > coi[t]) Power[t, sel] <- NA } Pglobe <- apply(Power, 2, sum, na.rm = TRUE) - + if (j == 3) { POWER <- matrix(NA, length(Pglobe), ncol(dat) - 2) colnames(POWER) <- colnames(dat)[3:ncol(dat)] } POWER[, j - 2] <- Pglobe - + save(Nperiod, period, day, POWER, file = paste0("NACPspec.Keenan.", site, ".Rdata")) - + print(c(site, colnames(dat)[j])) - } ## end model loop - } ## loop over sites - } ## loop over variables + } ## end model loop + } ## loop over sites + } ## loop over variables } ############### PROCESSING for 1000 Monte Carlo reps @@ -108,12 +107,11 @@ model.dir <- "NEEm" site.files <- dir(model.dir, "txt") for (sitenum in c(1, 5, 7, 8, 9, 25, 26, 38, 45)) { - site.name <- site.files[sitenum] site.name <- sub("_NEE.txt", "", site.name) site.name <- sub("-", "", site.name) prefix <- paste0("MDSNEE_", site.name, "-") - + yset <- 1990:2010 if (sitenum == 5) { yset <- 1999:2007 @@ -124,41 +122,40 @@ for (sitenum in c(1, 5, 7, 8, 9, 25, 26, 38, 45)) { } else if (sitenum == 45) { yset <- 
1999:2003 } - + ## load the pseudo data load(paste0(prefix, "pseudo.Rdata")) - + POWER <- NULL - + ######### LOOP OVER REPS ########## for (j in seq_along(dat)) { - ## option 1 - absolute fluxes - y <- dat[, j] ### Model error fcn - y[is.na(y)] <- 0 ## need to fill in missing values - - wv <- WAVE(y) #,p2=17) ## Calculate wavelet spectrum - period <- wv$period ## wavelet periods - Power <- (abs(wv$wave))^2 ## wavelet power + y <- dat[, j] ### Model error fcn + y[is.na(y)] <- 0 ## need to fill in missing values + + wv <- WAVE(y) # ,p2=17) ## Calculate wavelet spectrum + period <- wv$period ## wavelet periods + Power <- (abs(wv$wave))^2 ## wavelet power for (t in seq_along(wv$Scale)) { ### bias correction - Power[, t] <- Power[, t]/wv$Scale[t] + Power[, t] <- Power[, t] / wv$Scale[t] } ## Crop out cone of influence - coi <- wv$coi ## cone of influence (valid if below value) + coi <- wv$coi ## cone of influence (valid if below value) for (t in seq_along(coi)) { sel <- which(period > coi[t]) Power[t, sel] <- NA } Pglobe <- apply(Power, 2, sum, na.rm = TRUE) - + if (j == 1) { POWER <- matrix(NA, length(Pglobe), ncol(dat)) } POWER[, j] <- Pglobe - + save(period, POWER, file = paste0("NACPspecNULL.Keenan.", site.name, ".Rdata")) - + print(c(site.name, j)) - } ## end model loop -} ## loop over sites + } ## end model loop +} ## loop over sites diff --git a/modules/data.mining/inst/NACPspectral.R b/modules/data.mining/inst/NACPspectral.R index cd16ee6da6f..029d0fb7204 100644 --- a/modules/data.mining/inst/NACPspectral.R +++ b/modules/data.mining/inst/NACPspectral.R @@ -7,7 +7,7 @@ ## ### Specify required functions -library(dplR, lib.loc = "~/lib/R") ## Andy Bunn's Dendrochronology package +library(dplR, lib.loc = "~/lib/R") ## Andy Bunn's Dendrochronology package WAVE <- function(crn.vec, yr.vec, p2 = NULL, dj = 0.25, siglvl = 0.99, ...) { ## simple function based on Bunn's wavelet.plot fcn that returns wavelet info if (is.null(p2)) { @@ -28,11 +28,13 @@ WAVE <- function(crn.vec, yr.vec, p2 = NULL, dj = 0.25, siglvl = 0.99, ...) 
{ model.dir <- "NEEm" ## set of models to analyze -model.set <- sort(c("BEPS", "CNCLASS", "ISOLSM", "TECO", "ecosys", "SiBCASA", "SiB", "DLEM", "ED2", - "LoTEC_DA", "AgroIBIS", "DNDC", "SiBcrop", "can.ibis", "EDCM", "ORCHIDEE", "LPJ", "BIOME_BGC", - "SSiB2", "TRIPLEX", "EPIC")) +model.set <- sort(c( + "BEPS", "CNCLASS", "ISOLSM", "TECO", "ecosys", "SiBCASA", "SiB", "DLEM", "ED2", + "LoTEC_DA", "AgroIBIS", "DNDC", "SiBcrop", "can.ibis", "EDCM", "ORCHIDEE", "LPJ", "BIOME_BGC", + "SSiB2", "TRIPLEX", "EPIC" +)) -Nmodel <- length(model.set) ## number of models +Nmodel <- length(model.set) ## number of models ## listing of available site files site.files <- dir(model.dir, "txt") @@ -40,7 +42,6 @@ site.files <- dir(model.dir, "txt") ######### LOOP OVER SITES ########## for (i in seq_along(site.files)) { - ## load site data table dat <- read.table(paste(model.dir, site.files[i], sep = "/"), header = TRUE, na.string = "-999.000") @@ -49,19 +50,18 @@ for (i in seq_along(site.files)) { # dat <- dat[dat$X.YEAR %in% 1999:2007,] ## LET # dat <- dat[dat$X.YEAR %in% 1992:2006,] ## Ha1 - m2c <- match(model.set, names(dat)) ## match model names to data table columns - day <- 1 / diff(dat$FDOY[1:2]) ## number of observations per day + m2c <- match(model.set, names(dat)) ## match model names to data table columns + day <- 1 / diff(dat$FDOY[1:2]) ## number of observations per day Nperiod <- 1 + 4 * log2(nrow(dat)) - ##POWER <- array(NA,c(Nmodel,Nperiod,nrow(dat))) - ##SIGNIF <- matrix(NA,Nmodel,Nperiod) + ## POWER <- array(NA,c(Nmodel,Nperiod,nrow(dat))) + ## SIGNIF <- matrix(NA,Nmodel,Nperiod) ######### LOOP OVER MODELS ########## for (j in seq_len(Nmodel)) { - - k <- m2c[j] ## desired column in data table for specified model + k <- m2c[j] ## desired column in data table for specified model ## option 1 - absolute residuals - y <- dat$NEE_FILLED - dat[, k] ### Model error fcn + y <- dat$NEE_FILLED - dat[, k] ### Model error fcn ## option 2 - normalized residuals (post) if (FALSE) { @@ -75,7 +75,7 @@ for (i in seq_along(site.files)) { if (is.nan(ysd)) { ysd <- NA } - y <- (y - ybar) / ysd ## normalize error + y <- (y - ybar) / ysd ## normalize error } ## option 3 - normalized residuals (pre) @@ -89,7 +89,7 @@ for (i in seq_along(site.files)) { } else { NEEt.sd <- sqrt(stats::var(dat$NEE_FILLED, na.rm = TRUE)) } - NEEt.norm <- (dat$NEE_FILLED - NEEt.bar)/NEEt.sd + NEEt.norm <- (dat$NEE_FILLED - NEEt.bar) / NEEt.sd ## normalize model NEEm.bar <- mean(dat[, k], na.rm = TRUE) NEEm.sd <- NA @@ -98,16 +98,16 @@ for (i in seq_along(site.files)) { } else { NEEm.sd <- sqrt(stats::var(dat[, k], na.rm = TRUE)) } - NEEm.norm <- (dat[, k] - NEEm.bar)/NEEm.sd - y <- NEEm.norm - NEEt.norm ## calc residuals of normalized + NEEm.norm <- (dat[, k] - NEEm.bar) / NEEm.sd + y <- NEEm.norm - NEEt.norm ## calc residuals of normalized - y[is.na(y)] <- 0 ## need to fill in missing values + y[is.na(y)] <- 0 ## need to fill in missing values - wv <- WAVE(y) ## Calculate wavelet spectrum - period <- wv$period/day ## wavelet periods - Power <- (abs(wv$wave))^2 ## wavelet power + wv <- WAVE(y) ## Calculate wavelet spectrum + period <- wv$period / day ## wavelet periods + Power <- (abs(wv$wave))^2 ## wavelet power ## Crop out cone of influence - coi <- wv$coi ## cone of influence (valid if below value) + coi <- wv$coi ## cone of influence (valid if below value) for (t in seq_along(coi)) { sel <- which(period > coi[t]) Power[t, sel] <- NA @@ -121,31 +121,30 @@ for (i in seq_along(site.files)) { print(c(i, j)) } -} ## loop over sites +} ## 
loop over sites if (FALSE) { - plot(period, apply(Power, 2, mean), log = "xy", xlab = "Period (days)") abline(v = 1) abline(v = 365.25) - abline(v = 365.25/4) + abline(v = 365.25 / 4) ########################### SCRATCH ################################ - j <- 9 ## model + j <- 9 ## model - tm <- seq(1, by = 1/48, length = nrow(dat)) - NEE <- ts(dat$NEE_FILLED, deltat = 1/48) + tm <- seq(1, by = 1 / 48, length = nrow(dat)) + NEE <- ts(dat$NEE_FILLED, deltat = 1 / 48) plot(spectrum(NEE)) plot(dat$FDOY, dat$NEE) plot(dat$FDOY, dat$NEE_FILLED, pch = ".") - m2c <- match(model.set, names(dat)) ## match model names to data table columns - k <- m2c[j] ## desired column in data table for specified model + m2c <- match(model.set, names(dat)) ## match model names to data table columns + k <- m2c[j] ## desired column in data table for specified model plot(dat$FDOY, dat[, k], pch = ".") @@ -175,25 +174,25 @@ if (FALSE) { plot(acf(ra)) - wavelet.plot(ra, tm, p2 = 10) ##floor(log2(nrow(dat))))) + wavelet.plot(ra, tm, p2 = 10) ## floor(log2(nrow(dat))))) ### TESTING - y <- sin((1:512)/pi) + sin((1:512)/10) + y <- sin((1:512) / pi) + sin((1:512) / 10) ## null model construction: t <- seq_len(nrow(dat)) - y <- sin(2 * pi * t/day) + sin(2 * pi * t/(365.25 * day)) + y <- sin(2 * pi * t / day) + sin(2 * pi * t / (365.25 * day)) wavelet.plot(y, seq_along(y), log2(length(y))) plot(y, type = "l") wv <- WAVE(y) - day <- 1/diff(dat$FDOY[1:2]) - period <- wv$period/day + day <- 1 / diff(dat$FDOY[1:2]) + period <- wv$period / day Power <- (abs(wv$wave))^2 Signif <- t(matrix(wv$Signif, dim(wv$wave)[2], dim(wv$wave)[1])) - Signif <- Power/Signif + Signif <- Power / Signif image(Power) plot(apply(Power, 2, mean), log = "y") @@ -203,7 +202,7 @@ if (FALSE) { plot(period, apply(Power, 2, mean), log = "xy", xlab = "Period (days)") abline(v = 1) abline(v = 365.25) - abline(v = 365.25/4) + abline(v = 365.25 / 4) ## divide up spectra Pglobe <- apply(Power, 2, mean) @@ -218,21 +217,24 @@ if (FALSE) { synop.bin <- (max(day.bin) + 1):(min(year.bin) - 1) subday.bin <- 1:(min(day.bin) - 1) inter.bin <- (max(year.bin) + 1):length(period) - if (length(period) <= max(year.bin)) + if (length(period) <= max(year.bin)) { inter.bin <- NA + } - pow.bin <- c(sum(Pglobe[subday.bin]), sum(Pglobe[day.bin]), sum(Pglobe[synop.bin]), sum(Pglobe[year.bin]), - sum(Pglobe[inter.bin])) + pow.bin <- c( + sum(Pglobe[subday.bin]), sum(Pglobe[day.bin]), sum(Pglobe[synop.bin]), sum(Pglobe[year.bin]), + sum(Pglobe[inter.bin]) + ) - plot(1/period, apply(Power, 2, mean), log = "y") + plot(1 / period, apply(Power, 2, mean), log = "y") plot(apply(Signif, 2, mean), log = "y") plot(apply(Signif, 2, mean)) abline(h = 1) ## Crop out cone of influence - coi <- wv$coi ## cone of influence (valid if below value) + coi <- wv$coi ## cone of influence (valid if below value) for (t in seq_along(coi)) { sel <- which(period > coi[t]) Power[t, sel] <- NA diff --git a/modules/data.mining/inst/NACPspectral.v2.R b/modules/data.mining/inst/NACPspectral.v2.R index d718d1dde07..5055041601f 100644 --- a/modules/data.mining/inst/NACPspectral.v2.R +++ b/modules/data.mining/inst/NACPspectral.v2.R @@ -8,21 +8,21 @@ ### Specify required functions -library(dplR) ## Andy Bunn's Dendrochronology package +library(dplR) ## Andy Bunn's Dendrochronology package WAVE <- function(crn.vec, yr.vec, p2 = NULL, dj = 0.25, siglvl = 0.99, ...) 
{ - ## simple function based on Bunn's wavelet.plot fcn that returns wavelet info - if (is.null(p2)) { - p2 <- floor(log2(length(crn.vec))) - } - n <- length(crn.vec) - Dt <- 1 - s0 <- 1 - j1 <- p2 / dj - mother <- "morlet" - crn.vec.ac <- acf(crn.vec, lag.max = 2, plot = FALSE) - lag1 <- (crn.vec.ac$acf[2] + sqrt(crn.vec.ac$acf[3])) / 2 - wavelet(y1 = crn.vec, Dt = Dt, s0 = s0, dj = dj, J = j1, mother = mother, siglvl = siglvl) + ## simple function based on Bunn's wavelet.plot fcn that returns wavelet info + if (is.null(p2)) { + p2 <- floor(log2(length(crn.vec))) + } + n <- length(crn.vec) + Dt <- 1 + s0 <- 1 + j1 <- p2 / dj + mother <- "morlet" + crn.vec.ac <- acf(crn.vec, lag.max = 2, plot = FALSE) + lag1 <- (crn.vec.ac$acf[2] + sqrt(crn.vec.ac$acf[3])) / 2 + wavelet(y1 = crn.vec, Dt = Dt, s0 = s0, dj = dj, J = j1, mother = mother, siglvl = siglvl) } # WAVE @@ -30,21 +30,22 @@ WAVE <- function(crn.vec, yr.vec, p2 = NULL, dj = 0.25, siglvl = 0.99, ...) { model.dir <- "NEEm" ## set of models to analyze -model.set <- c(sort(c("BEPS", "CNCLASS", "ISOLSM", "TECO", "ecosys", "SiBCASA", "SiB", "DLEM", "ED2", - "LoTEC_DA", "AgroIBIS", "DNDC", "SiBcrop", "can.ibis", "EDCM", "ORCHIDEE", "LPJ", "BIOME_BGC", - "SSiB2", "TRIPLEX", "EPIC")), "MEAN") +model.set <- c(sort(c( + "BEPS", "CNCLASS", "ISOLSM", "TECO", "ecosys", "SiBCASA", "SiB", "DLEM", "ED2", + "LoTEC_DA", "AgroIBIS", "DNDC", "SiBcrop", "can.ibis", "EDCM", "ORCHIDEE", "LPJ", "BIOME_BGC", + "SSiB2", "TRIPLEX", "EPIC" +)), "MEAN") -Nmodel <- length(model.set) ## number of models +Nmodel <- length(model.set) ## number of models ## listing of available site files site.files <- dir(model.dir, "txt") ######### LOOP OVER SITES ########## -#for(i in 1:length(site.files)){ -#for(i in c(5,25,38,45)){ +# for(i in 1:length(site.files)){ +# for(i in c(5,25,38,45)){ for (i in c(1, 5, 7, 8, 9, 25, 26, 30, 38, 45)) { - yset <- 1990:2010 if (i == 5) { yset <- 1999:2007 @@ -56,96 +57,94 @@ for (i in c(1, 5, 7, 8, 9, 25, 26, 30, 38, 45)) { yset <- 1999:2003 } - ## load site data table - dat <- read.table(paste(model.dir, site.files[i], sep = "/"), header = TRUE, na.string = "-999.000") - ysel <- which(dat$X.YEAR %in% yset) - dat <- dat[ysel, ] - - m2c <- match(model.set, names(dat)) ## match model names to data table columns - day <- 1 / diff(dat$FDOY[1:2]) ## number of observations per day - Nperiod <- 1 + 4 * log2(nrow(dat)) - - ##POWER <- array(NA,c(Nmodel,Nperiod,nrow(dat))) - ##SIGNIF <- matrix(NA,Nmodel,Nperiod) - ######### LOOP OVER MODELS ########## - j <- 22 - k <- m2c[j] ## desired column in data table for specified model - - ## option 1 - absolute residuals - y <- dat$NEE_FILLED - dat[, k] ### Model error fcn - - ## option 2 - normalized residuals (post) - if (FALSE) { - ybar <- mean(y, na.rm = TRUE) - ysd <- NA - if (is.nan(ybar)) { - ybar <- NA - } else { - ysd <- sqrt(stats::var(y, na.rm = TRUE)) - } - if (is.nan(ysd)) { - ysd <- NA - } - y <- (y - ybar)/ysd ## normalize error - } - - ## option 3 - normalized residuals (pre) subscripts: t = tower, m = model - - ## normalize tower - NEEt.bar <- mean(dat$NEE_FILLED, na.rm = TRUE) - NEEt.sd <- NA - if (is.nan(NEEt.bar)) { - NEEt.bar <- NA + ## load site data table + dat <- read.table(paste(model.dir, site.files[i], sep = "/"), header = TRUE, na.string = "-999.000") + ysel <- which(dat$X.YEAR %in% yset) + dat <- dat[ysel, ] + + m2c <- match(model.set, names(dat)) ## match model names to data table columns + day <- 1 / diff(dat$FDOY[1:2]) ## number of observations per day + Nperiod <- 1 + 4 * 
log2(nrow(dat)) + + ## POWER <- array(NA,c(Nmodel,Nperiod,nrow(dat))) + ## SIGNIF <- matrix(NA,Nmodel,Nperiod) + ######### LOOP OVER MODELS ########## + j <- 22 + k <- m2c[j] ## desired column in data table for specified model + + ## option 1 - absolute residuals + y <- dat$NEE_FILLED - dat[, k] ### Model error fcn + + ## option 2 - normalized residuals (post) + if (FALSE) { + ybar <- mean(y, na.rm = TRUE) + ysd <- NA + if (is.nan(ybar)) { + ybar <- NA } else { - NEEt.sd <- sqrt(stats::var(dat$NEE_FILLED, na.rm = TRUE)) + ysd <- sqrt(stats::var(y, na.rm = TRUE)) } - NEEt.norm <- (dat$NEE_FILLED - NEEt.bar)/NEEt.sd - ## normalize model - NEEm.bar <- mean(dat[, k], na.rm = TRUE) - NEEm.sd <- NA - if (is.nan(NEEm.bar)) { - NEEm.bar <- NA - } else { - NEEm.sd <- sqrt(stats::var(dat[, k], na.rm = TRUE)) + if (is.nan(ysd)) { + ysd <- NA } - NEEm.norm <- (dat[, k] - NEEm.bar)/NEEm.sd - y <- NEEm.norm - NEEt.norm ## calc residuals of normalized + y <- (y - ybar) / ysd ## normalize error + } - y[is.na(y)] <- 0 ## need to fill in missing values + ## option 3 - normalized residuals (pre) subscripts: t = tower, m = model - wv <- WAVE(y) #,p2=17) ## Calculate wavelet spectrum - period <- wv$period/day ## wavelet periods - Power <- (abs(wv$wave))^2 ## wavelet power - for (t in seq_along(wv$Scale)) { - ### bias correction - Power[, t] <- Power[, t]/wv$Scale[t] - } - ## Crop out cone of influence - coi <- wv$coi ## cone of influence (valid if below value) - for (t in seq_along(coi)) { - sel <- which(period > coi[t]) - Power[t, sel] <- NA - } + ## normalize tower + NEEt.bar <- mean(dat$NEE_FILLED, na.rm = TRUE) + NEEt.sd <- NA + if (is.nan(NEEt.bar)) { + NEEt.bar <- NA + } else { + NEEt.sd <- sqrt(stats::var(dat$NEE_FILLED, na.rm = TRUE)) + } + NEEt.norm <- (dat$NEE_FILLED - NEEt.bar) / NEEt.sd + ## normalize model + NEEm.bar <- mean(dat[, k], na.rm = TRUE) + NEEm.sd <- NA + if (is.nan(NEEm.bar)) { + NEEm.bar <- NA + } else { + NEEm.sd <- sqrt(stats::var(dat[, k], na.rm = TRUE)) + } + NEEm.norm <- (dat[, k] - NEEm.bar) / NEEm.sd + y <- NEEm.norm - NEEt.norm ## calc residuals of normalized - ## update storage - ## POWER[j,,] <- Power - ## SIGNIF[j,] <- wv$Signif + y[is.na(y)] <- 0 ## need to fill in missing values - ## save(wv,Power,day,file=paste('NACPspecNORMpre2v2_17.',i,'.',j,'.Rdata',sep='')) - save(wv, Power, day, file = paste0("NACPspecNORMpre2.clip.", i, ".", j, ".Rdata")) + wv <- WAVE(y) # ,p2=17) ## Calculate wavelet spectrum + period <- wv$period / day ## wavelet periods + Power <- (abs(wv$wave))^2 ## wavelet power + for (t in seq_along(wv$Scale)) { + ### bias correction + Power[, t] <- Power[, t] / wv$Scale[t] + } + ## Crop out cone of influence + coi <- wv$coi ## cone of influence (valid if below value) + for (t in seq_along(coi)) { + sel <- which(period > coi[t]) + Power[t, sel] <- NA + } - print(c(i, j)) - # } ## end model loop -} ## loop over sites + ## update storage + ## POWER[j,,] <- Power + ## SIGNIF[j,] <- wv$Signif + ## save(wv,Power,day,file=paste('NACPspecNORMpre2v2_17.',i,'.',j,'.Rdata',sep='')) + save(wv, Power, day, file = paste0("NACPspecNORMpre2.clip.", i, ".", j, ".Rdata")) -if (FALSE) { + print(c(i, j)) + # } ## end model loop +} ## loop over sites - plot(period, apply(Power, 2, mean), log = "xy", xlab = "Period (days)") - abline(v = 1) - abline(v = 365.25) - abline(v = 365.25/4) +if (FALSE) { + plot(period, apply(Power, 2, mean), log = "xy", xlab = "Period (days)") + abline(v = 1) + abline(v = 365.25) + abline(v = 365.25 / 4) @@ -155,130 +154,134 @@ if (FALSE) { - 
########################### SCRATCH ################################ - j <- 9 ## model + ########################### SCRATCH ################################ + j <- 9 ## model - tm <- seq(1, by = 1/48, length = nrow(dat)) - NEE <- ts(dat$NEE_FILLED, deltat = 1/48) - plot(spectrum(NEE)) - plot(dat$FDOY, dat$NEE) - plot(dat$FDOY, dat$NEE_FILLED, pch = ".") + tm <- seq(1, by = 1 / 48, length = nrow(dat)) + NEE <- ts(dat$NEE_FILLED, deltat = 1 / 48) + plot(spectrum(NEE)) - m2c <- match(model.set, names(dat)) ## match model names to data table columns - k <- m2c[j] ## desired column in data table for specified model + plot(dat$FDOY, dat$NEE) + plot(dat$FDOY, dat$NEE_FILLED, pch = ".") - plot(dat$FDOY, dat[, k], pch = ".") + m2c <- match(model.set, names(dat)) ## match model names to data table columns + k <- m2c[j] ## desired column in data table for specified model - dat[is.na(dat)] <- 0 + plot(dat$FDOY, dat[, k], pch = ".") + dat[is.na(dat)] <- 0 - ## coherence spectra - ct <- which(names(dat) == "NEE_FILLED") - sp <- spectrum(dat[, c(ct, k)]) - par(mfrow = c(3, 1)) - plot(sp$spec[, 1], log = "y") - plot(sp$spec[, 2], log = "y") - plot(sp$spec[, 1] - sp$spec[, 2], log = "y") - X <- dat[, c(ct, k)] - wt <- dwt(X) + ## coherence spectra + ct <- which(names(dat) == "NEE_FILLED") + sp <- spectrum(dat[, c(ct, k)]) + par(mfrow = c(3, 1)) + plot(sp$spec[, 1], log = "y") + plot(sp$spec[, 2], log = "y") + plot(sp$spec[, 1] - sp$spec[, 2], log = "y") + X <- dat[, c(ct, k)] + wt <- dwt(X) - ## absolute residuals spectra - ra <- dat$NEE_FILLED - dat[, k] - rav <- abs(ra) - spectrum(ra) - spectrum(rav) - ra.w <- dwt(ra) - ra.wm <- modwt(ra[1:10]) - plot(ra.w) - plot(ra.wm) - plot(acf(ra)) + ## absolute residuals spectra + ra <- dat$NEE_FILLED - dat[, k] + rav <- abs(ra) + spectrum(ra) + spectrum(rav) + ra.w <- dwt(ra) + ra.wm <- modwt(ra[1:10]) + plot(ra.w) + plot(ra.wm) - wavelet.plot(ra, tm, p2 = 10) ##floor(log2(nrow(dat))))) + plot(acf(ra)) + wavelet.plot(ra, tm, p2 = 10) ## floor(log2(nrow(dat))))) - ### TESTING - y <- sin((1:512)/pi) + sin((1:512)/10) - ## null model construction: - t <- 1:nrow(dat) - y <- sin(2 * pi * t/day) + sin(2 * pi * t/(365.25 * day)) + ### TESTING + y <- sin((1:512) / pi) + sin((1:512) / 10) - wavelet.plot(y, seq_along(y), log2(length(y))) - plot(y, type = "l") + ## null model construction: + t <- 1:nrow(dat) + y <- sin(2 * pi * t / day) + sin(2 * pi * t / (365.25 * day)) - wv <- WAVE(y) - day <- 1/diff(dat$FDOY[1:2]) - period <- wv$period/day - Power <- (abs(wv$wave))^2 - Signif <- t(matrix(wv$Signif, dim(wv$wave)[2], dim(wv$wave)[1])) - Signif <- Power/Signif - image(Power) + wavelet.plot(y, seq_along(y), log2(length(y))) + plot(y, type = "l") - plot(apply(Power, 2, mean), log = "y") - plot(period, apply(Power, 2, mean), log = "y") - plot(period, apply(Power, 2, mean)) + wv <- WAVE(y) + day <- 1 / diff(dat$FDOY[1:2]) + period <- wv$period / day + Power <- (abs(wv$wave))^2 + Signif <- t(matrix(wv$Signif, dim(wv$wave)[2], dim(wv$wave)[1])) + Signif <- Power / Signif + image(Power) - plot(period, apply(Power, 2, mean), log = "xy", xlab = "Period (days)") - abline(v = 1) - abline(v = 365.25) - abline(v = 365.25/4) + plot(apply(Power, 2, mean), log = "y") + plot(period, apply(Power, 2, mean), log = "y") + plot(period, apply(Power, 2, mean)) - ## divide up spectra - Pglobe <- apply(Power, 2, mean) - day.mid <- findInterval(1, period) - day.bin <- day.mid + c(-4:4) - abline(v = period[day.bin]) + plot(period, apply(Power, 2, mean), log = "xy", xlab = "Period (days)") + abline(v = 
1) + abline(v = 365.25) + abline(v = 365.25 / 4) - year.mid <- findInterval(365.25, period) - year.bin <- year.mid + c(-4:4) - abline(v = period[year.bin]) + ## divide up spectra + Pglobe <- apply(Power, 2, mean) + day.mid <- findInterval(1, period) + day.bin <- day.mid + c(-4:4) + abline(v = period[day.bin]) - synop.bin <- (max(day.bin) + 1):(min(year.bin) - 1) - subday.bin <- 1:(min(day.bin) - 1) - inter.bin <- (max(year.bin) + 1):length(period) - if (length(period) <= max(year.bin)) - inter.bin <- NA + year.mid <- findInterval(365.25, period) + year.bin <- year.mid + c(-4:4) + abline(v = period[year.bin]) - pow.bin <- c(sum(Pglobe[subday.bin]), sum(Pglobe[day.bin]), sum(Pglobe[synop.bin]), sum(Pglobe[year.bin]), - sum(Pglobe[inter.bin])) + synop.bin <- (max(day.bin) + 1):(min(year.bin) - 1) + subday.bin <- 1:(min(day.bin) - 1) + inter.bin <- (max(year.bin) + 1):length(period) + if (length(period) <= max(year.bin)) { + inter.bin <- NA + } + pow.bin <- c( + sum(Pglobe[subday.bin]), sum(Pglobe[day.bin]), sum(Pglobe[synop.bin]), sum(Pglobe[year.bin]), + sum(Pglobe[inter.bin]) + ) - plot(1/period, apply(Power, 2, mean), log = "y") - plot(apply(Signif, 2, mean), log = "y") - plot(apply(Signif, 2, mean)) - abline(h = 1) - ## Crop out cone of influence - coi <- wv$coi ## cone of influence (valid if below value) - for (t in seq_along(coi)) { - sel <- which(period > coi[t]) - Power[t, sel] <- NA - } + plot(1 / period, apply(Power, 2, mean), log = "y") + plot(apply(Signif, 2, mean), log = "y") + plot(apply(Signif, 2, mean)) + abline(h = 1) + + ## Crop out cone of influence + coi <- wv$coi ## cone of influence (valid if below value) + for (t in seq_along(coi)) { + sel <- which(period > coi[t]) + Power[t, sel] <- NA + } - ## normalized residuals spectra - - ## histogram approach (% of significant failure events) - - i - ## analysis of spectra - mixed model - - ## - ## TODO: - ## - define model set - ## - define time-step - ## - define sites - ## - incorporation of uncertainty - ## - gaps or gap-filled - ## - alternative visualizations - ## - identification of whether model failures are consistent in time - ## - separation of pattern from accuracy - ## - look at cross-spectra? - ## - absolute vs normalised residuals - ## + ## normalized residuals spectra + + ## histogram approach (% of significant failure events) + + i + ## analysis of spectra - mixed model + + ## + ## TODO: + ## - define model set + ## - define time-step + ## - define sites + ## - incorporation of uncertainty + ## - gaps or gap-filled + ## - alternative visualizations + ## - identification of whether model failures are consistent in time + ## - separation of pattern from accuracy + ## - look at cross-spectra? 
+ ## - absolute vs normalised residuals + ## } diff --git a/modules/data.mining/inst/NullSpectra.R b/modules/data.mining/inst/NullSpectra.R index 085c42c7e07..a357779db8f 100644 --- a/modules/data.mining/inst/NullSpectra.R +++ b/modules/data.mining/inst/NullSpectra.R @@ -12,9 +12,9 @@ source("ResidSpectra.R") ## Get site sel'n from cmd line -sitenum <- as.numeric(system("echo $SITENUM", intern = TRUE)) ## site number -nstart <- as.numeric(system("echo $NSTART", intern = TRUE)) ## first Monte Carlo to process -n2proc <- 50 ## number of spectra to process +sitenum <- as.numeric(system("echo $SITENUM", intern = TRUE)) ## site number +nstart <- as.numeric(system("echo $NSTART", intern = TRUE)) ## first Monte Carlo to process +n2proc <- 50 ## number of spectra to process ## Paths and prefixes path <- "/home/mdietze/stats/spectral/" @@ -26,10 +26,11 @@ site.name <- site.files[sitenum] site.name <- sub("_NEE.txt", "", site.name) site.name <- sub("-", "", site.name) prefix <- paste0("NEE_", site.name, "-") -field <- paste0("NEEf/", site.name, "/FilledNEE/") -dat <- read.table(paste(model.dir, site.files[sitenum], sep = "/"), - header = TRUE, na.string = "-999.000") -day <- 1 / diff(dat$FDOY[1:2]) ## number of observations per day +field <- paste0("NEEf/", site.name, "/FilledNEE/") +dat <- read.table(paste(model.dir, site.files[sitenum], sep = "/"), + header = TRUE, na.string = "-999.000" +) +day <- 1 / diff(dat$FDOY[1:2]) ## number of observations per day ##################################### load up the 'pseudo' data ## files <- dir(path, prefix) @@ -59,8 +60,8 @@ save(dat, ylen, yrs, file = paste0(prefix, "pseudo.Rdata")) #################################### ## load up the 'true' data ## ffiles <- dir(paste0(path, field), "Moving") -fdat <- NULL -fylen <- rep(NA, length(yrs)) +fdat <- NULL +fylen <- rep(NA, length(yrs)) for (i in seq_along(yrs)) { print(yrs[i]) @@ -81,7 +82,6 @@ fdat[fdat == -9999] <- NA pspec <- matrix(NA, nrow(dat), ncol(dat)) Pspec <- matrix(NA, 100, 1000) for (i in seq(nstart, length = n2proc, by = 1)) { - print(i) ### Calculate the error @@ -96,7 +96,7 @@ for (i in seq(nstart, length = n2proc, by = 1)) { } else { NEEt.sd <- sqrt(stats::var(fdat[, 5], na.rm = TRUE)) } - NEEt.norm <- (fdat[, 5] - NEEt.bar)/NEEt.sd + NEEt.norm <- (fdat[, 5] - NEEt.bar) / NEEt.sd ## normalize model NEEp.bar <- mean(dat[, i], na.rm = TRUE) @@ -106,10 +106,10 @@ for (i in seq(nstart, length = n2proc, by = 1)) { } else { NEEp.sd <- sqrt(stats::var(dat[, i], na.rm = TRUE)) } - NEEp.norm <- (dat[, i] - NEEp.bar)/NEEp.sd ########### - y <- NEEp.norm - NEEt.norm ## calc residuals of normalized + NEEp.norm <- (dat[, i] - NEEp.bar) / NEEp.sd ########### + y <- NEEp.norm - NEEt.norm ## calc residuals of normalized - y[is.na(y)] <- 0 ## need to fill in missing values + y[is.na(y)] <- 0 ## need to fill in missing values ### Do the wavelet power spectrum @@ -121,7 +121,7 @@ for (i in seq(nstart, length = n2proc, by = 1)) { ### Also, do Fourier power spectra s <- spectrum(wv$y, plot = FALSE) pspec[seq_along(s$spec), i] <- s$spec - period <- 1 / s$freq/day + period <- 1 / s$freq / day save(wv, Power, day, file = paste0("pseudo.", sitenum, ".", i, ".Rdata")) save(i, Pspec, pspec, Period, period, file = paste0(site.name, ".", nstart, ".specCI.Rdata")) @@ -129,25 +129,27 @@ for (i in seq(nstart, length = n2proc, by = 1)) { if (FALSE) { ## some diagnostics - period <- 1 / s$freq/48 - pspec <- pspec[seq_along(period), ] + period <- 1 / s$freq / 48 + pspec <- pspec[seq_along(period), ] pbar <- apply(pspec, 1, mean, 
na.rm = TRUE) - pCI <- apply(pspec, 1, quantile, c(0.05, 0.5, 0.95), na.rm = TRUE) + pCI <- apply(pspec, 1, quantile, c(0.05, 0.5, 0.95), na.rm = TRUE) plot(period, pbar, log = "xy", ylim = range(pCI), type = "l", ylab = "Power", xlab = "Period (days)") lines(period, pCI[1, ], col = 3) # lines(period,pCI[2,],col=2) lines(period, pCI[3, ], col = 4) - abline(v = c(0.5, 1, 365.25/2, 365.25), col = 2, lty = 2) + abline(v = c(0.5, 1, 365.25 / 2, 365.25), col = 2, lty = 2) sel <- which(period > 0.8 & period < 1.3) - plot(period[sel], pbar[sel], log = "xy", ylim = range(pCI), type = "l", - ylab = "Power", xlab = "Period (days)") + plot(period[sel], pbar[sel], + log = "xy", ylim = range(pCI), type = "l", + ylab = "Power", xlab = "Period (days)" + ) lines(period[sel], pCI[1, sel], col = 3) # lines(period,pCI[2,],col=2) lines(period[sel], pCI[3, sel], col = 4) - abline(v = c(0.5, 1, 365.25/2, 365.25), col = 2, lty = 2) + abline(v = c(0.5, 1, 365.25 / 2, 365.25), col = 2, lty = 2) save.image("USHo1.specCI.Rdata") } diff --git a/modules/data.mining/inst/NullSpectra.v2.R b/modules/data.mining/inst/NullSpectra.v2.R index 6afa8006515..e63aa537949 100644 --- a/modules/data.mining/inst/NullSpectra.v2.R +++ b/modules/data.mining/inst/NullSpectra.v2.R @@ -13,7 +13,7 @@ n2proc <- 50 ## Paths and prefixes .libPaths("/home/mdietze/lib/R") # library(R.matlab) -library(dplR) ## Andy Bunn's Dendrochronology package +library(dplR) ## Andy Bunn's Dendrochronology package WAVE <- function(crn.vec, yr.vec, p2 = NULL, dj = 0.25, siglvl = 0.99, ...) { ## simple function based on Bunn's wavelet.plot fcn that returns wavelet info if (is.null(p2)) { @@ -30,18 +30,18 @@ WAVE <- function(crn.vec, yr.vec, p2 = NULL, dj = 0.25, siglvl = 0.99, ...) { } # WAVE # path <- '/home/mcd/Desktop/NACP/Spectral/NEEbarr' -path <- "/home/mdietze/stats/spectral/" +path <- "/home/mdietze/stats/spectral/" ppath <- "/home/mdietze/stats/spectral/MDS/" -model.dir <- "NEEm" +model.dir <- "NEEm" site.files <- dir(model.dir, "txt") site.name <- site.files[sitenum] site.name <- sub("_NEE.txt", "", site.name) site.name <- sub("-", "", site.name) prefix <- paste0("MDSNEE_", site.name, "-") -field <- paste0("NEEf/", site.name, "/FilledNEE/") -rdat <- read.table(file.path(model.dir, site.files[sitenum]), header = TRUE, na.string = "-999.000") -day <- 1 / diff(rdat$FDOY[1:2]) ## number of observations per day +field <- paste0("NEEf/", site.name, "/FilledNEE/") +rdat <- read.table(file.path(model.dir, site.files[sitenum]), header = TRUE, na.string = "-999.000") +day <- 1 / diff(rdat$FDOY[1:2]) ## number of observations per day # day <- 48 sitenum <- 26 @@ -111,7 +111,6 @@ fdat[fdat == -9999] <- NA pspec <- matrix(NA, nrow(dat), ncol(dat)) Pspec <- matrix(NA, 100, 1000) for (i in seq(nstart, length = n2proc, by = 1)) { - print(i) ### Calculate the error @@ -128,13 +127,13 @@ for (i in seq(nstart, length = n2proc, by = 1)) { } else { NEEt.sd <- sqrt(stats::var(fdat[, 5], na.rm = TRUE)) } - NEEt.norm <- (fdat[, 5] - NEEt.bar)/NEEt.sd + NEEt.norm <- (fdat[, 5] - NEEt.bar) / NEEt.sd ## normalize model mydat <- dat[, i] if (day < 30) { ## if dealing with 60 min day, average pseudodata - grp <- rep(1:(nrow(dat)/2), each = 2) + grp <- rep(1:(nrow(dat) / 2), each = 2) mydat <- tapply(mydat, grp, mean) } NEEp.bar <- mean(mydat, na.rm = TRUE) @@ -144,10 +143,10 @@ for (i in seq(nstart, length = n2proc, by = 1)) { } else { NEEp.sd <- sqrt(stats::var(mydat, na.rm = TRUE)) } - NEEp.norm <- (mydat - NEEp.bar)/NEEp.sd ########### - y <- NEEp.norm - NEEt.norm ## calc 
residuals of normalized + NEEp.norm <- (mydat - NEEp.bar) / NEEp.sd ########### + y <- NEEp.norm - NEEt.norm ## calc residuals of normalized - y[is.na(y)] <- 0 ## need to fill in missing values + y[is.na(y)] <- 0 ## need to fill in missing values ### first do overall power spectra @@ -156,19 +155,19 @@ for (i in seq(nstart, length = n2proc, by = 1)) { pspec[seq_along(s$spec), i] <- s$spec ## plot(1/s$freq,s$spec,log='xy') - period <- 1/s$freq/day + period <- 1 / s$freq / day ### Do the wavelet power spectrum (implement later) - wv <- WAVE(y) #,p2=17) ## Calculate wavelet spectrum ************************* - Period <- wv$period/day ## wavelet periods - Power <- (abs(wv$wave))^2 ## wavelet power + wv <- WAVE(y) # ,p2=17) ## Calculate wavelet spectrum ************************* + Period <- wv$period / day ## wavelet periods + Power <- (abs(wv$wave))^2 ## wavelet power ## power correction, Liu et al 2007 for (t in seq_along(wv$Scale)) { - Power[, t] <- Power[, t]/wv$Scale[t] + Power[, t] <- Power[, t] / wv$Scale[t] } ## Crop out cone of influence - coi <- wv$coi ## cone of influence (valid if below value) + coi <- wv$coi ## cone of influence (valid if below value) for (t in seq_along(coi)) { sel <- which(Period > coi[t]) Power[t, sel] <- NA @@ -191,8 +190,10 @@ if (FALSE) { pbar <- apply(pspec, 1, mean, na.rm = TRUE) pCI <- apply(pspec, 1, quantile, c(0.05, 0.5, 0.95), na.rm = TRUE) - plot(period, pbar, log = "xy", ylim = range(pCI), type = "l", - ylab = "Power", xlab = "Period (days)") + plot(period, pbar, + log = "xy", ylim = range(pCI), type = "l", + ylab = "Power", xlab = "Period (days)" + ) lines(period, pCI[1, ], col = 3) # lines(period,pCI[2,],col=2) lines(period, pCI[3, ], col = 4) @@ -200,8 +201,10 @@ if (FALSE) { sel <- which(period > 0.8 & period < 1.3) - plot(period[sel], pbar[sel], log = "xy", ylim = range(pCI), type = "l", - ylab = "Power", xlab = "Period (days)") + plot(period[sel], pbar[sel], + log = "xy", ylim = range(pCI), type = "l", + ylab = "Power", xlab = "Period (days)" + ) lines(period[sel], pCI[1, sel], col = 3) # lines(period,pCI[2,],col=2) lines(period[sel], pCI[3, sel], col = 4) diff --git a/modules/data.mining/inst/NullSpectraPost.R b/modules/data.mining/inst/NullSpectraPost.R index 260cfb03a98..f2e12afa1ec 100644 --- a/modules/data.mining/inst/NullSpectraPost.R +++ b/modules/data.mining/inst/NullSpectraPost.R @@ -2,7 +2,7 @@ ### random and gap-filling uncertainty based on the Barr et al ### NACP data product -### inputs: annual NEE matrices of [time x ensemble member] +### inputs: annual NEE matrices of [time x ensemble member] ### outputs: mean and quantile spectra ## THIS PART OF THE CODE COMBINES SPECTRA @@ -17,68 +17,74 @@ site.files <- dir(model.dir, "txt") stepsize <- 50 for (sitenum in c(5, 7, 8, 9, 25, 38, 45)) { - # 1,26 - - site.name <- site.files[sitenum] - site.name <- sub("_NEE.txt", "", site.name) - site.name <- sub("-", "", site.name) - prefix <- paste0("NEE_", site.name, "-") - field <- paste0("NEEf/", site.name, "/FilledNEE/") - - PspecG <- NULL - - ## for(nstart in c(1,101,201,301,401,501,601,701,801,901)){ - for (nstart in seq(1, 951, by = 50)) { - ## for(nstart in c(751,801,851,901,951)){ - print(c(sitenum, nstart)) - load(paste0("Rdata/", site.name, ".", nstart, ".specCI.Rdata")) - - if (is.null(PspecG)) { - PspecG <- matrix(NA, length(Period), 1000) - } - PspecG[, seq(nstart, length = stepsize, by = 1)] <- Pspec[seq_along(Period), seq(nstart, length = stepsize, - by = 1)] - - } - - pbar <- apply(PspecG, 1, mean, na.rm = TRUE) - pCI <- 
apply(PspecG, 1, quantile, c(0.05, 0.5, 0.95), na.rm = TRUE) - pSD <- sqrt(apply(PspecG, 1, var, na.rm = TRUE)) - if (FALSE) { - plot(Period, pbar, log = "xy", ylim = c(10000, max(pCI)), type = "l", ylab = "Power", xlab = "Period (days)", - main = "US-Ho1 Null Spectra", lwd = 2, cex.lab = 1.5, cex.axis = 1.2, cex.main = 1.5) - lines(Period, pCI[1, ], col = 2, lty = 2, lwd = 2) - ## lines(period,pCI[2,],col=2) - lines(Period, pCI[3, ], col = 2, lty = 2, lwd = 2) - abline(v = c(1, 365.25), col = 6, lty = 3, lwd = 2) + # 1,26 + + site.name <- site.files[sitenum] + site.name <- sub("_NEE.txt", "", site.name) + site.name <- sub("-", "", site.name) + prefix <- paste0("NEE_", site.name, "-") + field <- paste0("NEEf/", site.name, "/FilledNEE/") + + PspecG <- NULL + + ## for(nstart in c(1,101,201,301,401,501,601,701,801,901)){ + for (nstart in seq(1, 951, by = 50)) { + ## for(nstart in c(751,801,851,901,951)){ + print(c(sitenum, nstart)) + load(paste0("Rdata/", site.name, ".", nstart, ".specCI.Rdata")) + + if (is.null(PspecG)) { + PspecG <- matrix(NA, length(Period), 1000) } - - save(pSD, pbar, pCI, PspecG, Period, file = paste0(site.name, ".specCI.Rdata")) - + PspecG[, seq(nstart, length = stepsize, by = 1)] <- Pspec[seq_along(Period), seq(nstart, + length = stepsize, + by = 1 + )] + } + + pbar <- apply(PspecG, 1, mean, na.rm = TRUE) + pCI <- apply(PspecG, 1, quantile, c(0.05, 0.5, 0.95), na.rm = TRUE) + pSD <- sqrt(apply(PspecG, 1, var, na.rm = TRUE)) + if (FALSE) { + plot(Period, pbar, + log = "xy", ylim = c(10000, max(pCI)), type = "l", ylab = "Power", xlab = "Period (days)", + main = "US-Ho1 Null Spectra", lwd = 2, cex.lab = 1.5, cex.axis = 1.2, cex.main = 1.5 + ) + lines(Period, pCI[1, ], col = 2, lty = 2, lwd = 2) + ## lines(period,pCI[2,],col=2) + lines(Period, pCI[3, ], col = 2, lty = 2, lwd = 2) + abline(v = c(1, 365.25), col = 6, lty = 3, lwd = 2) + } + + save(pSD, pbar, pCI, PspecG, Period, file = paste0(site.name, ".specCI.Rdata")) } if (FALSE) { - period <- 1 / s$freq / 48 - pspec <- pspec[seq_along(period), ] - - pbar <- apply(pspec, 1, mean, na.rm = TRUE) - pCI <- apply(pspec, 1, quantile, c(0.05, 0.5, 0.95), na.rm = TRUE) - plot(period, pbar, log = "xy", ylim = range(pCI), type = "l", - ylab = "Power", xlab = "Period (days)") - lines(period, pCI[1, ], col = 3) - # lines(period,pCI[2,],col=2) - lines(period, pCI[3, ], col = 4) - abline(v = c(0.5, 1, 365.25 / 2, 365.25), col = 2, lty = 2) - - sel <- which(period > 0.8 & period < 1.3) - - plot(period[sel], pbar[sel], log = "xy", ylim = range(pCI), type = "l", - ylab = "Power", xlab = "Period (days)") - lines(period[sel], pCI[1, sel], col = 3) - # lines(period,pCI[2,],col=2) - lines(period[sel], pCI[3, sel], col = 4) - abline(v = c(0.5, 1, 365.25 / 2, 365.25), col = 2, lty = 2) - - save.image("USHo1.specCI.Rdata") + period <- 1 / s$freq / 48 + pspec <- pspec[seq_along(period), ] + + pbar <- apply(pspec, 1, mean, na.rm = TRUE) + pCI <- apply(pspec, 1, quantile, c(0.05, 0.5, 0.95), na.rm = TRUE) + plot(period, pbar, + log = "xy", ylim = range(pCI), type = "l", + ylab = "Power", xlab = "Period (days)" + ) + lines(period, pCI[1, ], col = 3) + # lines(period,pCI[2,],col=2) + lines(period, pCI[3, ], col = 4) + abline(v = c(0.5, 1, 365.25 / 2, 365.25), col = 2, lty = 2) + + sel <- which(period > 0.8 & period < 1.3) + + plot(period[sel], pbar[sel], + log = "xy", ylim = range(pCI), type = "l", + ylab = "Power", xlab = "Period (days)" + ) + lines(period[sel], pCI[1, sel], col = 3) + # lines(period,pCI[2,],col=2) + lines(period[sel], pCI[3, sel], 
col = 4) + abline(v = c(0.5, 1, 365.25 / 2, 365.25), col = 2, lty = 2) + + save.image("USHo1.specCI.Rdata") } diff --git a/modules/data.mining/inst/NullSpectraPost.v2.R b/modules/data.mining/inst/NullSpectraPost.v2.R index d8e1c531502..b43050758b2 100644 --- a/modules/data.mining/inst/NullSpectraPost.v2.R +++ b/modules/data.mining/inst/NullSpectraPost.v2.R @@ -2,7 +2,7 @@ ### random and gap-filling uncertainty based on the Barr et al ### NACP data product -### inputs: annual NEE matrices of [time x ensemble member] +### inputs: annual NEE matrices of [time x ensemble member] ### outputs: mean and quantile spectra ## THIS PART OF THE CODE COMBINES SPECTRA @@ -11,43 +11,46 @@ ## Paths and prefixes .libPaths("/home/mdietze/lib/R") -path <- "/home/mdietze/stats/spectral/" +path <- "/home/mdietze/stats/spectral/" spath <- "/scratch/NACP/spectral/" -model.dir <- "NEEm" +model.dir <- "NEEm" site.files <- dir(model.dir, "txt") stepsize <- 50 # for(sitenum in c(1,5,7,8,9,25,26,38,45)){ for (sitenum in c(25, 38, 45)) { - site.name <- site.files[sitenum] site.name <- sub("_NEE.txt", "", site.name) site.name <- sub("-", "", site.name) prefix <- paste0("NEE_", site.name, "-") field <- paste0("NEEf/", site.name, "/FilledNEE/") - + PspecG <- NULL - + for (nstart in seq(1, 951, by = 50)) { print(c(sitenum, nstart)) load(paste0(spath, site.name, ".", nstart, ".specCIclip.Rdata")) ## load(paste(spath,site.name,'.',nstart,'.specCI.Rdata',sep='')) - + if (is.null(PspecG)) { PspecG <- matrix(NA, length(Period), 1000) } - PspecG[, seq(nstart, length = stepsize, by = 1)] <- Pspec[seq_along(Period), - seq(nstart, length = stepsize, by = 1)] + PspecG[, seq(nstart, length = stepsize, by = 1)] <- Pspec[ + seq_along(Period), + seq(nstart, length = stepsize, by = 1) + ] } - + pbar <- apply(PspecG, 1, mean, na.rm = TRUE) - pCI <- apply(PspecG, 1, quantile, c(0.05, 0.5, 0.95), na.rm = TRUE) - pSD <- sqrt(apply(PspecG, 1, var, na.rm = TRUE)) + pCI <- apply(PspecG, 1, quantile, c(0.05, 0.5, 0.95), na.rm = TRUE) + pSD <- sqrt(apply(PspecG, 1, var, na.rm = TRUE)) if (FALSE) { - plot(Period, pbar, log = "xy", ylim = c(10000, max(pCI)), type = "l", - ylab = "Power", xlab = "Period (days)", - main = "US-Ho1 Null Spectra", lwd = 2, - cex.lab = 1.5, cex.axis = 1.2, cex.main = 1.5) + plot(Period, pbar, + log = "xy", ylim = c(10000, max(pCI)), type = "l", + ylab = "Power", xlab = "Period (days)", + main = "US-Ho1 Null Spectra", lwd = 2, + cex.lab = 1.5, cex.axis = 1.2, cex.main = 1.5 + ) lines(Period, pCI[1, ], col = 2, lty = 2, lwd = 2) ## lines(period,pCI[2,],col=2) lines(Period, pCI[3, ], col = 2, lty = 2, lwd = 2) @@ -59,24 +62,28 @@ for (sitenum in c(25, 38, 45)) { if (FALSE) { period <- 1 / s$freq / 48 pspec <- pspec[seq_along(period), ] - + pbar <- apply(pspec, 1, mean, na.rm = TRUE) pCI <- apply(pspec, 1, quantile, c(0.05, 0.5, 0.95), na.rm = TRUE) - plot(period, pbar, log = "xy", ylim = range(pCI), type = "l", - ylab = "Power", xlab = "Period (days)") + plot(period, pbar, + log = "xy", ylim = range(pCI), type = "l", + ylab = "Power", xlab = "Period (days)" + ) lines(period, pCI[1, ], col = 3) # lines(period,pCI[2,],col=2) lines(period, pCI[3, ], col = 4) abline(v = c(0.5, 1, 365.25 / 2, 365.25), col = 2, lty = 2) - + sel <- which(period > 0.8 & period < 1.3) - - plot(period[sel], pbar[sel], log = "xy", ylim = range(pCI), type = "l", - ylab = "Power", xlab = "Period (days)") + + plot(period[sel], pbar[sel], + log = "xy", ylim = range(pCI), type = "l", + ylab = "Power", xlab = "Period (days)" + ) lines(period[sel], pCI[1, 
sel], col = 3) # lines(period,pCI[2,],col=2) lines(period[sel], pCI[3, sel], col = 4) abline(v = c(0.5, 1, 365.25 / 2, 365.25), col = 2, lty = 2) - + save.image("USHo1.specCI.Rdata") } diff --git a/modules/data.mining/inst/ResidSpectra.R b/modules/data.mining/inst/ResidSpectra.R index be947883396..c84d43ccc0f 100644 --- a/modules/data.mining/inst/ResidSpectra.R +++ b/modules/data.mining/inst/ResidSpectra.R @@ -1,7 +1,7 @@ #--------------------------------------------------------------------------------------------------# ##' Calculate wavelet spectra of data-model residuals ##' -##' @title Wavelet spectra of data-model residuals +##' @title Wavelet spectra of data-model residuals ##' @param data numeric vector ##' @param model numeric vector ##' @param obsPerDay used to scale time axis to days @@ -12,53 +12,53 @@ #--------------------------------------------------------------------------------------------------# ResidSpectra <- function(data, model = NULL, obsPerDay = 1, case = 3) { - library(dplR) - - ## make sure everything's the right type - data <- as.vector(data) - if (is.null(model)) { - model <- rep(0, length(data)) - } - model <- as.vector(model) - y <- NULL - - ## option 1 - absolute residuals - if (case == 1) { - y <- data - model ### Model error fcn - } - ## option 2 - normalized residuals (post) - if (case == 2) { - y <- scale(data - model) - } - ## option 3 - normalized residuals (pre) - if (case == 3) { - ## normalize data - data.norm <- as.vector(scale(data)) - - ## normalize model - model.norm <- as.vector(scale(model)) - y <- data.norm - model.norm ## calc residuals of normalized - } - - y[is.na(y)] <- 0 ## need to fill in missing values - - ## Calculate Morlet wavelet spectrum - wv <- morlet(y) - period <- wv$period/day ## wavelet periods - Power <- (abs(wv$wave))^2 ## wavelet power - for (t in seq_along(wv$Scale)) { - ## bias correction - Power[, t] <- Power[, t] / wv$Scale[t] - } - - ## Crop out cone of influence - coi <- wv$coi ## cone of influence (valid if below value) - for (t in seq_along(coi)) { - sel <- which(period > coi[t]) - Power[t, sel] <- NA - } - wv$Power <- Power - wv$period <- period - - return(wv) + library(dplR) + + ## make sure everything's the right type + data <- as.vector(data) + if (is.null(model)) { + model <- rep(0, length(data)) + } + model <- as.vector(model) + y <- NULL + + ## option 1 - absolute residuals + if (case == 1) { + y <- data - model ### Model error fcn + } + ## option 2 - normalized residuals (post) + if (case == 2) { + y <- scale(data - model) + } + ## option 3 - normalized residuals (pre) + if (case == 3) { + ## normalize data + data.norm <- as.vector(scale(data)) + + ## normalize model + model.norm <- as.vector(scale(model)) + y <- data.norm - model.norm ## calc residuals of normalized + } + + y[is.na(y)] <- 0 ## need to fill in missing values + + ## Calculate Morlet wavelet spectrum + wv <- morlet(y) + period <- wv$period / day ## wavelet periods + Power <- (abs(wv$wave))^2 ## wavelet power + for (t in seq_along(wv$Scale)) { + ## bias correction + Power[, t] <- Power[, t] / wv$Scale[t] + } + + ## Crop out cone of influence + coi <- wv$coi ## cone of influence (valid if below value) + for (t in seq_along(coi)) { + sel <- which(period > coi[t]) + Power[t, sel] <- NA + } + wv$Power <- Power + wv$period <- period + + return(wv) } # ResidSpectra diff --git a/modules/data.mining/inst/SpectralSlice.R b/modules/data.mining/inst/SpectralSlice.R index 636574fa29b..e562644681b 100644 --- a/modules/data.mining/inst/SpectralSlice.R +++ 
b/modules/data.mining/inst/SpectralSlice.R @@ -1,50 +1,52 @@ ## Code to evaluate one specific Intermediate Time slice for one model at one site -site <- 26 ## Howland -model <- 9 ## ED +site <- 26 ## Howland +model <- 9 ## ED -site <- 8 ## CA-Oas -model <- 10 ## LOTEC +site <- 8 ## CA-Oas +model <- 10 ## LOTEC -site <- 25 ## Harvard -model <- 5 ## ecosys +site <- 25 ## Harvard +model <- 5 ## ecosys -site <- 25 ## Harvard -model <- 5 ## ecosys +site <- 25 ## Harvard +model <- 5 ## ecosys -site <- 5 ## Lethbridge -model <- 15 ## orchidee +site <- 5 ## Lethbridge +model <- 15 ## orchidee -site <- 7 ## Mer Bleue -model <- 12 ## isolsm ## first instance where model more variable than the data +site <- 7 ## Mer Bleue +model <- 12 ## isolsm ## first instance where model more variable than the data -site <- 9 ## obs -model <- 4 ## canibis ## model more variable than data +site <- 9 ## obs +model <- 4 ## canibis ## model more variable than data -tp <- c(10, 70) ## time period (10 days) +tp <- c(10, 70) ## time period (10 days) outdir <- "/scratch/NACP/spectral/" ## directory to find model files -model.dir <- "NEEm" +model.dir <- "NEEm" site.files <- dir(model.dir, "txt") -model.set <- sort(c("BEPS", "CNCLASS", "ISOLSM", "TECO", "ecosys", "SiBCASA", "SiB", "DLEM", - "ED2", "LoTEC_DA", "AgroIBIS", "DNDC", "SiBcrop", "can.ibis", "EDCM", - "ORCHIDEE", "LPJ", "BIOME_BGC", "SSiB2", "TRIPLEX", "EPIC")) +model.set <- sort(c( + "BEPS", "CNCLASS", "ISOLSM", "TECO", "ecosys", "SiBCASA", "SiB", "DLEM", + "ED2", "LoTEC_DA", "AgroIBIS", "DNDC", "SiBcrop", "can.ibis", "EDCM", + "ORCHIDEE", "LPJ", "BIOME_BGC", "SSiB2", "TRIPLEX", "EPIC" +)) load(paste0(outdir, "NACPspecNORMpre2.", site, ".", model, ".Rdata")) -period <- wv$period / day ## wavelet periods +period <- wv$period / day ## wavelet periods -dat <- read.table(paste(model.dir, site.files[site], sep = "/"), header = TRUE, na.string = "-999.000") -m2c <- match(model.set, names(dat)) ## match model names to data table columns -date <- dat$X.YEAR + dat$FDOY/366 +dat <- read.table(paste(model.dir, site.files[site], sep = "/"), header = TRUE, na.string = "-999.000") +m2c <- match(model.set, names(dat)) ## match model names to data table columns +date <- dat$X.YEAR + dat$FDOY / 366 NEEm <- dat[, m2c[model]] NEEt <- dat$NEE_FILLED ## normalize tower NEEt.bar <- mean(NEEt, na.rm = TRUE) -NEEt.sd <- NA +NEEt.sd <- NA if (is.nan(NEEt.bar)) { NEEt.bar <- NA } else { @@ -60,7 +62,7 @@ if (is.nan(NEEm.bar)) { NEEm.sd <- sqrt(stats::var(NEEm, na.rm = TRUE)) } NEEm.norm <- (NEEm - NEEm.bar) / NEEm.sd -y <- NEEm.norm - NEEt.norm ## calc residuals of normalized +y <- NEEm.norm - NEEt.norm ## calc residuals of normalized NEEt[is.na(NEEt)] <- 0 NEEm.s <- NEEt.s <- list() for (i in seq_along(tp)) { @@ -74,12 +76,12 @@ for (i in seq_along(tp)) { ## find the spectral band for the desired period band <- list() for (i in seq_along(tp)) { - k <- which.min((period - tp[i]) ^ 2) + k <- which.min((period - tp[i])^2) ## band = apply(Power[,k+(-2:2)],1,mean) band[[i]] <- apply(Power[, k + (-1:1)], 1, mean) ## band = Power[,k] coi <- wv$coi / day - j <- which.min((coi - tp[i]) ^ 2) + j <- which.min((coi - tp[i])^2) sel <- j:(length(band[[i]]) - j) band[[i]][1:j] <- NA band[[i]][length(band[[i]]) - (0:j)] <- NA @@ -87,7 +89,7 @@ for (i in seq_along(tp)) { if (FALSE) { ## find the null spectra - stepsize <- 50 + stepsize <- 50 site.name <- site.files[site] site.name <- sub("_NEE.txt", "", site.name) site.name <- sub("-", "", site.name) @@ -111,11 +113,11 @@ if (FALSE) { if (is.null(bandN)) 
{ bandN <- matrix(NA, 1000, nrow(Power)) } - period <- wv$period / day ## wavelet periods - kg <- which.min((period - tp) ^ 2) + period <- wv$period / day ## wavelet periods + kg <- which.min((period - tp)^2) bandN[i, ] <- apply(Power[, kg + (-1:1)], 1, mean) coi <- wv$coi / day - j <- which.min((coi - tp) ^ 2) + j <- which.min((coi - tp)^2) sel <- j:(length(band) - j) bandN[i, 1:j] <- NA bandN[i, length(band) - (0:j)] <- NA @@ -130,13 +132,15 @@ if (FALSE) { thresh <- 10 -sel <- which(date < 2004) +sel <- which(date < 2004) par(mfrow = c(3, 1)) par(cex = 1.2, lwd = 3) par(mar = c(2, 4, 0.5, 0.1)) -plot(date[sel], band[[1]][sel], type = "l", - log = "y", ylim = c(0.05, max(sapply(band, max, na.rm = TRUE))), - xlab = "time", ylab = "Power") +plot(date[sel], band[[1]][sel], + type = "l", + log = "y", ylim = c(0.05, max(sapply(band, max, na.rm = TRUE))), + xlab = "time", ylab = "Power" +) abline(h = thresh, col = "grey") lines(date[sel], band[[2]][sel], col = 2) # lines(date[sel],band[[3]][sel],col=3) @@ -146,9 +150,11 @@ legend("bottomleft", legend = tp, col = c(1, 2), lty = 1, horiz = TRUE, bg = "wh for (i in 1:2) { ## plot(date[sel],NEEm.s[[i]][sel],col=2,type='l',ylim=c(-4,2), - plot(date[sel], NEEm.s[[i]][sel], col = 2, type = "l", - ylim = range(c(NEEt.s[[i]], NEEm.s[[i]]), na.rm = TRUE), - xlab = "time", ylab = "NEE (umol/m2/s)") + plot(date[sel], NEEm.s[[i]][sel], + col = 2, type = "l", + ylim = range(c(NEEt.s[[i]], NEEm.s[[i]]), na.rm = TRUE), + xlab = "time", ylab = "NEE (umol/m2/s)" + ) abline(h = 0, col = "grey") lines(date[sel], NEEt.s[[i]][sel], col = 4, type = "l") diff --git a/modules/data.mining/inst/bigmv.R b/modules/data.mining/inst/bigmv.R index bbdcffa22b5..b400ca1a71d 100644 --- a/modules/data.mining/inst/bigmv.R +++ b/modules/data.mining/inst/bigmv.R @@ -1,4 +1,3 @@ - src <- "/home/mdietze/stats/spectral/pseudo/" dst <- "/scratch/NACP/spectral/pseudo/" @@ -6,20 +5,20 @@ sfiles <- dir(src, "Rdata") dfiles <- dir(dst, "Rdata") for (i in seq_along(sfiles)) { - print(sfiles[i]) - if (sfiles[i] %in% dfiles) { - ## check if equal - sdu <- strsplit(system(paste0("du ", src, sfiles[i]), intern = TRUE), "\t")[[1]] - rdu <- strsplit(system(paste0("du ", dst, dfiles[i]), intern = TRUE), "\t")[[1]] - ## delete - if (sdu[1] == rdu[1]) { - system(paste0("rm ", src, sfiles[i])) - } else { - ## move - system(paste0("mv ", src, sfiles[i], " ", dst)) - } + print(sfiles[i]) + if (sfiles[i] %in% dfiles) { + ## check if equal + sdu <- strsplit(system(paste0("du ", src, sfiles[i]), intern = TRUE), "\t")[[1]] + rdu <- strsplit(system(paste0("du ", dst, dfiles[i]), intern = TRUE), "\t")[[1]] + ## delete + if (sdu[1] == rdu[1]) { + system(paste0("rm ", src, sfiles[i])) } else { - ## move - system(paste0("mv ", src, sfiles[i], " ", dst)) + ## move + system(paste0("mv ", src, sfiles[i], " ", dst)) } + } else { + ## move + system(paste0("mv ", src, sfiles[i], " ", dst)) + } } diff --git a/modules/data.remote/R/GEDI_AGB_prep.R b/modules/data.remote/R/GEDI_AGB_prep.R index 6ed9ec3f4ab..fff8000928c 100644 --- a/modules/data.remote/R/GEDI_AGB_prep.R +++ b/modules/data.remote/R/GEDI_AGB_prep.R @@ -16,20 +16,20 @@ #' @param search_window search window (any length of time. e.g., 3 month) for locate available GEDI AGB values. #' #' @return A data frame containing AGB and sd for each site and each time step. 
-#' +#' #' @examples #' \dontrun{ #' settings <- PEcAn.settings::read.settings("pecan.xml") -#' site_info <- settings %>% -#' purrr::map(~.x[['run']] ) %>% -#' purrr::map('site')%>% -#' purrr::map(function(site.list){ -#' #conversion from string to number +#' site_info <- settings %>% +#' purrr::map(~ .x[["run"]]) %>% +#' purrr::map("site") %>% +#' purrr::map(function(site.list) { +#' # conversion from string to number #' site.list$lat <- as.numeric(site.list$lat) #' site.list$lon <- as.numeric(site.list$lon) -#' list(site_id=site.list$id, lat=site.list$lat, lon=site.list$lon, site_name=site.list$name) -#' })%>% -#' dplyr::bind_rows() %>% +#' list(site_id = site.list$id, lat = site.list$lat, lon = site.list$lon, site_name = site.list$name) +#' }) %>% +#' dplyr::bind_rows() %>% #' as.list() #' time_points <- seq(start.date, end.date, by = time.step) #' buffer <- 0.01 @@ -53,8 +53,9 @@ GEDI_AGB_prep <- function(site_info, time_points, outdir = file.path(getwd(), "G } # if we have dates with observations. # summarize data lists into data frame. - AGB_Output <- matrix(NA, length(site_info$site_id), 2*length(time_points)+1) %>% - `colnames<-`(c("site_id", paste0(time_points, "_AGB"), paste0(time_points, "_SD"))) %>% as.data.frame()#we need: site_id, AGB, std, target time point. + AGB_Output <- matrix(NA, length(site_info$site_id), 2 * length(time_points) + 1) %>% + `colnames<-`(c("site_id", paste0(time_points, "_AGB"), paste0(time_points, "_SD"))) %>% + as.data.frame() # we need: site_id, AGB, std, target time point. AGB_Output$site_id <- site_info$site_id # loop over each time point for (i in seq_along(time_points)) { @@ -88,9 +89,9 @@ GEDI_AGB_prep <- function(site_info, time_points, outdir = file.path(getwd(), "G #' @param start_date start date (date with YYYY-MM-DD format) for filtering out the existing CSV file. #' @param end_date end date (date with YYYY-MM-DD format) for filtering out the existing CSV file. #' -#' @return -#' @export -#' +#' @return +#' @export +#' #' @examples #' @author Dongchen Zhang #' @importFrom magrittr %>% @@ -98,7 +99,7 @@ GEDI_AGB_prep <- function(site_info, time_points, outdir = file.path(getwd(), "G GEDI_AGB_plot <- function(outdir, site.id, start_date, end_date) { # redirect to the site folder. 
site_folder <- file.path(outdir, site.id) - # + # if (file.exists(file.path(site_folder, "Error.txt"))) { PEcAn.logger::logger.info("The current point is outside of GEDI domain!") return(FALSE) @@ -108,22 +109,24 @@ GEDI_AGB_plot <- function(outdir, site.id, start_date, end_date) { return(FALSE) } else { extent <- utils::read.table(file.path(site_folder, "extent.txt"), skip = 1) %>% - as.numeric %>% + as.numeric() %>% purrr::set_names(c("ymax", "ymin", "xmin", "xmax")) - point.lat.lon <- matrix(c(mean(extent[c("ymin", "ymax")]), mean(extent[c("xmin", "xmax")])), nrow = 1) %>% + point.lat.lon <- matrix(c(mean(extent[c("ymin", "ymax")]), mean(extent[c("xmin", "xmax")])), nrow = 1) %>% `colnames<-`(c("lat", "lon")) %>% - as.data.frame - extent.x.y <- data.frame(matrix(c(extent["xmin"], extent["ymin"], - extent["xmax"], extent["ymin"], - extent["xmax"], extent["ymax"], - extent["xmin"], extent["ymax"]), nrow = 4, byrow = T)) %>% `colnames<-`(c("lon", "lat")) + as.data.frame() + extent.x.y <- data.frame(matrix(c( + extent["xmin"], extent["ymin"], + extent["xmax"], extent["ymin"], + extent["xmax"], extent["ymax"], + extent["xmin"], extent["ymax"] + ), nrow = 4, byrow = T)) %>% `colnames<-`(c("lon", "lat")) res <- utils::read.csv(file.path(site_folder, "GEDI_AGB.csv")) ggplot2::ggplot() + - ggplot2::geom_polygon(data = extent.x.y, ggplot2::aes(x = .data$lon, y = .data$lat), color="blue", fill = "white") + + ggplot2::geom_polygon(data = extent.x.y, ggplot2::aes(x = .data$lon, y = .data$lat), color = "blue", fill = "white") + ggplot2::geom_point(data = res, ggplot2::aes(x = .data$lon_lowestmode, y = .data$lat_lowestmode, color = .data$agbd)) + ggplot2::geom_point(shape = 24, data = point.lat.lon, ggplot2::aes(x = .data$lon, y = .data$lat), size = 3) + - ggplot2::geom_text(data = point.lat.lon, ggplot2::aes(x = .data$lon, y = .data$lat, label=site.id, hjust=-0.1, vjust=0)) + - ggplot2::scale_color_distiller(palette = 'Greens', direction = 1) + + ggplot2::geom_text(data = point.lat.lon, ggplot2::aes(x = .data$lon, y = .data$lat, label = site.id, hjust = -0.1, vjust = 0)) + + ggplot2::scale_color_distiller(palette = "Greens", direction = 1) + ggplot2::labs(color = "AGB") } } @@ -140,13 +143,13 @@ GEDI_AGB_plot <- function(outdir, site.id, start_date, end_date) { #' @param gradient the gradient for iteratively enlarge the extent if the nfile.min or nrow.min are not reached, default is 0. If nfile.min or nrow.min is 0 this will be skipped. #' #' @return A list of AGB data frames for each site. -#' +#' #' @examples #' @author Dongchen Zhang #' @importFrom magrittr %>% #' @importFrom rlang .data GEDI_AGB_extract <- function(site_info, start_date, end_date, outdir, nfile.min = 0, nrow.min = 0, buffer = 0.01, gradient = 0) { - #Initialize the multicore computation. + # Initialize the multicore computation. if (future::supportsMulticore()) { future::plan(future::multicore) } else { @@ -154,14 +157,16 @@ GEDI_AGB_extract <- function(site_info, start_date, end_date, outdir, nfile.min } # grab site.info and convert from lat/lon to sf objects of points and buffer areas. GEDI_AGB <- split(as.data.frame(site_info), seq(nrow(as.data.frame(site_info)))) %>% - furrr::future_map(function(point){ + furrr::future_map(function(point) { # flag determine if we have satisfied res.filter object. csv.valid <- FALSE # extent for filter. 
- extent <- data.frame(ymax = point$lat + buffer, - ymin = point$lat - buffer, - xmin = point$lon - buffer, - xmax = point$lon + buffer) + extent <- data.frame( + ymax = point$lat + buffer, + ymin = point$lat - buffer, + xmin = point$lon - buffer, + xmax = point$lon + buffer + ) # redirect to the current folder. # if we already create the folder. site_folder <- file.path(outdir, point$site_id) @@ -173,18 +178,20 @@ GEDI_AGB_extract <- function(site_info, start_date, end_date, outdir, nfile.min res <- utils::read.csv(csv.path) if (file.exists(file.path(site_folder, "extent.txt")) & nfile.min != 0) { extent <- utils::read.table(file.path(site_folder, "extent.txt"), skip = 1, col.names = c("ymax", "ymin", "xmin", "xmax")) - extent <- extent[nrow(extent),] + extent <- extent[nrow(extent), ] } # filter previous records based on space and time. - res.filter <- res %>% dplyr::filter(.data$lat_lowestmode <= extent["ymax"], - .data$lat_lowestmode >= extent["ymin"], - .data$lon_lowestmode >= extent["xmin"], - .data$lon_lowestmode <= extent["xmax"], - lubridate::as_date(.data$date) >= lubridate::as_date(start_date), - lubridate::as_date(.data$date) <= lubridate::as_date(end_date)) + res.filter <- res %>% dplyr::filter( + .data$lat_lowestmode <= extent["ymax"], + .data$lat_lowestmode >= extent["ymin"], + .data$lon_lowestmode >= extent["xmin"], + .data$lon_lowestmode <= extent["xmax"], + lubridate::as_date(.data$date) >= lubridate::as_date(start_date), + lubridate::as_date(.data$date) <= lubridate::as_date(end_date) + ) # determine if res.filter is not empty. - # In the future, we will need to document - # file name of each pre-downloaded `GEDI L4A` files + # In the future, we will need to document + # file name of each pre-downloaded `GEDI L4A` files # such that any new files within the range will be downloaded and processed. if (nrow(res.filter) > 0) { csv.valid <- TRUE @@ -206,13 +213,15 @@ GEDI_AGB_extract <- function(site_info, start_date, end_date, outdir, nfile.min return(res.filter) } else { # download GEDI AGB for current site. - res.current <- GEDI_AGB_download(start_date = start_date, - end_date = end_date, - outdir = site_folder, - extent = extent, - nfile.min = nfile.min, - nrow.min = nrow.min, - gradient = gradient) + res.current <- GEDI_AGB_download( + start_date = start_date, + end_date = end_date, + outdir = site_folder, + extent = extent, + nfile.min = nfile.min, + nrow.min = nrow.min, + gradient = gradient + ) # if we have previous downloaded GEDI records. if (exists("res", mode = "environment") & !all(is.na(res.current))) { res <- rbind(res, res.current) @@ -231,7 +240,8 @@ GEDI_AGB_extract <- function(site_info, start_date, end_date, outdir, nfile.min return(NA) } } - }, .progress = T) %>% purrr::set_names(site_info$site_id) + }, .progress = T) %>% + purrr::set_names(site_info$site_id) GEDI_AGB } #' Download GEDI AGB data for the GEDI AGB extract function. @@ -245,22 +255,24 @@ GEDI_AGB_extract <- function(site_info, start_date, end_date, outdir, nfile.min #' @param gradient the gradient for iteratively enlarge the extent if the nfile.min or nrow.min are not reached, default is 0. If nfile.min or nrow.min is 0 this will be skipped. #' #' @return A data frame containing AGB and sd for the target spatial and temporal extent. -#' +#' #' @examples #' @author Dongchen Zhang #' @importFrom magrittr %>% GEDI_AGB_download <- function(start_date, end_date, outdir, extent, nfile.min = 0, nrow.min = 0, gradient = 0) { # download GEDI AGB files. 
# if there is no data within current buffer distance. - files <- try(l4_download(ncore = 1, - ul_lat = extent["ymax"], - lr_lat = extent["ymin"], - ul_lon = extent["xmin"], - lr_lon = extent["xmax"], - from = start_date, - to = end_date, - outdir = outdir, - just_path = T), silent = T) + files <- try(l4_download( + ncore = 1, + ul_lat = extent["ymax"], + lr_lat = extent["ymin"], + ul_lon = extent["xmin"], + lr_lon = extent["xmax"], + from = start_date, + to = end_date, + outdir = outdir, + just_path = T + ), silent = T) # if we just need the data within fixed extent and hit error. if ("try-error" %in% class(files) & nfile.min == 0) { return(NA) @@ -269,31 +281,35 @@ GEDI_AGB_download <- function(start_date, end_date, outdir, extent, nfile.min = # we iteratively add 0.1 degree to the buffer distance. extent[c(1, 4)] <- extent[c(1, 4)] + gradient extent[c(2, 3)] <- extent[c(2, 3)] - gradient - files <- try(l4_download(ncore = 1, - ul_lat = extent["ymax"], - lr_lat = extent["ymin"], - ul_lon = extent["xmin"], - lr_lon = extent["xmax"], - from = start_date, - to = end_date, - outdir = outdir, - just_path = T), silent = T) + files <- try(l4_download( + ncore = 1, + ul_lat = extent["ymax"], + lr_lat = extent["ymin"], + ul_lon = extent["xmin"], + lr_lon = extent["xmax"], + from = start_date, + to = end_date, + outdir = outdir, + just_path = T + ), silent = T) } - try(files <- l4_download(ncore = 1, - ul_lat = extent["ymax"], - lr_lat = extent["ymin"], - ul_lon = extent["xmin"], - lr_lon = extent["xmax"], - from = start_date, - to = end_date, - outdir = outdir), silent = T) + try(files <- l4_download( + ncore = 1, + ul_lat = extent["ymax"], + lr_lat = extent["ymin"], + ul_lon = extent["xmin"], + lr_lon = extent["xmax"], + from = start_date, + to = end_date, + outdir = outdir + ), silent = T) # load files. res <- GEDI4R::l4_getmulti(files, ncore = 1) # filter observations based on filter buffer distance. - keep.ind <- which(res$lat_lowestmode <= extent["ymax"] & - res$lat_lowestmode >= extent["ymin"] & - res$lon_lowestmode >= extent["xmin"] & - res$lon_lowestmode <= extent["xmax"]) + keep.ind <- which(res$lat_lowestmode <= extent["ymax"] & + res$lat_lowestmode >= extent["ymin"] & + res$lon_lowestmode >= extent["xmin"] & + res$lon_lowestmode <= extent["xmax"]) while (length(keep.ind) < nrow.min & length(files) > 0) { # we iteratively add 0.1 degree to the buffer distance. # because sometimes even the the extent ensure at least 1 tile nearby the location. @@ -303,9 +319,9 @@ GEDI_AGB_download <- function(start_date, end_date, outdir, extent, nfile.min = extent[c(2, 3)] <- extent[c(2, 3)] - gradient # filter observations based on filter buffer distance. keep.ind <- which(res$lat_lowestmode <= extent["ymax"] & - res$lat_lowestmode >= extent["ymin"] & - res$lon_lowestmode >= extent["xmin"] & - res$lon_lowestmode <= extent["xmax"]) + res$lat_lowestmode >= extent["ymin"] & + res$lon_lowestmode >= extent["xmin"] & + res$lon_lowestmode <= extent["xmax"]) } # record extent for download and extraction. extent <- data.frame(matrix(extent, nrow = 1)) %>% purrr::set_names(c("ymax", "ymin", "xmin", "xmax")) @@ -324,7 +340,7 @@ GEDI_AGB_download <- function(start_date, end_date, outdir, extent, nfile.min = if (length(keep.ind) == 0) { return(NA) } else { - return(res[keep.ind,]) + return(res[keep.ind, ]) } } #' DOWNLOAD GEDI level 4A data from DAACL.ORNL @@ -364,8 +380,8 @@ GEDI_AGB_download <- function(start_date, end_date, outdir, extent, nfile.min = #' @return List of file path in outdir. 
#' @examples #' \dontrun{ -#' #retrive Italy bound -#' bound <- sf::st_as_sf(raster::getData('GADM', country='ITA', level=1)) +#' # retrive Italy bound +#' bound <- sf::st_as_sf(raster::getData("GADM", country = "ITA", level = 1)) #' ex <- raster::extent(bound) #' ul_lat <- ex[4] #' lr_lat <- ex[3] @@ -373,27 +389,30 @@ GEDI_AGB_download <- function(start_date, end_date, outdir, extent, nfile.min = #' lr_lon <- ex[1] #' from <- "2020-07-01" #' to <- "2020-07-02" -#' #get just files path available for the searched parameters -#' l4_download(ul_lat=ul_lat, -#' lr_lat=lr_lat, -#' ul_lon=ul_lon, -#' lr_lon=lr_lon, -#' from=from, -#' to=to, -#' just_path=T +#' # get just files path available for the searched parameters +#' l4_download( +#' ul_lat = ul_lat, +#' lr_lat = lr_lat, +#' ul_lon = ul_lon, +#' lr_lon = lr_lon, +#' from = from, +#' to = to, +#' just_path = T +#' ) +#' +#' # download the first 4 files +#' +#' l4_download( +#' ul_lat = ul_lat, +#' lr_lat = lr_lat, +#' ul_lon = ul_lon, +#' lr_lon = lr_lon, +#' from = from, +#' to = to, +#' just_path = F, +#' outdir = tempdir(), +#' subset = 1:4 #' ) -#' -#' #download the first 4 files -#' -#' l4_download(ul_lat=ul_lat, -#' lr_lat=lr_lat, -#' ul_lon=ul_lon, -#' lr_lon=lr_lon, -#' from=from, -#' to=to, -#' just_path=F, -#' outdir = tempdir(), -#' subset=1:4) #' } #' @author Elia Vangi l4_download <- @@ -404,14 +423,13 @@ l4_download <- ncore = parallel::detectCores() - 1, from = NULL, to = NULL, - outdir=getwd(), + outdir = getwd(), just_path = F, subset = NULL) { - op <- options("warn") on.exit(options(op)) - options(warn=1) - #check if outdir exist and if there is a netrc file in + options(warn = 1) + # check if outdir exist and if there is a netrc file in if (!just_path) { # stopifnot("outdir is not character" = check_char(outdir)) if (!dir.exists(outdir)) { @@ -420,13 +438,13 @@ l4_download <- # netrc_file <- getnetrc(outdir) } else if (length(list.files(outdir, pattern = "netrc")) == 0) { # netrc_file <- getnetrc(outdir) - } else{ + } else { netrc_file <- list.files(outdir, pattern = "netrc", full.names = T) } } - #time period + # time period daterange <- c(from, to) - + # Get path to GEDI2A data gLevel4 <- GEDI4R::gedifinder( @@ -436,44 +454,49 @@ l4_download <- lr_lon, daterange = daterange ) - + lg <- length(gLevel4) - - if(lg==0){stop("there are no GEDI files for this date or coordinates")} - + + if (lg == 0) { + stop("there are no GEDI files for this date or coordinates") + } + if (just_path) { return(gLevel4) stop(invisible()) } - - - #check for existing GEDI file in outdir - pre <- list.files(outdir,pattern = "h5") - if(length(pre)!=0) { + + + # check for existing GEDI file in outdir + pre <- list.files(outdir, pattern = "h5") + if (length(pre) != 0) { gLevel4 <- gLevel4[!basename(tools::file_path_sans_ext(gLevel4)) %in% basename(tools::file_path_sans_ext(pre))] nlg <- length(gLevel4) - message(lg, " files found, of wich ",lg-nlg, " already downloaded in ", outdir) - - }else{ message(lg, " files found.")} - - - #subset GEDI files + message(lg, " files found, of wich ", lg - nlg, " already downloaded in ", outdir) + } else { + message(lg, " files found.") + } + + + # subset GEDI files if (!is.null(subset) && is.numeric(subset)) { - if(length(subset)>length(gLevel4)){ + if (length(subset) > length(gLevel4)) { warning("the length of subset vector is greater than the number of files to be download. 
Subsetting will not be done.") - }else{ gLevel4 <- gLevel4[subset]} + } else { + gLevel4 <- gLevel4[subset] + } } - - #set ncore equal to the number of files found or to the user defined value + + # set ncore equal to the number of files found or to the user defined value if (ncore > 1) { - ncore <- ifelse(length(gLevel4) <= parallel::detectCores()-1, length(gLevel4), ncore) + ncore <- ifelse(length(gLevel4) <= parallel::detectCores() - 1, length(gLevel4), ncore) message("using ", ncore, " cores") - #download + # download cl <- parallel::makeCluster(ncore) doParallel::registerDoParallel(cl) message("start download") - + foreach::foreach( i = 1:length(gLevel4), .packages = "httr" @@ -500,7 +523,7 @@ l4_download <- ) } } - + message("Done") files <- list.files(outdir, pattern = "h5", full.names = T) return(files) @@ -509,28 +532,32 @@ l4_download <- #' #' @param dl_dir Directory where the netrc file will be stored. #' @return file path of the netrc file. -getnetrc <- function (dl_dir) { +getnetrc <- function(dl_dir) { netrc <- file.path(dl_dir, "netrc") if (file.exists(netrc) == FALSE || - any(grepl("urs.earthdata.nasa.gov", - readLines(netrc))) == FALSE) { + any(grepl( + "urs.earthdata.nasa.gov", + readLines(netrc) + )) == FALSE) { netrc_conn <- file(netrc) - writeLines(c( - "machine urs.earthdata.nasa.gov", - sprintf( - "login %s", - getPass::getPass(msg = "Enter NASA Earthdata Login Username \n (or create an account at urs.earthdata.nasa.gov) :") + writeLines( + c( + "machine urs.earthdata.nasa.gov", + sprintf( + "login %s", + getPass::getPass(msg = "Enter NASA Earthdata Login Username \n (or create an account at urs.earthdata.nasa.gov) :") + ), + sprintf( + "password %s", + getPass::getPass(msg = "Enter NASA Earthdata Login Password:") + ) ), - sprintf( - "password %s", - getPass::getPass(msg = "Enter NASA Earthdata Login Password:") - ) - ), - netrc_conn) + netrc_conn + ) close(netrc_conn) message( "A netrc file with your Earthdata Login credentials was stored in the output directory " ) } return(netrc) -} \ No newline at end of file +} diff --git a/modules/data.remote/R/LandTrendr.AGB.R b/modules/data.remote/R/LandTrendr.AGB.R index 5f2645e56d3..c75b1127586 100644 --- a/modules/data.remote/R/LandTrendr.AGB.R +++ b/modules/data.remote/R/LandTrendr.AGB.R @@ -1,44 +1,43 @@ # ##' @title download.LandTrendr.AGB ##' @name download.LandTrendr.AGB -##' +##' ##' @param outdir Where to place output ##' @param target_dataset Which LandTrendr dataset to download? Default = "biomass" ##' @param product_dates What data product dates to download -##' @param product_version Optional. LandTrend AGB is provided with two versions, +##' @param product_version Optional. LandTrend AGB is provided with two versions, ##' v0 and v1 (latest version) -##' @param con Optional database connection. If specified then the code will check to see -## if the file already exists in PEcAn before downloading, and will also create a database +##' @param con Optional database connection. If specified then the code will check to see +## if the file already exists in PEcAn before downloading, and will also create a database ## entry for new downloads ##' @param run_parallel Logical. Download and extract files in parallel? ##' @param ncores Optional. If run_parallel=TRUE how many cores to use? If left as NULL will select max number -1 ##' @param overwrite Logical. 
Overwrite existing files and replace with new versions -##' +##' ##' @return data.frame summarize the results of the function call -##' +##' ##' @examples ##' \dontrun{ ##' outdir <- "~/scratch/abg_data/" ##' product_dates <- c(1990, 1991, 1995) # using discontinous, or specific years ##' product_dates2 <- seq(1992, 1995, 1) # using a date sequence for selection of years ##' product_version = "v1" -##' -##' results <- PEcAn.data.remote::download.LandTrendr.AGB(outdir=outdir, -##' product_dates = product_dates, +##' +##' results <- PEcAn.data.remote::download.LandTrendr.AGB(outdir=outdir, +##' product_dates = product_dates, ##' product_version = product_version) -##' -##' results <- PEcAn.data.remote::download.LandTrendr.AGB(outdir=outdir, -##' product_dates = product_dates2, +##' +##' results <- PEcAn.data.remote::download.LandTrendr.AGB(outdir=outdir, +##' product_dates = product_dates2, ##' product_version = product_version) ##' } -##' +##' ##' @export ##' @author Shawn Serbin ##' -download.LandTrendr.AGB <- function(outdir, target_dataset = "biomass", product_dates = NULL, - product_version = "v1", con = NULL, run_parallel = TRUE, +download.LandTrendr.AGB <- function(outdir, target_dataset = "biomass", product_dates = NULL, + product_version = "v1", con = NULL, run_parallel = TRUE, ncores = NULL, overwrite = FALSE) { - # steps to implement: # check if files exist locally, also are they valid? Check DB for file location # check if files exist remotely, get file size? Is that relevant as remote files are likely .zip @@ -48,8 +47,8 @@ download.LandTrendr.AGB <- function(outdir, target_dataset = "biomass", product_ # -- to implement. break/return out of function if nothing to do, else below ## setup output folder - if (! file.exists(outdir)) dir.create(outdir,recursive=TRUE) - + if (!file.exists(outdir)) dir.create(outdir, recursive = TRUE) + ## get target year range if (is.null(product_dates)) { PEcAn.logger::logger.severe("*** No products dates provided. Please provide dates to process ***") @@ -64,36 +63,36 @@ download.LandTrendr.AGB <- function(outdir, target_dataset = "biomass", product_ if (!is.null(ncores)) { ncores <- ncores } else { - ncores <- parallel::detectCores() -1 + ncores <- parallel::detectCores() - 1 } PEcAn.logger::logger.info(paste0("Running in parallel with: ", ncores)) } - + ## setup PEcAn.logger::logger.info("*** Downloading LandTrendr ABG data products ***") URL <- "ftp://islay.ceoas.oregonstate.edu/cms" # setup product defaults - #target_dataset <- "biomassfiaald" # looks like they changed the directory structure - #target_dataset <- "biomass" # now just "biomass" --- now an argument + # target_dataset <- "biomassfiaald" # looks like they changed the directory structure + # target_dataset <- "biomass" # now just "biomass" --- now an argument target_filename_prefix <- "biomassfiaald" file_ext <- ".zip" - obs_files <- paste0(target_filename_prefix,"_",target_download_years,"_median",file_ext) # hard-coded name matching source, OK? - err_files <- paste0(target_filename_prefix,"_",target_download_years,"_stdv",file_ext) # hard-coded name matching source, OK? 
- files_to_download <- c(obs_files,err_files) - local_files <- file.path(outdir,gsub(".zip", ".tif",files_to_download)) - - prod_obs_urls <- paste(URL,product_version,target_dataset,"median",obs_files,sep="/") - prod_err_urls <- paste(URL,product_version,target_dataset,"stdv",err_files,sep="/") - download_urls <- c(prod_obs_urls,prod_err_urls) - + obs_files <- paste0(target_filename_prefix, "_", target_download_years, "_median", file_ext) # hard-coded name matching source, OK? + err_files <- paste0(target_filename_prefix, "_", target_download_years, "_stdv", file_ext) # hard-coded name matching source, OK? + files_to_download <- c(obs_files, err_files) + local_files <- file.path(outdir, gsub(".zip", ".tif", files_to_download)) + + prod_obs_urls <- paste(URL, product_version, target_dataset, "median", obs_files, sep = "/") + prod_err_urls <- paste(URL, product_version, target_dataset, "stdv", err_files, sep = "/") + download_urls <- c(prod_obs_urls, prod_err_urls) + # identify these are compressed files compressed <- TRUE ## download data # before downloading check that the remote FTP contains the desired years - a little clunky, clean up # if we keep this, will need to check this works with other data sources/products - check_urls <- paste0(unique(dirname(download_urls), fromLast = TRUE),"/") + check_urls <- paste0(unique(dirname(download_urls), fromLast = TRUE), "/") remote_filenames <- Map( function(p) { readLines( @@ -101,18 +100,24 @@ download.LandTrendr.AGB <- function(outdir, target_dataset = "biomass", product_ p, handle = curl::new_handle( ftp_use_epsv = FALSE, - dirlistonly = TRUE))) + dirlistonly = TRUE + ) + ) + ) }, - check_urls) + check_urls + ) remote_filenames_list <- unlist(remote_filenames) - if (sum(basename(download_urls) %in% remote_filenames_list, na.rm=T)!=length(download_urls)) { + if (sum(basename(download_urls) %in% remote_filenames_list, na.rm = T) != length(download_urls)) { `%not_in%` <- purrr::negate(`%in%`) missing <- which(basename(download_urls) %not_in% remote_filenames_list) download_urls[missing] - PEcAn.logger::logger.severe(paste("Files missing from source: ", - download_urls[missing])) + PEcAn.logger::logger.severe(paste( + "Files missing from source: ", + download_urls[missing] + )) } - + ## check for local files exist - do we want to do this? Or use DB? Or both? # use this to subset the files that need to be downloaded. Check file size first? # ok to do this in one shot or need to check file by file....think this is OK @@ -125,7 +130,7 @@ download.LandTrendr.AGB <- function(outdir, target_dataset = "biomass", product_ } # setup download - if (length(files_to_download_final)<1) { + if (length(files_to_download_final) < 1) { PEcAn.logger::logger.info("*** Requested files already exist on this host, providing file paths ***") } else { `%dopar%` <- foreach::`%dopar%` @@ -133,31 +138,36 @@ download.LandTrendr.AGB <- function(outdir, target_dataset = "biomass", product_ if (run_parallel) { cl <- parallel::makeCluster(ncores) doParallel::registerDoParallel(cl) - foreach::foreach(i=1:length(files_to_download_final)) %dopar% - try(PEcAn.utils::download_file(download_urls_final[i], file.path(outdir, - files_to_download_final[i]))) + foreach::foreach(i = 1:length(files_to_download_final)) %dopar% + try(PEcAn.utils::download_file(download_urls_final[i], file.path( + outdir, + files_to_download_final[i] + ))) } else { - PEcAn.logger::logger.info("Caution, downloading in serial. + PEcAn.logger::logger.info("Caution, downloading in serial. 
Could take an extended period to finish") # needed? - Map(function(u, d) PEcAn.utils::download_file(u, d), download_urls_final, file.path(outdir, - files_to_download_final)) + Map(function(u, d) PEcAn.utils::download_file(u, d), download_urls_final, file.path( + outdir, + files_to_download_final + )) } # let user know downloading is complete PEcAn.logger::logger.info("*** Downloading complete ***") - + if (compressed) { PEcAn.logger::logger.info("*** Unpacking compressed files ***") ## unpack files # check type - there is a better way to do this - if (file_ext==".zip") { + if (file_ext == ".zip") { zip_files <- list.files(file.path(outdir), pattern = "*.zip", full.names = TRUE) k <- NULL # for passing the GitHub check that there is no global binding for k. - foreach::foreach(k=1:length(zip_files)) %dopar% try(utils::unzip(file.path(zip_files[k]), - files = NULL, list = FALSE, overwrite = TRUE, - junkpaths = FALSE, - exdir = file.path(path.expand(outdir)), - unzip = getOption("unzip"), - setTimes = FALSE)) + foreach::foreach(k = 1:length(zip_files)) %dopar% try(utils::unzip(file.path(zip_files[k]), + files = NULL, list = FALSE, overwrite = TRUE, + junkpaths = FALSE, + exdir = file.path(path.expand(outdir)), + unzip = getOption("unzip"), + setTimes = FALSE + )) PEcAn.logger::logger.info("*** Removing compressed files ***") unlink(zip_files) } @@ -165,32 +175,38 @@ download.LandTrendr.AGB <- function(outdir, target_dataset = "biomass", product_ } ## Prepare results - clunky, need to refine this - downloaded_files <- file.path(outdir,gsub(".zip", ".tif",files_to_download)) - downloaded_years <- unlist(regmatches(downloaded_files, - gregexpr("\\d{4}", downloaded_files))) + downloaded_files <- file.path(outdir, gsub(".zip", ".tif", files_to_download)) + downloaded_years <- unlist(regmatches( + downloaded_files, + gregexpr("\\d{4}", downloaded_files) + )) total_rows <- length(downloaded_files) med_rows <- length(grep(pattern = "median", downloaded_files)) sdev_rows <- length(grep(pattern = "stdv", downloaded_files)) - out_formats <- c(rep("LandTrendr_AGB_median", times=med_rows), - rep("LandTrendr_AGB_stdev", times=sdev_rows)) + out_formats <- c( + rep("LandTrendr_AGB_median", times = med_rows), + rep("LandTrendr_AGB_stdev", times = sdev_rows) + ) - results <- data.frame(file = character(total_rows), - host = character(total_rows), - mimetype = "image/tiff", - formatname = character(total_rows), - startdate = character(total_rows), - enddate = character(total_rows), - dbfile.name = "LandTrendr", - stringsAsFactors = FALSE) + results <- data.frame( + file = character(total_rows), + host = character(total_rows), + mimetype = "image/tiff", + formatname = character(total_rows), + startdate = character(total_rows), + enddate = character(total_rows), + dbfile.name = "LandTrendr", + stringsAsFactors = FALSE + ) - for (i in seq_len(total_rows)) { + for (i in seq_len(total_rows)) { results$file[i] <- downloaded_files[i] results$host[i] <- PEcAn.remote::fqdn() results$startdate[i] <- paste0(downloaded_years[i], "-01-01 00:00:00") results$enddate[i] <- paste0(downloaded_years[i], "-12-31 23:59:59") results$formatname[i] <- out_formats[i] } - + return(results) } # @@ -198,7 +214,7 @@ download.LandTrendr.AGB <- function(outdir, target_dataset = "biomass", product_ # ##' @title extract.LandTrendr.AGB ##' @name extract.LandTrendr.AGB -##' +##' ##' @param site_info list of site info for parsing AGB data: list(site_id, site_name, lat, lon, time_zone) ##' @param dataset Which LandTrendr dataset to parse, "median" 
or "stdv".Default: "median" ##' @param buffer Optional. operate over desired buffer area (not yet implemented) @@ -206,97 +222,108 @@ download.LandTrendr.AGB <- function(outdir, target_dataset = "biomass", product_ ##' @param data_dir directory where input data is located. Can be NUL if con is specified ##' @param product_dates Process and extract data only from selected years. Default behavior ##' (product_dates = NULL) is to extract data from all availible years in BETYdb or data_dir -##' @param output_file Path to save LandTrendr_AGB_output.RData file containing the +##' @param output_file Path to save LandTrendr_AGB_output.RData file containing the ##' output extraction list (see return) ##' @param ... Additional arguments, currently ignored -##' -##' @return list of two containing the median AGB values per pixel and the corresponding +##' +##' @return list of two containing the median AGB values per pixel and the corresponding ##' standard deviation values (uncertainties) -##' +##' ##' @examples ##' \dontrun{ -##' +##' ##' # Example 1 - using BETYdb site IDs to extract data ##' # Database connection (optional) ##' ##' con <- PEcAn.DB::db.open( ##' list(user='bety', password='bety', host='localhost', ##' dbname='bety', driver='PostgreSQL',write=TRUE)) -##' +##' ##' site_ID <- c(2000000023,1000025731,676,1000005149) # BETYdb site IDs -##' suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, -##' ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", +##' suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, +##' ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", ##' ids = site_ID, .con = con)) ##' suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) ##' suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -##' site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, +##' site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, ##' lon=qry_results$lon, time_zone=qry_results$time_zone) ##' data_dir <- "~/scratch/agb_data/" -##' -##' results <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean", +##' +##' results <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean", ##' data_dir, product_dates, output_file) -##' +##' ##' } -##' +##' ##' @export ##' @author Shawn Serbin, Alexey Shiklomanov -##' -extract.LandTrendr.AGB <- function(site_info, dataset = "median", buffer = NULL, fun = "mean", - data_dir = NULL, product_dates = NULL, output_file = NULL, +##' +extract.LandTrendr.AGB <- function(site_info, dataset = "median", buffer = NULL, fun = "mean", + data_dir = NULL, product_dates = NULL, output_file = NULL, ...) 
{ - ## get coordinates and provide spatial info site_coords <- data.frame(site_info$lon, site_info$lat) - names(site_coords) <- c("Longitude","Latitude") + names(site_coords) <- c("Longitude", "Latitude") coords_latlong <- sp::SpatialPoints(site_coords) sp::proj4string(coords_latlong) <- sp::CRS("+init=epsg:4326") - + ## Subset list of years to process if requested by user if (!is.null(product_dates)) { - agb_files <- list.files(file.path(data_dir), pattern = paste0("*",dataset,".tif$"), - full.names = TRUE) - availible_years <- unlist(regmatches(agb_files, - gregexpr("\\d{4}", agb_files))) + agb_files <- list.files(file.path(data_dir), + pattern = paste0("*", dataset, ".tif$"), + full.names = TRUE + ) + availible_years <- unlist(regmatches( + agb_files, + gregexpr("\\d{4}", agb_files) + )) agb_files <- agb_files[availible_years %in% product_dates] } else { - agb_files <- list.files(file.path(data_dir), pattern = paste0("*",dataset,".tif$"), - full.names = TRUE) + agb_files <- list.files(file.path(data_dir), + pattern = paste0("*", dataset, ".tif$"), + full.names = TRUE + ) } ## load gridded AGB data raster_data <- lapply(agb_files, raster::raster) - + ## reproject Lat/Long site coords to AGB Albers Equal-Area - coords_AEA <- sp::spTransform(coords_latlong, - raster::crs(raster::raster(raster_data[[1]]))) - + coords_AEA <- sp::spTransform( + coords_latlong, + raster::crs(raster::raster(raster_data[[1]])) + ) + ## prepare product for extraction - stack requested years raster_data_stack <- raster::stack(raster_data) - + ## extract - agb_pixel <- raster::extract(x = raster_data_stack, - y = coords_AEA, buffer=buffer, fun=NULL, df=FALSE) - if(is.null(buffer)){ - processed_years <- unlist(regmatches(names(data.frame(agb_pixel)), - gregexpr("\\d{4}", names(data.frame(agb_pixel))))) + agb_pixel <- raster::extract( + x = raster_data_stack, + y = coords_AEA, buffer = buffer, fun = NULL, df = FALSE + ) + if (is.null(buffer)) { + processed_years <- unlist(regmatches( + names(data.frame(agb_pixel)), + gregexpr("\\d{4}", names(data.frame(agb_pixel))) + )) agb_pixel <- data.frame(agb_pixel) - names(agb_pixel) <- paste0("Year_",processed_years) - agb_pixel <- data.frame(Site_ID=site_info$site_id, Site_Name=site_info$site_name, agb_pixel) - + names(agb_pixel) <- paste0("Year_", processed_years) + agb_pixel <- data.frame(Site_ID = site_info$site_id, Site_Name = site_info$site_name, agb_pixel) + ## output list point_list <- list() - output_name <- paste0(dataset,"_AGB") + output_name <- paste0(dataset, "_AGB") point_list <- list(agb_pixel) names(point_list) <- output_name - }else{ + } else { return(agb_pixel) } ## save output to a file? if (!is.null(output_file)) { - save("point_list",file = file.path(output_file,paste0('LandTrendr_',dataset,'_AGB_output.RData'))) + save("point_list", file = file.path(output_file, paste0("LandTrendr_", dataset, "_AGB_output.RData"))) } - + ## return output list return(point_list) } diff --git a/modules/data.remote/R/Landtrendr_AGB_prep.R b/modules/data.remote/R/Landtrendr_AGB_prep.R index 0f4216e7b1b..b2c072bd517 100644 --- a/modules/data.remote/R/Landtrendr_AGB_prep.R +++ b/modules/data.remote/R/Landtrendr_AGB_prep.R @@ -17,54 +17,57 @@ #' @examples #' @author Dongchen Zhang #' @importFrom magrittr %>% -Landtrendr_AGB_prep <- function(site_info, start_date, end_date, time_points, - AGB_indir, outdir = NULL, export_csv = TRUE, - allow_download = FALSE, buffer = NULL, skip_buffer = TRUE){ - #Initialize the multicore computation. 
+Landtrendr_AGB_prep <- function(site_info, start_date, end_date, time_points, + AGB_indir, outdir = NULL, export_csv = TRUE, + allow_download = FALSE, buffer = NULL, skip_buffer = TRUE) { + # Initialize the multicore computation. if (future::supportsMulticore()) { future::plan(future::multicore) } else { future::plan(future::multisession) } - #if we export CSV but didn't provide any path - if(as.logical(export_csv) && is.null(outdir)){ + # if we export CSV but didn't provide any path + if (as.logical(export_csv) && is.null(outdir)) { PEcAn.logger::logger.info("If you want to export CSV file, please ensure input the outdir!") return(0) } - #Landtrendr AGB doesn't provide data after 2017. + # Landtrendr AGB doesn't provide data after 2017. time_points <- time_points[which(lubridate::year(time_points) < 2018)] - #check the integrity of AGB files. + # check the integrity of AGB files. AGB_median_years <- as.numeric(gsub(".*?([0-9]+).*", "\\1", list.files(AGB_indir, pattern = "*median.tif"))) - missing_years_median <- lubridate::year((time_points[which(!lubridate::year(time_points)%in%AGB_median_years)])) #for landtrendr AGB data, we only have data before 2018. - #starting downloading - if(length(missing_years_median)>0){ - if(as.logical(allow_download)){ - if(getOption('timeout') < 3600) options(timeout=3600)#enable 1h download time + missing_years_median <- lubridate::year((time_points[which(!lubridate::year(time_points) %in% AGB_median_years)])) # for landtrendr AGB data, we only have data before 2018. + # starting downloading + if (length(missing_years_median) > 0) { + if (as.logical(allow_download)) { + if (getOption("timeout") < 3600) options(timeout = 3600) # enable 1h download time PEcAn.data.remote::download.LandTrendr.AGB(outdir = AGB_indir, product_dates = missing_years_median, run_parallel = FALSE) - }else{ - #files are missing, and we don't allow download + } else { + # files are missing, and we don't allow download PEcAn.logger::logger.info("Partial AGB files are missing, please set the allow_download as TRUE to download them automatically!") return(0) } } - #grab previous data to see which site has incomplete observations, if so, download the site for the whole time period. - #if we have previous downloaded CSV file - if(!is.null(outdir)){ - if(file.exists(file.path(outdir, "AGB.csv")) && length(buffer)==0 && as.logical(skip_buffer)){ + # grab previous data to see which site has incomplete observations, if so, download the site for the whole time period. + # if we have previous downloaded CSV file + if (!is.null(outdir)) { + if (file.exists(file.path(outdir, "AGB.csv")) && length(buffer) == 0 && as.logical(skip_buffer)) { PEcAn.logger::logger.info("Extracting previous AGB file!") Previous_CSV <- as.data.frame(utils::read.csv(file.path(outdir, "AGB.csv"))) - AGB_Output <- matrix(NA, length(site_info$site_id), 2*length(time_points)+1) %>% - `colnames<-`(c("site_id", paste0(time_points, "_AbvGrndWood"), paste0(time_points, "_SD"))) %>% as.data.frame()#we need: site_id, agb, sd, target time point. + AGB_Output <- matrix(NA, length(site_info$site_id), 2 * length(time_points) + 1) %>% + `colnames<-`(c("site_id", paste0(time_points, "_AbvGrndWood"), paste0(time_points, "_SD"))) %>% + as.data.frame() # we need: site_id, agb, sd, target time point. AGB_Output$site_id <- site_info$site_id - #Calculate AGB for each time step and site. - #loop over time and site - AGB.list <- time_points %>% furrr::future_map(function(t){ + # Calculate AGB for each time step and site. 
+ # loop over time and site + AGB.list <- time_points %>% furrr::future_map(function(t) { out.t <- data.frame() for (id in site_info$site_id) { - site_AGB <- Previous_CSV[which(Previous_CSV$site_id == id),] - if(length(site_AGB$agb[which(site_AGB$date == lubridate::year(t))])==1){ - out.t <- rbind(out.t, list(mean = site_AGB$agb[which(site_AGB$date == lubridate::year(t))], - sd = site_AGB$sd[which(site_AGB$date == lubridate::year(t))])) + site_AGB <- Previous_CSV[which(Previous_CSV$site_id == id), ] + if (length(site_AGB$agb[which(site_AGB$date == lubridate::year(t))]) == 1) { + out.t <- rbind(out.t, list( + mean = site_AGB$agb[which(site_AGB$date == lubridate::year(t))], + sd = site_AGB$sd[which(site_AGB$date == lubridate::year(t))] + )) } else { out.t <- rbind(out.t, list(mean = NA, sd = NA)) } @@ -72,122 +75,139 @@ Landtrendr_AGB_prep <- function(site_info, start_date, end_date, time_points, out.t %>% purrr::set_names(c(paste0(t, "_AbvGrndWood"), paste0(t, "_SD"))) }, .progress = T) for (i in seq_along(time_points)) { - t <- time_points[i]#otherwise the t will be number instead of date. - AGB_Output[, paste0(t, "_AbvGrndWood")] <- AGB.list[[i]][,paste0(t, "_AbvGrndWood")] - AGB_Output[, paste0(t, "_SD")] <- AGB.list[[i]][,paste0(t, "_SD")] + t <- time_points[i] # otherwise the t will be number instead of date. + AGB_Output[, paste0(t, "_AbvGrndWood")] <- AGB.list[[i]][, paste0(t, "_AbvGrndWood")] + AGB_Output[, paste0(t, "_SD")] <- AGB.list[[i]][, paste0(t, "_SD")] } } } - #only Site that has NA for any time points need to be downloaded. - if(!exists("AGB_Output")){ - AGB_Output <- matrix(NA, length(site_info$site_id), 2*length(time_points)+1) %>% - `colnames<-`(c("site_id", paste0(time_points, "_AbvGrndWood"), paste0(time_points, "_SD"))) %>% as.data.frame()#we need: site_id, AGB, std, target time point. + # only Site that has NA for any time points need to be downloaded. + if (!exists("AGB_Output")) { + AGB_Output <- matrix(NA, length(site_info$site_id), 2 * length(time_points) + 1) %>% + `colnames<-`(c("site_id", paste0(time_points, "_AbvGrndWood"), paste0(time_points, "_SD"))) %>% + as.data.frame() # we need: site_id, AGB, std, target time point. AGB_Output$site_id <- site_info$site_id } AGB_Output_temp <- AGB_Output - new_site_info <- site_info %>% purrr::map(function(x)x[!stats::complete.cases(AGB_Output)]) - #if we have any site missing previously - if(length(new_site_info$site_id) != 0){ - if(is.null(buffer) | as.logical(skip_buffer)){ - #prepare lists for future::map parallelization. + new_site_info <- site_info %>% purrr::map(function(x) x[!stats::complete.cases(AGB_Output)]) + # if we have any site missing previously + if (length(new_site_info$site_id) != 0) { + if (is.null(buffer) | as.logical(skip_buffer)) { + # prepare lists for future::map parallelization. new_site_info$AGB_indir <- rep(AGB_indir, length(new_site_info$site_id)) new_site_info$start_date <- rep(start_date, length(new_site_info$site_id)) new_site_info$end_date <- rep(end_date, length(new_site_info$site_id)) l <- split(as.data.frame(new_site_info), seq(nrow(as.data.frame(new_site_info)))) - - #extracting AGB data - AGB_Output <- l %>% furrr::future_map(function(ll) { - time_points <- seq(as.Date(ll$start_date), as.Date(ll$end_date), "1 year") - #Landtrendr AGB doesn't provide data after 2017. 
- time_points <- time_points[which(lubridate::year(time_points) < 2018)] - product_dates <- lubridate::year(time_points) - site_info <- list(site_id = ll$site_id, - lat = ll$lat, - lon = ll$lon, - site_name = NA) - - med_agb_data <- PEcAn.data.remote::extract.LandTrendr.AGB(site_info = site_info, - dataset = "median", - fun = "mean", - data_dir = ll$AGB_indir, - product_dates = product_dates)[[1]] %>% dplyr::select(-2) %>% - `colnames<-`(c("site_id", paste0(time_points, "_AbvGrndWood"))) - sdev_agb_data <- PEcAn.data.remote::extract.LandTrendr.AGB(site_info = site_info, - dataset = "stdv", - fun = "mean", - data_dir = ll$AGB_indir, - product_dates = product_dates)[[1]] %>% dplyr::select(-c(1:2)) %>% - `colnames<-`(c(paste0(time_points, "_SD"))) - cbind(med_agb_data, sdev_agb_data) - }, .progress = T) %>% dplyr::bind_rows() - }else{#buffer is not empty - #extracting AGB data - med <- PEcAn.data.remote::extract.LandTrendr.AGB(site_info = new_site_info, - dataset = "median", - buffer = buffer, - fun = "mean", - data_dir = AGB_indir, - product_dates = lubridate::year(time_points))[[1]] %>% dplyr::select(-2) %>% + + # extracting AGB data + AGB_Output <- l %>% + furrr::future_map(function(ll) { + time_points <- seq(as.Date(ll$start_date), as.Date(ll$end_date), "1 year") + # Landtrendr AGB doesn't provide data after 2017. + time_points <- time_points[which(lubridate::year(time_points) < 2018)] + product_dates <- lubridate::year(time_points) + site_info <- list( + site_id = ll$site_id, + lat = ll$lat, + lon = ll$lon, + site_name = NA + ) + + med_agb_data <- PEcAn.data.remote::extract.LandTrendr.AGB( + site_info = site_info, + dataset = "median", + fun = "mean", + data_dir = ll$AGB_indir, + product_dates = product_dates + )[[1]] %>% + dplyr::select(-2) %>% + `colnames<-`(c("site_id", paste0(time_points, "_AbvGrndWood"))) + sdev_agb_data <- PEcAn.data.remote::extract.LandTrendr.AGB( + site_info = site_info, + dataset = "stdv", + fun = "mean", + data_dir = ll$AGB_indir, + product_dates = product_dates + )[[1]] %>% + dplyr::select(-c(1:2)) %>% + `colnames<-`(c(paste0(time_points, "_SD"))) + cbind(med_agb_data, sdev_agb_data) + }, .progress = T) %>% + dplyr::bind_rows() + } else { # buffer is not empty + # extracting AGB data + med <- PEcAn.data.remote::extract.LandTrendr.AGB( + site_info = new_site_info, + dataset = "median", + buffer = buffer, + fun = "mean", + data_dir = AGB_indir, + product_dates = lubridate::year(time_points) + )[[1]] %>% + dplyr::select(-2) %>% `colnames<-`(c("site_id", paste0(time_points, "_AbvGrndWood"))) - sdev <- PEcAn.data.remote::extract.LandTrendr.AGB(site_info = new_site_info, - dataset = "stdv", - buffer = buffer, - fun = "mean", - data_dir = AGB_indir, - product_dates = lubridate::year(time_points))[[1]] %>% dplyr::select(-c(1:2)) %>% + sdev <- PEcAn.data.remote::extract.LandTrendr.AGB( + site_info = new_site_info, + dataset = "stdv", + buffer = buffer, + fun = "mean", + data_dir = AGB_indir, + product_dates = lubridate::year(time_points) + )[[1]] %>% + dplyr::select(-c(1:2)) %>% `colnames<-`(c(paste0(time_points, "_SD"))) sdev_agb_data <- med_agb_data <- c() - #searching for the min variance. + # searching for the min variance. 
for (i in seq_along(new_site_info$site_id)) { temp_var <- rowSums(sdev[[i]]) min_var_Ind <- which.min(temp_var) - - sdev_agb_data <- rbind(sdev_agb_data, sdev[[i]][min_var_Ind,]) - med_agb_data <- rbind(med_agb_data, med[[i]][min_var_Ind,]) + + sdev_agb_data <- rbind(sdev_agb_data, sdev[[i]][min_var_Ind, ]) + med_agb_data <- rbind(med_agb_data, med[[i]][min_var_Ind, ]) } - #Handle data - AGB_Output <- cbind(med$site_id, med_agb_data, sdev_agb_data) %>% - as.data.frame%>% + # Handle data + AGB_Output <- cbind(med$site_id, med_agb_data, sdev_agb_data) %>% + as.data.frame() %>% `colnames<-`(c("site_id", paste0(time_points, "_AbvGrndWood"), paste0(time_points, "_SD"))) } - #prepare CSV from AGB_Output - Current_CSV <- matrix(NA, 0, 6) %>% + # prepare CSV from AGB_Output + Current_CSV <- matrix(NA, 0, 6) %>% `colnames<-`(c("date", "site_id", "lat", "lon", "agb", "sd")) for (id in AGB_Output$site_id) { - site_AGB <- unlist(AGB_Output[which(AGB_Output$site_id==id),]) + site_AGB <- unlist(AGB_Output[which(AGB_Output$site_id == id), ]) for (i in seq_along(time_points)) { date <- lubridate::year(time_points[i]) site_id <- id - lon <- new_site_info$lon[which(new_site_info$site_id==id)] - lat <- new_site_info$lat[which(new_site_info$site_id==id)] + lon <- new_site_info$lon[which(new_site_info$site_id == id)] + lat <- new_site_info$lat[which(new_site_info$site_id == id)] agb <- site_AGB[paste0(time_points[i], "_AbvGrndWood")] %>% purrr::set_names("agb") sd <- site_AGB[paste0(time_points[i], "_SD")] %>% purrr::set_names("sd") - Current_CSV <- rbind(Current_CSV, tibble::tibble(date, site_id, lat, lon, agb, sd))#in date, id, lat, lon, agb, sd + Current_CSV <- rbind(Current_CSV, tibble::tibble(date, site_id, lat, lon, agb, sd)) # in date, id, lat, lon, agb, sd } } - #Compare with existing CSV file. (We name the CSV file as AGB.csv) - if(export_csv){ - if(exists("Previous_CSV")){#we already read the csv file previously. + # Compare with existing CSV file. (We name the CSV file as AGB.csv) + if (export_csv) { + if (exists("Previous_CSV")) { # we already read the csv file previously. Current_CSV <- rbind(Previous_CSV, Current_CSV) - Current_CSV <- Current_CSV[!duplicated(paste0(Current_CSV$site_id, Current_CSV$date)),]#using site_id and date to remove duplicated records. + Current_CSV <- Current_CSV[!duplicated(paste0(Current_CSV$site_id, Current_CSV$date)), ] # using site_id and date to remove duplicated records. utils::write.csv(Current_CSV, file = file.path(outdir, "AGB.csv"), row.names = FALSE) - }else{ + } else { utils::write.csv(Current_CSV, file = file.path(outdir, "AGB.csv"), row.names = FALSE) } } - #write current csv into AGB_Output data frame. - #recreate the AGB_Output object + # write current csv into AGB_Output data frame. + # recreate the AGB_Output object AGB_Output <- AGB_Output_temp - #loop over time and site + # loop over time and site for (i in seq_along(time_points)) { - t <- time_points[i]#otherwise the t will be number instead of date. + t <- time_points[i] # otherwise the t will be number instead of date. 
for (id in site_info$site_id) { - site_AGB <- Current_CSV[which(Current_CSV$site_id == id),] - AGB_Output[which(AGB_Output$site_id==id), paste0(t, "_AbvGrndWood")] <- as.numeric(site_AGB[which(site_AGB$date == lubridate::year(t)), "agb"]) - AGB_Output[which(AGB_Output$site_id==id), paste0(t, "_SD")] <- as.numeric(site_AGB[which(site_AGB$date == lubridate::year(t)), "sd"]) + site_AGB <- Current_CSV[which(Current_CSV$site_id == id), ] + AGB_Output[which(AGB_Output$site_id == id), paste0(t, "_AbvGrndWood")] <- as.numeric(site_AGB[which(site_AGB$date == lubridate::year(t)), "agb"]) + AGB_Output[which(AGB_Output$site_id == id), paste0(t, "_SD")] <- as.numeric(site_AGB[which(site_AGB$date == lubridate::year(t)), "sd"]) } } } PEcAn.logger::logger.info("Landtrendr AGB Prep Completed!") list(AGB_Output = AGB_Output, time_points = time_points, var = "AbvGrndWood") -} \ No newline at end of file +} diff --git a/modules/data.remote/R/MODIS_LAI_prep.R b/modules/data.remote/R/MODIS_LAI_prep.R index 6982f192291..f0d12733690 100644 --- a/modules/data.remote/R/MODIS_LAI_prep.R +++ b/modules/data.remote/R/MODIS_LAI_prep.R @@ -9,54 +9,58 @@ #' #' @return A data frame containing LAI and sd for each site and each time step. #' @export -#' +#' #' @examples #' @author Dongchen Zhang #' @importFrom magrittr %>% -MODIS_LAI_prep <- function(site_info, time_points, outdir = NULL, search_window = 30, export_csv = FALSE, sd_threshold = 20){ - #initialize future parallel computation. +MODIS_LAI_prep <- function(site_info, time_points, outdir = NULL, search_window = 30, export_csv = FALSE, sd_threshold = 20) { + # initialize future parallel computation. if (future::supportsMulticore()) { future::plan(future::multicore, workers = 10) } else { - future::plan(future::multisession, workers = 10) #10 is the maximum number of requests permitted for the MODIS server. + future::plan(future::multisession, workers = 10) # 10 is the maximum number of requests permitted for the MODIS server. } - #if we export CSV but didn't provide any path - if(as.logical(export_csv) && is.null(outdir)){ + # if we export CSV but didn't provide any path + if (as.logical(export_csv) && is.null(outdir)) { PEcAn.logger::logger.info("If you want to export CSV file, please ensure input the outdir!") return(0) } - #convert time points into paired start and end dates. + # convert time points into paired start and end dates. start.end.dates <- data.frame() for (i in seq_along(time_points)) { - start.end.dates <- rbind(start.end.dates, - list(start_date = as.character(time_points[i] - lubridate::days(search_window)), - end_date = as.character(time_points[i] + lubridate::days(search_window)))) - + start.end.dates <- rbind( + start.end.dates, + list( + start_date = as.character(time_points[i] - lubridate::days(search_window)), + end_date = as.character(time_points[i] + lubridate::days(search_window)) + ) + ) } - #grab previous data to see which site has incomplete observations, if so, download the site for the whole time period. - #if we have previous downloaded CSV file - if(file.exists(file.path(outdir, "LAI.csv"))){ + # grab previous data to see which site has incomplete observations, if so, download the site for the whole time period. 
+ # if we have previous downloaded CSV file + if (file.exists(file.path(outdir, "LAI.csv"))) { PEcAn.logger::logger.info("Extracting previous LAI file!") Previous_CSV <- utils::read.csv(file.path(outdir, "LAI.csv")) if (!is.null(sd_threshold)) { PEcAn.logger::logger.info("filtering out records with high standard errors!") ind.rm <- which(Previous_CSV$sd >= sd_threshold) if (length(ind.rm) > 0) { - Previous_CSV <- Previous_CSV[-ind.rm,] + Previous_CSV <- Previous_CSV[-ind.rm, ] } } - LAI_Output <- matrix(NA, length(site_info$site_id), 2*length(time_points)+1) %>% - `colnames<-`(c("site_id", paste0(time_points, "_LAI"), paste0(time_points, "_SD"))) %>% as.data.frame()#we need: site_id, LAI, std, target time point. + LAI_Output <- matrix(NA, length(site_info$site_id), 2 * length(time_points) + 1) %>% + `colnames<-`(c("site_id", paste0(time_points, "_LAI"), paste0(time_points, "_SD"))) %>% + as.data.frame() # we need: site_id, LAI, std, target time point. LAI_Output$site_id <- site_info$site_id - #Calculate LAI for each time step and site. - #loop over time and site - LAI.list <- time_points %>% furrr::future_map(function(t){ + # Calculate LAI for each time step and site. + # loop over time and site + LAI.list <- time_points %>% furrr::future_map(function(t) { out.t <- data.frame() for (id in site_info$site_id) { - site_LAI <- Previous_CSV[which(Previous_CSV$site_id == id),] - site_LAI$sd[which(site_LAI$sd<=0.66)] <- 0.66 - diff_days <- abs(lubridate::days(lubridate::date(site_LAI$date)-lubridate::date(t))@day) - if(any(diff_days <= search_window)){#data found + site_LAI <- Previous_CSV[which(Previous_CSV$site_id == id), ] + site_LAI$sd[which(site_LAI$sd <= 0.66)] <- 0.66 + diff_days <- abs(lubridate::days(lubridate::date(site_LAI$date) - lubridate::date(t))@day) + if (any(diff_days <= search_window)) { # data found out.t <- rbind(out.t, list(mean = site_LAI$lai[which.min(diff_days)], sd = site_LAI$sd[which.min(diff_days)])) } else { out.t <- rbind(out.t, list(mean = NA, sd = NA)) @@ -65,69 +69,79 @@ MODIS_LAI_prep <- function(site_info, time_points, outdir = NULL, search_window out.t %>% purrr::set_names(c(paste0(t, "_LAI"), paste0(t, "_SD"))) }, .progress = T) for (i in seq_along(time_points)) { - t <- time_points[i]#otherwise the t will be number instead of date. - LAI_Output[, paste0(t, "_LAI")] <- LAI.list[[i]][,paste0(t, "_LAI")] - LAI_Output[, paste0(t, "_SD")] <- LAI.list[[i]][,paste0(t, "_SD")] + t <- time_points[i] # otherwise the t will be number instead of date. + LAI_Output[, paste0(t, "_LAI")] <- LAI.list[[i]][, paste0(t, "_LAI")] + LAI_Output[, paste0(t, "_SD")] <- LAI.list[[i]][, paste0(t, "_SD")] } - }else{#we don't have any previous downloaded CSV file. - LAI_Output <- matrix(NA, length(site_info$site_id), 2*length(time_points)+1) %>% - `colnames<-`(c("site_id", paste0(time_points, "_LAI"), paste0(time_points, "_SD"))) %>% as.data.frame()#we need: site_id, LAI, std, target time point. + } else { # we don't have any previous downloaded CSV file. + LAI_Output <- matrix(NA, length(site_info$site_id), 2 * length(time_points) + 1) %>% + `colnames<-`(c("site_id", paste0(time_points, "_LAI"), paste0(time_points, "_SD"))) %>% + as.data.frame() # we need: site_id, LAI, std, target time point. LAI_Output$site_id <- site_info$site_id } - #only Site that has NA for any time points need to be downloaded. - new_site_info <- site_info %>% purrr::map(function(x)x[!stats::complete.cases(LAI_Output)]) - #TODO: only download data for specific date when we have missing data. 
- if(length(new_site_info$site_id) != 0){ + # only Site that has NA for any time points need to be downloaded. + new_site_info <- site_info %>% purrr::map(function(x) x[!stats::complete.cases(LAI_Output)]) + # TODO: only download data for specific date when we have missing data. + if (length(new_site_info$site_id) != 0) { product <- "MCD15A3H" PEcAn.logger::logger.info("Extracting LAI mean products!") - lai_mean <- split(as.data.frame(new_site_info), seq(nrow(as.data.frame(new_site_info)))) %>% - furrr::future_map(function(s){ - split(as.data.frame(start.end.dates), seq(nrow(as.data.frame(start.end.dates)))) %>% - purrr::map(function(dates){ - if (! "try-error" %in% class(try(mean <- MODISTools::mt_subset(product = product, - lat = s$lat, - lon = s$lon, - band = "Lai_500m", - start = dates$start_date, - end = dates$end_date, - progress = FALSE)))) { + lai_mean <- split(as.data.frame(new_site_info), seq(nrow(as.data.frame(new_site_info)))) %>% + furrr::future_map(function(s) { + split(as.data.frame(start.end.dates), seq(nrow(as.data.frame(start.end.dates)))) %>% + purrr::map(function(dates) { + if (!"try-error" %in% class(try(mean <- MODISTools::mt_subset( + product = product, + lat = s$lat, + lon = s$lon, + band = "Lai_500m", + start = dates$start_date, + end = dates$end_date, + progress = FALSE + )))) { return(list(mean = mean$value, date = mean$calendar_date)) } else { return(NA) } - }) %>% dplyr::bind_rows() + }) %>% + dplyr::bind_rows() }, .progress = T) PEcAn.logger::logger.info("Extracting LAI std products!") - lai_std <- split(as.data.frame(new_site_info), seq(nrow(as.data.frame(new_site_info)))) %>% - furrr::future_map(function(s){ - split(as.data.frame(start.end.dates), seq(nrow(as.data.frame(start.end.dates)))) %>% - purrr::map(function(dates){ - if (! "try-error" %in% class(try(std <- MODISTools::mt_subset(product = product, - lat = s$lat, - lon = s$lon, - band = "LaiStdDev_500m", - start = dates$start_date, - end = dates$end_date, - progress = FALSE)))) { + lai_std <- split(as.data.frame(new_site_info), seq(nrow(as.data.frame(new_site_info)))) %>% + furrr::future_map(function(s) { + split(as.data.frame(start.end.dates), seq(nrow(as.data.frame(start.end.dates)))) %>% + purrr::map(function(dates) { + if (!"try-error" %in% class(try(std <- MODISTools::mt_subset( + product = product, + lat = s$lat, + lon = s$lon, + band = "LaiStdDev_500m", + start = dates$start_date, + end = dates$end_date, + progress = FALSE + )))) { return(std$value) } else { return(NA) } - }) %>% unlist %>% purrr::set_names(NULL) + }) %>% + unlist() %>% + purrr::set_names(NULL) }, .progress = T) PEcAn.logger::logger.info("Extracting LAI qc products!") - lai_qc <- split(as.data.frame(new_site_info), seq(nrow(as.data.frame(new_site_info)))) %>% - furrr::future_map(function(s){ - split(as.data.frame(start.end.dates), seq(nrow(as.data.frame(start.end.dates)))) %>% - purrr::map(function(dates){ - if (! 
"try-error" %in% class(try(qc <- MODISTools::mt_subset(product = product, - lat = s$lat, - lon = s$lon, - band = "FparLai_QC", - start = dates$start_date, - end = dates$end_date, - progress = FALSE)))) { - qc$value %>% purrr::map(function(v){ + lai_qc <- split(as.data.frame(new_site_info), seq(nrow(as.data.frame(new_site_info)))) %>% + furrr::future_map(function(s) { + split(as.data.frame(start.end.dates), seq(nrow(as.data.frame(start.end.dates)))) %>% + purrr::map(function(dates) { + if (!"try-error" %in% class(try(qc <- MODISTools::mt_subset( + product = product, + lat = s$lat, + lon = s$lon, + band = "FparLai_QC", + start = dates$start_date, + end = dates$end_date, + progress = FALSE + )))) { + qc$value %>% purrr::map(function(v) { qc_flag <- intToBits(as.integer(v)) # NB big-endian (ones place first) qc_flag <- as.integer(rev(utils::head(qc_flag, 3))) # now ones place last paste(qc_flag, collapse = "") @@ -135,7 +149,9 @@ MODIS_LAI_prep <- function(site_info, time_points, outdir = NULL, search_window } else { return(NA) } - }) %>% unlist %>% purrr::set_names(NULL) + }) %>% + unlist() %>% + purrr::set_names(NULL) }, .progress = T) # LAI <- data.frame(matrix(NA, 0, 6)) %>% `colnames<-`(c("date", "site_id", "lat", "lon", "lai", "sd")) LAI <- data.frame() @@ -146,7 +162,7 @@ MODIS_LAI_prep <- function(site_info, time_points, outdir = NULL, search_window next } # skip bad pixels based on qc band. - if (! lai_qc[[i]][j] %in% c("000", "001")) { + if (!lai_qc[[i]][j] %in% c("000", "001")) { next } if (!is.null(sd_threshold)) { @@ -154,42 +170,44 @@ MODIS_LAI_prep <- function(site_info, time_points, outdir = NULL, search_window next } } - LAI <- rbind(LAI, list(date = lai_mean[[i]]$date[j], - site_id = new_site_info$site_id[i], - lat = new_site_info$lat[i], - lon = new_site_info$lon[i], - lai = lai_mean[[i]]$mean[j]*0.1, - sd = lai_std[[i]][j]*0.1)) + LAI <- rbind(LAI, list( + date = lai_mean[[i]]$date[j], + site_id = new_site_info$site_id[i], + lat = new_site_info$lat[i], + lon = new_site_info$lon[i], + lai = lai_mean[[i]]$mean[j] * 0.1, + sd = lai_std[[i]][j] * 0.1 + )) } } - #Compare with existing CSV file. (We name the CSV file as LAI.csv) - if(as.logical(export_csv)){ - if(exists("Previous_CSV")){#we already read the csv file previously. + # Compare with existing CSV file. (We name the CSV file as LAI.csv) + if (as.logical(export_csv)) { + if (exists("Previous_CSV")) { # we already read the csv file previously. Current_CSV <- rbind(Previous_CSV, LAI) - Current_CSV <- Current_CSV[!duplicated(paste0(Current_CSV$site_id, Current_CSV$date)),]#using site_id and date to remove duplicated records. + Current_CSV <- Current_CSV[!duplicated(paste0(Current_CSV$site_id, Current_CSV$date)), ] # using site_id and date to remove duplicated records. utils::write.csv(Current_CSV, file = file.path(outdir, "LAI.csv"), row.names = FALSE) - }else{ + } else { Current_CSV <- LAI utils::write.csv(Current_CSV, file = file.path(outdir, "LAI.csv"), row.names = FALSE) } } else { Current_CSV <- LAI } - #Calculate LAI for each time step and site. - #loop over time and site + # Calculate LAI for each time step and site. + # loop over time and site for (i in seq_along(time_points)) { - t <- time_points[i]#otherwise the t will be number instead of date. + t <- time_points[i] # otherwise the t will be number instead of date. 
for (id in new_site_info$site_id) { - site_LAI <- Current_CSV[which(Current_CSV$site_id == id),] - site_LAI$sd[which(site_LAI$sd<=0.66)] <- 0.66 - diff_days <- abs(lubridate::days(lubridate::date(site_LAI$date)-lubridate::date(t))@day) - if(any(diff_days <= as.numeric(search_window))){#data found - LAI_Output[which(LAI_Output$site_id==id), paste0(t, "_LAI")] <- site_LAI$lai[which.min(diff_days)] - LAI_Output[which(LAI_Output$site_id==id), paste0(t, "_SD")] <- site_LAI$sd[which.min(diff_days)] + site_LAI <- Current_CSV[which(Current_CSV$site_id == id), ] + site_LAI$sd[which(site_LAI$sd <= 0.66)] <- 0.66 + diff_days <- abs(lubridate::days(lubridate::date(site_LAI$date) - lubridate::date(t))@day) + if (any(diff_days <= as.numeric(search_window))) { # data found + LAI_Output[which(LAI_Output$site_id == id), paste0(t, "_LAI")] <- site_LAI$lai[which.min(diff_days)] + LAI_Output[which(LAI_Output$site_id == id), paste0(t, "_SD")] <- site_LAI$sd[which.min(diff_days)] } } } } PEcAn.logger::logger.info("MODIS LAI Prep Completed!") list(LAI_Output = LAI_Output, time_points = time_points, var = "LAI") -} \ No newline at end of file +} diff --git a/modules/data.remote/R/MODIS_LC_prep.R b/modules/data.remote/R/MODIS_LC_prep.R index e83ea2117e4..b6f5a19890a 100644 --- a/modules/data.remote/R/MODIS_LC_prep.R +++ b/modules/data.remote/R/MODIS_LC_prep.R @@ -7,119 +7,126 @@ #' #' @return A data frame containing MODIS land cover types for each site and each time step. #' @export -#' +#' #' @examples #' @author Dongchen Zhang #' @importFrom magrittr %>% -#' @details This function enables the feature of grabbing pre-extracted MODIS LC CSV files such that any site that +#' @details This function enables the feature of grabbing pre-extracted MODIS LC CSV files such that any site that #' has records will be skipped (See Line 33). In more detail, we will be loading the previous `LC.csv` file, which #' contains previous extracted land cover records and trying to match that with current requests (location, time). #' Any requests that fail the match will be regarded as new extractions and combine with the previous `LC.csv` file. -MODIS_LC_prep <- function(site_info, time_points, outdir = NULL, qc.filter = c("000", "001")){ - #initialize future parallel computation. +MODIS_LC_prep <- function(site_info, time_points, outdir = NULL, qc.filter = c("000", "001")) { + # initialize future parallel computation. if (future::supportsMulticore()) { future::plan(future::multicore, workers = 10) } else { - future::plan(future::multisession, workers = 10) #10 is the maximum number of requests permitted for the MODIS server. + future::plan(future::multisession, workers = 10) # 10 is the maximum number of requests permitted for the MODIS server. } - #if we export CSV but didn't provide any path - if(is.null(outdir)){ + # if we export CSV but didn't provide any path + if (is.null(outdir)) { PEcAn.logger::logger.info("If you want to export CSV file, please ensure input the outdir!") return(0) } - #convert from dates into years. + # convert from dates into years. years <- lubridate::year(time_points) - #grab previous data to see which site has incomplete observations, if so, download the site for the whole time period. - #if we have previous downloaded CSV file + # grab previous data to see which site has incomplete observations, if so, download the site for the whole time period. 
+ # if we have previous downloaded CSV file if (!is.null(outdir)) { - if(file.exists(file.path(outdir, "LC.csv"))){ + if (file.exists(file.path(outdir, "LC.csv"))) { PEcAn.logger::logger.info("Extracting previous MODIS Land Cover file!") Previous_CSV <- utils::read.csv(file.path(outdir, "LC.csv")) - LC_Output <- matrix(NA, length(site_info$site_id), length(years)+1) %>% - `colnames<-`(c("site_id", paste0(years, "_LC"))) %>% - as.data.frame()#we need: site_id, LC, target time point. - #fill in the site ids. + LC_Output <- matrix(NA, length(site_info$site_id), length(years) + 1) %>% + `colnames<-`(c("site_id", paste0(years, "_LC"))) %>% + as.data.frame() # we need: site_id, LC, target time point. + # fill in the site ids. LC_Output$site_id <- site_info$site_id - #Calculate LAI for each time step and site. - #loop over time and site - LC.list <- years %>% furrr::future_map(function(t){ + # Calculate LAI for each time step and site. + # loop over time and site + LC.list <- years %>% furrr::future_map(function(t) { out.t <- c() for (id in site_info$site_id) { - site_LC <- Previous_CSV[which(Previous_CSV$site_id == id),] + site_LC <- Previous_CSV[which(Previous_CSV$site_id == id), ] out.t <- c(out.t, site_LC$LC) } out.t %>% purrr::set_names(c(paste0(t, "_LC"))) }, .progress = T) for (i in seq_along(years)) { - t <- years[i]#otherwise the t will be number instead of date. - LC_Output[, paste0(t, "_LC")] <- LC.list[[i]][,paste0(t, "_LC")] + t <- years[i] # otherwise the t will be number instead of date. + LC_Output[, paste0(t, "_LC")] <- LC.list[[i]][, paste0(t, "_LC")] } } } else { - #we don't have any previous downloaded CSV file. - LC_Output <- matrix(NA, length(site_info$site_id), length(years)+1) %>% - `colnames<-`(c("site_id", paste0(years, "_LC"))) %>% - as.data.frame()#we need: site_id, LC, target time point. + # we don't have any previous downloaded CSV file. + LC_Output <- matrix(NA, length(site_info$site_id), length(years) + 1) %>% + `colnames<-`(c("site_id", paste0(years, "_LC"))) %>% + as.data.frame() # we need: site_id, LC, target time point. LC_Output$site_id <- site_info$site_id } - #only Site that has NA for any time points need to be downloaded. - new_site_info <- site_info %>% purrr::map(function(x)x[!stats::complete.cases(LC_Output)]) - #start extraction. - if(length(new_site_info$site_id) != 0){ - #grab the product and band names. + # only Site that has NA for any time points need to be downloaded. + new_site_info <- site_info %>% purrr::map(function(x) x[!stats::complete.cases(LC_Output)]) + # start extraction. + if (length(new_site_info$site_id) != 0) { + # grab the product and band names. product <- "MCD12Q1" band <- "LC_Type1" - LC.types <- list("1"="Evergreen Needleleaf Forests", - "2"="Evergreen Broadleaf Forests", - "3"="Deciduous Needleleaf Forests", - "4"="Deciduous Broadleaf Forests", - "5"="Mixed Forests", - "6"="Closed Shrublands", - "7"="Open Shrublands", - "8"="Woody Savannas", - "9"="Savannas", - "10"="Grasslands", - "11"="Permanent Wetlands", - "12"="Croplands", - "13"="Urban and Built-up Lands", - "14"="Cropland/Natural Vegetation Mosaics", - "15"="Permanent Snow and Ice", - "16"="Barren", - "17"="Water Bodies") - #extracting LC types from MODISTools. 
+ LC.types <- list( + "1" = "Evergreen Needleleaf Forests", + "2" = "Evergreen Broadleaf Forests", + "3" = "Deciduous Needleleaf Forests", + "4" = "Deciduous Broadleaf Forests", + "5" = "Mixed Forests", + "6" = "Closed Shrublands", + "7" = "Open Shrublands", + "8" = "Woody Savannas", + "9" = "Savannas", + "10" = "Grasslands", + "11" = "Permanent Wetlands", + "12" = "Croplands", + "13" = "Urban and Built-up Lands", + "14" = "Cropland/Natural Vegetation Mosaics", + "15" = "Permanent Snow and Ice", + "16" = "Barren", + "17" = "Water Bodies" + ) + # extracting LC types from MODISTools. PEcAn.logger::logger.info("Extracting MODIS Land Cover products!") - lc.list <- split(as.data.frame(new_site_info), seq(nrow(as.data.frame(new_site_info)))) %>% - furrr::future_map(function(s){ - years %>% - purrr::map(function(y){ - if (! "try-error" %in% class(try(value <- MODISTools::mt_subset(product = product, - lat = s$lat, - lon = s$lon, - band = band, - start = paste0(y, "-01-01"), - end = paste0(y, "-01-01"), - progress = FALSE)))) { + lc.list <- split(as.data.frame(new_site_info), seq(nrow(as.data.frame(new_site_info)))) %>% + furrr::future_map(function(s) { + years %>% + purrr::map(function(y) { + if (!"try-error" %in% class(try(value <- MODISTools::mt_subset( + product = product, + lat = s$lat, + lon = s$lon, + band = band, + start = paste0(y, "-01-01"), + end = paste0(y, "-01-01"), + progress = FALSE + )))) { return(list(value = value$value, date = value$calendar_date)) } else { return(NA) } - }) %>% dplyr::bind_rows() + }) %>% + dplyr::bind_rows() }, .progress = T) - #extracting QC values. + # extracting QC values. if (!is.null(qc.filter)) { PEcAn.logger::logger.info("Extracting Land Cover QC products!") - qc.list <- split(as.data.frame(new_site_info), seq(nrow(as.data.frame(new_site_info)))) %>% - furrr::future_map(function(s){ - years %>% - purrr::map(function(y){ - if (! "try-error" %in% class(try(qc <- MODISTools::mt_subset(product = product, - lat = s$lat, - lon = s$lon, - band = "QC", - start = paste0(y, "-01-01"), - end = paste0(y, "-01-01"), - progress = FALSE)))) { - qc$value %>% purrr::map(function(v){ + qc.list <- split(as.data.frame(new_site_info), seq(nrow(as.data.frame(new_site_info)))) %>% + furrr::future_map(function(s) { + years %>% + purrr::map(function(y) { + if (!"try-error" %in% class(try(qc <- MODISTools::mt_subset( + product = product, + lat = s$lat, + lon = s$lon, + band = "QC", + start = paste0(y, "-01-01"), + end = paste0(y, "-01-01"), + progress = FALSE + )))) { + qc$value %>% purrr::map(function(v) { qc_flag <- intToBits(as.integer(v)) # NB big-endian (ones place first) qc_flag <- as.integer(rev(utils::head(qc_flag, 3))) # now ones place last paste(qc_flag, collapse = "") @@ -127,7 +134,9 @@ MODIS_LC_prep <- function(site_info, time_points, outdir = NULL, qc.filter = c(" } else { return(NA) } - }) %>% unlist %>% purrr::set_names(NULL) + }) %>% + unlist() %>% + purrr::set_names(NULL) }, .progress = T) } LC <- data.frame() @@ -139,43 +148,45 @@ MODIS_LC_prep <- function(site_info, time_points, outdir = NULL, qc.filter = c(" } if (!is.null(qc.filter)) { # skip bad pixels based on qc band. - if (! 
qc.list[[i]][j] %in% qc.filter) { + if (!qc.list[[i]][j] %in% qc.filter) { next } } - LC <- rbind(LC, list(date = lc.list[[i]]$date[j], - site_id = new_site_info$site_id[i], - lat = new_site_info$lat[i], - lon = new_site_info$lon[i], - lc = LC.types[[lc.list[[i]]$value[j]]])) + LC <- rbind(LC, list( + date = lc.list[[i]]$date[j], + site_id = new_site_info$site_id[i], + lat = new_site_info$lat[i], + lon = new_site_info$lon[i], + lc = LC.types[[lc.list[[i]]$value[j]]] + )) } } - #Compare with existing CSV file. (We name the CSV file as LC.csv) - if(!is.null(outdir)){ - if(exists("Previous_CSV")){#we already read the csv file previously. + # Compare with existing CSV file. (We name the CSV file as LC.csv) + if (!is.null(outdir)) { + if (exists("Previous_CSV")) { # we already read the csv file previously. Current_CSV <- rbind(Previous_CSV, LC) - Current_CSV <- Current_CSV[!duplicated(paste0(Current_CSV$site_id, Current_CSV$date)),]#using site_id and date to remove duplicated records. + Current_CSV <- Current_CSV[!duplicated(paste0(Current_CSV$site_id, Current_CSV$date)), ] # using site_id and date to remove duplicated records. utils::write.csv(Current_CSV, file = file.path(outdir, "LC.csv"), row.names = FALSE) - }else{ + } else { Current_CSV <- LC utils::write.csv(Current_CSV, file = file.path(outdir, "LC.csv"), row.names = FALSE) } } else { Current_CSV <- LC } - #Fill LC for each time step and site. - #loop over time and site + # Fill LC for each time step and site. + # loop over time and site for (i in seq_along(years)) { t <- years[i] for (id in new_site_info$site_id) { if (!any(Current_CSV$site_id == id)) { next } - site_LC <- Current_CSV[which(Current_CSV$site_id == id),] - LC_Output[which(LC_Output$site_id==id), paste0(t, "_LC")] <- site_LC$lc[which(site_LC$date==paste0(t, "-01-01"))] + site_LC <- Current_CSV[which(Current_CSV$site_id == id), ] + LC_Output[which(LC_Output$site_id == id), paste0(t, "_LC")] <- site_LC$lc[which(site_LC$date == paste0(t, "-01-01"))] } } } PEcAn.logger::logger.info("MODIS Land Cover Prep Completed!") LC_Output -} \ No newline at end of file +} diff --git a/modules/data.remote/R/NASA_DAAC_download.R b/modules/data.remote/R/NASA_DAAC_download.R index 01fa50ac043..3c5f363a37e 100644 --- a/modules/data.remote/R/NASA_DAAC_download.R +++ b/modules/data.remote/R/NASA_DAAC_download.R @@ -11,15 +11,15 @@ #' "yyyy-mm-dd". #' @param outdir Character: path of the directory in which to save the #' downloaded files. Default is the current work directory(getwd()). -#' @param doi Character: data DOI on the NASA DAAC server, it can be obtained -#' directly from the NASA ORNL DAAC data portal (e.g., GEDI L4A through +#' @param doi Character: data DOI on the NASA DAAC server, it can be obtained +#' directly from the NASA ORNL DAAC data portal (e.g., GEDI L4A through #' https://daac.ornl.gov/cgi-bin/dsviewer.pl?ds_id=2056). #' @param netrc_file Character: path to the credential file, default is NULL. #' @param just_path Boolean: if we just want the metadata and URL or proceed the actual download. #' #' @return A list containing meta data and physical path for each data downloaded. 
#' @export -#' +#' #' @examples #' \dontrun{ #' ul_lat <- 35 @@ -30,14 +30,16 @@ #' to <- "2022-05-30" #' doi <- "10.3334/ORNLDAAC/2183" #' outdir <- "/projectnb/dietzelab/dongchen/SHIFT/test_download" -#' metadata <- NASA_DAAC_download(ul_lat = ul_lat, -#' ul_lon = ul_lon, -#' lr_lat = lr_lat, -#' lr_lon = lr_lon, -#' from = from, -#' to = to, -#' doi = doi, -#' just_path = T) +#' metadata <- NASA_DAAC_download( +#' ul_lat = ul_lat, +#' ul_lon = ul_lon, +#' lr_lat = lr_lat, +#' lr_lon = lr_lon, +#' from = from, +#' to = to, +#' doi = doi, +#' just_path = T +#' ) #' } #' @author Dongchen Zhang #' @importFrom foreach %dopar% @@ -74,11 +76,13 @@ NASA_DAAC_download <- function(ul_lat, # initialize variable for storing data. granules_href <- entry <- c() repeat { - request_url <- NASA_DAAC_URL(provider = provider_conceptID$provider[1], - concept_id = provider_conceptID$concept_id[1], - page = page, - bbox = bbox, - daterange = daterange) + request_url <- NASA_DAAC_URL( + provider = provider_conceptID$provider[1], + concept_id = provider_conceptID$concept_id[1], + page = page, + bbox = bbox, + daterange = daterange + ) response <- curl::curl_fetch_memory(request_url) content <- rawToChar(response$content) result <- jsonlite::parse_json(content) @@ -87,8 +91,9 @@ NASA_DAAC_download <- function(ul_lat, stop(paste("\n", result$errors, collapse = "\n")) } granules <- result$feed$entry - if (length(granules) == 0) + if (length(granules) == 0) { break + } granules_href <- c(granules_href, sapply(granules, function(x) x$links[[1]]$href)) page <- page + 1 } @@ -141,7 +146,7 @@ NASA_DAAC_download <- function(ul_lat, } #' Create URL that can be used to request data from NASA DAAC server. #' -#' @param base_url Character: base URL for the CMR search. +#' @param base_url Character: base URL for the CMR search. #' default is "https://cmr.earthdata.nasa.gov/search/granules.json?pretty=true". #' @param provider Character: ID of data provider from NASA DAAC. See `NASA_CMR_finder` for more details. #' @param page_size Numeric: maximum requested length, default is 2000. @@ -151,17 +156,19 @@ NASA_DAAC_download <- function(ul_lat, #' @param daterange Character: vectors of the requested start and end dates. In the form "yyyy-mm-dd". #' #' @return A character of URL that can be used to request data. -#' +#' #' @examples #' \dontrun{ #' provider <- "ORNL_CLOUD" #' concept_id <- "C2770099044-ORNL_CLOUD" #' bbox <- "-121,33,-117,35" #' daterange <- c("2022-02-23", "2022-05-30") -#' URL <- NASA_DAAC_URL(provider = provider, -#' concept_id = concept_id, -#' bbox = bbox, -#' daterange = daterange) +#' URL <- NASA_DAAC_URL( +#' provider = provider, +#' concept_id = concept_id, +#' bbox = bbox, +#' daterange = daterange +#' ) #' } #' @author Dongchen Zhang NASA_DAAC_URL <- function(base_url = "https://cmr.earthdata.nasa.gov/search/granules.json?pretty=true", @@ -182,13 +189,13 @@ NASA_DAAC_URL <- function(base_url = "https://cmr.earthdata.nasa.gov/search/gran } #' Create URL that can be used to request data from NASA DAAC server. #' -#' @param doi Character: data DOI on the NASA DAAC server, it can be obtained -#' directly from the NASA ORNL DAAC data portal (e.g., GEDI L4A through +#' @param doi Character: data DOI on the NASA DAAC server, it can be obtained +#' directly from the NASA ORNL DAAC data portal (e.g., GEDI L4A through #' https://daac.ornl.gov/cgi-bin/dsviewer.pl?ds_id=2056). 
#' -#' @return A list with each containing corresponding provider and concept ids +#' @return A list with each containing corresponding provider and concept ids #' given the data doi. -#' +#' #' @examples #' \dontrun{ #' provider_conceptID <- NASA_CMR_finder("10.3334/ORNLDAAC/2183") @@ -205,8 +212,12 @@ NASA_CMR_finder <- function(doi) { httr::stop_for_status(request) results <- httr::content(request, "parsed") # grab paried provider-conceptID records. - provider <- results$feed$entry %>% purrr::map("data_center") %>% unlist - concept_id <- results$feed$entry %>% purrr::map("id") %>% unlist + provider <- results$feed$entry %>% + purrr::map("data_center") %>% + unlist() + concept_id <- results$feed$entry %>% + purrr::map("id") %>% + unlist() # return results. return(as.list(data.frame(cbind(provider, concept_id)))) -} \ No newline at end of file +} diff --git a/modules/data.remote/R/NLCD.R b/modules/data.remote/R/NLCD.R index 1cd2f193607..5d763583543 100644 --- a/modules/data.remote/R/NLCD.R +++ b/modules/data.remote/R/NLCD.R @@ -1,63 +1,68 @@ ##' @title download.NLCD ##' @name download.NLCD -##' +##' ##' @author Mike Dietze ##' @export -##' +##' ##' @param outdir Directory to download NLCD to ##' @param year which NLCD year to download. Only 2001 and 2011 are currently supported. ##' @param con Optional database connection. If specified then the code will check to see if the file already exists in PEcAn before downloading, and will also create a database entry for new downloads -##' +##' ##' @description Downloads and unzips the National Land Cover Database http://www.mrlc.gov/nlcd2011.php. Will automatically insert into PEcAn database if database connection provided. download.NLCD <- function(outdir, year = 2011, con = NULL) { - - if (year == 2001) { - url <- "http://gisdata.usgs.gov/TDDS/DownloadFile.php?TYPE=nlcd2001v2&FNAME=nlcd_2001_landcover_2011_edition_2014_10_10.zip" - input.id <- 1000000482 - } else if (year == 2011) { - url <- "http://gisdata.usgs.gov/TDDS/DownloadFile.php?TYPE=nlcd2011&FNAME=nlcd_2011_landcover_2011_edition_2014_10_10.zip" - input.id <- 1000000483 - } else { - print(paste("Year not yet supported: ", year)) - } - - ## before downloading, check if the file already exists on this host - if (!is.null(con)) { - chk <- PEcAn.DB::dbfile.check(type = "Input", id = input.id, con = con) - if (nrow(chk) > 0) { - machines <- PEcAn.DB::db.query(paste("SELECT * from machines where id in (", - paste(chk$machine_id, sep = ","), ")"), con) - if (PEcAn.remote::fqdn() %in% machines$hostname) { - ## record already exists on this host - return(chk$id[PEcAn.remote::fqdn() == machines$hostname]) - } - } - } - - ## Download the data - dir.create(outdir, showWarnings = FALSE) - destfile <- file.path(outdir, paste0("nlcd", year, ".zip")) - utils::download.file(url, destfile = destfile) - status <- system(paste("(cd", outdir, "; unzip", destfile, ")")) - # unzip(destfile,exdir = outdir) ## unzip command produced corrupted file! 
- file.remove(destfile) ## clean up raw zip file - - ## Insert the database record - data_dir <- file.path(outdir, paste0("nlcd_", year, "_landcover_2011_edition_2014_10_10")) - if (!is.null(con)) { - prefix <- table(sapply(strsplit(dir(data_dir), ".", fixed = TRUE), function(x) { x[1] })) - prefix <- names(which.max(prefix)) - site.id <- 1000000676 - return(PEcAn.DB::dbfile.insert(data_dir, in.prefix = prefix, type = "Input", input.id, con, - reuse = TRUE)) + if (year == 2001) { + url <- "http://gisdata.usgs.gov/TDDS/DownloadFile.php?TYPE=nlcd2001v2&FNAME=nlcd_2001_landcover_2011_edition_2014_10_10.zip" + input.id <- 1000000482 + } else if (year == 2011) { + url <- "http://gisdata.usgs.gov/TDDS/DownloadFile.php?TYPE=nlcd2011&FNAME=nlcd_2011_landcover_2011_edition_2014_10_10.zip" + input.id <- 1000000483 + } else { + print(paste("Year not yet supported: ", year)) + } + + ## before downloading, check if the file already exists on this host + if (!is.null(con)) { + chk <- PEcAn.DB::dbfile.check(type = "Input", id = input.id, con = con) + if (nrow(chk) > 0) { + machines <- PEcAn.DB::db.query(paste( + "SELECT * from machines where id in (", + paste(chk$machine_id, sep = ","), ")" + ), con) + if (PEcAn.remote::fqdn() %in% machines$hostname) { + ## record already exists on this host + return(chk$id[PEcAn.remote::fqdn() == machines$hostname]) + } } - return(data_dir) + } + + ## Download the data + dir.create(outdir, showWarnings = FALSE) + destfile <- file.path(outdir, paste0("nlcd", year, ".zip")) + utils::download.file(url, destfile = destfile) + status <- system(paste("(cd", outdir, "; unzip", destfile, ")")) + # unzip(destfile,exdir = outdir) ## unzip command produced corrupted file! + file.remove(destfile) ## clean up raw zip file + + ## Insert the database record + data_dir <- file.path(outdir, paste0("nlcd_", year, "_landcover_2011_edition_2014_10_10")) + if (!is.null(con)) { + prefix <- table(sapply(strsplit(dir(data_dir), ".", fixed = TRUE), function(x) { + x[1] + })) + prefix <- names(which.max(prefix)) + site.id <- 1000000676 + return(PEcAn.DB::dbfile.insert(data_dir, + in.prefix = prefix, type = "Input", input.id, con, + reuse = TRUE + )) + } + return(data_dir) } # download.NLCD ##' @title extract.NLCD ##' @author Mike Dietze ##' @export -##' +##' ##' @param buffer search radius (meters) ##' @param coords data frame containing elements 'long' and 'lat'. Currently just supports single point extraction. ##' @param data_dir directory where input data is located. Can be NUL if con is specified @@ -65,61 +70,63 @@ download.NLCD <- function(outdir, year = 2011, con = NULL) { ##' @param year which NLCD year to extract. 
If data_dir not provided, must be one of `2001` or `2011` ##' ##' @return dataframe of fractional cover of different cover classes -##' +##' ##' @description Based on codes from Christy Rollinson and from Max Joseph (http://mbjoseph.github.io/2014/11/08/nlcd.html) extract_NLCD <- function(buffer, coords, data_dir = NULL, con = NULL, year = 2011) { - - if (!is.null(con)) { - - if (year == 2001) { - input.id <- 1000000482 - } else if (year == 2011) { - input.id <- 1000000483 - } else { - print(paste("Year not yet supported: ", year)) - } - chk <- PEcAn.DB::dbfile.check(type = "Input", id = input.id, con = con) - if (nrow(chk) > 0) { - machines <- PEcAn.DB::db.query(paste("SELECT * from machines where id in (", - paste(chk$machine_id, sep = ","), ")"), con) - if (PEcAn.remote::fqdn() %in% machines$hostname) { - ## record already exists on this host - data_dir <- chk$file_path[PEcAn.remote::fqdn() == machines$hostname] - } else { - print(paste0("File not found on localhost, please check database input.id ", - input.id, ". You may need to run download.NLCD")) - return(list(chk = chk, machines = machines, localhost = PEcAn.remote::fqdn())) - } - } else { - print(paste("No files found for input.id", input.id)) - return(NULL) - } + if (!is.null(con)) { + if (year == 2001) { + input.id <- 1000000482 + } else if (year == 2011) { + input.id <- 1000000483 + } else { + print(paste("Year not yet supported: ", year)) } - - # load cover data - filename <- file.path(data_dir, paste0("nlcd_", year, "_landcover_2011_edition_2014_10_10.img")) - if (!file.exists(filename)) { - print(paste("File not found:", filename)) - return(NULL) + chk <- PEcAn.DB::dbfile.check(type = "Input", id = input.id, con = con) + if (nrow(chk) > 0) { + machines <- PEcAn.DB::db.query(paste( + "SELECT * from machines where id in (", + paste(chk$machine_id, sep = ","), ")" + ), con) + if (PEcAn.remote::fqdn() %in% machines$hostname) { + ## record already exists on this host + data_dir <- chk$file_path[PEcAn.remote::fqdn() == machines$hostname] + } else { + print(paste0( + "File not found on localhost, please check database input.id ", + input.id, ". 
You may need to run download.NLCD" + )) + return(list(chk = chk, machines = machines, localhost = PEcAn.remote::fqdn())) + } + } else { + print(paste("No files found for input.id", input.id)) + return(NULL) } - - # WARNING: the following extraction previously used raster and sp package functions - # this new implementation with terra functions has not been thoroughly tested - nlcd <- terra::rast(filename) - - # transform points - sites <- terra::vect(coords, geom=c("long", "lat"), crs="+proj=longlat +datum=WGS84") - sites <- terra::buffer(sites, width=buffer) - - # extract - sum.raw <- table(terra::extract(nlcd, sites)) - summ <- prop.table(sum.raw) - mydf <- data.frame(cover.name = colnames(summ), percent = as.vector(summ), count = as.vector(sum.raw)) - mydf <- mydf[mydf$count!=0,] - - # land cover name to number conversions - nlcd_levels <- terra::levels(nlcd)[[1]] - mydf$cover <- nlcd_levels$value[nlcd_levels$`Land Cover Class` %in% mydf$cover.name] - - return(mydf) + } + + # load cover data + filename <- file.path(data_dir, paste0("nlcd_", year, "_landcover_2011_edition_2014_10_10.img")) + if (!file.exists(filename)) { + print(paste("File not found:", filename)) + return(NULL) + } + + # WARNING: the following extraction previously used raster and sp package functions + # this new implementation with terra functions has not been thoroughly tested + nlcd <- terra::rast(filename) + + # transform points + sites <- terra::vect(coords, geom = c("long", "lat"), crs = "+proj=longlat +datum=WGS84") + sites <- terra::buffer(sites, width = buffer) + + # extract + sum.raw <- table(terra::extract(nlcd, sites)) + summ <- prop.table(sum.raw) + mydf <- data.frame(cover.name = colnames(summ), percent = as.vector(summ), count = as.vector(sum.raw)) + mydf <- mydf[mydf$count != 0, ] + + # land cover name to number conversions + nlcd_levels <- terra::levels(nlcd)[[1]] + mydf$cover <- nlcd_levels$value[nlcd_levels$`Land Cover Class` %in% mydf$cover.name] + + return(mydf) } # extract_NLCD diff --git a/modules/data.remote/R/Prep_AGB_IC_from_2010_global.R b/modules/data.remote/R/Prep_AGB_IC_from_2010_global.R index 8b3a4c9b09e..79e85989790 100644 --- a/modules/data.remote/R/Prep_AGB_IC_from_2010_global.R +++ b/modules/data.remote/R/Prep_AGB_IC_from_2010_global.R @@ -8,12 +8,12 @@ #' #' @return A data frame containing sampled above ground biomass densities, each column represent each site. #' @export -#' +#' #' @examples #' @author Dongchen Zhang #' @importFrom magrittr %>% Prep_AGB_IC_from_2010_global <- function(site_info, paths.list, ens) { - #Initialize the multicore computation. + # Initialize the multicore computation. 
if (future::supportsMulticore()) { future::plan(future::multicore) } else { @@ -21,23 +21,30 @@ Prep_AGB_IC_from_2010_global <- function(site_info, paths.list, ens) { } ## get coordinates and provide spatial info site_coords <- data.frame(site_info$lon, site_info$lat) - names(site_coords) <- c("Longitude","Latitude") + names(site_coords) <- c("Longitude", "Latitude") coords_latlong <- sp::SpatialPoints(site_coords) sp::proj4string(coords_latlong) <- sp::CRS("+init=epsg:4326") ## load gridded AGB data raster_data <- lapply(paths.list, raster::raster) ## reproject Lat/Long site coords to AGB Albers Equal-Area - coords_AEA <- sp::spTransform(coords_latlong, - raster::crs(raster::raster(raster_data[[1]]))) + coords_AEA <- sp::spTransform( + coords_latlong, + raster::crs(raster::raster(raster_data[[1]])) + ) ## prepare product for extraction - stack requested years raster_data_stack <- raster::stack(raster_data) ## extract - agb_pixel <- raster::extract(x = raster_data_stack, - y = coords_AEA, buffer=0, fun=NULL, df=FALSE) - sampled_ic <- agb_pixel %>% furrr::future_map(function(pixel){ - ens_sample <- stats::rnorm(ens, pixel["mean"], pixel["uncertainty"]) - ens_sample[which(ens_sample<0)] <- 0 - ens_sample - }, .progress = T) %>% dplyr::bind_cols() %>% `colnames<-`(site_info$site_id) + agb_pixel <- raster::extract( + x = raster_data_stack, + y = coords_AEA, buffer = 0, fun = NULL, df = FALSE + ) + sampled_ic <- agb_pixel %>% + furrr::future_map(function(pixel) { + ens_sample <- stats::rnorm(ens, pixel["mean"], pixel["uncertainty"]) + ens_sample[which(ens_sample < 0)] <- 0 + ens_sample + }, .progress = T) %>% + dplyr::bind_cols() %>% + `colnames<-`(site_info$site_id) return(sampled_ic) -} \ No newline at end of file +} diff --git a/modules/data.remote/R/SMAP_SMP_prep.R b/modules/data.remote/R/SMAP_SMP_prep.R index 5eca6d16750..d433e5d0a91 100644 --- a/modules/data.remote/R/SMAP_SMP_prep.R +++ b/modules/data.remote/R/SMAP_SMP_prep.R @@ -15,73 +15,75 @@ #' @examples #' @author Dongchen Zhang #' @importFrom magrittr %>% -SMAP_SMP_prep <- function(site_info, start_date, end_date, time_points, - outdir, search_window = 30, export_csv = TRUE, update_csv = FALSE){ - #note that, the SMAP_gee.csv file comes from Google Earth Engine (GEE) directly. - #Code for generating this file can be found through this link: - #https://code.earthengine.google.com/ecbeb770e576d8ef72f72f5f12da3496 - #Feel free to contact Dongchen Zhang (zhangdc@bu.edu) who wrote this code. - #The SMAP.csv file will be generated the first time when you use this code. - #for the next time, it will save you lot of time if you can provide the SMAP.csv directly. - #Initialize the multicore computation. +SMAP_SMP_prep <- function(site_info, start_date, end_date, time_points, + outdir, search_window = 30, export_csv = TRUE, update_csv = FALSE) { + # note that, the SMAP_gee.csv file comes from Google Earth Engine (GEE) directly. + # Code for generating this file can be found through this link: + # https://code.earthengine.google.com/ecbeb770e576d8ef72f72f5f12da3496 + # Feel free to contact Dongchen Zhang (zhangdc@bu.edu) who wrote this code. + # The SMAP.csv file will be generated the first time when you use this code. + # for the next time, it will save you lot of time if you can provide the SMAP.csv directly. + # Initialize the multicore computation. if (future::supportsMulticore()) { future::plan(future::multicore) } else { future::plan(future::multisession) } - #check if SMAP.csv exists. 
- if(!file.exists(file.path(outdir, "SMAP.csv")) | as.logical(update_csv)){ - if(!file.exists(file.path(outdir, "SMAP_gee.csv"))){ + # check if SMAP.csv exists. + if (!file.exists(file.path(outdir, "SMAP.csv")) | as.logical(update_csv)) { + if (!file.exists(file.path(outdir, "SMAP_gee.csv"))) { PEcAn.logger::logger.info("Please Provide SMAP dir that contains at least the SMAP_gee.csv file!") return(0) - }else{ - SMAP_CSV <- utils::read.csv(file.path(outdir, "SMAP_gee.csv"))[-1,2] %>% - furrr::future_map(function(string){ - String <- strsplit(gsub(",", "", gsub("\\[|\\]", "", string)), " ")[[1]] - date <- as.Date(strsplit(String[1], "_")[[1]][5], "%Y%m%d") - lon <- as.numeric(String[2]) - lat <- as.numeric(String[3]) - smp <- as.numeric(String[5]) * 100 - sd <- 0.04 * 100 #From Daniel - - #Match current lon/lat with site_info - Longlat_matrix <- matrix(c(lon, site_info$lon, lat, site_info$lat), ncol=2) - Distance <- sp::spDistsN1(Longlat_matrix, Longlat_matrix[1,], longlat = TRUE)[-1] - distloc <- match(min(Distance), Distance) - site_id <- site_info$site_id[distloc] - list(date = date, site_id = site_id, lat = lat, lon = lon, smp = smp, sd = sd)#in date, id, lat, lon, smp, sd - }, .progress = T) %>% dplyr::bind_rows() - #write out csv file. - if(as.logical((export_csv))){ + } else { + SMAP_CSV <- utils::read.csv(file.path(outdir, "SMAP_gee.csv"))[-1, 2] %>% + furrr::future_map(function(string) { + String <- strsplit(gsub(",", "", gsub("\\[|\\]", "", string)), " ")[[1]] + date <- as.Date(strsplit(String[1], "_")[[1]][5], "%Y%m%d") + lon <- as.numeric(String[2]) + lat <- as.numeric(String[3]) + smp <- as.numeric(String[5]) * 100 + sd <- 0.04 * 100 # From Daniel + + # Match current lon/lat with site_info + Longlat_matrix <- matrix(c(lon, site_info$lon, lat, site_info$lat), ncol = 2) + Distance <- sp::spDistsN1(Longlat_matrix, Longlat_matrix[1, ], longlat = TRUE)[-1] + distloc <- match(min(Distance), Distance) + site_id <- site_info$site_id[distloc] + list(date = date, site_id = site_id, lat = lat, lon = lon, smp = smp, sd = sd) # in date, id, lat, lon, smp, sd + }, .progress = T) %>% + dplyr::bind_rows() + # write out csv file. + if (as.logical((export_csv))) { utils::write.csv(SMAP_CSV, file = file.path(outdir, "SMAP.csv"), row.names = F) } } - }else{ - #TODO: When current SMAP.csv need to be updated + } else { + # TODO: When current SMAP.csv need to be updated SMAP_CSV <- utils::read.csv(file.path(outdir, "SMAP.csv")) Current_years <- sort(unique(lubridate::year(SMAP_CSV$date))) Required_years <- lubridate::year(start_date):lubridate::year(end_date) - Required_years <- Required_years[which(Required_years>=2015)] #SMAP data only available after year 2015. - if(sum(!Required_years%in%Current_years)){ + Required_years <- Required_years[which(Required_years >= 2015)] # SMAP data only available after year 2015. + if (sum(!Required_years %in% Current_years)) { PEcAn.logger::logger.info("The existing SMAP.csv file doesn't contain data between start and end date!") PEcAn.logger::logger.info("Please update the SMAP_gee.csv file to include the data that are missing! 
And then flag update_csv as TRUE to proceed!") return(0) } } - time_points <- time_points[which(lubridate::year(time_points)>=2015)] #filter out any time points that are before 2015 - #initialize SMAP_Output - SMAP_Output <- matrix(NA, length(site_info$site_id), 2*length(time_points)+1) %>% - `colnames<-`(c("site_id", paste0(time_points, "_SoilMoistFrac"), paste0(time_points, "_SD"))) %>% as.data.frame()#we need: site_id, LAI, std, target time point. + time_points <- time_points[which(lubridate::year(time_points) >= 2015)] # filter out any time points that are before 2015 + # initialize SMAP_Output + SMAP_Output <- matrix(NA, length(site_info$site_id), 2 * length(time_points) + 1) %>% + `colnames<-`(c("site_id", paste0(time_points, "_SoilMoistFrac"), paste0(time_points, "_SD"))) %>% + as.data.frame() # we need: site_id, LAI, std, target time point. SMAP_Output$site_id <- site_info$site_id - #Calculate SMAP for each time step and site. - #loop over time and site + # Calculate SMAP for each time step and site. + # loop over time and site PEcAn.logger::logger.info("Extracting previous SMAP file!") - SMAP.list <- time_points %>% furrr::future_map(function(t){ + SMAP.list <- time_points %>% furrr::future_map(function(t) { out.t <- data.frame() for (id in site_info$site_id) { - site_SMP <- SMAP_CSV[which(SMAP_CSV$site_id == id),] - diff_days <- abs(lubridate::days(lubridate::date(site_SMP$date)-lubridate::date(t))@day) - if(any(diff_days <= search_window)){#data found + site_SMP <- SMAP_CSV[which(SMAP_CSV$site_id == id), ] + diff_days <- abs(lubridate::days(lubridate::date(site_SMP$date) - lubridate::date(t))@day) + if (any(diff_days <= search_window)) { # data found out.t <- rbind(out.t, list(mean = site_SMP$smp[which.min(diff_days)], sd = site_SMP$sd[which.min(diff_days)])) } else { out.t <- rbind(out.t, list(mean = NA, sd = NA)) @@ -90,10 +92,10 @@ SMAP_SMP_prep <- function(site_info, start_date, end_date, time_points, out.t %>% purrr::set_names(c(paste0(t, "_SoilMoistFrac"), paste0(t, "_SD"))) }, .progress = T) for (i in seq_along(time_points)) { - t <- time_points[i]#otherwise the t will be number instead of date. - SMAP_Output[, paste0(t, "_SoilMoistFrac")] <- SMAP.list[[i]][,paste0(t, "_SoilMoistFrac")] - SMAP_Output[, paste0(t, "_SD")] <- SMAP.list[[i]][,paste0(t, "_SD")] + t <- time_points[i] # otherwise the t will be number instead of date. + SMAP_Output[, paste0(t, "_SoilMoistFrac")] <- SMAP.list[[i]][, paste0(t, "_SoilMoistFrac")] + SMAP_Output[, paste0(t, "_SD")] <- SMAP.list[[i]][, paste0(t, "_SD")] } PEcAn.logger::logger.info("SMAP SMP Prep Completed!") list(SMP_Output = SMAP_Output, time_points = time_points, var = "SoilMoistFrac") -} \ No newline at end of file +} diff --git a/modules/data.remote/R/call_MODIS.R b/modules/data.remote/R/call_MODIS.R index b4791cd004e..e7dbeac388c 100755 --- a/modules/data.remote/R/call_MODIS.R +++ b/modules/data.remote/R/call_MODIS.R @@ -1,31 +1,31 @@ -##' Get MODIS data by date and location -##' +##' Get MODIS data by date and location +##' ##' @name call_MODIS ##' @title call_MODIS ##' @export -##' @param outdir where the output file will be stored. Default is NULL and in this case only values are returned. When path is provided values are returned and written to disk. +##' @param outdir where the output file will be stored. Default is NULL and in this case only values are returned. When path is provided values are returned and written to disk. ##' @param var the simple name of the modis dataset variable (e.g. 
lai) -##' @param site_info Bety list of site info for parsing MODIS data: list(site_id, site_name, lat, +##' @param site_info Bety list of site info for parsing MODIS data: list(site_id, site_name, lat, ##' lon, time_zone) ##' @param product_dates a character vector of the start and end date of the data in YYYYJJJ -##' @param run_parallel optional method to download data paralleize. Only works if more than 1 +##' @param run_parallel optional method to download data paralleize. Only works if more than 1 ##' site is needed and there are >1 CPUs available. -##' @param ncores number of cpus to use if run_parallel is set to TRUE. If you do not know the +##' @param ncores number of cpus to use if run_parallel is set to TRUE. If you do not know the ##' number of CPU's available, enter NULL. ##' @param product string value for MODIS product number ##' @param band string value for which measurement to extract -##' @param package_method string value to inform function of which package method to use to download +##' @param package_method string value to inform function of which package method to use to download ##' modis data. Either "MODISTools" or "reticulate" (optional) -##' @param QC_filter Converts QC values of band and keeps only data values that are excellent or good -##' (as described by MODIS documentation), and removes all bad values. qc_band must be supplied for this +##' @param QC_filter Converts QC values of band and keeps only data values that are excellent or good +##' (as described by MODIS documentation), and removes all bad values. qc_band must be supplied for this ##' parameter to work. Default is False. Only MODISTools option. -##' @param progress TRUE reports the download progress bar of the dataset, FALSE omits the download +##' @param progress TRUE reports the download progress bar of the dataset, FALSE omits the download ##' progress bar. Default is TRUE. Only MODISTools option. -##' -##' Requires Python3 for reticulate method option. There are a number of required python libraries. +##' +##' Requires Python3 for reticulate method option. There are a number of required python libraries. ##' sudo -H pip install numpy suds netCDF4 json ##' depends on the MODISTools package version 1.1.0 -##' +##' ##' @examples ##' \dontrun{ ##' site_info <- list( @@ -50,266 +50,254 @@ ##' @importFrom foreach %do% %dopar% ##' @author Bailey Morrison ##' -call_MODIS <- function(var, product, - band, site_info, +call_MODIS <- function(var, product, + band, site_info, product_dates, - outdir = NULL, - run_parallel = FALSE, + outdir = NULL, + run_parallel = FALSE, ncores = NULL, - package_method = "MODISTools", - QC_filter = FALSE, + package_method = "MODISTools", + QC_filter = FALSE, progress = FALSE) { - # makes the query search for 1 pixel and not for rasters chunks for now. Will be changed when we provide raster output support. - size <- 0 - + size <- 0 + site_coords <- data.frame(site_info$lon, site_info$lat) - names(site_coords) <- c("lon","lat") - + names(site_coords) <- c("lon", "lat") + # set up CPUS for parallel runs. 
if (is.null(ncores)) { total_cores <- parallel::detectCores(all.tests = FALSE, logical = TRUE) - ncores <- total_cores-2 + ncores <- total_cores - 2 } if (ncores > 10) # MODIS API has a 10 download limit / computer - { - ncores <- 10 - } - + { + ncores <- 10 + } + # register CPUS if run_parallel = TRUE - if (run_parallel){ - if (progress){ + if (run_parallel) { + if (progress) { cl <- parallel::makeCluster(ncores, outfile = "") doParallel::registerDoParallel(cl) } else { cl <- parallel::makeCluster(ncores) doParallel::registerDoParallel(cl) } - } - + #################### if package_method == MODISTools option #################### - - if (package_method == "MODISTools") - { - #################### FUNCTION PARAMETER PRECHECKS #################### - #1. check that modis product is available + + if (package_method == "MODISTools") { + #################### FUNCTION PARAMETER PRECHECKS #################### + # 1. check that modis product is available products <- MODISTools::mt_products() - if (!(product %in% products$product)) - { + if (!(product %in% products$product)) { PEcAn.logger::logger.warn(products) stop("Product not available for MODIS API. Please chose a product from the list above.") - } - - #2. check that modis produdct band is available + } + + # 2. check that modis produdct band is available bands <- MODISTools::mt_bands(product = product) - if (!(band %in% bands$band)) - { + if (!(band %in% bands$band)) { PEcAn.logger::logger.warn(bands$band) stop("Band selected is not avialable. Please selected from the bands listed above that correspond with the data product.") - } - - #3. check that dates asked for in function parameters are fall within dates available for modis product/bands. - if (run_parallel) - { - modis_dates <- as.numeric(substr(sort(unique(foreach::foreach(i = seq_along(nrow(site_coords)), .combine = c) - %dopar% MODISTools::mt_dates(product = product, lat = site_coords$lat[i], lon = site_coords$lon[i])$modis_date)), 2, 8)) - } else { - modis_dates <- as.numeric(substr(sort(unique(foreach::foreach(i = seq_along(nrow(site_coords)), .combine = c) %do% - MODISTools::mt_dates(product = product, lat = site_coords$lat[i], lon = site_coords$lon[i])$modis_date)), 2, 8)) - } - - - # check if user asked for dates for data, if not, download all dates - if (is.null(product_dates)) { - dates <- sort(unique(foreach::foreach(i = seq_along(nrow(site_coords)), .combine = c) %do% - MODISTools::mt_dates(product = product, lat = site_coords$lat[i], lon = site_coords$lon[i])$modis_date)) - #dates = as.Date(as.character(substr(dates, 2, nchar(dates))), format = "%Y%j") - } else { - # if user asked for specific dates, first make sure data is available, then inform user of any missing dates in time period asked for. 
- start_date <- as.numeric(product_dates[1]) - end_date <- as.numeric(product_dates[2]) - - # if all dates are available with user defined time period: - if (start_date >= modis_dates[1] & end_date <= modis_dates[length(modis_dates)]) - { - PEcAn.logger::logger.info("Check #2: All dates are available!") - - start_date <- modis_dates[which(modis_dates >= start_date)[1]] - - include <- which(modis_dates <= end_date) - end_date <- modis_dates[include[length(include)]] } - - # if start and end dates fall completely outside of available modis_dates: - if ((start_date < modis_dates[1] & end_date < modis_dates[1]) | start_date > modis_dates[length(modis_dates)] & end_date > modis_dates[length(modis_dates)]) - { - PEcAn.logger::logger.severe( - "Start and end date (", start_date, ", ", end_date, - ") are not within MODIS data product date range (", modis_dates[1], ", ", modis_dates[length(modis_dates)], - "). Please choose another date.") - } - - # if start and end dates are larger than the available range, but part or full range: - if ((start_date < modis_dates[1] & end_date > modis_dates[1]) | start_date < modis_dates[length(modis_dates)] & end_date > modis_dates[length(modis_dates)]) - { - PEcAn.logger::logger.warn("WARNING: Dates are partially available. Start and/or end date extend beyond modis data product availability.") - start_date <- modis_dates[which(modis_dates >= start_date)[1]] - - include <- which(modis_dates <= end_date) - end_date <- modis_dates[include[length(include)]] - } - - dates <- modis_dates[which(modis_dates >= start_date & modis_dates <= end_date)] - - } - - modis_dates <- as.Date(as.character(modis_dates), format = "%Y%j") - dates <- as.Date(as.character(dates), format = "%Y%j") - - #### Start extracting the data - PEcAn.logger::logger.info("Extracting data") - - if (run_parallel) - { - dat <- foreach::foreach(i=seq_along(site_info$site_id), .combine = rbind) %dopar% - MODISTools::mt_subset(lat = site_coords$lat[i],lon = site_coords$lon[i], - product = product, - band = band, - start = dates[1], - end = dates[length(dates)], - km_ab = size, km_lr = size, - progress = progress, site_name = as.character(site_info$site_id[i])) - } else { - dat <- data.frame() - - for (i in seq_along(site_info$site_id)) - { - d <- MODISTools::mt_subset(lat = site_coords$lat[i], - lon = site_coords$lon[i], - product = product, - band = band, - start = dates[1], - end = dates[length(dates)], - km_ab = size, km_lr = size, - progress = progress) - dat <- rbind(dat, d) + + # 3. check that dates asked for in function parameters are fall within dates available for modis product/bands. + if (run_parallel) { + modis_dates <- as.numeric(substr(sort(unique(foreach::foreach(i = seq_along(nrow(site_coords)), .combine = c) + %dopar% MODISTools::mt_dates(product = product, lat = site_coords$lat[i], lon = site_coords$lon[i])$modis_date)), 2, 8)) + } else { + modis_dates <- as.numeric(substr(sort(unique(foreach::foreach(i = seq_along(nrow(site_coords)), .combine = c) %do% + MODISTools::mt_dates(product = product, lat = site_coords$lat[i], lon = site_coords$lon[i])$modis_date)), 2, 8)) } - } - - # clean up data outputs so there isn't extra data, format classes. 
- output <- as.data.frame(cbind(dat$modis_date, dat$calendar_date, dat$band, dat$tile, dat$site, dat$latitude, dat$longitude, dat$pixel, dat$value), stringsAsFactors = FALSE) - names(output) <- c("modis_date", "calendar_date", "band", "tile", "site_id", "lat", "lon", "pixels", "data") - - output[ ,5:9] <- lapply(output[ ,5:9], as.numeric) - - # scale the data + stdev to proper units - output$data <- output$data * (as.numeric(dat$scale)) - output$lat <- round(output$lat, 4) - output$lon <- round(output$lon, 4) - - # remove bad values if QC filter is on - if (QC_filter) - { - qc_band <- bands$band[which(grepl(var, bands$band, ignore.case = TRUE) & grepl("QC", bands$band, ignore.case = TRUE))] - - if (run_parallel) - { - qc <- foreach::foreach(i=seq_along(site_info$site_id), .combine = rbind) %dopar% - MODISTools::mt_subset(lat = site_coords$lat[i], - lon = site_coords$lon[i], - product = product, - band = qc_band, - start = dates[1], - end = dates[length(dates)], - km_ab = size, km_lr = size, - progress = progress) + + + # check if user asked for dates for data, if not, download all dates + if (is.null(product_dates)) { + dates <- sort(unique(foreach::foreach(i = seq_along(nrow(site_coords)), .combine = c) %do% + MODISTools::mt_dates(product = product, lat = site_coords$lat[i], lon = site_coords$lon[i])$modis_date)) + # dates = as.Date(as.character(substr(dates, 2, nchar(dates))), format = "%Y%j") } else { - qc <- MODISTools::mt_subset(lat = site_coords$lat[i], - lon = site_coords$lon[i], - product = product, - band = qc_band, - start = dates[1], - end = dates[length(dates)], - km_ab = size, km_lr = size, - progress = progress) + # if user asked for specific dates, first make sure data is available, then inform user of any missing dates in time period asked for. + start_date <- as.numeric(product_dates[1]) + end_date <- as.numeric(product_dates[2]) - } + # if all dates are available with user defined time period: + if (start_date >= modis_dates[1] & end_date <= modis_dates[length(modis_dates)]) { + PEcAn.logger::logger.info("Check #2: All dates are available!") - output$qc <- as.character(qc$value) - - #convert QC values and keep only okay values - for (i in seq_len(nrow(output))) - { - # QC flags are stored as an 8-bit mask - # we only care about the 3 least-significant bits - qc_flags <- intToBits(as.integer(output$qc[i])) # NB big-endian (ones place first) - qc_flags <- as.integer(rev(utils::head(qc_flags, 3))) # now ones place last - output$qc[i] <- paste(qc_flags, collapse = "") + start_date <- modis_dates[which(modis_dates >= start_date)[1]] + + include <- which(modis_dates <= end_date) + end_date <- modis_dates[include[length(include)]] + } + + # if start and end dates fall completely outside of available modis_dates: + if ((start_date < modis_dates[1] & end_date < modis_dates[1]) | start_date > modis_dates[length(modis_dates)] & end_date > modis_dates[length(modis_dates)]) { + PEcAn.logger::logger.severe( + "Start and end date (", start_date, ", ", end_date, + ") are not within MODIS data product date range (", modis_dates[1], ", ", modis_dates[length(modis_dates)], + "). Please choose another date." + ) + } + + # if start and end dates are larger than the available range, but part or full range: + if ((start_date < modis_dates[1] & end_date > modis_dates[1]) | start_date < modis_dates[length(modis_dates)] & end_date > modis_dates[length(modis_dates)]) { + PEcAn.logger::logger.warn("WARNING: Dates are partially available. 
Start and/or end date extend beyond modis data product availability.") + start_date <- modis_dates[which(modis_dates >= start_date)[1]] + + include <- which(modis_dates <= end_date) + end_date <- modis_dates[include[length(include)]] + } + + dates <- modis_dates[which(modis_dates >= start_date & modis_dates <= end_date)] } - good <- which(output$qc %in% c("000", "001")) - if (length(good) > 0) - { - output <- output[good, ] + + modis_dates <- as.Date(as.character(modis_dates), format = "%Y%j") + dates <- as.Date(as.character(dates), format = "%Y%j") + + #### Start extracting the data + PEcAn.logger::logger.info("Extracting data") + + if (run_parallel) { + dat <- foreach::foreach(i = seq_along(site_info$site_id), .combine = rbind) %dopar% + MODISTools::mt_subset( + lat = site_coords$lat[i], lon = site_coords$lon[i], + product = product, + band = band, + start = dates[1], + end = dates[length(dates)], + km_ab = size, km_lr = size, + progress = progress, site_name = as.character(site_info$site_id[i]) + ) } else { - PEcAn.logger::logger.warn("All QC values are bad. No data to output with QC filter == TRUE.") - } - } - - # unregister cores since parallel process is done - if (run_parallel) - { - parallel::stopCluster(cl) - } - - # break dataoutput up by site and save out chunks - if (!(is.null(outdir))) - { - for (i in seq_along(site_info$site_id)) - { - if (!(dir.exists(file.path(outdir, site_info$site_id[i])))) + dat <- data.frame() + + for (i in seq_along(site_info$site_id)) { - dir.create(file.path(outdir, site_info$site_id[i])) + d <- MODISTools::mt_subset( + lat = site_coords$lat[i], + lon = site_coords$lon[i], + product = product, + band = band, + start = dates[1], + end = dates[length(dates)], + km_ab = size, km_lr = size, + progress = progress + ) + dat <- rbind(dat, d) + } + } + + # clean up data outputs so there isn't extra data, format classes. 
+ output <- as.data.frame(cbind(dat$modis_date, dat$calendar_date, dat$band, dat$tile, dat$site, dat$latitude, dat$longitude, dat$pixel, dat$value), stringsAsFactors = FALSE) + names(output) <- c("modis_date", "calendar_date", "band", "tile", "site_id", "lat", "lon", "pixels", "data") + + output[, 5:9] <- lapply(output[, 5:9], as.numeric) + + # scale the data + stdev to proper units + output$data <- output$data * (as.numeric(dat$scale)) + output$lat <- round(output$lat, 4) + output$lon <- round(output$lon, 4) + + # remove bad values if QC filter is on + if (QC_filter) { + qc_band <- bands$band[which(grepl(var, bands$band, ignore.case = TRUE) & grepl("QC", bands$band, ignore.case = TRUE))] + + if (run_parallel) { + qc <- foreach::foreach(i = seq_along(site_info$site_id), .combine = rbind) %dopar% + MODISTools::mt_subset( + lat = site_coords$lat[i], + lon = site_coords$lon[i], + product = product, + band = qc_band, + start = dates[1], + end = dates[length(dates)], + km_ab = size, km_lr = size, + progress = progress + ) + } else { + qc <- MODISTools::mt_subset( + lat = site_coords$lat[i], + lon = site_coords$lon[i], + product = product, + band = qc_band, + start = dates[1], + end = dates[length(dates)], + km_ab = size, km_lr = size, + progress = progress + ) } - - site <- output[which(output$site_id == site_info$site_id[i]), ] - site$modis_date <- substr(site$modis_date, 2, length(site$modis_date)) - - if (QC_filter) + + output$qc <- as.character(qc$value) + + # convert QC values and keep only okay values + for (i in seq_len(nrow(output))) { - fname <- paste(site_info$site_id[i], "/", product, "_", band, "_", start_date, "-", end_date, "_filtered.csv", sep = "") + # QC flags are stored as an 8-bit mask + # we only care about the 3 least-significant bits + qc_flags <- intToBits(as.integer(output$qc[i])) # NB big-endian (ones place first) + qc_flags <- as.integer(rev(utils::head(qc_flags, 3))) # now ones place last + output$qc[i] <- paste(qc_flags, collapse = "") + } + good <- which(output$qc %in% c("000", "001")) + if (length(good) > 0) { + output <- output[good, ] } else { - fname <- paste(site_info$site_id[i], "/", product, "_", band, "_", start_date, "-", end_date, "_unfiltered.csv", sep = "") + PEcAn.logger::logger.warn("All QC values are bad. 
No data to output with QC filter == TRUE.") } - fname <- file.path(outdir, fname) - utils::write.csv(site, fname, row.names = FALSE) } - - } - + + # unregister cores since parallel process is done + if (run_parallel) { + parallel::stopCluster(cl) + } + + # break dataoutput up by site and save out chunks + if (!(is.null(outdir))) { + for (i in seq_along(site_info$site_id)) + { + if (!(dir.exists(file.path(outdir, site_info$site_id[i])))) { + dir.create(file.path(outdir, site_info$site_id[i])) + } + + site <- output[which(output$site_id == site_info$site_id[i]), ] + site$modis_date <- substr(site$modis_date, 2, length(site$modis_date)) + + if (QC_filter) { + fname <- paste(site_info$site_id[i], "/", product, "_", band, "_", start_date, "-", end_date, "_filtered.csv", sep = "") + } else { + fname <- paste(site_info$site_id[i], "/", product, "_", band, "_", start_date, "-", end_date, "_unfiltered.csv", sep = "") + } + fname <- file.path(outdir, fname) + utils::write.csv(site, fname, row.names = FALSE) + } + } + return(output) } - + ########### temporarily removed for now as python2 is being discontinued and modules are not working correctly # if (package_method == "reticulate"){ # # load in python script # script.path <- file.path(system.file("extract_modis_data.py", package = "PEcAn.data.remote")) # reticulate::source_python(script.path) - # + # # # extract the data # output <- extract_modis_data(product = product, band = band, lat = lat, lon = lon, start_date = start_date, end_date = end_date, size = size, band_qc = band_qc, band_sd = band_sd) # output[ ,5:10] <- lapply(output[ ,5:10], as.numeric) # output$lat <- round(output$lat, 4) # output$lon <- round(output$lon, 4) - # + # # if (!(is.null(outdir))) # { # fname <- paste(product, "_", band, "_", start_date, "_", end_date, "_", lat, "_", lon, ".csv", sep = "") # fname <- file.path(outdir, fname) # write.csv(output, fname) # } - # + # # return(output)} - } diff --git a/modules/data.remote/R/download.thredds.R b/modules/data.remote/R/download.thredds.R index d4a5ca4c728..e0616f34c20 100755 --- a/modules/data.remote/R/download.thredds.R +++ b/modules/data.remote/R/download.thredds.R @@ -1,102 +1,103 @@ # ##' @title download.thredds.AGB ##' @name download.thredds.AGB -##' +##' ##' @param outdir Where to place output -##' @param site_ids What locations to download data at? +##' @param site_ids What locations to download data at? ##' @param run_parallel Logical. Download and extract files in parallel? ##' @param ncores Optional. If run_parallel=TRUE how many cores to use? 
If left as NULL will select max number -1 -##' +##' ##' @return data.frame summarize the results of the function call -##' +##' ##' @examples ##' \dontrun{ ##' outdir <- "~/scratch/abg_data/" -##' results <- PEcAn.data.remote::download.thredds.AGB(outdir=outdir, -##' site_ids = c(676, 678, 679, 755, 767, 1000000030, 1000000145, 1000025731), +##' results <- PEcAn.data.remote::download.thredds.AGB(outdir=outdir, +##' site_ids = c(676, 678, 679, 755, 767, 1000000030, 1000000145, 1000025731), ##' run_parallel = TRUE, ncores = 8) ##' } ##' @export ##' @author Bailey Morrison ##' -download.thredds.AGB <- function(outdir = NULL, site_ids, run_parallel = FALSE, - ncores = NULL) { - - +download.thredds.AGB <- function(outdir = NULL, site_ids, run_parallel = FALSE, + ncores = NULL) { con <- PEcAn.DB::db.open( - list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE)) + list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE + ) + ) site_ID <- as.character(site_ids) suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", - ids = site_ID, .con = con)) - suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) + ids = site_ID, .con = con + )) + suppressWarnings(qry_results <- DBI::dbSendQuery(con, site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) - site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, - lon=qry_results$lon, time_zone=qry_results$time_zone) - - mylat = site_info$lat - mylon = site_info$lon - + site_info <- list( + site_id = qry_results$id, site_name = qry_results$sitename, lat = qry_results$lat, + lon = qry_results$lon, time_zone = qry_results$time_zone + ) + + mylat <- site_info$lat + mylon <- site_info$lon + # site specific URL for dataset --> these will be made to work for all THREDDS datasets in the future, but for now, just testing with # this one dataset. This specific dataset only has 1 year (2005), so no temporal looping for now. 
- obs_file = "https://thredds.daac.ornl.gov/thredds/dodsC/ornldaac/1221/agb_5k.nc4" - obs_err = "https://thredds.daac.ornl.gov/thredds/dodsC/ornldaac/1221/agb_SE_5k.nc4" - files = c(obs_file, obs_err) - + obs_file <- "https://thredds.daac.ornl.gov/thredds/dodsC/ornldaac/1221/agb_5k.nc4" + obs_err <- "https://thredds.daac.ornl.gov/thredds/dodsC/ornldaac/1221/agb_SE_5k.nc4" + files <- c(obs_file, obs_err) + # function to extract ncdf data from lat and lon values for value + SE URLs - get_data = function(i) - { - data = ncdf4::nc_open(files[1]) - agb_lats = ncdf4::ncvar_get(data, "latitude") - agb_lons = ncdf4::ncvar_get(data, "longitude") - - agb_x = which(abs(agb_lons- mylon[i]) == min(abs(agb_lons - mylon[i]))) - agb_y = which(abs(agb_lats- mylat[i]) == min(abs(agb_lats - mylat[i]))) - - start = c(agb_x, agb_y) - count = c(1,1) - d = ncdf4::ncvar_get(ncdf4::nc_open(files[1]), "abvgrndbiomass", start=start, count = count) + get_data <- function(i) { + data <- ncdf4::nc_open(files[1]) + agb_lats <- ncdf4::ncvar_get(data, "latitude") + agb_lons <- ncdf4::ncvar_get(data, "longitude") + + agb_x <- which(abs(agb_lons - mylon[i]) == min(abs(agb_lons - mylon[i]))) + agb_y <- which(abs(agb_lats - mylat[i]) == min(abs(agb_lats - mylat[i]))) + + start <- c(agb_x, agb_y) + count <- c(1, 1) + d <- ncdf4::ncvar_get(ncdf4::nc_open(files[1]), "abvgrndbiomass", start = start, count = count) if (is.na(d)) d <- NA - sd = ncdf4::ncvar_get(ncdf4::nc_open(files[2]), "agbSE", start=start, count = count) + sd <- ncdf4::ncvar_get(ncdf4::nc_open(files[2]), "agbSE", start = start, count = count) if (is.na(sd)) sd <- NA - date = "2005" - site = site_ID[i] - output = as.data.frame(cbind(d, sd, date, site)) - names(output) = c("value", "sd", "date", "siteID") - + date <- "2005" + site <- site_ID[i] + output <- as.data.frame(cbind(d, sd, date, site)) + names(output) <- c("value", "sd", "date", "siteID") + # option to save output dataset to directory for user. 
- if (!(is.null(outdir))) - { - utils::write.csv(output, file = paste0(outdir, "THREDDS_", sub("^([^.]*).*", "\\1",basename(files[1])), "_site_", site, ".csv"), row.names = FALSE) + if (!(is.null(outdir))) { + utils::write.csv(output, file = paste0(outdir, "THREDDS_", sub("^([^.]*).*", "\\1", basename(files[1])), "_site_", site, ".csv"), row.names = FALSE) } - + return(output) } - + ## setup parallel if (run_parallel) { if (!is.null(ncores)) { ncores <- ncores } else { - ncores <- parallel::detectCores() -1 + ncores <- parallel::detectCores() - 1 } - + PEcAn.logger::logger.info(paste0("Running in parallel with: ", ncores)) - cl = parallel::makeCluster(ncores) + cl <- parallel::makeCluster(ncores) doParallel::registerDoParallel(cl) - data = foreach::foreach(i = seq_along(mylat), .combine = rbind) %dopar% get_data(i) + data <- foreach::foreach(i = seq_along(mylat), .combine = rbind) %dopar% get_data(i) parallel::stopCluster(cl) - } else { # setup sequential run - data = data.frame() + data <- data.frame() for (i in seq_along(mylat)) { - data = rbind(data, get_data(i)) + data <- rbind(data, get_data(i)) } } - + return(data) } diff --git a/modules/data.remote/R/extract_phenology_MODIS.R b/modules/data.remote/R/extract_phenology_MODIS.R index d99620950b5..65e3ce3994b 100644 --- a/modules/data.remote/R/extract_phenology_MODIS.R +++ b/modules/data.remote/R/extract_phenology_MODIS.R @@ -1,94 +1,99 @@ -##' Get MODIS phenology data by date and location -##' +##' Get MODIS phenology data by date and location +##' ##' @export -##' @param site_info A dataframe of site info containing the BETYdb site ID, -##' site name, latitude, and longitude, e.g. +##' @param site_info A dataframe of site info containing the BETYdb site ID, +##' site name, latitude, and longitude, e.g. ##' @param start_date Start date to download data ##' @param end_date End date to download data ##' @param outdir Path to store the outputs -##' @param run_parallel optional method to download data parallely. Only works if more than 1 +##' @param run_parallel optional method to download data parallely. Only works if more than 1 ##' site is needed and there are >1 CPUs available. -##' @param ncores number of cpus to use if run_parallel is set to TRUE. If you do not know the -##' number of CPU's available, enter NULL. -##' @return the path for output file +##' @param ncores number of cpus to use if run_parallel is set to TRUE. If you do not know the +##' number of CPU's available, enter NULL. +##' @return the path for output file ##' The output file will be saved as a CSV file to the outdir. ##' Output column names are "year", "site_id", "lat", "lon", "leafonday","leafoffday","leafon_qa","leafoff_qa" ##' @author Qianyu Li -extract_phenology_MODIS<- function(site_info,start_date,end_date,outdir,run_parallel = TRUE,ncores = NULL){ - +extract_phenology_MODIS <- function(site_info, start_date, end_date, outdir, run_parallel = TRUE, ncores = NULL) { if (is.null(outdir)) { PEcAn.logger::logger.error("No output directory found. 
Please provide it.") } else { - #Set up the start and end date of the extraction - start_YEARDOY <- paste0(lubridate::year(start_date),sprintf("%03d", 1)) - end_YEARDOY <- paste0(lubridate::year(end_date),sprintf("%03d", 365)) - #Extracting leaf-on, leaf-off, and Quality Assurance (QA) from MODIS MCD12Q2 product - leafon_data <- PEcAn.data.remote::call_MODIS(outdir = NULL, - var = "Greenup", - site_info = site_info, - product_dates = c(start_YEARDOY, end_YEARDOY), - run_parallel = as.logical(run_parallel), - ncores = ncores, - product = "MCD12Q2", - band = "MidGreenup.Num_Modes_01", - package_method = "MODISTools", - QC_filter = FALSE, - progress = TRUE) + # Set up the start and end date of the extraction + start_YEARDOY <- paste0(lubridate::year(start_date), sprintf("%03d", 1)) + end_YEARDOY <- paste0(lubridate::year(end_date), sprintf("%03d", 365)) + # Extracting leaf-on, leaf-off, and Quality Assurance (QA) from MODIS MCD12Q2 product + leafon_data <- PEcAn.data.remote::call_MODIS( + outdir = NULL, + var = "Greenup", + site_info = site_info, + product_dates = c(start_YEARDOY, end_YEARDOY), + run_parallel = as.logical(run_parallel), + ncores = ncores, + product = "MCD12Q2", + band = "MidGreenup.Num_Modes_01", + package_method = "MODISTools", + QC_filter = FALSE, + progress = TRUE + ) - leafoff_data <- PEcAn.data.remote::call_MODIS(outdir = NULL, - var = "Greendown", - site_info = site_info, - product_dates = c(start_YEARDOY, end_YEARDOY), - run_parallel = as.logical(run_parallel), - ncores = ncores, - product = "MCD12Q2", - band = "MidGreendown.Num_Modes_01", - package_method = "MODISTools", - QC_filter = FALSE, - progress = TRUE) + leafoff_data <- PEcAn.data.remote::call_MODIS( + outdir = NULL, + var = "Greendown", + site_info = site_info, + product_dates = c(start_YEARDOY, end_YEARDOY), + run_parallel = as.logical(run_parallel), + ncores = ncores, + product = "MCD12Q2", + band = "MidGreendown.Num_Modes_01", + package_method = "MODISTools", + QC_filter = FALSE, + progress = TRUE + ) - qa_data <- PEcAn.data.remote::call_MODIS(outdir = NULL, - var = "QA", - site_info = site_info, - product_dates = c(start_YEARDOY, end_YEARDOY), - run_parallel = as.logical(run_parallel), - ncores = ncores, - product = "MCD12Q2", - band = "QA_Detailed.Num_Modes_01", - package_method = "MODISTools", - QC_filter = FALSE, - progress = TRUE) + qa_data <- PEcAn.data.remote::call_MODIS( + outdir = NULL, + var = "QA", + site_info = site_info, + product_dates = c(start_YEARDOY, end_YEARDOY), + run_parallel = as.logical(run_parallel), + ncores = ncores, + product = "MCD12Q2", + band = "QA_Detailed.Num_Modes_01", + package_method = "MODISTools", + QC_filter = FALSE, + progress = TRUE + ) - #Example function for unpacking QA for each date phenometric from MCD12Q2_Collection6_UserGuide - #in the order: Greenup, MidGreenup, Maturity, Peak, Senescence, MidGreendown, Dormancy - UnpackDetailedQA <- function(x){ - bits <- as.integer(intToBits(x)) - quals <- sapply(seq(1, 16, by=2), function(i) sum(bits[i:(i+1)] * 2^c(0, 1)))[1:7] - return(quals) - } - #Extract QA for leaf-on and leaf-off dates. Values are in the range 0–3 corresponding to “best”, “good”, “fair”, and “poor”. 
- qa_ind<-matrix(NA,nrow=length(qa_data[,1]),ncol=2) - for (i in seq_along(qa_data$data)){ - qa_ind[i,]<-cbind(UnpackDetailedQA(qa_data$data[i])[2],UnpackDetailedQA(qa_data$data[i])[6]) + # Example function for unpacking QA for each date phenometric from MCD12Q2_Collection6_UserGuide + # in the order: Greenup, MidGreenup, Maturity, Peak, Senescence, MidGreendown, Dormancy + UnpackDetailedQA <- function(x) { + bits <- as.integer(intToBits(x)) + quals <- sapply(seq(1, 16, by = 2), function(i) sum(bits[i:(i + 1)] * 2^c(0, 1)))[1:7] + return(quals) } - - leafphdata <- cbind(leafon_data %>% as.data.frame %>% dplyr::select("calendar_date", "site_id", "lat", "lon", "data"), leafoff_data$data,qa_ind)%>% - `colnames<-`(c("year", "site_id", "lat", "lon", "leafonday","leafoffday","leafon_qa","leafoff_qa")) - leafphdata$leafonday[leafphdata$leafonday==32767]<-NA #exclude the data with fill values - leafphdata$leafoffday[leafphdata$leafoffday==32767]<-NA - leafphdata$leafonday[leafphdata$leafon_qa==3]<-NA #exclude the data when QA is poor - leafphdata$leafoffday[leafphdata$leafoff_qa==3]<-NA - leafphdata$leafonday[leafphdata$leafonday>leafphdata$leafoffday]<-NA #exclude the data when leaf-on date is larger than leaf-off date - leafphdata$leafonday<-lubridate::yday(as.Date(leafphdata$leafonday)) #convert the dates to Day-of-Year format - leafphdata$leafoffday<-lubridate::yday(as.Date(leafphdata$leafoffday)) - leafphdata$year<-lubridate::year(leafphdata$year) - leafphdata$site_id<-as.character(leafphdata$site_id) - - file_path<-file.path(outdir,"leaf_phenology.csv") - PEcAn.logger::logger.info(paste0("Storing results in: ",file_path)) + # Extract QA for leaf-on and leaf-off dates. Values are in the range 0–3 corresponding to “best”, “good”, “fair”, and “poor”. + qa_ind <- matrix(NA, nrow = length(qa_data[, 1]), ncol = 2) + for (i in seq_along(qa_data$data)) { + qa_ind[i, ] <- cbind(UnpackDetailedQA(qa_data$data[i])[2], UnpackDetailedQA(qa_data$data[i])[6]) + } + + leafphdata <- cbind(leafon_data %>% as.data.frame() %>% dplyr::select("calendar_date", "site_id", "lat", "lon", "data"), leafoff_data$data, qa_ind) %>% + `colnames<-`(c("year", "site_id", "lat", "lon", "leafonday", "leafoffday", "leafon_qa", "leafoff_qa")) + leafphdata$leafonday[leafphdata$leafonday == 32767] <- NA # exclude the data with fill values + leafphdata$leafoffday[leafphdata$leafoffday == 32767] <- NA + leafphdata$leafonday[leafphdata$leafon_qa == 3] <- NA # exclude the data when QA is poor + leafphdata$leafoffday[leafphdata$leafoff_qa == 3] <- NA + leafphdata$leafonday[leafphdata$leafonday > leafphdata$leafoffday] <- NA # exclude the data when leaf-on date is larger than leaf-off date + leafphdata$leafonday <- lubridate::yday(as.Date(leafphdata$leafonday)) # convert the dates to Day-of-Year format + leafphdata$leafoffday <- lubridate::yday(as.Date(leafphdata$leafoffday)) + leafphdata$year <- lubridate::year(leafphdata$year) + leafphdata$site_id <- as.character(leafphdata$site_id) + + file_path <- file.path(outdir, "leaf_phenology.csv") + PEcAn.logger::logger.info(paste0("Storing results in: ", file_path)) utils::write.csv(leafphdata, file = file_path, row.names = FALSE) return(file_path) } -} \ No newline at end of file +} diff --git a/modules/data.remote/R/regrid.R b/modules/data.remote/R/regrid.R index bd8c5ead124..018d0fd92dd 100644 --- a/modules/data.remote/R/regrid.R +++ b/modules/data.remote/R/regrid.R @@ -8,14 +8,15 @@ regrid <- function(latlon.data) { PEcAn.utils::need_packages("raster", "sp") ## from 
http://stackoverflow.com/a/15351169/513006 spdf <- sp::SpatialPointsDataFrame(data.frame(x = latlon.data$lon, y = latlon.data$lat), - data = data.frame(z = latlon.data$yield)) + data = data.frame(z = latlon.data$yield) + ) ## Make evenly spaced raster, same extent as original data e <- raster::extent(spdf) ## Determine ratio between x and y dimensions ratio <- (e@xmax - e@xmin) / (e@ymax - e@ymin) ## Create template raster to sample to - r <- raster::raster(nrows = 56, ncols = floor(56 * ratio), ext = raster::extent(spdf)) + r <- raster::raster(nrows = 56, ncols = floor(56 * ratio), ext = raster::extent(spdf)) rf <- raster::rasterize(spdf, r, field = "z", fun = mean) # rdf <- data.frame( rasterToPoints( rf ) ) colnames(rdf) <- @@ -36,12 +37,11 @@ regrid <- function(latlon.data) { ##' @return writes netCDF file ##' @author David LeBauer grid2netcdf <- function(gdata, date = "9999-09-09", outfile = "out.nc") { - ## Fill in NA's - lats <- unique(gdata$lat) - lons <- unique(gdata$lon) - dates <- unique(gdata$date) - latlons <- expand.grid(lat = lats, lon = lons, date = dates) + lats <- unique(gdata$lat) + lons <- unique(gdata$lon) + dates <- unique(gdata$date) + latlons <- expand.grid(lat = lats, lon = lons, date = dates) if (requireNamespace("data.table", quietly = TRUE)) { latlons <- data.table::data.table(latlons) } else { @@ -52,12 +52,14 @@ grid2netcdf <- function(gdata, date = "9999-09-09", outfile = "out.nc") { ) } grid.data <- merge(latlons, gdata, by = c("lat", "lon", "date"), all.x = TRUE) - lat <- ncdf4::ncdim_def("lat", "degrees_east", vals = lats, longname = "station_latitude") - lon <- ncdf4::ncdim_def("lon", "degrees_north", vals = lons, longname = "station_longitude") - time <- ncdf4::ncdim_def(name = "time", units = paste0("days since 1700-01-01"), - vals = as.numeric(lubridate::ymd(paste0(years, "01-01")) - lubridate::ymd("1700-01-01")), - calendar = "standard", - unlim = TRUE) + lat <- ncdf4::ncdim_def("lat", "degrees_east", vals = lats, longname = "station_latitude") + lon <- ncdf4::ncdim_def("lon", "degrees_north", vals = lons, longname = "station_longitude") + time <- ncdf4::ncdim_def( + name = "time", units = paste0("days since 1700-01-01"), + vals = as.numeric(lubridate::ymd(paste0(years, "01-01")) - lubridate::ymd("1700-01-01")), + calendar = "standard", + unlim = TRUE + ) yieldvar <- PEcAn.utils::to_ncvar("CropYield", list(lat, lon, time)) nc <- ncdf4::nc_create(filename = outfile, vars = list(CropYield = yieldvar)) diff --git a/modules/data.remote/R/remote_process.R b/modules/data.remote/R/remote_process.R index 4e745e55b22..19002fc2197 100644 --- a/modules/data.remote/R/remote_process.R +++ b/modules/data.remote/R/remote_process.R @@ -19,9 +19,9 @@ remote_process <- function(settings) { # start, end : effective start, end dates created after checking the DB status. 
These dates are sent to rp_control for downloading and processing data # write_raw_start, write_raw_end : start, end dates which are used while inserting and updating the DB # the "pro" version of these variables have the same meaning and are used to refer to the processed file - + # The value of remotefile_check_flag denotes the following cases: - + # When processed file is requested, # 1 - There are no existing raw and processed files of the requested type in the DB # 2 - Requested processed file does not exist, the raw file used to create is it present and matches with the requested daterange @@ -29,131 +29,145 @@ remote_process <- function(settings) { # 4 - Both processed and raw file of the requested type exists, but they have to be updated to match with the requested daterange # 5 - Raw file required for creating the processed file exists with the required daterange and the processed file needs to be updated. Here the new processed file will now contain data for the entire daterange of the existing raw file # 6 - There is a existing processed file of the requested type but the raw file used to create it has been deleted. Here, the raw file will be created again and the processed file will be replaced entirely with the one created from new raw file - + # When raw file is requested, # 1 - There is no existing raw the requested type in the DB # 2 - existing raw file will be updated - + RpTools <- reticulate::import("RpTools") - + # extract the variables from the settings list - siteid <- as.numeric(settings$run$site$id) - siteid_short <- paste0(siteid %/% 1e+09, "-", siteid %% 1e+09) - outdir <- settings$database$dbfiles - lat <- as.numeric(settings$run$site$lat) - lon <- as.numeric(settings$run$site$lon) - start <- as.character(as.Date(settings$run$start.date)) - end <- as.character(as.Date(settings$run$end.date)) - source <- settings$remotedata$source - collection <- settings$remotedata$collection - reg_info <- read_remote_registry(source, collection) - collection <- reg_info$pecan_name - raw_mimetype <- reg_info$raw_mimetype - raw_formatname <- reg_info$raw_formatname - pro_mimetype <- reg_info$pro_mimetype - pro_formatname <- reg_info$pro_formatname - + siteid <- as.numeric(settings$run$site$id) + siteid_short <- paste0(siteid %/% 1e+09, "-", siteid %% 1e+09) + outdir <- settings$database$dbfiles + lat <- as.numeric(settings$run$site$lat) + lon <- as.numeric(settings$run$site$lon) + start <- as.character(as.Date(settings$run$start.date)) + end <- as.character(as.Date(settings$run$end.date)) + source <- settings$remotedata$source + collection <- settings$remotedata$collection + reg_info <- read_remote_registry(source, collection) + collection <- reg_info$pecan_name + raw_mimetype <- reg_info$raw_mimetype + raw_formatname <- reg_info$raw_formatname + pro_mimetype <- reg_info$pro_mimetype + pro_formatname <- reg_info$pro_formatname + if (!is.null(reg_info$scale)) { - if(!is.null(settings$remotedata$scale)){ + if (!is.null(settings$remotedata$scale)) { scale <- as.double(settings$remotedata$scale) scale <- format(scale, nsmall = 1) - }else{ + } else { scale <- as.double(reg_info$scale) scale <- format(scale, nsmall = 1) PEcAn.logger::logger.warn(paste0("scale not provided, using default scale ", scale)) } - }else{ + } else { scale <- NULL } - + if (!is.null(reg_info$qc)) { - if(!is.null(settings$remotedata$qc)){ + if (!is.null(settings$remotedata$qc)) { qc <- as.double(settings$remotedata$qc) qc <- format(qc, nsmall = 1) - }else{ + } else { qc <- as.double(reg_info$qc) qc <- format(qc, 
nsmall = 1) PEcAn.logger::logger.warn(paste0("qc not provided, using default qc ", qc)) } - }else{ + } else { qc <- NULL } if (!is.null(reg_info$projection)) { - if(!is.null(settings$remotedata$projection)){ + if (!is.null(settings$remotedata$projection)) { projection <- settings$remotedata$projection - }else{ + } else { projection <- reg_info$projection PEcAn.logger::logger.warn(paste0("projection not provided, using default projection ", projection)) } - }else{ + } else { projection <- NULL } - - algorithm <- settings$remotedata$algorithm - credfile <- settings$remotedata$credfile - out_get_data <- settings$remotedata$out_get_data + + algorithm <- settings$remotedata$algorithm + credfile <- settings$remotedata$credfile + out_get_data <- settings$remotedata$out_get_data out_process_data <- settings$remotedata$out_process_data - overwrite <- settings$remotedata$overwrite + overwrite <- settings$remotedata$overwrite if (is.null(overwrite)) { overwrite <- FALSE } - - - PEcAn.logger::severeifnot("Check if siteid is of numeric type and is not NULL", - is.numeric(siteid)) - PEcAn.logger::severeifnot("Check if outdir is of character type and is not NULL", - is.character(outdir)) - PEcAn.logger::severeifnot("Check if source is of character type and is not NULL", - is.character(source)) + + + PEcAn.logger::severeifnot( + "Check if siteid is of numeric type and is not NULL", + is.numeric(siteid) + ) + PEcAn.logger::severeifnot( + "Check if outdir is of character type and is not NULL", + is.character(outdir) + ) + PEcAn.logger::severeifnot( + "Check if source is of character type and is not NULL", + is.character(source) + ) these_sources <- gsub("^.+?\\.(.+?)\\..*$", "\\1", list.files(system.file("registration", package = "PEcAn.data.remote"))) - PEcAn.logger::severeifnot(paste0("Source should be one of ", paste(these_sources, collapse = ' ')), toupper(source) %in% these_sources) + PEcAn.logger::severeifnot(paste0("Source should be one of ", paste(these_sources, collapse = " ")), toupper(source) %in% these_sources) # collection validation to be implemented if (!is.null(projection)) { - PEcAn.logger::severeifnot("projection should be of character type", - is.character(projection)) + PEcAn.logger::severeifnot( + "projection should be of character type", + is.character(projection) + ) } if (!is.null(algorithm)) { - PEcAn.logger::severeifnot("algorithm should be of character type", - is.character(algorithm)) + PEcAn.logger::severeifnot( + "algorithm should be of character type", + is.character(algorithm) + ) } if (!is.null(credfile)) { - PEcAn.logger::severeifnot("credfile should be of character type", - is.character(credfile)) + PEcAn.logger::severeifnot( + "credfile should be of character type", + is.character(credfile) + ) } PEcAn.logger::severeifnot( "Check if out_get_data is of character type and is not NULL", is.character(out_get_data) ) if (!is.null(out_process_data)) { - PEcAn.logger::severeifnot("out_process_data should be of character type", - is.character(out_process_data)) + PEcAn.logger::severeifnot( + "out_process_data should be of character type", + is.character(out_process_data) + ) } - - + + dbcon <- PEcAn.DB::db.open(settings$database$bety) on.exit(PEcAn.DB::db.close(dbcon), add = TRUE) - + # extract the AOI of the site from BETYdb coords <- unlist(PEcAn.DB::db.query( sprintf("select ST_AsGeoJSON(geometry) from sites where id=%f", siteid), con = dbcon ), use.names = FALSE) - - if(!(tolower(gsub(".*type(.+),coordinates.*", "\\1", gsub("[^=A-Za-z,0-9{} ]+","",coords))) %in% 
reg_info$coordtype)){ + + if (!(tolower(gsub(".*type(.+),coordinates.*", "\\1", gsub("[^=A-Za-z,0-9{} ]+", "", coords))) %in% reg_info$coordtype)) { PEcAn.logger::logger.severe(paste0("Coordinate type of the site is not supported by the requested source, please make sure that your site type is ", reg_info$coordtype)) } - + # construct raw file name remotedata_file_names <- construct_remotedata_filename(source, collection, siteid_short, scale, projection, qc, algorithm, out_process_data) - + raw_file_name <- remotedata_file_names$raw_file_name - + pro_file_name <- remotedata_file_names$pro_file_name - + # check if any data is already present in the inputs table dbstatus <- remotedata_db_check( @@ -169,79 +183,79 @@ remote_process <- function(settings) { overwrite = overwrite, dbcon = dbcon ) - - remotefile_check_flag <- dbstatus$remotefile_check_flag - start <- dbstatus$start - end <- dbstatus$end - stage_get_data <- dbstatus$stage_get_data - write_raw_start <- dbstatus$write_raw_start - write_raw_end <- dbstatus$write_raw_end - raw_merge <- dbstatus$raw_merge + + remotefile_check_flag <- dbstatus$remotefile_check_flag + start <- dbstatus$start + end <- dbstatus$end + stage_get_data <- dbstatus$stage_get_data + write_raw_start <- dbstatus$write_raw_start + write_raw_end <- dbstatus$write_raw_end + raw_merge <- dbstatus$raw_merge existing_raw_file_path <- dbstatus$existing_raw_file_path - stage_process_data <- dbstatus$stage_process_data - write_pro_start <- dbstatus$write_pro_start - write_pro_end <- dbstatus$write_pro_end - pro_merge <- dbstatus$pro_merge - input_file <- dbstatus$input_file + stage_process_data <- dbstatus$stage_process_data + write_pro_start <- dbstatus$write_pro_start + write_pro_end <- dbstatus$write_pro_end + pro_merge <- dbstatus$pro_merge + input_file <- dbstatus$input_file existing_pro_file_path <- dbstatus$existing_pro_file_path - raw_check <- dbstatus$raw_check - pro_check <- dbstatus$pro_check - + raw_check <- dbstatus$raw_check + pro_check <- dbstatus$pro_check - if(stage_get_data == FALSE && stage_process_data == FALSE){ + + if (stage_get_data == FALSE && stage_process_data == FALSE) { # requested data already exists, no need to call rp_control - settings$remotedata$raw_id <- raw_check$id + settings$remotedata$raw_id <- raw_check$id settings$remotedata$raw_path <- raw_check$file_path - settings$remotedata$pro_id <- pro_check$id + settings$remotedata$pro_id <- pro_check$id settings$remotedata$pro_path <- pro_check$file_path return(settings) } - + # construct outdir path outdir <- file.path(outdir, paste(toupper(source), "site", siteid_short, sep = "_")) - + fcn.args <- list() - fcn.args$coords <- coords - fcn.args$outdir <- outdir - fcn.args$lat <- lat - fcn.args$lon <- lon - fcn.args$start <- start - fcn.args$end <- end - fcn.args$source <- source - fcn.args$collection <- collection - fcn.args$siteid <- siteid_short - fcn.args$scale <- as.double(scale) - fcn.args$projection <- projection - fcn.args$qc <- as.double(qc) - fcn.args$algorithm <- algorithm - fcn.args$input_file <- input_file - fcn.args$credfile <- credfile - fcn.args$out_get_data <- out_get_data - fcn.args$out_process_data <- out_process_data - fcn.args$stage_get_data <- stage_get_data - fcn.args$stage_process_data <- stage_process_data - fcn.args$raw_merge <- raw_merge - fcn.args$pro_merge <- pro_merge + fcn.args$coords <- coords + fcn.args$outdir <- outdir + fcn.args$lat <- lat + fcn.args$lon <- lon + fcn.args$start <- start + fcn.args$end <- end + fcn.args$source <- source + fcn.args$collection 
<- collection + fcn.args$siteid <- siteid_short + fcn.args$scale <- as.double(scale) + fcn.args$projection <- projection + fcn.args$qc <- as.double(qc) + fcn.args$algorithm <- algorithm + fcn.args$input_file <- input_file + fcn.args$credfile <- credfile + fcn.args$out_get_data <- out_get_data + fcn.args$out_process_data <- out_process_data + fcn.args$stage_get_data <- stage_get_data + fcn.args$stage_process_data <- stage_process_data + fcn.args$raw_merge <- raw_merge + fcn.args$pro_merge <- pro_merge fcn.args$existing_raw_file_path <- existing_raw_file_path fcn.args$existing_pro_file_path <- existing_pro_file_path - fcn.args$raw_file_name <- raw_file_name - fcn.args$pro_file_name <- pro_file_name - + fcn.args$raw_file_name <- raw_file_name + fcn.args$pro_file_name <- pro_file_name + + + - - arg.string <- PEcAn.utils::listToArgString(fcn.args) - + cmdFcn <- paste0("RpTools$rp_control(", arg.string, ")") PEcAn.logger::logger.debug(paste0("Remote module executing the following function:\n", cmdFcn)) - + # call rp_control output <- do.call(RpTools$rp_control, fcn.args) - - + + # insert output data in the DB db_out <- remotedata_db_insert( @@ -262,19 +276,19 @@ remote_process <- function(settings) { pro_formatname = pro_formatname, dbcon = dbcon ) - - + + # return the ids and paths of the inserted data if (!is.null(out_get_data)) { - settings$remotedata$raw_id <- db_out$raw_id + settings$remotedata$raw_id <- db_out$raw_id settings$remotedata$raw_path <- db_out$raw_path } if (!is.null(out_process_data)) { - settings$remotedata$pro_id <- db_out$pro_id + settings$remotedata$pro_id <- db_out$pro_id settings$remotedata$pro_path <- db_out$pro_path } - - return (settings) + + return(settings) } @@ -317,32 +331,34 @@ construct_remotedata_filename <- # skip if a parameter is not applicable and is NULL if (is.null(scale)) { scale_str <- "_" - } else{ + } else { scale_str <- paste0("_", format(scale, nsmall = 1), "_") } if (is.null(projection)) { prj_str <- "" - }else{ + } else { prj_str <- paste0(projection, "_") } if (is.null(qc)) { qc_str <- "" - } else{ + } else { qc_str <- paste0(format(qc, nsmall = 1), "_") } - + raw_file_name <- paste0(toupper(source), "_", collection, scale_str, prj_str, qc_str, "site_", siteid) - if(!is.null(out_process_data)){ + if (!is.null(out_process_data)) { alg_str <- paste0(algorithm, "_") var_str <- paste0(out_process_data, "_") pro_file_name <- paste0(toupper(source), "_", collection, scale_str, prj_str, qc_str, alg_str, var_str, "site_", siteid) - }else{ + } else { pro_file_name <- NULL } - - remotedata_file_names <- list(raw_file_name = raw_file_name, - pro_file_name = pro_file_name) - + + remotedata_file_names <- list( + raw_file_name = raw_file_name, + pro_file_name = pro_file_name + ) + return(remotedata_file_names) } @@ -367,41 +383,40 @@ construct_remotedata_filename <- ##' get_remote_data) ##' } ##' @author Ayush Prasad -set_stage <- function(result, req_start, req_end, stage) { - db_start <- as.Date(result$start_date) - db_end <- as.Date(result$end_date) +set_stage <- function(result, req_start, req_end, stage) { + db_start <- as.Date(result$start_date) + db_end <- as.Date(result$end_date) req_start <- as.Date(req_start) - req_end <- as.Date(req_end) - stage <- TRUE - merge <- TRUE - + req_end <- as.Date(req_end) + stage <- TRUE + merge <- TRUE + # data already exists if ((req_start >= db_start) && (req_end <= db_end)) { - req_start <- "dont write" - req_end <- "dont write" - stage <- FALSE - merge <- FALSE + req_start <- "dont write" + req_end <- "dont write" + 
stage <- FALSE + merge <- FALSE write_start <- "dont write" - write_end <- "dont write" + write_end <- "dont write" } else if (req_start < db_start && db_end < req_end) { # data has to be replaced - merge <- "replace" + merge <- "replace" write_start <- req_start - write_end <- req_end - stage <- TRUE + write_end <- req_end + stage <- TRUE } else if ((req_start > db_start) && (req_end > db_end)) { # forward case - req_start <- db_end + 1 + req_start <- db_end + 1 write_start <- db_start - write_end <- req_end + write_end <- req_end } else if ((req_start < db_start) && (req_end < db_end)) { # backward case - req_end <- db_start - 1 - write_end <- db_end + req_end <- db_start - 1 + write_end <- db_end write_start <- req_start } - return (list(req_start = req_start, req_end = req_end, stage = stage, merge = merge, write_start = write_start, write_end = write_end)) - + return(list(req_start = req_start, req_end = req_end, stage = stage, merge = merge, write_start = write_start, write_end = write_end)) } @@ -422,38 +437,38 @@ set_stage <- function(result, req_start, req_end, stage) { ##' "COPERNICUS/S2_SR") ##' } ##' @author Istem Fer -read_remote_registry <- function(source, collection){ - +read_remote_registry <- function(source, collection) { # get registration file register.xml <- system.file(paste0("registration/register.", toupper(source), ".xml"), package = "PEcAn.data.remote") - - tryCatch(expr = { - register <- XML::xmlToList(XML::xmlParse(register.xml)) - }, - error = function(e){ + + tryCatch( + expr = { + register <- XML::xmlToList(XML::xmlParse(register.xml)) + }, + error = function(e) { PEcAn.logger::logger.severe("Requested source is not available") - } + } ) . <- NULL - - if(!(purrr::is_empty(register %>% purrr::keep(names(.) == "collection")))){ + + if (!(purrr::is_empty(register %>% purrr::keep(names(.) == "collection")))) { # this is a type of source that requires different setup for its collections, e.g. 
GEE # then read collection specific information register <- register[[which(register %>% purrr::map_chr("original_name") == collection)]] } - + reg_list <- list() - reg_list$original_name <- ifelse(is.null(register$original_name), collection, register$original_name) - reg_list$pecan_name <- ifelse(is.null(register$pecan_name), collection, register$pecan_name) - reg_list$scale <- register$scale - reg_list$qc <- register$qc - reg_list$projection <- register$projection - reg_list$raw_mimetype <- register$raw_format$mimetype + reg_list$original_name <- ifelse(is.null(register$original_name), collection, register$original_name) + reg_list$pecan_name <- ifelse(is.null(register$pecan_name), collection, register$pecan_name) + reg_list$scale <- register$scale + reg_list$qc <- register$qc + reg_list$projection <- register$projection + reg_list$raw_mimetype <- register$raw_format$mimetype reg_list$raw_formatname <- register$raw_format$name - reg_list$pro_mimetype <- register$pro_format$mimetype + reg_list$pro_mimetype <- register$pro_format$mimetype reg_list$pro_formatname <- register$pro_format$name - reg_list$coordtype <- unlist(register$coord) - + reg_list$coordtype <- unlist(register$coord) + return(reg_list) } @@ -469,7 +484,7 @@ read_remote_registry <- function(source, collection){ ##' @param pro_file_name pro_file_name ##' @param start start date requested by user ##' @param end end date requested by the user -##' @param siteid siteid of the site +##' @param siteid siteid of the site ##' @param siteid_short short form of the siteid ##' @param out_get_data out_get_data ##' @param algorithm algorithm @@ -505,30 +520,29 @@ remotedata_db_check <- out_process_data, overwrite, dbcon) { - # Information about the date variables used: # req_start, req_end : start, end dates requested by the user, the user does not have to be aware about the status of the requested file in the DB # start, end : effective start, end dates created after checking the DB status. 
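As a concrete illustration of this date bookkeeping (all dates below are invented for the example, and the call relies on the set_stage() defined earlier in this file), the "forward" branch behaves as follows:

# Hypothetical example: the DB already holds a raw file covering
# 2019-01-01..2019-12-31 and the user now requests 2019-06-01..2020-06-01.
existing <- list(start_date = "2019-01-01", end_date = "2019-12-31")
datalist <- set_stage(existing, "2019-06-01", "2020-06-01", stage = FALSE)
datalist$req_start   # 2020-01-01 -> only the missing tail is downloaded
datalist$req_end     # 2020-06-01
datalist$write_start # 2019-01-01 -> the inputs record keeps the full range
datalist$write_end   # 2020-06-01
datalist$merge       # TRUE       -> new data is merged into the existing file

In other words, start/end become the download window handed to rp_control, while the write_* dates describe the merged file as it will be recorded in BETYdb.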
These dates are sent to rp_control for downloading and processing data # write_raw_start, write_raw_end : start, end dates which are used while inserting and updating the DB # the "pro" version of these variables have the same meaning and are used to refer to the processed file - - req_start <- start - req_end <- end - input_file <- NULL - stage_get_data <- FALSE - stage_process_data <- FALSE - raw_merge <- NULL - pro_merge <- NULL + + req_start <- start + req_end <- end + input_file <- NULL + stage_get_data <- FALSE + stage_process_data <- FALSE + raw_merge <- NULL + pro_merge <- NULL existing_raw_file_path <- NULL existing_pro_file_path <- NULL - write_raw_start <- NULL - write_raw_end <- NULL - write_pro_start <- NULL - write_pro_end <- NULL - raw_check <- NULL - pro_check <- NULL - remotefile_check_flag <- NULL - + write_raw_start <- NULL + write_raw_end <- NULL + write_pro_start <- NULL + write_pro_end <- NULL + raw_check <- NULL + pro_check <- NULL + remotefile_check_flag <- NULL + existing_data <- PEcAn.DB::db.query(paste0("SELECT * FROM inputs WHERE site_id=", siteid), dbcon) if (nrow(existing_data) >= 1) { @@ -536,52 +550,52 @@ remotedata_db_check <- PEcAn.logger::logger.warn("overwrite is set to TRUE, any existing file will be entirely replaced") if (!is.null(out_process_data)) { if (nrow(pro_check <- - PEcAn.DB::db.query( - sprintf( - "SELECT inputs.id, inputs.site_id, dbfiles.file_name as name, inputs.start_date, inputs.end_date, dbfiles.file_path FROM inputs INNER JOIN dbfiles ON inputs.id=dbfiles.container_id AND dbfiles.file_name LIKE '%s%%';", - pro_file_name - ), - dbcon - )) == 1) { + PEcAn.DB::db.query( + sprintf( + "SELECT inputs.id, inputs.site_id, dbfiles.file_name as name, inputs.start_date, inputs.end_date, dbfiles.file_path FROM inputs INNER JOIN dbfiles ON inputs.id=dbfiles.container_id AND dbfiles.file_name LIKE '%s%%';", + pro_file_name + ), + dbcon + )) == 1) { if (nrow(raw_check <- - PEcAn.DB::db.query( - sprintf( - "SELECT inputs.id, inputs.site_id, dbfiles.file_name as name, inputs.start_date, inputs.end_date, dbfiles.file_path FROM inputs INNER JOIN dbfiles ON inputs.id=dbfiles.container_id AND dbfiles.file_name LIKE '%s%%';", - raw_file_name - ), - dbcon - )) == 1) { + PEcAn.DB::db.query( + sprintf( + "SELECT inputs.id, inputs.site_id, dbfiles.file_name as name, inputs.start_date, inputs.end_date, dbfiles.file_path FROM inputs INNER JOIN dbfiles ON inputs.id=dbfiles.container_id AND dbfiles.file_name LIKE '%s%%';", + raw_file_name + ), + dbcon + )) == 1) { remotefile_check_flag <- 4 - } else{ + } else { remotefile_check_flag <- 6 } - } else{ + } else { remotefile_check_flag <- 1 } stage_process_data <- TRUE - pro_merge <- "replace" - write_pro_start <- start - write_pro_end <- end + pro_merge <- "replace" + write_pro_start <- start + write_pro_end <- end } else if (!is.null(out_get_data)) { if (nrow(raw_check <- - PEcAn.DB::db.query( - sprintf( - "SELECT inputs.id, inputs.site_id, dbfiles.file_name as name, inputs.start_date, inputs.end_date, dbfiles.file_path FROM inputs INNER JOIN dbfiles ON inputs.id=dbfiles.container_id AND dbfiles.file_name LIKE '%s%%';", - raw_file_name - ), - dbcon - )) == 1) { + PEcAn.DB::db.query( + sprintf( + "SELECT inputs.id, inputs.site_id, dbfiles.file_name as name, inputs.start_date, inputs.end_date, dbfiles.file_path FROM inputs INNER JOIN dbfiles ON inputs.id=dbfiles.container_id AND dbfiles.file_name LIKE '%s%%';", + raw_file_name + ), + dbcon + )) == 1) { remotefile_check_flag <- 2 - } else{ + } else { 
remotefile_check_flag <- 1 } } - stage_get_data <- TRUE - start <- req_start - end <- req_end - write_raw_start <- start - write_raw_end <- end - raw_merge <- "replace" + stage_get_data <- TRUE + start <- req_start + end <- req_end + write_raw_start <- start + write_raw_end <- end + raw_merge <- "replace" existing_pro_file_path <- NULL existing_raw_file_path <- NULL } else if (!is.null(out_process_data)) { @@ -589,19 +603,19 @@ remotedata_db_check <- # check if processed file exists if (nrow(pro_check <- - PEcAn.DB::db.query( - sprintf( - "SELECT inputs.id, inputs.site_id, dbfiles.file_name as name, inputs.start_date, inputs.end_date, dbfiles.file_path FROM inputs INNER JOIN dbfiles ON inputs.id=dbfiles.container_id AND dbfiles.file_name LIKE '%s%%';", - pro_file_name - ), - dbcon - )) == 1) { + PEcAn.DB::db.query( + sprintf( + "SELECT inputs.id, inputs.site_id, dbfiles.file_name as name, inputs.start_date, inputs.end_date, dbfiles.file_path FROM inputs INNER JOIN dbfiles ON inputs.id=dbfiles.container_id AND dbfiles.file_name LIKE '%s%%';", + pro_file_name + ), + dbcon + )) == 1) { datalist <- set_stage(pro_check, req_start, req_end, stage_process_data) - pro_start <- as.character(datalist$req_start) - pro_end <- as.character(datalist$req_end) + pro_start <- as.character(datalist$req_start) + pro_end <- as.character(datalist$req_end) write_pro_start <- datalist$write_start - write_pro_end <- datalist$write_end + write_pro_end <- datalist$write_end if (pro_start != "dont write" || pro_end != "dont write") { stage_process_data <- datalist$stage pro_merge <- datalist$merge @@ -619,15 +633,15 @@ remotedata_db_check <- dbcon ) if (!is.null(raw_check$start_date) && - !is.null(raw_check$end_date)) { + !is.null(raw_check$end_date)) { raw_datalist <- set_stage(raw_check, pro_start, pro_end, stage_get_data) - start <- as.character(raw_datalist$req_start) - end <- as.character(raw_datalist$req_end) + start <- as.character(raw_datalist$req_start) + end <- as.character(raw_datalist$req_end) write_raw_start <- raw_datalist$write_start - write_raw_end <- raw_datalist$write_end - stage_get_data <- raw_datalist$stage - raw_merge <- raw_datalist$merge + write_raw_end <- raw_datalist$write_end + stage_get_data <- raw_datalist$stage + raw_merge <- raw_datalist$merge if (stage_get_data == FALSE) { input_file <- raw_check$file_path } @@ -637,170 +651,168 @@ remotedata_db_check <- } if (pro_merge == TRUE && stage_get_data == FALSE) { remotefile_check_flag <- 5 - write_pro_start <- raw_check$start_date - write_pro_end <- raw_check$end_date + write_pro_start <- raw_check$start_date + write_pro_end <- raw_check$end_date existing_pro_file_path <- NULL - pro_merge <- FALSE + pro_merge <- FALSE } - } else{ + } else { # this case happens when the processed file has to be extended but the raw file used to create the existing processed file has been deleted remotefile_check_flag <- 6 - write_raw_start <- req_start - write_raw_end <- req_end - start <- req_start - end <- req_end - stage_get_data <- TRUE + write_raw_start <- req_start + write_raw_end <- req_end + start <- req_start + end <- req_end + stage_get_data <- TRUE existing_raw_file_path <- NULL - write_pro_start <- write_raw_start - write_pro_end <- write_raw_end - pro_merge <- "replace" + write_pro_start <- write_raw_start + write_pro_end <- write_raw_end + pro_merge <- "replace" existing_pro_file_path <- NULL } } - } else{ + } else { # requested file already exists pro_id <- pro_check$id pro_path <- pro_check$file_path if (nrow(raw_check <- - 
PEcAn.DB::db.query( - sprintf( - "SELECT inputs.id, inputs.site_id, dbfiles.file_name as name, inputs.start_date, inputs.end_date, dbfiles.file_path FROM inputs INNER JOIN dbfiles ON inputs.id=dbfiles.container_id AND dbfiles.file_name LIKE '%s%%';", - raw_file_name - ), - dbcon - )) == 1) { + PEcAn.DB::db.query( + sprintf( + "SELECT inputs.id, inputs.site_id, dbfiles.file_name as name, inputs.start_date, inputs.end_date, dbfiles.file_path FROM inputs INNER JOIN dbfiles ON inputs.id=dbfiles.container_id AND dbfiles.file_name LIKE '%s%%';", + raw_file_name + ), + dbcon + )) == 1) { raw_id <- raw_check$id raw_path <- raw_check$file_path } } - } - else if (nrow(raw_check <- - PEcAn.DB::db.query( - sprintf( - "SELECT inputs.id, inputs.site_id, dbfiles.file_name as name, inputs.start_date, inputs.end_date, dbfiles.file_path FROM inputs INNER JOIN dbfiles ON inputs.id=dbfiles.container_id AND dbfiles.file_name LIKE '%s%%';", - raw_file_name - ), - dbcon - )) == 1) { + } else if (nrow(raw_check <- + PEcAn.DB::db.query( + sprintf( + "SELECT inputs.id, inputs.site_id, dbfiles.file_name as name, inputs.start_date, inputs.end_date, dbfiles.file_path FROM inputs INNER JOIN dbfiles ON inputs.id=dbfiles.container_id AND dbfiles.file_name LIKE '%s%%';", + raw_file_name + ), + dbcon + )) == 1) { # if the processed file does not exist in the DB check if the raw file required for creating it is present PEcAn.logger::logger.info("Requested processed file does not exist in the DB, checking if the raw file does") datalist <- set_stage(raw_check, req_start, req_end, stage_get_data) - start <- as.character(datalist$req_start) - end <- as.character(datalist$req_end) + start <- as.character(datalist$req_start) + end <- as.character(datalist$req_end) write_raw_start <- datalist$write_start - write_raw_end <- datalist$write_end + write_raw_end <- datalist$write_end write_pro_start <- req_start - write_pro_end <- req_end - stage_get_data <- datalist$stage + write_pro_end <- req_end + stage_get_data <- datalist$stage if (stage_get_data == FALSE) { - input_file <- raw_check$file_path + input_file <- raw_check$file_path write_pro_start <- raw_check$start_date - write_pro_end <- raw_check$end_date + write_pro_end <- raw_check$end_date remotefile_check_flag <- 2 } raw_merge <- datalist$merge stage_process_data <- TRUE pro_merge <- FALSE if (raw_merge == TRUE || raw_merge == "replace") { - existing_raw_file_path = file.path(raw_check$file_path, raw_check$name) + existing_raw_file_path <- file.path(raw_check$file_path, raw_check$name) remotefile_check_flag <- 3 - } else{ + } else { existing_raw_file_path <- NULL } - } else{ + } else { # if no processed or raw file of requested type exists - start <- req_start - end <- req_end - write_raw_start <- req_start - write_raw_end <- req_end - write_pro_start <- req_start - write_pro_end <- req_end - stage_get_data <- TRUE - raw_merge <- FALSE + start <- req_start + end <- req_end + write_raw_start <- req_start + write_raw_end <- req_end + write_pro_start <- req_start + write_pro_end <- req_end + stage_get_data <- TRUE + raw_merge <- FALSE existing_raw_file_path <- NULL - stage_process_data <- TRUE - pro_merge <- FALSE + stage_process_data <- TRUE + pro_merge <- FALSE existing_pro_file_path <- NULL - remotefile_check_flag <- 1 + remotefile_check_flag <- 1 } } else if (nrow(raw_check <- - PEcAn.DB::db.query( - sprintf( - "SELECT inputs.id, inputs.site_id, dbfiles.file_name as name, inputs.start_date, inputs.end_date, dbfiles.file_path FROM inputs INNER JOIN dbfiles ON 
inputs.id=dbfiles.container_id AND dbfiles.file_name LIKE '%s%%';", - raw_file_name - ), - dbcon - )) == 1) { + PEcAn.DB::db.query( + sprintf( + "SELECT inputs.id, inputs.site_id, dbfiles.file_name as name, inputs.start_date, inputs.end_date, dbfiles.file_path FROM inputs INNER JOIN dbfiles ON inputs.id=dbfiles.container_id AND dbfiles.file_name LIKE '%s%%';", + raw_file_name + ), + dbcon + )) == 1) { # if only raw data is requested datalist <- set_stage(raw_check, req_start, req_end, stage_get_data) - start <- as.character(datalist$req_start) - end <- as.character(datalist$req_end) - stage_get_data <- datalist$stage - raw_merge <- datalist$merge - write_raw_start <- datalist$write_start - write_raw_end <- datalist$write_end + start <- as.character(datalist$req_start) + end <- as.character(datalist$req_end) + stage_get_data <- datalist$stage + raw_merge <- datalist$merge + write_raw_start <- datalist$write_start + write_raw_end <- datalist$write_end stage_process_data <- FALSE if (as.character(write_raw_start) == "dont write" && - as.character(write_raw_end) == "dont write") { - raw_id <- raw_check$id + as.character(write_raw_end) == "dont write") { + raw_id <- raw_check$id raw_path <- raw_check$file_path } if (raw_merge == TRUE) { existing_raw_file_path <- file.path(raw_check$file_path, raw_check$name) remotefile_check_flag <- 2 - } else{ + } else { existing_raw_file_path <- NULL } existing_pro_file_path <- NULL - } else{ + } else { # no data of requested type exists PEcAn.logger::logger.info("Requested data does not exist in the DB, retrieving for the first time") - remotefile_check_flag <- 1 + remotefile_check_flag <- 1 start <- req_start - end <- req_end + end <- req_end if (!is.null(out_get_data)) { - stage_get_data <- TRUE - write_raw_start <- req_start - write_raw_end <- req_end - raw_merge <- FALSE + stage_get_data <- TRUE + write_raw_start <- req_start + write_raw_end <- req_end + raw_merge <- FALSE existing_raw_file_path <- NULL } if (!is.null(out_process_data)) { - stage_process_data <- TRUE - write_pro_start <- req_start - write_pro_end <- req_end - pro_merge <- FALSE - process_file_name <- NULL + stage_process_data <- TRUE + write_pro_start <- req_start + write_pro_end <- req_end + pro_merge <- FALSE + process_file_name <- NULL existing_pro_file_path <- NULL - remotefile_check_flag <- 1 + remotefile_check_flag <- 1 } } - - } else{ + } else { # db is completely empty for the given siteid PEcAn.logger::logger.info("DB is completely empty for this site") - remotefile_check_flag <- 1 - start <- req_start - end <- req_end - stage_get_data <- TRUE - write_raw_start <- req_start - write_raw_end <- req_end - raw_merge <- FALSE + remotefile_check_flag <- 1 + start <- req_start + end <- req_end + stage_get_data <- TRUE + write_raw_start <- req_start + write_raw_end <- req_end + raw_merge <- FALSE existing_raw_file_path <- NULL if (!is.null(out_process_data)) { - stage_process_data <- TRUE - write_pro_start <- req_start - write_pro_end <- req_end - pro_merge <- FALSE + stage_process_data <- TRUE + write_pro_start <- req_start + write_pro_end <- req_end + pro_merge <- FALSE existing_pro_file_path <- NULL } } - - + + return( - list( + list( remotefile_check_flag = remotefile_check_flag, start = start, end = end, @@ -818,9 +830,7 @@ remotedata_db_check <- raw_check = raw_check, pro_check = pro_check ) - ) - - + ) } @@ -832,12 +842,12 @@ remotedata_db_check <- ##' @name remotedata_db_insert ##' @param output output list from rp_control ##' @param remotefile_check_flag remotefile_check_flag 
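The inputs/dbfiles lookup issued repeatedly throughout remotedata_db_check() above could be factored into one small helper. This is only a sketch of the pattern; the helper name is hypothetical and is not part of this patch, and it assumes an open BETYdb connection as used above:

find_dbfile_record <- function(file_name, dbcon) {
  # One row per matching input: id, site_id, file name, date range, and path.
  PEcAn.DB::db.query(
    sprintf(
      paste0(
        "SELECT inputs.id, inputs.site_id, dbfiles.file_name as name, ",
        "inputs.start_date, inputs.end_date, dbfiles.file_path ",
        "FROM inputs INNER JOIN dbfiles ",
        "ON inputs.id=dbfiles.container_id ",
        "AND dbfiles.file_name LIKE '%s%%';"
      ),
      file_name
    ),
    dbcon
  )
}
# e.g. raw_check <- find_dbfile_record(raw_file_name, dbcon); nrow(raw_check) == 1

Using such a helper would leave the branching on remotefile_check_flag unchanged while removing the duplicated SQL string.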
-##' @param siteid siteid +##' @param siteid siteid ##' @param out_get_data out_get_data ##' @param out_process_data out_process_data ##' @param write_raw_start write_raw_start, start date of the raw file ##' @param write_raw_end write_raw_end, end date of the raw file -##' @param write_pro_start write_pro_start +##' @param write_pro_start write_pro_start ##' @param write_pro_end write_pro_end ##' @param raw_check id, site_id, name, start_date, end_date, of the existing raw file from inputs table and file_path from dbfiles tables ##' @param pro_check pro_check id, site_id, name, start_date, end_date, of the existing processed file from inputs table and file_path from dbfiles tables @@ -886,9 +896,8 @@ remotedata_db_insert <- pro_mimetype, pro_formatname, dbcon) { - # The value of remotefile_check_flag denotes the following cases: - + # When processed file is requested, # 1 - There are no existing raw and processed files of the requested type in the DB # 2 - Requested processed file does not exist, the raw file used to create is it present and matches with the requested daterange @@ -896,24 +905,24 @@ remotedata_db_insert <- # 4 - Both processed and raw file of the requested type exists, but they have to be updated to match with the requested daterange # 5 - Raw file required for creating the processed file exists with the required daterange and the processed file needs to be updated. Here the new processed file will now contain data for the entire daterange of the existing raw file # 6 - There is a existing processed file of the requested type but the raw file used to create it has been deleted. Here, the raw file will be created again and the processed file will be replaced entirely with the one created from new raw file - + # When raw file is requested, # 1 - There is no existing raw the requested type in the DB # 2 - existing raw file will be updated - + pro_id <- NULL pro_path <- NULL - + if (!is.null(out_process_data)) { # if the requested processed file already exists within the required timeline dont insert or update the DB if (as.character(write_pro_start) == "dont write" && - as.character(write_pro_end) == "dont write") { + as.character(write_pro_end) == "dont write") { PEcAn.logger::logger.info("Requested processed file already exists") - pro_id <- pro_check$id + pro_id <- pro_check$id pro_path <- pro_check$file_path - raw_id <- raw_check$id + raw_id <- raw_check$id raw_path <- raw_check$file_path - } else{ + } else { if (remotefile_check_flag == 1) { # no processed and rawfile are present PEcAn.logger::logger.info("Inserting raw and processed files for the first time") @@ -941,8 +950,8 @@ remotedata_db_insert <- formatname = raw_formatname, con = dbcon ) - pro_id <- pro_ins$input.id - raw_id <- raw_ins$input.id + pro_id <- pro_ins$input.id + raw_id <- raw_ins$input.id pro_path <- output$process_data_path raw_path <- output$raw_data_path } else if (remotefile_check_flag == 2) { @@ -959,9 +968,9 @@ remotedata_db_insert <- formatname = pro_formatname, con = dbcon ) - raw_id <- raw_check$id + raw_id <- raw_check$id raw_path <- raw_check$file_path - pro_id <- pro_ins$input.id + pro_id <- pro_ins$input.id pro_path <- output$process_data_path } else if (remotefile_check_flag == 3) { # requested processed file does not exist, raw file used to create it is present but has to be updated to match with the requested dates @@ -995,12 +1004,12 @@ remotedata_db_insert <- ), dbcon ) - pro_id <- pro_ins$input.id + pro_id <- pro_ins$input.id pro_path <- output$process_data_path } else if 
(remotefile_check_flag == 4) { # requested processed and raw files are present and have to be updated - pro_id <- pro_check$id - raw_id <- raw_check$id + pro_id <- pro_check$id + raw_id <- raw_check$id raw_path <- output$raw_data_path pro_path <- output$process_data_path PEcAn.logger::logger.info("Updating processed and raw files") @@ -1044,9 +1053,9 @@ remotedata_db_insert <- ) } else if (remotefile_check_flag == 5) { # raw file required for creating the processed file exists and the processed file needs to be updated - pro_id <- pro_check$id + pro_id <- pro_check$id pro_path <- output$process_data_path - raw_id <- raw_check$id + raw_id <- raw_check$id raw_path <- raw_check$file_path PEcAn.logger::logger.info("Updating the existing processed file") PEcAn.DB::db.query( @@ -1070,7 +1079,7 @@ remotedata_db_insert <- ) } else if (remotefile_check_flag == 6) { # there is some existing processed file but the raw file used to create it is now deleted, replace the processed file entirely with the one created from new raw file - pro_id <- pro_check$id + pro_id <- pro_check$id pro_path <- output$process_data_path raw_path <- output$raw_data_path PEcAn.logger::logger.info("Replacing the existing processed file and creating a new raw file") @@ -1107,15 +1116,14 @@ remotedata_db_insert <- raw_id <- raw_ins$input.id } } - } - else{ + } else { # if the requested raw file already exists within the required timeline dont insert or update the DB if (as.character(write_raw_start) == "dont write" && - as.character(write_raw_end) == "dont write") { + as.character(write_raw_end) == "dont write") { PEcAn.logger::logger.info("Requested raw file already exists") - raw_id <- raw_check$id + raw_id <- raw_check$id raw_path <- raw_check$file_path - } else{ + } else { if (remotefile_check_flag == 1) { PEcAn.logger::logger.info(("Inserting raw file for the first time")) raw_ins <- @@ -1129,11 +1137,11 @@ remotedata_db_insert <- formatname = raw_formatname, con = dbcon ) - raw_id <- raw_ins$input.id + raw_id <- raw_ins$input.id raw_path <- output$raw_data_path } else if (remotefile_check_flag == 2) { PEcAn.logger::logger.info("Updating raw file") - raw_id <- raw_check$id + raw_id <- raw_check$id raw_path <- output$raw_data_path PEcAn.DB::db.query( sprintf( @@ -1157,6 +1165,6 @@ remotedata_db_insert <- } } } - + return(list(raw_id = raw_id, raw_path = raw_path, pro_id = pro_id, pro_path = pro_path)) - } \ No newline at end of file + } diff --git a/modules/data.remote/inst/FieldvsSMAP_compare.R b/modules/data.remote/inst/FieldvsSMAP_compare.R index c4314260ad9..5f4e36f57af 100644 --- a/modules/data.remote/inst/FieldvsSMAP_compare.R +++ b/modules/data.remote/inst/FieldvsSMAP_compare.R @@ -1,4 +1,4 @@ -#load necessities +# load necessities library(tidyverse) library(hrbrthemes) library(plotly) @@ -11,416 +11,461 @@ library(tidyr) library(dplyr) library(ncdf4) -par(mfrow = c(1,2)) +par(mfrow = c(1, 2)) -#set start and end dates -start = "2019-04-01" -end = as.character(Sys.Date()) +# set start and end dates +start <- "2019-04-01" +end <- as.character(Sys.Date()) - ############################## - ######## WILLOW CREEK ######## - ############################## +############################## +######## WILLOW CREEK ######## +############################## - ######## Download Ameriflux field data######## +######## Download Ameriflux field data######## -#download and get daily average +# download and get daily average source("/fs/data3/jbateman/pecan/modules/assim.sequential/inst/NEFI/US_WCr/download_soilmoist_WCr.R") -sm_wcr = 
download_soilmoist_WCr(start, end) %>% +sm_wcr <- download_soilmoist_WCr(start, end) %>% dplyr::mutate(Day = lubridate::day(Time), Month = lubridate::month(Time), Year = lubridate::year(Time)) %>% group_by(Year, Month, Day) -sm_wcr$Date = as.Date(with(sm_wcr, paste(Year, Month, Day, sep="-")), "%Y-%m-%d") - -sm_wcr.Dayavg = sm_wcr %>% +sm_wcr$Date <- as.Date(with(sm_wcr, paste(Year, Month, Day, sep = "-")), "%Y-%m-%d") + +sm_wcr.Dayavg <- sm_wcr %>% summarise(DayAvgsm1 = mean(avgsoil1)) %>% ungroup() -sm_wcr.Dayavg2= sm_wcr %>% +sm_wcr.Dayavg2 <- sm_wcr %>% summarise(DayAvgsm2 = mean(avgsoil2)) %>% ungroup() -sm_wcr.Dayavg$DayAvgsm2 =sm_wcr.Dayavg2$DayAvgsm2 -sm_wcr.Dayavg$Date = as.Date(with(sm_wcr.Dayavg, paste(Year, Month, Day, sep="-")), "%Y-%m-%d") -sm_wcr.Dayavg = sm_wcr.Dayavg %>% dplyr::select(Date, DayAvgsm1, DayAvgsm2) +sm_wcr.Dayavg$DayAvgsm2 <- sm_wcr.Dayavg2$DayAvgsm2 +sm_wcr.Dayavg$Date <- as.Date(with(sm_wcr.Dayavg, paste(Year, Month, Day, sep = "-")), "%Y-%m-%d") +sm_wcr.Dayavg <- sm_wcr.Dayavg %>% dplyr::select(Date, DayAvgsm1, DayAvgsm2) - ######## Download SMAP data ######## -geoJSON_outdir = "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst/" -smap_outdir = "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" +######## Download SMAP data ######## +geoJSON_outdir <- "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst/" +smap_outdir <- "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" site_info <- list( site_id = 676, site_name = "Willow Creek", lat = 45.805925, lon = -90.07961, - time_zone = "UTC") + time_zone = "UTC" +) -wcr.smap_sm = download_SMAP_gee2pecan(start, end, site_info, geoJSON_outdir, smap_outdir) +wcr.smap_sm <- download_SMAP_gee2pecan(start, end, site_info, geoJSON_outdir, smap_outdir) ##### plot time series # Daily sm average -wcr.d = ggplot() + - geom_line(data = na.omit(wcr.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue")) + - geom_point(data = na.omit(wcr.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue"), size=1) + - geom_line(data = sm_wcr.Dayavg, aes(x=Date, y=DayAvgsm1, color = "red"), linetype = "dashed") + - geom_line(data = sm_wcr.Dayavg, aes(x=Date, y=DayAvgsm2, color = "purple"), linetype = "dashed") + - ylim(0,60) + +wcr.d <- ggplot() + + geom_line(data = na.omit(wcr.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue")) + + geom_point(data = na.omit(wcr.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue"), size = 1) + + geom_line(data = sm_wcr.Dayavg, aes(x = Date, y = DayAvgsm1, color = "red"), linetype = "dashed") + + geom_line(data = sm_wcr.Dayavg, aes(x = Date, y = DayAvgsm2, color = "purple"), linetype = "dashed") + + ylim(0, 60) + ggtitle("SMAP vs Daily Field Data: Willow Creek") + - labs(x = "Date", - y = "Soil Moisture (%)" , - color = "Legend\n") + + labs( + x = "Date", + y = "Soil Moisture (%)", + color = "Legend\n" + ) + scale_color_identity( - breaks = c("steel blue","red", "purple"), + breaks = c("steel blue", "red", "purple"), labels = c("SMAP", "Old Field", "New Field"), - guide = "legend") + + guide = "legend" + ) + theme( legend.position = "none", - legend.title = element_blank()) + legend.title = element_blank() + ) # 1/2 hr field data vs daily smap (6am) -wcr.half = ggplot() + - geom_line(data = sm_wcr, aes(x=Date, y=avgsoil1, color="red"), linetype ="solid") + - geom_line(data = sm_wcr, aes(x=Date, y=avgsoil2, color="purple"), linetype ="solid") + - geom_line(data = na.omit(wcr.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue")) + - geom_point(data = 
na.omit(wcr.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue"), size=1) + +wcr.half <- ggplot() + + geom_line(data = sm_wcr, aes(x = Date, y = avgsoil1, color = "red"), linetype = "solid") + + geom_line(data = sm_wcr, aes(x = Date, y = avgsoil2, color = "purple"), linetype = "solid") + + geom_line(data = na.omit(wcr.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue")) + + geom_point(data = na.omit(wcr.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue"), size = 1) + ggtitle("SMAP vs 1/2 hr Field Data: Willow Creek") + - labs(x = "Date", - y = "Soil Moisture (%)", - color = "Legend\n") + + labs( + x = "Date", + y = "Soil Moisture (%)", + color = "Legend\n" + ) + scale_color_identity( - breaks = c("steel blue","red", "purple"), + breaks = c("steel blue", "red", "purple"), labels = c("SMAP", "Old Field", "New Field"), - guide = "legend") + + guide = "legend" + ) + theme( legend.position = "bottom", - legend.title = element_blank()) + legend.title = element_blank() + ) require(gridExtra) grid.arrange(wcr.d, wcr.half) - ############################## - ######## SYLVANIA ######## - ############################## +############################## +######## SYLVANIA ######## +############################## ######## Download Ameriflux field data######## -#download and get daily average +# download and get daily average source("/fs/data3/jbateman/pecan/modules/assim.sequential/inst/NEFI/US_Syv/download_soilmoist_Syv.R") -sm_syv = download_soilmoist_Syv(start, end) %>% +sm_syv <- download_soilmoist_Syv(start, end) %>% mutate(Day = day(Time), Month = month(Time), Year = year(Time)) %>% group_by(Year, Month, Day) -sm_syv$Date = as.Date(with(sm_syv, paste(Year, Month, Day, sep="-")), "%Y-%m-%d") +sm_syv$Date <- as.Date(with(sm_syv, paste(Year, Month, Day, sep = "-")), "%Y-%m-%d") -sm_syv.Dayavg = sm_syv %>% +sm_syv.Dayavg <- sm_syv %>% summarise(DayAvgsm = mean(avgsoil)) %>% ungroup() -sm_syv.Dayavg$Date = as.Date(with(sm_syv.Dayavg, paste(Year, Month, Day, sep="-")), "%Y-%m-%d") -sm_syv.Dayavg = sm_syv.Dayavg %>% dplyr::select(Date, DayAvgsm) +sm_syv.Dayavg$Date <- as.Date(with(sm_syv.Dayavg, paste(Year, Month, Day, sep = "-")), "%Y-%m-%d") +sm_syv.Dayavg <- sm_syv.Dayavg %>% dplyr::select(Date, DayAvgsm) ######## Download SMAP ssm data ######## -geoJSON_outdir = "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" -smap_outdir = "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" +geoJSON_outdir <- "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" +smap_outdir <- "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" site_info <- list( site_id = 622, site_name = "Sylvania", lat = 46.242017, lon = -89.347567, - time_zone = "UTC") -syv.smap_sm = download_SMAP_gee2pecan(start, end, site_info, geoJSON_outdir, smap_outdir) + time_zone = "UTC" +) +syv.smap_sm <- download_SMAP_gee2pecan(start, end, site_info, geoJSON_outdir, smap_outdir) ##### plot time series # Daily sm average -syv.d = ggplot() + - geom_line(data = na.omit(syv.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue")) + - geom_point(data = na.omit(syv.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue"), size=1) + - geom_line(data = sm_syv.Dayavg, aes(x=Date, y=DayAvgsm, color = "red"), linetype = "dashed") + - ylim(0,60) + +syv.d <- ggplot() + + geom_line(data = na.omit(syv.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue")) + + geom_point(data = na.omit(syv.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue"), size = 1) + + geom_line(data = sm_syv.Dayavg, aes(x = Date, y = 
DayAvgsm, color = "red"), linetype = "dashed") + + ylim(0, 60) + ggtitle("SMAP vs Daily Field Data: SYLVANIA") + - labs(x = "Date", - y = "Soil Moisture (%)", - color = "Legend\n") + + labs( + x = "Date", + y = "Soil Moisture (%)", + color = "Legend\n" + ) + scale_color_identity( - breaks = c("steel blue","red"), - labels = c("SMAP", "Field"), - guide = "legend") + + breaks = c("steel blue", "red"), + labels = c("SMAP", "Field"), + guide = "legend" + ) + theme( legend.position = "none", - legend.title = element_blank()) + legend.title = element_blank() + ) # 1/2 hr field data vs daily smap (6am) -syv.half = ggplot() + - geom_line(data = sm_syv, aes(x=Date, y=avgsoil, color="red"), linetype ="solid") + - geom_line(data = na.omit(syv.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue")) + - geom_point(data = na.omit(syv.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue"), size=1) + +syv.half <- ggplot() + + geom_line(data = sm_syv, aes(x = Date, y = avgsoil, color = "red"), linetype = "solid") + + geom_line(data = na.omit(syv.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue")) + + geom_point(data = na.omit(syv.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue"), size = 1) + ggtitle("SMAP vs 1/2 hr Field Data: SYLVANIA") + - labs(x = "Date", - y = "Soil Moisture (%)", - color = "Legend\n") + + labs( + x = "Date", + y = "Soil Moisture (%)", + color = "Legend\n" + ) + scale_color_identity( - breaks = c("steel blue","red"), + breaks = c("steel blue", "red"), labels = c("SMAP", "Field"), - guide = "legend") + + guide = "legend" + ) + theme( legend.position = "bottom", - legend.title = element_blank()) + legend.title = element_blank() + ) grid.arrange(syv.d, syv.half) - ############################## - ######## WLEF ######## - ############################## +############################## +######## WLEF ######## +############################## ######## Download Ameriflux field data######## -#download and get daily average +# download and get daily average source("/fs/data3/jbateman/pecan/modules/assim.sequential/inst/NEFI/US_WLEF/download_soilmoist_WLEF.R") -sm_wlef = download_soilmoist_WLEF(start, end) %>% +sm_wlef <- download_soilmoist_WLEF(start, end) %>% mutate(Day = day(Time), Month = month(Time), Year = year(Time)) %>% group_by(Year, Month, Day) -sm_wlef$Date = as.Date(with(sm_wlef, paste(Year, Month, Day, sep="-")), "%Y-%m-%d") +sm_wlef$Date <- as.Date(with(sm_wlef, paste(Year, Month, Day, sep = "-")), "%Y-%m-%d") -sm_wlef.Dayavg = sm_wlef %>% +sm_wlef.Dayavg <- sm_wlef %>% summarise(DayAvgsm = mean(avgsoil)) %>% ungroup() -sm_wlef.Dayavg$Date = as.Date(with(sm_wlef.Dayavg, paste(Year, Month, Day, sep="-")), "%Y-%m-%d") -sm_wlef.Dayavg = sm_wlef.Dayavg %>% dplyr::select(Date, DayAvgsm) +sm_wlef.Dayavg$Date <- as.Date(with(sm_wlef.Dayavg, paste(Year, Month, Day, sep = "-")), "%Y-%m-%d") +sm_wlef.Dayavg <- sm_wlef.Dayavg %>% dplyr::select(Date, DayAvgsm) ######## Download SMAP data ######## -geoJSON_outdir = "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" -smap_outdir = "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" +geoJSON_outdir <- "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" +smap_outdir <- "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" site_info <- list( site_id = 678, site_name = "WLEF", lat = 45.9408, lon = -90.27, - time_zone = "UTC") -wlef.smap_sm = download_SMAP_gee2pecan(start, end, site_info, geoJSON_outdir, smap_outdir) + time_zone = "UTC" +) +wlef.smap_sm <- download_SMAP_gee2pecan(start, end, 
site_info, geoJSON_outdir, smap_outdir) ##### plot time series # Daily sm average -wlef.d = ggplot() + - geom_line(data = na.omit(wlef.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue")) + - geom_point(data = na.omit(wlef.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue"), size=1) + - geom_line(data = sm_wlef.Dayavg, aes(x=Date, y=DayAvgsm, color = "red"), linetype = "dashed") + - ylim(0,60) + +wlef.d <- ggplot() + + geom_line(data = na.omit(wlef.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue")) + + geom_point(data = na.omit(wlef.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue"), size = 1) + + geom_line(data = sm_wlef.Dayavg, aes(x = Date, y = DayAvgsm, color = "red"), linetype = "dashed") + + ylim(0, 60) + ggtitle("SMAP vs Daily Field Data: WLEF") + - labs(x = "Date", - y = "Soil Moisture (%)", - color = "Legend\n") + + labs( + x = "Date", + y = "Soil Moisture (%)", + color = "Legend\n" + ) + scale_color_identity( - breaks = c("steel blue","red"), + breaks = c("steel blue", "red"), labels = c("SMAP", "Field"), - guide = "legend") + + guide = "legend" + ) + theme( legend.position = "none", - legend.title = element_blank()) + legend.title = element_blank() + ) # 1/2 hr field data vs daily smap (6am) -wlef.half = ggplot() + - geom_line(data = sm_wlef, aes(x=Date, y=avgsoil, color="red"), linetype ="solid") + - geom_line(data = na.omit(wlef.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue")) + - geom_point(data = na.omit(wlef.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue"), size=1) + +wlef.half <- ggplot() + + geom_line(data = sm_wlef, aes(x = Date, y = avgsoil, color = "red"), linetype = "solid") + + geom_line(data = na.omit(wlef.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue")) + + geom_point(data = na.omit(wlef.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue"), size = 1) + ggtitle("SMAP vs 1/2 hr Field Data: WLEF") + - labs(x = "Date", - y = "Soil Moisture (%)", - color = "Legend\n") + + labs( + x = "Date", + y = "Soil Moisture (%)", + color = "Legend\n" + ) + scale_color_identity( - breaks = c("steel blue","red"), + breaks = c("steel blue", "red"), labels = c("SMAP", "Field"), - guide = "legend") + + guide = "legend" + ) + theme( legend.position = "bottom", - legend.title = element_blank()) + legend.title = element_blank() + ) grid.arrange(wlef.d, wlef.half) - ############################## - ######## HARVARD ######## - ############################## +############################## +######## HARVARD ######## +############################## ######## Download Ameriflux field data######## -#download and get daily average +# download and get daily average source("/fs/data3/jbateman/pecan/modules/assim.sequential/inst/NEFI/US_Harvard/download_soilmoist_harvard.R") -sm_harv = download_soilmoist_Harvard(start, end) %>% +sm_harv <- download_soilmoist_Harvard(start, end) %>% mutate(Day = day(Time), Month = month(Time), Year = year(Time)) %>% group_by(Year, Month, Day) -sm_harv$Date = as.Date(with(sm_harv, paste(Year, Month, Day, sep="-")), "%Y-%m-%d") -sm_harv$SWC15 = replace(sm_harv$SWC15, sm_harv$SWC15 == -9999, NA) +sm_harv$Date <- as.Date(with(sm_harv, paste(Year, Month, Day, sep = "-")), "%Y-%m-%d") +sm_harv$SWC15 <- replace(sm_harv$SWC15, sm_harv$SWC15 == -9999, NA) -sm_harv.Dayavg = sm_harv %>% +sm_harv.Dayavg <- sm_harv %>% summarise(DayAvgsm = mean(SWC15)) %>% ungroup() -sm_harv.Dayavg$Date = as.Date(with(sm_harv.Dayavg, paste(Year, Month, Day, sep="-")), "%Y-%m-%d") -sm_harv.Dayavg = sm_harv.Dayavg %>% dplyr::select(Date, DayAvgsm) 
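Each site block in this script repeats the same half-hourly-to-daily collapse (mutate day/month/year, group, average). A small helper along the following lines would remove that duplication; the function name is hypothetical and not part of this script, it assumes dplyr >= 1.0 is attached as in the script header, and it assumes the downloaded data carry a POSIXct Time column, as the site blocks above do:

daily_mean <- function(sm, value_col) {
  # Collapse half-hourly soil moisture to one mean value per calendar day.
  sm %>%
    dplyr::mutate(Date = as.Date(Time)) %>%
    dplyr::group_by(Date) %>%
    dplyr::summarise(DayAvgsm = mean({{ value_col }}, na.rm = TRUE), .groups = "drop")
}
# e.g. sm_wcr.Dayavg <- daily_mean(sm_wcr, avgsoil1)

Site-specific cleaning (such as the -9999 to NA replacement used for the Harvard SWC15 series) would still be done before calling the helper.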
+sm_harv.Dayavg$Date <- as.Date(with(sm_harv.Dayavg, paste(Year, Month, Day, sep = "-")), "%Y-%m-%d") +sm_harv.Dayavg <- sm_harv.Dayavg %>% dplyr::select(Date, DayAvgsm) ######## Download SMAP data ######## -geoJSON_outdir = "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" -smap_outdir = "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" +geoJSON_outdir <- "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" +smap_outdir <- "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" site_info <- list( site_id = 1126, site_name = "Harvard Forest", lat = 42.531453, lon = -72.188896, - time_zone = "UTC") -harv.smap_sm = download_SMAP_gee2pecan("2019-11-06", end, site_info, geoJSON_outdir, smap_outdir) + time_zone = "UTC" +) +harv.smap_sm <- download_SMAP_gee2pecan("2019-11-06", end, site_info, geoJSON_outdir, smap_outdir) ##### plot time series # Daily sm average -harv.d = ggplot() + - geom_line(data = na.omit(harv.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue")) + - geom_point(data = na.omit(harv.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue"), size=1) + - geom_line(data = sm_harv.Dayavg, aes(x=Date, y=DayAvgsm, color = "red"), linetype = "dashed") + - ylim(0,60) + +harv.d <- ggplot() + + geom_line(data = na.omit(harv.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue")) + + geom_point(data = na.omit(harv.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue"), size = 1) + + geom_line(data = sm_harv.Dayavg, aes(x = Date, y = DayAvgsm, color = "red"), linetype = "dashed") + + ylim(0, 60) + ggtitle("SMAP vs Daily Field Data: Harvard") + - labs(x = "Date", - y = "Soil Moisture (%)", - color = "Legend\n") + + labs( + x = "Date", + y = "Soil Moisture (%)", + color = "Legend\n" + ) + scale_color_identity( - breaks = c("steel blue","red"), + breaks = c("steel blue", "red"), labels = c("SMAP", "Field"), - guide = "legend") + + guide = "legend" + ) + theme( legend.position = "none", - legend.title = element_blank()) + legend.title = element_blank() + ) # 1/2 hr field data vs daily smap (6am) -harv.half = ggplot() + - geom_line(data = na.omit(sm_harv), aes(x=Date, y=SWC15, color="red"), linetype ="solid") + - geom_line(data = na.omit(harv.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue")) + - geom_point(data = na.omit(harv.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue"), size=1) + +harv.half <- ggplot() + + geom_line(data = na.omit(sm_harv), aes(x = Date, y = SWC15, color = "red"), linetype = "solid") + + geom_line(data = na.omit(harv.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue")) + + geom_point(data = na.omit(harv.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue"), size = 1) + ggtitle("SMAP vs 1/2 hr Field Data: Harvard") + - labs(x = "Date", - y = "Soil Moisture (%)", - color = "Legend\n") + + labs( + x = "Date", + y = "Soil Moisture (%)", + color = "Legend\n" + ) + scale_color_identity( - breaks = c("steel blue","red"), + breaks = c("steel blue", "red"), labels = c("SMAP", "Field"), - guide = "legend") + + guide = "legend" + ) + theme( legend.position = "bottom", - legend.title = element_blank()) + legend.title = element_blank() + ) grid.arrange(harv.d, harv.half) - ############################## - ######## BART ######## - ############################## +############################## +######## BART ######## +############################## ######## NEON data ######## -#download and get daily average -BART_ssm = split(BART, list(BART$verticalPosition, BART$horizontalPosition, BART$VSWCFinalQF)) -BART_ssm = 
split(BART, BART$VSWCFinalQF) -sm_bart = BART_ssm$'0' %>% +# download and get daily average +BART_ssm <- split(BART, list(BART$verticalPosition, BART$horizontalPosition, BART$VSWCFinalQF)) +BART_ssm <- split(BART, BART$VSWCFinalQF) +sm_bart <- BART_ssm$"0" %>% na.omit() %>% dplyr::select(startDateTime, VSWCMean, horizontalPosition, verticalPosition) %>% mutate(Day = day(startDateTime), Month = month(startDateTime), Year = year(startDateTime)) %>% group_by(Year, Month, Day) -sm_bart$Date = as.Date(with(sm_bart, paste(Year, Month, Day, sep="-")), "%Y-%m-%d") -sm_bart$VSWCMean = sm_bart$VSWCMean * 100 -sm_bart = split(sm_bart, list(sm_bart$verticalPosition, sm_bart$horizontalPosition)) - -sm_bart.Dayavg = vector(mode = "list", length = 40) -names(sm_bart.Dayavg) = names(sm_bart) -for (i in 1:length(sm_bart)){ - sm_bart.Dayavg[[i]] = dplyr::select(sm_bart[[i]], Date, VSWCMean) %>% +sm_bart$Date <- as.Date(with(sm_bart, paste(Year, Month, Day, sep = "-")), "%Y-%m-%d") +sm_bart$VSWCMean <- sm_bart$VSWCMean * 100 +sm_bart <- split(sm_bart, list(sm_bart$verticalPosition, sm_bart$horizontalPosition)) + +sm_bart.Dayavg <- vector(mode = "list", length = 40) +names(sm_bart.Dayavg) <- names(sm_bart) +for (i in 1:length(sm_bart)) { + sm_bart.Dayavg[[i]] <- dplyr::select(sm_bart[[i]], Date, VSWCMean) %>% summarise(DayAvgsm = mean(VSWCMean)) %>% ungroup() - sm_bart.Dayavg[[i]]$Date = as.Date(with(sm_bart.Dayavg[[i]], paste(Year, Month, Day, sep="-")), "%Y-%m-%d") + sm_bart.Dayavg[[i]]$Date <- as.Date(with(sm_bart.Dayavg[[i]], paste(Year, Month, Day, sep = "-")), "%Y-%m-%d") } ######## Download SMAP data ######## -geoJSON_outdir = "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" -smap_outdir = "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" +geoJSON_outdir <- "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" +smap_outdir <- "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" site_info <- list( site_id = 796, site_name = "Bartlett", lat = 44.06464, lon = -71.288077, - time_zone = "UTC") -bart.smap_sm = download_SMAP_gee2pecan(start, end, site_info, geoJSON_outdir, smap_outdir) + time_zone = "UTC" +) +bart.smap_sm <- download_SMAP_gee2pecan(start, end, site_info, geoJSON_outdir, smap_outdir) ##### plot time series # Daily sm average -bart.d = ggplot() + - geom_line(data = na.omit(bart.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue")) + - geom_point(data = na.omit(bart.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue"), size=1) + - geom_line(data = sm_bart.Dayavg$'502.1', aes(x=Date, y=DayAvgsm, color = "red"), linetype = "dotted", size=.5) + - geom_point(data = sm_bart.Dayavg$'502.1', aes(x=Date, y=DayAvgsm, color = "red"), size=1) + - geom_line(data = sm_bart.Dayavg$'502.1', aes(x=Date, y=DayAvgsm, color = "red"),linetype = "dotted", size=.5) + - geom_point(data = sm_bart.Dayavg$'502.2', aes(x=Date, y=DayAvgsm, color = "green"), size=1) + - geom_line(data = sm_bart.Dayavg$'502.2', aes(x=Date, y=DayAvgsm, color = "green"), linetype = "dotted", size=.5) + - geom_point(data = sm_bart.Dayavg$'502.3', aes(x=Date, y=DayAvgsm, color = "purple"), size=1) + - geom_line(data = sm_bart.Dayavg$'502.3', aes(x=Date, y=DayAvgsm, color = "purple"), linetype = "dotted", size=.5) + - geom_point(data = sm_bart.Dayavg$'502.4', aes(x=Date, y=DayAvgsm, color = "orange"), size=1) + - geom_line(data = sm_bart.Dayavg$'502.4', aes(x=Date, y=DayAvgsm, color = "orange"), linetype = "dotted", size=.5) + - geom_point(data = sm_bart.Dayavg$'502.5', aes(x=Date, y=DayAvgsm, color = 
"yellow"), size=1) + - geom_line(data = sm_bart.Dayavg$'502.5', aes(x=Date, y=DayAvgsm, color = "yellow"), linetype = "dotted", size=.5) + - ylim(0,60) + +bart.d <- ggplot() + + geom_line(data = na.omit(bart.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue")) + + geom_point(data = na.omit(bart.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue"), size = 1) + + geom_line(data = sm_bart.Dayavg$"502.1", aes(x = Date, y = DayAvgsm, color = "red"), linetype = "dotted", size = .5) + + geom_point(data = sm_bart.Dayavg$"502.1", aes(x = Date, y = DayAvgsm, color = "red"), size = 1) + + geom_line(data = sm_bart.Dayavg$"502.1", aes(x = Date, y = DayAvgsm, color = "red"), linetype = "dotted", size = .5) + + geom_point(data = sm_bart.Dayavg$"502.2", aes(x = Date, y = DayAvgsm, color = "green"), size = 1) + + geom_line(data = sm_bart.Dayavg$"502.2", aes(x = Date, y = DayAvgsm, color = "green"), linetype = "dotted", size = .5) + + geom_point(data = sm_bart.Dayavg$"502.3", aes(x = Date, y = DayAvgsm, color = "purple"), size = 1) + + geom_line(data = sm_bart.Dayavg$"502.3", aes(x = Date, y = DayAvgsm, color = "purple"), linetype = "dotted", size = .5) + + geom_point(data = sm_bart.Dayavg$"502.4", aes(x = Date, y = DayAvgsm, color = "orange"), size = 1) + + geom_line(data = sm_bart.Dayavg$"502.4", aes(x = Date, y = DayAvgsm, color = "orange"), linetype = "dotted", size = .5) + + geom_point(data = sm_bart.Dayavg$"502.5", aes(x = Date, y = DayAvgsm, color = "yellow"), size = 1) + + geom_line(data = sm_bart.Dayavg$"502.5", aes(x = Date, y = DayAvgsm, color = "yellow"), linetype = "dotted", size = .5) + + ylim(0, 60) + ggtitle("SMAP vs Daily Field Data: Bartlett") + - labs(x = "Date", - y = "Soil Moisture (%)", - color = "Legend\n") + + labs( + x = "Date", + y = "Soil Moisture (%)", + color = "Legend\n" + ) + scale_color_identity( - breaks = c("steel blue","red", "green", "purple", "orange", "yellow"), + breaks = c("steel blue", "red", "green", "purple", "orange", "yellow"), labels = c("SMAP", "Field 1 (-6cm)", "Field 2 (-6cm)", "Field 3 (-6cm)", "Field 4 (-6cm)", "Field 5 (-6cm)"), - guide = "legend") + + guide = "legend" + ) + theme( legend.position = "none", - legend.title = element_blank()) + legend.title = element_blank() + ) # 1/2 hr field data vs daily smap (6am) -bart.half = ggplot() + - geom_line(data = sm_bart$'502.1', aes(x=Date, y=VSWCMean, color = "red"), linetype = "dotted", size=.5) + - geom_point(data = sm_bart$'502.1', aes(x=Date, y=VSWCMean, color = "red"), size=1) + - geom_line(data = sm_bart$'502.1', aes(x=Date, y=VSWCMean, color = "red"),linetype = "dotted", size=.5) + - geom_point(data = sm_bart$'502.2', aes(x=Date, y=VSWCMean, color = "green"), size=1) + - geom_line(data = sm_bart$'502.2', aes(x=Date, y=VSWCMean, color = "green"), linetype = "dotted", size=.5) + - geom_point(data = sm_bart$'502.3', aes(x=Date, y=VSWCMean, color = "purple"), size=1) + - geom_line(data = sm_bart$'502.3', aes(x=Date, y=VSWCMean, color = "purple"), linetype = "dotted", size=.5) + - geom_point(data = sm_bart$'502.4', aes(x=Date, y=VSWCMean, color = "orange"), size=1) + - geom_line(data = sm_bart$'502.4', aes(x=Date, y=VSWCMean, color = "orange"), linetype = "dotted", size=.5) + - geom_point(data = sm_bart$'502.5', aes(x=Date, y=VSWCMean, color = "yellow"), size=1) + - geom_line(data = sm_bart$'502.5', aes(x=Date, y=VSWCMean, color = "yellow"), linetype = "dotted", size=.5) + - ylim(0,60) + - geom_line(data = na.omit(bart.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue")) + - geom_point(data = 
na.omit(bart.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue"), size=1) + +bart.half <- ggplot() + + geom_line(data = sm_bart$"502.1", aes(x = Date, y = VSWCMean, color = "red"), linetype = "dotted", size = .5) + + geom_point(data = sm_bart$"502.1", aes(x = Date, y = VSWCMean, color = "red"), size = 1) + + geom_line(data = sm_bart$"502.1", aes(x = Date, y = VSWCMean, color = "red"), linetype = "dotted", size = .5) + + geom_point(data = sm_bart$"502.2", aes(x = Date, y = VSWCMean, color = "green"), size = 1) + + geom_line(data = sm_bart$"502.2", aes(x = Date, y = VSWCMean, color = "green"), linetype = "dotted", size = .5) + + geom_point(data = sm_bart$"502.3", aes(x = Date, y = VSWCMean, color = "purple"), size = 1) + + geom_line(data = sm_bart$"502.3", aes(x = Date, y = VSWCMean, color = "purple"), linetype = "dotted", size = .5) + + geom_point(data = sm_bart$"502.4", aes(x = Date, y = VSWCMean, color = "orange"), size = 1) + + geom_line(data = sm_bart$"502.4", aes(x = Date, y = VSWCMean, color = "orange"), linetype = "dotted", size = .5) + + geom_point(data = sm_bart$"502.5", aes(x = Date, y = VSWCMean, color = "yellow"), size = 1) + + geom_line(data = sm_bart$"502.5", aes(x = Date, y = VSWCMean, color = "yellow"), linetype = "dotted", size = .5) + + ylim(0, 60) + + geom_line(data = na.omit(bart.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue")) + + geom_point(data = na.omit(bart.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue"), size = 1) + ggtitle("SMAP vs 1/2 hr Field Data: Bartlett") + - labs(x = "Date", - y = "Soil Moisture (%)", - color = "Legend\n") + + labs( + x = "Date", + y = "Soil Moisture (%)", + color = "Legend\n" + ) + scale_color_identity( - breaks = c("steel blue","red", "green", "purple", "orange", "yellow"), + breaks = c("steel blue", "red", "green", "purple", "orange", "yellow"), labels = c("SMAP", "Field 1 (-6cm)", "Field 2 (-6cm)", "Field 3 (-6cm)", "Field 4 (-6cm)", "Field 5 (-6cm)"), - guide = "legend") + + guide = "legend" + ) + theme( legend.position = "bottom", - legend.title = element_blank()) + legend.title = element_blank() + ) -#require(gridExtra) -#grid.arrange(bart.d, bart.half) +# require(gridExtra) +# grid.arrange(bart.d, bart.half) plot(bart.half) @@ -431,97 +476,106 @@ plot(bart.half) ######## NEON data ######## -#download and get daily average -SRER_ssm = split(SRER, list(SRER$verticalPosition, SRER$horizontalPosition, SRER$VSWCFinalQF)) -SRER_ssm = split(SRER, SRER$VSWCFinalQF) -sm_srer = SRER_ssm$'0' %>% +# download and get daily average +SRER_ssm <- split(SRER, list(SRER$verticalPosition, SRER$horizontalPosition, SRER$VSWCFinalQF)) +SRER_ssm <- split(SRER, SRER$VSWCFinalQF) +sm_srer <- SRER_ssm$"0" %>% na.omit() %>% dplyr::select(startDateTime, VSWCMean, horizontalPosition, verticalPosition) %>% mutate(Day = day(startDateTime), Month = month(startDateTime), Year = year(startDateTime)) %>% group_by(Year, Month, Day) -sm_srer$Date = as.Date(with(sm_srer, paste(Year, Month, Day, sep="-")), "%Y-%m-%d") -sm_srer$VSWCMean = sm_srer$VSWCMean * 100 -sm_srer = split(sm_srer, list(sm_srer$verticalPosition, sm_srer$horizontalPosition)) - -sm_srer.Dayavg = vector(mode = "list", length = 40) -names(sm_srer.Dayavg) = names(sm_srer) -for (i in 1:length(sm_srer)){ - sm_srer.Dayavg[[i]] = dplyr::select(sm_srer[[i]], Date, VSWCMean) %>% +sm_srer$Date <- as.Date(with(sm_srer, paste(Year, Month, Day, sep = "-")), "%Y-%m-%d") +sm_srer$VSWCMean <- sm_srer$VSWCMean * 100 +sm_srer <- split(sm_srer, list(sm_srer$verticalPosition, 
sm_srer$horizontalPosition)) + +sm_srer.Dayavg <- vector(mode = "list", length = 40) +names(sm_srer.Dayavg) <- names(sm_srer) +for (i in 1:length(sm_srer)) { + sm_srer.Dayavg[[i]] <- dplyr::select(sm_srer[[i]], Date, VSWCMean) %>% summarise(DayAvgsm = mean(VSWCMean)) %>% ungroup() - sm_srer.Dayavg[[i]]$Date = as.Date(with(sm_srer.Dayavg[[i]], paste(Year, Month, Day, sep="-")), "%Y-%m-%d") + sm_srer.Dayavg[[i]]$Date <- as.Date(with(sm_srer.Dayavg[[i]], paste(Year, Month, Day, sep = "-")), "%Y-%m-%d") } ######## Download SMAP data ######## -geoJSON_outdir = "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" -smap_outdir = "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" +geoJSON_outdir <- "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" +smap_outdir <- "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" site_info <- list( site_id = 1000004876, site_name = "Santa Rita", lat = 31.91068, lon = -110.83549, - time_zone = "UTC") -srer.smap_sm = download_SMAP_gee2pecan(start, end, site_info, geoJSON_outdir, smap_outdir) + time_zone = "UTC" +) +srer.smap_sm <- download_SMAP_gee2pecan(start, end, site_info, geoJSON_outdir, smap_outdir) ##### plot time series # Daily sm average -srer.d = ggplot() + - geom_line(data = na.omit(srer.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue")) + - geom_point(data = na.omit(srer.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue"), size=1) + - geom_line(data = sm_srer.Dayavg$'502.1', aes(x=Date, y=DayAvgsm, color = "red"), linetype = "dotted", size=.5) + - geom_point(data = sm_srer.Dayavg$'502.1', aes(x=Date, y=DayAvgsm, color = "red"), size=1) + - geom_line(data = sm_srer.Dayavg$'502.1', aes(x=Date, y=DayAvgsm, color = "red"),linetype = "dotted", size=.5) + - geom_point(data = sm_srer.Dayavg$'502.2', aes(x=Date, y=DayAvgsm, color = "green"), size=1) + - geom_line(data = sm_srer.Dayavg$'502.2', aes(x=Date, y=DayAvgsm, color = "green"), linetype = "dotted", size=.5) + - geom_point(data = sm_srer.Dayavg$'502.3', aes(x=Date, y=DayAvgsm, color = "purple"), size=1) + - geom_line(data = sm_srer.Dayavg$'502.3', aes(x=Date, y=DayAvgsm, color = "purple"), linetype = "dotted", size=.5) + - geom_point(data = sm_srer.Dayavg$'502.4', aes(x=Date, y=DayAvgsm, color = "orange"), size=1) + - geom_line(data = sm_srer.Dayavg$'502.4', aes(x=Date, y=DayAvgsm, color = "orange"), linetype = "dotted", size=.5) + - geom_point(data = sm_srer.Dayavg$'502.5', aes(x=Date, y=DayAvgsm, color = "yellow"), size=1) + - geom_line(data = sm_srer.Dayavg$'502.5', aes(x=Date, y=DayAvgsm, color = "yellow"), linetype = "dotted", size=.5) + - ylim(0,60) + +srer.d <- ggplot() + + geom_line(data = na.omit(srer.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue")) + + geom_point(data = na.omit(srer.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue"), size = 1) + + geom_line(data = sm_srer.Dayavg$"502.1", aes(x = Date, y = DayAvgsm, color = "red"), linetype = "dotted", size = .5) + + geom_point(data = sm_srer.Dayavg$"502.1", aes(x = Date, y = DayAvgsm, color = "red"), size = 1) + + geom_line(data = sm_srer.Dayavg$"502.1", aes(x = Date, y = DayAvgsm, color = "red"), linetype = "dotted", size = .5) + + geom_point(data = sm_srer.Dayavg$"502.2", aes(x = Date, y = DayAvgsm, color = "green"), size = 1) + + geom_line(data = sm_srer.Dayavg$"502.2", aes(x = Date, y = DayAvgsm, color = "green"), linetype = "dotted", size = .5) + + geom_point(data = sm_srer.Dayavg$"502.3", aes(x = Date, y = DayAvgsm, color = "purple"), size = 1) + + geom_line(data = 
sm_srer.Dayavg$"502.3", aes(x = Date, y = DayAvgsm, color = "purple"), linetype = "dotted", size = .5) + + geom_point(data = sm_srer.Dayavg$"502.4", aes(x = Date, y = DayAvgsm, color = "orange"), size = 1) + + geom_line(data = sm_srer.Dayavg$"502.4", aes(x = Date, y = DayAvgsm, color = "orange"), linetype = "dotted", size = .5) + + geom_point(data = sm_srer.Dayavg$"502.5", aes(x = Date, y = DayAvgsm, color = "yellow"), size = 1) + + geom_line(data = sm_srer.Dayavg$"502.5", aes(x = Date, y = DayAvgsm, color = "yellow"), linetype = "dotted", size = .5) + + ylim(0, 60) + ggtitle("SMAP vs Daily Field Data: Santa Rita") + - labs(x = "Date", - y = "Soil Moisture (%)", - color = "Legend\n") + + labs( + x = "Date", + y = "Soil Moisture (%)", + color = "Legend\n" + ) + scale_color_identity( - breaks = c("steel blue","red", "green", "purple", "orange", "yellow"), + breaks = c("steel blue", "red", "green", "purple", "orange", "yellow"), labels = c("SMAP", "Field 1 (-6cm)", "Field 2 (-6cm)", "Field 3 (-6cm)", "Field 4 (-6cm)", "Field 5 (-6cm)"), - guide = "legend") + + guide = "legend" + ) + theme( legend.position = "none", - legend.title = element_blank()) + legend.title = element_blank() + ) # 1/2 hr field data vs daily smap (6am) -srer.half = ggplot() + - geom_line(data = sm_srer.Dayavg$'502.1', aes(x=Date, y=DayAvgsm, color = "red"), linetype = "dotted", size=.5) + - geom_point(data = sm_srer.Dayavg$'502.1', aes(x=Date, y=DayAvgsm, color = "red"), size=1) + - geom_line(data = sm_srer.Dayavg$'502.1', aes(x=Date, y=DayAvgsm, color = "red"),linetype = "dotted", size=.5) + - geom_point(data = sm_srer.Dayavg$'502.2', aes(x=Date, y=DayAvgsm, color = "green"), size=1) + - geom_line(data = sm_srer.Dayavg$'502.2', aes(x=Date, y=DayAvgsm, color = "green"), linetype = "dotted", size=.5) + - geom_point(data = sm_srer.Dayavg$'502.3', aes(x=Date, y=DayAvgsm, color = "purple"), size=1) + - geom_line(data = sm_srer.Dayavg$'502.3', aes(x=Date, y=DayAvgsm, color = "purple"), linetype = "dotted", size=.5) + - geom_point(data = sm_srer.Dayavg$'502.4', aes(x=Date, y=DayAvgsm, color = "orange"), size=1) + - geom_line(data = sm_srer.Dayavg$'502.4', aes(x=Date, y=DayAvgsm, color = "orange"), linetype = "dotted", size=.5) + - geom_point(data = sm_srer.Dayavg$'502.5', aes(x=Date, y=DayAvgsm, color = "yellow"), size=1) + - geom_line(data = sm_srer.Dayavg$'502.5', aes(x=Date, y=DayAvgsm, color = "yellow"), linetype = "dotted", size=.5) + - geom_line(data = na.omit(srer.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue")) + - geom_point(data = na.omit(srer.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue"), size=1) + - ylim(0,60) + +srer.half <- ggplot() + + geom_line(data = sm_srer.Dayavg$"502.1", aes(x = Date, y = DayAvgsm, color = "red"), linetype = "dotted", size = .5) + + geom_point(data = sm_srer.Dayavg$"502.1", aes(x = Date, y = DayAvgsm, color = "red"), size = 1) + + geom_line(data = sm_srer.Dayavg$"502.1", aes(x = Date, y = DayAvgsm, color = "red"), linetype = "dotted", size = .5) + + geom_point(data = sm_srer.Dayavg$"502.2", aes(x = Date, y = DayAvgsm, color = "green"), size = 1) + + geom_line(data = sm_srer.Dayavg$"502.2", aes(x = Date, y = DayAvgsm, color = "green"), linetype = "dotted", size = .5) + + geom_point(data = sm_srer.Dayavg$"502.3", aes(x = Date, y = DayAvgsm, color = "purple"), size = 1) + + geom_line(data = sm_srer.Dayavg$"502.3", aes(x = Date, y = DayAvgsm, color = "purple"), linetype = "dotted", size = .5) + + geom_point(data = sm_srer.Dayavg$"502.4", aes(x = Date, y = DayAvgsm, color = 
"orange"), size = 1) + + geom_line(data = sm_srer.Dayavg$"502.4", aes(x = Date, y = DayAvgsm, color = "orange"), linetype = "dotted", size = .5) + + geom_point(data = sm_srer.Dayavg$"502.5", aes(x = Date, y = DayAvgsm, color = "yellow"), size = 1) + + geom_line(data = sm_srer.Dayavg$"502.5", aes(x = Date, y = DayAvgsm, color = "yellow"), linetype = "dotted", size = .5) + + geom_line(data = na.omit(srer.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue")) + + geom_point(data = na.omit(srer.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue"), size = 1) + + ylim(0, 60) + ggtitle("SMAP vs 1/2 hr Field Data: Santa Rita") + - labs(x = "Date", - y = "Soil Moisture (%)", - color = "Legend\n") + + labs( + x = "Date", + y = "Soil Moisture (%)", + color = "Legend\n" + ) + scale_color_identity( - breaks = c("steel blue","red", "green", "purple", "orange", "yellow"), + breaks = c("steel blue", "red", "green", "purple", "orange", "yellow"), labels = c("SMAP", "Field 1 (-6cm)", "Field 2 (-6cm)", "Field 3 (-6cm)", "Field 4 (-6cm)", "Field 5 (-6cm)"), - guide = "legend") + + guide = "legend" + ) + theme( legend.position = "bottom", - legend.title = element_blank()) + legend.title = element_blank() + ) grid.arrange(srer.d, srer.half) @@ -534,89 +588,97 @@ plot(srer.half) ######## NEON data ######## -#download and get daily average -KONA_ssm = split(KONA, list(KONA$verticalPosition, KONA$horizontalPosition, KONA$VSWCFinalQF)) -KONA_ssm = split(KONA, KONA$VSWCFinalQF) -sm_kona = KONA_ssm$'0' %>% +# download and get daily average +KONA_ssm <- split(KONA, list(KONA$verticalPosition, KONA$horizontalPosition, KONA$VSWCFinalQF)) +KONA_ssm <- split(KONA, KONA$VSWCFinalQF) +sm_kona <- KONA_ssm$"0" %>% na.omit() %>% dplyr::select(startDateTime, VSWCMean, horizontalPosition, verticalPosition) %>% mutate(Day = day(startDateTime), Month = month(startDateTime), Year = year(startDateTime)) %>% group_by(Year, Month, Day) -sm_kona$Date = as.Date(with(sm_kona, paste(Year, Month, Day, sep="-")), "%Y-%m-%d") -sm_kona$VSWCMean = sm_kona$VSWCMean * 100 -sm_kona = split(sm_kona, list(sm_kona$verticalPosition, sm_kona$horizontalPosition)) - -sm_kona.Dayavg = vector(mode = "list", length = 40) -names(sm_kona.Dayavg) = names(sm_kona) -for (i in 1:length(sm_kona)){ - sm_kona.Dayavg[[i]] = dplyr::select(sm_kona[[i]], Date, VSWCMean) %>% +sm_kona$Date <- as.Date(with(sm_kona, paste(Year, Month, Day, sep = "-")), "%Y-%m-%d") +sm_kona$VSWCMean <- sm_kona$VSWCMean * 100 +sm_kona <- split(sm_kona, list(sm_kona$verticalPosition, sm_kona$horizontalPosition)) + +sm_kona.Dayavg <- vector(mode = "list", length = 40) +names(sm_kona.Dayavg) <- names(sm_kona) +for (i in 1:length(sm_kona)) { + sm_kona.Dayavg[[i]] <- dplyr::select(sm_kona[[i]], Date, VSWCMean) %>% summarise(DayAvgsm = mean(VSWCMean)) %>% ungroup() - sm_kona.Dayavg[[i]]$Date = as.Date(with(sm_kona.Dayavg[[i]], paste(Year, Month, Day, sep="-")), "%Y-%m-%d") + sm_kona.Dayavg[[i]]$Date <- as.Date(with(sm_kona.Dayavg[[i]], paste(Year, Month, Day, sep = "-")), "%Y-%m-%d") } ######## Download SMAP data ######## -geoJSON_outdir = "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" -smap_outdir = "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" +geoJSON_outdir <- "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" +smap_outdir <- "/projectnb/dietzelab/jbateman/pecan/modules/data.remote/inst" site_info <- list( site_id = 1000004925, site_name = "KONA", lat = 39.11044, lon = -96.61295, - time_zone = "UTC") -kona.smap_sm = 
download_SMAP_gee2pecan(start, end, site_info, geoJSON_outdir, smap_outdir) + time_zone = "UTC" +) +kona.smap_sm <- download_SMAP_gee2pecan(start, end, site_info, geoJSON_outdir, smap_outdir) ##### plot time series # Daily sm average -kona.d = ggplot() + - geom_line(data = na.omit(kona.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue")) + - geom_point(data = na.omit(kona.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue"), size=1) + - geom_line(data = sm_kona.Dayavg, aes(x=Date, y=DayAvgsm, color = "red"), linetype = "dashed") + - ylim(0,60) + +kona.d <- ggplot() + + geom_line(data = na.omit(kona.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue")) + + geom_point(data = na.omit(kona.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue"), size = 1) + + geom_line(data = sm_kona.Dayavg, aes(x = Date, y = DayAvgsm, color = "red"), linetype = "dashed") + + ylim(0, 60) + ggtitle("SMAP vs Daily Field Data: Konza Prairie") + - labs(x = "Date", - y = "Soil Moisture (%)", - color = "Legend\n") + + labs( + x = "Date", + y = "Soil Moisture (%)", + color = "Legend\n" + ) + scale_color_identity( - breaks = c("steel blue","red"), + breaks = c("steel blue", "red"), labels = c("SMAP", "Field"), - guide = "legend") + + guide = "legend" + ) + theme( legend.position = "none", - legend.title = element_blank()) + legend.title = element_blank() + ) # 1/2 hr field data vs daily smap (6am) -kona.half = ggplot() + - geom_line(data = sm_kona.Dayavg$'502.1', aes(x=Date, y=DayAvgsm, color = "red"), linetype = "dotted", size=.5) + - geom_point(data = sm_kona.Dayavg$'502.1', aes(x=Date, y=DayAvgsm, color = "red"), size=1) + - geom_line(data = sm_kona.Dayavg$'502.1', aes(x=Date, y=DayAvgsm, color = "red"),linetype = "dotted", size=.5) + - geom_point(data = sm_kona.Dayavg$'502.2', aes(x=Date, y=DayAvgsm, color = "green"), size=1) + - geom_line(data = sm_kona.Dayavg$'502.2', aes(x=Date, y=DayAvgsm, color = "green"), linetype = "dotted", size=.5) + - geom_point(data = sm_kona.Dayavg$'502.3', aes(x=Date, y=DayAvgsm, color = "purple"), size=1) + - geom_line(data = sm_kona.Dayavg$'502.3', aes(x=Date, y=DayAvgsm, color = "purple"), linetype = "dotted", size=.5) + - geom_point(data = sm_kona.Dayavg$'502.4', aes(x=Date, y=DayAvgsm, color = "orange"), size=1) + - geom_line(data = sm_kona.Dayavg$'502.4', aes(x=Date, y=DayAvgsm, color = "orange"), linetype = "dotted", size=.5) + - geom_point(data = sm_kona.Dayavg$'502.5', aes(x=Date, y=DayAvgsm, color = "yellow"), size=1) + - geom_line(data = sm_kona.Dayavg$'502.5', aes(x=Date, y=DayAvgsm, color = "yellow"), linetype = "dotted", size=.5) + - geom_line(data = na.omit(kona.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue")) + - geom_point(data = na.omit(kona.smap_sm), aes(x=Date, y=ssm.vol, color="steel blue"), size=1) + +kona.half <- ggplot() + + geom_line(data = sm_kona.Dayavg$"502.1", aes(x = Date, y = DayAvgsm, color = "red"), linetype = "dotted", size = .5) + + geom_point(data = sm_kona.Dayavg$"502.1", aes(x = Date, y = DayAvgsm, color = "red"), size = 1) + + geom_line(data = sm_kona.Dayavg$"502.1", aes(x = Date, y = DayAvgsm, color = "red"), linetype = "dotted", size = .5) + + geom_point(data = sm_kona.Dayavg$"502.2", aes(x = Date, y = DayAvgsm, color = "green"), size = 1) + + geom_line(data = sm_kona.Dayavg$"502.2", aes(x = Date, y = DayAvgsm, color = "green"), linetype = "dotted", size = .5) + + geom_point(data = sm_kona.Dayavg$"502.3", aes(x = Date, y = DayAvgsm, color = "purple"), size = 1) + + geom_line(data = sm_kona.Dayavg$"502.3", aes(x = Date, y = 
DayAvgsm, color = "purple"), linetype = "dotted", size = .5) + + geom_point(data = sm_kona.Dayavg$"502.4", aes(x = Date, y = DayAvgsm, color = "orange"), size = 1) + + geom_line(data = sm_kona.Dayavg$"502.4", aes(x = Date, y = DayAvgsm, color = "orange"), linetype = "dotted", size = .5) + + geom_point(data = sm_kona.Dayavg$"502.5", aes(x = Date, y = DayAvgsm, color = "yellow"), size = 1) + + geom_line(data = sm_kona.Dayavg$"502.5", aes(x = Date, y = DayAvgsm, color = "yellow"), linetype = "dotted", size = .5) + + geom_line(data = na.omit(kona.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue")) + + geom_point(data = na.omit(kona.smap_sm), aes(x = Date, y = ssm.vol, color = "steel blue"), size = 1) + ggtitle("SMAP vs 1/2 hr Field Data: Konza Prairie") + - labs(x = "Date", - y = "Soil Moisture (%)", - color = "Legend\n") + + labs( + x = "Date", + y = "Soil Moisture (%)", + color = "Legend\n" + ) + scale_color_identity( - breaks = c("steel blue","red", "green", "purple", "orange", "yellow"), + breaks = c("steel blue", "red", "green", "purple", "orange", "yellow"), labels = c("SMAP", "Field 1 (-6cm)", "Field 2 (-6cm)", "Field 3 (-6cm)", "Field 4 (-6cm)", "Field 5 (-6cm)"), - guide = "legend") + + guide = "legend" + ) + theme( legend.position = "bottom", - legend.title = element_blank()) + legend.title = element_blank() + ) grid.arrange(kona.d, kona.half) plot(kona.half) - diff --git a/modules/data.remote/inst/SMAPvsModel_Comparison.R b/modules/data.remote/inst/SMAPvsModel_Comparison.R index ecb1165f992..01aa987bcf9 100644 --- a/modules/data.remote/inst/SMAPvsModel_Comparison.R +++ b/modules/data.remote/inst/SMAPvsModel_Comparison.R @@ -1,57 +1,57 @@ -##'@name SMAPvsModel_Comparison -##'@description: This script collects ensemble model data from a specified file location, -##'analyzes that data to produce two exploratory graphs, and finally compares the -##'model data to actual soil moisture data collected from NASA's SMAP satellite to -##'produce an additional 5 graphs and a set of comparative statistics including: pbias, -##'RMSE, CRPS, Correlation, and r-squared values. +##' @name SMAPvsModel_Comparison +##' @description: This script collects ensemble model data from a specified file location, +##' analyzes that data to produce two exploratory graphs, and finally compares the +##' model data to actual soil moisture data collected from NASA's SMAP satellite to +##' produce an additional 5 graphs and a set of comparative statistics including: pbias, +##' RMSE, CRPS, Correlation, and r-squared values. ##' -##'This script is broken into 2 functions: runSMAPvalidation and main. runSMAPvalidation can -##'be run on its own, however, the main method was developed to make utilizing runSMAPvalidation -##'more streamlined. +##' This script is broken into 2 functions: runSMAPvalidation and main. runSMAPvalidation can +##' be run on its own; however, the main method was developed to make utilizing runSMAPvalidation +##' more streamlined. ##' -##'This script also utilize the download_SMAP.R and netCDFvarExtraction scripts. +##' This script also utilizes the download_SMAP.R and netCDFvarExtraction scripts. ##' -##'WARNING! D17 NEON sites including Lower Teakettle, Soaproot Saddle, and San Joaquin -##'Experimental Range are not available available in SMAP and thus should not be -##'run with this script. +##' WARNING! D17 NEON sites including Lower Teakettle, Soaproot Saddle, and San Joaquin +##' Experimental Range are not available in SMAP and thus should not be +##' run with this script. 
##' -##'@author Joshua Bowers +##' @author Joshua Bowers ##' -##'@examples +##' @examples ##' -##'## Desired Date Range ## -##'test_start <- '2016-01-02' -##'test_end <- '2016-07-16' +##' ## Desired Date Range ## +##' test_start <- '2016-01-02' +##' test_end <- '2016-07-16' ##' -##'## Directory of Ensembles Files ## -##'test_ensemble_dir <- '/projectnb2/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA_testrun/out/' +##' ## Directory of Ensembles Files ## +##' test_ensemble_dir <- '/projectnb2/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA_testrun/out/' ##' -##'## CONUS NEON Sites XML File Location ## - could also be vector of all conus site IDs -##'test_input_dir <- '~/pecan/modules/data.remote/inst/conus_sites.xml' +##' ## CONUS NEON Sites XML File Location ## - could also be vector of all conus site IDs +##' test_input_dir <- '~/pecan/modules/data.remote/inst/conus_sites.xml' ##' -##'## File Directory to Place Output PDFs In ## -##'test_output_dir <- '/projectnb/dietzelab/jbowers1/graphic_output/' +##' ## File Directory to Place Output PDFs In ## +##' test_output_dir <- '/projectnb/dietzelab/jbowers1/graphic_output/' ##' -##'## Output Directories for geoJson Files ## -##'test_geoJSON_outdir <- '/projectnb/dietzelab/jbowers1/geoFiles/' +##' ## Output Directories for geoJson Files ## +##' test_geoJSON_outdir <- '/projectnb/dietzelab/jbowers1/geoFiles/' ##' -##'## Output Directories for SMAP netCDF Files ## -##'test_smap_outdir <- '/projectnb/dietzelab/jbowers1/smap_ncFiles/' +##' ## Output Directories for SMAP netCDF Files ## +##' test_smap_outdir <- '/projectnb/dietzelab/jbowers1/smap_ncFiles/' ##' -##'# Running All Sites # -##'all_sites <- PEcAn.settings::read.settings(test_input_dir) -##'sites_vector <- c() +##' # Running All Sites # +##' all_sites <- PEcAn.settings::read.settings(test_input_dir) +##' sites_vector <- c() ##' -##'for (index in 1:length(all_sites)) { -##'sites_vector <- append(sites_vector, all_sites[[index]]$run$site$id) -##'} +##' for (index in 1:length(all_sites)) { +##' sites_vector <- append(sites_vector, all_sites[[index]]$run$site$id) +##' } ##' -##'sites_vector <- sites_vector[c(-4, -33, -37)] # Removes D17 sites that are not collected by SMAP -##'main_test_all <- main(sites_vector, test_start, test_end, -##'pdf_output = test_output_dir, -##'geoJSON_outdir = test_geoJSON_outdir, -##'smap_outdir = test_smap_outdir, -##'ensemble_dir = test_ensemble_dir) +##' sites_vector <- sites_vector[c(-4, -33, -37)] # Removes D17 sites that are not collected by SMAP +##' main_test_all <- main(sites_vector, test_start, test_end, +##' pdf_output = test_output_dir, +##' geoJSON_outdir = test_geoJSON_outdir, +##' smap_outdir = test_smap_outdir, +##' ensemble_dir = test_ensemble_dir) ##' # load necessary packages @@ -71,7 +71,7 @@ library(Metrics) library(verification) #' @title runSMAPvalidation -#' +#' #' @description runs multiple analyses on the specified site during a given date range #' and has the functionality to compare those values to actual soil moisture values #' collected from SMAP. 
If smpa_data is left as default, no comparison will be made @@ -80,162 +80,190 @@ library(verification) #' @param betySiteId bety site id for the desired location as (String) #' @param start start date of validation and comparison analysis YYYY-MM-DD as (Date) #' @param end end date of validation and comparison analysis YYYY-MM-DD as (Date) -#' @param smap_data downloaded SMAP data as (data.frame) -- see download_SMAP.R module +#' @param smap_data downloaded SMAP data as (data.frame) -- see download_SMAP.R module #' @param ensemble_dir directory of ensembles to run validation as (String) #' #' @return returns a list consisting all analysis plots and statistics #' -#' @examples -#' harv.var <- runSMAPvalidation('1000004945', as.Date('2016-01-02'), as.Date('2016-07-16'), -#' download_SMAP('1000004945', '2016-01-02', '2016-07-16', -#' '/projectnb/dietzelab/jbowers1/geoFiles/', -#' '/projectnb/dietzelab/jbowers1/smap_ncFiles/'), -#' '/projectnb2/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA_testrun/out/') -#' -runSMAPvalidation <- function(betySiteId, start, end, smap_data = NULL, ensemble_dir){ - - source('~/pecan/modules/data.remote/inst/netCDFvarExtraction.R') - moistureFrame <- extractVariableENS(betySiteId = betySiteId, predictVar = 'SoilMoist', - start = start, end = end, ensemble_dir = ensemble_dir) - +#' @examples +#' harv.var <- runSMAPvalidation( +#' "1000004945", as.Date("2016-01-02"), as.Date("2016-07-16"), +#' download_SMAP( +#' "1000004945", "2016-01-02", "2016-07-16", +#' "/projectnb/dietzelab/jbowers1/geoFiles/", +#' "/projectnb/dietzelab/jbowers1/smap_ncFiles/" +#' ), +#' "/projectnb2/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA_testrun/out/" +#' ) +#' +runSMAPvalidation <- function(betySiteId, start, end, smap_data = NULL, ensemble_dir) { + source("~/pecan/modules/data.remote/inst/netCDFvarExtraction.R") + moistureFrame <- extractVariableENS( + betySiteId = betySiteId, predictVar = "SoilMoist", + start = start, end = end, ensemble_dir = ensemble_dir + ) + ## Grouping data by Date ## - grouped_ENS <- moistureFrame %>% - group_by(time) %>% + grouped_ENS <- moistureFrame %>% + group_by(time) %>% mutate(max = max(SoilMoist)) %>% mutate(min = min(SoilMoist)) - + ## Calculating Mean Per Ensemble ## mean_per_ENS <- summarize(grouped_ENS, mean = mean(SoilMoist)) - + ## Calculating Standard Deviation ## sd_per_ENS <- summarize(grouped_ENS, sd = sd(SoilMoist)) - + ## Calculating .025 and .975 Quantiles ## quantiles_lower <- summarize(grouped_ENS, q_lower = quantile(SoilMoist, .025)) quantiles_upper <- summarize(grouped_ENS, q_upper = quantile(SoilMoist, .975)) - + ## Hex Data Distribution with Mean-line Highlighted ## - p1 <- ggplot(moistureFrame, aes(x = time, y = SoilMoist)) + geom_hex() + theme_bw() + - guides(fill=guide_legend(title = 'Frequency of Model Data')) + + p1 <- ggplot(moistureFrame, aes(x = time, y = SoilMoist)) + + geom_hex() + + theme_bw() + + guides(fill = guide_legend(title = "Frequency of Model Data")) + theme(plot.title = element_text(hjust = 0.5)) + - ggtitle('Hex Distribution of Model Data') + - geom_line(data = mean_per_ENS, aes(x = time, y = mean, color = 'Mean of Model Data')) + - labs(colour = '') + ylab('Surface Soil Moisture (ssm)') + xlab('Date of Prediction') + - labs(colour = 'Legend') + ggtitle("Hex Distribution of Model Data") + + geom_line(data = mean_per_ENS, aes(x = time, y = mean, color = "Mean of Model Data")) + + labs(colour = "") + + ylab("Surface Soil Moisture (ssm)") + + xlab("Date of Prediction") + + labs(colour = "Legend") print(p1) - + ## 
Min/Max Values with 95% Quantile Range ## Still has messed up color legend - p2 <- ggplot(grouped_ENS, aes(x = time, y = SoilMoist)) + theme_bw() + - ggtitle('Daily Min and Max Values with 95% Quantile Range in Black') + - theme(legend.position = c(0.87, 0.75), - legend.background = element_rect(fill = "white", color = "black"), - plot.title = element_text(hjust = 0.5)) + - geom_point(data = grouped_ENS, aes(x = time, y = max, color = 'Daily Max')) + - geom_point(data = grouped_ENS, aes(x = time, y = min, color = 'Daily Min')) + - geom_line(data = mean_per_ENS, aes(x = time, y = mean, color = 'Mean')) + + p2 <- ggplot(grouped_ENS, aes(x = time, y = SoilMoist)) + + theme_bw() + + ggtitle("Daily Min and Max Values with 95% Quantile Range in Black") + + theme( + legend.position = c(0.87, 0.75), + legend.background = element_rect(fill = "white", color = "black"), + plot.title = element_text(hjust = 0.5) + ) + + geom_point(data = grouped_ENS, aes(x = time, y = max, color = "Daily Max")) + + geom_point(data = grouped_ENS, aes(x = time, y = min, color = "Daily Min")) + + geom_line(data = mean_per_ENS, aes(x = time, y = mean, color = "Mean")) + geom_line(data = quantiles_lower, aes(x = time, y = q_lower)) + - geom_line(data = quantiles_upper, aes(x = time, y = q_upper)) + - xlab('Date of Prediction') + ylab('Surface Soil Moisture (ssm)') + - labs(colour = 'Legend') + geom_line(data = quantiles_upper, aes(x = time, y = q_upper)) + + xlab("Date of Prediction") + + ylab("Surface Soil Moisture (ssm)") + + labs(colour = "Legend") print(p2) - + if (!is.null(smap_data)) { ## Analyses Requiring Actual SMAP Data ## smap_data <- smap_data[[betySiteId]] - - + + ## Quick Plot of Actual SMAP Data ## - plot(smap_data$Date, smap_data$ssm, main = 'SMAP ssm (Surface Soil Moisture) vs Time', - xlab = 'Date of Capture', ylab = 'Surface Soil Moisture (ssm)') + plot(smap_data$Date, smap_data$ssm, + main = "SMAP ssm (Surface Soil Moisture) vs Time", + xlab = "Date of Capture", ylab = "Surface Soil Moisture (ssm)" + ) p3 <- recordPlot() - + ## Hex distribution of Model Data with Actual Values as well as Mean and Quantile Range (Way too much) ## - p4 <- ggplot(moistureFrame, aes(x = time, y = SoilMoist)) + geom_hex() + theme_bw() + - geom_line(data = mean_per_ENS, aes(x = time, y = mean, color = 'Mean of Model Data')) + - geom_line(data = quantiles_lower, aes(x = time, y = q_lower, color = '95% Quantile Range')) + - geom_line(data = quantiles_upper, aes(x = time, y = q_upper, color = '95% Quantile Range')) + - geom_line(data = smap_data, aes(x = as_datetime(Date), y = ssm, color = 'Actual SMAP Values')) + - ggtitle('Hex Distribution, Quantile Range, and Mean of \nModel Data Along with Actual SMAP Values') + - theme(plot.title = element_text(hjust = 0.5)) + xlab('Date of Prediction/Collection') + - ylab('Surface Soil Moisture (ssm)') + labs(colour = 'Legend') + - guides(fill=guide_legend(title = 'Frequency of Model Data')) + p4 <- ggplot(moistureFrame, aes(x = time, y = SoilMoist)) + + geom_hex() + + theme_bw() + + geom_line(data = mean_per_ENS, aes(x = time, y = mean, color = "Mean of Model Data")) + + geom_line(data = quantiles_lower, aes(x = time, y = q_lower, color = "95% Quantile Range")) + + geom_line(data = quantiles_upper, aes(x = time, y = q_upper, color = "95% Quantile Range")) + + geom_line(data = smap_data, aes(x = as_datetime(Date), y = ssm, color = "Actual SMAP Values")) + + ggtitle("Hex Distribution, Quantile Range, and Mean of \nModel Data Along with Actual SMAP Values") + + theme(plot.title = 
element_text(hjust = 0.5)) + + xlab("Date of Prediction/Collection") + + ylab("Surface Soil Moisture (ssm)") + + labs(colour = "Legend") + + guides(fill = guide_legend(title = "Frequency of Model Data")) print(p4) - + ## Actual Data with Mean and 95% quantile range of Model Data ## - p5 <- ggplot() + theme_bw() + - geom_line(data = mean_per_ENS, aes(x = time, y = mean, color = 'Mean of Model Data')) + - geom_line(data = quantiles_lower, aes(x = time, y = q_lower, color = '95% Quantile Range')) + - geom_line(data = quantiles_upper, aes(x = time, y = q_upper, color = '95% Quantile Range')) + - geom_line(data = smap_data, aes(x = as_datetime(Date), y = ssm, color = 'Actual SMAP Values')) + - ggtitle('Actual SMAP vs Mean and 95% Quantile Range of Model Data') + - theme(plot.title = element_text(hjust = 0.5)) + xlab('Date of Prediction/Collection') + - ylab('Surface Soil Moisture (ssm)') + labs(colour = 'Legend') + p5 <- ggplot() + + theme_bw() + + geom_line(data = mean_per_ENS, aes(x = time, y = mean, color = "Mean of Model Data")) + + geom_line(data = quantiles_lower, aes(x = time, y = q_lower, color = "95% Quantile Range")) + + geom_line(data = quantiles_upper, aes(x = time, y = q_upper, color = "95% Quantile Range")) + + geom_line(data = smap_data, aes(x = as_datetime(Date), y = ssm, color = "Actual SMAP Values")) + + ggtitle("Actual SMAP vs Mean and 95% Quantile Range of Model Data") + + theme(plot.title = element_text(hjust = 0.5)) + + xlab("Date of Prediction/Collection") + + ylab("Surface Soil Moisture (ssm)") + + labs(colour = "Legend") print(p5) - + ## Collapsing Temporal Scale to Match SMAP Output Data ## vec_time <- vector() vec_mean <- vector() vec_sd <- vector() running_mean <- 0 running_sd <- 0 - + for (row in 1:nrow(mean_per_ENS)) { cur_day <- as_date(mean_per_ENS$time[row]) - 1 ## -1 to match days. 
This really isn't that necessary - running_mean <- running_mean + as.numeric(mean_per_ENS[row, 'mean']) - running_sd <- running_sd + as.numeric(sd_per_ENS[row, 'sd']) - - if (row %% 24 == 0){ ## 1:24 ratio between actual and model sample duration + running_mean <- running_mean + as.numeric(mean_per_ENS[row, "mean"]) + running_sd <- running_sd + as.numeric(sd_per_ENS[row, "sd"]) + + if (row %% 24 == 0) { ## 1:24 ratio between actual and model sample duration vec_time <- append(vec_time, cur_day) vec_mean <- append(vec_mean, (running_mean / 24)) vec_sd <- append(vec_sd, running_sd / 24) running_mean <- 0 - running_sd <- 0 + running_sd <- 0 cur_day <- (cur_day + 3) } } df <- data.frame(vec_time, vec_mean, vec_sd) - + ## Quick Plot of Actual SMAP Data vs Mean Model Data ## mod <- lm(smap_data$ssm ~ df$vec_mean) - plot(df$vec_mean, smap_data$ssm, main= 'Actual SMAP vs Mean Model Data\nw/ Linear Regression Line', - xlab = 'Mean of Model Data', ylab = 'Actual SMAP (ssm)') + abline(mod) + plot(df$vec_mean, smap_data$ssm, + main = "Actual SMAP vs Mean Model Data\nw/ Linear Regression Line", + xlab = "Mean of Model Data", ylab = "Actual SMAP (ssm)" + ) + abline(mod) p6 <- recordPlot() - + ## Calculating Bias ## - pbias <- pbias(df['vec_mean'], smap_data['ssm'], rm.NA=TRUE) - + pbias <- pbias(df["vec_mean"], smap_data["ssm"], rm.NA = TRUE) + ## Calculating RMSE ## RMSE <- rmse(smap_data$ssm, df$vec_mean) - + ## Calculating CRPS ## CRPS <- crps(smap_data$ssm, as.matrix(cbind(df$vec_mean, df$vec_sd)))$CRPS - + ## Calculating Correlation ## correlation <- cor(df$vec_mean, smap_data$ssm) - + ## Stat Summary of Linear Regression Model Including R^2 Value ## r.squared <- summary(mod)$r.squared - + ## Outputs ## - cat(paste0('\nStats for site ', betySiteId, ':')) - cat('\nPercent Bias:', pbias, '%\nRMSE:', RMSE,'\nCRPS:', CRPS, - '\nCorrelation:', correlation, '\nR-Squared:', r.squared, '\n') - + cat(paste0("\nStats for site ", betySiteId, ":")) + cat( + "\nPercent Bias:", pbias, "%\nRMSE:", RMSE, "\nCRPS:", CRPS, + "\nCorrelation:", correlation, "\nR-Squared:", r.squared, "\n" + ) + ## Quick Plot of Residuals vs. Time ## - plot(df$vec_time, mod$residuals, main='Residuals vs. Time', xlab = 'Date of Collection', - ylab = 'Residuals of SMAP and Model Data') + abline(h = 0, lty = 2) + plot(df$vec_time, mod$residuals, + main = "Residuals vs. 
Time", xlab = "Date of Collection", + ylab = "Residuals of SMAP and Model Data" + ) + abline(h = 0, lty = 2) p7 <- recordPlot() - + vtr <- list(betySiteId, p1, p2, p3, p4, p5, p6, p7, pbias, RMSE, CRPS, correlation, r.squared) - - names(vtr) <- c('betySiteId', 'Plot1', 'Plot2', 'Plot3', 'Plot4', 'Plot5', 'Plot6', 'Plot7', - 'pbias','RMSE', 'CRPS', 'correlation', 'r.squared') - + + names(vtr) <- c( + "betySiteId", "Plot1", "Plot2", "Plot3", "Plot4", "Plot5", "Plot6", "Plot7", + "pbias", "RMSE", "CRPS", "correlation", "r.squared" + ) } else { - vtr <- list(betySiteId, p1, p2) - - names(vtr) <- c('betySiteId', 'Plot1', 'Plot2') + + names(vtr) <- c("betySiteId", "Plot1", "Plot2") } return(vtr) } @@ -249,51 +277,54 @@ runSMAPvalidation <- function(betySiteId, start, end, smap_data = NULL, ensemble #' @param geoJSON_outdir directory to place geoJSON files necessary for SMAP download as (String) #' @param smap_outdir directory to place SMPA output netCDF files as (String) #' @param ensemble_dir directory of ensembles to run validation as (String) -#' +#' #' @return data frame object containing stats for each site #' -#' @examples -#'## runs validation on 1000004945, 1000004876, 1000004927, 1000004927 sites ## -#'main_test_sample <- main(c('1000004945', '1000004876', '1000004927', '1000004927'), -#'start = '2016-01-02', -#'end = '2016-07-16', -#'pdf_output = '/projectnb/dietzelab/jbowers1/graphic_output/', -#'geoJSON_outdir = '/projectnb/dietzelab/jbowers1/geoFiles/', -#'smap_outdir = '/projectnb/dietzelab/jbowers1/smap_ncFiles/', -#'ensemble_dir = '/projectnb2/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA_testrun/out/') +#' @examples +#' ## runs validation on 1000004945, 1000004876, 1000004927, 1000004927 sites ## +#' main_test_sample <- main(c("1000004945", "1000004876", "1000004927", "1000004927"), +#' start = "2016-01-02", +#' end = "2016-07-16", +#' pdf_output = "/projectnb/dietzelab/jbowers1/graphic_output/", +#' geoJSON_outdir = "/projectnb/dietzelab/jbowers1/geoFiles/", +#' smap_outdir = "/projectnb/dietzelab/jbowers1/smap_ncFiles/", +#' ensemble_dir = "/projectnb2/dietzelab/dongchen/All_NEON_SDA/NEON42/SDA_testrun/out/" +#' ) #' -#' -main <- function(sites, start, end, pdf_output, geoJSON_outdir, smap_outdir, ensemble_dir){ +main <- function(sites, start, end, pdf_output, geoJSON_outdir, smap_outdir, ensemble_dir) { ## Downloads SMAP data for sites ## - source('~/pecan/modules/data.remote/inst/download_SMAP.R') + source("~/pecan/modules/data.remote/inst/download_SMAP.R") smap_data <- download_SMAP(sites, start, end, geoJSON_outdir, smap_outdir) - + ## Convert to Date for Validation ## start <- as.Date(start) end <- as.Date(end) - - stat_frame <- data.frame(site_id = character(), - pbias = double(), - RMSE = double(), - CRPS = double(), - correlation = double(), - r.squared = double()) - + + stat_frame <- data.frame( + site_id = character(), + pbias = double(), + RMSE = double(), + CRPS = double(), + correlation = double(), + r.squared = double() + ) + ## run analysis on each specified site ## count <- 0 for (site in sites) { count <- count + 1 ## Prints graphics to pdf and stats to returned df ## - pdf(paste0(pdf_output, site,'.pdf'), width = 7.5, height = 5) + pdf(paste0(pdf_output, site, ".pdf"), width = 7.5, height = 5) temp_run <- runSMAPvalidation(site, start, end, smap_data, ensemble_dir) - stat_frame[count, ] <- list(temp_run$betySiteId, - temp_run$pbias, - temp_run$RMSE, - temp_run$CRPS, - temp_run$correlation, - temp_run$r.squared) + stat_frame[count, ] <- list( + 
temp_run$betySiteId, + temp_run$pbias, + temp_run$RMSE, + temp_run$CRPS, + temp_run$correlation, + temp_run$r.squared + ) dev.off() } return(stat_frame) } - diff --git a/modules/data.remote/inst/download_SMAP.R b/modules/data.remote/inst/download_SMAP.R index 8fcd917f609..23e83acdbb5 100644 --- a/modules/data.remote/inst/download_SMAP.R +++ b/modules/data.remote/inst/download_SMAP.R @@ -1,17 +1,17 @@ -##'@name download_SMAP -##'@description: This script downloads SMAP data for all CONUS NEON sites +##' @name download_SMAP +##' @description: This script downloads SMAP data for all CONUS NEON sites ##' -##'Inputs: -##'1. Data range to produce SMAP data -##'2. File location containing XML file of all 39 CONUS NEON sites and their information +##' Inputs: +##' 1. Data range to produce SMAP data +##' 2. File location containing XML file of all 39 CONUS NEON sites and their information ##' ##' -##'@author Joshua Bowers +##' @author Joshua Bowers ## read in CONUS sites from xml file ## -conus_sites <- PEcAn.settings::read.settings('~/pecan/modules/data.remote/inst/conus_sites.xml') +conus_sites <- PEcAn.settings::read.settings("~/pecan/modules/data.remote/inst/conus_sites.xml") -#load necessities +# load necessities library(tidyverse) library(plotly) library(patchwork) @@ -26,7 +26,7 @@ library(ncdf4) #' @title download_SMAP #' #' @param input_sites site ID(s) at locations for which download SMAP data as (vector) -#' @param start start date of analysis YYYY-MM-DD as (String) +#' @param start start date of analysis YYYY-MM-DD as (String) #' @param end end date of analysis YYYY-MM-DD as (String) #' @param geoJSON_outdir file directory to store output site geoJSON files #' @param smap_outdir file directory to store output SMAP netCDF file @@ -35,34 +35,33 @@ library(ncdf4) #' #' #' @examples -#' all_smap_data <- download_SMAP('2016-01-02', '2016-07-16', -#' '/projectnb/dietzelab/jbowers1/geoFiles/', '/projectnb/dietzelab/jbowers1/smap_ncFiles/') -#' -download_SMAP <- function(input_sites, start, end, geoJSON_outdir, smap_outdir){ +#' all_smap_data <- download_SMAP( +#' "2016-01-02", "2016-07-16", +#' "/projectnb/dietzelab/jbowers1/geoFiles/", "/projectnb/dietzelab/jbowers1/smap_ncFiles/" +#' ) +#' +download_SMAP <- function(input_sites, start, end, geoJSON_outdir, smap_outdir) { ## 2015-04-01 is first available smap data - + ######## Download CONUS SMAP data ######## - + # Initialize empty list output_smap <- list() - - source('~/pecan/modules/data.remote/inst/download_SMAP_gee2pecan.R') - + + source("~/pecan/modules/data.remote/inst/download_SMAP_gee2pecan.R") + # Inupt each site's info as a new list - for(index in 1:length(conus_sites)){ # There are 39 sites in the CONUS - + for (index in 1:length(conus_sites)) { # There are 39 sites in the CONUS + if (index != 4 & index != 33 & index != 37) { # 4, 33, and 37 = sites in D17 where SMAP is not available - - if(conus_sites[[index]]$run$site$id %in% input_sites){ # adds only input sites - - output_smap[[length(output_smap) + 1]] <- + + if (conus_sites[[index]]$run$site$id %in% input_sites) { # adds only input sites + + output_smap[[length(output_smap) + 1]] <- download_SMAP_gee2pecan(start, end, conus_sites[[index]]$run$site, geoJSON_outdir, smap_outdir) names(output_smap)[length(output_smap)] <- conus_sites[[index]]$run$site$id - } - } - } return(output_smap) diff --git a/modules/data.remote/inst/download_SMAP_gee2pecan.R b/modules/data.remote/inst/download_SMAP_gee2pecan.R index b2f5fcc7ed5..6cb86b6c302 100644 --- 
a/modules/data.remote/inst/download_SMAP_gee2pecan.R +++ b/modules/data.remote/inst/download_SMAP_gee2pecan.R @@ -1,40 +1,39 @@ -##'@name download_SMAP_gee2pecan.R -##'@description: -##'Download SMAP data from GEE by date and site location +##' @name download_SMAP_gee2pecan.R +##' @description: +##' Download SMAP data from GEE by date and site location ##' -##'Requires python3 and earthengine-api. -##'Untill 'gee2pecan_smap' is integrated into PEcAn workflow, -##'follow GEE registration 'Installation Instructions' here: -##'https://github.com/PecanProject/pecan/pull/2645 +##' Requires python3 and earthengine-api. +##' Until 'gee2pecan_smap' is integrated into the PEcAn workflow, +##' follow GEE registration 'Installation Instructions' here: +##' https://github.com/PecanProject/pecan/pull/2645 ##' -##'@param start start date as YYYY-mm-dd (chr) -##'@param end end date YYYY-mm-dd (chr) -##'@param site_info list of site info containing name (String), site_id (numeric), lat (numeric), and lon (numeric) -##'@param geoJSON_outdir directory to store site GeoJSON, must be the location same as 'gee2pecan_smap.py' -##'@param smap_outdir directory to store netCDF file of SMAP data, if directory folder does not exist it will be created -##'@return data.frame of SMAP data +##' @param start start date as YYYY-mm-dd (chr) +##' @param end end date YYYY-mm-dd (chr) +##' @param site_info list of site info containing name (String), site_id (numeric), lat (numeric), and lon (numeric) +##' @param geoJSON_outdir directory to store site GeoJSON, must be in the same location as 'gee2pecan_smap.py' +##' @param smap_outdir directory to store netCDF file of SMAP data, if the directory does not exist it will be created +##' @return data.frame of SMAP data ##' ##' -##'@authors Juliette Bateman, Ayush Prasad (gee2pecan_smap.py), Joshua Bowers +##' @authors Juliette Bateman, Ayush Prasad (gee2pecan_smap.py), Joshua Bowers ##' -##'@examples -##'\dontrun{ -##'test <- download_SMAP_from_gee( -##'start = "2019-11-01", -##'end = "2019-11-10", -##'site_info = list(site_id = 1126, name = "Harvard_Forest", lat = 42.531453, lon = -72.188896), -##'geoJSON_outdir = '/projectnb/dietzelab/jbowers1/geoFiles/', -##'smap_outdir = '/projectnb/dietzelab/jbowers1/smap_ncFiles/') -##'} +##' @examples +##' \dontrun{ +##' test <- download_SMAP_from_gee( +##' start = "2019-11-01", +##' end = "2019-11-10", +##' site_info = list(site_id = 1126, name = "Harvard_Forest", lat = 42.531453, lon = -72.188896), +##' geoJSON_outdir = '/projectnb/dietzelab/jbowers1/geoFiles/', +##' smap_outdir = '/projectnb/dietzelab/jbowers1/smap_ncFiles/') +##' } download_SMAP_gee2pecan <- function(start, end, - site_info, - geoJSON_outdir, smap_outdir) { - - ## if site_info is only one id, connect to database and collect into - #################### Connect to BETY #################### - # + site_info, + geoJSON_outdir, smap_outdir) { + ## if site_info is only one id, connect to the database and collect site info + #################### Connect to BETY #################### + # # con <- PEcAn.DB::db.open( # list(user='bety', password='bety', host='localhost', # dbname='bety', driver='PostgreSQL',write=TRUE)) @@ -46,63 +45,65 @@ download_SMAP_gee2pecan <- function(start, end, # suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) # site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, # lon=qry_results$lon, time_zone=qry_results$time_zone) - # - - 
#################### Begin Data Extraction #################### + # Create geoJSON file for site site_GeoJSON <- data.frame(site_info$lon, site_info$lat) %>% - setNames(c("lon","lat")) %>% + setNames(c("lon", "lat")) %>% leafletR::toGeoJSON(name = site_info$name, dest = geoJSON_outdir, overwrite = TRUE) %>% rgdal::readOGR() - site_GeoJSON$name = site_info$name - site_GeoJSON = site_GeoJSON[-1] %>% + site_GeoJSON$name <- site_info$name + site_GeoJSON <- site_GeoJSON[-1] %>% leafletR::toGeoJSON(name = site_info$name, dest = geoJSON_outdir, overwrite = TRUE) - - # Locate gee2pecan_smap.py function and load into - reticulate::source_python('~/pecan/modules/data.remote/inst/RpTools/RpTools/gee2pecan_smap.py') - + + # Locate gee2pecan_smap.py function and load into + reticulate::source_python("~/pecan/modules/data.remote/inst/RpTools/RpTools/gee2pecan_smap.py") + ## code taken out of this line of code (var = var ## an arg of gee2pecan_smap) - var_filename <- paste0('smap_', site_info$name) - nc.file <- gee2pecan_smap(geofile = site_GeoJSON, outdir = smap_outdir, - filename = var_filename, start = start, end = end) - + var_filename <- paste0("smap_", site_info$name) + nc.file <- gee2pecan_smap( + geofile = site_GeoJSON, outdir = smap_outdir, + filename = var_filename, start = start, end = end + ) + # Run gee2pecan_smap function output <- nc_open(nc.file) - smap.data = cbind((ncdf4::ncvar_get(output, "date")), ncdf4::ncvar_get(output, "ssm"), - ncdf4::ncvar_get(output,"susm"), ncdf4::ncvar_get(output, "smp"), - ncdf4::ncvar_get(output, "ssma"), ncdf4::ncvar_get(output,"susma")) %>% - as.data.frame(stringsAsFactors = FALSE) %>% + smap.data <- cbind( + (ncdf4::ncvar_get(output, "date")), ncdf4::ncvar_get(output, "ssm"), + ncdf4::ncvar_get(output, "susm"), ncdf4::ncvar_get(output, "smp"), + ncdf4::ncvar_get(output, "ssma"), ncdf4::ncvar_get(output, "susma") + ) %>% + as.data.frame(stringsAsFactors = FALSE) %>% setNames(c("Date", "ssm", "susm", "smp", "ssma", "susma")) %>% - dplyr::mutate(Date = as.Date(Date)) %>% + dplyr::mutate(Date = as.Date(Date)) %>% dplyr::mutate_if(is.character, as.numeric) %>% - tidyr::complete(Date = seq.Date(as.Date(start), as.Date(end), by="day")) - + tidyr::complete(Date = seq.Date(as.Date(start), as.Date(end), by = "day")) + #################### Convert to % Soil Moisture #################### - - ## If variable is ssm or susm, must convert unit from mm --> % + + ## If variable is ssm or susm, must convert unit from mm --> % # SSM (surface soil moisture) represents top 0-5cm (50mm) of soil - smap.data$ssm.vol = unlist((smap.data[,2] / 50) * 100) %>% as.numeric() + smap.data$ssm.vol <- unlist((smap.data[, 2] / 50) * 100) %>% as.numeric() # SUSM (subsurface soil moisture) represents top 0-100 cm (1000mm) of soil - smap.data$susm.vol = unlist((smap.data[,2] / 1000) * 100) %>% as.numeric() - - - #################### Date Entry Parameter Check #################### - + smap.data$susm.vol <- unlist((smap.data[, 2] / 1000) * 100) %>% as.numeric() + + + #################### Date Entry Parameter Check #################### + ## Check if there is data for the date range entered if (all(is.na(smap.data[-1])) == TRUE) { - PEcAn.logger::logger.error( "There are no SMAP data observations for this date range (", start, " to ", end, - "), Please choose another date range. (SMAP data is not available before 2015-04-01.)") - + "), Please choose another date range. 
(SMAP data is not available before 2015-04-01.)" + ) } else if (any(is.na(smap.data)) == TRUE) { - - ## NOTE: SMAP collects data every ~2-3 days. Missing observations are expected. + ## NOTE: SMAP collects data every ~2-3 days. Missing observations are expected. PEcAn.logger::logger.warn( - "WARNING: There are some missing SMAP observations during this date range (", start, " to ", end, ").") - - return(na.omit(smap.data)) } - -} + "WARNING: There are some missing SMAP observations during this date range (", start, " to ", end, ")." + ) + return(na.omit(smap.data)) + } +} diff --git a/modules/data.remote/inst/scripts/Bayesian_curve_fitting.R b/modules/data.remote/inst/scripts/Bayesian_curve_fitting.R index 0722e1d55d2..5aeb35bb091 100644 --- a/modules/data.remote/inst/scripts/Bayesian_curve_fitting.R +++ b/modules/data.remote/inst/scripts/Bayesian_curve_fitting.R @@ -1,26 +1,24 @@ +bayes.curve.fit <- function(outpath, coord.set, fia, n.reps, n.chain) { + library(rjags) + library(R2HTML) -bayes.curve.fit<-function(outpath,coord.set,fia,n.reps,n.chain){ + dat48 <- read.csv(file = paste(outpath, "/", coord.set[fia + 1], "_dat48.csv", sep = ""), header = T, sep = ",") + dir.create(file.path(outpath, "model_output")) + outpath1 <- file.path(outpath, "model_output") + # outpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/data") ##For saving -library(rjags) -library(R2HTML) + x <- dat48$biomass + yvars <- c("dat48$HH.sigma.48", "dat48$HV.sigma.48") -dat48<-read.csv(file=paste(outpath,"/",coord.set[fia+1],"_dat48.csv",sep=""),header=T,sep=",") -dir.create(file.path(outpath,"model_output")) -outpath1<-file.path(outpath,"model_output") -# outpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/data") ##For saving + # n.reps<- 500 #sets value for n.adapt and n.iter + # n.chain<-3 #number of MCMC chains to run -x<-dat48$biomass -yvars<- c("dat48$HH.sigma.48", "dat48$HV.sigma.48") + ################ + ## Define Models + ################ -# n.reps<- 500 #sets value for n.adapt and n.iter -# n.chain<-3 #number of MCMC chains to run - -################ -##Define Models -################ - -#Michaelis-Menton -MM = "model{ + # Michaelis-Menton + MM <- "model{ for(i in 1:n){ y[i]~dnorm(mu[i],tau) mu[i]<-(b0*x[i])/(b1 + x[i]) @@ -35,8 +33,8 @@ sd<-1/sqrt(tau) }#model" -#Holling Type 4 -H4 = "model{ + # Holling Type 4 + H4 <- "model{ for(i in 1:n){ y[i]~dnorm(mu[i],tau) mu[i]<-(a*x[i]^2)/(b+(c*x[i])+x[i]^2) @@ -51,8 +49,8 @@ sd<-1/sqrt(tau) # tau.x<-dgamma(1,1) }#model" -#Holling Type 3 -H3 = "model{ + # Holling Type 3 + H3 <- "model{ for(i in 1:n){ y[i]~dnorm(mu[i],tau) mu[i]<-(a*x[i]^2)/(b^2+x[i]^2) @@ -66,8 +64,8 @@ sd<-1/sqrt(tau) # tau.x<-dgamma(1,1) }#model" -#Ricker -Ri = "model{ + # Ricker + Ri <- "model{ for(i in 1:n){ y[i]~dnorm(mu[i],tau) mu[i]<- a*x[i]*exp(-b*x[i]) @@ -81,8 +79,8 @@ sd<-1/sqrt(tau) # tau.x<-dgamma(1,1) }#model" -#Logistic -Log = "model{ + # Logistic + Log <- "model{ for(i in 1:n){ y[i]~dnorm(mu[i],tau) mu[i]<-a/(1+exp(a-b*x[i])) @@ -97,12 +95,12 @@ sd<-1/sqrt(tau) }#model" -########################### -#Same as above, but with Y intercepts added -########################### + ########################### + # Same as above, but with Y intercepts added + ########################### -#Michaelis-Menton with Y-int -MM.yint = "model{ + # Michaelis-Menton with Y-int + MM.yint <- "model{ for(i in 1:n){ y[i]~dnorm(mu[i],tau) mu[i]<-((b0*x[i])/(b1 + x[i]))+yint @@ -118,8 +116,8 @@ sd<-1/sqrt(tau) }#model" -#Holling Type 4 with Y-int -H4.yint = 
"model{ + # Holling Type 4 with Y-int + H4.yint <- "model{ for(i in 1:n){ y[i]~dnorm(mu[i],tau) mu[i]<-((a*x[i]^2)/(b+(c*x[i])+x[i]^2))+yint @@ -135,8 +133,8 @@ sd<-1/sqrt(tau) # tau.x<-dgamma(1,1) }#model" -#Holling Type 3 with Y-int -H3.yint = "model{ + # Holling Type 3 with Y-int + H3.yint <- "model{ for(i in 1:n){ y[i]~dnorm(mu[i],tau) mu[i]<-((a*x[i]^2)/(b^2+x[i]^2))+yint @@ -151,8 +149,8 @@ sd<-1/sqrt(tau) # tau.x<-dgamma(1,1) }#model" -#Ricker with Y-int -Ri.yint = "model{ + # Ricker with Y-int + Ri.yint <- "model{ for(i in 1:n){ y[i]~dnorm(mu[i],tau) mu[i]<-(a*x[i]*exp(-b*x[i]))+yint @@ -167,8 +165,8 @@ sd<-1/sqrt(tau) # tau.x<-dgamma(1,1) }#model" -#Logistic with Y-int -Log.yint = "model{ + # Logistic with Y-int + Log.yint <- "model{ for(i in 1:n){ y[i]~dnorm(mu[i],tau) mu[i]<-(a/(1+exp(a-b*x[i])))+yint @@ -184,226 +182,252 @@ sd<-1/sqrt(tau) }#model" -################ -##Build lists for looping -################ - -mod.names<-c("MM","H3","H4","Ri","Log","MM.yint","H3.yint","H4.yint","Ri.yint","Log.yint") - -#Initial conditions for HH pol band -MM.HH.init = list(b1=4, b0=0.1, tau = 2/stats::var(y)) #for MM -H4.HH.init = list(a=0.1,b=50, c=2, tau = 2/stats::var(y)) #for H4 -H3.HH.init = list(a=0.1,b=100, tau = 2/stats::var(y)) #for H3 -Ri.HH.init = list(a=0.5,b=0.5, tau = 2/stats::var(y)) #for Ri -Log.HH.init = list(a=0.6,b=1, tau = 2/stats::var(y)) #for Log -MM.yint.HH.init = list(b1=4, b0=0.1, yint=0.1, tau = 2/stats::var(y)) #for MM.yint -H3.yint.HH.init = list(a=0.1,b=100, yint=0.1, tau = 2/stats::var(y)) #for H3.yint -H4.yint.HH.init = list(a=0.1,b=50, c=2, yint=0.1, tau = 2/stats::var(y)) #for H4.yint -Ri.yint.HH.init = list(a=0.5, b=0.5, yint=0.1, tau = 2/stats::var(y)) #for Ri.yint -Log.yint.HH.init= list(a=0.6,b=1, yint=0.1, tau = 2/stats::var(y)) #for Log.yint - -#Initial conditions for HV pol band -MM.HV.init = list(b1=8, b0=0.04, tau = 2/stats::var(y)) #for MM -H3.HV.init = list(a=0.02, b=100, tau = 2/stats::var(y)) #for H3 -H4.HV.init = list(a=0.03, b=0.01, c=2, tau = 2/stats::var(y)) #for H4 -Ri.HV.init = list(a=0, b=0, tau = 2/stats::var(y)) #for Ri -Log.HV.init = list(a=0.6, b=1, tau = 2/stats::var(y)) #for Log -MM.yint.HV.init = list(b1=8, b0=0.04, yint=0.04,tau = 2/stats::var(y)) #for MM.yint -H3.yint.HV.init = list(a=0.02, b=100, yint=0.04, tau = 2/stats::var(y)) #for H3.yint -H4.yint.HV.init = list(a=0.03, b=0.01, c=2, yint=0.04, tau = 2/stats::var(y)) #for H4.yint -Ri.yint.HV.init = list(a=0.5, b=0.5, yint=0.04, tau = 2/stats::var(y)) #for Ri.yint -Log.yint.HV.init= list(a=0.6, b=1, yint=0.04, tau = 2/stats::var(y)) #for Log.yint - -MM.var.names<-c("b0","b1","sd") #for MM -H3.var.names<-c("a","b","sd") #for H3 -H4.var.names<-c("a","b","c","sd") #for H4 -Ri.var.names <-c("a","b","sd") #for Ri -Log.var.names<-c("a","b","sd") #for Log -MM.yint.var.names<-c("b0","b1","yint","sd") #for MM.yint -H3.yint.var.names<-c("a","b","yint","sd") #for H3.yint -H4.yint.var.names<-c("a","b","c","yint","sd") #for H4.yint -Ri.yint.var.names <-c("a","b","yint","sd") #for Ri.yint -Log.yint.var.names<-c("a","b","yint","sd") #for Log.yint - -MM.lines<-"lines(xseq,(parm[1]*xseq)/(parm[2]+xseq),col=2,lwd=3)" #For MM -H3.lines<-"lines(xseq,(parm[1]*xseq^2)/(parm[2]^2+xseq^2),col=2,lwd=3)" #For H3 -H4.lines<-"lines(xseq,(parm[1]*xseq^2)/(parm[2]+parm[3]*xseq+xseq^2),col=2,lwd=3)" #For H4 -Ri.lines<-"lines(xseq,(parm[1]*xseq*exp(-parm[2]*xseq)),col=2,lwd=3)" #For Ri a*x[i]*exp(-b*x[i]) -Log.lines<-"lines(xseq,parm[1]/(1+exp(parm[1]-parm[2]*xseq)),col=2,lwd=3)" #For Log 
exp(a+b*x[i])/(1+exp(a+b*x[i])) -MM.yint.lines<-"lines(xseq,((parm[1]*xseq)/(parm[2]+xseq))+parm[4],col=2,lwd=3)" #For MM.yint -H3.yint.lines<-"lines(xseq,((parm[1]*xseq^2)/(parm[2]^2+xseq^2))+parm[4],col=2,lwd=3)" #For H3.yint -H4.yint.lines<-"lines(xseq,((parm[1]*xseq^2)/(parm[2]+parm[3]*xseq+xseq^2))+parm[5],col=2,lwd=3)" #For H4.yint -Ri.yint.lines<-"lines(xseq,(parm[1]*xseq*exp(-parm[2]*xseq))+parm[4],col=2,lwd=3)" #For Ri.yint -Log.yint.lines<-"lines(xseq,(parm[1]/(1+exp(parm[1]-parm[2]*xseq)))+parm[4],col=2,lwd=3)" #For Log.yint - -MM.mod.eqn<-"(out[k,1]*xseq)/(out[k,2]+xseq)" -H3.mod.eqn<-"(out[k,1]*xseq^2)/(out[k,2]^2+xseq^2)" -H4.mod.eqn<-"(out[k,1]*xseq^2)/(out[k,2]+out[k,3]*xseq+xseq^2)" -Ri.mod.eqn<-"out[k,1]*xseq*exp(-out[k,2]*xseq)" -Log.mod.eqn<-"out[k,1]/(1+exp(out[k,1]-out[k,2]*xseq))" -MM.yint.mod.eqn<-"((out[k,1]*xseq)/(out[k,2]+xseq))+out[k,4]" -H3.yint.mod.eqn<-"((out[k,1]*xseq^2)/(out[k,2]^2+xseq^2))+out[k,4]" -H4.yint.mod.eqn<-"((out[k,1]*xseq^2)/(out[k,2]+out[k,3]*xseq+xseq^2))+out[k,5]" -Ri.yint.mod.eqn<-"(out[k,1]*xseq*exp(-out[k,2]*xseq))+out[k,4]" -Log.yint.mod.eqn<-"(out[k,1]/(1+exp(out[k,1]-out[k,2]*xseq)))+out[k,4]" - -################ -##Compiled model inputs -################ -#MODELS MUST BE IN THE SAME ORDER FOR EACH OF THE FOLLOWING: -models<-c(MM, - H3, - H4, - Ri, - Log, - MM.yint, - H3.yint, - H4.yint, - Ri.yint, - Log.yint) -HH.init <-list(MM.HH.init, - H3.HH.init, - H4.HH.init, - Ri.HH.init, - Log.HH.init, - MM.yint.HH.init, - H3.yint.HH.init, - H4.yint.HH.init, - Ri.yint.HH.init, - Log.yint.HH.init) - -HV.init <-list(MM.HV.init, - H3.HV.init, - H4.HV.init, - Ri.HV.init, - Log.HV.init, - MM.yint.HV.init, - H3.yint.HV.init, - H4.yint.HV.init, - Ri.yint.HV.init, - Log.yint.HV.init) - -init<-c("HH.init","HV.init") - -var.names<-list(MM.var.names, - H3.var.names, - H4.var.names, - Ri.var.names, - Log.var.names, - MM.yint.var.names, - H3.yint.var.names, - H4.yint.var.names, - Ri.yint.var.names, - Log.yint.var.names) -model.fits<-list(MM.lines, - H3.lines, - H4.lines, - Ri.lines, - Log.lines, - MM.yint.lines, - H3.yint.lines, - H4.yint.lines, - Ri.yint.lines, - Log.yint.lines) -mod.eqns <-list(MM.mod.eqn, - H3.mod.eqn, - H4.mod.eqn, - Ri.mod.eqn, - Log.mod.eqn, - MM.yint.mod.eqn, - H3.yint.mod.eqn, - H4.yint.mod.eqn, - Ri.yint.mod.eqn, - Log.yint.mod.eqn) - -################ -##Loop over all models and backscatter polarization bands -################ -for(i in 1:length(yvars)){ #loop over HH and HV pol bands - y<-eval(parse(text=yvars[i])) - - if(length(y[is.na(y)]>0)){ #to get rid of NAs - drop = which(is.na(y)) - data = list(x=x[-drop],y=y[-drop],n=length(x)-length(drop)) - } else{ - data = list(x=x,y=y,n=length(x)) - } - - for(j in 1:length(models)){#looping over models - ##Create dir for output from each model x polband x site combination - dir.create(file.path(outpath1,coord.set[fia+1])) - outpath2<-file.path(outpath1,coord.set[fia+1]) - dir.create(file.path(outpath2,substr(yvars[i],7,8))) - outpath3<-file.path(outpath2,substr(yvars[i],7,8)) - dir.create(file.path(outpath3,mod.names[j])) - outpath4<-file.path(outpath3,mod.names[j]) - - ##Do JAGS stuff - j1 = jags.model(file=textConnection(models[j]), - data = data, - inits = unlist(eval(parse(text=init[i]))[j],recursive=FALSE), - n.chains=n.chain, - n.adapt=min(0.1*n.reps,1000)) - - jags.out = coda.samples(model=j1, - variable.names<-var.names[j][[1]], - n.iter = n.reps) - out <- as.matrix(jags.out) - - #Save MCMC output - write.csv(out,file.path(outpath4, - 
paste("MCMC_out",coord.set[fia+1],substr(yvars[i],7,8),mod.names[j],".csv",sep="_")), - row.names=FALSE) - - #Save xy pairs - write.csv(cbind(x,y),file.path(outpath4, - paste("xy_pairs",coord.set[fia+1],substr(yvars[i],7,8),mod.names[j],".csv",sep="_")), - row.names=FALSE) - - coda::gelman.diag(jags.out) - summary(jags.out) - ##Save model output summary - saveRDS(summary(jags.out),file=file.path(outpath4, - paste("Jags_Out",coord.set[fia+1],substr(yvars[i],7,8),mod.names[j],".Rdata",sep="_"))) - saveRDS(coda::gelman.diag(jags.out),file=file.path(outpath4, - paste("Gelman_Diag",coord.set[fia+1],substr(yvars[i],7,8),mod.names[j],".Rdata",sep="_"))) - - - #Generate pdf of curve fits - pdf(paste(paste(outpath4,"/","curve_fit_",coord.set[fia+1],sep=""),substr(yvars[i],7,8),mod.names[j],".pdf",sep="_"),width = 6, height = 6, paper='special') - - par(mar = rep(2, 4)) - plot(jags.out) - pairs(out) - - #coda::autocorr.plot(jags.out) - coda::gelman.plot(jags.out) - - #plot data - par(mfrow=c(1,1)) - parm = apply(out,2,mean) - plot(x,y,pch=".",xlab="Biomass",ylab=yvars[i],main=paste(mod.names[j],"fit of",yvars[i],sep=" ")) #plot data - lines(loess.smooth(x,y), col="grey", lty=1, lwd=1) - xseq = seq(0,300,length=3000) - eval(parse(text=model.fits[j])) #plot fitted curve line - - npred = 10 - ypred = matrix(NA,npred,length(xseq)) - samp = sample(1:nrow(out),npred) - for(p in 1:npred){ - k = samp[p] - ypred[p,] = eval(parse(text=mod.eqns[j])) + ################ + ## Build lists for looping + ################ + + mod.names <- c("MM", "H3", "H4", "Ri", "Log", "MM.yint", "H3.yint", "H4.yint", "Ri.yint", "Log.yint") + + # Initial conditions for HH pol band + MM.HH.init <- list(b1 = 4, b0 = 0.1, tau = 2 / stats::var(y)) # for MM + H4.HH.init <- list(a = 0.1, b = 50, c = 2, tau = 2 / stats::var(y)) # for H4 + H3.HH.init <- list(a = 0.1, b = 100, tau = 2 / stats::var(y)) # for H3 + Ri.HH.init <- list(a = 0.5, b = 0.5, tau = 2 / stats::var(y)) # for Ri + Log.HH.init <- list(a = 0.6, b = 1, tau = 2 / stats::var(y)) # for Log + MM.yint.HH.init <- list(b1 = 4, b0 = 0.1, yint = 0.1, tau = 2 / stats::var(y)) # for MM.yint + H3.yint.HH.init <- list(a = 0.1, b = 100, yint = 0.1, tau = 2 / stats::var(y)) # for H3.yint + H4.yint.HH.init <- list(a = 0.1, b = 50, c = 2, yint = 0.1, tau = 2 / stats::var(y)) # for H4.yint + Ri.yint.HH.init <- list(a = 0.5, b = 0.5, yint = 0.1, tau = 2 / stats::var(y)) # for Ri.yint + Log.yint.HH.init <- list(a = 0.6, b = 1, yint = 0.1, tau = 2 / stats::var(y)) # for Log.yint + + # Initial conditions for HV pol band + MM.HV.init <- list(b1 = 8, b0 = 0.04, tau = 2 / stats::var(y)) # for MM + H3.HV.init <- list(a = 0.02, b = 100, tau = 2 / stats::var(y)) # for H3 + H4.HV.init <- list(a = 0.03, b = 0.01, c = 2, tau = 2 / stats::var(y)) # for H4 + Ri.HV.init <- list(a = 0, b = 0, tau = 2 / stats::var(y)) # for Ri + Log.HV.init <- list(a = 0.6, b = 1, tau = 2 / stats::var(y)) # for Log + MM.yint.HV.init <- list(b1 = 8, b0 = 0.04, yint = 0.04, tau = 2 / stats::var(y)) # for MM.yint + H3.yint.HV.init <- list(a = 0.02, b = 100, yint = 0.04, tau = 2 / stats::var(y)) # for H3.yint + H4.yint.HV.init <- list(a = 0.03, b = 0.01, c = 2, yint = 0.04, tau = 2 / stats::var(y)) # for H4.yint + Ri.yint.HV.init <- list(a = 0.5, b = 0.5, yint = 0.04, tau = 2 / stats::var(y)) # for Ri.yint + Log.yint.HV.init <- list(a = 0.6, b = 1, yint = 0.04, tau = 2 / stats::var(y)) # for Log.yint + + MM.var.names <- c("b0", "b1", "sd") # for MM + H3.var.names <- c("a", "b", "sd") # for H3 + H4.var.names <- c("a", "b", "c", 
"sd") # for H4 + Ri.var.names <- c("a", "b", "sd") # for Ri + Log.var.names <- c("a", "b", "sd") # for Log + MM.yint.var.names <- c("b0", "b1", "yint", "sd") # for MM.yint + H3.yint.var.names <- c("a", "b", "yint", "sd") # for H3.yint + H4.yint.var.names <- c("a", "b", "c", "yint", "sd") # for H4.yint + Ri.yint.var.names <- c("a", "b", "yint", "sd") # for Ri.yint + Log.yint.var.names <- c("a", "b", "yint", "sd") # for Log.yint + + MM.lines <- "lines(xseq,(parm[1]*xseq)/(parm[2]+xseq),col=2,lwd=3)" # For MM + H3.lines <- "lines(xseq,(parm[1]*xseq^2)/(parm[2]^2+xseq^2),col=2,lwd=3)" # For H3 + H4.lines <- "lines(xseq,(parm[1]*xseq^2)/(parm[2]+parm[3]*xseq+xseq^2),col=2,lwd=3)" # For H4 + Ri.lines <- "lines(xseq,(parm[1]*xseq*exp(-parm[2]*xseq)),col=2,lwd=3)" # For Ri a*x[i]*exp(-b*x[i]) + Log.lines <- "lines(xseq,parm[1]/(1+exp(parm[1]-parm[2]*xseq)),col=2,lwd=3)" # For Log exp(a+b*x[i])/(1+exp(a+b*x[i])) + MM.yint.lines <- "lines(xseq,((parm[1]*xseq)/(parm[2]+xseq))+parm[4],col=2,lwd=3)" # For MM.yint + H3.yint.lines <- "lines(xseq,((parm[1]*xseq^2)/(parm[2]^2+xseq^2))+parm[4],col=2,lwd=3)" # For H3.yint + H4.yint.lines <- "lines(xseq,((parm[1]*xseq^2)/(parm[2]+parm[3]*xseq+xseq^2))+parm[5],col=2,lwd=3)" # For H4.yint + Ri.yint.lines <- "lines(xseq,(parm[1]*xseq*exp(-parm[2]*xseq))+parm[4],col=2,lwd=3)" # For Ri.yint + Log.yint.lines <- "lines(xseq,(parm[1]/(1+exp(parm[1]-parm[2]*xseq)))+parm[4],col=2,lwd=3)" # For Log.yint + + MM.mod.eqn <- "(out[k,1]*xseq)/(out[k,2]+xseq)" + H3.mod.eqn <- "(out[k,1]*xseq^2)/(out[k,2]^2+xseq^2)" + H4.mod.eqn <- "(out[k,1]*xseq^2)/(out[k,2]+out[k,3]*xseq+xseq^2)" + Ri.mod.eqn <- "out[k,1]*xseq*exp(-out[k,2]*xseq)" + Log.mod.eqn <- "out[k,1]/(1+exp(out[k,1]-out[k,2]*xseq))" + MM.yint.mod.eqn <- "((out[k,1]*xseq)/(out[k,2]+xseq))+out[k,4]" + H3.yint.mod.eqn <- "((out[k,1]*xseq^2)/(out[k,2]^2+xseq^2))+out[k,4]" + H4.yint.mod.eqn <- "((out[k,1]*xseq^2)/(out[k,2]+out[k,3]*xseq+xseq^2))+out[k,5]" + Ri.yint.mod.eqn <- "(out[k,1]*xseq*exp(-out[k,2]*xseq))+out[k,4]" + Log.yint.mod.eqn <- "(out[k,1]/(1+exp(out[k,1]-out[k,2]*xseq)))+out[k,4]" + + ################ + ## Compiled model inputs + ################ + # MODELS MUST BE IN THE SAME ORDER FOR EACH OF THE FOLLOWING: + models <- c( + MM, + H3, + H4, + Ri, + Log, + MM.yint, + H3.yint, + H4.yint, + Ri.yint, + Log.yint + ) + HH.init <- list( + MM.HH.init, + H3.HH.init, + H4.HH.init, + Ri.HH.init, + Log.HH.init, + MM.yint.HH.init, + H3.yint.HH.init, + H4.yint.HH.init, + Ri.yint.HH.init, + Log.yint.HH.init + ) + + HV.init <- list( + MM.HV.init, + H3.HV.init, + H4.HV.init, + Ri.HV.init, + Log.HV.init, + MM.yint.HV.init, + H3.yint.HV.init, + H4.yint.HV.init, + Ri.yint.HV.init, + Log.yint.HV.init + ) + + init <- c("HH.init", "HV.init") + + var.names <- list( + MM.var.names, + H3.var.names, + H4.var.names, + Ri.var.names, + Log.var.names, + MM.yint.var.names, + H3.yint.var.names, + H4.yint.var.names, + Ri.yint.var.names, + Log.yint.var.names + ) + model.fits <- list( + MM.lines, + H3.lines, + H4.lines, + Ri.lines, + Log.lines, + MM.yint.lines, + H3.yint.lines, + H4.yint.lines, + Ri.yint.lines, + Log.yint.lines + ) + mod.eqns <- list( + MM.mod.eqn, + H3.mod.eqn, + H4.mod.eqn, + Ri.mod.eqn, + Log.mod.eqn, + MM.yint.mod.eqn, + H3.yint.mod.eqn, + H4.yint.mod.eqn, + Ri.yint.mod.eqn, + Log.yint.mod.eqn + ) + + ################ + ## Loop over all models and backscatter polarization bands + ################ + for (i in 1:length(yvars)) { # loop over HH and HV pol bands + y <- eval(parse(text = yvars[i])) + + if (length(y[is.na(y)] 
> 0)) { # to get rid of NAs + drop <- which(is.na(y)) + data <- list(x = x[-drop], y = y[-drop], n = length(x) - length(drop)) + } else { + data <- list(x = x, y = y, n = length(x)) } - #Add confidence interval - yci = apply(ypred,2,quantile,c(0.025,0.5,0.975)) - lines(xseq,yci[1,],col=3) - lines(xseq,yci[3,],col=3) - legend("topright",lty=c(1,1,1),col=c("grey",2,3),legend=c("Loess","Curve Fit","95% C.I."),bty="n") - - dev.off() - - print(yvars[i]) - print(mod.names[j]) - }#looping over models -}#looping over HH & HV -}#function + + for (j in 1:length(models)) { # looping over models + ## Create dir for output from each model x polband x site combination + dir.create(file.path(outpath1, coord.set[fia + 1])) + outpath2 <- file.path(outpath1, coord.set[fia + 1]) + dir.create(file.path(outpath2, substr(yvars[i], 7, 8))) + outpath3 <- file.path(outpath2, substr(yvars[i], 7, 8)) + dir.create(file.path(outpath3, mod.names[j])) + outpath4 <- file.path(outpath3, mod.names[j]) + + ## Do JAGS stuff + j1 <- jags.model( + file = textConnection(models[j]), + data = data, + inits = unlist(eval(parse(text = init[i]))[j], recursive = FALSE), + n.chains = n.chain, + n.adapt = min(0.1 * n.reps, 1000) + ) + + jags.out <- coda.samples( + model = j1, + variable.names <- var.names[j][[1]], + n.iter = n.reps + ) + out <- as.matrix(jags.out) + + # Save MCMC output + write.csv(out, file.path( + outpath4, + paste("MCMC_out", coord.set[fia + 1], substr(yvars[i], 7, 8), mod.names[j], ".csv", sep = "_") + ), + row.names = FALSE + ) + + # Save xy pairs + write.csv(cbind(x, y), file.path( + outpath4, + paste("xy_pairs", coord.set[fia + 1], substr(yvars[i], 7, 8), mod.names[j], ".csv", sep = "_") + ), + row.names = FALSE + ) + + coda::gelman.diag(jags.out) + summary(jags.out) + ## Save model output summary + saveRDS(summary(jags.out), file = file.path( + outpath4, + paste("Jags_Out", coord.set[fia + 1], substr(yvars[i], 7, 8), mod.names[j], ".Rdata", sep = "_") + )) + saveRDS(coda::gelman.diag(jags.out), file = file.path( + outpath4, + paste("Gelman_Diag", coord.set[fia + 1], substr(yvars[i], 7, 8), mod.names[j], ".Rdata", sep = "_") + )) + + + # Generate pdf of curve fits + pdf(paste(paste(outpath4, "/", "curve_fit_", coord.set[fia + 1], sep = ""), substr(yvars[i], 7, 8), mod.names[j], ".pdf", sep = "_"), width = 6, height = 6, paper = "special") + + par(mar = rep(2, 4)) + plot(jags.out) + pairs(out) + + # coda::autocorr.plot(jags.out) + coda::gelman.plot(jags.out) + + # plot data + par(mfrow = c(1, 1)) + parm <- apply(out, 2, mean) + plot(x, y, pch = ".", xlab = "Biomass", ylab = yvars[i], main = paste(mod.names[j], "fit of", yvars[i], sep = " ")) # plot data + lines(loess.smooth(x, y), col = "grey", lty = 1, lwd = 1) + xseq <- seq(0, 300, length = 3000) + eval(parse(text = model.fits[j])) # plot fitted curve line + + npred <- 10 + ypred <- matrix(NA, npred, length(xseq)) + samp <- sample(1:nrow(out), npred) + for (p in 1:npred) { + k <- samp[p] + ypred[p, ] <- eval(parse(text = mod.eqns[j])) + } + # Add confidence interval + yci <- apply(ypred, 2, quantile, c(0.025, 0.5, 0.975)) + lines(xseq, yci[1, ], col = 3) + lines(xseq, yci[3, ], col = 3) + legend("topright", lty = c(1, 1, 1), col = c("grey", 2, 3), legend = c("Loess", "Curve Fit", "95% C.I."), bty = "n") + + dev.off() + + print(yvars[i]) + print(mod.names[j]) + } # looping over models + } # looping over HH & HV +} # function diff --git a/modules/data.remote/inst/scripts/ChEAS_FIA_03142014.R b/modules/data.remote/inst/scripts/ChEAS_FIA_03142014.R index 
79335721067..285f2237c9d 100644 --- a/modules/data.remote/inst/scripts/ChEAS_FIA_03142014.R +++ b/modules/data.remote/inst/scripts/ChEAS_FIA_03142014.R @@ -1,6 +1,6 @@ -##Author Brady S. Hardiman 11/12/2013 +## Author Brady S. Hardiman 11/12/2013 -##Read in fuzzed FIA coordinates supplied by Andy Finley (MSU), extract palsar backscatter values, fit curves, save figures and extracted values (with coordinates) +## Read in fuzzed FIA coordinates supplied by Andy Finley (MSU), extract palsar backscatter values, fit curves, save figures and extracted values (with coordinates) ################################ ## Load Required Packages @@ -20,95 +20,99 @@ library(reshape) ################################ ## OPTIONS ################################ -kml=0 #1 = generate and save kml files of extraction coordinates; 0 = do not generate new kml -fia=0 #1 = use FIA coordinates, 0 = use WLEF/Park Falls Tower coordinates -leaf.off=0 #1=include PALSAR scenes acquired duing leaf off period of the year, 0=exclude leaf off scene dates +kml <- 0 # 1 = generate and save kml files of extraction coordinates; 0 = do not generate new kml +fia <- 0 # 1 = use FIA coordinates, 0 = use WLEF/Park Falls Tower coordinates +leaf.off <- 0 # 1=include PALSAR scenes acquired during leaf off period of the year, 0=exclude leaf off scene dates # buff=c(48) #vector of buffer sizes (in meters) to extract -coord.set<-c("WLEF", "FIA") +coord.set <- c("WLEF", "FIA") # metadata<- read.csv("~/data.remote/output/metadata/output_metadata.csv", sep="\t", header=T) ##for Brady's Linux -metadata<- read.csv("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/metadata/output_metadata.csv", sep="\t", header=T) ##location of PALSAR metadata table -palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/palsar_scenes/geo_corrected_single_sigma") ##location of PALSAR raw files -calib_inpath <-"/Users/hardimanb/Desktop/data.remote(Andys_Copy)/biometry" ##location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter -outpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/data") ##For saving +metadata <- read.csv("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/metadata/output_metadata.csv", sep = "\t", header = T) ## location of PALSAR metadata table +palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/palsar_scenes/geo_corrected_single_sigma") ## location of PALSAR raw files +calib_inpath <- "/Users/hardimanb/Desktop/data.remote(Andys_Copy)/biometry" ## location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter +outpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/data") ## For saving ################################ ## Read in coordinate data for calibration of PALSAR backscatter returns -## Uses PALSAR metadata file to determine geographic extent of available PALSAR data and crops extraction coord set +## Uses PALSAR metadata file to determine geographic extent of available PALSAR data and crops extraction coord set ## to match PALSAR extent. Reprojects extraction coords to match PALSAR geotiffs.
################################ -if(fia==1){ #EXTRACTS FROM FIA COORDINATES -# calib_inpath <-"~/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##for Brady's Linux - calib_infile <-read.csv(file.path(calib_inpath,"wi-biomass-fuzzed.csv"), sep=",", header=T) #Wisconsin FIA plots - coords<-data.frame(calib_infile$FUZZED_LON,calib_infile$FUZZED_LAT) #lon and lat (ChEAS: UTM Zone 15N NAD83) - Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - -# Sr1<-spTransform(Sr1,CRS(raster)) - -# wi.fia<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) - latlon<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) -# FIA.points <- SpatialPointsDataFrame(Sr1, wi.fia) #convert to class="SpatialPointsDataFrame" for export as kml - spdf.latlon <- SpatialPointsDataFrame(Sr1, latlon) #convert to class="SpatialPointsDataFrame" for export as kml - if(kml==1){writeOGR(spdf.latlon, layer=1, "WI_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) +if (fia == 1) { # EXTRACTS FROM FIA COORDINATES + # calib_inpath <-"~/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##for Brady's Linux + calib_infile <- read.csv(file.path(calib_inpath, "wi-biomass-fuzzed.csv"), sep = ",", header = T) # Wisconsin FIA plots + coords <- data.frame(calib_infile$FUZZED_LON, calib_infile$FUZZED_LAT) # lon and lat (ChEAS: UTM Zone 15N NAD83) + Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + + # Sr1<-spTransform(Sr1,CRS(raster)) + + # wi.fia<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) + latlon <- data.frame(calib_infile$FUZZED_LAT, calib_infile$FUZZED_LON) + # FIA.points <- SpatialPointsDataFrame(Sr1, wi.fia) #convert to class="SpatialPointsDataFrame" for export as kml + spdf.latlon <- SpatialPointsDataFrame(Sr1, latlon) # convert to class="SpatialPointsDataFrame" for export as kml + if (kml == 1) { + writeOGR(spdf.latlon, layer = 1, "WI_FIA.kml", driver = "KML") # export as kml (this puts it in the Home folder) } -}else{#EXTRACTS FROM WLEF COORDINATES +} else { # EXTRACTS FROM WLEF COORDINATES # calib_inpath <-"~/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##for Brady's Linux - calib_infile <-read.csv(file.path(calib_inpath,"biometry_trimmed.csv"), sep=",", header=T) #WLEF plots -# upid<-paste(calib_infile$plot,calib_infile$subplot,sep='.') #create unique plot identifier -# calib_infile<-cbind(calib_infile[,1:2],upid,calib_infile[,3:8]) - calib_infile<-aggregate(calib_infile, list(calib_infile[,1]), mean) ##This will give
errors, but these can be safely ignored + calib_infile$plot <- calib_infile$Group.1 + calib_infile <- cbind(calib_infile[, 2], calib_infile[, 5:9]) + colnames(calib_infile) <- c("plot", "easting", "northing", "adult_density", "sapling_density", "ABG_biomass") + + coords <- data.frame(calib_infile$easting, calib_infile$northing) # eastings and northings (ChEAS: UTM Zone 15N NAD83) + Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) + wlef <- data.frame(paste(calib_infile$plot, calib_infile$subplot, sep = "_")) epsg4326String <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs") - Sr1_4google <- spTransform(Sr1,epsg4326String) #class=SpatialPoints - Sr1_4google <- SpatialPointsDataFrame(Sr1_4google, wlef) #convert to class="SpatialPointsDataFrame" for export as kml - if(kml==1){writeOGR(Sr1_4google, layer=1, "WLEF.kml", driver="KML") #export as kml (this puts in in the Home folder) + Sr1_4google <- spTransform(Sr1, epsg4326String) # class=SpatialPoints + Sr1_4google <- SpatialPointsDataFrame(Sr1_4google, wlef) # convert to class="SpatialPointsDataFrame" for export as kml + if (kml == 1) { + writeOGR(Sr1_4google, layer = 1, "WLEF.kml", driver = "KML") # export as kml (this puts it in the Home folder) } } ## corner coords for cheas domain based on avaialable PALSAR data. (Maybe switch to bounding.box.xy()? ) -ChEAS_PLASAR_extent <-rbind(cbind(min(metadata$scn_nwlon),max(metadata$scn_nwlat)), - cbind(max(metadata$scn_nelon),max(metadata$scn_nelat)), - cbind(max(metadata$scn_selon),min(metadata$scn_selat)), - cbind(min(metadata$scn_swlon),min(metadata$scn_swlat)), - cbind(min(metadata$scn_nwlon),max(metadata$scn_nwlat))) - -ChEAS_PLASAR_extent<- Polygon(ChEAS_PLASAR_extent) #spatial polygon from cheas-palsar extent -Srs1<- Polygons(list(ChEAS_PLASAR_extent),"ChEAS_PLASAR_extent") #spatial polygons (plural) -ChEAS_PLASAR_extent<-SpatialPolygons(list(Srs1),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - -Sr1<-spTransform(Sr1,CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) +ChEAS_PLASAR_extent <- rbind( + cbind(min(metadata$scn_nwlon), max(metadata$scn_nwlat)), + cbind(max(metadata$scn_nelon), max(metadata$scn_nelat)), + cbind(max(metadata$scn_selon), min(metadata$scn_selat)), + cbind(min(metadata$scn_swlon), min(metadata$scn_swlat)), + cbind(min(metadata$scn_nwlon), max(metadata$scn_nwlat)) +) + +ChEAS_PLASAR_extent <- Polygon(ChEAS_PLASAR_extent) # spatial polygon from cheas-palsar extent +Srs1 <- Polygons(list(ChEAS_PLASAR_extent), "ChEAS_PLASAR_extent") # spatial polygons (plural) +ChEAS_PLASAR_extent <- SpatialPolygons(list(Srs1), proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + +Sr1 <- spTransform(Sr1, CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) # FIA.in.cheas<-as.vector(over(FIA.points,ChEAS_PLASAR_extent)) #subset of FIA plots that falls within Cheas-PALSAR extent -coords.in.cheas<-as.vector(over(Sr1,ChEAS_PLASAR_extent)) #subset of plots that falls within Cheas-PALSAR extent +coords.in.cheas <- as.vector(over(Sr1, ChEAS_PLASAR_extent)) # subset of plots that falls within Cheas-PALSAR extent # FIA.in.cheas[is.na(FIA.in.cheas)]<-0 #replace na's with 0's for indexing -coords.in.cheas[is.na(coords.in.cheas)]<-0 #replace na's with 0's for indexing +coords.in.cheas[is.na(coords.in.cheas)] <- 0 # replace na's with 0's for indexing -##Biomass source data -if(fia==1){ - biomass<-calib_infile[as.logical(coords.in.cheas),4] #for FIA -} else{ -
biomass<-calib_infile[as.logical(coords.in.cheas),'ABG_biomass'] #for WLEF +## Biomass source data +if (fia == 1) { + biomass <- calib_infile[as.logical(coords.in.cheas), 4] # for FIA +} else { + biomass <- calib_infile[as.logical(coords.in.cheas), "ABG_biomass"] # for WLEF } ## Subset extraction coords that fall within PALSAR observation area -# cheasFIA<-Sr1@coords[FIA.in.cheas==1,] -cheas.coords<-Sr1@coords[coords.in.cheas==1,] ##subset of coords that falls within Cheas-PALSAR extent +# cheasFIA<-Sr1@coords[FIA.in.cheas==1,] +cheas.coords <- Sr1@coords[coords.in.cheas == 1, ] ## subset of coords that falls within Cheas-PALSAR extent # spcheasFIA <- SpatialPoints(cheasFIA,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -spcheascoords <- SpatialPoints(cheas.coords,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) +spcheascoords <- SpatialPoints(cheas.coords, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -##Plot-IDs; will be used later on for generating time series of backscatter values -if(fia==1){ - plot<-seq(1,nrow(cheas.coords),1) #for FIA NOTE: Add in FIA plot unique identifiers if available -} else{ - plot<-calib_infile[as.logical(coords.in.cheas),'plot'] #for WLEF +## Plot-IDs; will be used later on for generating time series of backscatter values +if (fia == 1) { + plot <- seq(1, nrow(cheas.coords), 1) # for FIA NOTE: Add in FIA plot unique identifiers if available +} else { + plot <- calib_infile[as.logical(coords.in.cheas), "plot"] # for WLEF } -# writeOGR(cheasFIA, layer=1, "cheas_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) +# writeOGR(cheasFIA, layer=1, "cheas_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) ################################ ## Begin extracting PALSAR values at FIA plot coordinates @@ -119,235 +123,237 @@ if(fia==1){ # date<-as.Date(metadata$scndate, format='%Y%m%d') # col_names<-c(rbind(paste(date, "HH",sep="_"),paste(date, "HV",sep="_"))) -pol_bands<-c("HH", "HV") -numfiles<-length(list.files(file.path(palsar_inpath, pol_bands[1]), pattern=".tif" ,recursive=F)) +pol_bands <- c("HH", "HV") +numfiles <- length(list.files(file.path(palsar_inpath, pol_bands[1]), pattern = ".tif", recursive = F)) # lake_extracted<-matrix(NA, nrow(lake_coords),length(pol_bands)*numfiles) # disturbance_extracted_40m<-matrix(NA, nrow(disturbance_coords),length(pol_bands)*numfiles) -# +# # colnames(lake_extracted)<-col_names # colnames(disturbance_extracted)<-col_names # colnames(disturbance_extracted_40m)<-col_names -if( fia==1){ -# extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. -# extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. - extracted_48m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -# extracted_60m<-matrix(nrow=0, ncol=7) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -} else{ -# extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. -# extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. - extracted_48m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. 
ncol=# of pol_bands -# extracted_60m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +if (fia == 1) { + # extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. + # extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. + extracted_48m <- matrix(nrow = 0, ncol = 8) # matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands + # extracted_60m<-matrix(nrow=0, ncol=7) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +} else { + # extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. + # extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. + extracted_48m <- matrix(nrow = 0, ncol = 8) # matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands + # extracted_60m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands } # colnames(extracted_48m)<-pol_bands # colnames(extracted_60m)<-pol_bands -for(i in 1:numfiles){ - HH_filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[1]), pattern=".tif" ,recursive=F)) - HH_inpath<-file.path(palsar_inpath, pol_bands[1],HH_filelist[i]) - HH_rast<-raster(HH_inpath) - - HV_filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern=".tif" ,recursive=F)) - HV_inpath<-file.path(palsar_inpath, pol_bands[2],HV_filelist[i]) - HV_rast<-raster(HV_inpath) - - ################################################ - ## Check each extraction coordinate to see if it falls inside the bounding box of this palsar scene. - ## Only extract the ones that do. - ## NOTE: The bounding box for the PALSAR scene will be larger than the PALSAR scene itself (due to a tilted orbital path). - ## This means that the extraction loop will some number of palsar scenes that have all zeros for the backscatter values. - ## These zeros are truncated in post processing, prior to curve fitting. 
- ################################################ -# rast_box<-bbox(HH_rast@extent) #bounding box of single palsar scene - scnid<-substr(as.character(HV_filelist[i]),1,15) - - ##create data.frame from raster corner coords by querying metadata - ##NOTE: I multiply the lon coord by -1 to make it work with the 'longlat' projection -# pals.ext<-Polygon(rbind( -# c(metadata$scn_nwlon[metadata$scnid==scnid[1]],metadata$scn_nwlat[metadata$scnid==scnid[1]]), -# c(metadata$scn_nelon[metadata$scnid==scnid[1]],metadata$scn_nelat[metadata$scnid==scnid[1]]), -# c(metadata$scn_selon[metadata$scnid==scnid[1]],metadata$scn_selat[metadata$scnid==scnid[1]]), -# c(metadata$scn_swlon[metadata$scnid==scnid[1]],metadata$scn_swlat[metadata$scnid==scnid[1]]), -# c(metadata$scn_nwlon[metadata$scnid==scnid[1]],metadata$scn_nwlat[metadata$scnid==scnid[1]]))) - -pals.ext<-Polygon(rbind( - c(xmin(HH_rast),ymin(HH_rast)), - c(xmin(HH_rast),ymax(HH_rast)), - c(xmax(HH_rast),ymax(HH_rast)), - c(xmax(HH_rast),ymin(HH_rast)), - c(xmin(HH_rast),ymin(HH_rast)))) - - - ##make spatial polygon from raster extent - pals.ext.poly<- Polygons(list(pals.ext),"pals.ext") #spatial polygons (plural) -# scn.extent<-SpatialPolygons(list(pals.ext.poly),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -scn.extent<-SpatialPolygons(list(pals.ext.poly),proj4string=CRS(CRSargs(HH_rast@crs))) - -# rast_Poly<-Polygon(rbind( #polygon from bbox NOTE: bbox is not same as true raster extent -# c(rast_box[1,1],rast_box[2,2]), -# c(rast_box[1,2],rast_box[2,2]), -# c(rast_box[1,2],rast_box[2,1]), -# c(rast_box[1,1],rast_box[2,1]), -# c(rast_box[1,1],rast_box[2,2]))) -# Srs1<- Polygons(list(rast_Poly),"PALSAR_extent") #spatial polygon -# pals_ext<-SpatialPolygons(list(Srs1),proj4string=HH_rast@crs) - scn.extent<- spTransform(scn.extent,HH_rast@crs) -# if(i == 1){ - spcheascoords<-spTransform(spcheascoords,HH_rast@crs) #Convert coords being extracted to CRS of PALSAR raster files -# } - - coords.in.rast<-over(spcheascoords,scn.extent) #extraction coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene - coords.in.rast[is.na(coords.in.rast)]<-0 #replace na's with 0's for indexing - if(max(coords.in.rast)!=1){ #jump to next palsar file if no extraction coordinates fall within this one - next - } - coords.in.rast<-as.logical(coords.in.rast) - +for (i in 1:numfiles) { + HH_filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[1]), pattern = ".tif", recursive = F)) + HH_inpath <- file.path(palsar_inpath, pol_bands[1], HH_filelist[i]) + HH_rast <- raster(HH_inpath) + + HV_filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern = ".tif", recursive = F)) + HV_inpath <- file.path(palsar_inpath, pol_bands[2], HV_filelist[i]) + HV_rast <- raster(HV_inpath) + + ################################################ + ## Check each extraction coordinate to see if it falls inside the bounding box of this palsar scene. + ## Only extract the ones that do. + ## NOTE: The bounding box for the PALSAR scene will be larger than the PALSAR scene itself (due to a tilted orbital path). + ## This means that the extraction loop will include some number of palsar scenes that have all zeros for the backscatter values. + ## These zeros are truncated in post processing, prior to curve fitting.
+ ################################################ + # rast_box<-bbox(HH_rast@extent) #bounding box of single palsar scene + scnid <- substr(as.character(HV_filelist[i]), 1, 15) + + ## create data.frame from raster corner coords by querying metadata + ## NOTE: I multiply the lon coord by -1 to make it work with the 'longlat' projection + # pals.ext<-Polygon(rbind( + # c(metadata$scn_nwlon[metadata$scnid==scnid[1]],metadata$scn_nwlat[metadata$scnid==scnid[1]]), + # c(metadata$scn_nelon[metadata$scnid==scnid[1]],metadata$scn_nelat[metadata$scnid==scnid[1]]), + # c(metadata$scn_selon[metadata$scnid==scnid[1]],metadata$scn_selat[metadata$scnid==scnid[1]]), + # c(metadata$scn_swlon[metadata$scnid==scnid[1]],metadata$scn_swlat[metadata$scnid==scnid[1]]), + # c(metadata$scn_nwlon[metadata$scnid==scnid[1]],metadata$scn_nwlat[metadata$scnid==scnid[1]]))) + + pals.ext <- Polygon(rbind( + c(xmin(HH_rast), ymin(HH_rast)), + c(xmin(HH_rast), ymax(HH_rast)), + c(xmax(HH_rast), ymax(HH_rast)), + c(xmax(HH_rast), ymin(HH_rast)), + c(xmin(HH_rast), ymin(HH_rast)) + )) + + + ## make spatial polygon from raster extent + pals.ext.poly <- Polygons(list(pals.ext), "pals.ext") # spatial polygons (plural) + # scn.extent<-SpatialPolygons(list(pals.ext.poly),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + scn.extent <- SpatialPolygons(list(pals.ext.poly), proj4string = CRS(CRSargs(HH_rast@crs))) + + # rast_Poly<-Polygon(rbind( #polygon from bbox NOTE: bbox is not same as true raster extent + # c(rast_box[1,1],rast_box[2,2]), + # c(rast_box[1,2],rast_box[2,2]), + # c(rast_box[1,2],rast_box[2,1]), + # c(rast_box[1,1],rast_box[2,1]), + # c(rast_box[1,1],rast_box[2,2]))) + # Srs1<- Polygons(list(rast_Poly),"PALSAR_extent") #spatial polygon + # pals_ext<-SpatialPolygons(list(Srs1),proj4string=HH_rast@crs) + scn.extent <- spTransform(scn.extent, HH_rast@crs) + # if(i == 1){ + spcheascoords <- spTransform(spcheascoords, HH_rast@crs) # Convert coords being extracted to CRS of PALSAR raster files + # } + + coords.in.rast <- over(spcheascoords, scn.extent) # extraction coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene + coords.in.rast[is.na(coords.in.rast)] <- 0 # replace na's with 0's for indexing + if (max(coords.in.rast) != 1) { # jump to next palsar file if no extraction coordinates fall within this one + next + } + coords.in.rast <- as.logical(coords.in.rast) + ################################ - ##calibration PASLAR data from extraction coords (mean of pixels w/in 48m buffer radius) + ## calibration PASLAR data from extraction coords (mean of pixels w/in 48m buffer radius) ################################ - HH_data_48m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow - HV_data_48m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow - #extract SE's also? - scnid<-matrix(substr(as.character(HV_filelist[i]),1,15),nrow=length(HH_data_48m),ncol=1) #vector of this scnid. 
length = number of coords in this scene - palsar_date<-matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid==scnid[1]]),1,8),"%Y%m%d")),nrow=length(HH_data_48m),ncol=1) # same as above for scn date - - ##cbind for output - if(fia==1){ - all_48<-cbind(scnid,palsar_date,plot,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_48m,HV_data_48m) #for FIA (no plot identifiers) - } else{ - all_48<- cbind(scnid,palsar_date,as.character(calib_infile$plot[coords.in.rast]),spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_48m,HV_data_48m) #for WLEF - } - ##rbind to previous loop output -# if(i==1){ -# extracted_48m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_48 -# }else if(i>1){ -# extracted_48m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_48 -# } - extracted_48m<-rbind(extracted_48m,all_48) + HH_data_48m <- extract(HH_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # Extract backscatter values from all coords in this scn. This step is very slow + HV_data_48m <- extract(HV_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # Extract backscatter values from all coords in this scn. This step is very slow + # extract SE's also? + scnid <- matrix(substr(as.character(HV_filelist[i]), 1, 15), nrow = length(HH_data_48m), ncol = 1) # vector of this scnid. length = number of coords in this scene + palsar_date <- matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid == scnid[1]]), 1, 8), "%Y%m%d")), nrow = length(HH_data_48m), ncol = 1) # same as above for scn date + + ## cbind for output + if (fia == 1) { + all_48 <- cbind(scnid, palsar_date, plot, spcheascoords[coords.in.rast]@coords, biomass[coords.in.rast], HH_data_48m, HV_data_48m) # for FIA (no plot identifiers) + } else { + all_48 <- cbind(scnid, palsar_date, as.character(calib_infile$plot[coords.in.rast]), spcheascoords[coords.in.rast]@coords, biomass[coords.in.rast], HH_data_48m, HV_data_48m) # for WLEF + } + ## rbind to previous loop output + # if(i==1){ + # extracted_48m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_48 + # }else if(i>1){ + # extracted_48m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_48 + # } + extracted_48m <- rbind(extracted_48m, all_48) ############################### - #calibration PASLAR data from extraction coords (mean of pixles w/in 60m buffer radius) + # calibration PASLAR data from extraction coords (mean of pixles w/in 60m buffer radius) ############################### -# HH_data_60m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow -# HV_data_60m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. 
This step is very slow -# -# ##cbind for output -# if(fia==1){ -# all_60<-cbind(scnid,palsar_date,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for FIA (no plot identifiers) -# } else{ -# all_60<- cbind(scnid,palsar_date,calib_infile$plot[coords.in.rast],spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for WLEF -# } -# -# # ##rbind to previous loop output -# # if(i==1){ -# # extracted_60m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_60 -# # }else if(i>1){ -# # extracted_60m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_60 -# # } -# extracted_60m<-rbind(extracted_60m,all_60) - - print(paste("i=",i,sep="")) - print(scnid[1]) - print(palsar_date[1]) -# print(length(HH_data_48m) == length(HH_data_60m)) + # HH_data_60m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow + # HV_data_60m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow + # + # ##cbind for output + # if(fia==1){ + # all_60<-cbind(scnid,palsar_date,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for FIA (no plot identifiers) + # } else{ + # all_60<- cbind(scnid,palsar_date,calib_infile$plot[coords.in.rast],spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for WLEF + # } + # + # # ##rbind to previous loop output + # # if(i==1){ + # # extracted_60m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_60 + # # }else if(i>1){ + # # extracted_60m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_60 + # # } + # extracted_60m<-rbind(extracted_60m,all_60) + + print(paste("i=", i, sep = "")) + print(scnid[1]) + print(palsar_date[1]) + # print(length(HH_data_48m) == length(HH_data_60m)) } # write.csv(extracted_48m,file=paste(outpath,"/extracted_48m.csv",sep=""),quote=FALSE,row.names=F) # write.csv(extracted_60m,file=paste(outpath,"/extracted_60m.csv",sep=""),quote=FALSE,row.names=F) ## Create working copy of data (so that I don't need to re-extract if I screw up the data) -## NOTE: Here I remove the NAs from coords that don't fall with in the scene and +## NOTE: Here I remove the NAs from coords that don't fall with in the scene and ## the zeros that are an artifact of the mismatch between palsar bbox dim and palsar raster dim (due to tilted orbital path) # dat48<-data.frame(extracted_48m[as.numeric(extracted_48m[,ncol(extracted_48m)])!=0,]) #& extracted_48m[,ncol(extracted_48m)]>0,]) -dat48<-data.frame(na.exclude(extracted_48m)) +dat48 <- data.frame(na.exclude(extracted_48m)) # dat60<-data.frame(extracted_60m[as.numeric(extracted_60m[,ncol(extracted_60m)])!=0,]) #& extracted_60m[,ncol(extracted_60m)]>0,]) # dat60<-data.frame(extracted_60m) -if(fia==1){ #FIA data does not contain a plot-id, so here I add a dummy plot-id -# plot<-seq(1,nrow(dat48),1) -# dat48<-cbind(dat48[,1:2],plot,dat48[,3:7]) - colnames(dat48)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.48", "HV.sigma.48") -# colnames(dat60)<-c("scnid","scndate", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") -}else{ - 
colnames(dat48)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.48", "HV.sigma.48") -# colnames(dat60)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") +if (fia == 1) { # FIA data does not contain a plot-id, so here I add a dummy plot-id + # plot<-seq(1,nrow(dat48),1) + # dat48<-cbind(dat48[,1:2],plot,dat48[,3:7]) + colnames(dat48) <- c("scnid", "scndate", "plot", "UTM.lat", "UTM.lon", "biomass", "HH.sigma.48", "HV.sigma.48") + # colnames(dat60)<-c("scnid","scndate", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") +} else { + colnames(dat48) <- c("scnid", "scndate", "plot", "UTM.lat", "UTM.lon", "biomass", "HH.sigma.48", "HV.sigma.48") + # colnames(dat60)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") } ## NOTE: Converting to dataframe changes all values to factor, so here I reformat the data and save it -dat48$scnid<-as.character(dat48$scnid) -dat48$scndate<-as.Date(dat48$scndate,"%Y-%m-%d") -dat48$plot<-as.numeric(dat48$plot) -dat48$UTM.lat<- as.numeric(as.character(dat48$UTM.lat)) -dat48$UTM.lon<- as.numeric(as.character(dat48$UTM.lon)) -dat48$biomass<- as.numeric(as.character(dat48$biomass)) -dat48$HH.sigma.48<- as.numeric(as.character(dat48$HH.sigma.48)) -dat48$HV.sigma.48<- as.numeric(as.character(dat48$HV.sigma.48)) - -#This will exclude scenes from the leaf off period (Nov-April) -if(leaf.off==1){ #include leaf off data - dat48<-dat48 -}else{ #exclude leaf off data - dat48<-dat48[as.numeric(format(dat48$scndate,"%m"))>=05 & as.numeric(format(dat48$scndate,"%m"))<=10,] +dat48$scnid <- as.character(dat48$scnid) +dat48$scndate <- as.Date(dat48$scndate, "%Y-%m-%d") +dat48$plot <- as.numeric(dat48$plot) +dat48$UTM.lat <- as.numeric(as.character(dat48$UTM.lat)) +dat48$UTM.lon <- as.numeric(as.character(dat48$UTM.lon)) +dat48$biomass <- as.numeric(as.character(dat48$biomass)) +dat48$HH.sigma.48 <- as.numeric(as.character(dat48$HH.sigma.48)) +dat48$HV.sigma.48 <- as.numeric(as.character(dat48$HV.sigma.48)) + +# This will exclude scenes from the leaf off period (Nov-April) +if (leaf.off == 1) { # include leaf off data + dat48 <- dat48 +} else { # exclude leaf off data + dat48 <- dat48[as.numeric(format(dat48$scndate, "%m")) >= 05 & as.numeric(format(dat48$scndate, "%m")) <= 10, ] } -#Save extracted data -write.table(dat48,file=paste(outpath,"/",coord.set[fia+1],"_dat48.csv",sep=""),sep=",",quote=FALSE,col.names = TRUE, row.names=F) - -#Switch to working from saved data -dat48<-read.csv(paste(outpath,"/",coord.set[fia+1],"_dat48.csv",sep=""),header = TRUE) - -#Correctly format data (again...sigh...) 
-dat48$scnid<-as.character(dat48$scnid) -dat48$scndate<-as.Date(dat48$scndate,"%Y-%m-%d") -dat48$plot<-as.character(dat48$plot) -dat48$UTM.lat<- as.numeric(as.character(dat48$UTM.lat)) -dat48$UTM.lon<- as.numeric(as.character(dat48$UTM.lon)) -dat48$biomass<- as.numeric(as.character(dat48$biomass)) -dat48$HH.sigma.48<- as.numeric(as.character(dat48$HH.sigma.48)) -dat48$HV.sigma.48<- as.numeric(as.character(dat48$HV.sigma.48)) -dat48$year<-as.numeric(format(dat48$scndate,"%Y")) -dat48$month<-as.numeric(format(dat48$scndate,"%m")) -dat48$doy<- - - - -dates<-as.character(dat48$scndate) #reformat as character -data.doy.07<-chron(dates = dat48$scndate[grep("2007",dates)], format = (dates = "Y-m-d"), origin = c(day = 1, month = 0, year = 2007)) -data.doy.07<-as.numeric(data.doy.07) -data.year.07<-substr(dat48$scndate[grep("2007",dates)],1,4) -data.dates.07<-cbind(data.year.07,data.doy.07) - - - -for(y in unique(dat48$year)){ - for(m in unique(dat48$month)){ - if(length(dat48$biomass[dat48$month==m & dat48$year==y])<1){ +# Save extracted data +write.table(dat48, file = paste(outpath, "/", coord.set[fia + 1], "_dat48.csv", sep = ""), sep = ",", quote = FALSE, col.names = TRUE, row.names = F) + +# Switch to working from saved data +dat48 <- read.csv(paste(outpath, "/", coord.set[fia + 1], "_dat48.csv", sep = ""), header = TRUE) + +# Correctly format data (again...sigh...) +dat48$scnid <- as.character(dat48$scnid) +dat48$scndate <- as.Date(dat48$scndate, "%Y-%m-%d") +dat48$plot <- as.character(dat48$plot) +dat48$UTM.lat <- as.numeric(as.character(dat48$UTM.lat)) +dat48$UTM.lon <- as.numeric(as.character(dat48$UTM.lon)) +dat48$biomass <- as.numeric(as.character(dat48$biomass)) +dat48$HH.sigma.48 <- as.numeric(as.character(dat48$HH.sigma.48)) +dat48$HV.sigma.48 <- as.numeric(as.character(dat48$HV.sigma.48)) +dat48$year <- as.numeric(format(dat48$scndate, "%Y")) +dat48$month <- as.numeric(format(dat48$scndate, "%m")) +dat48$doy <- + dates <- as.character(dat48$scndate) # reformat as character +data.doy.07 <- chron(dates = dat48$scndate[grep("2007", dates)], format = (dates <- "Y-m-d"), origin = c(day = 1, month = 0, year = 2007)) +data.doy.07 <- as.numeric(data.doy.07) +data.year.07 <- substr(dat48$scndate[grep("2007", dates)], 1, 4) +data.dates.07 <- cbind(data.year.07, data.doy.07) + + + +for (y in unique(dat48$year)) { + for (m in unique(dat48$month)) { + if (length(dat48$biomass[dat48$month == m & dat48$year == y]) < 1) { next - }else{ - plot(dat48$biomass[dat48$month==m & dat48$year==y],dat48$HH.sigma.48[dat48$month==m & dat48$year==y], - xlab="biomass",ylab='HH',main=paste(month.abb[m],y,sep=" ") ) - }#if - }#for m -}#for y - -par(mfrow=c(1,3)) -plot(dat48$HH.sigma.48[dat48$month==5 & dat48$year==2007],dat48$HH.sigma.48[dat48$month==6 & dat48$year==2007], - xlab="05 HH",ylab='06 HH',main="may 2007 vs jun 2007") - fit1<-lm(dat48$HH.sigma.48[dat48$month==6 & dat48$year==2007] ~ dat48$HH.sigma.48[dat48$month==5 & dat48$year==2007]) - abline(0,1,lwd=2,lty=2,col="grey") - abline(fit1,lwd=2,lty=1,col="red") -plot(dat48$HH.sigma.48[dat48$month==5 & dat48$year==2007],dat48$HH.sigma.48[dat48$month==8 & dat48$year==2007], - xlab="05 HH",ylab='08 HH',main="may 2007 vs aug 2007") - fit2<-lm(dat48$HH.sigma.48[dat48$month==5 & dat48$year==2007] ~ dat48$HH.sigma.48[dat48$month==8 & dat48$year==2007]) - abline(0,1,lwd=2,lty=2,col="grey") - abline(fit1,lwd=2,lty=1,col="red") -plot(dat48$HH.sigma.48[dat48$month==6 & dat48$year==2007],dat48$HH.sigma.48[dat48$month==8 & dat48$year==2007], - xlab="06 HH",ylab='08 
HH',main="jun 2007 vs aug 2007") - fit3<-lm(dat48$HH.sigma.48[dat48$month==6 & dat48$year==2007] ~ dat48$HH.sigma.48[dat48$month==8 & dat48$year==2007]) - abline(0,1,lwd=2,lty=2,col="grey") - abline(fit1,lwd=2,lty=1,col="red") + } else { + plot(dat48$biomass[dat48$month == m & dat48$year == y], dat48$HH.sigma.48[dat48$month == m & dat48$year == y], + xlab = "biomass", ylab = "HH", main = paste(month.abb[m], y, sep = " ") + ) + } # if + } # for m +} # for y + +par(mfrow = c(1, 3)) +plot(dat48$HH.sigma.48[dat48$month == 5 & dat48$year == 2007], dat48$HH.sigma.48[dat48$month == 6 & dat48$year == 2007], + xlab = "05 HH", ylab = "06 HH", main = "may 2007 vs jun 2007" +) +fit1 <- lm(dat48$HH.sigma.48[dat48$month == 6 & dat48$year == 2007] ~ dat48$HH.sigma.48[dat48$month == 5 & dat48$year == 2007]) +abline(0, 1, lwd = 2, lty = 2, col = "grey") +abline(fit1, lwd = 2, lty = 1, col = "red") +plot(dat48$HH.sigma.48[dat48$month == 5 & dat48$year == 2007], dat48$HH.sigma.48[dat48$month == 8 & dat48$year == 2007], + xlab = "05 HH", ylab = "08 HH", main = "may 2007 vs aug 2007" +) +fit2 <- lm(dat48$HH.sigma.48[dat48$month == 5 & dat48$year == 2007] ~ dat48$HH.sigma.48[dat48$month == 8 & dat48$year == 2007]) +abline(0, 1, lwd = 2, lty = 2, col = "grey") +abline(fit1, lwd = 2, lty = 1, col = "red") +plot(dat48$HH.sigma.48[dat48$month == 6 & dat48$year == 2007], dat48$HH.sigma.48[dat48$month == 8 & dat48$year == 2007], + xlab = "06 HH", ylab = "08 HH", main = "jun 2007 vs aug 2007" +) +fit3 <- lm(dat48$HH.sigma.48[dat48$month == 6 & dat48$year == 2007] ~ dat48$HH.sigma.48[dat48$month == 8 & dat48$year == 2007]) +abline(0, 1, lwd = 2, lty = 2, col = "grey") +abline(fit1, lwd = 2, lty = 1, col = "red") # dat60$scnid<-as.character(dat60$scnid) # dat60$scndate<-as.Date(dat60$scndate,"%Y-%M-%d") @@ -359,95 +365,95 @@ plot(dat48$HH.sigma.48[dat48$month==6 & dat48$year==2007],dat48$HH.sigma.48[dat # write.csv(dat60,file=paste(outpath,"/dat60.csv",sep=""),quote=FALSE,row.names=F) -#Generate PDF of raw data exploration -#NOTE: Some of these figures will not be relevant for the FIA dataset -pdf(paste(outpath,"/",coord.set[fia+1], "_ExtractionQCplots.pdf",sep=""),width = 6, height = 6, paper='special') +# Generate PDF of raw data exploration +# NOTE: Some of these figures will not be relevant for the FIA dataset +pdf(paste(outpath, "/", coord.set[fia + 1], "_ExtractionQCplots.pdf", sep = ""), width = 6, height = 6, paper = "special") -par(mfrow=c(1,2)) -years<-as.numeric(format(dat48$scndate,"%Y")) -hist(years,freq=TRUE,main="By year") -months<-as.numeric(format(dat48$scndate,"%m")) -hist(months,freq=TRUE,main="By month") +par(mfrow = c(1, 2)) +years <- as.numeric(format(dat48$scndate, "%Y")) +hist(years, freq = TRUE, main = "By year") +months <- as.numeric(format(dat48$scndate, "%m")) +hist(months, freq = TRUE, main = "By month") # par(mfrow=c(1,1)) # hist(dat48$scndate,freq=T,100,xaxt="n") # axis(1, dat48$scndate, format(dat48$scndate, "%b %Y"), cex.axis = .7) -par(mfrow=c(1,3)) -hist(dat48$biomass,main=paste(coord.set[fia+1],"biomass",sep=" ")) -hist(dat48$HH.sigma.48,main=paste(coord.set[fia+1],"HH",sep=" ")) -hist(dat48$HV.sigma.48,main=paste(coord.set[fia+1],"HV",sep=" ")) - -Lab.palette <- colorRampPalette(c("white","violet","blue","green","yellow","orange", "red"), space = "Lab") -par(mfrow=c(1,3)) -smoothScatter(dat48$HV.sigma.48,dat48$HH.sigma.48,nbin=256,colramp = Lab.palette,xlab="HV",ylab="HH") -smoothScatter(dat48$biomass,dat48$HH.sigma.48,nbin=256,colramp = 
Lab.palette,xlab="biomass",ylab="HH",main="Density") -smoothScatter(dat48$biomass,dat48$HV.sigma.48,nbin=256,colramp = Lab.palette,ylim=c(0,max(dat48$HH.sigma.48)),xlab="biomass",ylab="HV") - -par(mfrow=c(1,2)) -scatter.smooth(dat48$biomass,dat48$HH.sigma.48,cex=0,xlab="biomass",ylab="HH",main="48m",col="grey") - points(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2007],col=1,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2008],col=2,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2009],col=3,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col=4,cex=0.5) - legend("topright",pch=1,legend=c(2007,2008,2009,2010), cex=0.7,pt.cex=0.5,col=1:4,bty="n",xjust=1) -scatter.smooth(dat48$biomass,dat48$HV.sigma.48,cex=0,xlab="biomass",ylab="HV",main="48m",col="grey") - points(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2007],col=1,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2008],col=2,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2009],col=3,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],col=4,cex=0.5) - legend("topright",pch=1,legend=c(2007,2008,2009,2010), cex=0.7,pt.cex=0.5,col=1:4,bty="n",xjust=1) +par(mfrow = c(1, 3)) +hist(dat48$biomass, main = paste(coord.set[fia + 1], "biomass", sep = " ")) +hist(dat48$HH.sigma.48, main = paste(coord.set[fia + 1], "HH", sep = " ")) +hist(dat48$HV.sigma.48, main = paste(coord.set[fia + 1], "HV", sep = " ")) + +Lab.palette <- colorRampPalette(c("white", "violet", "blue", "green", "yellow", "orange", "red"), space = "Lab") +par(mfrow = c(1, 3)) +smoothScatter(dat48$HV.sigma.48, dat48$HH.sigma.48, nbin = 256, colramp = Lab.palette, xlab = "HV", ylab = "HH") +smoothScatter(dat48$biomass, dat48$HH.sigma.48, nbin = 256, colramp = Lab.palette, xlab = "biomass", ylab = "HH", main = "Density") +smoothScatter(dat48$biomass, dat48$HV.sigma.48, nbin = 256, colramp = Lab.palette, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "biomass", ylab = "HV") + +par(mfrow = c(1, 2)) +scatter.smooth(dat48$biomass, dat48$HH.sigma.48, cex = 0, xlab = "biomass", ylab = "HH", main = "48m", col = "grey") +points(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2007], col = 1, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2008], col = 2, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2009], col = 3, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = 4, cex = 0.5) +legend("topright", pch = 1, legend = c(2007, 2008, 2009, 2010), cex = 0.7, pt.cex = 0.5, col = 1:4, bty = "n", xjust = 1) +scatter.smooth(dat48$biomass, dat48$HV.sigma.48, cex = 0, xlab = "biomass", ylab = "HV", main = "48m", col = "grey") +points(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2007], col = 1, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2008], 
dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2008], col = 2, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2009], col = 3, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], col = 4, cex = 0.5) +legend("topright", pch = 1, legend = c(2007, 2008, 2009, 2010), cex = 0.7, pt.cex = 0.5, col = 1:4, bty = "n", xjust = 1) # scatter.smooth(dat60$biomass,dat60$HV.sigma.60,xlab="biomass",ylab="HV",main="60m",col="grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60,xlab="biomass",ylab="HV",main="60m",col="grey") -par(mfrow=c(1,2)) -scatter.smooth(dat48$biomass,dat48$HV.sigma.48/dat48$HH.sigma.48,xlab="biomass",ylab="HV/HH",main="48m",col="grey") +par(mfrow = c(1, 2)) +scatter.smooth(dat48$biomass, dat48$HV.sigma.48 / dat48$HH.sigma.48, xlab = "biomass", ylab = "HV/HH", main = "48m", col = "grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60/dat60$HV.sigma.60,xlab="biomass",ylab="HV/HV",main="60m",col="grey") -scatter.smooth(dat48$biomass,dat48$HH.sigma.48*dat48$HV.sigma.48,xlab="biomass",ylab="HHxHV",main="48m",col="grey") +scatter.smooth(dat48$biomass, dat48$HH.sigma.48 * dat48$HV.sigma.48, xlab = "biomass", ylab = "HHxHV", main = "48m", col = "grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60*dat60$HV.sigma.60,xlab="biomass",ylab="HVxHV",main="60m",col="grey") -par(mfrow=c(1,1)) -scatter.smooth(dat48$biomass,(dat48$HH.sigma.48-dat48$HV.sigma.48)/(dat48$HH.sigma.48+dat48$HV.sigma.48),xlab="biomass",ylab="(HH-HV)/(HH+HV)",main="48m", col="gray") +par(mfrow = c(1, 1)) +scatter.smooth(dat48$biomass, (dat48$HH.sigma.48 - dat48$HV.sigma.48) / (dat48$HH.sigma.48 + dat48$HV.sigma.48), xlab = "biomass", ylab = "(HH-HV)/(HH+HV)", main = "48m", col = "gray") # scatter.smooth(dat60$biomass,(dat60$HV.sigma.60-dat60$HV.sigma.60)/(dat60$HV.sigma.60+dat60$HV.sigma.60),xlab="biomass",ylab="(HV-HV)/(HV+HV)",main="60m", col="gray") -par(mfrow=c(4,2),mar=c(4,4,2,2)) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2007],col="grey",xlab="biomass",ylab="HH",main="2007") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2007],col="grey",xlab="biomass",ylab="HV",main="2007") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2008],col="grey",xlab="biomass",ylab="HH",main="2008") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2008],col="grey",xlab="biomass",ylab="HV",main="2008") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2009],col="grey",xlab="biomass",ylab="HH",main="2009") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2009],col="grey",xlab="biomass",ylab="HV",main="2009") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010") - points(dat48$biomass[format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%m")>10],col="red",xlab="biomass",ylab="HV",main="2010") - legend("topright",pch=1,legend=c("!Dec","Dec"), cex=0.7,pt.cex=0.5,col=c("grey","red"),bty="n",xjust=1) 
-scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HV",main="2010") - points(dat48$biomass[format(dat48$scndate,"%m")>10],dat48$HV.sigma.48[format(dat48$scndate,"%m")>10],col="red",xlab="biomass",ylab="HV",main="2010") - legend("topright",pch=1,legend=c("!Dec","Dec"), cex=0.7,pt.cex=0.5,col=c("grey","red"),bty="n",xjust=1) - -par(mfrow=c(1,2)) -plot(dat48$scndate,dat48$HH.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HH") -plot(dat48$scndate,dat48$HV.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HV") -mtext("On same scale", side=3, line=-2, outer=TRUE, cex=1, font=2) - -par(mfrow=c(1,2)) -plot(dat48$scndate,dat48$HH.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HH") -plot(dat48$scndate,dat48$HV.sigma.48,ylim=c(0,max(dat48$HV.sigma.48)),xlab="Date",ylab="HV") -mtext("By Date", side=3, line=-2, outer=TRUE, cex=1, font=2) - -par(mfrow=c(1,2)) -plot(dat48$scndate[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],xlab="2010",ylab="HH") -plot(dat48$scndate[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],xlab="2010",ylab="HV") -mtext("2010 only", side=3, line=-3, outer=TRUE, cex=1, font=2) - -if(leaf.off==1){ -par(mfrow=c(2,2)) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010 only") -points(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10]) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010 only") -points(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10]) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],col="grey",xlab="biomass",ylab="HH",main="2010 only,Dec. removed") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],col="grey",xlab="biomass",ylab="HV",main="2010 only,Dec. 
removed") +par(mfrow = c(4, 2), mar = c(4, 4, 2, 2)) +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2007], col = "grey", xlab = "biomass", ylab = "HH", main = "2007") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2007], col = "grey", xlab = "biomass", ylab = "HV", main = "2007") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2008], col = "grey", xlab = "biomass", ylab = "HH", main = "2008") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2008], col = "grey", xlab = "biomass", ylab = "HV", main = "2008") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2009], col = "grey", xlab = "biomass", ylab = "HH", main = "2009") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2009], col = "grey", xlab = "biomass", ylab = "HV", main = "2009") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010") +points(dat48$biomass[format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%m") > 10], col = "red", xlab = "biomass", ylab = "HV", main = "2010") +legend("topright", pch = 1, legend = c("!Dec", "Dec"), cex = 0.7, pt.cex = 0.5, col = c("grey", "red"), bty = "n", xjust = 1) +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HV", main = "2010") +points(dat48$biomass[format(dat48$scndate, "%m") > 10], dat48$HV.sigma.48[format(dat48$scndate, "%m") > 10], col = "red", xlab = "biomass", ylab = "HV", main = "2010") +legend("topright", pch = 1, legend = c("!Dec", "Dec"), cex = 0.7, pt.cex = 0.5, col = c("grey", "red"), bty = "n", xjust = 1) + +par(mfrow = c(1, 2)) +plot(dat48$scndate, dat48$HH.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HH") +plot(dat48$scndate, dat48$HV.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HV") +mtext("On same scale", side = 3, line = -2, outer = TRUE, cex = 1, font = 2) + +par(mfrow = c(1, 2)) +plot(dat48$scndate, dat48$HH.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HH") +plot(dat48$scndate, dat48$HV.sigma.48, ylim = c(0, max(dat48$HV.sigma.48)), xlab = "Date", ylab = "HV") +mtext("By Date", side = 3, line = -2, outer = TRUE, cex = 1, font = 2) + +par(mfrow = c(1, 2)) +plot(dat48$scndate[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], xlab = "2010", ylab = "HH") +plot(dat48$scndate[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], xlab = "2010", ylab = "HV") +mtext("2010 only", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + +if (leaf.off == 1) { + par(mfrow = c(2, 2)) + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only") + points(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10]) + 
scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only") + points(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10]) + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only,Dec. removed") + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], col = "grey", xlab = "biomass", ylab = "HV", main = "2010 only,Dec. removed") } # #Plot individual time series of HH for each coordinate set @@ -470,38 +476,38 @@ scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scn # par(new=T) # } -#breaks data into quantiles each containing ~5% of the data -bind.bio<-tapply(dat48$biomass,cut(dat48$biomass,breaks=round(quantile(dat48$biomass,probs = seq(0, 1, 0.05))) ),mean) -bind.HH<-tapply(dat48$HH.sigma.48,cut(dat48$HH.sigma.48,breaks=quantile(dat48$HH.sigma.48,probs = seq(0, 1, 0.05)) ),mean) -bind.HV<-tapply(dat48$HV.sigma.48,cut(dat48$HV.sigma.48,breaks=quantile(dat48$HV.sigma.48,probs = seq(0, 1, 0.05)) ),mean) -par(new=FALSE, mfrow=c(1,2)) -plot(dat48$biomass,dat48$HH.sigma.48,col="grey",pch=".",xlab="Binned Biomass",ylab="Binned HH") - points(bind.bio,bind.HH) -plot(dat48$biomass,dat48$HV.sigma.48,col="grey",,pch=".",xlab="Binned Biomass",ylab="Binned HV") - points(bind.bio,bind.HV) -mtext("Bins each contain 5% of the data points", side=3, line=-3, outer=TRUE, cex=1, font=2) - -#breaks data into even-length bins -bind.bio<-tapply(dat48$biomass, cut(dat48$biomass, breaks=seq(0, max(dat48$biomass), 0.05*max(dat48$biomass))),mean) -bind.HH<-tapply(dat48$HH.sigma.48,cut(dat48$HH.sigma.48,breaks=seq(0, max(dat48$HH.sigma.48), 0.05*max(dat48$HH.sigma.48))),mean) -bind.HV<-tapply(dat48$HV.sigma.48,cut(dat48$HV.sigma.48,breaks=seq(0, max(dat48$HV.sigma.48), 0.05*max(dat48$HV.sigma.48))),mean) -par(mfrow=c(1,2)) -plot(dat48$biomass,dat48$HH.sigma.48,col="grey",pch=".",xlab="Binned Biomass",ylab="Binned HH") - points(bind.bio,bind.HH) -plot(dat48$biomass,dat48$HV.sigma.48,col="grey",,pch=".",xlab="Binned Biomass",ylab="Binned HV") - points(bind.bio,bind.HV) -mtext("Bins each contain 5% of data range", side=3, line=-3, outer=TRUE, cex=1, font=2) - -par(mfrow=c(1,2)) -bplot.xy(dat48$biomass,dat48$HH.sigma.48,N=15,xlab="biomass",ylab="HH (simga naught)") -bplot.xy(dat48$biomass,dat48$HV.sigma.48,N=15,xlab="biomass",ylab="HV (simga naught)") +# breaks data into quantiles each containing ~5% of the data +bind.bio <- tapply(dat48$biomass, cut(dat48$biomass, breaks = round(quantile(dat48$biomass, probs = seq(0, 1, 0.05)))), mean) +bind.HH <- tapply(dat48$HH.sigma.48, cut(dat48$HH.sigma.48, breaks = quantile(dat48$HH.sigma.48, probs = seq(0, 1, 0.05))), mean) +bind.HV <- tapply(dat48$HV.sigma.48, cut(dat48$HV.sigma.48, breaks = quantile(dat48$HV.sigma.48, probs = seq(0, 1, 0.05))), mean) +par(new = FALSE, mfrow = c(1, 2)) +plot(dat48$biomass, dat48$HH.sigma.48, col = "grey", pch = ".", xlab = "Binned Biomass", ylab = "Binned HH") +points(bind.bio, bind.HH) +plot(dat48$biomass, dat48$HV.sigma.48, col = "grey", 
pch = ".", xlab = "Binned Biomass", ylab = "Binned HV") +points(bind.bio, bind.HV) +mtext("Bins each contain 5% of the data points", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + +# breaks data into even-length bins +bind.bio <- tapply(dat48$biomass, cut(dat48$biomass, breaks = seq(0, max(dat48$biomass), 0.05 * max(dat48$biomass))), mean) +bind.HH <- tapply(dat48$HH.sigma.48, cut(dat48$HH.sigma.48, breaks = seq(0, max(dat48$HH.sigma.48), 0.05 * max(dat48$HH.sigma.48))), mean) +bind.HV <- tapply(dat48$HV.sigma.48, cut(dat48$HV.sigma.48, breaks = seq(0, max(dat48$HV.sigma.48), 0.05 * max(dat48$HV.sigma.48))), mean) +par(mfrow = c(1, 2)) +plot(dat48$biomass, dat48$HH.sigma.48, col = "grey", pch = ".", xlab = "Binned Biomass", ylab = "Binned HH") +points(bind.bio, bind.HH) +plot(dat48$biomass, dat48$HV.sigma.48, col = "grey", pch = ".", xlab = "Binned Biomass", ylab = "Binned HV") +points(bind.bio, bind.HV) +mtext("Bins each contain 5% of data range", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + +par(mfrow = c(1, 2)) +bplot.xy(dat48$biomass, dat48$HH.sigma.48, N = 15, xlab = "biomass", ylab = "HH (sigma naught)") +bplot.xy(dat48$biomass, dat48$HV.sigma.48, N = 15, xlab = "biomass", ylab = "HV (sigma naught)") dev.off() -#Run curve fitting function -n.reps<- 1000 #sets value for n.adapt and n.iter -n.chain<-3 #number of MCMC chains to run -bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) +# Run curve fitting function +n.reps <- 1000 # sets value for n.adapt and n.iter +n.chain <- 3 # number of MCMC chains to run +bayes.curve.fit(outpath, coord.set, fia, n.reps, n.chain) @@ -552,62 +558,62 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) ######################################################## ####################### -##Use Non-linear Least Squares (nls) to fit rectangular hyperbola NOTE:This method is not preferred. +## Use Non-linear Least Squares (nls) to fit rectangular hyperbola NOTE: This method is not preferred.
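For orientation, a minimal sketch of the fit that the commented-out nls block below performs, written against dat48 as constructed above. It is illustrative only: the choice of the HV band, the square-root transform of biomass, the starting values, and the object name rh_fit simply mirror the commented code rather than a recommended analysis.

x <- sqrt(dat48$biomass)
y <- dat48$HV.sigma.48
# Rectangular hyperbola: backscatter = (alpha * beta * biomass) / (beta + alpha * biomass)
rh_fit <- nls(y ~ (alpha * beta * x) / (beta + alpha * x),
  start = list(alpha = 0.75, beta = mean(y[x > quantile(x)[4]], na.rm = TRUE)),
  na.action = na.exclude
)
summary(rh_fit) # alpha and beta estimates with standard errors and p-values
1 - stats::var(residuals(rh_fit), na.rm = TRUE) / stats::var(y, na.rm = TRUE) # pseudo-R2, as in the commented block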
# ####################### # ##NOTE: backscatter=((alpha*beta*biomass)/(beta + alpha*biomass)) # buff<-c("48", "60") -# -# # col.names<-c("pol_band", +# +# # col.names<-c("pol_band", # # "buffer_radius(m)", -# # "biomass_R2", -# # "mod.alpha", -# # "pval.alpha", -# # "alpha.ci.lower", +# # "biomass_R2", +# # "mod.alpha", +# # "pval.alpha", +# # "alpha.ci.lower", # # "alpha.ci.upper", -# # "mod.beta", -# # "pval.b", +# # "mod.beta", +# # "pval.b", # # "beta.ci.upper", # # "beta.ci.upper", -# # "num.iters", +# # "num.iters", # # "convergence") # # mod.params<-matrix(nrow=1,ncol=length(col.names)) # # colnames(mod.params)<-col.names -# # +# # # # par(mfrow=c(length(pol_bands),length(buff))) # # for(i in 1:length(pol_bands)){ # # for(j in 1:length(buff)){ -# # +# # # # y<-eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$",sep=''),paste(pol_bands[i],'.sigma.',buff[j],sep='')))) # # x<-(eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$biomass",sep='')))))^0.5 -# # +# # # # # Plot backscatter v biomass # # plot(x, y, # # xlab=expression(sqrt(biomass)), # # ylab=pol_bands[i], # # main=buff[j], # # col=51,pch=19, cex=0.6, -# # xlim=c(min(x),max(x)), +# # xlim=c(min(x),max(x)), # # ylim=c(min(y),max(y)), # # las=1, cex.axis=1.2) -# # +# # # # # Calculate rectangular hyperbola between backscatter and biomass # # biomass_curve <- nls(formula=y ~ ((alpha*beta*x)/(beta + alpha*x)), # also in Gu 2002 -# # data=list(y = y, x = x), -# # start=list(alpha = .75, beta = mean(y[x > quantile(x)[4]])), +# # data=list(y = y, x = x), +# # start=list(alpha = .75, beta = mean(y[x > quantile(x)[4]])), # # na.action="na.exclude", trace=F) # # biomass_R2 <- 1 - stats::var(residuals(biomass_curve)) / stats::var(y) # R2 -# # +# # # # # Plot rectangular hyperbola model fit # # mod.alpha <- summary(biomass_curve)$parameters[1] # alpha value # # mod.beta <- summary(biomass_curve)$parameters[2] # Beta value -# # mod.biomass <- seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), by=0.01) -# # mod.HH <- (mod.alpha*mod.beta*mod.biomass)/(mod.beta + mod.alpha*mod.biomass) +# # mod.biomass <- seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), by=0.01) +# # mod.HH <- (mod.alpha*mod.beta*mod.biomass)/(mod.beta + mod.alpha*mod.biomass) # # lines(x=mod.biomass, y=mod.HH, col="black", lty=1, lwd=2.5) -# # +# # # # legend("topright", legend=c(paste("R^2=", format(biomass_R2, digits=2)), # # paste("alpha=",format(mod.alpha,digits=2)), # # paste("beta=",format(mod.beta,digits=2))), bty="n",cex=1.2) -# # +# # # # # Write model parameters to output file # # num.iters <- as.numeric(biomass_curve$convInfo[2]) # # conv <- as.numeric(biomass_curve$convInfo[1]) @@ -625,23 +631,23 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # # beta.ci.lower <- as.numeric(ci[2,1]) # # beta.ci.upper <- as.numeric(ci[2,2]) # # pval.b <- as.numeric(summary(biomass_curve)$parameters[8]) -# # mod.params <- rbind(mod.params,as.vector(c(pol_bands[i], -# # buff[j], -# # biomass_R2, -# # mod.alpha, -# # pval.a, -# # alpha.ci.lower, +# # mod.params <- rbind(mod.params,as.vector(c(pol_bands[i], +# # buff[j], +# # biomass_R2, +# # mod.alpha, +# # pval.a, +# # alpha.ci.lower, # # alpha.ci.upper, -# # mod.beta, +# # mod.beta, # # pval.b, -# # beta.ci.lower, +# # beta.ci.lower, # # beta.ci.upper, -# # num.iters, +# # num.iters, # # conv))) # # print(paste(pol_bands[i],buff[j])) # # }} # # mod.params<-mod.params[2:nrow(mod.params),] -# # +# # # # xs<-seq(from=1, to=4,by=1) # # ys<-as.numeric(mod.params[,4]) # # upper<-as.numeric(mod.params[,7]) @@ -650,41 +656,41 
@@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # # plot(xs,ys,ylim=c(0,max(upper)+0.1*max(upper)),ylab="Alpha estimate",xlab="pol.band/buffer.radius") # # segments(xs,lower,xs,upper,col="black",lwd=2) # # legend("topright",legend=c("1=48,HH", "2=60,HH","3=48,HV", "4=60,HV")) -# # +# # # # rhyp<-mod.params -# -# +# +# # ####################### # ##Use Maximum likelihood to fit curves # ####################### # data<- read.csv(file.path(outpath, "WLEF_dat48.csv"), sep=",", header=T) ##location of PALSAR metadata table -# +# # # model<-c("Holl4", "RecHyp", "Logistic") -# -# +# +# # # for(k in 1:length(model)){ #loop over different functional forms # for(i in 1:length(pol_bands)){ # for(j in 1 ){ -# +# # y<-eval(parse(text=paste(paste("data$",pol_bands[i],'.sigma.',buff[j],sep='')))) #backscatter values # # x<-(eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$biomass",sep='')))))^0.5 # x<-(data$biomass) #biomass # # max.y<-mean(y[x>=quantile(x)[4]]) #starting est. of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile -# +# # ####################### # ##Use Maximum likelihood to fit Holling Type 4 # ####################### # model<-"Holl4" -# param_est<-matrix(nrow=0,ncol=8) +# param_est<-matrix(nrow=0,ncol=8) # par(mfrow=c(1,length(pol_bands))) # pdf(paste(outpath,"/",model,"_curvefits.pdf",sep=""),width = 6, height = 6, paper='special') -# +# # a<- mean(y[x>=quantile(x,na.rm=TRUE)[4]],na.rm=TRUE) #starting est. of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile # b<-quantile(x,na.rm=TRUE)[4] # c<--1 -# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values +# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values # params<-c(a,b,c,sd) -# +# # fit <- function(params,x,y){ # a <-params[1] # b <-params[2] @@ -692,21 +698,21 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # sd<-params[4] # # y.pred<-(max.y*x)/(ki+x) # y.pred<-(a*x^2)/(b+(c*x)+x^2) -# +# # LL<- -sum(dnorm(y,y.pred,sd,log=TRUE)) # return(LL) # } #function -# +# # fit.mod = optim(par=params,fit,x=x,y=y) # fit.mod # aic.mod<- -2*fit.mod$value + 2*length(params) -# +# # params <- c(pol_bands[i],buff[j],fit.mod$par[1:3],2*fit.mod$par[2]/fit.mod$par[3],fit.mod$par[4],aic.mod) #par means parameter estimates # param_est<-rbind(param_est, params) # xseq = seq(0,max(x),length=1000) -# +# # plot(x,y, xlab="biomass",ylab=paste(pol_bands[i],buff[j],sep="_"),main=model, #something wrong here with main title -# xlim=c(min(x),max(x)), +# xlim=c(min(x),max(x)), # ylim=c(min(y),max(y)), # pch=16,col="#CCCCCC") # abline(a=0,b=0) @@ -716,47 +722,47 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # paste("b=",format(fit.mod$par[2],digits=2)), # paste("c=",format(fit.mod$par[3],digits=2)), # paste("aic=", format(aic.mod, digits=4))), bty="n",cex=0.85) -# +# # dev.off() # colnames(param_est)<-c("pol_bands","buff","a","b","c","bio.at.peak.backscatter", "sd","AIC") # param_est # write.table(param_est,file=paste(outpath, "/", model, "_param_estimates.csv",sep=""),quote=FALSE,sep=",",row.names=F) -# +# # ####################### # ##Use Maximum likelihood to fit Logistic # ####################### # model<-"Logistic" -# param_est<-matrix(nrow=0,ncol=7) +# param_est<-matrix(nrow=0,ncol=7) # par(mfrow=c(1,length(pol_bands))) # pdf(paste(outpath,"/",model,"_curvefits.pdf",sep=""),width = 6, height = 6, paper='special') -# +# # a<- max(y) #starting est. 
of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile # b<-2 #slope of initial portion of the curve # c<-mean(x[y>=quantile(y,0.9,na.rm=TRUE)],na.rm=TRUE) -# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values +# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values # params<-c(a,b,c,sd) -# +# # fit <- function(params,x,y){ # a <-params[1] # b <-params[2] # c <-params[3] # sd<-params[4] # y.pred<- a/(1+b*exp(-c*x)) -# +# # LL<- -sum(dnorm(y,y.pred,sd,log=TRUE)) # return(LL) # } #function -# +# # fit.mod = optim(par=params,fit,x=x,y=y) # fit.mod # aic.mod<- -2*fit.mod$value + 2*length(params) -# +# # params <- c(pol_bands[i],buff[j],fit.mod$par[1:4],aic.mod) #par means parameter estimates # param_est<-rbind(param_est, params) # xseq = seq(0,max(x),length=1000) -# +# # plot(x,y, xlab="biomass",ylab=paste(pol_bands[i],buff[j],sep="_"),main=model, #something wrong here with main title -# xlim=c(min(x),max(x)), +# xlim=c(min(x),max(x)), # ylim=c(min(y),max(y)), # pch=16,col="#CCCCCC") # abline(a=0,b=0) @@ -765,25 +771,25 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # paste("b=",format(fit.mod$par[2],digits=2)), # paste("c=",format(fit.mod$par[3],digits=2)), # paste("aic=", format(aic.mod, digits=4))), bty="n",cex=0.85) -# +# # dev.off() # colnames(param_est)<-c("pol_bands","buff","a","b","c","sd","AIC") # param_est # write.table(param_est,file=paste(outpath, "/", model, "_param_estimates.csv",sep=""),quote=FALSE,sep=",",row.names=F) -# -# -# +# +# +# # }#for j looping over pol_bands # }#for i looping over buff -# +# # }#for k looping over models -# +# # ################################## # ################################## -# -# +# +# # ################# # ##diagnotics? # ################# -# -# +# +# diff --git a/modules/data.remote/inst/scripts/ChEAS_FIA_04042014.R b/modules/data.remote/inst/scripts/ChEAS_FIA_04042014.R index 146f3774dd6..9be3d77836c 100644 --- a/modules/data.remote/inst/scripts/ChEAS_FIA_04042014.R +++ b/modules/data.remote/inst/scripts/ChEAS_FIA_04042014.R @@ -1,6 +1,6 @@ -##Author Brady S. Hardiman 11/12/2013 +## Author Brady S. Hardiman 11/12/2013 -##Read in fuzzed FIA coordinates supplied by Andy Finley (MSU), extract palsar backscatter values, fit curves, save figures and extracted values (with coordinates) +## Read in fuzzed FIA coordinates supplied by Andy Finley (MSU), extract palsar backscatter values, fit curves, save figures and extracted values (with coordinates) ## To work from stored, previously extracted values, run everything up to line 48, then skip to line ~284 @@ -25,155 +25,157 @@ ################################ ## OPTIONS ################################ -kml=0 #1 = generate and save kml files of extraction coordinates; 0 = do not generate new kml -fia=0 #1 = use FIA coordinates, 0 = use WLEF/Park Falls Tower coordinates -leaf.off=0 #1=include PALSAR scenes acquired duing leaf off period of the year, 0=exclude leaf off scene dates -plot_ext=1 #Generate figures showing palsar scn extent with overlay of plot coords? (1=yes, 0=no) -machine=1 #1=Brady's Mac; 1=Brady's Linux +kml <- 0 # 1 = generate and save kml files of extraction coordinates; 0 = do not generate new kml +fia <- 0 # 1 = use FIA coordinates, 0 = use WLEF/Park Falls Tower coordinates +leaf.off <- 0 # 1=include PALSAR scenes acquired duing leaf off period of the year, 0=exclude leaf off scene dates +plot_ext <- 1 # Generate figures showing palsar scn extent with overlay of plot coords? 
(1=yes, 0=no) +machine <- 1 # 1=Brady's Mac; 1=Brady's Linux # buff=c(48) #vector of buffer sizes (in meters) to extract -coord.set<-c("WLEF", "FIA") - -if(machine==2){ #Brady's Linux paths - metadata<- read.csv("/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", sep="\t", header=T) ##for Brady's Linux - palsar_inpath <- file.path("/home/bhardima/Desktop/cheas/geo_corrected_single_sigma") ##location of PALSAR raw files - calib_inpath <-"/home/bhardima/pecan/modules/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter - outpath <- file.path("/home/bhardima/pecan/modules/data.remote/output/data") ##For saving -} -if(machine==1){ #Brady's Mac paths - metadata<- read.csv("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/metadata/output_metadata.csv", sep="\t", header=T) ##location of PALSAR metadata table - palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/palsar_scenes/geo_corrected_single_sigma") ##location of PALSAR raw files - calib_inpath <-"/Users/hardimanb/Desktop/data.remote(Andys_Copy)/biometry" ##location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter - outpath <- file.path("/Users/hardimanb/Dropbox/PALSAR_Biomass_Study/data") ##For saving +coord.set <- c("WLEF", "FIA") + +if (machine == 2) { # Brady's Linux paths + metadata <- read.csv("/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", sep = "\t", header = T) ## for Brady's Linux + palsar_inpath <- file.path("/home/bhardima/Desktop/cheas/geo_corrected_single_sigma") ## location of PALSAR raw files + calib_inpath <- "/home/bhardima/pecan/modules/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ## location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter + outpath <- file.path("/home/bhardima/pecan/modules/data.remote/output/data") ## For saving +} +if (machine == 1) { # Brady's Mac paths + metadata <- read.csv("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/metadata/output_metadata.csv", sep = "\t", header = T) ## location of PALSAR metadata table + palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/palsar_scenes/geo_corrected_single_sigma") ## location of PALSAR raw files + calib_inpath <- "/Users/hardimanb/Desktop/data.remote(Andys_Copy)/biometry" ## location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter + outpath <- file.path("/Users/hardimanb/Dropbox/PALSAR_Biomass_Study/data") ## For saving } -#Extract palsar data from calibration coordinates -palsar.extractor(kml,fia,leaf.off,plot_ext) +# Extract palsar data from calibration coordinates +palsar.extractor(kml, fia, leaf.off, plot_ext) ## Create working copy of data (so that I don't need to re-extract if I screw up the data) -## NOTE: Here I remove the NAs from coords that don't fall with in the scene and +## NOTE: Here I remove the NAs from coords that don't fall with in the scene and ## the zeros that are an artifact of the mismatch between palsar bbox dim and palsar raster dim (due to tilted orbital path) -dat48<-data.frame(na.exclude(extracted_48m)) -colnames(dat48)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.48", "HV.sigma.48","HHse_data_48m","HVse_data_48m") +dat48 <- data.frame(na.exclude(extracted_48m)) +colnames(dat48) <- c("scnid", "scndate", "plot", "UTM.lat", 
"UTM.lon", "biomass", "HH.sigma.48", "HV.sigma.48", "HHse_data_48m", "HVse_data_48m") ## NOTE: Converting to dataframe changes all values to factor, so here I reformat the data and save it -dat48$scnid<-as.character(dat48$scnid) -dat48$scndate<-as.Date(dat48$scndate,"%Y-%m-%d") -dat48$plot<-as.character(dat48$plot) -dat48$UTM.lat<- as.numeric(as.character(dat48$UTM.lat)) -dat48$UTM.lon<- as.numeric(as.character(dat48$UTM.lon)) -dat48$biomass<- as.numeric(as.character(dat48$biomass)) -dat48$HH.sigma.48<- as.numeric(as.character(dat48$HH.sigma.48)) -dat48$HV.sigma.48<- as.numeric(as.character(dat48$HV.sigma.48)) -dat48$year<-as.numeric(format(dat48$scndate,"%Y")) -dat48$month<-as.numeric(format(dat48$scndate,"%m")) -dat48$HHse_data_48m<- as.numeric(as.character(dat48$HHse_data_48m)) -dat48$HVse_data_48m<- as.numeric(as.character(dat48$HVse_data_48m)) - -#This will exclude scenes from the leaf off period (Nov-April) -if(leaf.off==1){ #include leaf off data - dat48<-dat48 -}else{ #exclude leaf off data - dat48<-dat48[as.numeric(format(dat48$scndate,"%m"))>=05 & as.numeric(format(dat48$scndate,"%m"))<=10,] +dat48$scnid <- as.character(dat48$scnid) +dat48$scndate <- as.Date(dat48$scndate, "%Y-%m-%d") +dat48$plot <- as.character(dat48$plot) +dat48$UTM.lat <- as.numeric(as.character(dat48$UTM.lat)) +dat48$UTM.lon <- as.numeric(as.character(dat48$UTM.lon)) +dat48$biomass <- as.numeric(as.character(dat48$biomass)) +dat48$HH.sigma.48 <- as.numeric(as.character(dat48$HH.sigma.48)) +dat48$HV.sigma.48 <- as.numeric(as.character(dat48$HV.sigma.48)) +dat48$year <- as.numeric(format(dat48$scndate, "%Y")) +dat48$month <- as.numeric(format(dat48$scndate, "%m")) +dat48$HHse_data_48m <- as.numeric(as.character(dat48$HHse_data_48m)) +dat48$HVse_data_48m <- as.numeric(as.character(dat48$HVse_data_48m)) + +# This will exclude scenes from the leaf off period (Nov-April) +if (leaf.off == 1) { # include leaf off data + dat48 <- dat48 +} else { # exclude leaf off data + dat48 <- dat48[as.numeric(format(dat48$scndate, "%m")) >= 05 & as.numeric(format(dat48$scndate, "%m")) <= 10, ] } -#This will exclude plots which returned zeros for both HH and HV -#NOTE: this circumstance corresponds to a subset of the plots falling outside of the palsar scene -#Not sure why these show up as zeros rather than NAs, but they definitely corresponds to non-overlapping scenes/plots +# This will exclude plots which returned zeros for both HH and HV +# NOTE: this circumstance corresponds to a subset of the plots falling outside of the palsar scene +# Not sure why these show up as zeros rather than NAs, but they definitely corresponds to non-overlapping scenes/plots # This can be verified by examining WLEF_SceneExtent_with_plot_overlay.pdf in data.remote/output/data -dat48<-dat48[dat48$HH.sigma.48 !=0 & dat48$HV.sigma.48 !=0,] - - -#Generate column of DOYs -dats<-as.character(dat48$scndate) #reformat as character -data.doy.07<-chron(dates = as.character(dat48$scndate[grep("2007",dats)]), format = (dates = "Y-m-d"), origin = c(day = 1, month = 0, year = 2007)) -data.doy.07<-as.numeric(data.doy.07) -data.year.07<-substr(dat48$scndate[grep("2007",dates)],1,4) -data.dates.07<-cbind(data.year.07,data.doy.07) - -data.doy.08<-chron(dates = as.character(dat48$scndate[grep("2008",dats)]), format = (dates = "Y-m-d"), origin = c(day = 1, month = 0, year = 2008)) -data.doy.08<-as.numeric(data.doy.08) -data.year.08<-substr(dat48$scndate[grep("2008",dates)],1,4) -data.dates.08<-cbind(data.year.08,data.doy.08) - -data.doy.09<-chron(dates = 
as.character(dat48$scndate[grep("2009",dats)]), format = (dates = "Y-m-d"), origin = c(day = 1, month = 0, year = 2009)) -data.doy.09<-as.numeric(data.doy.09) -data.year.09<-substr(dat48$scndate[grep("2009",dates)],1,4) -data.dates.09<-cbind(data.year.09,data.doy.09) - -data.doy.10<-chron(dates = as.character(dat48$scndate[grep("2010",dats)]), format = (dates = "Y-m-d"), origin = c(day = 1, month = 0, year = 2010)) -data.doy.10<-as.numeric(data.doy.10) -data.year.10<-substr(dat48$scndate[grep("2010",dates)],1,4) -data.dates.10<-cbind(data.year.10,data.doy.10) - -dat48$doy<-c(data.dates.07,data.dates.08,data.dates.09,data.dates.10) - -#Save extracted data -write.table(dat48,file=paste(outpath,"/",coord.set[fia+1],"_dat48.csv",sep=""),sep=",",quote=FALSE,col.names = TRUE, row.names=F) - -#Switch to working from saved data -dat48<-read.csv(paste(outpath,"/",coord.set[fia+1],"_dat48.csv",sep=""),header = TRUE) - -#Correctly format data (again...sigh...) -dat48$scnid<-as.character(dat48$scnid) -dat48$scndate<-as.Date(dat48$scndate,"%Y-%m-%d") -dat48$plot<-as.numeric(dat48$plot) -dat48$UTM.lat<- as.numeric(as.character(dat48$UTM.lat)) -dat48$UTM.lon<- as.numeric(as.character(dat48$UTM.lon)) -dat48$biomass<- as.numeric(as.character(dat48$biomass)) -dat48$HH.sigma.48<- as.numeric(as.character(dat48$HH.sigma.48)) -dat48$HV.sigma.48<- as.numeric(as.character(dat48$HV.sigma.48)) -dat48$year<-as.numeric(format(dat48$scndate,"%Y")) -dat48$month<-as.numeric(format(dat48$scndate,"%m")) -dat48$HHse_data_48m<- as.numeric(as.character(dat48$HHse_data_48m)) -dat48$HVse_data_48m<- as.numeric(as.character(dat48$HVse_data_48m)) +dat48 <- dat48[dat48$HH.sigma.48 != 0 & dat48$HV.sigma.48 != 0, ] + + +# Generate column of DOYs +dats <- as.character(dat48$scndate) # reformat as character +data.doy.07 <- chron(dates = as.character(dat48$scndate[grep("2007", dats)]), format = (dates <- "Y-m-d"), origin = c(day = 1, month = 0, year = 2007)) +data.doy.07 <- as.numeric(data.doy.07) +data.year.07 <- substr(dat48$scndate[grep("2007", dates)], 1, 4) +data.dates.07 <- cbind(data.year.07, data.doy.07) + +data.doy.08 <- chron(dates = as.character(dat48$scndate[grep("2008", dats)]), format = (dates <- "Y-m-d"), origin = c(day = 1, month = 0, year = 2008)) +data.doy.08 <- as.numeric(data.doy.08) +data.year.08 <- substr(dat48$scndate[grep("2008", dates)], 1, 4) +data.dates.08 <- cbind(data.year.08, data.doy.08) + +data.doy.09 <- chron(dates = as.character(dat48$scndate[grep("2009", dats)]), format = (dates <- "Y-m-d"), origin = c(day = 1, month = 0, year = 2009)) +data.doy.09 <- as.numeric(data.doy.09) +data.year.09 <- substr(dat48$scndate[grep("2009", dates)], 1, 4) +data.dates.09 <- cbind(data.year.09, data.doy.09) + +data.doy.10 <- chron(dates = as.character(dat48$scndate[grep("2010", dats)]), format = (dates <- "Y-m-d"), origin = c(day = 1, month = 0, year = 2010)) +data.doy.10 <- as.numeric(data.doy.10) +data.year.10 <- substr(dat48$scndate[grep("2010", dates)], 1, 4) +data.dates.10 <- cbind(data.year.10, data.doy.10) + +dat48$doy <- c(data.dates.07, data.dates.08, data.dates.09, data.dates.10) + +# Save extracted data +write.table(dat48, file = paste(outpath, "/", coord.set[fia + 1], "_dat48.csv", sep = ""), sep = ",", quote = FALSE, col.names = TRUE, row.names = F) + +# Switch to working from saved data +dat48 <- read.csv(paste(outpath, "/", coord.set[fia + 1], "_dat48.csv", sep = ""), header = TRUE) + +# Correctly format data (again...sigh...) 
+dat48$scnid <- as.character(dat48$scnid) +dat48$scndate <- as.Date(dat48$scndate, "%Y-%m-%d") +dat48$plot <- as.numeric(dat48$plot) +dat48$UTM.lat <- as.numeric(as.character(dat48$UTM.lat)) +dat48$UTM.lon <- as.numeric(as.character(dat48$UTM.lon)) +dat48$biomass <- as.numeric(as.character(dat48$biomass)) +dat48$HH.sigma.48 <- as.numeric(as.character(dat48$HH.sigma.48)) +dat48$HV.sigma.48 <- as.numeric(as.character(dat48$HV.sigma.48)) +dat48$year <- as.numeric(format(dat48$scndate, "%Y")) +dat48$month <- as.numeric(format(dat48$scndate, "%m")) +dat48$HHse_data_48m <- as.numeric(as.character(dat48$HHse_data_48m)) +dat48$HVse_data_48m <- as.numeric(as.character(dat48$HVse_data_48m)) ######################################################### ## Run plotting function ######################################################### -palsar.plotter(outpath,coord.set,fia) +palsar.plotter(outpath, coord.set, fia) -#This generates a figure showing the HH values for 2 WLEF plots which are anomalously high +# This generates a figure showing the HH values for 2 WLEF plots which are anomalously high # if(fia==0){ # plot(dat48$biomass, dat48$HH.sigma.48,xlab="Biomass",ylab="HH (sigma)",main="Anomalous Plots") # points(dat48$biomass[dat48$plot=="W47"],dat48$HH.sigma.48[dat48$plot=="W47"],pch=19,col="red") # points(dat48$biomass[dat48$plot=="W52"],dat48$HH.sigma.48[dat48$plot=="W52"],pch=19,col="blue") # legend("topright",legend=c("W47","W52"),pch=19,col=c("red","blue"),bty="n") -# +# # dat48<-dat48[dat48$plot !="W47" & dat48$plot !="W52",] #Excludes returns from WLEF plots W47 and W52 # } -#Plot HH values for each year-month combo -png(file=file.path(outpath,"All_HH_by_scndate.png"),bg="transparent") -par(mfrow=c(3,6),mar=c(3,4,1,0.5)) -for(y in unique(dat48$year)){ - for(m in unique(dat48$month)){ - if(length(dat48$biomass[dat48$month==m & dat48$year==y])<1){ #Skips Year-month combos with no data +# Plot HH values for each year-month combo +png(file = file.path(outpath, "All_HH_by_scndate.png"), bg = "transparent") +par(mfrow = c(3, 6), mar = c(3, 4, 1, 0.5)) +for (y in unique(dat48$year)) { + for (m in unique(dat48$month)) { + if (length(dat48$biomass[dat48$month == m & dat48$year == y]) < 1) { # Skips Year-month combos with no data next - }else{ - scatter.smooth(dat48$biomass[dat48$month==m & dat48$year==y],dat48$HH.sigma.48[dat48$month==m & dat48$year==y], - xlim=c(min(dat48$biomass),max(dat48$biomass)),ylim=c(min(dat48$HH.sigma.48),max(dat48$HH.sigma.48)), - xlab="biomass",ylab='HH',main=paste(month.abb[m],y,sep=" "),pch="." ) - }#if - }#for m -}#for y + } else { + scatter.smooth(dat48$biomass[dat48$month == m & dat48$year == y], dat48$HH.sigma.48[dat48$month == m & dat48$year == y], + xlim = c(min(dat48$biomass), max(dat48$biomass)), ylim = c(min(dat48$HH.sigma.48), max(dat48$HH.sigma.48)), + xlab = "biomass", ylab = "HH", main = paste(month.abb[m], y, sep = " "), pch = "." 
+ ) + } # if + } # for m +} # for y dev.off() -#Plot HV values for each year-month combo -png(file=file.path(outpath,"All_HV_by_scndate.png"),bg="transparent") -par(mfrow=c(3,6),mar=c(3,4,1,0.5)) -for(y in unique(dat48$year)){ - for(m in unique(dat48$month)){ - if(length(dat48$biomass[dat48$month==m & dat48$year==y])<1){ #Skips Year-month combos with no data +# Plot HV values for each year-month combo +png(file = file.path(outpath, "All_HV_by_scndate.png"), bg = "transparent") +par(mfrow = c(3, 6), mar = c(3, 4, 1, 0.5)) +for (y in unique(dat48$year)) { + for (m in unique(dat48$month)) { + if (length(dat48$biomass[dat48$month == m & dat48$year == y]) < 1) { # Skips Year-month combos with no data next - }else{ - scatter.smooth(dat48$biomass[dat48$month==m & dat48$year==y],dat48$HV.sigma.48[dat48$month==m & dat48$year==y], - xlim=c(min(dat48$biomass),max(dat48$biomass)),ylim=c(min(dat48$HV.sigma.48),max(dat48$HV.sigma.48)), - xlab="biomass",ylab='HV',main=paste(month.abb[m],y,sep=" "),pch="." ) - }#if - }#for m -}#for y + } else { + scatter.smooth(dat48$biomass[dat48$month == m & dat48$year == y], dat48$HV.sigma.48[dat48$month == m & dat48$year == y], + xlim = c(min(dat48$biomass), max(dat48$biomass)), ylim = c(min(dat48$HV.sigma.48), max(dat48$HV.sigma.48)), + xlab = "biomass", ylab = "HV", main = paste(month.abb[m], y, sep = " "), pch = "." + ) + } # if + } # for m +} # for y dev.off() @@ -183,9 +185,9 @@ dev.off() ## Run curve fitting function ######################################################### -n.reps<- 1000 #sets value for n.adapt and n.iter -n.chain<-3 #number of MCMC chains to run -bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) +n.reps <- 1000 # sets value for n.adapt and n.iter +n.chain <- 3 # number of MCMC chains to run +bayes.curve.fit(outpath, coord.set, fia, n.reps, n.chain) diff --git a/modules/data.remote/inst/scripts/Download_OSU_AGB_data_v5.R b/modules/data.remote/inst/scripts/Download_OSU_AGB_data_v5.R index 278f70479c3..4059bf1d896 100644 --- a/modules/data.remote/inst/scripts/Download_OSU_AGB_data_v5.R +++ b/modules/data.remote/inst/scripts/Download_OSU_AGB_data_v5.R @@ -10,9 +10,9 @@ #---------------- Close all devices and delete all variables. -------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files dlm <- .Platform$file.sep # <--- What is the platform specific delimiter? #--------------------------------------------------------------------------------------------------# @@ -30,10 +30,10 @@ library(sp) library(rgdal) library(spatial.tools) library(ggplot2) -library(plyr) # needed? -#library(plotrix) +library(plyr) # needed? +# library(plotrix) library(foreach) -#library(magrittr) +# library(magrittr) #--------------------------------------------------------------------------------------------------# @@ -43,19 +43,19 @@ dataset_version <- "v1" target_dataset <- "biomassfiaald" file_ext <- ".zip" target_directory <- file.path("/data2/RS_GIS_Data/NASA_CMS_data/OSU_AGB/") -if (! 
file.exists(target_directory)) dir.create(target_directory,recursive=TRUE) +if (!file.exists(target_directory)) dir.create(target_directory, recursive = TRUE) start_year <- 1984 end_year <- 2016 inParallel <- TRUE -ncores = if (inParallel) parallel::detectCores() else NULL +ncores <- if (inParallel) parallel::detectCores() else NULL plot_results <- TRUE #--------------------------------------------------------------------------------------------------# #--------------------------------------------------------------------------------------------------# -target_years <- seq(start_year,end_year,1) -med_files <- paste0(target_dataset,"_",target_years,"_median",file_ext) -stdv_files <- paste0(target_dataset,"_",target_years,"_stdv",file_ext) +target_years <- seq(start_year, end_year, 1) +med_files <- paste0(target_dataset, "_", target_years, "_median", file_ext) +stdv_files <- paste0(target_dataset, "_", target_years, "_stdv", file_ext) #--------------------------------------------------------------------------------------------------# @@ -65,15 +65,15 @@ cl <- parallel::makeCluster(ncores) doParallel::registerDoParallel(cl) # get median data -med_urls <- paste(URL,dataset_version,target_dataset,"median",med_files,sep="/") +med_urls <- paste(URL, dataset_version, target_dataset, "median", med_files, sep = "/") PEcAn.logger::logger.info("*** Downloading AGB median data") -foreach::foreach(i=1:length(med_urls)) %dopar% try(utils::download.file(med_urls[i], file.path(target_directory,med_files[i]))) +foreach::foreach(i = 1:length(med_urls)) %dopar% try(utils::download.file(med_urls[i], file.path(target_directory, med_files[i]))) rm(i) # get stdv data -stdv_urls <- paste(URL,dataset_version,target_dataset,"stdv",stdv_files,sep="/") +stdv_urls <- paste(URL, dataset_version, target_dataset, "stdv", stdv_files, sep = "/") PEcAn.logger::logger.info("*** Downloading AGB stdv data") -foreach::foreach(j=1:length(stdv_urls)) %dopar% try(utils::download.file(stdv_urls[j], file.path(target_directory,stdv_files[j]))) +foreach::foreach(j = 1:length(stdv_urls)) %dopar% try(utils::download.file(stdv_urls[j], file.path(target_directory, stdv_files[j]))) rm(j) #--------------------------------------------------------------------------------------------------# @@ -82,11 +82,12 @@ rm(j) ## unpack files zip_files <- list.files(file.path(target_directory), pattern = "*.zip", full.names = TRUE) PEcAn.logger::logger.info("*** Unzipping downloaded data") -foreach::foreach(k=1:length(zip_files)) %dopar% try(utils::unzip(file.path(zip_files[k]), - files = NULL, list = FALSE, overwrite = TRUE, - junkpaths = FALSE, - exdir = file.path(path.expand(target_directory)), - unzip = getOption("unzip"), setTimes = FALSE)) +foreach::foreach(k = 1:length(zip_files)) %dopar% try(utils::unzip(file.path(zip_files[k]), + files = NULL, list = FALSE, overwrite = TRUE, + junkpaths = FALSE, + exdir = file.path(path.expand(target_directory)), + unzip = getOption("unzip"), setTimes = FALSE +)) rm(k) #--------------------------------------------------------------------------------------------------# @@ -95,21 +96,24 @@ rm(k) ## extract pixel values con <- PEcAn.DB::db.open( - list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE)) -site_ID <- c(2000000023,1000025731) # US-CZ3, US-SSH -sites <- list(PEcAn.DB::query.site(site_ID[1],con),PEcAn.DB::query.site(site_ID[2],con)) + list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE + ) +) 
+site_ID <- c(2000000023, 1000025731) # US-CZ3, US-SSH +sites <- list(PEcAn.DB::query.site(site_ID[1], con), PEcAn.DB::query.site(site_ID[2], con)) coords_LL <- NULL j <- 1 for (i in seq_along(1:length(sites))) { - if (j==1) { - coords_latlong <- data.frame(cbind(sites[[i]]$lon,sites[[i]]$lat)) - names(coords_latlong) <- c("Longitude","Latitude") + if (j == 1) { + coords_latlong <- data.frame(cbind(sites[[i]]$lon, sites[[i]]$lat)) + names(coords_latlong) <- c("Longitude", "Latitude") } else { - coords_latlong[j,] <- rbind(sites[[i]]$lon,sites[[i]]$lat) + coords_latlong[j, ] <- rbind(sites[[i]]$lon, sites[[i]]$lat) } - j <- j+1 + j <- j + 1 } coords_latlong coords_latlong <- sp::SpatialPoints(coords_latlong) @@ -117,38 +121,40 @@ proj4string(coords_latlong) <- CRS("+init=epsg:4326") biomass_median <- lapply(list.files(file.path(target_directory), pattern = "*median.tif$", full.names = TRUE), raster) biomass_stdv <- lapply(list.files(file.path(target_directory), pattern = "*stdv.tif$", full.names = TRUE), raster) -coords_AEA <- sp::spTransform(coords_latlong,crs(raster::raster(biomass_median[[1]]))) +coords_AEA <- sp::spTransform(coords_latlong, crs(raster::raster(biomass_median[[1]]))) biomass_median_stack <- raster::stack(biomass_median) biomass_stdv_stack <- raster::stack(biomass_stdv) -agb_median_pixel <- raster::extract(x = biomass_median_stack, y = coords_AEA, buffer=NULL, fun=NULL, df=FALSE) +agb_median_pixel <- raster::extract(x = biomass_median_stack, y = coords_AEA, buffer = NULL, fun = NULL, df = FALSE) agb_median_pixel <- data.frame(site_ID, agb_median_pixel) -agb_stdv_pixel <- raster::extract(x = biomass_stdv_stack, y = coords_AEA, buffer=NULL, fun=NULL, df=FALSE) +agb_stdv_pixel <- raster::extract(x = biomass_stdv_stack, y = coords_AEA, buffer = NULL, fun = NULL, df = FALSE) agb_stdv_pixel <- data.frame(site_ID, agb_stdv_pixel) -point_list <- list(median_AGB=list(agb_median_pixel), stdv_AGB=list(agb_stdv_pixel)) +point_list <- list(median_AGB = list(agb_median_pixel), stdv_AGB = list(agb_stdv_pixel)) -#write.csv(x = point_list, file = file.path(target_directory,'example_AGB_output.csv'), row.names = FALSE) -save("point_list",file = file.path(target_directory,'example_AGB_output.RData')) +# write.csv(x = point_list, file = file.path(target_directory,'example_AGB_output.csv'), row.names = FALSE) +save("point_list", file = file.path(target_directory, "example_AGB_output.RData")) if (plot_results) { - PEcAn.logger::logger.info("Generating plot of results") ## format for plotting in ggplot2 - formatted_data <- data.frame(reshape::melt(agb_median_pixel, id=c("site_ID"))) - formatted_data$AGB_Stdv <- reshape::melt(agb_stdv_pixel, id=c("site_ID"))$value - formatted_data$Year <- substr(gsub("[^0-9]","",formatted_data$variable),1,8) - formatted_data <- formatted_data[,c(1,3:5)] - names(formatted_data) <- c("site_ID","AGB_med","AGB_stdv","Year") - #formatted_data - - png(file=file.path(target_directory,'Example_AGB_pixel_extraction.png'),height=1300,width=3900, res=310) - par(mfrow=c(1,1), mar=c(4.5,5.7,0.3,0.4), oma=c(0.3,0.9,0.3,0.1)) # B, L, T, R - ggplot2::ggplot(data=formatted_data, aes(x = Year, y = AGB_med)) + geom_point(size=2) + - geom_errorbar(aes(x=Year, ymin=AGB_med-AGB_stdv, ymax=AGB_med+AGB_stdv), width=0.25) + facet_wrap(~site_ID) + - theme(axis.text=element_text(size=18),legend.text=element_text(size=18),axis.title.y=element_text(size=18,face="bold"), - axis.text.x=element_text(size=12, angle=90, vjust = 0.3)) + formatted_data <- data.frame(reshape::melt(agb_median_pixel, 
id = c("site_ID"))) + formatted_data$AGB_Stdv <- reshape::melt(agb_stdv_pixel, id = c("site_ID"))$value + formatted_data$Year <- substr(gsub("[^0-9]", "", formatted_data$variable), 1, 8) + formatted_data <- formatted_data[, c(1, 3:5)] + names(formatted_data) <- c("site_ID", "AGB_med", "AGB_stdv", "Year") + # formatted_data + + png(file = file.path(target_directory, "Example_AGB_pixel_extraction.png"), height = 1300, width = 3900, res = 310) + par(mfrow = c(1, 1), mar = c(4.5, 5.7, 0.3, 0.4), oma = c(0.3, 0.9, 0.3, 0.1)) # B, L, T, R + ggplot2::ggplot(data = formatted_data, aes(x = Year, y = AGB_med)) + + geom_point(size = 2) + + geom_errorbar(aes(x = Year, ymin = AGB_med - AGB_stdv, ymax = AGB_med + AGB_stdv), width = 0.25) + + facet_wrap(~site_ID) + + theme( + axis.text = element_text(size = 18), legend.text = element_text(size = 18), axis.title.y = element_text(size = 18, face = "bold"), + axis.text.x = element_text(size = 12, angle = 90, vjust = 0.3) + ) dev.off() - } #--------------------------------------------------------------------------------------------------# diff --git a/modules/data.remote/inst/scripts/Restrict_to_growing_season.R b/modules/data.remote/inst/scripts/Restrict_to_growing_season.R index 5ebe547fb59..aa900f53308 100644 --- a/modules/data.remote/inst/scripts/Restrict_to_growing_season.R +++ b/modules/data.remote/inst/scripts/Restrict_to_growing_season.R @@ -1,37 +1,37 @@ -#Restrict to growing season scndates only (Jun-Aug) -dat48.gs<- dat48[as.numeric(format(dat48$scndate,"%m"))>5 & as.numeric(format(dat48$scndate,"%m"))<9,] -dat48.gs$year<-as.numeric(format(dat48.gs$scndate,"%Y")) -dat48.gs$month<-as.numeric(format(dat48.gs$scndate,"%m")) - -plot(dat48.gs$biomass,dat48.gs$HH.sigma.48) -plot(dat48.gs$biomass,dat48.gs$HV.sigma.48) - -crap<-aggregate(dat48.gs,list(dat48.gs$plot,dat48.gs$year,dat48.gs$month),mean) -plot(crap$biomass[crap$year==2007], crap$HH.sigma.48[crap$year==2007]) - -HH.gs<-tapply(dat48.gs$HH.sigma.48,list(dat48.gs$plot,dat48.gs$month,dat48.gs$year),mean) -HV.gs<-tapply(dat48.gs$HV.sigma.48,list(dat48.gs$plot,dat48.gs$month,dat48.gs$year),mean) - -HH.gs.2007<-cbind(melt(HH.gs[,,1],id=HH.gs[,1,1])[1],rep.int(2007,nrow(HH.gs[,,1])),melt(HH.gs[,,1],id=HH.gs[,1,1])[2:3]) -HH.gs.2008<-cbind(melt(HH.gs[,,2],id=HH.gs[,1,2])[1],rep.int(2008,nrow(HH.gs[,,1])),melt(HH.gs[,,2],id=HH.gs[,1,2])[2:3]) -HH.gs.2009<-cbind(melt(HH.gs[,,3],id=HH.gs[,1,3])[1],rep.int(2009,nrow(HH.gs[,,1])),melt(HH.gs[,,3],id=HH.gs[,1,3])[2:3]) -HH.gs.2010<-cbind(melt(HH.gs[,,4],id=HH.gs[,1,4])[1],rep.int(2010,nrow(HH.gs[,,1])),melt(HH.gs[,,4],id=HH.gs[,1,4])[2:3]) - -HV.gs.2007<-cbind(melt(HV.gs[,,1],id=HV.gs[,1,1])[1],rep.int(2007,nrow(HV.gs[,,1])),melt(HV.gs[,,1],id=HV.gs[,1,1])[2:3]) -HV.gs.2008<-cbind(melt(HV.gs[,,2],id=HV.gs[,1,2])[1],rep.int(2008,nrow(HV.gs[,,1])),melt(HV.gs[,,2],id=HV.gs[,1,2])[2:3]) -HV.gs.2009<-cbind(melt(HV.gs[,,3],id=HV.gs[,1,3])[1],rep.int(2009,nrow(HV.gs[,,1])),melt(HV.gs[,,3],id=HV.gs[,1,3])[2:3]) -HV.gs.2010<-cbind(melt(HV.gs[,,4],id=HV.gs[,1,4])[1],rep.int(2010,nrow(HV.gs[,,1])),melt(HV.gs[,,4],id=HV.gs[,1,4])[2:3]) - -colnames(HH.gs.2007)<-c("plot","year","month","HH") -colnames(HH.gs.2008)<-c("plot","year","month","HH") -colnames(HH.gs.2009)<-c("plot","year","month","HH") -colnames(HH.gs.2010)<-c("plot","year","month","HH") -colnames(HV.gs.2007)<-c("plot","year","month","HV") -colnames(HV.gs.2008)<-c("plot","year","month","HV") -colnames(HV.gs.2009)<-c("plot","year","month","HV") -colnames(HV.gs.2010)<-c("plot","year","month","HV") - 
-HH.gs<-rbind(HH.gs.2007,HH.gs.2008,HH.gs.2009,HH.gs.2010) -HV.gs<-rbind(HV.gs.2007,HV.gs.2008,HV.gs.2009,HV.gs.2010) - -dat48.gs<-cbind(HH.gs,HV.gs$HV) \ No newline at end of file +# Restrict to growing season scndates only (Jun-Aug) +dat48.gs <- dat48[as.numeric(format(dat48$scndate, "%m")) > 5 & as.numeric(format(dat48$scndate, "%m")) < 9, ] +dat48.gs$year <- as.numeric(format(dat48.gs$scndate, "%Y")) +dat48.gs$month <- as.numeric(format(dat48.gs$scndate, "%m")) + +plot(dat48.gs$biomass, dat48.gs$HH.sigma.48) +plot(dat48.gs$biomass, dat48.gs$HV.sigma.48) + +crap <- aggregate(dat48.gs, list(dat48.gs$plot, dat48.gs$year, dat48.gs$month), mean) +plot(crap$biomass[crap$year == 2007], crap$HH.sigma.48[crap$year == 2007]) + +HH.gs <- tapply(dat48.gs$HH.sigma.48, list(dat48.gs$plot, dat48.gs$month, dat48.gs$year), mean) +HV.gs <- tapply(dat48.gs$HV.sigma.48, list(dat48.gs$plot, dat48.gs$month, dat48.gs$year), mean) + +HH.gs.2007 <- cbind(melt(HH.gs[, , 1], id = HH.gs[, 1, 1])[1], rep.int(2007, nrow(HH.gs[, , 1])), melt(HH.gs[, , 1], id = HH.gs[, 1, 1])[2:3]) +HH.gs.2008 <- cbind(melt(HH.gs[, , 2], id = HH.gs[, 1, 2])[1], rep.int(2008, nrow(HH.gs[, , 1])), melt(HH.gs[, , 2], id = HH.gs[, 1, 2])[2:3]) +HH.gs.2009 <- cbind(melt(HH.gs[, , 3], id = HH.gs[, 1, 3])[1], rep.int(2009, nrow(HH.gs[, , 1])), melt(HH.gs[, , 3], id = HH.gs[, 1, 3])[2:3]) +HH.gs.2010 <- cbind(melt(HH.gs[, , 4], id = HH.gs[, 1, 4])[1], rep.int(2010, nrow(HH.gs[, , 1])), melt(HH.gs[, , 4], id = HH.gs[, 1, 4])[2:3]) + +HV.gs.2007 <- cbind(melt(HV.gs[, , 1], id = HV.gs[, 1, 1])[1], rep.int(2007, nrow(HV.gs[, , 1])), melt(HV.gs[, , 1], id = HV.gs[, 1, 1])[2:3]) +HV.gs.2008 <- cbind(melt(HV.gs[, , 2], id = HV.gs[, 1, 2])[1], rep.int(2008, nrow(HV.gs[, , 1])), melt(HV.gs[, , 2], id = HV.gs[, 1, 2])[2:3]) +HV.gs.2009 <- cbind(melt(HV.gs[, , 3], id = HV.gs[, 1, 3])[1], rep.int(2009, nrow(HV.gs[, , 1])), melt(HV.gs[, , 3], id = HV.gs[, 1, 3])[2:3]) +HV.gs.2010 <- cbind(melt(HV.gs[, , 4], id = HV.gs[, 1, 4])[1], rep.int(2010, nrow(HV.gs[, , 1])), melt(HV.gs[, , 4], id = HV.gs[, 1, 4])[2:3]) + +colnames(HH.gs.2007) <- c("plot", "year", "month", "HH") +colnames(HH.gs.2008) <- c("plot", "year", "month", "HH") +colnames(HH.gs.2009) <- c("plot", "year", "month", "HH") +colnames(HH.gs.2010) <- c("plot", "year", "month", "HH") +colnames(HV.gs.2007) <- c("plot", "year", "month", "HV") +colnames(HV.gs.2008) <- c("plot", "year", "month", "HV") +colnames(HV.gs.2009) <- c("plot", "year", "month", "HV") +colnames(HV.gs.2010) <- c("plot", "year", "month", "HV") + +HH.gs <- rbind(HH.gs.2007, HH.gs.2008, HH.gs.2009, HH.gs.2010) +HV.gs <- rbind(HV.gs.2007, HV.gs.2008, HV.gs.2009, HV.gs.2010) + +dat48.gs <- cbind(HH.gs, HV.gs$HV) diff --git a/modules/data.remote/inst/scripts/figure_generator.R b/modules/data.remote/inst/scripts/figure_generator.R index 37eff32c27f..faa2a25b581 100644 --- a/modules/data.remote/inst/scripts/figure_generator.R +++ b/modules/data.remote/inst/scripts/figure_generator.R @@ -1,36 +1,37 @@ -##Generate Figures for PALSAR manuscript -##Author Brady S. Hardiman -##January 24, 2014 +## Generate Figures for PALSAR manuscript +## Author Brady S. 
Hardiman +## January 24, 2014 library(maps) -##FIGURE 1: Map of Wisc and MI-UP with FIA plots (points) and PALSAR scenes (bboxes) +## FIGURE 1: Map of Wisc and MI-UP with FIA plots (points) and PALSAR scenes (bboxes) cheascoords.spatial <- as.data.frame(cheas.coords) colnames(cheascoords.spatial)[1] <- "x" colnames(cheascoords.spatial)[2] <- "y" -coordinates(cheascoords.spatial)=~x+y -proj4string(cheascoords.spatial)=CRS("+proj=longlat +datum=WGS84") #define: WGS-84 lon,lat projection -map(database = "state", regions = list("wisconsin","michigan:north")) -points(cheascoords.spatial,pch=16,cex=0.25,col='grey') +coordinates(cheascoords.spatial) <- ~ x + y +proj4string(cheascoords.spatial) <- CRS("+proj=longlat +datum=WGS84") # define: WGS-84 lon,lat projection +map(database = "state", regions = list("wisconsin", "michigan:north")) +points(cheascoords.spatial, pch = 16, cex = 0.25, col = "grey") # spp.albers <- spTransform(cheascoords.spatial,CRS("+init=epsg:3175")) #convert to: NAD83/Great Lakes and St Lawrence Albers projection # points(spp.albers,pch=16,cex=0.25,col='grey') # points(spp.albers,pch=16,cex=0.25,col='grey') -for(i in 1:numfiles){ - scn.bbox<-rbind(c(metadata$scn_nwlon[i],metadata$scn_nwlat[i]), - c(metadata$scn_nelon[i],metadata$scn_nelat[i]), - c(metadata$scn_selon[i],metadata$scn_selat[i]), - c(metadata$scn_swlon[i],metadata$scn_swlat[i]), - c(metadata$scn_nwlon[i],metadata$scn_nwlat[i])) - scn.bbox<- Polygon(scn.bbox) #spatial polygon from cheas-palsar extent - Srs1<- Polygons(list(scn.bbox),"ChEAS_PLASAR_extent") #spatial polygons (plural) - scn.bbox<-SpatialPolygons(list(Srs1),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - plot(scn.bbox,add=TRUE) +for (i in 1:numfiles) { + scn.bbox <- rbind( + c(metadata$scn_nwlon[i], metadata$scn_nwlat[i]), + c(metadata$scn_nelon[i], metadata$scn_nelat[i]), + c(metadata$scn_selon[i], metadata$scn_selat[i]), + c(metadata$scn_swlon[i], metadata$scn_swlat[i]), + c(metadata$scn_nwlon[i], metadata$scn_nwlat[i]) + ) + scn.bbox <- Polygon(scn.bbox) # spatial polygon from cheas-palsar extent + Srs1 <- Polygons(list(scn.bbox), "ChEAS_PLASAR_extent") # spatial polygons (plural) + scn.bbox <- SpatialPolygons(list(Srs1), proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + plot(scn.bbox, add = TRUE) } map.axes() -title(main="Figure 1. Location of study area", xlab=parse(text=paste("Longitude ","^o","*W",sep="")),ylab=parse(text=paste("Latitude ","^o","*N",sep=""))) -legend("bottomright", c("FIA plot", "PALSAR scene"), col=c("grey", "black"), pch = c(16,0)) - -##FIGURE 2: +title(main = "Figure 1. Location of study area", xlab = parse(text = paste("Longitude ", "^o", "*W", sep = "")), ylab = parse(text = paste("Latitude ", "^o", "*N", sep = ""))) +legend("bottomright", c("FIA plot", "PALSAR scene"), col = c("grey", "black"), pch = c(16, 0)) +## FIGURE 2: diff --git a/modules/data.remote/inst/scripts/old/ChEAS_FIA.R b/modules/data.remote/inst/scripts/old/ChEAS_FIA.R index c21d92aaef5..cbab27921b1 100644 --- a/modules/data.remote/inst/scripts/old/ChEAS_FIA.R +++ b/modules/data.remote/inst/scripts/old/ChEAS_FIA.R @@ -1,6 +1,6 @@ -##Author Brady S. Hardiman 11/12/2013 +## Author Brady S. 
Hardiman 11/12/2013 -##Read in fuzzed FIA coordinates supplied by Andy Finley (MSU), extract palsar backscatter values, fit curves, save figures and extracted values (with coordinates) +## Read in fuzzed FIA coordinates supplied by Andy Finley (MSU), extract palsar backscatter values, fit curves, save figures and extracted values (with coordinates) ################################ ## Load Required Packages @@ -17,87 +17,91 @@ library(spatsta) ################################ ## Options ################################ -kml=0 #1 = generate and save kml files of extraction coordinates; 0 = do not generate new kml -fia=1 #1 = use FIA coordinates, 0 = use WLEF/Park Falls Tower coordinates -metadata<- read.csv("~/git/pecan/modules/data.remote/output/metadata/output_metadata.csv", sep="\t", header=T) +kml <- 0 # 1 = generate and save kml files of extraction coordinates; 0 = do not generate new kml +fia <- 1 # 1 = use FIA coordinates, 0 = use WLEF/Park Falls Tower coordinates +metadata <- read.csv("~/git/pecan/modules/data.remote/output/metadata/output_metadata.csv", sep = "\t", header = T) ################################ ## Read in coordinate data for calibration of PALSAR backscatter returns -## Uses PALSAR metadata file to determine geographic extent of available PALSAR data and crops extraction coord set +## Uses PALSAR metadata file to determine geographic extent of available PALSAR data and crops extraction coord set ## to match PALSAR extent. Reprojects extraction coords to match PALSAR geotiffs. ################################ -if(fia==1){ #EXTRACTS FROM FIA COORDINATES - calib_inpath <-"/home/bhardima/git/pecan/modules/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" - calib_infile <-read.csv(file.path(calib_inpath,"wi-biomass-fuzzed.csv"), sep=",", header=T) #Wisconsin FIA plots - coords<-data.frame(calib_infile$FUZZED_LON,calib_infile$FUZZED_LAT) #lon and lat (ChEAS: UTM Zone 15N NAD83) - Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -# wi.fia<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) - latlon<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) -# FIA.points <- SpatialPointsDataFrame(Sr1, wi.fia) #convert to class="SpatialPointsDataFrame" for export as kml - spdf.latlon <- SpatialPointsDataFrame(Sr1, latlon) #convert to class="SpatialPointsDataFrame" for export as kml - if(kml==1){writeOGR(spdf.latlon, layer=1, "WI_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) +if (fia == 1) { # EXTRACTS FROM FIA COORDINATES + calib_inpath <- "/home/bhardima/git/pecan/modules/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" + calib_infile <- read.csv(file.path(calib_inpath, "wi-biomass-fuzzed.csv"), sep = ",", header = T) # Wisconsin FIA plots + coords <- data.frame(calib_infile$FUZZED_LON, calib_infile$FUZZED_LAT) # lon and lat (ChEAS: UTM Zone 15N NAD83) + Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + # wi.fia<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) + latlon <- data.frame(calib_infile$FUZZED_LAT, calib_infile$FUZZED_LON) + # FIA.points <- SpatialPointsDataFrame(Sr1, wi.fia) #convert to class="SpatialPointsDataFrame" for export as kml + spdf.latlon <- SpatialPointsDataFrame(Sr1, latlon) # convert to class="SpatialPointsDataFrame" for export as kml + if (kml == 1) { + writeOGR(spdf.latlon, layer = 1, "WI_FIA.kml", driver = "KML") # export as kml (this puts in in the Home folder) } } 
-if(fia==0){#EXTRACTS FROM WLEF COORDINATES - calib_inpath <-"/home/bhardima/git/pecan/modules/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" - calib_infile <-read.csv(file.path(calib_inpath,"biometry_trimmed.csv"), sep="\t", header=T) #WLEF plots - -# upid<-paste(calib_infile$plot,calib_infile$subplot,sep='.') #create unique plot identifier -# calib_infile<-cbind(calib_infile[,1:2],upid,calib_infile[,3:8]) - calib_infile<-aggregate(calib_infile, list(calib_infile[,1]), mean) - calib_infile$plot<-calib_infile$Group.1 - calib_infile<-cbind(calib_infile[,2],calib_infile[,5:9]) - colnames(calib_infile)<-c("plot","easting","northing","adult_density","sapling_density","ABG_biomass") - - coords<-data.frame(calib_infile$easting,calib_infile$northing) #eastings and northings (ChEAS: UTM Zone 15N NAD83) - Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) - wlef<-data.frame(paste(calib_infile$plot,calib_infile$subplot,sep="_")) +if (fia == 0) { # EXTRACTS FROM WLEF COORDINATES + calib_inpath <- "/home/bhardima/git/pecan/modules/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" + calib_infile <- read.csv(file.path(calib_inpath, "biometry_trimmed.csv"), sep = "\t", header = T) # WLEF plots + + # upid<-paste(calib_infile$plot,calib_infile$subplot,sep='.') #create unique plot identifier + # calib_infile<-cbind(calib_infile[,1:2],upid,calib_infile[,3:8]) + calib_infile <- aggregate(calib_infile, list(calib_infile[, 1]), mean) + calib_infile$plot <- calib_infile$Group.1 + calib_infile <- cbind(calib_infile[, 2], calib_infile[, 5:9]) + colnames(calib_infile) <- c("plot", "easting", "northing", "adult_density", "sapling_density", "ABG_biomass") + + coords <- data.frame(calib_infile$easting, calib_infile$northing) # eastings and northings (ChEAS: UTM Zone 15N NAD83) + Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) + wlef <- data.frame(paste(calib_infile$plot, calib_infile$subplot, sep = "_")) epsg4326String <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs") - Sr1_4google <- spTransform(Sr1,epsg4326String) #class=SpatialPoints - Sr1_4google <- SpatialPointsDataFrame(Sr1_4google, wlef) #convert to class="SpatialPointsDataFrame" for export as kml - if(kml==1){writeOGR(Sr1_4google, layer=1, "WLEF.kml", driver="KML") #export as kml (this puts in in the Home folder) + Sr1_4google <- spTransform(Sr1, epsg4326String) # class=SpatialPoints + Sr1_4google <- SpatialPointsDataFrame(Sr1_4google, wlef) # convert to class="SpatialPointsDataFrame" for export as kml + if (kml == 1) { + writeOGR(Sr1_4google, layer = 1, "WLEF.kml", driver = "KML") # export as kml (this puts in in the Home folder) } } ## corner coords for cheas domain based on avaialable PALSAR data. (Maybe switch to bounding.box.xy()? 
) -ChEAS_PLASAR_extent <-rbind(cbind(min(metadata$scn_nwlon),max(metadata$scn_nwlat)), - cbind(max(metadata$scn_nelon),max(metadata$scn_nelat)), - cbind(max(metadata$scn_selon),min(metadata$scn_selat)), - cbind(min(metadata$scn_swlon),min(metadata$scn_swlat)), - cbind(min(metadata$scn_nwlon),max(metadata$scn_nwlat))) - -ChEAS_PLASAR_extent<- Polygon(ChEAS_PLASAR_extent) #spatial polygon from cheas-palsar extent -Srs1<- Polygons(list(ChEAS_PLASAR_extent),"ChEAS_PLASAR_extent") #spatial polygons (plural) -ChEAS_PLASAR_extent<-SpatialPolygons(list(Srs1),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - -Sr1<-spTransform(Sr1,CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) +ChEAS_PLASAR_extent <- rbind( + cbind(min(metadata$scn_nwlon), max(metadata$scn_nwlat)), + cbind(max(metadata$scn_nelon), max(metadata$scn_nelat)), + cbind(max(metadata$scn_selon), min(metadata$scn_selat)), + cbind(min(metadata$scn_swlon), min(metadata$scn_swlat)), + cbind(min(metadata$scn_nwlon), max(metadata$scn_nwlat)) +) + +ChEAS_PLASAR_extent <- Polygon(ChEAS_PLASAR_extent) # spatial polygon from cheas-palsar extent +Srs1 <- Polygons(list(ChEAS_PLASAR_extent), "ChEAS_PLASAR_extent") # spatial polygons (plural) +ChEAS_PLASAR_extent <- SpatialPolygons(list(Srs1), proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + +Sr1 <- spTransform(Sr1, CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) # FIA.in.cheas<-as.vector(over(FIA.points,ChEAS_PLASAR_extent)) #subset of FIA plots that falls within Cheas-PALSAR extent -coords.in.cheas<-as.vector(over(Sr1,ChEAS_PLASAR_extent)) #subset of plots that falls within Cheas-PALSAR extent +coords.in.cheas <- as.vector(over(Sr1, ChEAS_PLASAR_extent)) # subset of plots that falls within Cheas-PALSAR extent # FIA.in.cheas[is.na(FIA.in.cheas)]<-0 #replace na's with 0's for indexing -coords.in.cheas[is.na(coords.in.cheas)]<-0 #replace na's with 0's for indexing +coords.in.cheas[is.na(coords.in.cheas)] <- 0 # replace na's with 0's for indexing -##Plot Biomass source data -if(fia==1){ - biomass<-calib_infile[as.logical(coords.in.cheas),4] #for FIA -} else{ - biomass<-calib_infile[as.logical(coords.in.cheas),'ABG_biomass'] #for WLEF +## Plot Biomass source data +if (fia == 1) { + biomass <- calib_infile[as.logical(coords.in.cheas), 4] # for FIA +} else { + biomass <- calib_infile[as.logical(coords.in.cheas), "ABG_biomass"] # for WLEF } -##Plot ID will be used later on for generating time series of backscatter values -if(fia==1){ - plot<-NA #for FIA NOTE: Add in FIA plot unique identifiers if available -} else{ - plot<-calib_infile[as.logical(coords.in.cheas),'plot'] #for WLEF +## Plot ID will be used later on for generating time series of backscatter values +if (fia == 1) { + plot <- NA # for FIA NOTE: Add in FIA plot unique identifiers if available +} else { + plot <- calib_infile[as.logical(coords.in.cheas), "plot"] # for WLEF } ## Subset extraction coords that fall within PALSAR observation area -# cheasFIA<-Sr1@coords[FIA.in.cheas==1,] -cheas.coords<-Sr1@coords[coords.in.cheas==1,] ##subset of coords that falls within Cheas-PALSAR extent +# cheasFIA<-Sr1@coords[FIA.in.cheas==1,] +cheas.coords <- Sr1@coords[coords.in.cheas == 1, ] ## subset of coords that falls within Cheas-PALSAR extent # spcheasFIA <- SpatialPoints(cheasFIA,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -spcheascoords <- SpatialPoints(cheas.coords,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) +spcheascoords <- 
SpatialPoints(cheas.coords, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -# writeOGR(cheasFIA, layer=1, "cheas_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) +# writeOGR(cheasFIA, layer=1, "cheas_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) ################################ ## Begin extracting PALSAR values at FIA plot coordinates @@ -109,174 +113,175 @@ palsar_inpath <- file.path("/home/bhardima/git/pecan/modules/data.remote/palsar_ # date<-as.Date(metadata$scndate, format='%Y%m%d') # col_names<-c(rbind(paste(date, "HH",sep="_"),paste(date, "HV",sep="_"))) -pol_bands<-c("HH", "HV") -numfiles<-length(list.files(file.path(palsar_inpath, pol_bands[1]), pattern=".tif" ,recursive=F)) +pol_bands <- c("HH", "HV") +numfiles <- length(list.files(file.path(palsar_inpath, pol_bands[1]), pattern = ".tif", recursive = F)) # lake_extracted<-matrix(NA, nrow(lake_coords),length(pol_bands)*numfiles) # disturbance_extracted_40m<-matrix(NA, nrow(disturbance_coords),length(pol_bands)*numfiles) -# +# # colnames(lake_extracted)<-col_names # colnames(disturbance_extracted)<-col_names # colnames(disturbance_extracted_40m)<-col_names # extracted_48m<-matrix(NA, nrow=length(spcheasFIA)*numfiles, ncol=length(pol_bands)) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands # extracted_60m<-matrix(NA, nrow=length(spcheasFIA)*numfiles, ncol=length(pol_bands)) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -# extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheasFIA@coords),ncol=7) #matrix to store extracted palsar values. -# extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheasFIA@coords),ncol=7) #matrix to store extracted palsar values. -extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. -extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. +# extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheasFIA@coords),ncol=7) #matrix to store extracted palsar values. +# extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheasFIA@coords),ncol=7) #matrix to store extracted palsar values. +extracted_48m <- matrix(NA, nrow = numfiles * nrow(spcheascoords@coords), ncol = 8) # matrix to store extracted palsar values. +extracted_60m <- matrix(NA, nrow = numfiles * nrow(spcheascoords@coords), ncol = 8) # matrix to store extracted palsar values. # colnames(extracted_48m)<-pol_bands # colnames(extracted_60m)<-pol_bands -for(i in 1:numfiles){ - HH_filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[1]), pattern=".tif" ,recursive=F)) - HH_inpath<-file.path(palsar_inpath, pol_bands[1],HH_filelist[i]) - HH_rast<-raster(HH_inpath) - - HV_filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern=".tif" ,recursive=F)) - HV_inpath<-file.path(palsar_inpath, pol_bands[2],HV_filelist[i]) - HV_rast<-raster(HV_inpath) - - ################################################ - ## Check each extraction coordinate to see if it falls inside the bounding box of this palsar scene. - ## Only extract the ones that do. - ## NOTE: The bounding box for the PALSAR scene will be larger than the PALSAR scene itself (due to a tilted orbital path). - ## This means that the extraction loop will some number of palsar scenes that have all zeros for the backscatter values. 
- ## These zeros are truncated in post processing, prior to curve fitting. - ################################################ -# spcheasFIA<-spTransform(spcheasFIA,HH_rast@crs) #Convert coords being extracted to CRS of PALSAR raster files - spcheascoords<-spTransform(spcheascoords,HH_rast@crs) #Convert coords being extracted to CRS of PALSAR raster files - rast_box<-bbox(HH_rast@extent) #bounding box of single palsar scene - rast_Poly<-Polygon(rbind( - c(rast_box[1,1],rast_box[2,2]), - c(rast_box[1,2],rast_box[2,2]), - c(rast_box[1,2],rast_box[2,1]), - c(rast_box[1,1],rast_box[2,1]), - c(rast_box[1,1],rast_box[2,2]))) #polygon from bbox - Srs1<- Polygons(list(rast_Poly),"PALSAR_extent") #spatial polygon - pals_ext<-SpatialPolygons(list(Srs1),proj4string=HH_rast@crs) - -# fia.in.rast<-over(spcheasFIA,pals_ext) #FIA coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene -# fia.in.rast[is.na(fia.in.rast)]<-0 #replace na's with 0's for indexing -# fia.in.rast<-as.logical(fia.in.rast) - coords.in.rast<-over(spcheascoords,pals_ext) #extraction coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene - coords.in.rast[is.na(coords.in.rast)]<-0 #replace na's with 0's for indexing - if(max(coords.in.rast)!=1){ - next - } - coords.in.rast<-as.logical(coords.in.rast) - +for (i in 1:numfiles) { + HH_filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[1]), pattern = ".tif", recursive = F)) + HH_inpath <- file.path(palsar_inpath, pol_bands[1], HH_filelist[i]) + HH_rast <- raster(HH_inpath) + + HV_filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern = ".tif", recursive = F)) + HV_inpath <- file.path(palsar_inpath, pol_bands[2], HV_filelist[i]) + HV_rast <- raster(HV_inpath) + + ################################################ + ## Check each extraction coordinate to see if it falls inside the bounding box of this palsar scene. + ## Only extract the ones that do. + ## NOTE: The bounding box for the PALSAR scene will be larger than the PALSAR scene itself (due to a tilted orbital path). + ## This means that the extraction loop will some number of palsar scenes that have all zeros for the backscatter values. + ## These zeros are truncated in post processing, prior to curve fitting. 
+ ################################################ + # spcheasFIA<-spTransform(spcheasFIA,HH_rast@crs) #Convert coords being extracted to CRS of PALSAR raster files + spcheascoords <- spTransform(spcheascoords, HH_rast@crs) # Convert coords being extracted to CRS of PALSAR raster files + rast_box <- bbox(HH_rast@extent) # bounding box of single palsar scene + rast_Poly <- Polygon(rbind( + c(rast_box[1, 1], rast_box[2, 2]), + c(rast_box[1, 2], rast_box[2, 2]), + c(rast_box[1, 2], rast_box[2, 1]), + c(rast_box[1, 1], rast_box[2, 1]), + c(rast_box[1, 1], rast_box[2, 2]) + )) # polygon from bbox + Srs1 <- Polygons(list(rast_Poly), "PALSAR_extent") # spatial polygon + pals_ext <- SpatialPolygons(list(Srs1), proj4string = HH_rast@crs) + + # fia.in.rast<-over(spcheasFIA,pals_ext) #FIA coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene + # fia.in.rast[is.na(fia.in.rast)]<-0 #replace na's with 0's for indexing + # fia.in.rast<-as.logical(fia.in.rast) + coords.in.rast <- over(spcheascoords, pals_ext) # extraction coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene + coords.in.rast[is.na(coords.in.rast)] <- 0 # replace na's with 0's for indexing + if (max(coords.in.rast) != 1) { + next + } + coords.in.rast <- as.logical(coords.in.rast) + ################################ - ##calibration PASLAR data from extraction coords (mean of pixles w/in 48m buffer radius) + ## calibration PASLAR data from extraction coords (mean of pixles w/in 48m buffer radius) ################################ -# HH_data_48m<-extract(HH_rast, spcheasFIA[fia.in.rast], method="simple",buffer=48, small=T, fun=mean) #this step is very slow -# HV_data_48m<-extract(HV_rast, spcheasFIA[fia.in.rast], method="simple",buffer=48, small=T, fun=mean) #this step is very slow - HH_data_48m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=mean) #this step is very slow - HV_data_48m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=mean) #this step is very slow - - filename<-matrix(substr(as.character(HV_filelist[i]),1,15),nrow=length(HH_data_48m),ncol=1) - palsar_date<-matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid==filename[1]]),1,8),"%Y%m%d")),nrow=length(HH_data_48m),ncol=1) - -# all_48<-cbind(filename,palsar_date,spcheasFIA[fia.in.rast]@coords,biomass[fia.in.rast],HH_data_48m,HV_data_48m) - all_48<-cbind(filename,palsar_date,calib_infile$plot[coords.in.rast],spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_48m,HV_data_48m) - if(i==1){ -# extracted_48m[i : nrow(spcheasFIA[fia.in.rast]@coords),]<-all_48 - extracted_48m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_48 - }else if(i>1){ -# extracted_48m[((i-1)*nrow(spcheasFIA[fia.in.rast]@coords)+1) : ((i-1)*nrow(spcheasFIA[fia.in.rast]@coords)+nrow(spcheasFIA[fia.in.rast]@coords)),1:7]<-all_48 - extracted_48m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),1:8]<-all_48 - } + # HH_data_48m<-extract(HH_rast, spcheasFIA[fia.in.rast], method="simple",buffer=48, small=T, fun=mean) #this step is very slow + # HV_data_48m<-extract(HV_rast, spcheasFIA[fia.in.rast], method="simple",buffer=48, small=T, fun=mean) #this step is very slow + HH_data_48m <- extract(HH_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # this step is very slow + 
HV_data_48m <- extract(HV_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # this step is very slow + + filename <- matrix(substr(as.character(HV_filelist[i]), 1, 15), nrow = length(HH_data_48m), ncol = 1) + palsar_date <- matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid == filename[1]]), 1, 8), "%Y%m%d")), nrow = length(HH_data_48m), ncol = 1) + + # all_48<-cbind(filename,palsar_date,spcheasFIA[fia.in.rast]@coords,biomass[fia.in.rast],HH_data_48m,HV_data_48m) + all_48 <- cbind(filename, palsar_date, calib_infile$plot[coords.in.rast], spcheascoords[coords.in.rast]@coords, biomass[coords.in.rast], HH_data_48m, HV_data_48m) + if (i == 1) { + # extracted_48m[i : nrow(spcheasFIA[fia.in.rast]@coords),]<-all_48 + extracted_48m[i:nrow(spcheascoords[coords.in.rast]@coords), ] <- all_48 + } else if (i > 1) { + # extracted_48m[((i-1)*nrow(spcheasFIA[fia.in.rast]@coords)+1) : ((i-1)*nrow(spcheasFIA[fia.in.rast]@coords)+nrow(spcheasFIA[fia.in.rast]@coords)),1:7]<-all_48 + extracted_48m[((i - 1) * nrow(spcheascoords[coords.in.rast]@coords) + 1):((i - 1) * nrow(spcheascoords[coords.in.rast]@coords) + nrow(spcheascoords[coords.in.rast]@coords)), 1:8] <- all_48 + } ############################### - #calibration PASLAR data from extraction coords (mean of pixles w/in 60m buffer radius) + # calibration PASLAR data from extraction coords (mean of pixles w/in 60m buffer radius) ############################### -# HH_data_60m<-extract(HH_rast, spcheasFIA[fia.in.rast], method="simple",buffer=60, small=T, fun=mean) #this step is very slow -# HV_data_60m<-extract(HV_rast, spcheasFIA[fia.in.rast], method="simple",buffer=60, small=T, fun=mean) #this step is very slow - HH_data_60m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #this step is very slow - HV_data_60m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #this step is very slow - - filename<-matrix(substr(as.character(HV_filelist[i]),1,15),nrow=length(HH_data_60m),ncol=1) - colnames(filename)<-"scnid" - palsar_date<-matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid==filename[1]]),1,8),"%Y%m%d")),nrow=length(HH_data_60m),ncol=1) - colnames(palsar_date)<-"scndate" - -# all_60<-cbind(filename,palsar_date,spcheasFIA[fia.in.rast]@coords,biomass[fia.in.rast],HH_data_60m,HV_data_60m) - all_60<-cbind(filename,palsar_date,calib_infile$plot[coords.in.rast],spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) - if(i==1){ -# extracted_60m[i : nrow(spcheasFIA[fia.in.rast]@coords),]<-all_60 - extracted_60m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_60 - }else if(i>1){ -# extracted_60m[((i-1)*nrow(spcheasFIA[fia.in.rast]@coords)+1) : ((i-1)*nrow(spcheasFIA[fia.in.rast]@coords)+nrow(spcheasFIA[fia.in.rast]@coords)),1:7]<-all_60 - extracted_60m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),1:8]<-all_60 - } - - print(paste("i=",i,sep="")) -# print(nrow(spcheasFIA[fia.in.rast]@coords)) -# print(paste("j=",j,sep="")) -# } + # HH_data_60m<-extract(HH_rast, spcheasFIA[fia.in.rast], method="simple",buffer=60, small=T, fun=mean) #this step is very slow + # HV_data_60m<-extract(HV_rast, spcheasFIA[fia.in.rast], method="simple",buffer=60, small=T, fun=mean) #this step is very slow + HH_data_60m <- extract(HH_rast, spcheascoords[coords.in.rast], 
method = "simple", buffer = 60, small = T, fun = mean) # this step is very slow + HV_data_60m <- extract(HV_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 60, small = T, fun = mean) # this step is very slow + + filename <- matrix(substr(as.character(HV_filelist[i]), 1, 15), nrow = length(HH_data_60m), ncol = 1) + colnames(filename) <- "scnid" + palsar_date <- matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid == filename[1]]), 1, 8), "%Y%m%d")), nrow = length(HH_data_60m), ncol = 1) + colnames(palsar_date) <- "scndate" + + # all_60<-cbind(filename,palsar_date,spcheasFIA[fia.in.rast]@coords,biomass[fia.in.rast],HH_data_60m,HV_data_60m) + all_60 <- cbind(filename, palsar_date, calib_infile$plot[coords.in.rast], spcheascoords[coords.in.rast]@coords, biomass[coords.in.rast], HH_data_60m, HV_data_60m) + if (i == 1) { + # extracted_60m[i : nrow(spcheasFIA[fia.in.rast]@coords),]<-all_60 + extracted_60m[i:nrow(spcheascoords[coords.in.rast]@coords), ] <- all_60 + } else if (i > 1) { + # extracted_60m[((i-1)*nrow(spcheasFIA[fia.in.rast]@coords)+1) : ((i-1)*nrow(spcheasFIA[fia.in.rast]@coords)+nrow(spcheasFIA[fia.in.rast]@coords)),1:7]<-all_60 + extracted_60m[((i - 1) * nrow(spcheascoords[coords.in.rast]@coords) + 1):((i - 1) * nrow(spcheascoords[coords.in.rast]@coords) + nrow(spcheascoords[coords.in.rast]@coords)), 1:8] <- all_60 + } + + print(paste("i=", i, sep = "")) + # print(nrow(spcheasFIA[fia.in.rast]@coords)) + # print(paste("j=",j,sep="")) + # } } # extracted_48m<-na.omit(extracted_48m) # extracted_60m<-na.omit(extracted_60m) -colnames(extracted_48m)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.48", "HV.sigma.48") -colnames(extracted_60m)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") +colnames(extracted_48m) <- c("scnid", "scndate", "plot", "UTM.lat", "UTM.lon", "biomass", "HH.sigma.48", "HV.sigma.48") +colnames(extracted_60m) <- c("scnid", "scndate", "plot", "UTM.lat", "UTM.lon", "biomass", "HH.sigma.60", "HV.sigma.60") ## Create working copy of data (so that I don't need to re-extract if I screw up the data) -## NOTE: Here I remove the NAs from coords that don't fall with in the scene and +## NOTE: Here I remove the NAs from coords that don't fall with in the scene and ## the zeros that are an artifact of the mismatch between palsar bbox dim and palsar raster dim (due to tilted orbital path) -dat48<-data.frame(na.omit(extracted_48m[extracted_48m[,7]>0 & extracted_48m[,8]>0,])) -dat60<-data.frame(na.omit(extracted_60m[extracted_48m[,7]>0 & extracted_48m[,8]>0,])) +dat48 <- data.frame(na.omit(extracted_48m[extracted_48m[, 7] > 0 & extracted_48m[, 8] > 0, ])) +dat60 <- data.frame(na.omit(extracted_60m[extracted_48m[, 7] > 0 & extracted_48m[, 8] > 0, ])) ## NOTE: Converting to dataframe changes all values to factor, so here I reformat the data -dat48$scnid<-as.character(dat48$scnid) -dat48$scndate<-as.Date(dat48$scndate,"%Y-%M-%d") -dat48$UTM.lat<- as.numeric(as.character(dat48$UTM.lat)) -dat48$UTM.lon<- as.numeric(as.character(dat48$UTM.lon)) -dat48$biomass<- as.numeric(as.character(dat48$biomass)) -dat48$HH.sigma.48<- as.numeric(as.character(dat48$HH.sigma.48)) -dat48$HV.sigma.48<- as.numeric(as.character(dat48$HV.sigma.48)) -write.table(dat48,file="dat48.csv",quote=FALSE,sep=",",row.names=F) - -dat60$scnid<-as.character(dat60$scnid) -dat60$scndate<-as.Date(dat60$scndate,"%Y-%M-%d") -dat60$UTM.lat<- as.numeric(as.character(dat60$UTM.lat)) -dat60$UTM.lon<- 
as.numeric(as.character(dat60$UTM.lon)) -dat60$biomass<- as.numeric(as.character(dat60$biomass)) -dat60$HH.sigma.60<- as.numeric(as.character(dat60$HH.sigma.60)) -dat60$HV.sigma.60<- as.numeric(as.character(dat60$HV.sigma.60)) -write.table(dat60,file="dat60.csv",quote=FALSE,sep=",",row.names=F) - -pdf("ExtractionQCplots.pdf",width = 6, height = 6, paper='special') -##QC plots -par(mfrow=c(1,2)) #checking coordinate alignment of different extraction buffer sizes -plot(dat48$UTM.lat,dat60$UTM.lat) -plot(dat48$UTM.lon,dat60$UTM.lon) - -par(mfrow=c(1,2)) # checking extracted backscatter values for extraction buffers (should be different but not TOO different) -plot(dat48$HH.sigma.48,dat60$HH.sigma.60,xlab="48m",ylab="60m",main="HH") -plot(dat48$HV.sigma.48,dat60$HV.sigma.60,xlab="48m",ylab="60m",main="HV") - -##Data Exploration +dat48$scnid <- as.character(dat48$scnid) +dat48$scndate <- as.Date(dat48$scndate, "%Y-%M-%d") +dat48$UTM.lat <- as.numeric(as.character(dat48$UTM.lat)) +dat48$UTM.lon <- as.numeric(as.character(dat48$UTM.lon)) +dat48$biomass <- as.numeric(as.character(dat48$biomass)) +dat48$HH.sigma.48 <- as.numeric(as.character(dat48$HH.sigma.48)) +dat48$HV.sigma.48 <- as.numeric(as.character(dat48$HV.sigma.48)) +write.table(dat48, file = "dat48.csv", quote = FALSE, sep = ",", row.names = F) + +dat60$scnid <- as.character(dat60$scnid) +dat60$scndate <- as.Date(dat60$scndate, "%Y-%M-%d") +dat60$UTM.lat <- as.numeric(as.character(dat60$UTM.lat)) +dat60$UTM.lon <- as.numeric(as.character(dat60$UTM.lon)) +dat60$biomass <- as.numeric(as.character(dat60$biomass)) +dat60$HH.sigma.60 <- as.numeric(as.character(dat60$HH.sigma.60)) +dat60$HV.sigma.60 <- as.numeric(as.character(dat60$HV.sigma.60)) +write.table(dat60, file = "dat60.csv", quote = FALSE, sep = ",", row.names = F) + +pdf("ExtractionQCplots.pdf", width = 6, height = 6, paper = "special") +## QC plots +par(mfrow = c(1, 2)) # checking coordinate alignment of different extraction buffer sizes +plot(dat48$UTM.lat, dat60$UTM.lat) +plot(dat48$UTM.lon, dat60$UTM.lon) + +par(mfrow = c(1, 2)) # checking extracted backscatter values for extraction buffers (should be different but not TOO different) +plot(dat48$HH.sigma.48, dat60$HH.sigma.60, xlab = "48m", ylab = "60m", main = "HH") +plot(dat48$HV.sigma.48, dat60$HV.sigma.60, xlab = "48m", ylab = "60m", main = "HV") + +## Data Exploration # par(mfrow=c(2,2)) # plot(dat48$biomass,dat48$HH.sigma.48,xlab="biomass",ylab="HH",main="48m") # plot(dat48$biomass,dat48$HV.sigma.48,xlab="biomass",ylab="HV",main="48m") # plot(dat60$biomass,dat60$HH.sigma.60,xlab="biomass",ylab="HH",main="60m") # plot(dat60$biomass,dat60$HV.sigma.60,xlab="biomass",ylab="HV",main="60m") -par(mfrow=c(2,2)) -scatter.smooth(dat48$biomass,dat48$HH.sigma.48,xlab="biomass",ylab="HH",main="48m",col="grey") -scatter.smooth(dat48$biomass,dat48$HV.sigma.48,xlab="biomass",ylab="HV",main="48m",col="grey") -scatter.smooth(dat60$biomass,dat60$HH.sigma.60,xlab="biomass",ylab="HH",main="60m",col="grey") -scatter.smooth(dat60$biomass,dat60$HV.sigma.60,xlab="biomass",ylab="HV",main="60m",col="grey") - -par(mfrow=c(2,2)) -scatter.smooth(dat48$biomass,dat48$HH.sigma.48/dat48$HV.sigma.48,xlab="biomass",ylab="HH/HV",main="48m",col="grey") -scatter.smooth(dat60$biomass,dat60$HH.sigma.60/dat60$HV.sigma.60,xlab="biomass",ylab="HH/HV",main="60m",col="grey") -scatter.smooth(dat48$biomass,dat48$HH.sigma.48*dat48$HV.sigma.48,xlab="biomass",ylab="HHxHV",main="48m",col="grey") 
-scatter.smooth(dat60$biomass,dat60$HH.sigma.60*dat60$HV.sigma.60,xlab="biomass",ylab="HHxHV",main="60m",col="grey") - -par(mfrow=c(1,2)) -scatter.smooth(dat48$biomass,(dat48$HH.sigma.48-dat48$HV.sigma.48)/(dat48$HH.sigma.48+dat48$HV.sigma.48),xlab="biomass",ylab="(HH-HV)/(HH+HV)",main="48m", col="gray") -scatter.smooth(dat60$biomass,(dat60$HH.sigma.60-dat60$HV.sigma.60)/(dat60$HH.sigma.60+dat60$HV.sigma.60),xlab="biomass",ylab="(HH-HV)/(HH+HV)",main="60m", col="gray") +par(mfrow = c(2, 2)) +scatter.smooth(dat48$biomass, dat48$HH.sigma.48, xlab = "biomass", ylab = "HH", main = "48m", col = "grey") +scatter.smooth(dat48$biomass, dat48$HV.sigma.48, xlab = "biomass", ylab = "HV", main = "48m", col = "grey") +scatter.smooth(dat60$biomass, dat60$HH.sigma.60, xlab = "biomass", ylab = "HH", main = "60m", col = "grey") +scatter.smooth(dat60$biomass, dat60$HV.sigma.60, xlab = "biomass", ylab = "HV", main = "60m", col = "grey") + +par(mfrow = c(2, 2)) +scatter.smooth(dat48$biomass, dat48$HH.sigma.48 / dat48$HV.sigma.48, xlab = "biomass", ylab = "HH/HV", main = "48m", col = "grey") +scatter.smooth(dat60$biomass, dat60$HH.sigma.60 / dat60$HV.sigma.60, xlab = "biomass", ylab = "HH/HV", main = "60m", col = "grey") +scatter.smooth(dat48$biomass, dat48$HH.sigma.48 * dat48$HV.sigma.48, xlab = "biomass", ylab = "HHxHV", main = "48m", col = "grey") +scatter.smooth(dat60$biomass, dat60$HH.sigma.60 * dat60$HV.sigma.60, xlab = "biomass", ylab = "HHxHV", main = "60m", col = "grey") + +par(mfrow = c(1, 2)) +scatter.smooth(dat48$biomass, (dat48$HH.sigma.48 - dat48$HV.sigma.48) / (dat48$HH.sigma.48 + dat48$HV.sigma.48), xlab = "biomass", ylab = "(HH-HV)/(HH+HV)", main = "48m", col = "gray") +scatter.smooth(dat60$biomass, (dat60$HH.sigma.60 - dat60$HV.sigma.60) / (dat60$HH.sigma.60 + dat60$HV.sigma.60), xlab = "biomass", ylab = "(HH-HV)/(HH+HV)", main = "60m", col = "gray") dev.off() # sort.dat48<-dat48[with(dat48,order(dat48$scndate)),] # sort.dat60<-dat60[with(dat60,order(dat60$scndate)),] @@ -316,61 +321,61 @@ dev.off() ######################################################## ####################### -##Use Non-linear Least Squares (nls) to fit rectangular hyperbola NOTE:This method is not preferred. +## Use Non-linear Least Squares (nls) to fit rectangular hyperbola NOTE:This method is not preferred. 
####################### -##NOTE: backscatter=((alpha*beta*biomass)/(beta + alpha*biomass)) +## NOTE: backscatter=((alpha*beta*biomass)/(beta + alpha*biomass)) # buff<-c("48", "60") -# col.names<-c("pol_band", +# col.names<-c("pol_band", # "buffer_radius(m)", -# "biomass_R2", -# "mod.alpha", -# "pval.alpha", -# "alpha.ci.lower", +# "biomass_R2", +# "mod.alpha", +# "pval.alpha", +# "alpha.ci.lower", # "alpha.ci.upper", -# "mod.beta", -# "pval.b", +# "mod.beta", +# "pval.b", # "beta.ci.upper", # "beta.ci.upper", -# "num.iters", +# "num.iters", # "convergence") # mod.params<-matrix(nrow=1,ncol=length(col.names)) # colnames(mod.params)<-col.names -# +# # par(mfrow=c(length(pol_bands),length(buff))) # for(i in 1:length(pol_bands)){ # for(j in 1:length(buff)){ -# +# # y<-eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$",sep=''),paste(pol_bands[i],'.sigma.',buff[j],sep='')))) # x<-(eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$biomass",sep='')))))^0.5 -# +# # # Plot backscatter v biomass # plot(x, y, # xlab=expression(sqrt(biomass)), # ylab=pol_bands[i], # main=buff[j], # col=51,pch=19, cex=0.6, -# xlim=c(min(x),max(x)), +# xlim=c(min(x),max(x)), # ylim=c(min(y),max(y)), # las=1, cex.axis=1.2) -# +# # # Calculate rectangular hyperbola between backscatter and biomass # biomass_curve <- nls(formula=y ~ ((alpha*beta*x)/(beta + alpha*x)), # also in Gu 2002 -# data=list(y = y, x = x), -# start=list(alpha = .75, beta = mean(y[x > quantile(x)[4]])), +# data=list(y = y, x = x), +# start=list(alpha = .75, beta = mean(y[x > quantile(x)[4]])), # na.action="na.exclude", trace=F) # biomass_R2 <- 1 - var(residuals(biomass_curve)) / var(y) # R2 -# +# # # Plot rectangular hyperbola model fit # mod.alpha <- summary(biomass_curve)$parameters[1] # alpha value # mod.beta <- summary(biomass_curve)$parameters[2] # Beta value -# mod.biomass <- seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), by=0.01) -# mod.HH <- (mod.alpha*mod.beta*mod.biomass)/(mod.beta + mod.alpha*mod.biomass) +# mod.biomass <- seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), by=0.01) +# mod.HH <- (mod.alpha*mod.beta*mod.biomass)/(mod.beta + mod.alpha*mod.biomass) # lines(x=mod.biomass, y=mod.HH, col="black", lty=1, lwd=2.5) -# +# # legend("topright", legend=c(paste("R^2=", format(biomass_R2, digits=2)), # paste("alpha=",format(mod.alpha,digits=2)), # paste("beta=",format(mod.beta,digits=2))), bty="n",cex=1.2) -# +# # # Write model parameters to output file # num.iters <- as.numeric(biomass_curve$convInfo[2]) # conv <- as.numeric(biomass_curve$convInfo[1]) @@ -388,23 +393,23 @@ dev.off() # beta.ci.lower <- as.numeric(ci[2,1]) # beta.ci.upper <- as.numeric(ci[2,2]) # pval.b <- as.numeric(summary(biomass_curve)$parameters[8]) -# mod.params <- rbind(mod.params,as.vector(c(pol_bands[i], -# buff[j], -# biomass_R2, -# mod.alpha, -# pval.a, -# alpha.ci.lower, +# mod.params <- rbind(mod.params,as.vector(c(pol_bands[i], +# buff[j], +# biomass_R2, +# mod.alpha, +# pval.a, +# alpha.ci.lower, # alpha.ci.upper, -# mod.beta, +# mod.beta, # pval.b, -# beta.ci.lower, +# beta.ci.lower, # beta.ci.upper, -# num.iters, +# num.iters, # conv))) # print(paste(pol_bands[i],buff[j])) # }} # mod.params<-mod.params[2:nrow(mod.params),] -# +# # xs<-seq(from=1, to=4,by=1) # ys<-as.numeric(mod.params[,4]) # upper<-as.numeric(mod.params[,7]) @@ -413,86 +418,86 @@ dev.off() # plot(xs,ys,ylim=c(0,max(upper)+0.1*max(upper)),ylab="Alpha estimate",xlab="pol.band/buffer.radius") # segments(xs,lower,xs,upper,col="black",lwd=2) # legend("topright",legend=c("1=48,HH", 
"2=60,HH","3=48,HV", "4=60,HV")) -# +# # rhyp<-mod.params ####################### -##Use Maximum likelihood to fit Holling Type 4 +## Use Maximum likelihood to fit Holling Type 4 ####################### -param_est<-matrix(NA,nrow=0,ncol=8) +param_est <- matrix(NA, nrow = 0, ncol = 8) # param_est[,1]<-pol_bands # param_est[,2]<-buff -par(mfrow=c(length(pol_bands),length(buff))) -pdf("HollingIV_curvefits.pdf",width = 6, height = 6, paper='special') -for(i in 1:length(pol_bands)){ - for(j in 1:length(buff)){ - - y<-eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$",sep=''),paste(pol_bands[i],'.sigma.',buff[j],sep='')))) #backscatter values - x<-(eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$biomass",sep='')))))^0.5 - -# x<-x[x=quantile(x)[4]]) #starting est. of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile - a<- mean(y[x>=quantile(x,names=FALSE)[4]])*100 -# # ki<- round(mean(x[y > (max.y/2)-0.05*max.y & y < (max.y/2)+0.05*max.y])) #value of x (biomass) at half of max.y -# b<- round(mean(x[y > (a/2)-0.05*a & y < (a/2)+0.05*a])) -# c<- -1 -# a<-0.5 -b<-quantile(x,names=FALSE)[4] -c<--1 - sd<-sd(y) #stdev of backscatter values -# params<-c(max.y,ki,sd) - params<-c(a,b,c,sd) - - fit <- function(params,x,y){ -# max.y<-params[1] -# ki<-params[2] -# sd<-params[3] - a <-params[1] - b <-params[2] - c <-params[3] - sd<-params[4] -# y.pred<-(max.y*x)/(ki+x) - y.pred<-(a*x^2)/(b+(c*x)+x^2) - - LL<- -sum(dnorm(y,y.pred,sd,log=TRUE)) +par(mfrow = c(length(pol_bands), length(buff))) +pdf("HollingIV_curvefits.pdf", width = 6, height = 6, paper = "special") +for (i in 1:length(pol_bands)) { + for (j in 1:length(buff)) { + y <- eval(parse(text = paste(paste("dat", buff[j], sep = ""), paste("$", sep = ""), paste(pol_bands[i], ".sigma.", buff[j], sep = "")))) # backscatter values + x <- (eval(parse(text = paste(paste("dat", buff[j], sep = ""), paste("$biomass", sep = "")))))^0.5 + + # x<-x[x=quantile(x)[4]]) #starting est. 
of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile + a <- mean(y[x >= quantile(x, names = FALSE)[4]]) * 100 + # # ki<- round(mean(x[y > (max.y/2)-0.05*max.y & y < (max.y/2)+0.05*max.y])) #value of x (biomass) at half of max.y + # b<- round(mean(x[y > (a/2)-0.05*a & y < (a/2)+0.05*a])) + # c<- -1 + # a<-0.5 + b <- quantile(x, names = FALSE)[4] + c <- -1 + sd <- sd(y) # stdev of backscatter values + # params<-c(max.y,ki,sd) + params <- c(a, b, c, sd) + + fit <- function(params, x, y) { + # max.y<-params[1] + # ki<-params[2] + # sd<-params[3] + a <- params[1] + b <- params[2] + c <- params[3] + sd <- params[4] + # y.pred<-(max.y*x)/(ki+x) + y.pred <- (a * x^2) / (b + (c * x) + x^2) + + LL <- -sum(dnorm(y, y.pred, sd, log = TRUE)) return(LL) - } #function - - fit.mod = optim(par=params,fit,x=x,y=y) + } # function + + fit.mod <- optim(par = params, fit, x = x, y = y) fit.mod - aic.mod<- -2*fit.mod$value + 2*length(params) - - params <- c(pol_bands[i],buff[j],fit.mod$par[1:3],2*fit.mod$par[2]/fit.mod$par[3],fit.mod$par[4],aic.mod) #par means parameter estimates - param_est<-rbind(param_est, params) - xseq = seq(0,max(x),length=1000) - - plot(x,y, xlab=expression(sqrt(biomass)),ylab=pol_bands[i],main=buff[i], #something wrong here with main - xlim=c(min(x),max(x)), - ylim=c(min(y),max(y)), - pch=16,col="#CCCCCC") - abline(a=0,b=0) -# lines(cbind(xseq,(as.numeric(params[3])*xseq)/(as.numeric(params[4])+xseq)),lwd=3) #closed circles and solid line for CTRL - lines(cbind(xseq,(as.numeric(params[3])*xseq^2)/(as.numeric(params[4])+(as.numeric(params[5])*xseq)+xseq^2)),lwd=3,col="green") - legend('topright', legend=c(paste("a=", format(fit.mod$par[1], digits=2)), - paste("b=",format(fit.mod$par[2],digits=2)), - paste("c=",format(fit.mod$par[3],digits=2)), - paste("aic=", format(aic.mod, digits=4))), bty="n",cex=0.85) - - }#for j looping over pol_bands -}#for i looping over buff + aic.mod <- -2 * fit.mod$value + 2 * length(params) + + params <- c(pol_bands[i], buff[j], fit.mod$par[1:3], 2 * fit.mod$par[2] / fit.mod$par[3], fit.mod$par[4], aic.mod) # par means parameter estimates + param_est <- rbind(param_est, params) + xseq <- seq(0, max(x), length = 1000) + + plot(x, y, + xlab = expression(sqrt(biomass)), ylab = pol_bands[i], main = buff[i], # something wrong here with main + xlim = c(min(x), max(x)), + ylim = c(min(y), max(y)), + pch = 16, col = "#CCCCCC" + ) + abline(a = 0, b = 0) + # lines(cbind(xseq,(as.numeric(params[3])*xseq)/(as.numeric(params[4])+xseq)),lwd=3) #closed circles and solid line for CTRL + lines(cbind(xseq, (as.numeric(params[3]) * xseq^2) / (as.numeric(params[4]) + (as.numeric(params[5]) * xseq) + xseq^2)), lwd = 3, col = "green") + legend("topright", legend = c( + paste("a=", format(fit.mod$par[1], digits = 2)), + paste("b=", format(fit.mod$par[2], digits = 2)), + paste("c=", format(fit.mod$par[3], digits = 2)), + paste("aic=", format(aic.mod, digits = 4)) + ), bty = "n", cex = 0.85) + } # for j looping over pol_bands +} # for i looping over buff dev.off() -colnames(param_est)<-c("pol_bands","buff","a","b","c","bio.at.peak.backscatter", "sd","AIC") +colnames(param_est) <- c("pol_bands", "buff", "a", "b", "c", "bio.at.peak.backscatter", "sd", "AIC") param_est -write.table(param_est,file="HollingIV_param_estimates.csv",quote=FALSE,sep=",",row.names=F) +write.table(param_est, file = "HollingIV_param_estimates.csv", quote = FALSE, sep = ",", row.names = F) ################################## ################################## ################# 
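## ---------------------------------------------------------------------------
## Illustrative sketch only (not part of this patch): a minimal, stand-alone
## version of the Holling Type IV fit above, y = a*x^2 / (b + c*x + x^2),
## estimated by minimising a Gaussian negative log-likelihood with optim().
## The data are synthetic and the names (a_true, nll, fit, aic) hypothetical.
set.seed(1)
x <- runif(200, 0, 20) # stand-in for sqrt(biomass)
a_true <- 0.08
b_true <- 30
c_true <- -1
y <- (a_true * x^2) / (b_true + c_true * x + x^2) + rnorm(200, sd = 0.005)

nll <- function(p, x, y) { # p = c(a, b, c, sd)
  if (p[4] <= 0) {
    return(Inf) # keep the error sd positive during optimisation
  }
  pred <- (p[1] * x^2) / (p[2] + p[3] * x + x^2)
  -sum(dnorm(y, mean = pred, sd = p[4], log = TRUE))
}

fit <- optim(par = c(0.1, 20, -0.5, sd(y)), fn = nll, x = x, y = y)
aic <- 2 * length(fit$par) + 2 * fit$value # standard AIC; fit$value is the minimised -logLik

plot(x, y, pch = 16, col = "grey", xlab = "sqrt(biomass)", ylab = "backscatter")
curve((fit$par[1] * x^2) / (fit$par[2] + fit$par[3] * x + x^2), add = TRUE, lwd = 2)
## ---------------------------------------------------------------------------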
-##diagnotics? +## diagnotics? ################# - - diff --git a/modules/data.remote/inst/scripts/old/ChEAS_FIA_02102014.R b/modules/data.remote/inst/scripts/old/ChEAS_FIA_02102014.R index d5f893678b2..f1e6ec5ae39 100644 --- a/modules/data.remote/inst/scripts/old/ChEAS_FIA_02102014.R +++ b/modules/data.remote/inst/scripts/old/ChEAS_FIA_02102014.R @@ -1,6 +1,6 @@ -##Author Brady S. Hardiman 11/12/2013 +## Author Brady S. Hardiman 11/12/2013 -##Read in fuzzed FIA coordinates supplied by Andy Finley (MSU), extract palsar backscatter values, fit curves, save figures and extracted values (with coordinates) +## Read in fuzzed FIA coordinates supplied by Andy Finley (MSU), extract palsar backscatter values, fit curves, save figures and extracted values (with coordinates) ################################ ## Load Required Packages @@ -18,92 +18,96 @@ library(R2HTML) ################################ ## OPTIONS ################################ -kml=0 #1 = generate and save kml files of extraction coordinates; 0 = do not generate new kml -fia=0 #1 = use FIA coordinates, 0 = use WLEF/Park Falls Tower coordinates -leaf.off=0 #1=include PALSAR scenes acquired duing leaf off period of the year, 0=exclude leaf off scene dates +kml <- 0 # 1 = generate and save kml files of extraction coordinates; 0 = do not generate new kml +fia <- 0 # 1 = use FIA coordinates, 0 = use WLEF/Park Falls Tower coordinates +leaf.off <- 0 # 1=include PALSAR scenes acquired duing leaf off period of the year, 0=exclude leaf off scene dates # buff=c(48) #vector of buffer sizes (in meters) to extract -coord.set<-c("WLEF", "FIA") +coord.set <- c("WLEF", "FIA") # metadata<- read.csv("~/data.remote/output/metadata/output_metadata.csv", sep="\t", header=T) ##for Brady's Linux -metadata<- read.csv("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/metadata/output_metadata.csv", sep="\t", header=T) ##location of PALSAR metadata table -palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/palsar_scenes/geo_corrected_single_sigma") ##location of PALSAR raw files -calib_inpath <-"/Users/hardimanb/Desktop/data.remote(Andys_Copy)/biometry" ##location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter -outpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/data") ##For saving +metadata <- read.csv("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/metadata/output_metadata.csv", sep = "\t", header = T) ## location of PALSAR metadata table +palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/palsar_scenes/geo_corrected_single_sigma") ## location of PALSAR raw files +calib_inpath <- "/Users/hardimanb/Desktop/data.remote(Andys_Copy)/biometry" ## location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter +outpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/data") ## For saving ################################ ## Read in coordinate data for calibration of PALSAR backscatter returns -## Uses PALSAR metadata file to determine geographic extent of available PALSAR data and crops extraction coord set +## Uses PALSAR metadata file to determine geographic extent of available PALSAR data and crops extraction coord set ## to match PALSAR extent. Reprojects extraction coords to match PALSAR geotiffs. 
################################ -if(fia==1){ #EXTRACTS FROM FIA COORDINATES -# calib_inpath <-"~/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##for Brady's Linux - calib_infile <-read.csv(file.path(calib_inpath,"wi-biomass-fuzzed.csv"), sep=",", header=T) #Wisconsin FIA plots - coords<-data.frame(calib_infile$FUZZED_LON,calib_infile$FUZZED_LAT) #lon and lat (ChEAS: UTM Zone 15N NAD83) - Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -# wi.fia<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) - latlon<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) -# FIA.points <- SpatialPointsDataFrame(Sr1, wi.fia) #convert to class="SpatialPointsDataFrame" for export as kml - spdf.latlon <- SpatialPointsDataFrame(Sr1, latlon) #convert to class="SpatialPointsDataFrame" for export as kml - if(kml==1){writeOGR(spdf.latlon, layer=1, "WI_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) +if (fia == 1) { # EXTRACTS FROM FIA COORDINATES + # calib_inpath <-"~/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##for Brady's Linux + calib_infile <- read.csv(file.path(calib_inpath, "wi-biomass-fuzzed.csv"), sep = ",", header = T) # Wisconsin FIA plots + coords <- data.frame(calib_infile$FUZZED_LON, calib_infile$FUZZED_LAT) # lon and lat (ChEAS: UTM Zone 15N NAD83) + Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + # wi.fia<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) + latlon <- data.frame(calib_infile$FUZZED_LAT, calib_infile$FUZZED_LON) + # FIA.points <- SpatialPointsDataFrame(Sr1, wi.fia) #convert to class="SpatialPointsDataFrame" for export as kml + spdf.latlon <- SpatialPointsDataFrame(Sr1, latlon) # convert to class="SpatialPointsDataFrame" for export as kml + if (kml == 1) { + writeOGR(spdf.latlon, layer = 1, "WI_FIA.kml", driver = "KML") # export as kml (this puts in in the Home folder) } -}else{#EXTRACTS FROM WLEF COORDINATES +} else { # EXTRACTS FROM WLEF COORDINATES # calib_inpath <-"~/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##for Brady's Linux - calib_infile <-read.csv(file.path(calib_inpath,"biometry_trimmed.csv"), sep=",", header=T) #WLEF plots -# upid<-paste(calib_infile$plot,calib_infile$subplot,sep='.') #create unique plot identifier -# calib_infile<-cbind(calib_infile[,1:2],upid,calib_infile[,3:8]) - calib_infile<-aggregate(calib_infile, list(calib_infile[,1]), mean) ##This will give errors, but these can be safely ignored - calib_infile$plot<-calib_infile$Group.1 - calib_infile<-cbind(calib_infile[,2],calib_infile[,5:9]) - colnames(calib_infile)<-c("plot","easting","northing","adult_density","sapling_density","ABG_biomass") - - coords<-data.frame(calib_infile$easting,calib_infile$northing) #eastings and northings (ChEAS: UTM Zone 15N NAD83) - Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) - wlef<-data.frame(paste(calib_infile$plot,calib_infile$subplot,sep="_")) + calib_infile <- read.csv(file.path(calib_inpath, "biometry_trimmed.csv"), sep = ",", header = T) # WLEF plots + # upid<-paste(calib_infile$plot,calib_infile$subplot,sep='.') #create unique plot identifier + # calib_infile<-cbind(calib_infile[,1:2],upid,calib_infile[,3:8]) + calib_infile <- aggregate(calib_infile, list(calib_infile[, 1]), mean) ## This will give errors, but these can be safely ignored + calib_infile$plot <- calib_infile$Group.1 
+ calib_infile <- cbind(calib_infile[, 2], calib_infile[, 5:9]) + colnames(calib_infile) <- c("plot", "easting", "northing", "adult_density", "sapling_density", "ABG_biomass") + + coords <- data.frame(calib_infile$easting, calib_infile$northing) # eastings and northings (ChEAS: UTM Zone 15N NAD83) + Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) + wlef <- data.frame(paste(calib_infile$plot, calib_infile$subplot, sep = "_")) epsg4326String <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs") - Sr1_4google <- spTransform(Sr1,epsg4326String) #class=SpatialPoints - Sr1_4google <- SpatialPointsDataFrame(Sr1_4google, wlef) #convert to class="SpatialPointsDataFrame" for export as kml - if(kml==1){writeOGR(Sr1_4google, layer=1, "WLEF.kml", driver="KML") #export as kml (this puts in in the Home folder) + Sr1_4google <- spTransform(Sr1, epsg4326String) # class=SpatialPoints + Sr1_4google <- SpatialPointsDataFrame(Sr1_4google, wlef) # convert to class="SpatialPointsDataFrame" for export as kml + if (kml == 1) { + writeOGR(Sr1_4google, layer = 1, "WLEF.kml", driver = "KML") # export as kml (this puts in in the Home folder) } } ## corner coords for cheas domain based on avaialable PALSAR data. (Maybe switch to bounding.box.xy()? ) -ChEAS_PLASAR_extent <-rbind(cbind(min(metadata$scn_nwlon),max(metadata$scn_nwlat)), - cbind(max(metadata$scn_nelon),max(metadata$scn_nelat)), - cbind(max(metadata$scn_selon),min(metadata$scn_selat)), - cbind(min(metadata$scn_swlon),min(metadata$scn_swlat)), - cbind(min(metadata$scn_nwlon),max(metadata$scn_nwlat))) - -ChEAS_PLASAR_extent<- Polygon(ChEAS_PLASAR_extent) #spatial polygon from cheas-palsar extent -Srs1<- Polygons(list(ChEAS_PLASAR_extent),"ChEAS_PLASAR_extent") #spatial polygons (plural) -ChEAS_PLASAR_extent<-SpatialPolygons(list(Srs1),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - -Sr1<-spTransform(Sr1,CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) +ChEAS_PLASAR_extent <- rbind( + cbind(min(metadata$scn_nwlon), max(metadata$scn_nwlat)), + cbind(max(metadata$scn_nelon), max(metadata$scn_nelat)), + cbind(max(metadata$scn_selon), min(metadata$scn_selat)), + cbind(min(metadata$scn_swlon), min(metadata$scn_swlat)), + cbind(min(metadata$scn_nwlon), max(metadata$scn_nwlat)) +) + +ChEAS_PLASAR_extent <- Polygon(ChEAS_PLASAR_extent) # spatial polygon from cheas-palsar extent +Srs1 <- Polygons(list(ChEAS_PLASAR_extent), "ChEAS_PLASAR_extent") # spatial polygons (plural) +ChEAS_PLASAR_extent <- SpatialPolygons(list(Srs1), proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + +Sr1 <- spTransform(Sr1, CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) # FIA.in.cheas<-as.vector(over(FIA.points,ChEAS_PLASAR_extent)) #subset of FIA plots that falls within Cheas-PALSAR extent -coords.in.cheas<-as.vector(over(Sr1,ChEAS_PLASAR_extent)) #subset of plots that falls within Cheas-PALSAR extent +coords.in.cheas <- as.vector(over(Sr1, ChEAS_PLASAR_extent)) # subset of plots that falls within Cheas-PALSAR extent # FIA.in.cheas[is.na(FIA.in.cheas)]<-0 #replace na's with 0's for indexing -coords.in.cheas[is.na(coords.in.cheas)]<-0 #replace na's with 0's for indexing +coords.in.cheas[is.na(coords.in.cheas)] <- 0 # replace na's with 0's for indexing -##Biomass source data -if(fia==1){ - biomass<-calib_infile[as.logical(coords.in.cheas),4] #for FIA -} else{ - biomass<-calib_infile[as.logical(coords.in.cheas),'ABG_biomass'] #for WLEF +## Biomass 
source data +if (fia == 1) { + biomass <- calib_infile[as.logical(coords.in.cheas), 4] # for FIA +} else { + biomass <- calib_infile[as.logical(coords.in.cheas), "ABG_biomass"] # for WLEF } -##Plot-IDs; will be used later on for generating time series of backscatter values -if(fia==1){ - plot<-NA #for FIA NOTE: Add in FIA plot unique identifiers if available -} else{ - plot<-calib_infile[as.logical(coords.in.cheas),'plot'] #for WLEF +## Plot-IDs; will be used later on for generating time series of backscatter values +if (fia == 1) { + plot <- NA # for FIA NOTE: Add in FIA plot unique identifiers if available +} else { + plot <- calib_infile[as.logical(coords.in.cheas), "plot"] # for WLEF } ## Subset extraction coords that fall within PALSAR observation area -# cheasFIA<-Sr1@coords[FIA.in.cheas==1,] -cheas.coords<-Sr1@coords[coords.in.cheas==1,] ##subset of coords that falls within Cheas-PALSAR extent +# cheasFIA<-Sr1@coords[FIA.in.cheas==1,] +cheas.coords <- Sr1@coords[coords.in.cheas == 1, ] ## subset of coords that falls within Cheas-PALSAR extent # spcheasFIA <- SpatialPoints(cheasFIA,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -spcheascoords <- SpatialPoints(cheas.coords,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) +spcheascoords <- SpatialPoints(cheas.coords, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -# writeOGR(cheasFIA, layer=1, "cheas_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) +# writeOGR(cheasFIA, layer=1, "cheas_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) ################################ ## Begin extracting PALSAR values at FIA plot coordinates @@ -114,170 +118,171 @@ spcheascoords <- SpatialPoints(cheas.coords,proj4string=CRS("+proj=longlat +ellp # date<-as.Date(metadata$scndate, format='%Y%m%d') # col_names<-c(rbind(paste(date, "HH",sep="_"),paste(date, "HV",sep="_"))) -pol_bands<-c("HH", "HV") -numfiles<-length(list.files(file.path(palsar_inpath, pol_bands[1]), pattern=".tif" ,recursive=F)) +pol_bands <- c("HH", "HV") +numfiles <- length(list.files(file.path(palsar_inpath, pol_bands[1]), pattern = ".tif", recursive = F)) # lake_extracted<-matrix(NA, nrow(lake_coords),length(pol_bands)*numfiles) # disturbance_extracted_40m<-matrix(NA, nrow(disturbance_coords),length(pol_bands)*numfiles) -# +# # colnames(lake_extracted)<-col_names # colnames(disturbance_extracted)<-col_names # colnames(disturbance_extracted_40m)<-col_names -if( fia==1){ -# extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. -# extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. - extracted_48m<-matrix(nrow=0, ncol=7) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -# extracted_60m<-matrix(nrow=0, ncol=7) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -} else{ -# extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. -# extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. - extracted_48m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -# extracted_60m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. 
nrow=number of coordinates being extracted. ncol=# of pol_bands +if (fia == 1) { + # extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. + # extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. + extracted_48m <- matrix(nrow = 0, ncol = 7) # matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands + # extracted_60m<-matrix(nrow=0, ncol=7) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +} else { + # extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. + # extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. + extracted_48m <- matrix(nrow = 0, ncol = 8) # matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands + # extracted_60m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands } # colnames(extracted_48m)<-pol_bands # colnames(extracted_60m)<-pol_bands -for(i in 1:numfiles){ - HH_filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[1]), pattern=".tif" ,recursive=F)) - HH_inpath<-file.path(palsar_inpath, pol_bands[1],HH_filelist[i]) - HH_rast<-raster(HH_inpath) - - HV_filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern=".tif" ,recursive=F)) - HV_inpath<-file.path(palsar_inpath, pol_bands[2],HV_filelist[i]) - HV_rast<-raster(HV_inpath) - - ################################################ - ## Check each extraction coordinate to see if it falls inside the bounding box of this palsar scene. - ## Only extract the ones that do. - ## NOTE: The bounding box for the PALSAR scene will be larger than the PALSAR scene itself (due to a tilted orbital path). - ## This means that the extraction loop will some number of palsar scenes that have all zeros for the backscatter values. - ## These zeros are truncated in post processing, prior to curve fitting. 
- ################################################ -# rast_box<-bbox(HH_rast@extent) #bounding box of single palsar scene - scnid<-substr(as.character(HV_filelist[i]),1,15) - - ##create data.frame from raster corner coords by querying metadata - ##NOTE: I multiply the lon coord by -1 to make it work with the 'longlat' projection - pals.ext<-Polygon(rbind( - c(metadata$scn_nwlon[metadata$scnid==scnid[1]],metadata$scn_nwlat[metadata$scnid==scnid[1]]), - c(metadata$scn_nelon[metadata$scnid==scnid[1]],metadata$scn_nelat[metadata$scnid==scnid[1]]), - c(metadata$scn_selon[metadata$scnid==scnid[1]],metadata$scn_selat[metadata$scnid==scnid[1]]), - c(metadata$scn_swlon[metadata$scnid==scnid[1]],metadata$scn_swlat[metadata$scnid==scnid[1]]), - c(metadata$scn_nwlon[metadata$scnid==scnid[1]],metadata$scn_nwlat[metadata$scnid==scnid[1]]))) - - ##make spatial polygon from raster extent - pals.ext.poly<- Polygons(list(pals.ext),"pals.ext") #spatial polygons (plural) - scn.extent<-SpatialPolygons(list(pals.ext.poly),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - -# rast_Poly<-Polygon(rbind( #polygon from bbox NOTE: bbox is not same as true raster extent -# c(rast_box[1,1],rast_box[2,2]), -# c(rast_box[1,2],rast_box[2,2]), -# c(rast_box[1,2],rast_box[2,1]), -# c(rast_box[1,1],rast_box[2,1]), -# c(rast_box[1,1],rast_box[2,2]))) -# Srs1<- Polygons(list(rast_Poly),"PALSAR_extent") #spatial polygon -# pals_ext<-SpatialPolygons(list(Srs1),proj4string=HH_rast@crs) - scn.extent<- spTransform(scn.extent,HH_rast@crs) -# if(i == 1){ - spcheascoords<-spTransform(spcheascoords,HH_rast@crs) #Convert coords being extracted to CRS of PALSAR raster files -# } - - coords.in.rast<-over(spcheascoords,scn.extent) #extraction coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene - coords.in.rast[is.na(coords.in.rast)]<-0 #replace na's with 0's for indexing - if(max(coords.in.rast)!=1){ #jump to next palsar file if no extraction coordinates fall within this one - next - } - coords.in.rast<-as.logical(coords.in.rast) - +for (i in 1:numfiles) { + HH_filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[1]), pattern = ".tif", recursive = F)) + HH_inpath <- file.path(palsar_inpath, pol_bands[1], HH_filelist[i]) + HH_rast <- raster(HH_inpath) + + HV_filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern = ".tif", recursive = F)) + HV_inpath <- file.path(palsar_inpath, pol_bands[2], HV_filelist[i]) + HV_rast <- raster(HV_inpath) + + ################################################ + ## Check each extraction coordinate to see if it falls inside the bounding box of this palsar scene. + ## Only extract the ones that do. + ## NOTE: The bounding box for the PALSAR scene will be larger than the PALSAR scene itself (due to a tilted orbital path). + ## This means that the extraction loop will some number of palsar scenes that have all zeros for the backscatter values. + ## These zeros are truncated in post processing, prior to curve fitting. 
+ ################################################ + # rast_box<-bbox(HH_rast@extent) #bounding box of single palsar scene + scnid <- substr(as.character(HV_filelist[i]), 1, 15) + + ## create data.frame from raster corner coords by querying metadata + ## NOTE: I multiply the lon coord by -1 to make it work with the 'longlat' projection + pals.ext <- Polygon(rbind( + c(metadata$scn_nwlon[metadata$scnid == scnid[1]], metadata$scn_nwlat[metadata$scnid == scnid[1]]), + c(metadata$scn_nelon[metadata$scnid == scnid[1]], metadata$scn_nelat[metadata$scnid == scnid[1]]), + c(metadata$scn_selon[metadata$scnid == scnid[1]], metadata$scn_selat[metadata$scnid == scnid[1]]), + c(metadata$scn_swlon[metadata$scnid == scnid[1]], metadata$scn_swlat[metadata$scnid == scnid[1]]), + c(metadata$scn_nwlon[metadata$scnid == scnid[1]], metadata$scn_nwlat[metadata$scnid == scnid[1]]) + )) + + ## make spatial polygon from raster extent + pals.ext.poly <- Polygons(list(pals.ext), "pals.ext") # spatial polygons (plural) + scn.extent <- SpatialPolygons(list(pals.ext.poly), proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + + # rast_Poly<-Polygon(rbind( #polygon from bbox NOTE: bbox is not same as true raster extent + # c(rast_box[1,1],rast_box[2,2]), + # c(rast_box[1,2],rast_box[2,2]), + # c(rast_box[1,2],rast_box[2,1]), + # c(rast_box[1,1],rast_box[2,1]), + # c(rast_box[1,1],rast_box[2,2]))) + # Srs1<- Polygons(list(rast_Poly),"PALSAR_extent") #spatial polygon + # pals_ext<-SpatialPolygons(list(Srs1),proj4string=HH_rast@crs) + scn.extent <- spTransform(scn.extent, HH_rast@crs) + # if(i == 1){ + spcheascoords <- spTransform(spcheascoords, HH_rast@crs) # Convert coords being extracted to CRS of PALSAR raster files + # } + + coords.in.rast <- over(spcheascoords, scn.extent) # extraction coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene + coords.in.rast[is.na(coords.in.rast)] <- 0 # replace na's with 0's for indexing + if (max(coords.in.rast) != 1) { # jump to next palsar file if no extraction coordinates fall within this one + next + } + coords.in.rast <- as.logical(coords.in.rast) + ################################ - ##calibration PASLAR data from extraction coords (mean of pixels w/in 48m buffer radius) + ## calibration PASLAR data from extraction coords (mean of pixels w/in 48m buffer radius) ################################ - HH_data_48m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow - HV_data_48m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow - - scnid<-matrix(substr(as.character(HV_filelist[i]),1,15),nrow=length(HH_data_48m),ncol=1) #vector of this scnid. 
length = number of coords in this scene
- palsar_date<-matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid==scnid[1]]),1,8),"%Y%m%d")),nrow=length(HH_data_48m),ncol=1) # same as above for scn date
-
- ##cbind for output
- if(fia==1){
- all_48<-cbind(scnid,palsar_date,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_48m,HV_data_48m) #for FIA (no plot identifiers)
- } else{
- all_48<- cbind(scnid,palsar_date,as.character(calib_infile$plot[coords.in.rast]),spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_48m,HV_data_48m) #for WLEF
- }
- ##rbind to previous loop output
-# if(i==1){
-# extracted_48m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_48
-# }else if(i>1){
-# extracted_48m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_48
-# }
- extracted_48m<-rbind(extracted_48m,all_48)
+ HH_data_48m <- extract(HH_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # Extract backscatter values from all coords in this scn. This step is very slow
+ HV_data_48m <- extract(HV_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # Extract backscatter values from all coords in this scn. This step is very slow
+
+ scnid <- matrix(substr(as.character(HV_filelist[i]), 1, 15), nrow = length(HH_data_48m), ncol = 1) # vector of this scnid. length = number of coords in this scene
+ palsar_date <- matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid == scnid[1]]), 1, 8), "%Y%m%d")), nrow = length(HH_data_48m), ncol = 1) # same as above for scn date
+
+ ## cbind for output
+ if (fia == 1) {
+ all_48 <- cbind(scnid, palsar_date, spcheascoords[coords.in.rast]@coords, biomass[coords.in.rast], HH_data_48m, HV_data_48m) # for FIA (no plot identifiers)
+ } else {
+ all_48 <- cbind(scnid, palsar_date, as.character(calib_infile$plot[coords.in.rast]), spcheascoords[coords.in.rast]@coords, biomass[coords.in.rast], HH_data_48m, HV_data_48m) # for WLEF
+ }
+ ## rbind to previous loop output
+ # if(i==1){
+ # extracted_48m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_48
+ # }else if(i>1){
+ # extracted_48m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_48
+ # }
+ extracted_48m <- rbind(extracted_48m, all_48)
###############################
- #calibration PASLAR data from extraction coords (mean of pixles w/in 60m buffer radius)
+ # calibration PALSAR data from extraction coords (mean of pixels w/in 60m buffer radius)
###############################
-# HH_data_60m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow
-# HV_data_60m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn.
This step is very slow -# -# ##cbind for output -# if(fia==1){ -# all_60<-cbind(scnid,palsar_date,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for FIA (no plot identifiers) -# } else{ -# all_60<- cbind(scnid,palsar_date,calib_infile$plot[coords.in.rast],spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for WLEF -# } -# -# # ##rbind to previous loop output -# # if(i==1){ -# # extracted_60m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_60 -# # }else if(i>1){ -# # extracted_60m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_60 -# # } -# extracted_60m<-rbind(extracted_60m,all_60) - - print(paste("i=",i,sep="")) - print(scnid[1]) - print(palsar_date[1]) -# print(length(HH_data_48m) == length(HH_data_60m)) + # HH_data_60m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow + # HV_data_60m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow + # + # ##cbind for output + # if(fia==1){ + # all_60<-cbind(scnid,palsar_date,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for FIA (no plot identifiers) + # } else{ + # all_60<- cbind(scnid,palsar_date,calib_infile$plot[coords.in.rast],spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for WLEF + # } + # + # # ##rbind to previous loop output + # # if(i==1){ + # # extracted_60m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_60 + # # }else if(i>1){ + # # extracted_60m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_60 + # # } + # extracted_60m<-rbind(extracted_60m,all_60) + + print(paste("i=", i, sep = "")) + print(scnid[1]) + print(palsar_date[1]) + # print(length(HH_data_48m) == length(HH_data_60m)) } # write.csv(extracted_48m,file=paste(outpath,"/extracted_48m.csv",sep=""),quote=FALSE,row.names=F) # write.csv(extracted_60m,file=paste(outpath,"/extracted_60m.csv",sep=""),quote=FALSE,row.names=F) ## Create working copy of data (so that I don't need to re-extract if I screw up the data) -## NOTE: Here I remove the NAs from coords that don't fall with in the scene and +## NOTE: Here I remove the NAs from coords that don't fall with in the scene and ## the zeros that are an artifact of the mismatch between palsar bbox dim and palsar raster dim (due to tilted orbital path) # dat48<-data.frame(extracted_48m[as.numeric(extracted_48m[,ncol(extracted_48m)])!=0,]) #& extracted_48m[,ncol(extracted_48m)]>0,]) -dat48<-data.frame(extracted_48m) +dat48 <- data.frame(extracted_48m) # dat60<-data.frame(extracted_60m[as.numeric(extracted_60m[,ncol(extracted_60m)])!=0,]) #& extracted_60m[,ncol(extracted_60m)]>0,]) # dat48<-data.frame(extracted_48m) # dat60<-data.frame(extracted_60m) -if(fia==1){ #FIA data does not contain a plot-id, so the outpout has one less column than WLEF - colnames(dat48)<-c("scnid","scndate", "UTM.lat", "UTM.lon", "biomass","HH.sigma.48", "HV.sigma.48") -# colnames(dat60)<-c("scnid","scndate", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") -}else{ - colnames(dat48)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", 
"biomass","HH.sigma.48", "HV.sigma.48") -# colnames(dat60)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") +if (fia == 1) { # FIA data does not contain a plot-id, so the outpout has one less column than WLEF + colnames(dat48) <- c("scnid", "scndate", "UTM.lat", "UTM.lon", "biomass", "HH.sigma.48", "HV.sigma.48") + # colnames(dat60)<-c("scnid","scndate", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") +} else { + colnames(dat48) <- c("scnid", "scndate", "plot", "UTM.lat", "UTM.lon", "biomass", "HH.sigma.48", "HV.sigma.48") + # colnames(dat60)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") } ## NOTE: Converting to dataframe changes all values to factor, so here I reformat the data and save it -dat48$scnid<-as.character(dat48$scnid) -dat48$scndate<-as.Date(dat48$scndate,"%Y-%m-%d") -dat48$UTM.lat<- as.numeric(as.character(dat48$UTM.lat)) -dat48$UTM.lon<- as.numeric(as.character(dat48$UTM.lon)) -dat48$biomass<- as.numeric(as.character(dat48$biomass)) -dat48$HH.sigma.48<- as.numeric(as.character(dat48$HH.sigma.48)) -dat48$HV.sigma.48<- as.numeric(as.character(dat48$HV.sigma.48)) - -#This will exclude scenes from the leaf off period (Nov-April) -if(leaf.off==1){ #include leaf off data - dat48<-dat48 -}else{ #exclude leaf off data - dat48<-dat48[as.numeric(format(dat48$scndate,"%m"))>=05 & as.numeric(format(dat48$scndate,"%m"))<=10,] +dat48$scnid <- as.character(dat48$scnid) +dat48$scndate <- as.Date(dat48$scndate, "%Y-%m-%d") +dat48$UTM.lat <- as.numeric(as.character(dat48$UTM.lat)) +dat48$UTM.lon <- as.numeric(as.character(dat48$UTM.lon)) +dat48$biomass <- as.numeric(as.character(dat48$biomass)) +dat48$HH.sigma.48 <- as.numeric(as.character(dat48$HH.sigma.48)) +dat48$HV.sigma.48 <- as.numeric(as.character(dat48$HV.sigma.48)) + +# This will exclude scenes from the leaf off period (Nov-April) +if (leaf.off == 1) { # include leaf off data + dat48 <- dat48 +} else { # exclude leaf off data + dat48 <- dat48[as.numeric(format(dat48$scndate, "%m")) >= 05 & as.numeric(format(dat48$scndate, "%m")) <= 10, ] } -#Save extracted data -write.table(dat48,file=paste(outpath,"/",coord.set[fia+1],"_dat48.csv",sep=""),sep=",",quote=FALSE,col.names = TRUE, row.names=F) +# Save extracted data +write.table(dat48, file = paste(outpath, "/", coord.set[fia + 1], "_dat48.csv", sep = ""), sep = ",", quote = FALSE, col.names = TRUE, row.names = F) # dat60$scnid<-as.character(dat60$scnid) @@ -289,136 +294,140 @@ write.table(dat48,file=paste(outpath,"/",coord.set[fia+1],"_dat48.csv",sep=""),s # dat60$HV.sigma.60<- as.numeric(as.character(dat60$HV.sigma.60)) # write.csv(dat60,file=paste(outpath,"/dat60.csv",sep=""),quote=FALSE,row.names=F) -#Generate PDF of raw data exploration -#NOTE: Some of these figures will not be relevant for the FIA dataset -pdf(paste(outpath,"/",coord.set[fia+1], "_ExtractionQCplots.pdf",sep=""),width = 6, height = 6, paper='special') - -par(mfrow=c(1,2)) -years<-as.numeric(format(dat48$scndate,"%Y")) -hist(years,freq=TRUE,main="By year") -months<-as.numeric(format(dat48$scndate,"%m")) -hist(months,freq=TRUE,main="By month") - -par(mfrow=c(1,3)) -hist(dat48$biomass,main=paste(coord.set[fia+1],"biomass",sep=" ")) -hist(dat48$HH.sigma.48,main=paste(coord.set[fia+1],"HH",sep=" ")) -hist(dat48$HV.sigma.48,main=paste(coord.set[fia+1],"HV",sep=" ")) - -par(mfrow=c(1,2)) -scatter.smooth(dat48$biomass,dat48$HH.sigma.48,cex=0,xlab="biomass",ylab="HH",main="48m",col="grey") - 
points(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2007],col=1,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2008],col=2,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2009],col=3,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col=4,cex=0.5) - legend("topright",pch=1,legend=c(2007,2008,2009,2010), cex=0.7,pt.cex=0.5,col=1:4,bty="n",xjust=1) -scatter.smooth(dat48$biomass,dat48$HV.sigma.48,cex=0,xlab="biomass",ylab="HV",main="48m",col="grey") - points(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2007],col=1,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2008],col=2,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2009],col=3,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],col=4,cex=0.5) - legend("topright",pch=1,legend=c(2007,2008,2009,2010), cex=0.7,pt.cex=0.5,col=1:4,bty="n",xjust=1) +# Generate PDF of raw data exploration +# NOTE: Some of these figures will not be relevant for the FIA dataset +pdf(paste(outpath, "/", coord.set[fia + 1], "_ExtractionQCplots.pdf", sep = ""), width = 6, height = 6, paper = "special") + +par(mfrow = c(1, 2)) +years <- as.numeric(format(dat48$scndate, "%Y")) +hist(years, freq = TRUE, main = "By year") +months <- as.numeric(format(dat48$scndate, "%m")) +hist(months, freq = TRUE, main = "By month") + +par(mfrow = c(1, 3)) +hist(dat48$biomass, main = paste(coord.set[fia + 1], "biomass", sep = " ")) +hist(dat48$HH.sigma.48, main = paste(coord.set[fia + 1], "HH", sep = " ")) +hist(dat48$HV.sigma.48, main = paste(coord.set[fia + 1], "HV", sep = " ")) + +par(mfrow = c(1, 2)) +scatter.smooth(dat48$biomass, dat48$HH.sigma.48, cex = 0, xlab = "biomass", ylab = "HH", main = "48m", col = "grey") +points(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2007], col = 1, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2008], col = 2, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2009], col = 3, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = 4, cex = 0.5) +legend("topright", pch = 1, legend = c(2007, 2008, 2009, 2010), cex = 0.7, pt.cex = 0.5, col = 1:4, bty = "n", xjust = 1) +scatter.smooth(dat48$biomass, dat48$HV.sigma.48, cex = 0, xlab = "biomass", ylab = "HV", main = "48m", col = "grey") +points(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2007], col = 1, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2008], col = 2, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2009], col = 3, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], col = 4, cex = 0.5) +legend("topright", pch = 1, legend = c(2007, 2008, 2009, 2010), cex = 0.7, pt.cex = 
0.5, col = 1:4, bty = "n", xjust = 1) # scatter.smooth(dat60$biomass,dat60$HV.sigma.60,xlab="biomass",ylab="HV",main="60m",col="grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60,xlab="biomass",ylab="HV",main="60m",col="grey") -par(mfrow=c(1,2)) -scatter.smooth(dat48$biomass,dat48$HV.sigma.48/dat48$HH.sigma.48,xlab="biomass",ylab="HV/HH",main="48m",col="grey") +par(mfrow = c(1, 2)) +scatter.smooth(dat48$biomass, dat48$HV.sigma.48 / dat48$HH.sigma.48, xlab = "biomass", ylab = "HV/HH", main = "48m", col = "grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60/dat60$HV.sigma.60,xlab="biomass",ylab="HV/HV",main="60m",col="grey") -scatter.smooth(dat48$biomass,dat48$HH.sigma.48*dat48$HV.sigma.48,xlab="biomass",ylab="HHxHV",main="48m",col="grey") +scatter.smooth(dat48$biomass, dat48$HH.sigma.48 * dat48$HV.sigma.48, xlab = "biomass", ylab = "HHxHV", main = "48m", col = "grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60*dat60$HV.sigma.60,xlab="biomass",ylab="HVxHV",main="60m",col="grey") -par(mfrow=c(1,1)) -scatter.smooth(dat48$biomass,(dat48$HH.sigma.48-dat48$HV.sigma.48)/(dat48$HH.sigma.48+dat48$HV.sigma.48),xlab="biomass",ylab="(HH-HV)/(HH+HV)",main="48m", col="gray") +par(mfrow = c(1, 1)) +scatter.smooth(dat48$biomass, (dat48$HH.sigma.48 - dat48$HV.sigma.48) / (dat48$HH.sigma.48 + dat48$HV.sigma.48), xlab = "biomass", ylab = "(HH-HV)/(HH+HV)", main = "48m", col = "gray") # scatter.smooth(dat60$biomass,(dat60$HV.sigma.60-dat60$HV.sigma.60)/(dat60$HV.sigma.60+dat60$HV.sigma.60),xlab="biomass",ylab="(HV-HV)/(HV+HV)",main="60m", col="gray") -par(mfrow=c(4,2),mar=c(4,4,2,2)) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2007],col="grey",xlab="biomass",ylab="HH",main="2007") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2007],col="grey",xlab="biomass",ylab="HV",main="2007") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2008],col="grey",xlab="biomass",ylab="HH",main="2008") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2008],col="grey",xlab="biomass",ylab="HV",main="2008") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2009],col="grey",xlab="biomass",ylab="HH",main="2009") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2009],col="grey",xlab="biomass",ylab="HV",main="2009") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010") - points(dat48$biomass[format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%m")>10],col="red",xlab="biomass",ylab="HV",main="2010") - legend("topright",pch=1,legend=c("!Dec","Dec"), cex=0.7,pt.cex=0.5,col=c("grey","red"),bty="n",xjust=1) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HV",main="2010") - points(dat48$biomass[format(dat48$scndate,"%m")>10],dat48$HV.sigma.48[format(dat48$scndate,"%m")>10],col="red",xlab="biomass",ylab="HV",main="2010") - legend("topright",pch=1,legend=c("!Dec","Dec"), cex=0.7,pt.cex=0.5,col=c("grey","red"),bty="n",xjust=1) - -par(mfrow=c(1,2)) -plot(dat48$scndate,dat48$HH.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HH") 
-plot(dat48$scndate,dat48$HV.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HV") -mtext("On same scale", side=3, line=-2, outer=TRUE, cex=1, font=2) - -par(mfrow=c(1,2)) -plot(dat48$scndate,dat48$HH.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HH") -plot(dat48$scndate,dat48$HV.sigma.48,ylim=c(0,max(dat48$HV.sigma.48)),xlab="Date",ylab="HV") -mtext("By Date", side=3, line=-2, outer=TRUE, cex=1, font=2) - -par(mfrow=c(1,2)) -plot(dat48$scndate[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],xlab="2010",ylab="HH") -plot(dat48$scndate[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],xlab="2010",ylab="HV") -mtext("2010 only", side=3, line=-3, outer=TRUE, cex=1, font=2) - -if(leaf.off==1){ -par(mfrow=c(2,2)) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010 only") -points(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10]) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010 only") -points(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10]) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],col="grey",xlab="biomass",ylab="HH",main="2010 only,Dec. removed") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],col="grey",xlab="biomass",ylab="HV",main="2010 only,Dec. 
removed") +par(mfrow = c(4, 2), mar = c(4, 4, 2, 2)) +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2007], col = "grey", xlab = "biomass", ylab = "HH", main = "2007") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2007], col = "grey", xlab = "biomass", ylab = "HV", main = "2007") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2008], col = "grey", xlab = "biomass", ylab = "HH", main = "2008") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2008], col = "grey", xlab = "biomass", ylab = "HV", main = "2008") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2009], col = "grey", xlab = "biomass", ylab = "HH", main = "2009") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2009], col = "grey", xlab = "biomass", ylab = "HV", main = "2009") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010") +points(dat48$biomass[format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%m") > 10], col = "red", xlab = "biomass", ylab = "HV", main = "2010") +legend("topright", pch = 1, legend = c("!Dec", "Dec"), cex = 0.7, pt.cex = 0.5, col = c("grey", "red"), bty = "n", xjust = 1) +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HV", main = "2010") +points(dat48$biomass[format(dat48$scndate, "%m") > 10], dat48$HV.sigma.48[format(dat48$scndate, "%m") > 10], col = "red", xlab = "biomass", ylab = "HV", main = "2010") +legend("topright", pch = 1, legend = c("!Dec", "Dec"), cex = 0.7, pt.cex = 0.5, col = c("grey", "red"), bty = "n", xjust = 1) + +par(mfrow = c(1, 2)) +plot(dat48$scndate, dat48$HH.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HH") +plot(dat48$scndate, dat48$HV.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HV") +mtext("On same scale", side = 3, line = -2, outer = TRUE, cex = 1, font = 2) + +par(mfrow = c(1, 2)) +plot(dat48$scndate, dat48$HH.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HH") +plot(dat48$scndate, dat48$HV.sigma.48, ylim = c(0, max(dat48$HV.sigma.48)), xlab = "Date", ylab = "HV") +mtext("By Date", side = 3, line = -2, outer = TRUE, cex = 1, font = 2) + +par(mfrow = c(1, 2)) +plot(dat48$scndate[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], xlab = "2010", ylab = "HH") +plot(dat48$scndate[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], xlab = "2010", ylab = "HV") +mtext("2010 only", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + +if (leaf.off == 1) { + par(mfrow = c(2, 2)) + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only") + points(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10]) + 
scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only") + points(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10]) + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only,Dec. removed") + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], col = "grey", xlab = "biomass", ylab = "HV", main = "2010 only,Dec. removed") } -par(mfrow=c(1,1)) -plot(format(dat48$scndate,"%Y"),dat48$HH.sigma.48,col="grey",cex=0.5, - xlab="Year",ylab="HH",main="Average backscatter/plot/year") -date.plot.HHmean<-tapply(dat48$HH.sigma.48,list(dat48$plot,format(dat48$scndate,"%Y")),mean,na.rm=TRUE) #mean HH for each plot in each year -for(i in 1:length(unique(dat48$plot))){ - lines(cbind(c(2007,2008,2009,2010),date.plot.HHmean[unique(dat48$plot)[i],]),col=i) - par(new=T) +par(mfrow = c(1, 1)) +plot(format(dat48$scndate, "%Y"), dat48$HH.sigma.48, + col = "grey", cex = 0.5, + xlab = "Year", ylab = "HH", main = "Average backscatter/plot/year" +) +date.plot.HHmean <- tapply(dat48$HH.sigma.48, list(dat48$plot, format(dat48$scndate, "%Y")), mean, na.rm = TRUE) # mean HH for each plot in each year +for (i in 1:length(unique(dat48$plot))) { + lines(cbind(c(2007, 2008, 2009, 2010), date.plot.HHmean[unique(dat48$plot)[i], ]), col = i) + par(new = T) } -par(new=FALSE, mfrow=c(1,1)) -plot(format(dat48$scndate,"%Y"),dat48$HV.sigma.48,col="grey",cex=0.5, - xlab="Year",ylab="HV",main="Average backscatter/plot/year") -date.plot.HVmean<-tapply(dat48$HV.sigma.48,list(dat48$plot,format(dat48$scndate,"%Y")),mean,na.rm=TRUE) #mean HV for each plot in each year -for(i in 1:length(unique(dat48$plot))){ - lines(cbind(c(2007,2008,2009,2010),date.plot.HVmean[unique(dat48$plot)[i],]),col=i) - par(new=T) +par(new = FALSE, mfrow = c(1, 1)) +plot(format(dat48$scndate, "%Y"), dat48$HV.sigma.48, + col = "grey", cex = 0.5, + xlab = "Year", ylab = "HV", main = "Average backscatter/plot/year" +) +date.plot.HVmean <- tapply(dat48$HV.sigma.48, list(dat48$plot, format(dat48$scndate, "%Y")), mean, na.rm = TRUE) # mean HV for each plot in each year +for (i in 1:length(unique(dat48$plot))) { + lines(cbind(c(2007, 2008, 2009, 2010), date.plot.HVmean[unique(dat48$plot)[i], ]), col = i) + par(new = T) } -#breaks data into quantiles each containing ~5% of the data -bind.bio<-tapply(dat48$biomass,cut(dat48$biomass,breaks=round(quantile(dat48$biomass,probs = seq(0, 1, 0.05))) ),mean) -bind.HH<-tapply(dat48$HH.sigma.48,cut(dat48$HH.sigma.48,breaks=quantile(dat48$HH.sigma.48,probs = seq(0, 1, 0.05)) ),mean) -bind.HV<-tapply(dat48$HV.sigma.48,cut(dat48$HV.sigma.48,breaks=quantile(dat48$HV.sigma.48,probs = seq(0, 1, 0.05)) ),mean) -par(mfrow=c(1,2)) -plot(dat48$biomass,dat48$HH.sigma.48,col="grey",pch=".",xlab="Binned Biomass",ylab="Binned HH") - points(bind.bio,bind.HH) -plot(dat48$biomass,dat48$HV.sigma.48,col="grey",,pch=".",xlab="Binned Biomass",ylab="Binned HV") - points(bind.bio,bind.HV) -mtext("Bins each contain 5% of the data points", side=3, line=-3, outer=TRUE, 
cex=1, font=2) - -#breaks data into even-length bins -bind.bio<-tapply(dat48$biomass, cut(dat48$biomass, breaks=seq(0, max(dat48$biomass), 0.05*max(dat48$biomass))),mean) -bind.HH<-tapply(dat48$HH.sigma.48,cut(dat48$HH.sigma.48,breaks=seq(0, max(dat48$HH.sigma.48), 0.05*max(dat48$HH.sigma.48))),mean) -bind.HV<-tapply(dat48$HV.sigma.48,cut(dat48$HV.sigma.48,breaks=seq(0, max(dat48$HV.sigma.48), 0.05*max(dat48$HV.sigma.48))),mean) -par(mfrow=c(1,2)) -plot(dat48$biomass,dat48$HH.sigma.48,col="grey",pch=".",xlab="Binned Biomass",ylab="Binned HH") - points(bind.bio,bind.HH) -plot(dat48$biomass,dat48$HV.sigma.48,col="grey",,pch=".",xlab="Binned Biomass",ylab="Binned HV") - points(bind.bio,bind.HV) -mtext("Bins each contain 5% of data range", side=3, line=-3, outer=TRUE, cex=1, font=2) - -par(mfrow=c(1,2)) -bplot.xy(dat48$biomass,dat48$HH.sigma.48,N=15,xlab="biomass",ylab="HH (simga naught)") -bplot.xy(dat48$biomass,dat48$HV.sigma.48,N=15,xlab="biomass",ylab="HV (simga naught)") +# breaks data into quantiles each containing ~5% of the data +bind.bio <- tapply(dat48$biomass, cut(dat48$biomass, breaks = round(quantile(dat48$biomass, probs = seq(0, 1, 0.05)))), mean) +bind.HH <- tapply(dat48$HH.sigma.48, cut(dat48$HH.sigma.48, breaks = quantile(dat48$HH.sigma.48, probs = seq(0, 1, 0.05))), mean) +bind.HV <- tapply(dat48$HV.sigma.48, cut(dat48$HV.sigma.48, breaks = quantile(dat48$HV.sigma.48, probs = seq(0, 1, 0.05))), mean) +par(mfrow = c(1, 2)) +plot(dat48$biomass, dat48$HH.sigma.48, col = "grey", pch = ".", xlab = "Binned Biomass", ylab = "Binned HH") +points(bind.bio, bind.HH) +plot(dat48$biomass, dat48$HV.sigma.48, col = "grey", , pch = ".", xlab = "Binned Biomass", ylab = "Binned HV") +points(bind.bio, bind.HV) +mtext("Bins each contain 5% of the data points", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + +# breaks data into even-length bins +bind.bio <- tapply(dat48$biomass, cut(dat48$biomass, breaks = seq(0, max(dat48$biomass), 0.05 * max(dat48$biomass))), mean) +bind.HH <- tapply(dat48$HH.sigma.48, cut(dat48$HH.sigma.48, breaks = seq(0, max(dat48$HH.sigma.48), 0.05 * max(dat48$HH.sigma.48))), mean) +bind.HV <- tapply(dat48$HV.sigma.48, cut(dat48$HV.sigma.48, breaks = seq(0, max(dat48$HV.sigma.48), 0.05 * max(dat48$HV.sigma.48))), mean) +par(mfrow = c(1, 2)) +plot(dat48$biomass, dat48$HH.sigma.48, col = "grey", pch = ".", xlab = "Binned Biomass", ylab = "Binned HH") +points(bind.bio, bind.HH) +plot(dat48$biomass, dat48$HV.sigma.48, col = "grey", , pch = ".", xlab = "Binned Biomass", ylab = "Binned HV") +points(bind.bio, bind.HV) +mtext("Bins each contain 5% of data range", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + +par(mfrow = c(1, 2)) +bplot.xy(dat48$biomass, dat48$HH.sigma.48, N = 15, xlab = "biomass", ylab = "HH (simga naught)") +bplot.xy(dat48$biomass, dat48$HV.sigma.48, N = 15, xlab = "biomass", ylab = "HV (simga naught)") dev.off() -#Run curve fitting function -n.reps<- 5000 #sets value for n.adapt and n.iter -n.chain<-3 #number of MCMC chains to run -bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) +# Run curve fitting function +n.reps <- 5000 # sets value for n.adapt and n.iter +n.chain <- 3 # number of MCMC chains to run +bayes.curve.fit(outpath, coord.set, fia, n.reps, n.chain) @@ -467,62 +476,62 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) ######################################################## ####################### -##Use Non-linear Least Squares (nls) to fit rectangular hyperbola NOTE:This method is not preferred. 
+## Use Non-linear Least Squares (nls) to fit rectangular hyperbola NOTE:This method is not preferred. # ####################### # ##NOTE: backscatter=((alpha*beta*biomass)/(beta + alpha*biomass)) # buff<-c("48", "60") -# -# # col.names<-c("pol_band", +# +# # col.names<-c("pol_band", # # "buffer_radius(m)", -# # "biomass_R2", -# # "mod.alpha", -# # "pval.alpha", -# # "alpha.ci.lower", +# # "biomass_R2", +# # "mod.alpha", +# # "pval.alpha", +# # "alpha.ci.lower", # # "alpha.ci.upper", -# # "mod.beta", -# # "pval.b", +# # "mod.beta", +# # "pval.b", # # "beta.ci.upper", # # "beta.ci.upper", -# # "num.iters", +# # "num.iters", # # "convergence") # # mod.params<-matrix(nrow=1,ncol=length(col.names)) # # colnames(mod.params)<-col.names -# # +# # # # par(mfrow=c(length(pol_bands),length(buff))) # # for(i in 1:length(pol_bands)){ # # for(j in 1:length(buff)){ -# # +# # # # y<-eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$",sep=''),paste(pol_bands[i],'.sigma.',buff[j],sep='')))) # # x<-(eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$biomass",sep='')))))^0.5 -# # +# # # # # Plot backscatter v biomass # # plot(x, y, # # xlab=expression(sqrt(biomass)), # # ylab=pol_bands[i], # # main=buff[j], # # col=51,pch=19, cex=0.6, -# # xlim=c(min(x),max(x)), +# # xlim=c(min(x),max(x)), # # ylim=c(min(y),max(y)), # # las=1, cex.axis=1.2) -# # +# # # # # Calculate rectangular hyperbola between backscatter and biomass # # biomass_curve <- nls(formula=y ~ ((alpha*beta*x)/(beta + alpha*x)), # also in Gu 2002 -# # data=list(y = y, x = x), -# # start=list(alpha = .75, beta = mean(y[x > quantile(x)[4]])), +# # data=list(y = y, x = x), +# # start=list(alpha = .75, beta = mean(y[x > quantile(x)[4]])), # # na.action="na.exclude", trace=F) # # biomass_R2 <- 1 - var(residuals(biomass_curve)) / var(y) # R2 -# # +# # # # # Plot rectangular hyperbola model fit # # mod.alpha <- summary(biomass_curve)$parameters[1] # alpha value # # mod.beta <- summary(biomass_curve)$parameters[2] # Beta value -# # mod.biomass <- seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), by=0.01) -# # mod.HH <- (mod.alpha*mod.beta*mod.biomass)/(mod.beta + mod.alpha*mod.biomass) +# # mod.biomass <- seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), by=0.01) +# # mod.HH <- (mod.alpha*mod.beta*mod.biomass)/(mod.beta + mod.alpha*mod.biomass) # # lines(x=mod.biomass, y=mod.HH, col="black", lty=1, lwd=2.5) -# # +# # # # legend("topright", legend=c(paste("R^2=", format(biomass_R2, digits=2)), # # paste("alpha=",format(mod.alpha,digits=2)), # # paste("beta=",format(mod.beta,digits=2))), bty="n",cex=1.2) -# # +# # # # # Write model parameters to output file # # num.iters <- as.numeric(biomass_curve$convInfo[2]) # # conv <- as.numeric(biomass_curve$convInfo[1]) @@ -540,23 +549,23 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # # beta.ci.lower <- as.numeric(ci[2,1]) # # beta.ci.upper <- as.numeric(ci[2,2]) # # pval.b <- as.numeric(summary(biomass_curve)$parameters[8]) -# # mod.params <- rbind(mod.params,as.vector(c(pol_bands[i], -# # buff[j], -# # biomass_R2, -# # mod.alpha, -# # pval.a, -# # alpha.ci.lower, +# # mod.params <- rbind(mod.params,as.vector(c(pol_bands[i], +# # buff[j], +# # biomass_R2, +# # mod.alpha, +# # pval.a, +# # alpha.ci.lower, # # alpha.ci.upper, -# # mod.beta, +# # mod.beta, # # pval.b, -# # beta.ci.lower, +# # beta.ci.lower, # # beta.ci.upper, -# # num.iters, +# # num.iters, # # conv))) # # print(paste(pol_bands[i],buff[j])) # # }} # # mod.params<-mod.params[2:nrow(mod.params),] -# # +# # # # xs<-seq(from=1, to=4,by=1) # 
# ys<-as.numeric(mod.params[,4]) # # upper<-as.numeric(mod.params[,7]) @@ -565,41 +574,41 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # # plot(xs,ys,ylim=c(0,max(upper)+0.1*max(upper)),ylab="Alpha estimate",xlab="pol.band/buffer.radius") # # segments(xs,lower,xs,upper,col="black",lwd=2) # # legend("topright",legend=c("1=48,HH", "2=60,HH","3=48,HV", "4=60,HV")) -# # +# # # # rhyp<-mod.params -# -# +# +# # ####################### # ##Use Maximum likelihood to fit curves # ####################### # data<- read.csv(file.path(outpath, "WLEF_dat48.csv"), sep=",", header=T) ##location of PALSAR metadata table -# +# # # model<-c("Holl4", "RecHyp", "Logistic") -# -# +# +# # # for(k in 1:length(model)){ #loop over different functional forms # for(i in 1:length(pol_bands)){ # for(j in 1 ){ -# +# # y<-eval(parse(text=paste(paste("data$",pol_bands[i],'.sigma.',buff[j],sep='')))) #backscatter values # # x<-(eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$biomass",sep='')))))^0.5 # x<-(data$biomass) #biomass # # max.y<-mean(y[x>=quantile(x)[4]]) #starting est. of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile -# +# # ####################### # ##Use Maximum likelihood to fit Holling Type 4 # ####################### # model<-"Holl4" -# param_est<-matrix(nrow=0,ncol=8) +# param_est<-matrix(nrow=0,ncol=8) # par(mfrow=c(1,length(pol_bands))) # pdf(paste(outpath,"/",model,"_curvefits.pdf",sep=""),width = 6, height = 6, paper='special') -# +# # a<- mean(y[x>=quantile(x,na.rm=TRUE)[4]],na.rm=TRUE) #starting est. of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile # b<-quantile(x,na.rm=TRUE)[4] # c<--1 -# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values +# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values # params<-c(a,b,c,sd) -# +# # fit <- function(params,x,y){ # a <-params[1] # b <-params[2] @@ -607,21 +616,21 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # sd<-params[4] # # y.pred<-(max.y*x)/(ki+x) # y.pred<-(a*x^2)/(b+(c*x)+x^2) -# +# # LL<- -sum(dnorm(y,y.pred,sd,log=TRUE)) # return(LL) # } #function -# +# # fit.mod = optim(par=params,fit,x=x,y=y) # fit.mod # aic.mod<- -2*fit.mod$value + 2*length(params) -# +# # params <- c(pol_bands[i],buff[j],fit.mod$par[1:3],2*fit.mod$par[2]/fit.mod$par[3],fit.mod$par[4],aic.mod) #par means parameter estimates # param_est<-rbind(param_est, params) # xseq = seq(0,max(x),length=1000) -# +# # plot(x,y, xlab="biomass",ylab=paste(pol_bands[i],buff[j],sep="_"),main=model, #something wrong here with main title -# xlim=c(min(x),max(x)), +# xlim=c(min(x),max(x)), # ylim=c(min(y),max(y)), # pch=16,col="#CCCCCC") # abline(a=0,b=0) @@ -631,47 +640,47 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # paste("b=",format(fit.mod$par[2],digits=2)), # paste("c=",format(fit.mod$par[3],digits=2)), # paste("aic=", format(aic.mod, digits=4))), bty="n",cex=0.85) -# +# # dev.off() # colnames(param_est)<-c("pol_bands","buff","a","b","c","bio.at.peak.backscatter", "sd","AIC") # param_est # write.table(param_est,file=paste(outpath, "/", model, "_param_estimates.csv",sep=""),quote=FALSE,sep=",",row.names=F) -# +# # ####################### # ##Use Maximum likelihood to fit Logistic # ####################### # model<-"Logistic" -# param_est<-matrix(nrow=0,ncol=7) +# param_est<-matrix(nrow=0,ncol=7) # par(mfrow=c(1,length(pol_bands))) # pdf(paste(outpath,"/",model,"_curvefits.pdf",sep=""),width = 6, height = 6, paper='special') -# +# # a<- max(y) #starting est. 
of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile # b<-2 #slope of initial portion of the curve # c<-mean(x[y>=quantile(y,0.9,na.rm=TRUE)],na.rm=TRUE) -# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values +# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values # params<-c(a,b,c,sd) -# +# # fit <- function(params,x,y){ # a <-params[1] # b <-params[2] # c <-params[3] # sd<-params[4] # y.pred<- a/(1+b*exp(-c*x)) -# +# # LL<- -sum(dnorm(y,y.pred,sd,log=TRUE)) # return(LL) # } #function -# +# # fit.mod = optim(par=params,fit,x=x,y=y) # fit.mod # aic.mod<- -2*fit.mod$value + 2*length(params) -# +# # params <- c(pol_bands[i],buff[j],fit.mod$par[1:4],aic.mod) #par means parameter estimates # param_est<-rbind(param_est, params) # xseq = seq(0,max(x),length=1000) -# +# # plot(x,y, xlab="biomass",ylab=paste(pol_bands[i],buff[j],sep="_"),main=model, #something wrong here with main title -# xlim=c(min(x),max(x)), +# xlim=c(min(x),max(x)), # ylim=c(min(y),max(y)), # pch=16,col="#CCCCCC") # abline(a=0,b=0) @@ -680,25 +689,25 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # paste("b=",format(fit.mod$par[2],digits=2)), # paste("c=",format(fit.mod$par[3],digits=2)), # paste("aic=", format(aic.mod, digits=4))), bty="n",cex=0.85) -# +# # dev.off() # colnames(param_est)<-c("pol_bands","buff","a","b","c","sd","AIC") # param_est # write.table(param_est,file=paste(outpath, "/", model, "_param_estimates.csv",sep=""),quote=FALSE,sep=",",row.names=F) -# -# -# +# +# +# # }#for j looping over pol_bands # }#for i looping over buff -# +# # }#for k looping over models -# +# # ################################## # ################################## -# -# +# +# # ################# # ##diagnotics? # ################# -# -# +# +# diff --git a/modules/data.remote/inst/scripts/old/ChEAS_FIA_03102014.R b/modules/data.remote/inst/scripts/old/ChEAS_FIA_03102014.R index 6cbc7b3a62a..548b64e2c9b 100644 --- a/modules/data.remote/inst/scripts/old/ChEAS_FIA_03102014.R +++ b/modules/data.remote/inst/scripts/old/ChEAS_FIA_03102014.R @@ -1,6 +1,6 @@ -##Author Brady S. Hardiman 11/12/2013 +## Author Brady S. 
Hardiman 11/12/2013
-##Read in fuzzed FIA coordinates supplied by Andy Finley (MSU), extract palsar backscatter values, fit curves, save figures and extracted values (with coordinates)
+## Read in fuzzed FIA coordinates supplied by Andy Finley (MSU), extract palsar backscatter values, fit curves, save figures and extracted values (with coordinates)
################################
## Load Required Packages
@@ -19,95 +19,99 @@ library(fields)
################################
## OPTIONS
################################
-kml=0 #1 = generate and save kml files of extraction coordinates; 0 = do not generate new kml
-fia=1 #1 = use FIA coordinates, 0 = use WLEF/Park Falls Tower coordinates
-leaf.off=0 #1=include PALSAR scenes acquired duing leaf off period of the year, 0=exclude leaf off scene dates
+kml <- 0 # 1 = generate and save kml files of extraction coordinates; 0 = do not generate new kml
+fia <- 1 # 1 = use FIA coordinates, 0 = use WLEF/Park Falls Tower coordinates
+leaf.off <- 0 # 1=include PALSAR scenes acquired during leaf off period of the year, 0=exclude leaf off scene dates
# buff=c(48) #vector of buffer sizes (in meters) to extract
-coord.set<-c("WLEF", "FIA")
+coord.set <- c("WLEF", "FIA")
# metadata<- read.csv("~/data.remote/output/metadata/output_metadata.csv", sep="\t", header=T) ##for Brady's Linux
-metadata<- read.csv("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/metadata/output_metadata.csv", sep="\t", header=T) ##location of PALSAR metadata table
-palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/palsar_scenes/geo_corrected_single_sigma") ##location of PALSAR raw files
-calib_inpath <-"/Users/hardimanb/Desktop/data.remote(Andys_Copy)/biometry" ##location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter
-outpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/data") ##For saving
+metadata <- read.csv("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/metadata/output_metadata.csv", sep = "\t", header = T) ## location of PALSAR metadata table
+palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/palsar_scenes/geo_corrected_single_sigma") ## location of PALSAR raw files
+calib_inpath <- "/Users/hardimanb/Desktop/data.remote(Andys_Copy)/biometry" ## location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter
+outpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/data") ## For saving
################################
## Read in coordinate data for calibration of PALSAR backscatter returns
-## Uses PALSAR metadata file to determine geographic extent of available PALSAR data and crops extraction coord set
+## Uses PALSAR metadata file to determine geographic extent of available PALSAR data and crops extraction coord set
## to match PALSAR extent. Reprojects extraction coords to match PALSAR geotiffs.
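# --- Editorial sketch (not part of this patch): the comment block above describes the crop-and-reproject
# step that the following code performs with sp. The idea: build a lon/lat polygon from scene corner
# coordinates, keep only the points that over() places inside it (over() returns NA for points outside),
# then reproject the survivors to the raster CRS before extraction. The corner coordinates, points, and
# object names below are made up purely for illustration:
library(sp)
corners <- rbind(c(-92, 46), c(-89, 46), c(-89, 44), c(-92, 44), c(-92, 46)) # closed lon/lat ring
scene.poly <- SpatialPolygons(list(Polygons(list(Polygon(corners)), "scene")),
  proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
)
pts <- SpatialPoints(rbind(c(-90.5, 45), c(-85, 45)),
  proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
)
inside <- !is.na(over(pts, scene.poly)) # TRUE only for the point that falls inside the scene polygon
pts.utm <- spTransform(pts[inside], CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs")) # match the geotiff CRS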
################################ -if(fia==1){ #EXTRACTS FROM FIA COORDINATES -# calib_inpath <-"~/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##for Brady's Linux - calib_infile <-read.csv(file.path(calib_inpath,"wi-biomass-fuzzed.csv"), sep=",", header=T) #Wisconsin FIA plots - coords<-data.frame(calib_infile$FUZZED_LON,calib_infile$FUZZED_LAT) #lon and lat (ChEAS: UTM Zone 15N NAD83) - Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - -# Sr1<-spTransform(Sr1,CRS(raster)) - -# wi.fia<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) - latlon<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) -# FIA.points <- SpatialPointsDataFrame(Sr1, wi.fia) #convert to class="SpatialPointsDataFrame" for export as kml - spdf.latlon <- SpatialPointsDataFrame(Sr1, latlon) #convert to class="SpatialPointsDataFrame" for export as kml - if(kml==1){writeOGR(spdf.latlon, layer=1, "WI_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) +if (fia == 1) { # EXTRACTS FROM FIA COORDINATES + # calib_inpath <-"~/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##for Brady's Linux + calib_infile <- read.csv(file.path(calib_inpath, "wi-biomass-fuzzed.csv"), sep = ",", header = T) # Wisconsin FIA plots + coords <- data.frame(calib_infile$FUZZED_LON, calib_infile$FUZZED_LAT) # lon and lat (ChEAS: UTM Zone 15N NAD83) + Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + + # Sr1<-spTransform(Sr1,CRS(raster)) + + # wi.fia<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) + latlon <- data.frame(calib_infile$FUZZED_LAT, calib_infile$FUZZED_LON) + # FIA.points <- SpatialPointsDataFrame(Sr1, wi.fia) #convert to class="SpatialPointsDataFrame" for export as kml + spdf.latlon <- SpatialPointsDataFrame(Sr1, latlon) # convert to class="SpatialPointsDataFrame" for export as kml + if (kml == 1) { + writeOGR(spdf.latlon, layer = 1, "WI_FIA.kml", driver = "KML") # export as kml (this puts in in the Home folder) } -}else{#EXTRACTS FROM WLEF COORDINATES +} else { # EXTRACTS FROM WLEF COORDINATES # calib_inpath <-"~/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##for Brady's Linux - calib_infile <-read.csv(file.path(calib_inpath,"biometry_trimmed.csv"), sep=",", header=T) #WLEF plots -# upid<-paste(calib_infile$plot,calib_infile$subplot,sep='.') #create unique plot identifier -# calib_infile<-cbind(calib_infile[,1:2],upid,calib_infile[,3:8]) - calib_infile<-aggregate(calib_infile, list(calib_infile[,1]), mean) ##This will give errors, but these can be safely ignored - calib_infile$plot<-calib_infile$Group.1 - calib_infile<-cbind(calib_infile[,2],calib_infile[,5:9]) - colnames(calib_infile)<-c("plot","easting","northing","adult_density","sapling_density","ABG_biomass") - - coords<-data.frame(calib_infile$easting,calib_infile$northing) #eastings and northings (ChEAS: UTM Zone 15N NAD83) - Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) - wlef<-data.frame(paste(calib_infile$plot,calib_infile$subplot,sep="_")) + calib_infile <- read.csv(file.path(calib_inpath, "biometry_trimmed.csv"), sep = ",", header = T) # WLEF plots + # upid<-paste(calib_infile$plot,calib_infile$subplot,sep='.') #create unique plot identifier + # calib_infile<-cbind(calib_infile[,1:2],upid,calib_infile[,3:8]) + calib_infile <- aggregate(calib_infile, list(calib_infile[, 1]), mean) ## This will give 
errors, but these can be safely ignored + calib_infile$plot <- calib_infile$Group.1 + calib_infile <- cbind(calib_infile[, 2], calib_infile[, 5:9]) + colnames(calib_infile) <- c("plot", "easting", "northing", "adult_density", "sapling_density", "ABG_biomass") + + coords <- data.frame(calib_infile$easting, calib_infile$northing) # eastings and northings (ChEAS: UTM Zone 15N NAD83) + Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) + wlef <- data.frame(paste(calib_infile$plot, calib_infile$subplot, sep = "_")) epsg4326String <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs") - Sr1_4google <- spTransform(Sr1,epsg4326String) #class=SpatialPoints - Sr1_4google <- SpatialPointsDataFrame(Sr1_4google, wlef) #convert to class="SpatialPointsDataFrame" for export as kml - if(kml==1){writeOGR(Sr1_4google, layer=1, "WLEF.kml", driver="KML") #export as kml (this puts in in the Home folder) + Sr1_4google <- spTransform(Sr1, epsg4326String) # class=SpatialPoints + Sr1_4google <- SpatialPointsDataFrame(Sr1_4google, wlef) # convert to class="SpatialPointsDataFrame" for export as kml + if (kml == 1) { + writeOGR(Sr1_4google, layer = 1, "WLEF.kml", driver = "KML") # export as kml (this puts in in the Home folder) } } ## corner coords for cheas domain based on avaialable PALSAR data. (Maybe switch to bounding.box.xy()? ) -ChEAS_PLASAR_extent <-rbind(cbind(min(metadata$scn_nwlon),max(metadata$scn_nwlat)), - cbind(max(metadata$scn_nelon),max(metadata$scn_nelat)), - cbind(max(metadata$scn_selon),min(metadata$scn_selat)), - cbind(min(metadata$scn_swlon),min(metadata$scn_swlat)), - cbind(min(metadata$scn_nwlon),max(metadata$scn_nwlat))) - -ChEAS_PLASAR_extent<- Polygon(ChEAS_PLASAR_extent) #spatial polygon from cheas-palsar extent -Srs1<- Polygons(list(ChEAS_PLASAR_extent),"ChEAS_PLASAR_extent") #spatial polygons (plural) -ChEAS_PLASAR_extent<-SpatialPolygons(list(Srs1),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - -Sr1<-spTransform(Sr1,CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) +ChEAS_PLASAR_extent <- rbind( + cbind(min(metadata$scn_nwlon), max(metadata$scn_nwlat)), + cbind(max(metadata$scn_nelon), max(metadata$scn_nelat)), + cbind(max(metadata$scn_selon), min(metadata$scn_selat)), + cbind(min(metadata$scn_swlon), min(metadata$scn_swlat)), + cbind(min(metadata$scn_nwlon), max(metadata$scn_nwlat)) +) + +ChEAS_PLASAR_extent <- Polygon(ChEAS_PLASAR_extent) # spatial polygon from cheas-palsar extent +Srs1 <- Polygons(list(ChEAS_PLASAR_extent), "ChEAS_PLASAR_extent") # spatial polygons (plural) +ChEAS_PLASAR_extent <- SpatialPolygons(list(Srs1), proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + +Sr1 <- spTransform(Sr1, CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) # FIA.in.cheas<-as.vector(over(FIA.points,ChEAS_PLASAR_extent)) #subset of FIA plots that falls within Cheas-PALSAR extent -coords.in.cheas<-as.vector(over(Sr1,ChEAS_PLASAR_extent)) #subset of plots that falls within Cheas-PALSAR extent +coords.in.cheas <- as.vector(over(Sr1, ChEAS_PLASAR_extent)) # subset of plots that falls within Cheas-PALSAR extent # FIA.in.cheas[is.na(FIA.in.cheas)]<-0 #replace na's with 0's for indexing -coords.in.cheas[is.na(coords.in.cheas)]<-0 #replace na's with 0's for indexing +coords.in.cheas[is.na(coords.in.cheas)] <- 0 # replace na's with 0's for indexing -##Biomass source data -if(fia==1){ - biomass<-calib_infile[as.logical(coords.in.cheas),4] #for FIA -} else{ - 
biomass<-calib_infile[as.logical(coords.in.cheas),'ABG_biomass'] #for WLEF +## Biomass source data +if (fia == 1) { + biomass <- calib_infile[as.logical(coords.in.cheas), 4] # for FIA +} else { + biomass <- calib_infile[as.logical(coords.in.cheas), "ABG_biomass"] # for WLEF } ## Subset extraction coords that fall within PALSAR observation area -# cheasFIA<-Sr1@coords[FIA.in.cheas==1,] -cheas.coords<-Sr1@coords[coords.in.cheas==1,] ##subset of coords that falls within Cheas-PALSAR extent +# cheasFIA<-Sr1@coords[FIA.in.cheas==1,] +cheas.coords <- Sr1@coords[coords.in.cheas == 1, ] ## subset of coords that falls within Cheas-PALSAR extent # spcheasFIA <- SpatialPoints(cheasFIA,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -spcheascoords <- SpatialPoints(cheas.coords,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) +spcheascoords <- SpatialPoints(cheas.coords, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -##Plot-IDs; will be used later on for generating time series of backscatter values -if(fia==1){ - plot<-seq(1,nrow(cheas.coords),1) #for FIA NOTE: Add in FIA plot unique identifiers if available -} else{ - plot<-calib_infile[as.logical(coords.in.cheas),'plot'] #for WLEF +## Plot-IDs; will be used later on for generating time series of backscatter values +if (fia == 1) { + plot <- seq(1, nrow(cheas.coords), 1) # for FIA NOTE: Add in FIA plot unique identifiers if available +} else { + plot <- calib_infile[as.logical(coords.in.cheas), "plot"] # for WLEF } -# writeOGR(cheasFIA, layer=1, "cheas_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) +# writeOGR(cheasFIA, layer=1, "cheas_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) ################################ ## Begin extracting PALSAR values at FIA plot coordinates @@ -118,185 +122,186 @@ if(fia==1){ # date<-as.Date(metadata$scndate, format='%Y%m%d') # col_names<-c(rbind(paste(date, "HH",sep="_"),paste(date, "HV",sep="_"))) -pol_bands<-c("HH", "HV") -numfiles<-length(list.files(file.path(palsar_inpath, pol_bands[1]), pattern=".tif" ,recursive=F)) +pol_bands <- c("HH", "HV") +numfiles <- length(list.files(file.path(palsar_inpath, pol_bands[1]), pattern = ".tif", recursive = F)) # lake_extracted<-matrix(NA, nrow(lake_coords),length(pol_bands)*numfiles) # disturbance_extracted_40m<-matrix(NA, nrow(disturbance_coords),length(pol_bands)*numfiles) -# +# # colnames(lake_extracted)<-col_names # colnames(disturbance_extracted)<-col_names # colnames(disturbance_extracted_40m)<-col_names -if( fia==1){ -# extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. -# extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. - extracted_48m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -# extracted_60m<-matrix(nrow=0, ncol=7) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -} else{ -# extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. -# extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. - extracted_48m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. 
ncol=# of pol_bands -# extracted_60m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +if (fia == 1) { + # extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. + # extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. + extracted_48m <- matrix(nrow = 0, ncol = 8) # matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands + # extracted_60m<-matrix(nrow=0, ncol=7) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +} else { + # extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. + # extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. + extracted_48m <- matrix(nrow = 0, ncol = 8) # matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands + # extracted_60m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands } # colnames(extracted_48m)<-pol_bands # colnames(extracted_60m)<-pol_bands -for(i in 1:numfiles){ - HH_filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[1]), pattern=".tif" ,recursive=F)) - HH_inpath<-file.path(palsar_inpath, pol_bands[1],HH_filelist[i]) - HH_rast<-raster(HH_inpath) - - HV_filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern=".tif" ,recursive=F)) - HV_inpath<-file.path(palsar_inpath, pol_bands[2],HV_filelist[i]) - HV_rast<-raster(HV_inpath) - - ################################################ - ## Check each extraction coordinate to see if it falls inside the bounding box of this palsar scene. - ## Only extract the ones that do. - ## NOTE: The bounding box for the PALSAR scene will be larger than the PALSAR scene itself (due to a tilted orbital path). - ## This means that the extraction loop will some number of palsar scenes that have all zeros for the backscatter values. - ## These zeros are truncated in post processing, prior to curve fitting. 
- ################################################ -# rast_box<-bbox(HH_rast@extent) #bounding box of single palsar scene - scnid<-substr(as.character(HV_filelist[i]),1,15) - - ##create data.frame from raster corner coords by querying metadata - ##NOTE: I multiply the lon coord by -1 to make it work with the 'longlat' projection - pals.ext<-Polygon(rbind( - c(xmin(HH_rast),ymin(HH_rast)), - c(xmin(HH_rast),ymax(HH_rast)), - c(xmax(HH_rast),ymax(HH_rast)), - c(xmax(HH_rast),ymin(HH_rast)), - c(xmin(HH_rast),ymin(HH_rast)))) - - ##make spatial polygon from raster extent - pals.ext.poly<- Polygons(list(pals.ext),"pals.ext") #spatial polygons (plural) - scn.extent<-SpatialPolygons(list(pals.ext.poly),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - -# rast_Poly<-Polygon(rbind( #polygon from bbox NOTE: bbox is not same as true raster extent -# c(rast_box[1,1],rast_box[2,2]), -# c(rast_box[1,2],rast_box[2,2]), -# c(rast_box[1,2],rast_box[2,1]), -# c(rast_box[1,1],rast_box[2,1]), -# c(rast_box[1,1],rast_box[2,2]))) -# Srs1<- Polygons(list(rast_Poly),"PALSAR_extent") #spatial polygon -# pals_ext<-SpatialPolygons(list(Srs1),proj4string=HH_rast@crs) - scn.extent<- spTransform(scn.extent,HH_rast@crs) -# if(i == 1){ - spcheascoords<-spTransform(spcheascoords,HH_rast@crs) #Convert coords being extracted to CRS of PALSAR raster files -# } - - coords.in.rast<-over(spcheascoords,scn.extent) #extraction coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene - coords.in.rast[is.na(coords.in.rast)]<-0 #replace na's with 0's for indexing - if(max(coords.in.rast)!=1){ #jump to next palsar file if no extraction coordinates fall within this one - next - } - coords.in.rast<-as.logical(coords.in.rast) - +for (i in 1:numfiles) { + HH_filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[1]), pattern = ".tif", recursive = F)) + HH_inpath <- file.path(palsar_inpath, pol_bands[1], HH_filelist[i]) + HH_rast <- raster(HH_inpath) + + HV_filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern = ".tif", recursive = F)) + HV_inpath <- file.path(palsar_inpath, pol_bands[2], HV_filelist[i]) + HV_rast <- raster(HV_inpath) + + ################################################ + ## Check each extraction coordinate to see if it falls inside the bounding box of this palsar scene. + ## Only extract the ones that do. + ## NOTE: The bounding box for the PALSAR scene will be larger than the PALSAR scene itself (due to a tilted orbital path). + ## This means that the extraction loop will some number of palsar scenes that have all zeros for the backscatter values. + ## These zeros are truncated in post processing, prior to curve fitting. 
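  ## A minimal sketch of that post-processing truncation (shown for reference only; it
  ## assumes HH and HV are the last two columns of the extracted matrix, as in the cbind()
  ## calls below, and would be run after the extraction loop rather than inside it):
  # hh <- as.numeric(extracted_48m[, ncol(extracted_48m) - 1])
  # hv <- as.numeric(extracted_48m[, ncol(extracted_48m)])
  # extracted_48m <- extracted_48m[hh != 0 & hv != 0, , drop = FALSE]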
+ ################################################ + # rast_box<-bbox(HH_rast@extent) #bounding box of single palsar scene + scnid <- substr(as.character(HV_filelist[i]), 1, 15) + + ## create data.frame from raster corner coords by querying metadata + ## NOTE: I multiply the lon coord by -1 to make it work with the 'longlat' projection + pals.ext <- Polygon(rbind( + c(xmin(HH_rast), ymin(HH_rast)), + c(xmin(HH_rast), ymax(HH_rast)), + c(xmax(HH_rast), ymax(HH_rast)), + c(xmax(HH_rast), ymin(HH_rast)), + c(xmin(HH_rast), ymin(HH_rast)) + )) + + ## make spatial polygon from raster extent + pals.ext.poly <- Polygons(list(pals.ext), "pals.ext") # spatial polygons (plural) + scn.extent <- SpatialPolygons(list(pals.ext.poly), proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + + # rast_Poly<-Polygon(rbind( #polygon from bbox NOTE: bbox is not same as true raster extent + # c(rast_box[1,1],rast_box[2,2]), + # c(rast_box[1,2],rast_box[2,2]), + # c(rast_box[1,2],rast_box[2,1]), + # c(rast_box[1,1],rast_box[2,1]), + # c(rast_box[1,1],rast_box[2,2]))) + # Srs1<- Polygons(list(rast_Poly),"PALSAR_extent") #spatial polygon + # pals_ext<-SpatialPolygons(list(Srs1),proj4string=HH_rast@crs) + scn.extent <- spTransform(scn.extent, HH_rast@crs) + # if(i == 1){ + spcheascoords <- spTransform(spcheascoords, HH_rast@crs) # Convert coords being extracted to CRS of PALSAR raster files + # } + + coords.in.rast <- over(spcheascoords, scn.extent) # extraction coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene + coords.in.rast[is.na(coords.in.rast)] <- 0 # replace na's with 0's for indexing + if (max(coords.in.rast) != 1) { # jump to next palsar file if no extraction coordinates fall within this one + next + } + coords.in.rast <- as.logical(coords.in.rast) + ################################ - ##calibration PASLAR data from extraction coords (mean of pixels w/in 48m buffer radius) + ## calibration PASLAR data from extraction coords (mean of pixels w/in 48m buffer radius) ################################ - HH_data_48m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow - HV_data_48m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow - #extract SE's also? - scnid<-matrix(substr(as.character(HV_filelist[i]),1,15),nrow=length(HH_data_48m),ncol=1) #vector of this scnid. 
length = number of coords in this scene - palsar_date<-matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid==scnid[1]]),1,8),"%Y%m%d")),nrow=length(HH_data_48m),ncol=1) # same as above for scn date - - ##cbind for output - if(fia==1){ - all_48<-cbind(scnid,palsar_date,plot,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_48m,HV_data_48m) #for FIA (no plot identifiers) - } else{ - all_48<- cbind(scnid,palsar_date,as.character(calib_infile$plot[coords.in.rast]),spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_48m,HV_data_48m) #for WLEF - } - ##rbind to previous loop output -# if(i==1){ -# extracted_48m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_48 -# }else if(i>1){ -# extracted_48m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_48 -# } - extracted_48m<-rbind(extracted_48m,all_48) + HH_data_48m <- extract(HH_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # Extract backscatter values from all coords in this scn. This step is very slow + HV_data_48m <- extract(HV_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # Extract backscatter values from all coords in this scn. This step is very slow + # extract SE's also? + scnid <- matrix(substr(as.character(HV_filelist[i]), 1, 15), nrow = length(HH_data_48m), ncol = 1) # vector of this scnid. length = number of coords in this scene + palsar_date <- matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid == scnid[1]]), 1, 8), "%Y%m%d")), nrow = length(HH_data_48m), ncol = 1) # same as above for scn date + + ## cbind for output + if (fia == 1) { + all_48 <- cbind(scnid, palsar_date, plot, spcheascoords[coords.in.rast]@coords, biomass[coords.in.rast], HH_data_48m, HV_data_48m) # for FIA (no plot identifiers) + } else { + all_48 <- cbind(scnid, palsar_date, as.character(calib_infile$plot[coords.in.rast]), spcheascoords[coords.in.rast]@coords, biomass[coords.in.rast], HH_data_48m, HV_data_48m) # for WLEF + } + ## rbind to previous loop output + # if(i==1){ + # extracted_48m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_48 + # }else if(i>1){ + # extracted_48m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_48 + # } + extracted_48m <- rbind(extracted_48m, all_48) ############################### - #calibration PASLAR data from extraction coords (mean of pixles w/in 60m buffer radius) + # calibration PASLAR data from extraction coords (mean of pixles w/in 60m buffer radius) ############################### -# HH_data_60m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow -# HV_data_60m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. 
This step is very slow -# -# ##cbind for output -# if(fia==1){ -# all_60<-cbind(scnid,palsar_date,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for FIA (no plot identifiers) -# } else{ -# all_60<- cbind(scnid,palsar_date,calib_infile$plot[coords.in.rast],spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for WLEF -# } -# -# # ##rbind to previous loop output -# # if(i==1){ -# # extracted_60m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_60 -# # }else if(i>1){ -# # extracted_60m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_60 -# # } -# extracted_60m<-rbind(extracted_60m,all_60) - - print(paste("i=",i,sep="")) - print(scnid[1]) - print(palsar_date[1]) -# print(length(HH_data_48m) == length(HH_data_60m)) + # HH_data_60m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow + # HV_data_60m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow + # + # ##cbind for output + # if(fia==1){ + # all_60<-cbind(scnid,palsar_date,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for FIA (no plot identifiers) + # } else{ + # all_60<- cbind(scnid,palsar_date,calib_infile$plot[coords.in.rast],spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for WLEF + # } + # + # # ##rbind to previous loop output + # # if(i==1){ + # # extracted_60m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_60 + # # }else if(i>1){ + # # extracted_60m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_60 + # # } + # extracted_60m<-rbind(extracted_60m,all_60) + + print(paste("i=", i, sep = "")) + print(scnid[1]) + print(palsar_date[1]) + # print(length(HH_data_48m) == length(HH_data_60m)) } # write.csv(extracted_48m,file=paste(outpath,"/extracted_48m.csv",sep=""),quote=FALSE,row.names=F) # write.csv(extracted_60m,file=paste(outpath,"/extracted_60m.csv",sep=""),quote=FALSE,row.names=F) ## Create working copy of data (so that I don't need to re-extract if I screw up the data) -## NOTE: Here I remove the NAs from coords that don't fall with in the scene and +## NOTE: Here I remove the NAs from coords that don't fall with in the scene and ## the zeros that are an artifact of the mismatch between palsar bbox dim and palsar raster dim (due to tilted orbital path) # dat48<-data.frame(extracted_48m[as.numeric(extracted_48m[,ncol(extracted_48m)])!=0,]) #& extracted_48m[,ncol(extracted_48m)]>0,]) -dat48<-data.frame(na.exclude(extracted_48m)) +dat48 <- data.frame(na.exclude(extracted_48m)) # dat60<-data.frame(extracted_60m[as.numeric(extracted_60m[,ncol(extracted_60m)])!=0,]) #& extracted_60m[,ncol(extracted_60m)]>0,]) # dat60<-data.frame(extracted_60m) -if(fia==1){ #FIA data does not contain a plot-id, so here I add a dummy plot-id -# plot<-seq(1,nrow(dat48),1) -# dat48<-cbind(dat48[,1:2],plot,dat48[,3:7]) - colnames(dat48)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.48", "HV.sigma.48") -# colnames(dat60)<-c("scnid","scndate", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") -}else{ - 
colnames(dat48)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.48", "HV.sigma.48") -# colnames(dat60)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") +if (fia == 1) { # FIA data does not contain a plot-id, so here I add a dummy plot-id + # plot<-seq(1,nrow(dat48),1) + # dat48<-cbind(dat48[,1:2],plot,dat48[,3:7]) + colnames(dat48) <- c("scnid", "scndate", "plot", "UTM.lat", "UTM.lon", "biomass", "HH.sigma.48", "HV.sigma.48") + # colnames(dat60)<-c("scnid","scndate", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") +} else { + colnames(dat48) <- c("scnid", "scndate", "plot", "UTM.lat", "UTM.lon", "biomass", "HH.sigma.48", "HV.sigma.48") + # colnames(dat60)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") } ## NOTE: Converting to dataframe changes all values to factor, so here I reformat the data and save it -dat48$scnid<-as.character(dat48$scnid) -dat48$scndate<-as.Date(dat48$scndate,"%Y-%m-%d") -dat48$plot<-as.numeric(dat48$plot) -dat48$UTM.lat<- as.numeric(as.character(dat48$UTM.lat)) -dat48$UTM.lon<- as.numeric(as.character(dat48$UTM.lon)) -dat48$biomass<- as.numeric(as.character(dat48$biomass)) -dat48$HH.sigma.48<- as.numeric(as.character(dat48$HH.sigma.48)) -dat48$HV.sigma.48<- as.numeric(as.character(dat48$HV.sigma.48)) - -#This will exclude scenes from the leaf off period (Nov-April) -if(leaf.off==1){ #include leaf off data - dat48<-dat48 -}else{ #exclude leaf off data - dat48<-dat48[as.numeric(format(dat48$scndate,"%m"))>=05 & as.numeric(format(dat48$scndate,"%m"))<=10,] +dat48$scnid <- as.character(dat48$scnid) +dat48$scndate <- as.Date(dat48$scndate, "%Y-%m-%d") +dat48$plot <- as.numeric(dat48$plot) +dat48$UTM.lat <- as.numeric(as.character(dat48$UTM.lat)) +dat48$UTM.lon <- as.numeric(as.character(dat48$UTM.lon)) +dat48$biomass <- as.numeric(as.character(dat48$biomass)) +dat48$HH.sigma.48 <- as.numeric(as.character(dat48$HH.sigma.48)) +dat48$HV.sigma.48 <- as.numeric(as.character(dat48$HV.sigma.48)) + +# This will exclude scenes from the leaf off period (Nov-April) +if (leaf.off == 1) { # include leaf off data + dat48 <- dat48 +} else { # exclude leaf off data + dat48 <- dat48[as.numeric(format(dat48$scndate, "%m")) >= 05 & as.numeric(format(dat48$scndate, "%m")) <= 10, ] } -#Save extracted data -write.table(dat48,file=paste(outpath,"/",coord.set[fia+1],"_dat48.csv",sep=""),sep=",",quote=FALSE,col.names = TRUE, row.names=F) - -#Switch to working from saved data -dat48<-read.csv(paste(outpath,"/",coord.set[fia+1],"_dat48.csv",sep=""),header = TRUE) - -#Correctly format data (again...sigh...) -dat48$scnid<-as.character(dat48$scnid) -dat48$scndate<-as.Date(dat48$scndate,"%Y-%m-%d") -dat48$plot<-as.numeric(dat48$plot) -dat48$UTM.lat<- as.numeric(as.character(dat48$UTM.lat)) -dat48$UTM.lon<- as.numeric(as.character(dat48$UTM.lon)) -dat48$biomass<- as.numeric(as.character(dat48$biomass)) -dat48$HH.sigma.48<- as.numeric(as.character(dat48$HH.sigma.48)) -dat48$HV.sigma.48<- as.numeric(as.character(dat48$HV.sigma.48)) +# Save extracted data +write.table(dat48, file = paste(outpath, "/", coord.set[fia + 1], "_dat48.csv", sep = ""), sep = ",", quote = FALSE, col.names = TRUE, row.names = F) + +# Switch to working from saved data +dat48 <- read.csv(paste(outpath, "/", coord.set[fia + 1], "_dat48.csv", sep = ""), header = TRUE) + +# Correctly format data (again...sigh...) 
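## The columns re-read above do not keep their classes (scnid and scndate in particular come
## back as factor/character), which is why the per-column coercion below is repeated. A
## minimal alternative sketch, assuming the eight columns keep the order written by
## write.table above, would be to declare the classes at read time instead:
# dat48 <- read.csv(paste(outpath, "/", coord.set[fia + 1], "_dat48.csv", sep = ""),
#   header = TRUE,
#   colClasses = c("character", "Date", rep("numeric", 6))
# )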
+dat48$scnid <- as.character(dat48$scnid) +dat48$scndate <- as.Date(dat48$scndate, "%Y-%m-%d") +dat48$plot <- as.numeric(dat48$plot) +dat48$UTM.lat <- as.numeric(as.character(dat48$UTM.lat)) +dat48$UTM.lon <- as.numeric(as.character(dat48$UTM.lon)) +dat48$biomass <- as.numeric(as.character(dat48$biomass)) +dat48$HH.sigma.48 <- as.numeric(as.character(dat48$HH.sigma.48)) +dat48$HV.sigma.48 <- as.numeric(as.character(dat48$HV.sigma.48)) # dat60$scnid<-as.character(dat60$scnid) # dat60$scndate<-as.Date(dat60$scndate,"%Y-%M-%d") @@ -307,138 +312,142 @@ dat48$HV.sigma.48<- as.numeric(as.character(dat48$HV.sigma.48)) # dat60$HV.sigma.60<- as.numeric(as.character(dat60$HV.sigma.60)) # write.csv(dat60,file=paste(outpath,"/dat60.csv",sep=""),quote=FALSE,row.names=F) -#Generate PDF of raw data exploration -#NOTE: Some of these figures will not be relevant for the FIA dataset -pdf(paste(outpath,"/",coord.set[fia+1], "_ExtractionQCplots.pdf",sep=""),width = 6, height = 6, paper='special') - -par(mfrow=c(1,2)) -years<-as.numeric(format(dat48$scndate,"%Y")) -hist(years,freq=TRUE,main="By year") -months<-as.numeric(format(dat48$scndate,"%m")) -hist(months,freq=TRUE,main="By month") - -par(mfrow=c(1,3)) -hist(dat48$biomass,main=paste(coord.set[fia+1],"biomass",sep=" ")) -hist(dat48$HH.sigma.48,main=paste(coord.set[fia+1],"HH",sep=" ")) -hist(dat48$HV.sigma.48,main=paste(coord.set[fia+1],"HV",sep=" ")) - -par(mfrow=c(1,2)) -scatter.smooth(dat48$biomass,dat48$HH.sigma.48,cex=0,xlab="biomass",ylab="HH",main="48m",col="grey") - points(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2007],col=1,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2008],col=2,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2009],col=3,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col=4,cex=0.5) - legend("topright",pch=1,legend=c(2007,2008,2009,2010), cex=0.7,pt.cex=0.5,col=1:4,bty="n",xjust=1) -scatter.smooth(dat48$biomass,dat48$HV.sigma.48,cex=0,xlab="biomass",ylab="HV",main="48m",col="grey") - points(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2007],col=1,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2008],col=2,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2009],col=3,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],col=4,cex=0.5) - legend("topright",pch=1,legend=c(2007,2008,2009,2010), cex=0.7,pt.cex=0.5,col=1:4,bty="n",xjust=1) +# Generate PDF of raw data exploration +# NOTE: Some of these figures will not be relevant for the FIA dataset +pdf(paste(outpath, "/", coord.set[fia + 1], "_ExtractionQCplots.pdf", sep = ""), width = 6, height = 6, paper = "special") + +par(mfrow = c(1, 2)) +years <- as.numeric(format(dat48$scndate, "%Y")) +hist(years, freq = TRUE, main = "By year") +months <- as.numeric(format(dat48$scndate, "%m")) +hist(months, freq = TRUE, main = "By month") + +par(mfrow = c(1, 3)) +hist(dat48$biomass, main = paste(coord.set[fia + 1], "biomass", sep = " ")) +hist(dat48$HH.sigma.48, main = paste(coord.set[fia + 1], "HH", sep = " ")) +hist(dat48$HV.sigma.48, main = paste(coord.set[fia + 1], "HV", sep = " ")) + +par(mfrow = c(1, 
2)) +scatter.smooth(dat48$biomass, dat48$HH.sigma.48, cex = 0, xlab = "biomass", ylab = "HH", main = "48m", col = "grey") +points(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2007], col = 1, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2008], col = 2, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2009], col = 3, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = 4, cex = 0.5) +legend("topright", pch = 1, legend = c(2007, 2008, 2009, 2010), cex = 0.7, pt.cex = 0.5, col = 1:4, bty = "n", xjust = 1) +scatter.smooth(dat48$biomass, dat48$HV.sigma.48, cex = 0, xlab = "biomass", ylab = "HV", main = "48m", col = "grey") +points(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2007], col = 1, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2008], col = 2, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2009], col = 3, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], col = 4, cex = 0.5) +legend("topright", pch = 1, legend = c(2007, 2008, 2009, 2010), cex = 0.7, pt.cex = 0.5, col = 1:4, bty = "n", xjust = 1) # scatter.smooth(dat60$biomass,dat60$HV.sigma.60,xlab="biomass",ylab="HV",main="60m",col="grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60,xlab="biomass",ylab="HV",main="60m",col="grey") -par(mfrow=c(1,2)) -scatter.smooth(dat48$biomass,dat48$HV.sigma.48/dat48$HH.sigma.48,xlab="biomass",ylab="HV/HH",main="48m",col="grey") +par(mfrow = c(1, 2)) +scatter.smooth(dat48$biomass, dat48$HV.sigma.48 / dat48$HH.sigma.48, xlab = "biomass", ylab = "HV/HH", main = "48m", col = "grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60/dat60$HV.sigma.60,xlab="biomass",ylab="HV/HV",main="60m",col="grey") -scatter.smooth(dat48$biomass,dat48$HH.sigma.48*dat48$HV.sigma.48,xlab="biomass",ylab="HHxHV",main="48m",col="grey") +scatter.smooth(dat48$biomass, dat48$HH.sigma.48 * dat48$HV.sigma.48, xlab = "biomass", ylab = "HHxHV", main = "48m", col = "grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60*dat60$HV.sigma.60,xlab="biomass",ylab="HVxHV",main="60m",col="grey") -par(mfrow=c(1,1)) -scatter.smooth(dat48$biomass,(dat48$HH.sigma.48-dat48$HV.sigma.48)/(dat48$HH.sigma.48+dat48$HV.sigma.48),xlab="biomass",ylab="(HH-HV)/(HH+HV)",main="48m", col="gray") +par(mfrow = c(1, 1)) +scatter.smooth(dat48$biomass, (dat48$HH.sigma.48 - dat48$HV.sigma.48) / (dat48$HH.sigma.48 + dat48$HV.sigma.48), xlab = "biomass", ylab = "(HH-HV)/(HH+HV)", main = "48m", col = "gray") # scatter.smooth(dat60$biomass,(dat60$HV.sigma.60-dat60$HV.sigma.60)/(dat60$HV.sigma.60+dat60$HV.sigma.60),xlab="biomass",ylab="(HV-HV)/(HV+HV)",main="60m", col="gray") -par(mfrow=c(4,2),mar=c(4,4,2,2)) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2007],col="grey",xlab="biomass",ylab="HH",main="2007") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2007],col="grey",xlab="biomass",ylab="HV",main="2007") 
-scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2008],col="grey",xlab="biomass",ylab="HH",main="2008") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2008],col="grey",xlab="biomass",ylab="HV",main="2008") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2009],col="grey",xlab="biomass",ylab="HH",main="2009") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2009],col="grey",xlab="biomass",ylab="HV",main="2009") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010") - points(dat48$biomass[format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%m")>10],col="red",xlab="biomass",ylab="HV",main="2010") - legend("topright",pch=1,legend=c("!Dec","Dec"), cex=0.7,pt.cex=0.5,col=c("grey","red"),bty="n",xjust=1) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HV",main="2010") - points(dat48$biomass[format(dat48$scndate,"%m")>10],dat48$HV.sigma.48[format(dat48$scndate,"%m")>10],col="red",xlab="biomass",ylab="HV",main="2010") - legend("topright",pch=1,legend=c("!Dec","Dec"), cex=0.7,pt.cex=0.5,col=c("grey","red"),bty="n",xjust=1) - -par(mfrow=c(1,2)) -plot(dat48$scndate,dat48$HH.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HH") -plot(dat48$scndate,dat48$HV.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HV") -mtext("On same scale", side=3, line=-2, outer=TRUE, cex=1, font=2) - -par(mfrow=c(1,2)) -plot(dat48$scndate,dat48$HH.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HH") -plot(dat48$scndate,dat48$HV.sigma.48,ylim=c(0,max(dat48$HV.sigma.48)),xlab="Date",ylab="HV") -mtext("By Date", side=3, line=-2, outer=TRUE, cex=1, font=2) - -par(mfrow=c(1,2)) -plot(dat48$scndate[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],xlab="2010",ylab="HH") -plot(dat48$scndate[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],xlab="2010",ylab="HV") -mtext("2010 only", side=3, line=-3, outer=TRUE, cex=1, font=2) - -if(leaf.off==1){ -par(mfrow=c(2,2)) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010 only") -points(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10]) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010 only") -points(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10]) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],col="grey",xlab="biomass",ylab="HH",main="2010 only,Dec. 
removed") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],col="grey",xlab="biomass",ylab="HV",main="2010 only,Dec. removed") +par(mfrow = c(4, 2), mar = c(4, 4, 2, 2)) +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2007], col = "grey", xlab = "biomass", ylab = "HH", main = "2007") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2007], col = "grey", xlab = "biomass", ylab = "HV", main = "2007") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2008], col = "grey", xlab = "biomass", ylab = "HH", main = "2008") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2008], col = "grey", xlab = "biomass", ylab = "HV", main = "2008") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2009], col = "grey", xlab = "biomass", ylab = "HH", main = "2009") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2009], col = "grey", xlab = "biomass", ylab = "HV", main = "2009") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010") +points(dat48$biomass[format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%m") > 10], col = "red", xlab = "biomass", ylab = "HV", main = "2010") +legend("topright", pch = 1, legend = c("!Dec", "Dec"), cex = 0.7, pt.cex = 0.5, col = c("grey", "red"), bty = "n", xjust = 1) +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HV", main = "2010") +points(dat48$biomass[format(dat48$scndate, "%m") > 10], dat48$HV.sigma.48[format(dat48$scndate, "%m") > 10], col = "red", xlab = "biomass", ylab = "HV", main = "2010") +legend("topright", pch = 1, legend = c("!Dec", "Dec"), cex = 0.7, pt.cex = 0.5, col = c("grey", "red"), bty = "n", xjust = 1) + +par(mfrow = c(1, 2)) +plot(dat48$scndate, dat48$HH.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HH") +plot(dat48$scndate, dat48$HV.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HV") +mtext("On same scale", side = 3, line = -2, outer = TRUE, cex = 1, font = 2) + +par(mfrow = c(1, 2)) +plot(dat48$scndate, dat48$HH.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HH") +plot(dat48$scndate, dat48$HV.sigma.48, ylim = c(0, max(dat48$HV.sigma.48)), xlab = "Date", ylab = "HV") +mtext("By Date", side = 3, line = -2, outer = TRUE, cex = 1, font = 2) + +par(mfrow = c(1, 2)) +plot(dat48$scndate[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], xlab = "2010", ylab = "HH") +plot(dat48$scndate[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], xlab = "2010", ylab = "HV") +mtext("2010 only", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + +if (leaf.off == 1) { + par(mfrow = c(2, 2)) + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab 
= "biomass", ylab = "HH", main = "2010 only") + points(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10]) + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only") + points(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10]) + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only,Dec. removed") + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], col = "grey", xlab = "biomass", ylab = "HV", main = "2010 only,Dec. removed") } -#Plot individual time series of HH for each coordinate set -par(new=FALSE, mfrow=c(1,1)) -plot(format(dat48$scndate,"%Y"),dat48$HH.sigma.48,col="grey",cex=0.5, - xlab="Year",ylab="HH",main="Average backscatter/plot/year") -date.plot.HHmean<-tapply(dat48$HH.sigma.48,list(dat48$plot,format(dat48$scndate,"%Y")),mean,na.rm=TRUE) #mean HH for each plot in each year -for(i in dat48$plot){ - lines(cbind(c(2007,2008,2009,2010),date.plot.HHmean[dat48$plot[i],]),col=i) - par(new=T) +# Plot individual time series of HH for each coordinate set +par(new = FALSE, mfrow = c(1, 1)) +plot(format(dat48$scndate, "%Y"), dat48$HH.sigma.48, + col = "grey", cex = 0.5, + xlab = "Year", ylab = "HH", main = "Average backscatter/plot/year" +) +date.plot.HHmean <- tapply(dat48$HH.sigma.48, list(dat48$plot, format(dat48$scndate, "%Y")), mean, na.rm = TRUE) # mean HH for each plot in each year +for (i in dat48$plot) { + lines(cbind(c(2007, 2008, 2009, 2010), date.plot.HHmean[dat48$plot[i], ]), col = i) + par(new = T) } -#Plot individual time series of HV for each coordinate set -par(new=FALSE, mfrow=c(1,1)) -plot(format(dat48$scndate,"%Y"),dat48$HV.sigma.48,col="grey",cex=0.5, - xlab="Year",ylab="HV",main="Average backscatter/plot/year") -date.plot.HVmean<-tapply(dat48$HV.sigma.48,list(dat48$plot,format(dat48$scndate,"%Y")),mean,na.rm=TRUE) #mean HV for each plot in each year -date.plot.HVmean<-na.exclude(date.plot.HVmean) -for(i in dat48$plot){ - lines(cbind(c(2007,2008,2009,2010),date.plot.HVmean[dat48$plot[i],]),col=i) - par(new=T) +# Plot individual time series of HV for each coordinate set +par(new = FALSE, mfrow = c(1, 1)) +plot(format(dat48$scndate, "%Y"), dat48$HV.sigma.48, + col = "grey", cex = 0.5, + xlab = "Year", ylab = "HV", main = "Average backscatter/plot/year" +) +date.plot.HVmean <- tapply(dat48$HV.sigma.48, list(dat48$plot, format(dat48$scndate, "%Y")), mean, na.rm = TRUE) # mean HV for each plot in each year +date.plot.HVmean <- na.exclude(date.plot.HVmean) +for (i in dat48$plot) { + lines(cbind(c(2007, 2008, 2009, 2010), date.plot.HVmean[dat48$plot[i], ]), col = i) + par(new = T) } -#breaks data into quantiles each containing ~5% of the data -bind.bio<-tapply(dat48$biomass,cut(dat48$biomass,breaks=round(quantile(dat48$biomass,probs = seq(0, 1, 0.05))) ),mean) -bind.HH<-tapply(dat48$HH.sigma.48,cut(dat48$HH.sigma.48,breaks=quantile(dat48$HH.sigma.48,probs = seq(0, 
1, 0.05)) ),mean) -bind.HV<-tapply(dat48$HV.sigma.48,cut(dat48$HV.sigma.48,breaks=quantile(dat48$HV.sigma.48,probs = seq(0, 1, 0.05)) ),mean) -par(new=FALSE, mfrow=c(1,2)) -plot(dat48$biomass,dat48$HH.sigma.48,col="grey",pch=".",xlab="Binned Biomass",ylab="Binned HH") - points(bind.bio,bind.HH) -plot(dat48$biomass,dat48$HV.sigma.48,col="grey",,pch=".",xlab="Binned Biomass",ylab="Binned HV") - points(bind.bio,bind.HV) -mtext("Bins each contain 5% of the data points", side=3, line=-3, outer=TRUE, cex=1, font=2) - -#breaks data into even-length bins -bind.bio<-tapply(dat48$biomass, cut(dat48$biomass, breaks=seq(0, max(dat48$biomass), 0.05*max(dat48$biomass))),mean) -bind.HH<-tapply(dat48$HH.sigma.48,cut(dat48$HH.sigma.48,breaks=seq(0, max(dat48$HH.sigma.48), 0.05*max(dat48$HH.sigma.48))),mean) -bind.HV<-tapply(dat48$HV.sigma.48,cut(dat48$HV.sigma.48,breaks=seq(0, max(dat48$HV.sigma.48), 0.05*max(dat48$HV.sigma.48))),mean) -par(mfrow=c(1,2)) -plot(dat48$biomass,dat48$HH.sigma.48,col="grey",pch=".",xlab="Binned Biomass",ylab="Binned HH") - points(bind.bio,bind.HH) -plot(dat48$biomass,dat48$HV.sigma.48,col="grey",,pch=".",xlab="Binned Biomass",ylab="Binned HV") - points(bind.bio,bind.HV) -mtext("Bins each contain 5% of data range", side=3, line=-3, outer=TRUE, cex=1, font=2) - -par(mfrow=c(1,2)) -bplot.xy(dat48$biomass,dat48$HH.sigma.48,N=15,xlab="biomass",ylab="HH (simga naught)") -bplot.xy(dat48$biomass,dat48$HV.sigma.48,N=15,xlab="biomass",ylab="HV (simga naught)") +# breaks data into quantiles each containing ~5% of the data +bind.bio <- tapply(dat48$biomass, cut(dat48$biomass, breaks = round(quantile(dat48$biomass, probs = seq(0, 1, 0.05)))), mean) +bind.HH <- tapply(dat48$HH.sigma.48, cut(dat48$HH.sigma.48, breaks = quantile(dat48$HH.sigma.48, probs = seq(0, 1, 0.05))), mean) +bind.HV <- tapply(dat48$HV.sigma.48, cut(dat48$HV.sigma.48, breaks = quantile(dat48$HV.sigma.48, probs = seq(0, 1, 0.05))), mean) +par(new = FALSE, mfrow = c(1, 2)) +plot(dat48$biomass, dat48$HH.sigma.48, col = "grey", pch = ".", xlab = "Binned Biomass", ylab = "Binned HH") +points(bind.bio, bind.HH) +plot(dat48$biomass, dat48$HV.sigma.48, col = "grey", , pch = ".", xlab = "Binned Biomass", ylab = "Binned HV") +points(bind.bio, bind.HV) +mtext("Bins each contain 5% of the data points", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + +# breaks data into even-length bins +bind.bio <- tapply(dat48$biomass, cut(dat48$biomass, breaks = seq(0, max(dat48$biomass), 0.05 * max(dat48$biomass))), mean) +bind.HH <- tapply(dat48$HH.sigma.48, cut(dat48$HH.sigma.48, breaks = seq(0, max(dat48$HH.sigma.48), 0.05 * max(dat48$HH.sigma.48))), mean) +bind.HV <- tapply(dat48$HV.sigma.48, cut(dat48$HV.sigma.48, breaks = seq(0, max(dat48$HV.sigma.48), 0.05 * max(dat48$HV.sigma.48))), mean) +par(mfrow = c(1, 2)) +plot(dat48$biomass, dat48$HH.sigma.48, col = "grey", pch = ".", xlab = "Binned Biomass", ylab = "Binned HH") +points(bind.bio, bind.HH) +plot(dat48$biomass, dat48$HV.sigma.48, col = "grey", , pch = ".", xlab = "Binned Biomass", ylab = "Binned HV") +points(bind.bio, bind.HV) +mtext("Bins each contain 5% of data range", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + +par(mfrow = c(1, 2)) +bplot.xy(dat48$biomass, dat48$HH.sigma.48, N = 15, xlab = "biomass", ylab = "HH (simga naught)") +bplot.xy(dat48$biomass, dat48$HV.sigma.48, N = 15, xlab = "biomass", ylab = "HV (simga naught)") dev.off() -#Run curve fitting function -n.reps<- 5000 #sets value for n.adapt and n.iter -n.chain<-3 #number of MCMC chains to run 
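## For reference on the functional form: the commented-out nls section further below fits a
## rectangular hyperbola, backscatter = (alpha * beta * biomass) / (beta + alpha * biomass),
## in which alpha is the initial slope at low biomass and beta is the asymptotic backscatter
## at high biomass. A minimal sketch of that shape, with arbitrary illustrative values:
# alpha <- 0.05; beta <- 0.3 # arbitrary values, for illustration only
# curve((alpha * beta * x) / (beta + alpha * x), from = 0, to = 300,
#   xlab = "biomass", ylab = "modeled backscatter"
# )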
-bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) +# Run curve fitting function +n.reps <- 5000 # sets value for n.adapt and n.iter +n.chain <- 3 # number of MCMC chains to run +bayes.curve.fit(outpath, coord.set, fia, n.reps, n.chain) @@ -487,62 +496,62 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) ######################################################## ####################### -##Use Non-linear Least Squares (nls) to fit rectangular hyperbola NOTE:This method is not preferred. +## Use Non-linear Least Squares (nls) to fit rectangular hyperbola NOTE:This method is not preferred. # ####################### # ##NOTE: backscatter=((alpha*beta*biomass)/(beta + alpha*biomass)) # buff<-c("48", "60") -# -# # col.names<-c("pol_band", +# +# # col.names<-c("pol_band", # # "buffer_radius(m)", -# # "biomass_R2", -# # "mod.alpha", -# # "pval.alpha", -# # "alpha.ci.lower", +# # "biomass_R2", +# # "mod.alpha", +# # "pval.alpha", +# # "alpha.ci.lower", # # "alpha.ci.upper", -# # "mod.beta", -# # "pval.b", +# # "mod.beta", +# # "pval.b", # # "beta.ci.upper", # # "beta.ci.upper", -# # "num.iters", +# # "num.iters", # # "convergence") # # mod.params<-matrix(nrow=1,ncol=length(col.names)) # # colnames(mod.params)<-col.names -# # +# # # # par(mfrow=c(length(pol_bands),length(buff))) # # for(i in 1:length(pol_bands)){ # # for(j in 1:length(buff)){ -# # +# # # # y<-eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$",sep=''),paste(pol_bands[i],'.sigma.',buff[j],sep='')))) # # x<-(eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$biomass",sep='')))))^0.5 -# # +# # # # # Plot backscatter v biomass # # plot(x, y, # # xlab=expression(sqrt(biomass)), # # ylab=pol_bands[i], # # main=buff[j], # # col=51,pch=19, cex=0.6, -# # xlim=c(min(x),max(x)), +# # xlim=c(min(x),max(x)), # # ylim=c(min(y),max(y)), # # las=1, cex.axis=1.2) -# # +# # # # # Calculate rectangular hyperbola between backscatter and biomass # # biomass_curve <- nls(formula=y ~ ((alpha*beta*x)/(beta + alpha*x)), # also in Gu 2002 -# # data=list(y = y, x = x), -# # start=list(alpha = .75, beta = mean(y[x > quantile(x)[4]])), +# # data=list(y = y, x = x), +# # start=list(alpha = .75, beta = mean(y[x > quantile(x)[4]])), # # na.action="na.exclude", trace=F) # # biomass_R2 <- 1 - var(residuals(biomass_curve)) / var(y) # R2 -# # +# # # # # Plot rectangular hyperbola model fit # # mod.alpha <- summary(biomass_curve)$parameters[1] # alpha value # # mod.beta <- summary(biomass_curve)$parameters[2] # Beta value -# # mod.biomass <- seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), by=0.01) -# # mod.HH <- (mod.alpha*mod.beta*mod.biomass)/(mod.beta + mod.alpha*mod.biomass) +# # mod.biomass <- seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), by=0.01) +# # mod.HH <- (mod.alpha*mod.beta*mod.biomass)/(mod.beta + mod.alpha*mod.biomass) # # lines(x=mod.biomass, y=mod.HH, col="black", lty=1, lwd=2.5) -# # +# # # # legend("topright", legend=c(paste("R^2=", format(biomass_R2, digits=2)), # # paste("alpha=",format(mod.alpha,digits=2)), # # paste("beta=",format(mod.beta,digits=2))), bty="n",cex=1.2) -# # +# # # # # Write model parameters to output file # # num.iters <- as.numeric(biomass_curve$convInfo[2]) # # conv <- as.numeric(biomass_curve$convInfo[1]) @@ -560,23 +569,23 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # # beta.ci.lower <- as.numeric(ci[2,1]) # # beta.ci.upper <- as.numeric(ci[2,2]) # # pval.b <- as.numeric(summary(biomass_curve)$parameters[8]) -# # mod.params <- rbind(mod.params,as.vector(c(pol_bands[i], -# # buff[j], -# # 
biomass_R2, -# # mod.alpha, -# # pval.a, -# # alpha.ci.lower, +# # mod.params <- rbind(mod.params,as.vector(c(pol_bands[i], +# # buff[j], +# # biomass_R2, +# # mod.alpha, +# # pval.a, +# # alpha.ci.lower, # # alpha.ci.upper, -# # mod.beta, +# # mod.beta, # # pval.b, -# # beta.ci.lower, +# # beta.ci.lower, # # beta.ci.upper, -# # num.iters, +# # num.iters, # # conv))) # # print(paste(pol_bands[i],buff[j])) # # }} # # mod.params<-mod.params[2:nrow(mod.params),] -# # +# # # # xs<-seq(from=1, to=4,by=1) # # ys<-as.numeric(mod.params[,4]) # # upper<-as.numeric(mod.params[,7]) @@ -585,41 +594,41 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # # plot(xs,ys,ylim=c(0,max(upper)+0.1*max(upper)),ylab="Alpha estimate",xlab="pol.band/buffer.radius") # # segments(xs,lower,xs,upper,col="black",lwd=2) # # legend("topright",legend=c("1=48,HH", "2=60,HH","3=48,HV", "4=60,HV")) -# # +# # # # rhyp<-mod.params -# -# +# +# # ####################### # ##Use Maximum likelihood to fit curves # ####################### # data<- read.csv(file.path(outpath, "WLEF_dat48.csv"), sep=",", header=T) ##location of PALSAR metadata table -# +# # # model<-c("Holl4", "RecHyp", "Logistic") -# -# +# +# # # for(k in 1:length(model)){ #loop over different functional forms # for(i in 1:length(pol_bands)){ # for(j in 1 ){ -# +# # y<-eval(parse(text=paste(paste("data$",pol_bands[i],'.sigma.',buff[j],sep='')))) #backscatter values # # x<-(eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$biomass",sep='')))))^0.5 # x<-(data$biomass) #biomass # # max.y<-mean(y[x>=quantile(x)[4]]) #starting est. of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile -# +# # ####################### # ##Use Maximum likelihood to fit Holling Type 4 # ####################### # model<-"Holl4" -# param_est<-matrix(nrow=0,ncol=8) +# param_est<-matrix(nrow=0,ncol=8) # par(mfrow=c(1,length(pol_bands))) # pdf(paste(outpath,"/",model,"_curvefits.pdf",sep=""),width = 6, height = 6, paper='special') -# +# # a<- mean(y[x>=quantile(x,na.rm=TRUE)[4]],na.rm=TRUE) #starting est. 
of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile # b<-quantile(x,na.rm=TRUE)[4] # c<--1 -# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values +# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values # params<-c(a,b,c,sd) -# +# # fit <- function(params,x,y){ # a <-params[1] # b <-params[2] @@ -627,21 +636,21 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # sd<-params[4] # # y.pred<-(max.y*x)/(ki+x) # y.pred<-(a*x^2)/(b+(c*x)+x^2) -# +# # LL<- -sum(dnorm(y,y.pred,sd,log=TRUE)) # return(LL) # } #function -# +# # fit.mod = optim(par=params,fit,x=x,y=y) # fit.mod # aic.mod<- -2*fit.mod$value + 2*length(params) -# +# # params <- c(pol_bands[i],buff[j],fit.mod$par[1:3],2*fit.mod$par[2]/fit.mod$par[3],fit.mod$par[4],aic.mod) #par means parameter estimates # param_est<-rbind(param_est, params) # xseq = seq(0,max(x),length=1000) -# +# # plot(x,y, xlab="biomass",ylab=paste(pol_bands[i],buff[j],sep="_"),main=model, #something wrong here with main title -# xlim=c(min(x),max(x)), +# xlim=c(min(x),max(x)), # ylim=c(min(y),max(y)), # pch=16,col="#CCCCCC") # abline(a=0,b=0) @@ -651,47 +660,47 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # paste("b=",format(fit.mod$par[2],digits=2)), # paste("c=",format(fit.mod$par[3],digits=2)), # paste("aic=", format(aic.mod, digits=4))), bty="n",cex=0.85) -# +# # dev.off() # colnames(param_est)<-c("pol_bands","buff","a","b","c","bio.at.peak.backscatter", "sd","AIC") # param_est # write.table(param_est,file=paste(outpath, "/", model, "_param_estimates.csv",sep=""),quote=FALSE,sep=",",row.names=F) -# +# # ####################### # ##Use Maximum likelihood to fit Logistic # ####################### # model<-"Logistic" -# param_est<-matrix(nrow=0,ncol=7) +# param_est<-matrix(nrow=0,ncol=7) # par(mfrow=c(1,length(pol_bands))) # pdf(paste(outpath,"/",model,"_curvefits.pdf",sep=""),width = 6, height = 6, paper='special') -# +# # a<- max(y) #starting est. 
of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile # b<-2 #slope of initial portion of the curve # c<-mean(x[y>=quantile(y,0.9,na.rm=TRUE)],na.rm=TRUE) -# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values +# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values # params<-c(a,b,c,sd) -# +# # fit <- function(params,x,y){ # a <-params[1] # b <-params[2] # c <-params[3] # sd<-params[4] # y.pred<- a/(1+b*exp(-c*x)) -# +# # LL<- -sum(dnorm(y,y.pred,sd,log=TRUE)) # return(LL) # } #function -# +# # fit.mod = optim(par=params,fit,x=x,y=y) # fit.mod # aic.mod<- -2*fit.mod$value + 2*length(params) -# +# # params <- c(pol_bands[i],buff[j],fit.mod$par[1:4],aic.mod) #par means parameter estimates # param_est<-rbind(param_est, params) # xseq = seq(0,max(x),length=1000) -# +# # plot(x,y, xlab="biomass",ylab=paste(pol_bands[i],buff[j],sep="_"),main=model, #something wrong here with main title -# xlim=c(min(x),max(x)), +# xlim=c(min(x),max(x)), # ylim=c(min(y),max(y)), # pch=16,col="#CCCCCC") # abline(a=0,b=0) @@ -700,25 +709,25 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # paste("b=",format(fit.mod$par[2],digits=2)), # paste("c=",format(fit.mod$par[3],digits=2)), # paste("aic=", format(aic.mod, digits=4))), bty="n",cex=0.85) -# +# # dev.off() # colnames(param_est)<-c("pol_bands","buff","a","b","c","sd","AIC") # param_est # write.table(param_est,file=paste(outpath, "/", model, "_param_estimates.csv",sep=""),quote=FALSE,sep=",",row.names=F) -# -# -# +# +# +# # }#for j looping over pol_bands # }#for i looping over buff -# +# # }#for k looping over models -# +# # ################################## # ################################## -# -# +# +# # ################# # ##diagnotics? # ################# -# -# +# +# diff --git a/modules/data.remote/inst/scripts/old/ChEAS_FIA_03112014.R b/modules/data.remote/inst/scripts/old/ChEAS_FIA_03112014.R index 4b9cff93ac6..6492d87f726 100644 --- a/modules/data.remote/inst/scripts/old/ChEAS_FIA_03112014.R +++ b/modules/data.remote/inst/scripts/old/ChEAS_FIA_03112014.R @@ -1,6 +1,6 @@ -##Author Brady S. Hardiman 11/12/2013 +## Author Brady S. 
Hardiman 11/12/2013 -##Read in fuzzed FIA coordinates supplied by Andy Finley (MSU), extract palsar backscatter values, fit curves, save figures and extracted values (with coordinates) +## Read in fuzzed FIA coordinates supplied by Andy Finley (MSU), extract palsar backscatter values, fit curves, save figures and extracted values (with coordinates) ################################ ## Load Required Packages @@ -19,95 +19,99 @@ library(fields) ################################ ## OPTIONS ################################ -kml=0 #1 = generate and save kml files of extraction coordinates; 0 = do not generate new kml -fia=1 #1 = use FIA coordinates, 0 = use WLEF/Park Falls Tower coordinates -leaf.off=0 #1=include PALSAR scenes acquired duing leaf off period of the year, 0=exclude leaf off scene dates +kml <- 0 # 1 = generate and save kml files of extraction coordinates; 0 = do not generate new kml +fia <- 1 # 1 = use FIA coordinates, 0 = use WLEF/Park Falls Tower coordinates +leaf.off <- 0 # 1=include PALSAR scenes acquired duing leaf off period of the year, 0=exclude leaf off scene dates # buff=c(48) #vector of buffer sizes (in meters) to extract -coord.set<-c("WLEF", "FIA") +coord.set <- c("WLEF", "FIA") # metadata<- read.csv("~/data.remote/output/metadata/output_metadata.csv", sep="\t", header=T) ##for Brady's Linux -metadata<- read.csv("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/metadata/output_metadata.csv", sep="\t", header=T) ##location of PALSAR metadata table -palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/palsar_scenes/geo_corrected_single_sigma") ##location of PALSAR raw files -calib_inpath <-"/Users/hardimanb/Desktop/data.remote(Andys_Copy)/biometry" ##location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter -outpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/data") ##For saving +metadata <- read.csv("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/metadata/output_metadata.csv", sep = "\t", header = T) ## location of PALSAR metadata table +palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/palsar_scenes/geo_corrected_single_sigma") ## location of PALSAR raw files +calib_inpath <- "/Users/hardimanb/Desktop/data.remote(Andys_Copy)/biometry" ## location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter +outpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/data") ## For saving ################################ ## Read in coordinate data for calibration of PALSAR backscatter returns -## Uses PALSAR metadata file to determine geographic extent of available PALSAR data and crops extraction coord set +## Uses PALSAR metadata file to determine geographic extent of available PALSAR data and crops extraction coord set ## to match PALSAR extent. Reprojects extraction coords to match PALSAR geotiffs. 
################################ -if(fia==1){ #EXTRACTS FROM FIA COORDINATES -# calib_inpath <-"~/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##for Brady's Linux - calib_infile <-read.csv(file.path(calib_inpath,"wi-biomass-fuzzed.csv"), sep=",", header=T) #Wisconsin FIA plots - coords<-data.frame(calib_infile$FUZZED_LON,calib_infile$FUZZED_LAT) #lon and lat (ChEAS: UTM Zone 15N NAD83) - Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - -# Sr1<-spTransform(Sr1,CRS(raster)) - -# wi.fia<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) - latlon<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) -# FIA.points <- SpatialPointsDataFrame(Sr1, wi.fia) #convert to class="SpatialPointsDataFrame" for export as kml - spdf.latlon <- SpatialPointsDataFrame(Sr1, latlon) #convert to class="SpatialPointsDataFrame" for export as kml - if(kml==1){writeOGR(spdf.latlon, layer=1, "WI_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) +if (fia == 1) { # EXTRACTS FROM FIA COORDINATES + # calib_inpath <-"~/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##for Brady's Linux + calib_infile <- read.csv(file.path(calib_inpath, "wi-biomass-fuzzed.csv"), sep = ",", header = T) # Wisconsin FIA plots + coords <- data.frame(calib_infile$FUZZED_LON, calib_infile$FUZZED_LAT) # lon and lat (ChEAS: UTM Zone 15N NAD83) + Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + + # Sr1<-spTransform(Sr1,CRS(raster)) + + # wi.fia<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) + latlon <- data.frame(calib_infile$FUZZED_LAT, calib_infile$FUZZED_LON) + # FIA.points <- SpatialPointsDataFrame(Sr1, wi.fia) #convert to class="SpatialPointsDataFrame" for export as kml + spdf.latlon <- SpatialPointsDataFrame(Sr1, latlon) # convert to class="SpatialPointsDataFrame" for export as kml + if (kml == 1) { + writeOGR(spdf.latlon, layer = 1, "WI_FIA.kml", driver = "KML") # export as kml (this puts in in the Home folder) } -}else{#EXTRACTS FROM WLEF COORDINATES +} else { # EXTRACTS FROM WLEF COORDINATES # calib_inpath <-"~/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##for Brady's Linux - calib_infile <-read.csv(file.path(calib_inpath,"biometry_trimmed.csv"), sep=",", header=T) #WLEF plots -# upid<-paste(calib_infile$plot,calib_infile$subplot,sep='.') #create unique plot identifier -# calib_infile<-cbind(calib_infile[,1:2],upid,calib_infile[,3:8]) - calib_infile<-aggregate(calib_infile, list(calib_infile[,1]), mean) ##This will give errors, but these can be safely ignored - calib_infile$plot<-calib_infile$Group.1 - calib_infile<-cbind(calib_infile[,2],calib_infile[,5:9]) - colnames(calib_infile)<-c("plot","easting","northing","adult_density","sapling_density","ABG_biomass") - - coords<-data.frame(calib_infile$easting,calib_infile$northing) #eastings and northings (ChEAS: UTM Zone 15N NAD83) - Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) - wlef<-data.frame(paste(calib_infile$plot,calib_infile$subplot,sep="_")) + calib_infile <- read.csv(file.path(calib_inpath, "biometry_trimmed.csv"), sep = ",", header = T) # WLEF plots + # upid<-paste(calib_infile$plot,calib_infile$subplot,sep='.') #create unique plot identifier + # calib_infile<-cbind(calib_infile[,1:2],upid,calib_infile[,3:8]) + calib_infile <- aggregate(calib_infile, list(calib_infile[, 1]), mean) ## This will give 
errors, but these can be safely ignored + calib_infile$plot <- calib_infile$Group.1 + calib_infile <- cbind(calib_infile[, 2], calib_infile[, 5:9]) + colnames(calib_infile) <- c("plot", "easting", "northing", "adult_density", "sapling_density", "ABG_biomass") + + coords <- data.frame(calib_infile$easting, calib_infile$northing) # eastings and northings (ChEAS: UTM Zone 15N NAD83) + Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) + wlef <- data.frame(paste(calib_infile$plot, calib_infile$subplot, sep = "_")) epsg4326String <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs") - Sr1_4google <- spTransform(Sr1,epsg4326String) #class=SpatialPoints - Sr1_4google <- SpatialPointsDataFrame(Sr1_4google, wlef) #convert to class="SpatialPointsDataFrame" for export as kml - if(kml==1){writeOGR(Sr1_4google, layer=1, "WLEF.kml", driver="KML") #export as kml (this puts in in the Home folder) + Sr1_4google <- spTransform(Sr1, epsg4326String) # class=SpatialPoints + Sr1_4google <- SpatialPointsDataFrame(Sr1_4google, wlef) # convert to class="SpatialPointsDataFrame" for export as kml + if (kml == 1) { + writeOGR(Sr1_4google, layer = 1, "WLEF.kml", driver = "KML") # export as kml (this puts in in the Home folder) } } ## corner coords for cheas domain based on avaialable PALSAR data. (Maybe switch to bounding.box.xy()? ) -ChEAS_PLASAR_extent <-rbind(cbind(min(metadata$scn_nwlon),max(metadata$scn_nwlat)), - cbind(max(metadata$scn_nelon),max(metadata$scn_nelat)), - cbind(max(metadata$scn_selon),min(metadata$scn_selat)), - cbind(min(metadata$scn_swlon),min(metadata$scn_swlat)), - cbind(min(metadata$scn_nwlon),max(metadata$scn_nwlat))) - -ChEAS_PLASAR_extent<- Polygon(ChEAS_PLASAR_extent) #spatial polygon from cheas-palsar extent -Srs1<- Polygons(list(ChEAS_PLASAR_extent),"ChEAS_PLASAR_extent") #spatial polygons (plural) -ChEAS_PLASAR_extent<-SpatialPolygons(list(Srs1),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - -Sr1<-spTransform(Sr1,CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) +ChEAS_PLASAR_extent <- rbind( + cbind(min(metadata$scn_nwlon), max(metadata$scn_nwlat)), + cbind(max(metadata$scn_nelon), max(metadata$scn_nelat)), + cbind(max(metadata$scn_selon), min(metadata$scn_selat)), + cbind(min(metadata$scn_swlon), min(metadata$scn_swlat)), + cbind(min(metadata$scn_nwlon), max(metadata$scn_nwlat)) +) + +ChEAS_PLASAR_extent <- Polygon(ChEAS_PLASAR_extent) # spatial polygon from cheas-palsar extent +Srs1 <- Polygons(list(ChEAS_PLASAR_extent), "ChEAS_PLASAR_extent") # spatial polygons (plural) +ChEAS_PLASAR_extent <- SpatialPolygons(list(Srs1), proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + +Sr1 <- spTransform(Sr1, CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) # FIA.in.cheas<-as.vector(over(FIA.points,ChEAS_PLASAR_extent)) #subset of FIA plots that falls within Cheas-PALSAR extent -coords.in.cheas<-as.vector(over(Sr1,ChEAS_PLASAR_extent)) #subset of plots that falls within Cheas-PALSAR extent +coords.in.cheas <- as.vector(over(Sr1, ChEAS_PLASAR_extent)) # subset of plots that falls within Cheas-PALSAR extent # FIA.in.cheas[is.na(FIA.in.cheas)]<-0 #replace na's with 0's for indexing -coords.in.cheas[is.na(coords.in.cheas)]<-0 #replace na's with 0's for indexing +coords.in.cheas[is.na(coords.in.cheas)] <- 0 # replace na's with 0's for indexing -##Biomass source data -if(fia==1){ - biomass<-calib_infile[as.logical(coords.in.cheas),4] #for FIA -} else{ - 
biomass<-calib_infile[as.logical(coords.in.cheas),'ABG_biomass'] #for WLEF +## Biomass source data +if (fia == 1) { + biomass <- calib_infile[as.logical(coords.in.cheas), 4] # for FIA +} else { + biomass <- calib_infile[as.logical(coords.in.cheas), "ABG_biomass"] # for WLEF } ## Subset extraction coords that fall within PALSAR observation area -# cheasFIA<-Sr1@coords[FIA.in.cheas==1,] -cheas.coords<-Sr1@coords[coords.in.cheas==1,] ##subset of coords that falls within Cheas-PALSAR extent +# cheasFIA<-Sr1@coords[FIA.in.cheas==1,] +cheas.coords <- Sr1@coords[coords.in.cheas == 1, ] ## subset of coords that falls within Cheas-PALSAR extent # spcheasFIA <- SpatialPoints(cheasFIA,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -spcheascoords <- SpatialPoints(cheas.coords,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) +spcheascoords <- SpatialPoints(cheas.coords, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -##Plot-IDs; will be used later on for generating time series of backscatter values -if(fia==1){ - plot<-seq(1,nrow(cheas.coords),1) #for FIA NOTE: Add in FIA plot unique identifiers if available -} else{ - plot<-calib_infile[as.logical(coords.in.cheas),'plot'] #for WLEF +## Plot-IDs; will be used later on for generating time series of backscatter values +if (fia == 1) { + plot <- seq(1, nrow(cheas.coords), 1) # for FIA NOTE: Add in FIA plot unique identifiers if available +} else { + plot <- calib_infile[as.logical(coords.in.cheas), "plot"] # for WLEF } -# writeOGR(cheasFIA, layer=1, "cheas_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) +# writeOGR(cheasFIA, layer=1, "cheas_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) ################################ ## Begin extracting PALSAR values at FIA plot coordinates @@ -118,194 +122,195 @@ if(fia==1){ # date<-as.Date(metadata$scndate, format='%Y%m%d') # col_names<-c(rbind(paste(date, "HH",sep="_"),paste(date, "HV",sep="_"))) -pol_bands<-c("HH", "HV") -numfiles<-length(list.files(file.path(palsar_inpath, pol_bands[1]), pattern=".tif" ,recursive=F)) +pol_bands <- c("HH", "HV") +numfiles <- length(list.files(file.path(palsar_inpath, pol_bands[1]), pattern = ".tif", recursive = F)) # lake_extracted<-matrix(NA, nrow(lake_coords),length(pol_bands)*numfiles) # disturbance_extracted_40m<-matrix(NA, nrow(disturbance_coords),length(pol_bands)*numfiles) -# +# # colnames(lake_extracted)<-col_names # colnames(disturbance_extracted)<-col_names # colnames(disturbance_extracted_40m)<-col_names -if( fia==1){ -# extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. -# extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. - extracted_48m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -# extracted_60m<-matrix(nrow=0, ncol=7) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -} else{ -# extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. -# extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. - extracted_48m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. 
ncol=# of pol_bands -# extracted_60m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +if (fia == 1) { + # extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. + # extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. + extracted_48m <- matrix(nrow = 0, ncol = 8) # matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands + # extracted_60m<-matrix(nrow=0, ncol=7) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +} else { + # extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. + # extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. + extracted_48m <- matrix(nrow = 0, ncol = 8) # matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands + # extracted_60m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands } # colnames(extracted_48m)<-pol_bands # colnames(extracted_60m)<-pol_bands -for(i in 1:numfiles){ - HH_filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[1]), pattern=".tif" ,recursive=F)) - HH_inpath<-file.path(palsar_inpath, pol_bands[1],HH_filelist[i]) - HH_rast<-raster(HH_inpath) - - HV_filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern=".tif" ,recursive=F)) - HV_inpath<-file.path(palsar_inpath, pol_bands[2],HV_filelist[i]) - HV_rast<-raster(HV_inpath) - - ################################################ - ## Check each extraction coordinate to see if it falls inside the bounding box of this palsar scene. - ## Only extract the ones that do. - ## NOTE: The bounding box for the PALSAR scene will be larger than the PALSAR scene itself (due to a tilted orbital path). - ## This means that the extraction loop will some number of palsar scenes that have all zeros for the backscatter values. - ## These zeros are truncated in post processing, prior to curve fitting. 
- ################################################ -# rast_box<-bbox(HH_rast@extent) #bounding box of single palsar scene - scnid<-substr(as.character(HV_filelist[i]),1,15) - - ##create data.frame from raster corner coords by querying metadata - ##NOTE: I multiply the lon coord by -1 to make it work with the 'longlat' projection -# pals.ext<-Polygon(rbind( -# c(metadata$scn_nwlon[metadata$scnid==scnid[1]],metadata$scn_nwlat[metadata$scnid==scnid[1]]), -# c(metadata$scn_nelon[metadata$scnid==scnid[1]],metadata$scn_nelat[metadata$scnid==scnid[1]]), -# c(metadata$scn_selon[metadata$scnid==scnid[1]],metadata$scn_selat[metadata$scnid==scnid[1]]), -# c(metadata$scn_swlon[metadata$scnid==scnid[1]],metadata$scn_swlat[metadata$scnid==scnid[1]]), -# c(metadata$scn_nwlon[metadata$scnid==scnid[1]],metadata$scn_nwlat[metadata$scnid==scnid[1]]))) - -pals.ext<-Polygon(rbind( - c(xmin(HH_rast),ymin(HH_rast)), - c(xmin(HH_rast),ymax(HH_rast)), - c(xmax(HH_rast),ymax(HH_rast)), - c(xmax(HH_rast),ymin(HH_rast)), - c(xmin(HH_rast),ymin(HH_rast)))) - - - ##make spatial polygon from raster extent - pals.ext.poly<- Polygons(list(pals.ext),"pals.ext") #spatial polygons (plural) -# scn.extent<-SpatialPolygons(list(pals.ext.poly),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -scn.extent<-SpatialPolygons(list(pals.ext.poly),proj4string=CRS(CRSargs(HH_rast@crs))) - -# rast_Poly<-Polygon(rbind( #polygon from bbox NOTE: bbox is not same as true raster extent -# c(rast_box[1,1],rast_box[2,2]), -# c(rast_box[1,2],rast_box[2,2]), -# c(rast_box[1,2],rast_box[2,1]), -# c(rast_box[1,1],rast_box[2,1]), -# c(rast_box[1,1],rast_box[2,2]))) -# Srs1<- Polygons(list(rast_Poly),"PALSAR_extent") #spatial polygon -# pals_ext<-SpatialPolygons(list(Srs1),proj4string=HH_rast@crs) - scn.extent<- spTransform(scn.extent,HH_rast@crs) -# if(i == 1){ - spcheascoords<-spTransform(spcheascoords,HH_rast@crs) #Convert coords being extracted to CRS of PALSAR raster files -# } - - coords.in.rast<-over(spcheascoords,scn.extent) #extraction coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene - coords.in.rast[is.na(coords.in.rast)]<-0 #replace na's with 0's for indexing - if(max(coords.in.rast)!=1){ #jump to next palsar file if no extraction coordinates fall within this one - next - } - coords.in.rast<-as.logical(coords.in.rast) - +for (i in 1:numfiles) { + HH_filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[1]), pattern = ".tif", recursive = F)) + HH_inpath <- file.path(palsar_inpath, pol_bands[1], HH_filelist[i]) + HH_rast <- raster(HH_inpath) + + HV_filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern = ".tif", recursive = F)) + HV_inpath <- file.path(palsar_inpath, pol_bands[2], HV_filelist[i]) + HV_rast <- raster(HV_inpath) + + ################################################ + ## Check each extraction coordinate to see if it falls inside the bounding box of this palsar scene. + ## Only extract the ones that do. + ## NOTE: The bounding box for the PALSAR scene will be larger than the PALSAR scene itself (due to a tilted orbital path). + ## This means that the extraction loop will some number of palsar scenes that have all zeros for the backscatter values. + ## These zeros are truncated in post processing, prior to curve fitting. 
+ ################################################ + # rast_box<-bbox(HH_rast@extent) #bounding box of single palsar scene + scnid <- substr(as.character(HV_filelist[i]), 1, 15) + + ## create data.frame from raster corner coords by querying metadata + ## NOTE: I multiply the lon coord by -1 to make it work with the 'longlat' projection + # pals.ext<-Polygon(rbind( + # c(metadata$scn_nwlon[metadata$scnid==scnid[1]],metadata$scn_nwlat[metadata$scnid==scnid[1]]), + # c(metadata$scn_nelon[metadata$scnid==scnid[1]],metadata$scn_nelat[metadata$scnid==scnid[1]]), + # c(metadata$scn_selon[metadata$scnid==scnid[1]],metadata$scn_selat[metadata$scnid==scnid[1]]), + # c(metadata$scn_swlon[metadata$scnid==scnid[1]],metadata$scn_swlat[metadata$scnid==scnid[1]]), + # c(metadata$scn_nwlon[metadata$scnid==scnid[1]],metadata$scn_nwlat[metadata$scnid==scnid[1]]))) + + pals.ext <- Polygon(rbind( + c(xmin(HH_rast), ymin(HH_rast)), + c(xmin(HH_rast), ymax(HH_rast)), + c(xmax(HH_rast), ymax(HH_rast)), + c(xmax(HH_rast), ymin(HH_rast)), + c(xmin(HH_rast), ymin(HH_rast)) + )) + + + ## make spatial polygon from raster extent + pals.ext.poly <- Polygons(list(pals.ext), "pals.ext") # spatial polygons (plural) + # scn.extent<-SpatialPolygons(list(pals.ext.poly),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + scn.extent <- SpatialPolygons(list(pals.ext.poly), proj4string = CRS(CRSargs(HH_rast@crs))) + + # rast_Poly<-Polygon(rbind( #polygon from bbox NOTE: bbox is not same as true raster extent + # c(rast_box[1,1],rast_box[2,2]), + # c(rast_box[1,2],rast_box[2,2]), + # c(rast_box[1,2],rast_box[2,1]), + # c(rast_box[1,1],rast_box[2,1]), + # c(rast_box[1,1],rast_box[2,2]))) + # Srs1<- Polygons(list(rast_Poly),"PALSAR_extent") #spatial polygon + # pals_ext<-SpatialPolygons(list(Srs1),proj4string=HH_rast@crs) + scn.extent <- spTransform(scn.extent, HH_rast@crs) + # if(i == 1){ + spcheascoords <- spTransform(spcheascoords, HH_rast@crs) # Convert coords being extracted to CRS of PALSAR raster files + # } + + coords.in.rast <- over(spcheascoords, scn.extent) # extraction coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene + coords.in.rast[is.na(coords.in.rast)] <- 0 # replace na's with 0's for indexing + if (max(coords.in.rast) != 1) { # jump to next palsar file if no extraction coordinates fall within this one + next + } + coords.in.rast <- as.logical(coords.in.rast) + ################################ - ##calibration PASLAR data from extraction coords (mean of pixels w/in 48m buffer radius) + ## calibration PASLAR data from extraction coords (mean of pixels w/in 48m buffer radius) ################################ - HH_data_48m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow - HV_data_48m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow - #extract SE's also? - scnid<-matrix(substr(as.character(HV_filelist[i]),1,15),nrow=length(HH_data_48m),ncol=1) #vector of this scnid. 
length = number of coords in this scene - palsar_date<-matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid==scnid[1]]),1,8),"%Y%m%d")),nrow=length(HH_data_48m),ncol=1) # same as above for scn date - - ##cbind for output - if(fia==1){ - all_48<-cbind(scnid,palsar_date,plot,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_48m,HV_data_48m) #for FIA (no plot identifiers) - } else{ - all_48<- cbind(scnid,palsar_date,as.character(calib_infile$plot[coords.in.rast]),spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_48m,HV_data_48m) #for WLEF - } - ##rbind to previous loop output -# if(i==1){ -# extracted_48m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_48 -# }else if(i>1){ -# extracted_48m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_48 -# } - extracted_48m<-rbind(extracted_48m,all_48) + HH_data_48m <- extract(HH_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # Extract backscatter values from all coords in this scn. This step is very slow + HV_data_48m <- extract(HV_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # Extract backscatter values from all coords in this scn. This step is very slow + # extract SE's also? + scnid <- matrix(substr(as.character(HV_filelist[i]), 1, 15), nrow = length(HH_data_48m), ncol = 1) # vector of this scnid. length = number of coords in this scene + palsar_date <- matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid == scnid[1]]), 1, 8), "%Y%m%d")), nrow = length(HH_data_48m), ncol = 1) # same as above for scn date + + ## cbind for output + if (fia == 1) { + all_48 <- cbind(scnid, palsar_date, plot, spcheascoords[coords.in.rast]@coords, biomass[coords.in.rast], HH_data_48m, HV_data_48m) # for FIA (no plot identifiers) + } else { + all_48 <- cbind(scnid, palsar_date, as.character(calib_infile$plot[coords.in.rast]), spcheascoords[coords.in.rast]@coords, biomass[coords.in.rast], HH_data_48m, HV_data_48m) # for WLEF + } + ## rbind to previous loop output + # if(i==1){ + # extracted_48m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_48 + # }else if(i>1){ + # extracted_48m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_48 + # } + extracted_48m <- rbind(extracted_48m, all_48) ############################### - #calibration PASLAR data from extraction coords (mean of pixles w/in 60m buffer radius) + # calibration PASLAR data from extraction coords (mean of pixles w/in 60m buffer radius) ############################### -# HH_data_60m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow -# HV_data_60m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. 
This step is very slow -# -# ##cbind for output -# if(fia==1){ -# all_60<-cbind(scnid,palsar_date,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for FIA (no plot identifiers) -# } else{ -# all_60<- cbind(scnid,palsar_date,calib_infile$plot[coords.in.rast],spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for WLEF -# } -# -# # ##rbind to previous loop output -# # if(i==1){ -# # extracted_60m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_60 -# # }else if(i>1){ -# # extracted_60m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_60 -# # } -# extracted_60m<-rbind(extracted_60m,all_60) - - print(paste("i=",i,sep="")) - print(scnid[1]) - print(palsar_date[1]) -# print(length(HH_data_48m) == length(HH_data_60m)) + # HH_data_60m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow + # HV_data_60m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow + # + # ##cbind for output + # if(fia==1){ + # all_60<-cbind(scnid,palsar_date,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for FIA (no plot identifiers) + # } else{ + # all_60<- cbind(scnid,palsar_date,calib_infile$plot[coords.in.rast],spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for WLEF + # } + # + # # ##rbind to previous loop output + # # if(i==1){ + # # extracted_60m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_60 + # # }else if(i>1){ + # # extracted_60m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_60 + # # } + # extracted_60m<-rbind(extracted_60m,all_60) + + print(paste("i=", i, sep = "")) + print(scnid[1]) + print(palsar_date[1]) + # print(length(HH_data_48m) == length(HH_data_60m)) } # write.csv(extracted_48m,file=paste(outpath,"/extracted_48m.csv",sep=""),quote=FALSE,row.names=F) # write.csv(extracted_60m,file=paste(outpath,"/extracted_60m.csv",sep=""),quote=FALSE,row.names=F) ## Create working copy of data (so that I don't need to re-extract if I screw up the data) -## NOTE: Here I remove the NAs from coords that don't fall with in the scene and +## NOTE: Here I remove the NAs from coords that don't fall with in the scene and ## the zeros that are an artifact of the mismatch between palsar bbox dim and palsar raster dim (due to tilted orbital path) # dat48<-data.frame(extracted_48m[as.numeric(extracted_48m[,ncol(extracted_48m)])!=0,]) #& extracted_48m[,ncol(extracted_48m)]>0,]) -dat48<-data.frame(na.exclude(extracted_48m)) +dat48 <- data.frame(na.exclude(extracted_48m)) # dat60<-data.frame(extracted_60m[as.numeric(extracted_60m[,ncol(extracted_60m)])!=0,]) #& extracted_60m[,ncol(extracted_60m)]>0,]) # dat60<-data.frame(extracted_60m) -if(fia==1){ #FIA data does not contain a plot-id, so here I add a dummy plot-id -# plot<-seq(1,nrow(dat48),1) -# dat48<-cbind(dat48[,1:2],plot,dat48[,3:7]) - colnames(dat48)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.48", "HV.sigma.48") -# colnames(dat60)<-c("scnid","scndate", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") -}else{ - 
colnames(dat48)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.48", "HV.sigma.48") -# colnames(dat60)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") +if (fia == 1) { # FIA data does not contain a plot-id, so here I add a dummy plot-id + # plot<-seq(1,nrow(dat48),1) + # dat48<-cbind(dat48[,1:2],plot,dat48[,3:7]) + colnames(dat48) <- c("scnid", "scndate", "plot", "UTM.lat", "UTM.lon", "biomass", "HH.sigma.48", "HV.sigma.48") + # colnames(dat60)<-c("scnid","scndate", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") +} else { + colnames(dat48) <- c("scnid", "scndate", "plot", "UTM.lat", "UTM.lon", "biomass", "HH.sigma.48", "HV.sigma.48") + # colnames(dat60)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") } ## NOTE: Converting to dataframe changes all values to factor, so here I reformat the data and save it -dat48$scnid<-as.character(dat48$scnid) -dat48$scndate<-as.Date(dat48$scndate,"%Y-%m-%d") -dat48$plot<-as.numeric(dat48$plot) -dat48$UTM.lat<- as.numeric(as.character(dat48$UTM.lat)) -dat48$UTM.lon<- as.numeric(as.character(dat48$UTM.lon)) -dat48$biomass<- as.numeric(as.character(dat48$biomass)) -dat48$HH.sigma.48<- as.numeric(as.character(dat48$HH.sigma.48)) -dat48$HV.sigma.48<- as.numeric(as.character(dat48$HV.sigma.48)) - -#This will exclude scenes from the leaf off period (Nov-April) -if(leaf.off==1){ #include leaf off data - dat48<-dat48 -}else{ #exclude leaf off data - dat48<-dat48[as.numeric(format(dat48$scndate,"%m"))>=05 & as.numeric(format(dat48$scndate,"%m"))<=10,] +dat48$scnid <- as.character(dat48$scnid) +dat48$scndate <- as.Date(dat48$scndate, "%Y-%m-%d") +dat48$plot <- as.numeric(dat48$plot) +dat48$UTM.lat <- as.numeric(as.character(dat48$UTM.lat)) +dat48$UTM.lon <- as.numeric(as.character(dat48$UTM.lon)) +dat48$biomass <- as.numeric(as.character(dat48$biomass)) +dat48$HH.sigma.48 <- as.numeric(as.character(dat48$HH.sigma.48)) +dat48$HV.sigma.48 <- as.numeric(as.character(dat48$HV.sigma.48)) + +# This will exclude scenes from the leaf off period (Nov-April) +if (leaf.off == 1) { # include leaf off data + dat48 <- dat48 +} else { # exclude leaf off data + dat48 <- dat48[as.numeric(format(dat48$scndate, "%m")) >= 05 & as.numeric(format(dat48$scndate, "%m")) <= 10, ] } -#Save extracted data -write.table(dat48,file=paste(outpath,"/",coord.set[fia+1],"_dat48.csv",sep=""),sep=",",quote=FALSE,col.names = TRUE, row.names=F) - -#Switch to working from saved data -dat48<-read.csv(paste(outpath,"/",coord.set[fia+1],"_dat48.csv",sep=""),header = TRUE) - -#Correctly format data (again...sigh...) -dat48$scnid<-as.character(dat48$scnid) -dat48$scndate<-as.Date(dat48$scndate,"%Y-%m-%d") -dat48$plot<-as.numeric(dat48$plot) -dat48$UTM.lat<- as.numeric(as.character(dat48$UTM.lat)) -dat48$UTM.lon<- as.numeric(as.character(dat48$UTM.lon)) -dat48$biomass<- as.numeric(as.character(dat48$biomass)) -dat48$HH.sigma.48<- as.numeric(as.character(dat48$HH.sigma.48)) -dat48$HV.sigma.48<- as.numeric(as.character(dat48$HV.sigma.48)) +# Save extracted data +write.table(dat48, file = paste(outpath, "/", coord.set[fia + 1], "_dat48.csv", sep = ""), sep = ",", quote = FALSE, col.names = TRUE, row.names = F) + +# Switch to working from saved data +dat48 <- read.csv(paste(outpath, "/", coord.set[fia + 1], "_dat48.csv", sep = ""), header = TRUE) + +# Correctly format data (again...sigh...) 
+dat48$scnid <- as.character(dat48$scnid) +dat48$scndate <- as.Date(dat48$scndate, "%Y-%m-%d") +dat48$plot <- as.numeric(dat48$plot) +dat48$UTM.lat <- as.numeric(as.character(dat48$UTM.lat)) +dat48$UTM.lon <- as.numeric(as.character(dat48$UTM.lon)) +dat48$biomass <- as.numeric(as.character(dat48$biomass)) +dat48$HH.sigma.48 <- as.numeric(as.character(dat48$HH.sigma.48)) +dat48$HV.sigma.48 <- as.numeric(as.character(dat48$HV.sigma.48)) # dat60$scnid<-as.character(dat60$scnid) # dat60$scndate<-as.Date(dat60$scndate,"%Y-%M-%d") @@ -316,90 +321,90 @@ dat48$HV.sigma.48<- as.numeric(as.character(dat48$HV.sigma.48)) # dat60$HV.sigma.60<- as.numeric(as.character(dat60$HV.sigma.60)) # write.csv(dat60,file=paste(outpath,"/dat60.csv",sep=""),quote=FALSE,row.names=F) -#Generate PDF of raw data exploration -#NOTE: Some of these figures will not be relevant for the FIA dataset -pdf(paste(outpath,"/",coord.set[fia+1], "_ExtractionQCplots.pdf",sep=""),width = 6, height = 6, paper='special') - -par(mfrow=c(1,2)) -years<-as.numeric(format(dat48$scndate,"%Y")) -hist(years,freq=TRUE,main="By year") -months<-as.numeric(format(dat48$scndate,"%m")) -hist(months,freq=TRUE,main="By month") - -par(mfrow=c(1,3)) -hist(dat48$biomass,main=paste(coord.set[fia+1],"biomass",sep=" ")) -hist(dat48$HH.sigma.48,main=paste(coord.set[fia+1],"HH",sep=" ")) -hist(dat48$HV.sigma.48,main=paste(coord.set[fia+1],"HV",sep=" ")) - -Lab.palette <- colorRampPalette(c("blue","green","yellow","orange", "red"), space = "Lab") -par(mfrow=c(1,3)) -smoothScatter(dat48$HV.sigma.48,dat48$HH.sigma.48,nbin=256,colramp = Lab.palette,xlab="HV",ylab="HH") -smoothScatter(dat48$biomass,dat48$HH.sigma.48,nbin=256,colramp = Lab.palette,xlab="biomass",ylab="HH",main="Density") -smoothScatter(dat48$biomass,dat48$HV.sigma.48,nbin=256,colramp = Lab.palette,ylim=c(0,max(dat48$HH.sigma.48)),xlab="biomass",ylab="HV") - -par(mfrow=c(1,2)) -scatter.smooth(dat48$biomass,dat48$HH.sigma.48,cex=0,xlab="biomass",ylab="HH",main="48m",col="grey") - points(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2007],col=1,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2008],col=2,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2009],col=3,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col=4,cex=0.5) - legend("topright",pch=1,legend=c(2007,2008,2009,2010), cex=0.7,pt.cex=0.5,col=1:4,bty="n",xjust=1) -scatter.smooth(dat48$biomass,dat48$HV.sigma.48,cex=0,xlab="biomass",ylab="HV",main="48m",col="grey") - points(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2007],col=1,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2008],col=2,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2009],col=3,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],col=4,cex=0.5) - legend("topright",pch=1,legend=c(2007,2008,2009,2010), cex=0.7,pt.cex=0.5,col=1:4,bty="n",xjust=1) +# Generate PDF of raw data exploration +# NOTE: Some of these figures will not be relevant for the FIA dataset +pdf(paste(outpath, "/", coord.set[fia + 1], "_ExtractionQCplots.pdf", sep = ""), width = 6, height = 6, paper = "special") + +par(mfrow = c(1, 
2)) +years <- as.numeric(format(dat48$scndate, "%Y")) +hist(years, freq = TRUE, main = "By year") +months <- as.numeric(format(dat48$scndate, "%m")) +hist(months, freq = TRUE, main = "By month") + +par(mfrow = c(1, 3)) +hist(dat48$biomass, main = paste(coord.set[fia + 1], "biomass", sep = " ")) +hist(dat48$HH.sigma.48, main = paste(coord.set[fia + 1], "HH", sep = " ")) +hist(dat48$HV.sigma.48, main = paste(coord.set[fia + 1], "HV", sep = " ")) + +Lab.palette <- colorRampPalette(c("blue", "green", "yellow", "orange", "red"), space = "Lab") +par(mfrow = c(1, 3)) +smoothScatter(dat48$HV.sigma.48, dat48$HH.sigma.48, nbin = 256, colramp = Lab.palette, xlab = "HV", ylab = "HH") +smoothScatter(dat48$biomass, dat48$HH.sigma.48, nbin = 256, colramp = Lab.palette, xlab = "biomass", ylab = "HH", main = "Density") +smoothScatter(dat48$biomass, dat48$HV.sigma.48, nbin = 256, colramp = Lab.palette, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "biomass", ylab = "HV") + +par(mfrow = c(1, 2)) +scatter.smooth(dat48$biomass, dat48$HH.sigma.48, cex = 0, xlab = "biomass", ylab = "HH", main = "48m", col = "grey") +points(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2007], col = 1, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2008], col = 2, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2009], col = 3, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = 4, cex = 0.5) +legend("topright", pch = 1, legend = c(2007, 2008, 2009, 2010), cex = 0.7, pt.cex = 0.5, col = 1:4, bty = "n", xjust = 1) +scatter.smooth(dat48$biomass, dat48$HV.sigma.48, cex = 0, xlab = "biomass", ylab = "HV", main = "48m", col = "grey") +points(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2007], col = 1, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2008], col = 2, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2009], col = 3, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], col = 4, cex = 0.5) +legend("topright", pch = 1, legend = c(2007, 2008, 2009, 2010), cex = 0.7, pt.cex = 0.5, col = 1:4, bty = "n", xjust = 1) # scatter.smooth(dat60$biomass,dat60$HV.sigma.60,xlab="biomass",ylab="HV",main="60m",col="grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60,xlab="biomass",ylab="HV",main="60m",col="grey") -par(mfrow=c(1,2)) -scatter.smooth(dat48$biomass,dat48$HV.sigma.48/dat48$HH.sigma.48,xlab="biomass",ylab="HV/HH",main="48m",col="grey") +par(mfrow = c(1, 2)) +scatter.smooth(dat48$biomass, dat48$HV.sigma.48 / dat48$HH.sigma.48, xlab = "biomass", ylab = "HV/HH", main = "48m", col = "grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60/dat60$HV.sigma.60,xlab="biomass",ylab="HV/HV",main="60m",col="grey") -scatter.smooth(dat48$biomass,dat48$HH.sigma.48*dat48$HV.sigma.48,xlab="biomass",ylab="HHxHV",main="48m",col="grey") +scatter.smooth(dat48$biomass, dat48$HH.sigma.48 * dat48$HV.sigma.48, xlab = "biomass", ylab = "HHxHV", main = "48m", col = "grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60*dat60$HV.sigma.60,xlab="biomass",ylab="HVxHV",main="60m",col="grey") -par(mfrow=c(1,1)) 
-scatter.smooth(dat48$biomass,(dat48$HH.sigma.48-dat48$HV.sigma.48)/(dat48$HH.sigma.48+dat48$HV.sigma.48),xlab="biomass",ylab="(HH-HV)/(HH+HV)",main="48m", col="gray") +par(mfrow = c(1, 1)) +scatter.smooth(dat48$biomass, (dat48$HH.sigma.48 - dat48$HV.sigma.48) / (dat48$HH.sigma.48 + dat48$HV.sigma.48), xlab = "biomass", ylab = "(HH-HV)/(HH+HV)", main = "48m", col = "gray") # scatter.smooth(dat60$biomass,(dat60$HV.sigma.60-dat60$HV.sigma.60)/(dat60$HV.sigma.60+dat60$HV.sigma.60),xlab="biomass",ylab="(HV-HV)/(HV+HV)",main="60m", col="gray") -par(mfrow=c(4,2),mar=c(4,4,2,2)) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2007],col="grey",xlab="biomass",ylab="HH",main="2007") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2007],col="grey",xlab="biomass",ylab="HV",main="2007") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2008],col="grey",xlab="biomass",ylab="HH",main="2008") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2008],col="grey",xlab="biomass",ylab="HV",main="2008") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2009],col="grey",xlab="biomass",ylab="HH",main="2009") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2009],col="grey",xlab="biomass",ylab="HV",main="2009") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010") - points(dat48$biomass[format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%m")>10],col="red",xlab="biomass",ylab="HV",main="2010") - legend("topright",pch=1,legend=c("!Dec","Dec"), cex=0.7,pt.cex=0.5,col=c("grey","red"),bty="n",xjust=1) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HV",main="2010") - points(dat48$biomass[format(dat48$scndate,"%m")>10],dat48$HV.sigma.48[format(dat48$scndate,"%m")>10],col="red",xlab="biomass",ylab="HV",main="2010") - legend("topright",pch=1,legend=c("!Dec","Dec"), cex=0.7,pt.cex=0.5,col=c("grey","red"),bty="n",xjust=1) - -par(mfrow=c(1,2)) -plot(dat48$scndate,dat48$HH.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HH") -plot(dat48$scndate,dat48$HV.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HV") -mtext("On same scale", side=3, line=-2, outer=TRUE, cex=1, font=2) - -par(mfrow=c(1,2)) -plot(dat48$scndate,dat48$HH.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HH") -plot(dat48$scndate,dat48$HV.sigma.48,ylim=c(0,max(dat48$HV.sigma.48)),xlab="Date",ylab="HV") -mtext("By Date", side=3, line=-2, outer=TRUE, cex=1, font=2) - -par(mfrow=c(1,2)) -plot(dat48$scndate[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],xlab="2010",ylab="HH") -plot(dat48$scndate[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],xlab="2010",ylab="HV") -mtext("2010 only", side=3, line=-3, outer=TRUE, cex=1, font=2) - -if(leaf.off==1){ -par(mfrow=c(2,2)) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010 only") 
-points(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10]) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010 only") -points(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10]) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],col="grey",xlab="biomass",ylab="HH",main="2010 only,Dec. removed") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],col="grey",xlab="biomass",ylab="HV",main="2010 only,Dec. removed") +par(mfrow = c(4, 2), mar = c(4, 4, 2, 2)) +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2007], col = "grey", xlab = "biomass", ylab = "HH", main = "2007") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2007], col = "grey", xlab = "biomass", ylab = "HV", main = "2007") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2008], col = "grey", xlab = "biomass", ylab = "HH", main = "2008") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2008], col = "grey", xlab = "biomass", ylab = "HV", main = "2008") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2009], col = "grey", xlab = "biomass", ylab = "HH", main = "2009") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2009], col = "grey", xlab = "biomass", ylab = "HV", main = "2009") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010") +points(dat48$biomass[format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%m") > 10], col = "red", xlab = "biomass", ylab = "HV", main = "2010") +legend("topright", pch = 1, legend = c("!Dec", "Dec"), cex = 0.7, pt.cex = 0.5, col = c("grey", "red"), bty = "n", xjust = 1) +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HV", main = "2010") +points(dat48$biomass[format(dat48$scndate, "%m") > 10], dat48$HV.sigma.48[format(dat48$scndate, "%m") > 10], col = "red", xlab = "biomass", ylab = "HV", main = "2010") +legend("topright", pch = 1, legend = c("!Dec", "Dec"), cex = 0.7, pt.cex = 0.5, col = c("grey", "red"), bty = "n", xjust = 1) + +par(mfrow = c(1, 2)) +plot(dat48$scndate, dat48$HH.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HH") +plot(dat48$scndate, dat48$HV.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HV") +mtext("On same scale", side = 3, line = -2, outer = TRUE, cex = 1, font = 2) + +par(mfrow = c(1, 2)) +plot(dat48$scndate, dat48$HH.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = 
"HH") +plot(dat48$scndate, dat48$HV.sigma.48, ylim = c(0, max(dat48$HV.sigma.48)), xlab = "Date", ylab = "HV") +mtext("By Date", side = 3, line = -2, outer = TRUE, cex = 1, font = 2) + +par(mfrow = c(1, 2)) +plot(dat48$scndate[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], xlab = "2010", ylab = "HH") +plot(dat48$scndate[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], xlab = "2010", ylab = "HV") +mtext("2010 only", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + +if (leaf.off == 1) { + par(mfrow = c(2, 2)) + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only") + points(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10]) + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only") + points(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10]) + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only,Dec. removed") + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], col = "grey", xlab = "biomass", ylab = "HV", main = "2010 only,Dec. 
removed") } # #Plot individual time series of HH for each coordinate set @@ -422,38 +427,38 @@ scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scn # par(new=T) # } -#breaks data into quantiles each containing ~5% of the data -bind.bio<-tapply(dat48$biomass,cut(dat48$biomass,breaks=round(quantile(dat48$biomass,probs = seq(0, 1, 0.05))) ),mean) -bind.HH<-tapply(dat48$HH.sigma.48,cut(dat48$HH.sigma.48,breaks=quantile(dat48$HH.sigma.48,probs = seq(0, 1, 0.05)) ),mean) -bind.HV<-tapply(dat48$HV.sigma.48,cut(dat48$HV.sigma.48,breaks=quantile(dat48$HV.sigma.48,probs = seq(0, 1, 0.05)) ),mean) -par(new=FALSE, mfrow=c(1,2)) -plot(dat48$biomass,dat48$HH.sigma.48,col="grey",pch=".",xlab="Binned Biomass",ylab="Binned HH") - points(bind.bio,bind.HH) -plot(dat48$biomass,dat48$HV.sigma.48,col="grey",,pch=".",xlab="Binned Biomass",ylab="Binned HV") - points(bind.bio,bind.HV) -mtext("Bins each contain 5% of the data points", side=3, line=-3, outer=TRUE, cex=1, font=2) - -#breaks data into even-length bins -bind.bio<-tapply(dat48$biomass, cut(dat48$biomass, breaks=seq(0, max(dat48$biomass), 0.05*max(dat48$biomass))),mean) -bind.HH<-tapply(dat48$HH.sigma.48,cut(dat48$HH.sigma.48,breaks=seq(0, max(dat48$HH.sigma.48), 0.05*max(dat48$HH.sigma.48))),mean) -bind.HV<-tapply(dat48$HV.sigma.48,cut(dat48$HV.sigma.48,breaks=seq(0, max(dat48$HV.sigma.48), 0.05*max(dat48$HV.sigma.48))),mean) -par(mfrow=c(1,2)) -plot(dat48$biomass,dat48$HH.sigma.48,col="grey",pch=".",xlab="Binned Biomass",ylab="Binned HH") - points(bind.bio,bind.HH) -plot(dat48$biomass,dat48$HV.sigma.48,col="grey",,pch=".",xlab="Binned Biomass",ylab="Binned HV") - points(bind.bio,bind.HV) -mtext("Bins each contain 5% of data range", side=3, line=-3, outer=TRUE, cex=1, font=2) - -par(mfrow=c(1,2)) -bplot.xy(dat48$biomass,dat48$HH.sigma.48,N=15,xlab="biomass",ylab="HH (simga naught)") -bplot.xy(dat48$biomass,dat48$HV.sigma.48,N=15,xlab="biomass",ylab="HV (simga naught)") +# breaks data into quantiles each containing ~5% of the data +bind.bio <- tapply(dat48$biomass, cut(dat48$biomass, breaks = round(quantile(dat48$biomass, probs = seq(0, 1, 0.05)))), mean) +bind.HH <- tapply(dat48$HH.sigma.48, cut(dat48$HH.sigma.48, breaks = quantile(dat48$HH.sigma.48, probs = seq(0, 1, 0.05))), mean) +bind.HV <- tapply(dat48$HV.sigma.48, cut(dat48$HV.sigma.48, breaks = quantile(dat48$HV.sigma.48, probs = seq(0, 1, 0.05))), mean) +par(new = FALSE, mfrow = c(1, 2)) +plot(dat48$biomass, dat48$HH.sigma.48, col = "grey", pch = ".", xlab = "Binned Biomass", ylab = "Binned HH") +points(bind.bio, bind.HH) +plot(dat48$biomass, dat48$HV.sigma.48, col = "grey", , pch = ".", xlab = "Binned Biomass", ylab = "Binned HV") +points(bind.bio, bind.HV) +mtext("Bins each contain 5% of the data points", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + +# breaks data into even-length bins +bind.bio <- tapply(dat48$biomass, cut(dat48$biomass, breaks = seq(0, max(dat48$biomass), 0.05 * max(dat48$biomass))), mean) +bind.HH <- tapply(dat48$HH.sigma.48, cut(dat48$HH.sigma.48, breaks = seq(0, max(dat48$HH.sigma.48), 0.05 * max(dat48$HH.sigma.48))), mean) +bind.HV <- tapply(dat48$HV.sigma.48, cut(dat48$HV.sigma.48, breaks = seq(0, max(dat48$HV.sigma.48), 0.05 * max(dat48$HV.sigma.48))), mean) +par(mfrow = c(1, 2)) +plot(dat48$biomass, dat48$HH.sigma.48, col = "grey", pch = ".", xlab = "Binned Biomass", ylab = "Binned HH") +points(bind.bio, bind.HH) +plot(dat48$biomass, dat48$HV.sigma.48, col = "grey", , pch = ".", xlab = "Binned Biomass", ylab = "Binned HV") 
+points(bind.bio, bind.HV) +mtext("Bins each contain 5% of data range", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + +par(mfrow = c(1, 2)) +bplot.xy(dat48$biomass, dat48$HH.sigma.48, N = 15, xlab = "biomass", ylab = "HH (simga naught)") +bplot.xy(dat48$biomass, dat48$HV.sigma.48, N = 15, xlab = "biomass", ylab = "HV (simga naught)") dev.off() -#Run curve fitting function -n.reps<- 1000 #sets value for n.adapt and n.iter -n.chain<-3 #number of MCMC chains to run -bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) +# Run curve fitting function +n.reps <- 1000 # sets value for n.adapt and n.iter +n.chain <- 3 # number of MCMC chains to run +bayes.curve.fit(outpath, coord.set, fia, n.reps, n.chain) @@ -504,62 +509,62 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) ######################################################## ####################### -##Use Non-linear Least Squares (nls) to fit rectangular hyperbola NOTE:This method is not preferred. +## Use Non-linear Least Squares (nls) to fit rectangular hyperbola NOTE:This method is not preferred. # ####################### # ##NOTE: backscatter=((alpha*beta*biomass)/(beta + alpha*biomass)) # buff<-c("48", "60") -# -# # col.names<-c("pol_band", +# +# # col.names<-c("pol_band", # # "buffer_radius(m)", -# # "biomass_R2", -# # "mod.alpha", -# # "pval.alpha", -# # "alpha.ci.lower", +# # "biomass_R2", +# # "mod.alpha", +# # "pval.alpha", +# # "alpha.ci.lower", # # "alpha.ci.upper", -# # "mod.beta", -# # "pval.b", +# # "mod.beta", +# # "pval.b", # # "beta.ci.upper", # # "beta.ci.upper", -# # "num.iters", +# # "num.iters", # # "convergence") # # mod.params<-matrix(nrow=1,ncol=length(col.names)) # # colnames(mod.params)<-col.names -# # +# # # # par(mfrow=c(length(pol_bands),length(buff))) # # for(i in 1:length(pol_bands)){ # # for(j in 1:length(buff)){ -# # +# # # # y<-eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$",sep=''),paste(pol_bands[i],'.sigma.',buff[j],sep='')))) # # x<-(eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$biomass",sep='')))))^0.5 -# # +# # # # # Plot backscatter v biomass # # plot(x, y, # # xlab=expression(sqrt(biomass)), # # ylab=pol_bands[i], # # main=buff[j], # # col=51,pch=19, cex=0.6, -# # xlim=c(min(x),max(x)), +# # xlim=c(min(x),max(x)), # # ylim=c(min(y),max(y)), # # las=1, cex.axis=1.2) -# # +# # # # # Calculate rectangular hyperbola between backscatter and biomass # # biomass_curve <- nls(formula=y ~ ((alpha*beta*x)/(beta + alpha*x)), # also in Gu 2002 -# # data=list(y = y, x = x), -# # start=list(alpha = .75, beta = mean(y[x > quantile(x)[4]])), +# # data=list(y = y, x = x), +# # start=list(alpha = .75, beta = mean(y[x > quantile(x)[4]])), # # na.action="na.exclude", trace=F) # # biomass_R2 <- 1 - var(residuals(biomass_curve)) / var(y) # R2 -# # +# # # # # Plot rectangular hyperbola model fit # # mod.alpha <- summary(biomass_curve)$parameters[1] # alpha value # # mod.beta <- summary(biomass_curve)$parameters[2] # Beta value -# # mod.biomass <- seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), by=0.01) -# # mod.HH <- (mod.alpha*mod.beta*mod.biomass)/(mod.beta + mod.alpha*mod.biomass) +# # mod.biomass <- seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), by=0.01) +# # mod.HH <- (mod.alpha*mod.beta*mod.biomass)/(mod.beta + mod.alpha*mod.biomass) # # lines(x=mod.biomass, y=mod.HH, col="black", lty=1, lwd=2.5) -# # +# # # # legend("topright", legend=c(paste("R^2=", format(biomass_R2, digits=2)), # # paste("alpha=",format(mod.alpha,digits=2)), # # paste("beta=",format(mod.beta,digits=2))), 
bty="n",cex=1.2) -# # +# # # # # Write model parameters to output file # # num.iters <- as.numeric(biomass_curve$convInfo[2]) # # conv <- as.numeric(biomass_curve$convInfo[1]) @@ -577,23 +582,23 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # # beta.ci.lower <- as.numeric(ci[2,1]) # # beta.ci.upper <- as.numeric(ci[2,2]) # # pval.b <- as.numeric(summary(biomass_curve)$parameters[8]) -# # mod.params <- rbind(mod.params,as.vector(c(pol_bands[i], -# # buff[j], -# # biomass_R2, -# # mod.alpha, -# # pval.a, -# # alpha.ci.lower, +# # mod.params <- rbind(mod.params,as.vector(c(pol_bands[i], +# # buff[j], +# # biomass_R2, +# # mod.alpha, +# # pval.a, +# # alpha.ci.lower, # # alpha.ci.upper, -# # mod.beta, +# # mod.beta, # # pval.b, -# # beta.ci.lower, +# # beta.ci.lower, # # beta.ci.upper, -# # num.iters, +# # num.iters, # # conv))) # # print(paste(pol_bands[i],buff[j])) # # }} # # mod.params<-mod.params[2:nrow(mod.params),] -# # +# # # # xs<-seq(from=1, to=4,by=1) # # ys<-as.numeric(mod.params[,4]) # # upper<-as.numeric(mod.params[,7]) @@ -602,41 +607,41 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # # plot(xs,ys,ylim=c(0,max(upper)+0.1*max(upper)),ylab="Alpha estimate",xlab="pol.band/buffer.radius") # # segments(xs,lower,xs,upper,col="black",lwd=2) # # legend("topright",legend=c("1=48,HH", "2=60,HH","3=48,HV", "4=60,HV")) -# # +# # # # rhyp<-mod.params -# -# +# +# # ####################### # ##Use Maximum likelihood to fit curves # ####################### # data<- read.csv(file.path(outpath, "WLEF_dat48.csv"), sep=",", header=T) ##location of PALSAR metadata table -# +# # # model<-c("Holl4", "RecHyp", "Logistic") -# -# +# +# # # for(k in 1:length(model)){ #loop over different functional forms # for(i in 1:length(pol_bands)){ # for(j in 1 ){ -# +# # y<-eval(parse(text=paste(paste("data$",pol_bands[i],'.sigma.',buff[j],sep='')))) #backscatter values # # x<-(eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$biomass",sep='')))))^0.5 # x<-(data$biomass) #biomass # # max.y<-mean(y[x>=quantile(x)[4]]) #starting est. of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile -# +# # ####################### # ##Use Maximum likelihood to fit Holling Type 4 # ####################### # model<-"Holl4" -# param_est<-matrix(nrow=0,ncol=8) +# param_est<-matrix(nrow=0,ncol=8) # par(mfrow=c(1,length(pol_bands))) # pdf(paste(outpath,"/",model,"_curvefits.pdf",sep=""),width = 6, height = 6, paper='special') -# +# # a<- mean(y[x>=quantile(x,na.rm=TRUE)[4]],na.rm=TRUE) #starting est. 
of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile # b<-quantile(x,na.rm=TRUE)[4] # c<--1 -# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values +# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values # params<-c(a,b,c,sd) -# +# # fit <- function(params,x,y){ # a <-params[1] # b <-params[2] @@ -644,21 +649,21 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # sd<-params[4] # # y.pred<-(max.y*x)/(ki+x) # y.pred<-(a*x^2)/(b+(c*x)+x^2) -# +# # LL<- -sum(dnorm(y,y.pred,sd,log=TRUE)) # return(LL) # } #function -# +# # fit.mod = optim(par=params,fit,x=x,y=y) # fit.mod # aic.mod<- -2*fit.mod$value + 2*length(params) -# +# # params <- c(pol_bands[i],buff[j],fit.mod$par[1:3],2*fit.mod$par[2]/fit.mod$par[3],fit.mod$par[4],aic.mod) #par means parameter estimates # param_est<-rbind(param_est, params) # xseq = seq(0,max(x),length=1000) -# +# # plot(x,y, xlab="biomass",ylab=paste(pol_bands[i],buff[j],sep="_"),main=model, #something wrong here with main title -# xlim=c(min(x),max(x)), +# xlim=c(min(x),max(x)), # ylim=c(min(y),max(y)), # pch=16,col="#CCCCCC") # abline(a=0,b=0) @@ -668,47 +673,47 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # paste("b=",format(fit.mod$par[2],digits=2)), # paste("c=",format(fit.mod$par[3],digits=2)), # paste("aic=", format(aic.mod, digits=4))), bty="n",cex=0.85) -# +# # dev.off() # colnames(param_est)<-c("pol_bands","buff","a","b","c","bio.at.peak.backscatter", "sd","AIC") # param_est # write.table(param_est,file=paste(outpath, "/", model, "_param_estimates.csv",sep=""),quote=FALSE,sep=",",row.names=F) -# +# # ####################### # ##Use Maximum likelihood to fit Logistic # ####################### # model<-"Logistic" -# param_est<-matrix(nrow=0,ncol=7) +# param_est<-matrix(nrow=0,ncol=7) # par(mfrow=c(1,length(pol_bands))) # pdf(paste(outpath,"/",model,"_curvefits.pdf",sep=""),width = 6, height = 6, paper='special') -# +# # a<- max(y) #starting est. 
of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile # b<-2 #slope of initial portion of the curve # c<-mean(x[y>=quantile(y,0.9,na.rm=TRUE)],na.rm=TRUE) -# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values +# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values # params<-c(a,b,c,sd) -# +# # fit <- function(params,x,y){ # a <-params[1] # b <-params[2] # c <-params[3] # sd<-params[4] # y.pred<- a/(1+b*exp(-c*x)) -# +# # LL<- -sum(dnorm(y,y.pred,sd,log=TRUE)) # return(LL) # } #function -# +# # fit.mod = optim(par=params,fit,x=x,y=y) # fit.mod # aic.mod<- -2*fit.mod$value + 2*length(params) -# +# # params <- c(pol_bands[i],buff[j],fit.mod$par[1:4],aic.mod) #par means parameter estimates # param_est<-rbind(param_est, params) # xseq = seq(0,max(x),length=1000) -# +# # plot(x,y, xlab="biomass",ylab=paste(pol_bands[i],buff[j],sep="_"),main=model, #something wrong here with main title -# xlim=c(min(x),max(x)), +# xlim=c(min(x),max(x)), # ylim=c(min(y),max(y)), # pch=16,col="#CCCCCC") # abline(a=0,b=0) @@ -717,25 +722,25 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # paste("b=",format(fit.mod$par[2],digits=2)), # paste("c=",format(fit.mod$par[3],digits=2)), # paste("aic=", format(aic.mod, digits=4))), bty="n",cex=0.85) -# +# # dev.off() # colnames(param_est)<-c("pol_bands","buff","a","b","c","sd","AIC") # param_est # write.table(param_est,file=paste(outpath, "/", model, "_param_estimates.csv",sep=""),quote=FALSE,sep=",",row.names=F) -# -# -# +# +# +# # }#for j looping over pol_bands # }#for i looping over buff -# +# # }#for k looping over models -# +# # ################################## # ################################## -# -# +# +# # ################# # ##diagnotics? # ################# -# -# +# +# diff --git a/modules/data.remote/inst/scripts/old/ChEAS_FIA_03132014.R b/modules/data.remote/inst/scripts/old/ChEAS_FIA_03132014.R index b32d1b95de9..db6e3c0d63d 100644 --- a/modules/data.remote/inst/scripts/old/ChEAS_FIA_03132014.R +++ b/modules/data.remote/inst/scripts/old/ChEAS_FIA_03132014.R @@ -1,6 +1,6 @@ -##Author Brady S. Hardiman 11/12/2013 +## Author Brady S. 
Hardiman 11/12/2013 -##Read in fuzzed FIA coordinates supplied by Andy Finley (MSU), extract palsar backscatter values, fit curves, save figures and extracted values (with coordinates) +## Read in fuzzed FIA coordinates supplied by Andy Finley (MSU), extract palsar backscatter values, fit curves, save figures and extracted values (with coordinates) ################################ ## Load Required Packages @@ -19,95 +19,99 @@ library(reshape) ################################ ## OPTIONS ################################ -kml=0 #1 = generate and save kml files of extraction coordinates; 0 = do not generate new kml -fia=0 #1 = use FIA coordinates, 0 = use WLEF/Park Falls Tower coordinates -leaf.off=0 #1=include PALSAR scenes acquired duing leaf off period of the year, 0=exclude leaf off scene dates +kml <- 0 # 1 = generate and save kml files of extraction coordinates; 0 = do not generate new kml +fia <- 0 # 1 = use FIA coordinates, 0 = use WLEF/Park Falls Tower coordinates +leaf.off <- 0 # 1=include PALSAR scenes acquired during leaf off period of the year, 0=exclude leaf off scene dates # buff=c(48) #vector of buffer sizes (in meters) to extract -coord.set<-c("WLEF", "FIA") +coord.set <- c("WLEF", "FIA") # metadata<- read.csv("~/data.remote/output/metadata/output_metadata.csv", sep="\t", header=T) ##for Brady's Linux -metadata<- read.csv("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/metadata/output_metadata.csv", sep="\t", header=T) ##location of PALSAR metadata table -palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/palsar_scenes/geo_corrected_single_sigma") ##location of PALSAR raw files -calib_inpath <-"/Users/hardimanb/Desktop/data.remote(Andys_Copy)/biometry" ##location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter -outpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/data") ##For saving +metadata <- read.csv("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/metadata/output_metadata.csv", sep = "\t", header = T) ## location of PALSAR metadata table +palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/palsar_scenes/geo_corrected_single_sigma") ## location of PALSAR raw files +calib_inpath <- "/Users/hardimanb/Desktop/data.remote(Andys_Copy)/biometry" ## location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter +outpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/data") ## For saving ################################ ## Read in coordinate data for calibration of PALSAR backscatter returns -## Uses PALSAR metadata file to determine geographic extent of available PALSAR data and crops extraction coord set +## Uses PALSAR metadata file to determine geographic extent of available PALSAR data and crops extraction coord set ## to match PALSAR extent. Reprojects extraction coords to match PALSAR geotiffs. 
################################ -if(fia==1){ #EXTRACTS FROM FIA COORDINATES -# calib_inpath <-"~/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##for Brady's Linux - calib_infile <-read.csv(file.path(calib_inpath,"wi-biomass-fuzzed.csv"), sep=",", header=T) #Wisconsin FIA plots - coords<-data.frame(calib_infile$FUZZED_LON,calib_infile$FUZZED_LAT) #lon and lat (ChEAS: UTM Zone 15N NAD83) - Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - -# Sr1<-spTransform(Sr1,CRS(raster)) - -# wi.fia<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) - latlon<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) -# FIA.points <- SpatialPointsDataFrame(Sr1, wi.fia) #convert to class="SpatialPointsDataFrame" for export as kml - spdf.latlon <- SpatialPointsDataFrame(Sr1, latlon) #convert to class="SpatialPointsDataFrame" for export as kml - if(kml==1){writeOGR(spdf.latlon, layer=1, "WI_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) +if (fia == 1) { # EXTRACTS FROM FIA COORDINATES + # calib_inpath <-"~/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##for Brady's Linux + calib_infile <- read.csv(file.path(calib_inpath, "wi-biomass-fuzzed.csv"), sep = ",", header = T) # Wisconsin FIA plots + coords <- data.frame(calib_infile$FUZZED_LON, calib_infile$FUZZED_LAT) # lon and lat (ChEAS: UTM Zone 15N NAD83) + Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + + # Sr1<-spTransform(Sr1,CRS(raster)) + + # wi.fia<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) + latlon <- data.frame(calib_infile$FUZZED_LAT, calib_infile$FUZZED_LON) + # FIA.points <- SpatialPointsDataFrame(Sr1, wi.fia) #convert to class="SpatialPointsDataFrame" for export as kml + spdf.latlon <- SpatialPointsDataFrame(Sr1, latlon) # convert to class="SpatialPointsDataFrame" for export as kml + if (kml == 1) { + writeOGR(spdf.latlon, layer = 1, "WI_FIA.kml", driver = "KML") # export as kml (this puts it in the Home folder) } -}else{#EXTRACTS FROM WLEF COORDINATES +} else { # EXTRACTS FROM WLEF COORDINATES # calib_inpath <-"~/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##for Brady's Linux - calib_infile <-read.csv(file.path(calib_inpath,"biometry_trimmed.csv"), sep=",", header=T) #WLEF plots -# upid<-paste(calib_infile$plot,calib_infile$subplot,sep='.') #create unique plot identifier -# calib_infile<-cbind(calib_infile[,1:2],upid,calib_infile[,3:8]) - calib_infile<-aggregate(calib_infile, list(calib_infile[,1]), mean) ##This will give errors, but these can be safely ignored - calib_infile$plot<-calib_infile$Group.1 - calib_infile<-cbind(calib_infile[,2],calib_infile[,5:9]) - colnames(calib_infile)<-c("plot","easting","northing","adult_density","sapling_density","ABG_biomass") - - coords<-data.frame(calib_infile$easting,calib_infile$northing) #eastings and northings (ChEAS: UTM Zone 15N NAD83) - Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) - wlef<-data.frame(paste(calib_infile$plot,calib_infile$subplot,sep="_")) + calib_infile <- read.csv(file.path(calib_inpath, "biometry_trimmed.csv"), sep = ",", header = T) # WLEF plots + # upid<-paste(calib_infile$plot,calib_infile$subplot,sep='.') #create unique plot identifier + # calib_infile<-cbind(calib_infile[,1:2],upid,calib_infile[,3:8]) + calib_infile <- aggregate(calib_infile, list(calib_infile[, 1]), mean) ## This will give 
errors, but these can be safely ignored + calib_infile$plot <- calib_infile$Group.1 + calib_infile <- cbind(calib_infile[, 2], calib_infile[, 5:9]) + colnames(calib_infile) <- c("plot", "easting", "northing", "adult_density", "sapling_density", "ABG_biomass") + + coords <- data.frame(calib_infile$easting, calib_infile$northing) # eastings and northings (ChEAS: UTM Zone 15N NAD83) + Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) + wlef <- data.frame(paste(calib_infile$plot, calib_infile$subplot, sep = "_")) epsg4326String <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs") - Sr1_4google <- spTransform(Sr1,epsg4326String) #class=SpatialPoints - Sr1_4google <- SpatialPointsDataFrame(Sr1_4google, wlef) #convert to class="SpatialPointsDataFrame" for export as kml - if(kml==1){writeOGR(Sr1_4google, layer=1, "WLEF.kml", driver="KML") #export as kml (this puts in in the Home folder) + Sr1_4google <- spTransform(Sr1, epsg4326String) # class=SpatialPoints + Sr1_4google <- SpatialPointsDataFrame(Sr1_4google, wlef) # convert to class="SpatialPointsDataFrame" for export as kml + if (kml == 1) { + writeOGR(Sr1_4google, layer = 1, "WLEF.kml", driver = "KML") # export as kml (this puts it in the Home folder) } } ## corner coords for cheas domain based on avaialable PALSAR data. (Maybe switch to bounding.box.xy()? ) -ChEAS_PLASAR_extent <-rbind(cbind(min(metadata$scn_nwlon),max(metadata$scn_nwlat)), - cbind(max(metadata$scn_nelon),max(metadata$scn_nelat)), - cbind(max(metadata$scn_selon),min(metadata$scn_selat)), - cbind(min(metadata$scn_swlon),min(metadata$scn_swlat)), - cbind(min(metadata$scn_nwlon),max(metadata$scn_nwlat))) - -ChEAS_PLASAR_extent<- Polygon(ChEAS_PLASAR_extent) #spatial polygon from cheas-palsar extent -Srs1<- Polygons(list(ChEAS_PLASAR_extent),"ChEAS_PLASAR_extent") #spatial polygons (plural) -ChEAS_PLASAR_extent<-SpatialPolygons(list(Srs1),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - -Sr1<-spTransform(Sr1,CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) +ChEAS_PLASAR_extent <- rbind( + cbind(min(metadata$scn_nwlon), max(metadata$scn_nwlat)), + cbind(max(metadata$scn_nelon), max(metadata$scn_nelat)), + cbind(max(metadata$scn_selon), min(metadata$scn_selat)), + cbind(min(metadata$scn_swlon), min(metadata$scn_swlat)), + cbind(min(metadata$scn_nwlon), max(metadata$scn_nwlat)) +) + +ChEAS_PLASAR_extent <- Polygon(ChEAS_PLASAR_extent) # spatial polygon from cheas-palsar extent +Srs1 <- Polygons(list(ChEAS_PLASAR_extent), "ChEAS_PLASAR_extent") # spatial polygons (plural) +ChEAS_PLASAR_extent <- SpatialPolygons(list(Srs1), proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + +Sr1 <- spTransform(Sr1, CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) # FIA.in.cheas<-as.vector(over(FIA.points,ChEAS_PLASAR_extent)) #subset of FIA plots that falls within Cheas-PALSAR extent -coords.in.cheas<-as.vector(over(Sr1,ChEAS_PLASAR_extent)) #subset of plots that falls within Cheas-PALSAR extent +coords.in.cheas <- as.vector(over(Sr1, ChEAS_PLASAR_extent)) # subset of plots that falls within Cheas-PALSAR extent # FIA.in.cheas[is.na(FIA.in.cheas)]<-0 #replace na's with 0's for indexing -coords.in.cheas[is.na(coords.in.cheas)]<-0 #replace na's with 0's for indexing +coords.in.cheas[is.na(coords.in.cheas)] <- 0 # replace na's with 0's for indexing -##Biomass source data -if(fia==1){ - biomass<-calib_infile[as.logical(coords.in.cheas),4] #for FIA -} else{ - 
biomass<-calib_infile[as.logical(coords.in.cheas),'ABG_biomass'] #for WLEF +## Biomass source data +if (fia == 1) { + biomass <- calib_infile[as.logical(coords.in.cheas), 4] # for FIA +} else { + biomass <- calib_infile[as.logical(coords.in.cheas), "ABG_biomass"] # for WLEF } ## Subset extraction coords that fall within PALSAR observation area -# cheasFIA<-Sr1@coords[FIA.in.cheas==1,] -cheas.coords<-Sr1@coords[coords.in.cheas==1,] ##subset of coords that falls within Cheas-PALSAR extent +# cheasFIA<-Sr1@coords[FIA.in.cheas==1,] +cheas.coords <- Sr1@coords[coords.in.cheas == 1, ] ## subset of coords that falls within Cheas-PALSAR extent # spcheasFIA <- SpatialPoints(cheasFIA,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -spcheascoords <- SpatialPoints(cheas.coords,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) +spcheascoords <- SpatialPoints(cheas.coords, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -##Plot-IDs; will be used later on for generating time series of backscatter values -if(fia==1){ - plot<-seq(1,nrow(cheas.coords),1) #for FIA NOTE: Add in FIA plot unique identifiers if available -} else{ - plot<-calib_infile[as.logical(coords.in.cheas),'plot'] #for WLEF +## Plot-IDs; will be used later on for generating time series of backscatter values +if (fia == 1) { + plot <- seq(1, nrow(cheas.coords), 1) # for FIA NOTE: Add in FIA plot unique identifiers if available +} else { + plot <- calib_infile[as.logical(coords.in.cheas), "plot"] # for WLEF } -# writeOGR(cheasFIA, layer=1, "cheas_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) +# writeOGR(cheasFIA, layer=1, "cheas_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) ################################ ## Begin extracting PALSAR values at FIA plot coordinates @@ -118,225 +122,230 @@ if(fia==1){ # date<-as.Date(metadata$scndate, format='%Y%m%d') # col_names<-c(rbind(paste(date, "HH",sep="_"),paste(date, "HV",sep="_"))) -pol_bands<-c("HH", "HV") -numfiles<-length(list.files(file.path(palsar_inpath, pol_bands[1]), pattern=".tif" ,recursive=F)) +pol_bands <- c("HH", "HV") +numfiles <- length(list.files(file.path(palsar_inpath, pol_bands[1]), pattern = ".tif", recursive = F)) # lake_extracted<-matrix(NA, nrow(lake_coords),length(pol_bands)*numfiles) # disturbance_extracted_40m<-matrix(NA, nrow(disturbance_coords),length(pol_bands)*numfiles) -# +# # colnames(lake_extracted)<-col_names # colnames(disturbance_extracted)<-col_names # colnames(disturbance_extracted_40m)<-col_names -if( fia==1){ -# extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. -# extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. - extracted_48m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -# extracted_60m<-matrix(nrow=0, ncol=7) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -} else{ -# extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. -# extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. - extracted_48m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. 
ncol=# of pol_bands -# extracted_60m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +if (fia == 1) { + # extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. + # extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=7) #matrix to store extracted palsar values. + extracted_48m <- matrix(nrow = 0, ncol = 8) # matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands + # extracted_60m<-matrix(nrow=0, ncol=7) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +} else { + # extracted_48m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. + # extracted_60m<-matrix(NA, nrow=numfiles*nrow(spcheascoords@coords),ncol=8) #matrix to store extracted palsar values. + extracted_48m <- matrix(nrow = 0, ncol = 8) # matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands + # extracted_60m<-matrix(nrow=0, ncol=8) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands } # colnames(extracted_48m)<-pol_bands # colnames(extracted_60m)<-pol_bands -for(i in 1:numfiles){ - HH_filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[1]), pattern=".tif" ,recursive=F)) - HH_inpath<-file.path(palsar_inpath, pol_bands[1],HH_filelist[i]) - HH_rast<-raster(HH_inpath) - - HV_filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern=".tif" ,recursive=F)) - HV_inpath<-file.path(palsar_inpath, pol_bands[2],HV_filelist[i]) - HV_rast<-raster(HV_inpath) - - ################################################ - ## Check each extraction coordinate to see if it falls inside the bounding box of this palsar scene. - ## Only extract the ones that do. - ## NOTE: The bounding box for the PALSAR scene will be larger than the PALSAR scene itself (due to a tilted orbital path). - ## This means that the extraction loop will some number of palsar scenes that have all zeros for the backscatter values. - ## These zeros are truncated in post processing, prior to curve fitting. 
- ################################################ -# rast_box<-bbox(HH_rast@extent) #bounding box of single palsar scene - scnid<-substr(as.character(HV_filelist[i]),1,15) - - ##create data.frame from raster corner coords by querying metadata - ##NOTE: I multiply the lon coord by -1 to make it work with the 'longlat' projection -# pals.ext<-Polygon(rbind( -# c(metadata$scn_nwlon[metadata$scnid==scnid[1]],metadata$scn_nwlat[metadata$scnid==scnid[1]]), -# c(metadata$scn_nelon[metadata$scnid==scnid[1]],metadata$scn_nelat[metadata$scnid==scnid[1]]), -# c(metadata$scn_selon[metadata$scnid==scnid[1]],metadata$scn_selat[metadata$scnid==scnid[1]]), -# c(metadata$scn_swlon[metadata$scnid==scnid[1]],metadata$scn_swlat[metadata$scnid==scnid[1]]), -# c(metadata$scn_nwlon[metadata$scnid==scnid[1]],metadata$scn_nwlat[metadata$scnid==scnid[1]]))) - -pals.ext<-Polygon(rbind( - c(xmin(HH_rast),ymin(HH_rast)), - c(xmin(HH_rast),ymax(HH_rast)), - c(xmax(HH_rast),ymax(HH_rast)), - c(xmax(HH_rast),ymin(HH_rast)), - c(xmin(HH_rast),ymin(HH_rast)))) - - - ##make spatial polygon from raster extent - pals.ext.poly<- Polygons(list(pals.ext),"pals.ext") #spatial polygons (plural) -# scn.extent<-SpatialPolygons(list(pals.ext.poly),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) -scn.extent<-SpatialPolygons(list(pals.ext.poly),proj4string=CRS(CRSargs(HH_rast@crs))) - -# rast_Poly<-Polygon(rbind( #polygon from bbox NOTE: bbox is not same as true raster extent -# c(rast_box[1,1],rast_box[2,2]), -# c(rast_box[1,2],rast_box[2,2]), -# c(rast_box[1,2],rast_box[2,1]), -# c(rast_box[1,1],rast_box[2,1]), -# c(rast_box[1,1],rast_box[2,2]))) -# Srs1<- Polygons(list(rast_Poly),"PALSAR_extent") #spatial polygon -# pals_ext<-SpatialPolygons(list(Srs1),proj4string=HH_rast@crs) - scn.extent<- spTransform(scn.extent,HH_rast@crs) -# if(i == 1){ - spcheascoords<-spTransform(spcheascoords,HH_rast@crs) #Convert coords being extracted to CRS of PALSAR raster files -# } - - coords.in.rast<-over(spcheascoords,scn.extent) #extraction coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene - coords.in.rast[is.na(coords.in.rast)]<-0 #replace na's with 0's for indexing - if(max(coords.in.rast)!=1){ #jump to next palsar file if no extraction coordinates fall within this one - next - } - coords.in.rast<-as.logical(coords.in.rast) - +for (i in 1:numfiles) { + HH_filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[1]), pattern = ".tif", recursive = F)) + HH_inpath <- file.path(palsar_inpath, pol_bands[1], HH_filelist[i]) + HH_rast <- raster(HH_inpath) + + HV_filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern = ".tif", recursive = F)) + HV_inpath <- file.path(palsar_inpath, pol_bands[2], HV_filelist[i]) + HV_rast <- raster(HV_inpath) + + ################################################ + ## Check each extraction coordinate to see if it falls inside the bounding box of this palsar scene. + ## Only extract the ones that do. + ## NOTE: The bounding box for the PALSAR scene will be larger than the PALSAR scene itself (due to a tilted orbital path). + ## This means that the extraction loop will some number of palsar scenes that have all zeros for the backscatter values. + ## These zeros are truncated in post processing, prior to curve fitting. 
+ ################################################ + # rast_box<-bbox(HH_rast@extent) #bounding box of single palsar scene + scnid <- substr(as.character(HV_filelist[i]), 1, 15) + + ## create data.frame from raster corner coords by querying metadata + ## NOTE: I multiply the lon coord by -1 to make it work with the 'longlat' projection + # pals.ext<-Polygon(rbind( + # c(metadata$scn_nwlon[metadata$scnid==scnid[1]],metadata$scn_nwlat[metadata$scnid==scnid[1]]), + # c(metadata$scn_nelon[metadata$scnid==scnid[1]],metadata$scn_nelat[metadata$scnid==scnid[1]]), + # c(metadata$scn_selon[metadata$scnid==scnid[1]],metadata$scn_selat[metadata$scnid==scnid[1]]), + # c(metadata$scn_swlon[metadata$scnid==scnid[1]],metadata$scn_swlat[metadata$scnid==scnid[1]]), + # c(metadata$scn_nwlon[metadata$scnid==scnid[1]],metadata$scn_nwlat[metadata$scnid==scnid[1]]))) + + pals.ext <- Polygon(rbind( + c(xmin(HH_rast), ymin(HH_rast)), + c(xmin(HH_rast), ymax(HH_rast)), + c(xmax(HH_rast), ymax(HH_rast)), + c(xmax(HH_rast), ymin(HH_rast)), + c(xmin(HH_rast), ymin(HH_rast)) + )) + + + ## make spatial polygon from raster extent + pals.ext.poly <- Polygons(list(pals.ext), "pals.ext") # spatial polygons (plural) + # scn.extent<-SpatialPolygons(list(pals.ext.poly),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + scn.extent <- SpatialPolygons(list(pals.ext.poly), proj4string = CRS(CRSargs(HH_rast@crs))) + + # rast_Poly<-Polygon(rbind( #polygon from bbox NOTE: bbox is not same as true raster extent + # c(rast_box[1,1],rast_box[2,2]), + # c(rast_box[1,2],rast_box[2,2]), + # c(rast_box[1,2],rast_box[2,1]), + # c(rast_box[1,1],rast_box[2,1]), + # c(rast_box[1,1],rast_box[2,2]))) + # Srs1<- Polygons(list(rast_Poly),"PALSAR_extent") #spatial polygon + # pals_ext<-SpatialPolygons(list(Srs1),proj4string=HH_rast@crs) + scn.extent <- spTransform(scn.extent, HH_rast@crs) + # if(i == 1){ + spcheascoords <- spTransform(spcheascoords, HH_rast@crs) # Convert coords being extracted to CRS of PALSAR raster files + # } + + coords.in.rast <- over(spcheascoords, scn.extent) # extraction coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene + coords.in.rast[is.na(coords.in.rast)] <- 0 # replace na's with 0's for indexing + if (max(coords.in.rast) != 1) { # jump to next palsar file if no extraction coordinates fall within this one + next + } + coords.in.rast <- as.logical(coords.in.rast) + ################################ - ##calibration PASLAR data from extraction coords (mean of pixels w/in 48m buffer radius) + ## calibration PASLAR data from extraction coords (mean of pixels w/in 48m buffer radius) ################################ - HH_data_48m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow - HV_data_48m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow - #extract SE's also? - scnid<-matrix(substr(as.character(HV_filelist[i]),1,15),nrow=length(HH_data_48m),ncol=1) #vector of this scnid. 
length = number of coords in this scene - palsar_date<-matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid==scnid[1]]),1,8),"%Y%m%d")),nrow=length(HH_data_48m),ncol=1) # same as above for scn date - - ##cbind for output - if(fia==1){ - all_48<-cbind(scnid,palsar_date,plot,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_48m,HV_data_48m) #for FIA (no plot identifiers) - } else{ - all_48<- cbind(scnid,palsar_date,as.character(calib_infile$plot[coords.in.rast]),spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_48m,HV_data_48m) #for WLEF - } - ##rbind to previous loop output -# if(i==1){ -# extracted_48m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_48 -# }else if(i>1){ -# extracted_48m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_48 -# } - extracted_48m<-rbind(extracted_48m,all_48) + HH_data_48m <- extract(HH_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # Extract backscatter values from all coords in this scn. This step is very slow + HV_data_48m <- extract(HV_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # Extract backscatter values from all coords in this scn. This step is very slow + # extract SE's also? + scnid <- matrix(substr(as.character(HV_filelist[i]), 1, 15), nrow = length(HH_data_48m), ncol = 1) # vector of this scnid. length = number of coords in this scene + palsar_date <- matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid == scnid[1]]), 1, 8), "%Y%m%d")), nrow = length(HH_data_48m), ncol = 1) # same as above for scn date + + ## cbind for output + if (fia == 1) { + all_48 <- cbind(scnid, palsar_date, plot, spcheascoords[coords.in.rast]@coords, biomass[coords.in.rast], HH_data_48m, HV_data_48m) # for FIA (no plot identifiers) + } else { + all_48 <- cbind(scnid, palsar_date, as.character(calib_infile$plot[coords.in.rast]), spcheascoords[coords.in.rast]@coords, biomass[coords.in.rast], HH_data_48m, HV_data_48m) # for WLEF + } + ## rbind to previous loop output + # if(i==1){ + # extracted_48m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_48 + # }else if(i>1){ + # extracted_48m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_48 + # } + extracted_48m <- rbind(extracted_48m, all_48) ############################### - #calibration PASLAR data from extraction coords (mean of pixles w/in 60m buffer radius) + # calibration PASLAR data from extraction coords (mean of pixles w/in 60m buffer radius) ############################### -# HH_data_60m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow -# HV_data_60m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. 
This step is very slow -# -# ##cbind for output -# if(fia==1){ -# all_60<-cbind(scnid,palsar_date,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for FIA (no plot identifiers) -# } else{ -# all_60<- cbind(scnid,palsar_date,calib_infile$plot[coords.in.rast],spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for WLEF -# } -# -# # ##rbind to previous loop output -# # if(i==1){ -# # extracted_60m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_60 -# # }else if(i>1){ -# # extracted_60m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_60 -# # } -# extracted_60m<-rbind(extracted_60m,all_60) - - print(paste("i=",i,sep="")) - print(scnid[1]) - print(palsar_date[1]) -# print(length(HH_data_48m) == length(HH_data_60m)) + # HH_data_60m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow + # HV_data_60m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=60, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow + # + # ##cbind for output + # if(fia==1){ + # all_60<-cbind(scnid,palsar_date,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for FIA (no plot identifiers) + # } else{ + # all_60<- cbind(scnid,palsar_date,calib_infile$plot[coords.in.rast],spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_60m,HV_data_60m) #for WLEF + # } + # + # # ##rbind to previous loop output + # # if(i==1){ + # # extracted_60m[i : nrow(spcheascoords[coords.in.rast]@coords),]<-all_60 + # # }else if(i>1){ + # # extracted_60m[((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+1) : ((i-1)*nrow(spcheascoords[coords.in.rast]@coords)+nrow(spcheascoords[coords.in.rast]@coords)),]<-all_60 + # # } + # extracted_60m<-rbind(extracted_60m,all_60) + + print(paste("i=", i, sep = "")) + print(scnid[1]) + print(palsar_date[1]) + # print(length(HH_data_48m) == length(HH_data_60m)) } # write.csv(extracted_48m,file=paste(outpath,"/extracted_48m.csv",sep=""),quote=FALSE,row.names=F) # write.csv(extracted_60m,file=paste(outpath,"/extracted_60m.csv",sep=""),quote=FALSE,row.names=F) ## Create working copy of data (so that I don't need to re-extract if I screw up the data) -## NOTE: Here I remove the NAs from coords that don't fall with in the scene and +## NOTE: Here I remove the NAs from coords that don't fall with in the scene and ## the zeros that are an artifact of the mismatch between palsar bbox dim and palsar raster dim (due to tilted orbital path) # dat48<-data.frame(extracted_48m[as.numeric(extracted_48m[,ncol(extracted_48m)])!=0,]) #& extracted_48m[,ncol(extracted_48m)]>0,]) -dat48<-data.frame(na.exclude(extracted_48m)) +dat48 <- data.frame(na.exclude(extracted_48m)) # dat60<-data.frame(extracted_60m[as.numeric(extracted_60m[,ncol(extracted_60m)])!=0,]) #& extracted_60m[,ncol(extracted_60m)]>0,]) # dat60<-data.frame(extracted_60m) -if(fia==1){ #FIA data does not contain a plot-id, so here I add a dummy plot-id -# plot<-seq(1,nrow(dat48),1) -# dat48<-cbind(dat48[,1:2],plot,dat48[,3:7]) - colnames(dat48)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.48", "HV.sigma.48") -# colnames(dat60)<-c("scnid","scndate", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") -}else{ - 
colnames(dat48)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.48", "HV.sigma.48") -# colnames(dat60)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") +if (fia == 1) { # FIA data does not contain a plot-id, so here I add a dummy plot-id + # plot<-seq(1,nrow(dat48),1) + # dat48<-cbind(dat48[,1:2],plot,dat48[,3:7]) + colnames(dat48) <- c("scnid", "scndate", "plot", "UTM.lat", "UTM.lon", "biomass", "HH.sigma.48", "HV.sigma.48") + # colnames(dat60)<-c("scnid","scndate", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") +} else { + colnames(dat48) <- c("scnid", "scndate", "plot", "UTM.lat", "UTM.lon", "biomass", "HH.sigma.48", "HV.sigma.48") + # colnames(dat60)<-c("scnid","scndate", "plot", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") } ## NOTE: Converting to dataframe changes all values to factor, so here I reformat the data and save it -dat48$scnid<-as.character(dat48$scnid) -dat48$scndate<-as.Date(dat48$scndate,"%Y-%m-%d") -dat48$plot<-as.numeric(dat48$plot) -dat48$UTM.lat<- as.numeric(as.character(dat48$UTM.lat)) -dat48$UTM.lon<- as.numeric(as.character(dat48$UTM.lon)) -dat48$biomass<- as.numeric(as.character(dat48$biomass)) -dat48$HH.sigma.48<- as.numeric(as.character(dat48$HH.sigma.48)) -dat48$HV.sigma.48<- as.numeric(as.character(dat48$HV.sigma.48)) - -#This will exclude scenes from the leaf off period (Nov-April) -if(leaf.off==1){ #include leaf off data - dat48<-dat48 -}else{ #exclude leaf off data - dat48<-dat48[as.numeric(format(dat48$scndate,"%m"))>=05 & as.numeric(format(dat48$scndate,"%m"))<=10,] +dat48$scnid <- as.character(dat48$scnid) +dat48$scndate <- as.Date(dat48$scndate, "%Y-%m-%d") +dat48$plot <- as.numeric(dat48$plot) +dat48$UTM.lat <- as.numeric(as.character(dat48$UTM.lat)) +dat48$UTM.lon <- as.numeric(as.character(dat48$UTM.lon)) +dat48$biomass <- as.numeric(as.character(dat48$biomass)) +dat48$HH.sigma.48 <- as.numeric(as.character(dat48$HH.sigma.48)) +dat48$HV.sigma.48 <- as.numeric(as.character(dat48$HV.sigma.48)) + +# This will exclude scenes from the leaf off period (Nov-April) +if (leaf.off == 1) { # include leaf off data + dat48 <- dat48 +} else { # exclude leaf off data + dat48 <- dat48[as.numeric(format(dat48$scndate, "%m")) >= 05 & as.numeric(format(dat48$scndate, "%m")) <= 10, ] } -#Save extracted data -write.table(dat48,file=paste(outpath,"/",coord.set[fia+1],"_dat48.csv",sep=""),sep=",",quote=FALSE,col.names = TRUE, row.names=F) - -#Switch to working from saved data -dat48<-read.csv(paste(outpath,"/",coord.set[fia+1],"_dat48.csv",sep=""),header = TRUE) - -#Correctly format data (again...sigh...) 
-dat48$scnid<-as.character(dat48$scnid) -dat48$scndate<-as.Date(dat48$scndate,"%Y-%m-%d") -dat48$plot<-as.character(dat48$plot) -dat48$UTM.lat<- as.numeric(as.character(dat48$UTM.lat)) -dat48$UTM.lon<- as.numeric(as.character(dat48$UTM.lon)) -dat48$biomass<- as.numeric(as.character(dat48$biomass)) -dat48$HH.sigma.48<- as.numeric(as.character(dat48$HH.sigma.48)) -dat48$HV.sigma.48<- as.numeric(as.character(dat48$HV.sigma.48)) -dat48$year<-as.numeric(format(dat48$scndate,"%Y")) -dat48$month<-as.numeric(format(dat48$scndate,"%m")) - - -for(y in unique(dat48$year)){ - for(m in unique(dat48$month)){ - if(length(dat48$biomass[dat48$month==m & dat48$year==y])<1){ +# Save extracted data +write.table(dat48, file = paste(outpath, "/", coord.set[fia + 1], "_dat48.csv", sep = ""), sep = ",", quote = FALSE, col.names = TRUE, row.names = F) + +# Switch to working from saved data +dat48 <- read.csv(paste(outpath, "/", coord.set[fia + 1], "_dat48.csv", sep = ""), header = TRUE) + +# Correctly format data (again...sigh...) +dat48$scnid <- as.character(dat48$scnid) +dat48$scndate <- as.Date(dat48$scndate, "%Y-%m-%d") +dat48$plot <- as.character(dat48$plot) +dat48$UTM.lat <- as.numeric(as.character(dat48$UTM.lat)) +dat48$UTM.lon <- as.numeric(as.character(dat48$UTM.lon)) +dat48$biomass <- as.numeric(as.character(dat48$biomass)) +dat48$HH.sigma.48 <- as.numeric(as.character(dat48$HH.sigma.48)) +dat48$HV.sigma.48 <- as.numeric(as.character(dat48$HV.sigma.48)) +dat48$year <- as.numeric(format(dat48$scndate, "%Y")) +dat48$month <- as.numeric(format(dat48$scndate, "%m")) + + +for (y in unique(dat48$year)) { + for (m in unique(dat48$month)) { + if (length(dat48$biomass[dat48$month == m & dat48$year == y]) < 1) { next - }else{ - plot(dat48$biomass[dat48$month==m & dat48$year==y],dat48$HH.sigma.48[dat48$month==m & dat48$year==y], - xlab="biomass",ylab='HH',main=paste(month.abb[m],y,sep=" ") ) - }#if - }#for m -}#for y - -par(mfrow=c(1,3)) -plot(dat48$HH.sigma.48[dat48$month==5 & dat48$year==2007],dat48$HH.sigma.48[dat48$month==6 & dat48$year==2007], - xlab="05 HH",ylab='06 HH',main="may 2007 vs jun 2007") - fit1<-lm(dat48$HH.sigma.48[dat48$month==6 & dat48$year==2007] ~ dat48$HH.sigma.48[dat48$month==5 & dat48$year==2007]) - abline(0,1,lwd=2,lty=2,col="grey") - abline(fit1,lwd=2,lty=1,col="red") -plot(dat48$HH.sigma.48[dat48$month==5 & dat48$year==2007],dat48$HH.sigma.48[dat48$month==8 & dat48$year==2007], - xlab="05 HH",ylab='08 HH',main="may 2007 vs aug 2007") - fit2<-lm(dat48$HH.sigma.48[dat48$month==5 & dat48$year==2007] ~ dat48$HH.sigma.48[dat48$month==8 & dat48$year==2007]) - abline(0,1,lwd=2,lty=2,col="grey") - abline(fit1,lwd=2,lty=1,col="red") -plot(dat48$HH.sigma.48[dat48$month==6 & dat48$year==2007],dat48$HH.sigma.48[dat48$month==8 & dat48$year==2007], - xlab="06 HH",ylab='08 HH',main="jun 2007 vs aug 2007") - fit3<-lm(dat48$HH.sigma.48[dat48$month==6 & dat48$year==2007] ~ dat48$HH.sigma.48[dat48$month==8 & dat48$year==2007]) - abline(0,1,lwd=2,lty=2,col="grey") - abline(fit1,lwd=2,lty=1,col="red") + } else { + plot(dat48$biomass[dat48$month == m & dat48$year == y], dat48$HH.sigma.48[dat48$month == m & dat48$year == y], + xlab = "biomass", ylab = "HH", main = paste(month.abb[m], y, sep = " ") + ) + } # if + } # for m +} # for y + +par(mfrow = c(1, 3)) +plot(dat48$HH.sigma.48[dat48$month == 5 & dat48$year == 2007], dat48$HH.sigma.48[dat48$month == 6 & dat48$year == 2007], + xlab = "05 HH", ylab = "06 HH", main = "may 2007 vs jun 2007" +) +fit1 <- lm(dat48$HH.sigma.48[dat48$month == 6 & dat48$year == 2007] 
~ dat48$HH.sigma.48[dat48$month == 5 & dat48$year == 2007]) +abline(0, 1, lwd = 2, lty = 2, col = "grey") +abline(fit1, lwd = 2, lty = 1, col = "red") +plot(dat48$HH.sigma.48[dat48$month == 5 & dat48$year == 2007], dat48$HH.sigma.48[dat48$month == 8 & dat48$year == 2007], + xlab = "05 HH", ylab = "08 HH", main = "may 2007 vs aug 2007" +) +fit2 <- lm(dat48$HH.sigma.48[dat48$month == 5 & dat48$year == 2007] ~ dat48$HH.sigma.48[dat48$month == 8 & dat48$year == 2007]) +abline(0, 1, lwd = 2, lty = 2, col = "grey") +abline(fit1, lwd = 2, lty = 1, col = "red") +plot(dat48$HH.sigma.48[dat48$month == 6 & dat48$year == 2007], dat48$HH.sigma.48[dat48$month == 8 & dat48$year == 2007], + xlab = "06 HH", ylab = "08 HH", main = "jun 2007 vs aug 2007" +) +fit3 <- lm(dat48$HH.sigma.48[dat48$month == 6 & dat48$year == 2007] ~ dat48$HH.sigma.48[dat48$month == 8 & dat48$year == 2007]) +abline(0, 1, lwd = 2, lty = 2, col = "grey") +abline(fit1, lwd = 2, lty = 1, col = "red") # dat60$scnid<-as.character(dat60$scnid) # dat60$scndate<-as.Date(dat60$scndate,"%Y-%M-%d") @@ -348,95 +357,95 @@ plot(dat48$HH.sigma.48[dat48$month==6 & dat48$year==2007],dat48$HH.sigma.48[dat # write.csv(dat60,file=paste(outpath,"/dat60.csv",sep=""),quote=FALSE,row.names=F) -#Generate PDF of raw data exploration -#NOTE: Some of these figures will not be relevant for the FIA dataset -pdf(paste(outpath,"/",coord.set[fia+1], "_ExtractionQCplots.pdf",sep=""),width = 6, height = 6, paper='special') +# Generate PDF of raw data exploration +# NOTE: Some of these figures will not be relevant for the FIA dataset +pdf(paste(outpath, "/", coord.set[fia + 1], "_ExtractionQCplots.pdf", sep = ""), width = 6, height = 6, paper = "special") -par(mfrow=c(1,2)) -years<-as.numeric(format(dat48$scndate,"%Y")) -hist(years,freq=TRUE,main="By year") -months<-as.numeric(format(dat48$scndate,"%m")) -hist(months,freq=TRUE,main="By month") +par(mfrow = c(1, 2)) +years <- as.numeric(format(dat48$scndate, "%Y")) +hist(years, freq = TRUE, main = "By year") +months <- as.numeric(format(dat48$scndate, "%m")) +hist(months, freq = TRUE, main = "By month") # par(mfrow=c(1,1)) # hist(dat48$scndate,freq=T,100,xaxt="n") # axis(1, dat48$scndate, format(dat48$scndate, "%b %Y"), cex.axis = .7) -par(mfrow=c(1,3)) -hist(dat48$biomass,main=paste(coord.set[fia+1],"biomass",sep=" ")) -hist(dat48$HH.sigma.48,main=paste(coord.set[fia+1],"HH",sep=" ")) -hist(dat48$HV.sigma.48,main=paste(coord.set[fia+1],"HV",sep=" ")) - -Lab.palette <- colorRampPalette(c("white","violet","blue","green","yellow","orange", "red"), space = "Lab") -par(mfrow=c(1,3)) -smoothScatter(dat48$HV.sigma.48,dat48$HH.sigma.48,nbin=256,colramp = Lab.palette,xlab="HV",ylab="HH") -smoothScatter(dat48$biomass,dat48$HH.sigma.48,nbin=256,colramp = Lab.palette,xlab="biomass",ylab="HH",main="Density") -smoothScatter(dat48$biomass,dat48$HV.sigma.48,nbin=256,colramp = Lab.palette,ylim=c(0,max(dat48$HH.sigma.48)),xlab="biomass",ylab="HV") - -par(mfrow=c(1,2)) -scatter.smooth(dat48$biomass,dat48$HH.sigma.48,cex=0,xlab="biomass",ylab="HH",main="48m",col="grey") - points(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2007],col=1,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2008],col=2,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2009],col=3,cex=0.5) - 
points(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col=4,cex=0.5) - legend("topright",pch=1,legend=c(2007,2008,2009,2010), cex=0.7,pt.cex=0.5,col=1:4,bty="n",xjust=1) -scatter.smooth(dat48$biomass,dat48$HV.sigma.48,cex=0,xlab="biomass",ylab="HV",main="48m",col="grey") - points(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2007],col=1,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2008],col=2,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2009],col=3,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],col=4,cex=0.5) - legend("topright",pch=1,legend=c(2007,2008,2009,2010), cex=0.7,pt.cex=0.5,col=1:4,bty="n",xjust=1) +par(mfrow = c(1, 3)) +hist(dat48$biomass, main = paste(coord.set[fia + 1], "biomass", sep = " ")) +hist(dat48$HH.sigma.48, main = paste(coord.set[fia + 1], "HH", sep = " ")) +hist(dat48$HV.sigma.48, main = paste(coord.set[fia + 1], "HV", sep = " ")) + +Lab.palette <- colorRampPalette(c("white", "violet", "blue", "green", "yellow", "orange", "red"), space = "Lab") +par(mfrow = c(1, 3)) +smoothScatter(dat48$HV.sigma.48, dat48$HH.sigma.48, nbin = 256, colramp = Lab.palette, xlab = "HV", ylab = "HH") +smoothScatter(dat48$biomass, dat48$HH.sigma.48, nbin = 256, colramp = Lab.palette, xlab = "biomass", ylab = "HH", main = "Density") +smoothScatter(dat48$biomass, dat48$HV.sigma.48, nbin = 256, colramp = Lab.palette, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "biomass", ylab = "HV") + +par(mfrow = c(1, 2)) +scatter.smooth(dat48$biomass, dat48$HH.sigma.48, cex = 0, xlab = "biomass", ylab = "HH", main = "48m", col = "grey") +points(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2007], col = 1, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2008], col = 2, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2009], col = 3, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = 4, cex = 0.5) +legend("topright", pch = 1, legend = c(2007, 2008, 2009, 2010), cex = 0.7, pt.cex = 0.5, col = 1:4, bty = "n", xjust = 1) +scatter.smooth(dat48$biomass, dat48$HV.sigma.48, cex = 0, xlab = "biomass", ylab = "HV", main = "48m", col = "grey") +points(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2007], col = 1, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2008], col = 2, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2009], col = 3, cex = 0.5) +points(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], col = 4, cex = 0.5) +legend("topright", pch = 1, legend = c(2007, 2008, 2009, 2010), cex = 0.7, pt.cex = 0.5, col = 1:4, bty = "n", xjust = 1) # scatter.smooth(dat60$biomass,dat60$HV.sigma.60,xlab="biomass",ylab="HV",main="60m",col="grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60,xlab="biomass",ylab="HV",main="60m",col="grey") -par(mfrow=c(1,2)) 
-scatter.smooth(dat48$biomass,dat48$HV.sigma.48/dat48$HH.sigma.48,xlab="biomass",ylab="HV/HH",main="48m",col="grey") +par(mfrow = c(1, 2)) +scatter.smooth(dat48$biomass, dat48$HV.sigma.48 / dat48$HH.sigma.48, xlab = "biomass", ylab = "HV/HH", main = "48m", col = "grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60/dat60$HV.sigma.60,xlab="biomass",ylab="HV/HV",main="60m",col="grey") -scatter.smooth(dat48$biomass,dat48$HH.sigma.48*dat48$HV.sigma.48,xlab="biomass",ylab="HHxHV",main="48m",col="grey") +scatter.smooth(dat48$biomass, dat48$HH.sigma.48 * dat48$HV.sigma.48, xlab = "biomass", ylab = "HHxHV", main = "48m", col = "grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60*dat60$HV.sigma.60,xlab="biomass",ylab="HVxHV",main="60m",col="grey") -par(mfrow=c(1,1)) -scatter.smooth(dat48$biomass,(dat48$HH.sigma.48-dat48$HV.sigma.48)/(dat48$HH.sigma.48+dat48$HV.sigma.48),xlab="biomass",ylab="(HH-HV)/(HH+HV)",main="48m", col="gray") +par(mfrow = c(1, 1)) +scatter.smooth(dat48$biomass, (dat48$HH.sigma.48 - dat48$HV.sigma.48) / (dat48$HH.sigma.48 + dat48$HV.sigma.48), xlab = "biomass", ylab = "(HH-HV)/(HH+HV)", main = "48m", col = "gray") # scatter.smooth(dat60$biomass,(dat60$HV.sigma.60-dat60$HV.sigma.60)/(dat60$HV.sigma.60+dat60$HV.sigma.60),xlab="biomass",ylab="(HV-HV)/(HV+HV)",main="60m", col="gray") -par(mfrow=c(4,2),mar=c(4,4,2,2)) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2007],col="grey",xlab="biomass",ylab="HH",main="2007") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2007],col="grey",xlab="biomass",ylab="HV",main="2007") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2008],col="grey",xlab="biomass",ylab="HH",main="2008") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2008],col="grey",xlab="biomass",ylab="HV",main="2008") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2009],col="grey",xlab="biomass",ylab="HH",main="2009") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2009],col="grey",xlab="biomass",ylab="HV",main="2009") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010") - points(dat48$biomass[format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%m")>10],col="red",xlab="biomass",ylab="HV",main="2010") - legend("topright",pch=1,legend=c("!Dec","Dec"), cex=0.7,pt.cex=0.5,col=c("grey","red"),bty="n",xjust=1) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HV",main="2010") - points(dat48$biomass[format(dat48$scndate,"%m")>10],dat48$HV.sigma.48[format(dat48$scndate,"%m")>10],col="red",xlab="biomass",ylab="HV",main="2010") - legend("topright",pch=1,legend=c("!Dec","Dec"), cex=0.7,pt.cex=0.5,col=c("grey","red"),bty="n",xjust=1) - -par(mfrow=c(1,2)) -plot(dat48$scndate,dat48$HH.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HH") -plot(dat48$scndate,dat48$HV.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HV") -mtext("On same scale", side=3, line=-2, outer=TRUE, cex=1, font=2) - -par(mfrow=c(1,2)) 
-plot(dat48$scndate,dat48$HH.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HH") -plot(dat48$scndate,dat48$HV.sigma.48,ylim=c(0,max(dat48$HV.sigma.48)),xlab="Date",ylab="HV") -mtext("By Date", side=3, line=-2, outer=TRUE, cex=1, font=2) - -par(mfrow=c(1,2)) -plot(dat48$scndate[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],xlab="2010",ylab="HH") -plot(dat48$scndate[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],xlab="2010",ylab="HV") -mtext("2010 only", side=3, line=-3, outer=TRUE, cex=1, font=2) - -if(leaf.off==1){ -par(mfrow=c(2,2)) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010 only") -points(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10]) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010 only") -points(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10]) -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],col="grey",xlab="biomass",ylab="HH",main="2010 only,Dec. removed") -scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],col="grey",xlab="biomass",ylab="HV",main="2010 only,Dec. 
removed") +par(mfrow = c(4, 2), mar = c(4, 4, 2, 2)) +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2007], col = "grey", xlab = "biomass", ylab = "HH", main = "2007") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2007], col = "grey", xlab = "biomass", ylab = "HV", main = "2007") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2008], col = "grey", xlab = "biomass", ylab = "HH", main = "2008") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2008], col = "grey", xlab = "biomass", ylab = "HV", main = "2008") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2009], col = "grey", xlab = "biomass", ylab = "HH", main = "2009") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2009], col = "grey", xlab = "biomass", ylab = "HV", main = "2009") +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010") +points(dat48$biomass[format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%m") > 10], col = "red", xlab = "biomass", ylab = "HV", main = "2010") +legend("topright", pch = 1, legend = c("!Dec", "Dec"), cex = 0.7, pt.cex = 0.5, col = c("grey", "red"), bty = "n", xjust = 1) +scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HV", main = "2010") +points(dat48$biomass[format(dat48$scndate, "%m") > 10], dat48$HV.sigma.48[format(dat48$scndate, "%m") > 10], col = "red", xlab = "biomass", ylab = "HV", main = "2010") +legend("topright", pch = 1, legend = c("!Dec", "Dec"), cex = 0.7, pt.cex = 0.5, col = c("grey", "red"), bty = "n", xjust = 1) + +par(mfrow = c(1, 2)) +plot(dat48$scndate, dat48$HH.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HH") +plot(dat48$scndate, dat48$HV.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HV") +mtext("On same scale", side = 3, line = -2, outer = TRUE, cex = 1, font = 2) + +par(mfrow = c(1, 2)) +plot(dat48$scndate, dat48$HH.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HH") +plot(dat48$scndate, dat48$HV.sigma.48, ylim = c(0, max(dat48$HV.sigma.48)), xlab = "Date", ylab = "HV") +mtext("By Date", side = 3, line = -2, outer = TRUE, cex = 1, font = 2) + +par(mfrow = c(1, 2)) +plot(dat48$scndate[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], xlab = "2010", ylab = "HH") +plot(dat48$scndate[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], xlab = "2010", ylab = "HV") +mtext("2010 only", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + +if (leaf.off == 1) { + par(mfrow = c(2, 2)) + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only") + points(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10]) + 
scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only") + points(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10]) + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only,Dec. removed") + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], col = "grey", xlab = "biomass", ylab = "HV", main = "2010 only,Dec. removed") } # #Plot individual time series of HH for each coordinate set @@ -459,38 +468,38 @@ scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scn # par(new=T) # } -#breaks data into quantiles each containing ~5% of the data -bind.bio<-tapply(dat48$biomass,cut(dat48$biomass,breaks=round(quantile(dat48$biomass,probs = seq(0, 1, 0.05))) ),mean) -bind.HH<-tapply(dat48$HH.sigma.48,cut(dat48$HH.sigma.48,breaks=quantile(dat48$HH.sigma.48,probs = seq(0, 1, 0.05)) ),mean) -bind.HV<-tapply(dat48$HV.sigma.48,cut(dat48$HV.sigma.48,breaks=quantile(dat48$HV.sigma.48,probs = seq(0, 1, 0.05)) ),mean) -par(new=FALSE, mfrow=c(1,2)) -plot(dat48$biomass,dat48$HH.sigma.48,col="grey",pch=".",xlab="Binned Biomass",ylab="Binned HH") - points(bind.bio,bind.HH) -plot(dat48$biomass,dat48$HV.sigma.48,col="grey",,pch=".",xlab="Binned Biomass",ylab="Binned HV") - points(bind.bio,bind.HV) -mtext("Bins each contain 5% of the data points", side=3, line=-3, outer=TRUE, cex=1, font=2) - -#breaks data into even-length bins -bind.bio<-tapply(dat48$biomass, cut(dat48$biomass, breaks=seq(0, max(dat48$biomass), 0.05*max(dat48$biomass))),mean) -bind.HH<-tapply(dat48$HH.sigma.48,cut(dat48$HH.sigma.48,breaks=seq(0, max(dat48$HH.sigma.48), 0.05*max(dat48$HH.sigma.48))),mean) -bind.HV<-tapply(dat48$HV.sigma.48,cut(dat48$HV.sigma.48,breaks=seq(0, max(dat48$HV.sigma.48), 0.05*max(dat48$HV.sigma.48))),mean) -par(mfrow=c(1,2)) -plot(dat48$biomass,dat48$HH.sigma.48,col="grey",pch=".",xlab="Binned Biomass",ylab="Binned HH") - points(bind.bio,bind.HH) -plot(dat48$biomass,dat48$HV.sigma.48,col="grey",,pch=".",xlab="Binned Biomass",ylab="Binned HV") - points(bind.bio,bind.HV) -mtext("Bins each contain 5% of data range", side=3, line=-3, outer=TRUE, cex=1, font=2) - -par(mfrow=c(1,2)) -bplot.xy(dat48$biomass,dat48$HH.sigma.48,N=15,xlab="biomass",ylab="HH (simga naught)") -bplot.xy(dat48$biomass,dat48$HV.sigma.48,N=15,xlab="biomass",ylab="HV (simga naught)") +# breaks data into quantiles each containing ~5% of the data +bind.bio <- tapply(dat48$biomass, cut(dat48$biomass, breaks = round(quantile(dat48$biomass, probs = seq(0, 1, 0.05)))), mean) +bind.HH <- tapply(dat48$HH.sigma.48, cut(dat48$HH.sigma.48, breaks = quantile(dat48$HH.sigma.48, probs = seq(0, 1, 0.05))), mean) +bind.HV <- tapply(dat48$HV.sigma.48, cut(dat48$HV.sigma.48, breaks = quantile(dat48$HV.sigma.48, probs = seq(0, 1, 0.05))), mean) +par(new = FALSE, mfrow = c(1, 2)) +plot(dat48$biomass, dat48$HH.sigma.48, col = "grey", pch = ".", xlab = "Binned Biomass", ylab = "Binned HH") +points(bind.bio, bind.HH) +plot(dat48$biomass, dat48$HV.sigma.48, col = "grey", 
, pch = ".", xlab = "Binned Biomass", ylab = "Binned HV") +points(bind.bio, bind.HV) +mtext("Bins each contain 5% of the data points", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + +# breaks data into even-length bins +bind.bio <- tapply(dat48$biomass, cut(dat48$biomass, breaks = seq(0, max(dat48$biomass), 0.05 * max(dat48$biomass))), mean) +bind.HH <- tapply(dat48$HH.sigma.48, cut(dat48$HH.sigma.48, breaks = seq(0, max(dat48$HH.sigma.48), 0.05 * max(dat48$HH.sigma.48))), mean) +bind.HV <- tapply(dat48$HV.sigma.48, cut(dat48$HV.sigma.48, breaks = seq(0, max(dat48$HV.sigma.48), 0.05 * max(dat48$HV.sigma.48))), mean) +par(mfrow = c(1, 2)) +plot(dat48$biomass, dat48$HH.sigma.48, col = "grey", pch = ".", xlab = "Binned Biomass", ylab = "Binned HH") +points(bind.bio, bind.HH) +plot(dat48$biomass, dat48$HV.sigma.48, col = "grey", , pch = ".", xlab = "Binned Biomass", ylab = "Binned HV") +points(bind.bio, bind.HV) +mtext("Bins each contain 5% of data range", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + +par(mfrow = c(1, 2)) +bplot.xy(dat48$biomass, dat48$HH.sigma.48, N = 15, xlab = "biomass", ylab = "HH (simga naught)") +bplot.xy(dat48$biomass, dat48$HV.sigma.48, N = 15, xlab = "biomass", ylab = "HV (simga naught)") dev.off() -#Run curve fitting function -n.reps<- 1000 #sets value for n.adapt and n.iter -n.chain<-3 #number of MCMC chains to run -bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) +# Run curve fitting function +n.reps <- 1000 # sets value for n.adapt and n.iter +n.chain <- 3 # number of MCMC chains to run +bayes.curve.fit(outpath, coord.set, fia, n.reps, n.chain) @@ -541,62 +550,62 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) ######################################################## ####################### -##Use Non-linear Least Squares (nls) to fit rectangular hyperbola NOTE:This method is not preferred. +## Use Non-linear Least Squares (nls) to fit rectangular hyperbola NOTE:This method is not preferred. 
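## --- Illustrative sketch only (editor's example, not part of the patch) ---
## The section header above names a rectangular-hyperbola fit via nls(); the
## original implementation below is entirely commented out, so this is a
## minimal runnable sketch of that technique under stated assumptions: it
## assumes the `dat48` data frame built earlier in this script (columns
## `biomass` and `HH.sigma.48`) and reuses the starting values from the
## commented code below (alpha = 0.75, beta = mean backscatter above the 75th
## biomass percentile). The commented code also square-root transforms
## biomass; that transform is omitted here for brevity.
x <- dat48$biomass
y <- dat48$HH.sigma.48
hyp.fit <- nls(y ~ (alpha * beta * x) / (beta + alpha * x),
  data = list(x = x, y = y),
  start = list(alpha = 0.75, beta = mean(y[x > quantile(x, 0.75, na.rm = TRUE)], na.rm = TRUE)),
  na.action = na.exclude
)
summary(hyp.fit) # alpha ~ initial slope, beta ~ asymptotic backscatter
1 - var(residuals(hyp.fit), na.rm = TRUE) / var(y, na.rm = TRUE) # pseudo-R2, as in the commented code below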
# ####################### # ##NOTE: backscatter=((alpha*beta*biomass)/(beta + alpha*biomass)) # buff<-c("48", "60") -# -# # col.names<-c("pol_band", +# +# # col.names<-c("pol_band", # # "buffer_radius(m)", -# # "biomass_R2", -# # "mod.alpha", -# # "pval.alpha", -# # "alpha.ci.lower", +# # "biomass_R2", +# # "mod.alpha", +# # "pval.alpha", +# # "alpha.ci.lower", # # "alpha.ci.upper", -# # "mod.beta", -# # "pval.b", +# # "mod.beta", +# # "pval.b", # # "beta.ci.upper", # # "beta.ci.upper", -# # "num.iters", +# # "num.iters", # # "convergence") # # mod.params<-matrix(nrow=1,ncol=length(col.names)) # # colnames(mod.params)<-col.names -# # +# # # # par(mfrow=c(length(pol_bands),length(buff))) # # for(i in 1:length(pol_bands)){ # # for(j in 1:length(buff)){ -# # +# # # # y<-eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$",sep=''),paste(pol_bands[i],'.sigma.',buff[j],sep='')))) # # x<-(eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$biomass",sep='')))))^0.5 -# # +# # # # # Plot backscatter v biomass # # plot(x, y, # # xlab=expression(sqrt(biomass)), # # ylab=pol_bands[i], # # main=buff[j], # # col=51,pch=19, cex=0.6, -# # xlim=c(min(x),max(x)), +# # xlim=c(min(x),max(x)), # # ylim=c(min(y),max(y)), # # las=1, cex.axis=1.2) -# # +# # # # # Calculate rectangular hyperbola between backscatter and biomass # # biomass_curve <- nls(formula=y ~ ((alpha*beta*x)/(beta + alpha*x)), # also in Gu 2002 -# # data=list(y = y, x = x), -# # start=list(alpha = .75, beta = mean(y[x > quantile(x)[4]])), +# # data=list(y = y, x = x), +# # start=list(alpha = .75, beta = mean(y[x > quantile(x)[4]])), # # na.action="na.exclude", trace=F) # # biomass_R2 <- 1 - var(residuals(biomass_curve)) / var(y) # R2 -# # +# # # # # Plot rectangular hyperbola model fit # # mod.alpha <- summary(biomass_curve)$parameters[1] # alpha value # # mod.beta <- summary(biomass_curve)$parameters[2] # Beta value -# # mod.biomass <- seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), by=0.01) -# # mod.HH <- (mod.alpha*mod.beta*mod.biomass)/(mod.beta + mod.alpha*mod.biomass) +# # mod.biomass <- seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), by=0.01) +# # mod.HH <- (mod.alpha*mod.beta*mod.biomass)/(mod.beta + mod.alpha*mod.biomass) # # lines(x=mod.biomass, y=mod.HH, col="black", lty=1, lwd=2.5) -# # +# # # # legend("topright", legend=c(paste("R^2=", format(biomass_R2, digits=2)), # # paste("alpha=",format(mod.alpha,digits=2)), # # paste("beta=",format(mod.beta,digits=2))), bty="n",cex=1.2) -# # +# # # # # Write model parameters to output file # # num.iters <- as.numeric(biomass_curve$convInfo[2]) # # conv <- as.numeric(biomass_curve$convInfo[1]) @@ -614,23 +623,23 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # # beta.ci.lower <- as.numeric(ci[2,1]) # # beta.ci.upper <- as.numeric(ci[2,2]) # # pval.b <- as.numeric(summary(biomass_curve)$parameters[8]) -# # mod.params <- rbind(mod.params,as.vector(c(pol_bands[i], -# # buff[j], -# # biomass_R2, -# # mod.alpha, -# # pval.a, -# # alpha.ci.lower, +# # mod.params <- rbind(mod.params,as.vector(c(pol_bands[i], +# # buff[j], +# # biomass_R2, +# # mod.alpha, +# # pval.a, +# # alpha.ci.lower, # # alpha.ci.upper, -# # mod.beta, +# # mod.beta, # # pval.b, -# # beta.ci.lower, +# # beta.ci.lower, # # beta.ci.upper, -# # num.iters, +# # num.iters, # # conv))) # # print(paste(pol_bands[i],buff[j])) # # }} # # mod.params<-mod.params[2:nrow(mod.params),] -# # +# # # # xs<-seq(from=1, to=4,by=1) # # ys<-as.numeric(mod.params[,4]) # # upper<-as.numeric(mod.params[,7]) @@ -639,41 +648,41 @@ 
bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # # plot(xs,ys,ylim=c(0,max(upper)+0.1*max(upper)),ylab="Alpha estimate",xlab="pol.band/buffer.radius") # # segments(xs,lower,xs,upper,col="black",lwd=2) # # legend("topright",legend=c("1=48,HH", "2=60,HH","3=48,HV", "4=60,HV")) -# # +# # # # rhyp<-mod.params -# -# +# +# # ####################### # ##Use Maximum likelihood to fit curves # ####################### # data<- read.csv(file.path(outpath, "WLEF_dat48.csv"), sep=",", header=T) ##location of PALSAR metadata table -# +# # # model<-c("Holl4", "RecHyp", "Logistic") -# -# +# +# # # for(k in 1:length(model)){ #loop over different functional forms # for(i in 1:length(pol_bands)){ # for(j in 1 ){ -# +# # y<-eval(parse(text=paste(paste("data$",pol_bands[i],'.sigma.',buff[j],sep='')))) #backscatter values # # x<-(eval(parse(text=paste(paste("dat",buff[j],sep=''),paste("$biomass",sep='')))))^0.5 # x<-(data$biomass) #biomass # # max.y<-mean(y[x>=quantile(x)[4]]) #starting est. of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile -# +# # ####################### # ##Use Maximum likelihood to fit Holling Type 4 # ####################### # model<-"Holl4" -# param_est<-matrix(nrow=0,ncol=8) +# param_est<-matrix(nrow=0,ncol=8) # par(mfrow=c(1,length(pol_bands))) # pdf(paste(outpath,"/",model,"_curvefits.pdf",sep=""),width = 6, height = 6, paper='special') -# +# # a<- mean(y[x>=quantile(x,na.rm=TRUE)[4]],na.rm=TRUE) #starting est. of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile # b<-quantile(x,na.rm=TRUE)[4] # c<--1 -# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values +# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values # params<-c(a,b,c,sd) -# +# # fit <- function(params,x,y){ # a <-params[1] # b <-params[2] @@ -681,21 +690,21 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # sd<-params[4] # # y.pred<-(max.y*x)/(ki+x) # y.pred<-(a*x^2)/(b+(c*x)+x^2) -# +# # LL<- -sum(dnorm(y,y.pred,sd,log=TRUE)) # return(LL) # } #function -# +# # fit.mod = optim(par=params,fit,x=x,y=y) # fit.mod # aic.mod<- -2*fit.mod$value + 2*length(params) -# +# # params <- c(pol_bands[i],buff[j],fit.mod$par[1:3],2*fit.mod$par[2]/fit.mod$par[3],fit.mod$par[4],aic.mod) #par means parameter estimates # param_est<-rbind(param_est, params) # xseq = seq(0,max(x),length=1000) -# +# # plot(x,y, xlab="biomass",ylab=paste(pol_bands[i],buff[j],sep="_"),main=model, #something wrong here with main title -# xlim=c(min(x),max(x)), +# xlim=c(min(x),max(x)), # ylim=c(min(y),max(y)), # pch=16,col="#CCCCCC") # abline(a=0,b=0) @@ -705,47 +714,47 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # paste("b=",format(fit.mod$par[2],digits=2)), # paste("c=",format(fit.mod$par[3],digits=2)), # paste("aic=", format(aic.mod, digits=4))), bty="n",cex=0.85) -# +# # dev.off() # colnames(param_est)<-c("pol_bands","buff","a","b","c","bio.at.peak.backscatter", "sd","AIC") # param_est # write.table(param_est,file=paste(outpath, "/", model, "_param_estimates.csv",sep=""),quote=FALSE,sep=",",row.names=F) -# +# # ####################### # ##Use Maximum likelihood to fit Logistic # ####################### # model<-"Logistic" -# param_est<-matrix(nrow=0,ncol=7) +# param_est<-matrix(nrow=0,ncol=7) # par(mfrow=c(1,length(pol_bands))) # pdf(paste(outpath,"/",model,"_curvefits.pdf",sep=""),width = 6, height = 6, paper='special') -# +# # a<- max(y) #starting est. 
of max backscatter is taken as the mean backscatter value when biomass > the 75th quantile # b<-2 #slope of initial portion of the curve # c<-mean(x[y>=quantile(y,0.9,na.rm=TRUE)],na.rm=TRUE) -# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values +# sd<-sd(y,na.rm=TRUE) #stdev of backscatter values # params<-c(a,b,c,sd) -# +# # fit <- function(params,x,y){ # a <-params[1] # b <-params[2] # c <-params[3] # sd<-params[4] # y.pred<- a/(1+b*exp(-c*x)) -# +# # LL<- -sum(dnorm(y,y.pred,sd,log=TRUE)) # return(LL) # } #function -# +# # fit.mod = optim(par=params,fit,x=x,y=y) # fit.mod # aic.mod<- -2*fit.mod$value + 2*length(params) -# +# # params <- c(pol_bands[i],buff[j],fit.mod$par[1:4],aic.mod) #par means parameter estimates # param_est<-rbind(param_est, params) # xseq = seq(0,max(x),length=1000) -# +# # plot(x,y, xlab="biomass",ylab=paste(pol_bands[i],buff[j],sep="_"),main=model, #something wrong here with main title -# xlim=c(min(x),max(x)), +# xlim=c(min(x),max(x)), # ylim=c(min(y),max(y)), # pch=16,col="#CCCCCC") # abline(a=0,b=0) @@ -754,25 +763,25 @@ bayes.curve.fit(outpath,coord.set,fia,n.reps,n.chain) # paste("b=",format(fit.mod$par[2],digits=2)), # paste("c=",format(fit.mod$par[3],digits=2)), # paste("aic=", format(aic.mod, digits=4))), bty="n",cex=0.85) -# +# # dev.off() # colnames(param_est)<-c("pol_bands","buff","a","b","c","sd","AIC") # param_est # write.table(param_est,file=paste(outpath, "/", model, "_param_estimates.csv",sep=""),quote=FALSE,sep=",",row.names=F) -# -# -# +# +# +# # }#for j looping over pol_bands # }#for i looping over buff -# +# # }#for k looping over models -# +# # ################################## # ################################## -# -# +# +# # ################# # ##diagnotics? # ################# -# -# +# +# diff --git a/modules/data.remote/inst/scripts/old/LMfit_and_disturbance_chronosequence.R b/modules/data.remote/inst/scripts/old/LMfit_and_disturbance_chronosequence.R index eb94bbb6ba0..ac436bf5dbe 100644 --- a/modules/data.remote/inst/scripts/old/LMfit_and_disturbance_chronosequence.R +++ b/modules/data.remote/inst/scripts/old/LMfit_and_disturbance_chronosequence.R @@ -1,239 +1,248 @@ -extracted_40m <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_40m.csv",sep="\t", header=T) -file.info<-read.table(file="/Users/hardimanb/Desktop/data.remote/output/metadata/output_metadata.csv",header=T,sep="\t") ##For Mac -wlef_abg<-read.csv("/Users/hardimanb/Desktop/data.remote/biometry/biometry_trimmed.csv", sep=",", header=T) -disturbance_extracted_40m <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/disturbance_extracted_40m.csv",sep="\t", header=T) -disturbance_inpath <-"/Users/hardimanb/Desktop/data.remote/biometry" ##For Mac +extracted_40m <- read.table(file = "/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_40m.csv", sep = "\t", header = T) +file.info <- read.table(file = "/Users/hardimanb/Desktop/data.remote/output/metadata/output_metadata.csv", header = T, sep = "\t") ## For Mac +wlef_abg <- read.csv("/Users/hardimanb/Desktop/data.remote/biometry/biometry_trimmed.csv", sep = ",", header = T) +disturbance_extracted_40m <- read.table(file = "/Users/hardimanb/Desktop/data.remote/output/data/disturbance_extracted_40m.csv", sep = "\t", header = T) +disturbance_inpath <- "/Users/hardimanb/Desktop/data.remote/biometry" ## For Mac # disturbance_inpath <-"/home/bhardima/pecan/modules/data.remote/biometry" -disturbance_infile 
<-read.csv(file.path(disturbance_inpath,"Cheas_coordinates_disturbance_year.csv"), sep=",", header=T) #disturbance plots +disturbance_infile <- read.csv(file.path(disturbance_inpath, "Cheas_coordinates_disturbance_year.csv"), sep = ",", header = T) # disturbance plots -odds<-seq(1,ncol(extracted_40m),by=2) -evens<-seq(2,ncol(extracted_40m),by=2) -date.time<-as.vector(substr(file.info$scndate,1,8)) -col_names<-c(rbind(paste(date.time, "HH",sep="_"),paste(date.time, "HV",sep="_"))) -HHscn.dates<-as.Date(substr(col_names[odds],1,8),"%Y%m%d") -HVscn.dates<-as.Date(substr(col_names[evens],1,8),"%Y%m%d") +odds <- seq(1, ncol(extracted_40m), by = 2) +evens <- seq(2, ncol(extracted_40m), by = 2) +date.time <- as.vector(substr(file.info$scndate, 1, 8)) +col_names <- c(rbind(paste(date.time, "HH", sep = "_"), paste(date.time, "HV", sep = "_"))) +HHscn.dates <- as.Date(substr(col_names[odds], 1, 8), "%Y%m%d") +HVscn.dates <- as.Date(substr(col_names[evens], 1, 8), "%Y%m%d") -HH_wlef<-extracted_40m[,odds] -colnames(HH_wlef)<-date.time -HV_wlef<-extracted_40m[,evens] -colnames(HV_wlef)<-date.time +HH_wlef <- extracted_40m[, odds] +colnames(HH_wlef) <- date.time +HV_wlef <- extracted_40m[, evens] +colnames(HV_wlef) <- date.time -HH.calib <-HH_wlef[,1] -HV.calib <-HV_wlef[,1] +HH.calib <- HH_wlef[, 1] +HV.calib <- HV_wlef[, 1] -x<-HV.calib -y<-(wlef_abg$ABG_biomass)^0.5 -z<-HH.calib +x <- HV.calib +y <- (wlef_abg$ABG_biomass)^0.5 +z <- HH.calib -par(mfrow=c(1,2)) -scatter.smooth(y,z,col="#CCCCCC",xlab=expression(~ABG_biomass^2~ "(Mg/ha)"),ylab="HH (gamma)",main=date.time[1]) -scatter.smooth(y,x,col="#CCCCCC",xlab=expression(~ABG_biomass^2~ "(Mg/ha)"),ylab="HV (gamma)",main=date.time[1]) +par(mfrow = c(1, 2)) +scatter.smooth(y, z, col = "#CCCCCC", xlab = expression(~ ABG_biomass^2 ~ "(Mg/ha)"), ylab = "HH (gamma)", main = date.time[1]) +scatter.smooth(y, x, col = "#CCCCCC", xlab = expression(~ ABG_biomass^2 ~ "(Mg/ha)"), ylab = "HV (gamma)", main = date.time[1]) -plot(x,y) -summary(lm(y~x)) -abline(reg=lm(y~x),col=2,lwd=3) +plot(x, y) +summary(lm(y ~ x)) +abline(reg = lm(y ~ x), col = 2, lwd = 3) ################################################################################################ -odds<-seq(1,ncol(disturbance_extracted_40m),by=2) -evens<-seq(2,ncol(disturbance_extracted_40m),by=2) +odds <- seq(1, ncol(disturbance_extracted_40m), by = 2) +evens <- seq(2, ncol(disturbance_extracted_40m), by = 2) -biomass<-x -sel = which(x>0) -x = x[sel];y=y[sel] +biomass <- x +sel <- which(x > 0) +x <- x[sel] +y <- y[sel] ################################################################################################ -##Linear Regression +## Linear Regression ################################################################################################ -HVfit <- lm(y ~ x) -HHfit <- lm(y ~ z) -HH.HVfit <- lm(y ~ x + z) +HVfit <- lm(y ~ x) +HHfit <- lm(y ~ z) +HH.HVfit <- lm(y ~ x + z) -AIC(HVfit,HHfit,HH.HVfit) +AIC(HVfit, HHfit, HH.HVfit) summary(HH.HVfit) -coef<-coefficients(HH.HVfit) +coef <- coefficients(HH.HVfit) -xseq = seq(min(x),max(x),length=length(wlef_abg$ABG_biomass[wlef_abg$ABG_biomass<100])) -zseq = seq(min(z),max(z),length=length(wlef_abg$ABG_biomass[wlef_abg$ABG_biomass<100])) +xseq <- seq(min(x), max(x), length = length(wlef_abg$ABG_biomass[wlef_abg$ABG_biomass < 100])) +zseq <- seq(min(z), max(z), length = length(wlef_abg$ABG_biomass[wlef_abg$ABG_biomass < 100])) -par(mfrow=c(1,1)) -plot(HV.calib,(wlef_abg$ABG_biomass)^0.5,col="#CCCCCC",ylab="ABG_biomass",xlab="HV (gamma 
(dB))",main=date.time[1]) -lines(cbind(xseq,coef[2]*xseq+ coef[3]*zseq + coef[1]),col=2,lwd=3) +par(mfrow = c(1, 1)) +plot(HV.calib, (wlef_abg$ABG_biomass)^0.5, col = "#CCCCCC", ylab = "ABG_biomass", xlab = "HV (gamma (dB))", main = date.time[1]) +lines(cbind(xseq, coef[2] * xseq + coef[3] * zseq + coef[1]), col = 2, lwd = 3) -par(mfrow=c(1,1)) -plot(cbind(sqrt(wlef_abg$ABG_biomass[wlef_abg$ABG_biomass<100]),(coef[2]*xseq+ coef[3]*zseq + coef[1])),ylim=c(0,10),xlab="(Observed Biomass)^0.5 (Mg/ha)",ylab="(Predicted Biomass)^0.5 (Mg/ha)") -abline(a = 0, b = 1,col="red",lwd=3) +par(mfrow = c(1, 1)) +plot(cbind(sqrt(wlef_abg$ABG_biomass[wlef_abg$ABG_biomass < 100]), (coef[2] * xseq + coef[3] * zseq + coef[1])), ylim = c(0, 10), xlab = "(Observed Biomass)^0.5 (Mg/ha)", ylab = "(Predicted Biomass)^0.5 (Mg/ha)") +abline(a = 0, b = 1, col = "red", lwd = 3) ################################################################################################ -disturbance_signal<- disturbance_extracted_40m -HH_disturb<-disturbance_signal[,odds] -HV_disturb<-disturbance_signal[,evens] +disturbance_signal <- disturbance_extracted_40m +HH_disturb <- disturbance_signal[, odds] +HV_disturb <- disturbance_signal[, evens] disturbance_extracted_40m -scn.dates<-as.Date(substr(colnames(disturbance_extracted_40m),2,9),"%Y%m%d") -scn.yr<-substr(colnames(disturbance_extracted_40m),2,5) -scn.yr<-as.numeric(scn.yr[odds]) +scn.dates <- as.Date(substr(colnames(disturbance_extracted_40m), 2, 9), "%Y%m%d") +scn.yr <- substr(colnames(disturbance_extracted_40m), 2, 5) +scn.yr <- as.numeric(scn.yr[odds]) -colnames(HH_disturb)<-as.character(scn.dates[odds]) -colnames(HV_disturb)<-as.character(scn.dates[evens]) +colnames(HH_disturb) <- as.character(scn.dates[odds]) +colnames(HV_disturb) <- as.character(scn.dates[evens]) -disturbance_ages<-matrix(NA,nrow(HH_disturb),length(scn.yr)) -colnames(disturbance_ages)<-as.character(scn.dates[evens]) -for(i in 1:length(scn.yr)){ - disturbance_ages[,i]<-scn.yr[i]-disturbance_infile$distyr +disturbance_ages <- matrix(NA, nrow(HH_disturb), length(scn.yr)) +colnames(disturbance_ages) <- as.character(scn.dates[evens]) +for (i in 1:length(scn.yr)) { + disturbance_ages[, i] <- scn.yr[i] - disturbance_infile$distyr } -initial_age<-sort(unique(disturbance_ages[,1])) +initial_age <- sort(unique(disturbance_ages[, 1])) -dist_biomass_est<-(coef[2]*disturbance_extracted_40m[,2]+ coef[3]*disturbance_extracted_40m[,1] + coef[1]) -plot(disturbance_ages[,1],dist_biomass_est,ylab="sqrt(Estimated Biomass (Mg/ha))",xlab="Time since disturbance (years)") +dist_biomass_est <- (coef[2] * disturbance_extracted_40m[, 2] + coef[3] * disturbance_extracted_40m[, 1] + coef[1]) +plot(disturbance_ages[, 1], dist_biomass_est, ylab = "sqrt(Estimated Biomass (Mg/ha))", xlab = "Time since disturbance (years)") -mean_est_biomass<-vector(mode="numeric",length=0) -for(i in 1:length(initial_age)){ - mean_est_biomass<-c(mean_est_biomass,mean(dist_biomass_est[initial_age==initial_age[i]],na.rm = T)) +mean_est_biomass <- vector(mode = "numeric", length = 0) +for (i in 1:length(initial_age)) { + mean_est_biomass <- c(mean_est_biomass, mean(dist_biomass_est[initial_age == initial_age[i]], na.rm = T)) } -plot(initial_age,mean_est_biomass,ylab="sqrt(Estimated Biomass (Mg/ha))",xlab="Time since disturbance (years)") +plot(initial_age, mean_est_biomass, ylab = "sqrt(Estimated Biomass (Mg/ha))", xlab = "Time since disturbance (years)") -par(new=F) -par(mfrow=c(1,1)) -for(i in 1:ncol(HH_disturb)){ - 
plot(disturbance_infile$distyr[HH_disturb[,i]>0],HH_disturb[HH_disturb[,i]>0,i], xlim=c(1985,2010),ylim=c(0,0.5),xlab="",ylab="",axes=F) - par(new=T) +par(new = F) +par(mfrow = c(1, 1)) +for (i in 1:ncol(HH_disturb)) { + plot(disturbance_infile$distyr[HH_disturb[, i] > 0], HH_disturb[HH_disturb[, i] > 0, i], xlim = c(1985, 2010), ylim = c(0, 0.5), xlab = "", ylab = "", axes = F) + par(new = T) } -par(new=T) -plot(disturbance_infile$distyr[HH_disturb[,i]>0],HH_disturb[HH_disturb[,i]>0,i], xlim=c(1985,2010),ylim=c(0,0.5),xlab="Year of disturbance",ylab="Extracted HH returns",type="n") - -par(new=F) -par(mfrow=c(1,1)) -for(i in 1:ncol(HH_disturb)){ - plot(disturbance_infile$distyr[HV_disturb[,i]>0],HV_disturb[HV_disturb[,i]>0,i], xlim=c(1985,2010),ylim=c(0,0.15),xlab="",ylab="",axes=F) - par(new=T) +par(new = T) +plot(disturbance_infile$distyr[HH_disturb[, i] > 0], HH_disturb[HH_disturb[, i] > 0, i], xlim = c(1985, 2010), ylim = c(0, 0.5), xlab = "Year of disturbance", ylab = "Extracted HH returns", type = "n") + +par(new = F) +par(mfrow = c(1, 1)) +for (i in 1:ncol(HH_disturb)) { + plot(disturbance_infile$distyr[HV_disturb[, i] > 0], HV_disturb[HV_disturb[, i] > 0, i], xlim = c(1985, 2010), ylim = c(0, 0.15), xlab = "", ylab = "", axes = F) + par(new = T) } -par(new=T) -plot(disturbance_infile$distyr[HV_disturb[,i]>0],HV_disturb[HV_disturb[,i]>0,i], xlim=c(1985,2010),ylim=c(0,0.15),xlab="Year of disturbance",ylab="Extracted HV returns",type="n") - -HV_dist_chrono<-na.omit( -rbind(cbind(disturbance_ages[HV_disturb[,12]>0,12],HV_disturb[HV_disturb[,12]>0,12]), - cbind(disturbance_ages[HV_disturb[,13]>0,13],HV_disturb[HV_disturb[,13]>0,13]), - cbind(disturbance_ages[HV_disturb[,14]>0,14],HV_disturb[HV_disturb[,14]>0,14]), - cbind(disturbance_ages[HV_disturb[,15]>0,15],HV_disturb[HV_disturb[,15]>0,15]), - cbind(disturbance_ages[HV_disturb[,16]>0,16],HV_disturb[HV_disturb[,16]>0,16]), - cbind(disturbance_ages[HV_disturb[,17]>0,17],HV_disturb[HV_disturb[,17]>0,17]), - cbind(disturbance_ages[HV_disturb[,18]>0,18],HV_disturb[HV_disturb[,18]>0,18]), - cbind(disturbance_ages[HV_disturb[,19]>0,19],HV_disturb[HV_disturb[,19]>0,19]), - cbind(disturbance_ages[HV_disturb[,20]>0,20],HV_disturb[HV_disturb[,20]>0,20]), - cbind(disturbance_ages[HV_disturb[,21]>0,21],HV_disturb[HV_disturb[,21]>0,21]))) -colnames(HV_dist_chrono)<-c("age","HV") - - -chrono<-sort(unique(HV_dist_chrono[,1])) -distchron<-matrix(NA,length(chrono),2) - -for(i in 1:length(chrono)){ - distchron[i,1]<-chrono[i] - distchron[i,2]<-mean(HV_dist_chrono[HV_dist_chrono[,1]==chrono[i],2],na.rm=T) +par(new = T) +plot(disturbance_infile$distyr[HV_disturb[, i] > 0], HV_disturb[HV_disturb[, i] > 0, i], xlim = c(1985, 2010), ylim = c(0, 0.15), xlab = "Year of disturbance", ylab = "Extracted HV returns", type = "n") + +HV_dist_chrono <- na.omit( + rbind( + cbind(disturbance_ages[HV_disturb[, 12] > 0, 12], HV_disturb[HV_disturb[, 12] > 0, 12]), + cbind(disturbance_ages[HV_disturb[, 13] > 0, 13], HV_disturb[HV_disturb[, 13] > 0, 13]), + cbind(disturbance_ages[HV_disturb[, 14] > 0, 14], HV_disturb[HV_disturb[, 14] > 0, 14]), + cbind(disturbance_ages[HV_disturb[, 15] > 0, 15], HV_disturb[HV_disturb[, 15] > 0, 15]), + cbind(disturbance_ages[HV_disturb[, 16] > 0, 16], HV_disturb[HV_disturb[, 16] > 0, 16]), + cbind(disturbance_ages[HV_disturb[, 17] > 0, 17], HV_disturb[HV_disturb[, 17] > 0, 17]), + cbind(disturbance_ages[HV_disturb[, 18] > 0, 18], HV_disturb[HV_disturb[, 18] > 0, 18]), + cbind(disturbance_ages[HV_disturb[, 19] > 0, 19], 
HV_disturb[HV_disturb[, 19] > 0, 19]), + cbind(disturbance_ages[HV_disturb[, 20] > 0, 20], HV_disturb[HV_disturb[, 20] > 0, 20]), + cbind(disturbance_ages[HV_disturb[, 21] > 0, 21], HV_disturb[HV_disturb[, 21] > 0, 21]) + ) +) +colnames(HV_dist_chrono) <- c("age", "HV") + + +chrono <- sort(unique(HV_dist_chrono[, 1])) +distchron <- matrix(NA, length(chrono), 2) + +for (i in 1:length(chrono)) { + distchron[i, 1] <- chrono[i] + distchron[i, 2] <- mean(HV_dist_chrono[HV_dist_chrono[, 1] == chrono[i], 2], na.rm = T) } -plot(distchron[,1],distchron[,2]) -age_vs_HV <- lm(distchron[,2] ~ distchron[,1]) +plot(distchron[, 1], distchron[, 2]) +age_vs_HV <- lm(distchron[, 2] ~ distchron[, 1]) summary(age_vs_HV) -abline(reg=age_vs_HV,col=2,lwd=3) +abline(reg = age_vs_HV, col = 2, lwd = 3) -plot(HV_dist_chrono,xlab="Forest Age (years)",ylab="HV (gamma)") -age_vs_HV <- lm(HV_dist_chrono[,2] ~ HV_dist_chrono[,1]) +plot(HV_dist_chrono, xlab = "Forest Age (years)", ylab = "HV (gamma)") +age_vs_HV <- lm(HV_dist_chrono[, 2] ~ HV_dist_chrono[, 1]) summary(age_vs_HV) -abline(reg=age_vs_HV,col=2,lwd=3) -text(x=15,y=0.11,expression(~R^2~"= 0.06, p<0.005")) - -par(mfrow=c(1,1)) -iseq<-seq(12,21, by=1) -cols<-rainbow(length(iseq)) -cols<-c("black","red","blue", "green","cyan","magenta","yellow","orange") -for(i in 13:18){ - plot(disturbance_ages[HV_disturb[,i]>0,i],HV_disturb[HV_disturb[,i]>0,i],xlim=c(min(HV_dist_chrono[,1]),max(HV_dist_chrono[,1])),ylim=c(min(HV_dist_chrono[,2]),max(HV_dist_chrono[,2])),xlab="",ylab="",axes=F,col=cols[i-11]) - age_vs_HV <- lm(HV_disturb[HV_disturb[,i]>0,i] ~ disturbance_ages[HV_disturb[,i]>0,i]) +abline(reg = age_vs_HV, col = 2, lwd = 3) +text(x = 15, y = 0.11, expression(~ R^2 ~ "= 0.06, p<0.005")) + +par(mfrow = c(1, 1)) +iseq <- seq(12, 21, by = 1) +cols <- rainbow(length(iseq)) +cols <- c("black", "red", "blue", "green", "cyan", "magenta", "yellow", "orange") +for (i in 13:18) { + plot(disturbance_ages[HV_disturb[, i] > 0, i], HV_disturb[HV_disturb[, i] > 0, i], xlim = c(min(HV_dist_chrono[, 1]), max(HV_dist_chrono[, 1])), ylim = c(min(HV_dist_chrono[, 2]), max(HV_dist_chrono[, 2])), xlab = "", ylab = "", axes = F, col = cols[i - 11]) + age_vs_HV <- lm(HV_disturb[HV_disturb[, i] > 0, i] ~ disturbance_ages[HV_disturb[, i] > 0, i]) summary(age_vs_HV) - abline(reg=age_vs_HV,col=cols[1],lwd=3) - par(new=T) + abline(reg = age_vs_HV, col = cols[1], lwd = 3) + par(new = T) } -par(new=T) -plot(disturbance_ages[HV_disturb[,i]>0,i],HV_disturb[HV_disturb[,i]>0,i],xlim=c(min(HV_dist_chrono[,1]),max(HV_dist_chrono[,1])),ylim=c(min(HV_dist_chrono[,2]),max(HV_dist_chrono[,2])),xlab="Forest Age (years)",ylab="HV (gamma)",main="2010 PALSAR Scenes") -text(x=22,y=0.11,expression(~R^2~"= 0.18, p<0.05")) +par(new = T) +plot(disturbance_ages[HV_disturb[, i] > 0, i], HV_disturb[HV_disturb[, i] > 0, i], xlim = c(min(HV_dist_chrono[, 1]), max(HV_dist_chrono[, 1])), ylim = c(min(HV_dist_chrono[, 2]), max(HV_dist_chrono[, 2])), xlab = "Forest Age (years)", ylab = "HV (gamma)", main = "2010 PALSAR Scenes") +text(x = 22, y = 0.11, expression(~ R^2 ~ "= 0.18, p<0.05")) ################################################################# -a<- 0.10 -b<-10 -int<-0.05 -sd<-sd(HV.calib) -params<-c(a,b,int,sd) +a <- 0.10 +b <- 10 +int <- 0.05 +sd <- sd(HV.calib) +params <- c(a, b, int, sd) -fit.mono = optim(params,ll.mono,x=x,y=y) -aic.mono<- -2*fit.mono$value + 2*length(params) +fit.mono <- optim(params, ll.mono, x = x, y = y) +aic.mono <- -2 * fit.mono$value + 2 * length(params) -params = fit.mono$par -xseq = 
seq(min(x),max(x),length=1000) +params <- fit.mono$par +xseq <- seq(min(x), max(x), length = 1000) -lines(cbind(xseq,params[1]*(1-exp(-xseq*params[2]))+params[3]),col=6,lwd=3) +lines(cbind(xseq, params[1] * (1 - exp(-xseq * params[2])) + params[3]), col = 6, lwd = 3) ################################################################# -HV_dist_chrono<-cbind(HV_dist_chrono[,1],HV_dist_chrono[,2]) -colnames(HV_dist_chrono)<-c("age","HV") -boxplot(data=HV_dist_chrono, HV ~ age,xlab="Forest Age (years)",ylab="HV (gamma)") - -HH_dist_chrono<-na.omit( - rbind(cbind(disturbance_ages[HH_disturb[,12]>0,12],HH_disturb[HH_disturb[,12]>0,12]), - cbind(disturbance_ages[HH_disturb[,13]>0,13],HH_disturb[HH_disturb[,13]>0,13]), - cbind(disturbance_ages[HH_disturb[,14]>0,14],HH_disturb[HH_disturb[,14]>0,14]), - cbind(disturbance_ages[HH_disturb[,15]>0,15],HH_disturb[HH_disturb[,15]>0,15]), - cbind(disturbance_ages[HH_disturb[,16]>0,16],HH_disturb[HH_disturb[,16]>0,16]), - cbind(disturbance_ages[HH_disturb[,17]>0,17],HH_disturb[HH_disturb[,17]>0,17]), - cbind(disturbance_ages[HH_disturb[,18]>0,18],HH_disturb[HH_disturb[,18]>0,18]), - cbind(disturbance_ages[HH_disturb[,19]>0,19],HH_disturb[HH_disturb[,19]>0,19]), - cbind(disturbance_ages[HH_disturb[,20]>0,20],HH_disturb[HH_disturb[,20]>0,20]), - cbind(disturbance_ages[HH_disturb[,21]>0,21],HH_disturb[HH_disturb[,21]>0,21]))) +HV_dist_chrono <- cbind(HV_dist_chrono[, 1], HV_dist_chrono[, 2]) +colnames(HV_dist_chrono) <- c("age", "HV") +boxplot(data = HV_dist_chrono, HV ~ age, xlab = "Forest Age (years)", ylab = "HV (gamma)") + +HH_dist_chrono <- na.omit( + rbind( + cbind(disturbance_ages[HH_disturb[, 12] > 0, 12], HH_disturb[HH_disturb[, 12] > 0, 12]), + cbind(disturbance_ages[HH_disturb[, 13] > 0, 13], HH_disturb[HH_disturb[, 13] > 0, 13]), + cbind(disturbance_ages[HH_disturb[, 14] > 0, 14], HH_disturb[HH_disturb[, 14] > 0, 14]), + cbind(disturbance_ages[HH_disturb[, 15] > 0, 15], HH_disturb[HH_disturb[, 15] > 0, 15]), + cbind(disturbance_ages[HH_disturb[, 16] > 0, 16], HH_disturb[HH_disturb[, 16] > 0, 16]), + cbind(disturbance_ages[HH_disturb[, 17] > 0, 17], HH_disturb[HH_disturb[, 17] > 0, 17]), + cbind(disturbance_ages[HH_disturb[, 18] > 0, 18], HH_disturb[HH_disturb[, 18] > 0, 18]), + cbind(disturbance_ages[HH_disturb[, 19] > 0, 19], HH_disturb[HH_disturb[, 19] > 0, 19]), + cbind(disturbance_ages[HH_disturb[, 20] > 0, 20], HH_disturb[HH_disturb[, 20] > 0, 20]), + cbind(disturbance_ages[HH_disturb[, 21] > 0, 21], HH_disturb[HH_disturb[, 21] > 0, 21]) + ) +) plot(HH_dist_chrono) -HH_dist_chrono<-cbind(HH_dist_chrono[,1],HH_dist_chrono[,2]) -colnames(HH_dist_chrono)<-c("age","HH") -boxplot(data=HH_dist_chrono, HH ~ age,xlab="Age of disturbance (years)",ylab="HH (gamma)") - - - -sort(unique(c(unique(disturbance_ages[,1]), - unique(disturbance_ages[,2]), -unique(disturbance_ages[,3]), -unique(disturbance_ages[,4]), -unique(disturbance_ages[,5]), -unique(disturbance_ages[,6]), -unique(disturbance_ages[,7]), -unique(disturbance_ages[,8]), -unique(disturbance_ages[,9]), -unique(disturbance_ages[,10]), -unique(disturbance_ages[,11]), -unique(disturbance_ages[,12]), -unique(disturbance_ages[,13]), -unique(disturbance_ages[,14]), -unique(disturbance_ages[,15]), -unique(disturbance_ages[,16]), -unique(disturbance_ages[,17]), -unique(disturbance_ages[,18]), -unique(disturbance_ages[,19]), -unique(disturbance_ages[,20]), -unique(disturbance_ages[,21])))) - - -par(new=F) -par(mfrow=c(1,1)) -plot(scn.dates[odds],HV_disturb[3,],type="n") 
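## A minimal, self-contained sketch of the maximum-likelihood curve fit used above,
## assuming synthetic data. `ll.mono` in the original script is defined elsewhere, so the
## negative log-likelihood below is a hypothetical stand-in for the monomolecular
## (saturating) model y = a * (1 - exp(-b * x)) + int.
set.seed(1)
x <- runif(200, 0, 300)                                          # e.g. biomass (Mg/ha)
y <- 0.12 * (1 - exp(-0.02 * x)) + 0.03 + rnorm(200, sd = 0.01)  # e.g. HV backscatter

ll.mono <- function(params, x, y) {
  a <- params[1]
  b <- params[2]
  int <- params[3]
  sd <- params[4]
  y.pred <- a * (1 - exp(-b * x)) + int
  -sum(dnorm(y, mean = y.pred, sd = sd, log = TRUE)) # negative log-likelihood
}

start <- c(a = 0.10, b = 0.01, int = 0.05, sd = sd(y))
fit.mono <- optim(par = start, fn = ll.mono, x = x, y = y)
## optim() minimizes, so fit.mono$value is the minimized negative log-likelihood and
## AIC = 2 * NLL + 2 * k for k estimated parameters.
aic.mono <- 2 * fit.mono$value + 2 * length(start)
fit.mono$par
aic.mono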
-lines(scn.dates[odds],HV_disturb[3,],type="b") -lines(scn.dates[odds],HV_disturb[4,],type="b", col=2) -lines(scn.dates[odds],HV_disturb[5,],type="b", col=3) -lines(scn.dates[odds],HV_disturb[6,],type="b", col=4) -lines(scn.dates[odds],HV_disturb[7,],type="b", col=5) -lines(scn.dates[odds],HV_disturb[8,],type="b", col=6) -lines(scn.dates[odds],HV_disturb[9,],type="b", col=7) - - -HV.calib <-HV_wlef[,1] -(params[1]*xseq^2)/(params[2]+(params[3]*xseq)+xseq^2) +HH_dist_chrono <- cbind(HH_dist_chrono[, 1], HH_dist_chrono[, 2]) +colnames(HH_dist_chrono) <- c("age", "HH") +boxplot(data = HH_dist_chrono, HH ~ age, xlab = "Age of disturbance (years)", ylab = "HH (gamma)") + + + +sort(unique(c( + unique(disturbance_ages[, 1]), + unique(disturbance_ages[, 2]), + unique(disturbance_ages[, 3]), + unique(disturbance_ages[, 4]), + unique(disturbance_ages[, 5]), + unique(disturbance_ages[, 6]), + unique(disturbance_ages[, 7]), + unique(disturbance_ages[, 8]), + unique(disturbance_ages[, 9]), + unique(disturbance_ages[, 10]), + unique(disturbance_ages[, 11]), + unique(disturbance_ages[, 12]), + unique(disturbance_ages[, 13]), + unique(disturbance_ages[, 14]), + unique(disturbance_ages[, 15]), + unique(disturbance_ages[, 16]), + unique(disturbance_ages[, 17]), + unique(disturbance_ages[, 18]), + unique(disturbance_ages[, 19]), + unique(disturbance_ages[, 20]), + unique(disturbance_ages[, 21]) +))) + + +par(new = F) +par(mfrow = c(1, 1)) +plot(scn.dates[odds], HV_disturb[3, ], type = "n") +lines(scn.dates[odds], HV_disturb[3, ], type = "b") +lines(scn.dates[odds], HV_disturb[4, ], type = "b", col = 2) +lines(scn.dates[odds], HV_disturb[5, ], type = "b", col = 3) +lines(scn.dates[odds], HV_disturb[6, ], type = "b", col = 4) +lines(scn.dates[odds], HV_disturb[7, ], type = "b", col = 5) +lines(scn.dates[odds], HV_disturb[8, ], type = "b", col = 6) +lines(scn.dates[odds], HV_disturb[9, ], type = "b", col = 7) + + +HV.calib <- HV_wlef[, 1] +(params[1] * xseq^2) / (params[2] + (params[3] * xseq) + xseq^2) diff --git a/modules/data.remote/inst/scripts/old/Park_Falls_Golf.R b/modules/data.remote/inst/scripts/old/Park_Falls_Golf.R index 31d95404071..bbe83b9b532 100644 --- a/modules/data.remote/inst/scripts/old/Park_Falls_Golf.R +++ b/modules/data.remote/inst/scripts/old/Park_Falls_Golf.R @@ -17,102 +17,101 @@ library(spatstat) # Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) -calib_inpath <-"/Users/hardimanb/Desktop/data.remote/biometry" ##For Mac +calib_inpath <- "/Users/hardimanb/Desktop/data.remote/biometry" ## For Mac # calib_inpath <-"/home/bhardima/pecan/modules/data.remote/biometry" -calib_infile <-read.csv(file.path(calib_inpath,"biometry_trimmed.csv"), sep=",", header=T) #WLEF plots +calib_infile <- read.csv(file.path(calib_inpath, "biometry_trimmed.csv"), sep = ",", header = T) # WLEF plots -coords<-data.frame(calib_infile$easting[1:100],calib_infile$northing[1:100]) #eastings and northings (UTM Zone 15N NAD83) +coords <- data.frame(calib_infile$easting[1:100], calib_infile$northing[1:100]) # eastings and northings (UTM Zone 15N NAD83) -##Convert to class=SpatialPoints for palsar extraction (this is used for creating kml files and will be used later on) -Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=utm +zone=15 +a=6378137 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) +## Convert to class=SpatialPoints for palsar extraction (this is used for creating kml files and will be used later on) +Sr1 <- 
SpatialPoints(coords, proj4string = CRS("+proj=utm +zone=15 +a=6378137 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) -palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote/palsar_scenes/geo_corrected_single_gamma") ##For Mac +palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote/palsar_scenes/geo_corrected_single_gamma") ## For Mac # palsar_inpath <- file.path("/home/bhardima/pecan/modules/data.remote/palsar_scenes/Link_to_cheas/geo_corrected_single_gamma") -file.info<-read.table(file="/Users/hardimanb/Desktop/data.remote/output/metadata/output_metadata.csv",header=T,sep="\t") ##For Mac +file.info <- read.table(file = "/Users/hardimanb/Desktop/data.remote/output/metadata/output_metadata.csv", header = T, sep = "\t") ## For Mac # file.info<-read.table(file="/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv",header=T,sep="\t") -date.time<-as.vector(substr(file.info$scndate,1,8)) -date.time<-as.Date(date.time,"%Y%m%d") -col_names<-c(rbind(paste(date.time, "HH",sep="_"),paste(date.time, "HV",sep="_"))) - -pol_bands<-c("HH", "HV") -numfiles<-length(date.time) - -extracted_10m<-matrix(NA, nrow(coords),length(pol_bands)*numfiles) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -extracted_20m<-matrix(NA, nrow(coords),length(pol_bands)*numfiles) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -extracted_40m<-matrix(NA, nrow(coords),length(pol_bands)*numfiles) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -extracted_60m<-matrix(NA, nrow(coords),length(pol_bands)*numfiles) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -extracted_80m<-matrix(NA, nrow(coords),length(pol_bands)*numfiles) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands - -colnames(extracted_10m)<-col_names -colnames(extracted_20m)<-col_names -colnames(extracted_40m)<-col_names -colnames(extracted_60m)<-col_names -colnames(extracted_80m)<-col_names - -for(i in 1:numfiles){ - for(j in 1:2){ - - filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[j]), pattern=".tif" ,recursive=F)) - inpath<-file.path(palsar_inpath,pol_bands[j],filelist[i]) - rast<-raster(inpath) - -# ################################ -# ##golf plots 10m BUFFER MEAN -# ################################ -# data_10m<-extract(rast, Sr1, method="simple",buffer=10, small=T, fun=mean) -# cols<-seq(j,ncol(extracted_10m),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) -# extracted_10m[,cols[i]]<-data_10m -# -# ############################### -# #golf plots 20m BUFFER MEAN -# ############################### -# data_20m<-extract(rast, Sr1, method="simple",buffer=20, small=T, fun=mean) -# cols<-seq(j,ncol(extracted),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) -# extracted_20m[,cols[i]]<-data_20m - +date.time <- as.vector(substr(file.info$scndate, 1, 8)) +date.time <- as.Date(date.time, "%Y%m%d") +col_names <- c(rbind(paste(date.time, "HH", sep = "_"), paste(date.time, "HV", sep = "_"))) + +pol_bands <- c("HH", "HV") +numfiles <- length(date.time) + +extracted_10m <- matrix(NA, nrow(coords), length(pol_bands) * numfiles) # df to store extracted palsar values. nrow=number of coordinates being extracted. 
ncol=# of pol_bands +extracted_20m <- matrix(NA, nrow(coords), length(pol_bands) * numfiles) # df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +extracted_40m <- matrix(NA, nrow(coords), length(pol_bands) * numfiles) # df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +extracted_60m <- matrix(NA, nrow(coords), length(pol_bands) * numfiles) # df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +extracted_80m <- matrix(NA, nrow(coords), length(pol_bands) * numfiles) # df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands + +colnames(extracted_10m) <- col_names +colnames(extracted_20m) <- col_names +colnames(extracted_40m) <- col_names +colnames(extracted_60m) <- col_names +colnames(extracted_80m) <- col_names + +for (i in 1:numfiles) { + for (j in 1:2) { + filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[j]), pattern = ".tif", recursive = F)) + inpath <- file.path(palsar_inpath, pol_bands[j], filelist[i]) + rast <- raster(inpath) + + # ################################ + # ##golf plots 10m BUFFER MEAN + # ################################ + # data_10m<-extract(rast, Sr1, method="simple",buffer=10, small=T, fun=mean) + # cols<-seq(j,ncol(extracted_10m),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) + # extracted_10m[,cols[i]]<-data_10m + # + # ############################### + # #golf plots 20m BUFFER MEAN + # ############################### + # data_20m<-extract(rast, Sr1, method="simple",buffer=20, small=T, fun=mean) + # cols<-seq(j,ncol(extracted),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) + # extracted_20m[,cols[i]]<-data_20m + ################################ - ##golf plots 40m BUFFER MEAN + ## golf plots 40m BUFFER MEAN ################################ - data_40m<-extract(rast, Sr1, method="simple",buffer=40, small=T, fun=mean) - cols<-seq(j,ncol(extracted_40m),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) - extracted_40m[,cols[i]]<-data_40m - -# ################################ -# ##golf plots 60m BUFFER MEAN -# ################################ -# data_60m<-extract(rast, Sr1, method="simple",buffer=60, small=T, fun=mean) -# cols<-seq(j,ncol(extracted_60m),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) -# extracted_60m[,cols[i]]<-data_60m -# -# ################################ -# ##golf plots 80m BUFFER MEAN -# ################################ -# data_80m<-extract(rast, Sr1, method="simple",buffer=80, small=T, fun=mean) -# cols<-seq(j,ncol(extracted_80m),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) -# extracted_80m[,cols[i]]<-data_80m - - - print(paste("i=",i,sep="")) - print(paste("j=",j,sep="")) + data_40m <- extract(rast, Sr1, method = "simple", buffer = 40, small = T, fun = mean) + cols <- seq(j, ncol(extracted_40m), by = 2) # columns to be filled with palsar data (if j is odd=HH, if j is even=HV) + extracted_40m[, cols[i]] <- data_40m + + # ################################ + # ##golf plots 60m BUFFER MEAN + # ################################ + # data_60m<-extract(rast, Sr1, method="simple",buffer=60, small=T, fun=mean) + # cols<-seq(j,ncol(extracted_60m),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) + # extracted_60m[,cols[i]]<-data_60m + # + # 
################################ + # ##golf plots 80m BUFFER MEAN + # ################################ + # data_80m<-extract(rast, Sr1, method="simple",buffer=80, small=T, fun=mean) + # cols<-seq(j,ncol(extracted_80m),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) + # extracted_80m[,cols[i]]<-data_80m + + + print(paste("i=", i, sep = "")) + print(paste("j=", j, sep = "")) } } -write.table(extracted_10m,file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_10m.csv",quote=F,sep="\t",eol="\r\n", row.names=F,col.names=T) -write.table(extracted_20m,file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_20m.csv",quote=F,sep="\t",eol="\r\n", row.names=F,col.names=T) -write.table(extracted_40m,file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_40m.csv",quote=F,sep="\t",eol="\r\n", row.names=F,col.names=T) -write.table(extracted_60m,file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_60m.csv",quote=F,sep="\t",eol="\r\n", row.names=F,col.names=T) -write.table(extracted_80m,file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_80m.csv",quote=F,sep="\t",eol="\r\n", row.names=F,col.names=T) +write.table(extracted_10m, file = "/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_10m.csv", quote = F, sep = "\t", eol = "\r\n", row.names = F, col.names = T) +write.table(extracted_20m, file = "/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_20m.csv", quote = F, sep = "\t", eol = "\r\n", row.names = F, col.names = T) +write.table(extracted_40m, file = "/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_40m.csv", quote = F, sep = "\t", eol = "\r\n", row.names = F, col.names = T) +write.table(extracted_60m, file = "/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_60m.csv", quote = F, sep = "\t", eol = "\r\n", row.names = F, col.names = T) +write.table(extracted_80m, file = "/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_80m.csv", quote = F, sep = "\t", eol = "\r\n", row.names = F, col.names = T) -extracted_10m <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_10m.csv",sep="\t", header=T) -extracted_20m <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_20m.csv",sep="\t", header=T) -extracted_40m <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_40m.csv",sep="\t", header=T) -extracted_60m <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_60m.csv",sep="\t", header=T) -extracted_80m <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_80m.csv",sep="\t", header=T) +extracted_10m <- read.table(file = "/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_10m.csv", sep = "\t", header = T) +extracted_20m <- read.table(file = "/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_20m.csv", sep = "\t", header = T) +extracted_40m <- read.table(file = "/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_40m.csv", sep = "\t", header = T) +extracted_60m <- read.table(file = "/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_60m.csv", sep = "\t", header = T) +extracted_80m <- read.table(file = "/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_80m.csv", sep = "\t", header = T) # par(new=F) # par(mfrow=c(1,1)) @@ -121,22 +120,22 @@ extracted_80m <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/da # 
lines(c(10,20,40,60,80),c(extracted_10m[i,1],extracted_20m[i,1],extracted_40m[i,1], extracted_60m[i,1], extracted_80m[i,1]), type="b") # par(new=TRUE) # } -# -par(new=F) -par(mfrow=c(1,1)) -for(i in 1:nrow(coords)){ - plot(c(10,20,40,60,80),c(extracted_10m[i,2],extracted_20m[i,2],extracted_40m[i,2], extracted_60m[i,2], extracted_80m[i,2]), xlim=c(10,80),ylim=c(-18,-6), xlab="plot radius (m)",ylab='MEAN of extracted PALSAR returns \n HV, gamma (dB))',type="n") - lines(c(10,20,40,60,80),c(extracted_10m[i,2],extracted_20m[i,2],extracted_40m[i,2], extracted_60m[i,2], extracted_80m[i,2]), type="b") - par(new=TRUE) +# +par(new = F) +par(mfrow = c(1, 1)) +for (i in 1:nrow(coords)) { + plot(c(10, 20, 40, 60, 80), c(extracted_10m[i, 2], extracted_20m[i, 2], extracted_40m[i, 2], extracted_60m[i, 2], extracted_80m[i, 2]), xlim = c(10, 80), ylim = c(-18, -6), xlab = "plot radius (m)", ylab = "MEAN of extracted PALSAR returns \n HV, gamma (dB))", type = "n") + lines(c(10, 20, 40, 60, 80), c(extracted_10m[i, 2], extracted_20m[i, 2], extracted_40m[i, 2], extracted_60m[i, 2], extracted_80m[i, 2]), type = "b") + par(new = TRUE) } # evens<-seq(2,ncol(extracted_10m),by=2) # odds<-seq(1,ncol(extracted_10m),by=2) -# -# ##plot time series +# +# ##plot time series # colors<-c("red","blue","green","orange","yellow","black","magenta") -# +# # par(new=F) # par(mfrow=c(1,1)) # for(j in 1:nrow(coords)){ @@ -144,133 +143,130 @@ for(i in 1:nrow(coords)){ # lines(date.time,extracted_20m[j,evens],ylim=c(-26,-5),col=colors[j],type="b") # par(new=TRUE) # } -# +# # colnames(extracted_10m[,evens])<-date.time # colnames(extracted_10m[,odds])<-date.time # boxplot(extracted_20m[,evens],xlab="scn date",ylab="MEAN of extracted PALSAR returns (HV, gamma (dB))",main="WLEF: mean HV (gamma (dB), 10m buffer)",type="n") -se_10m_extracted<-matrix(NA, nrow(coords),length(pol_bands)*numfiles) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -se_20m_extracted<-matrix(NA, nrow(coords),length(pol_bands)*numfiles) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -se_40m_extracted<-matrix(NA, nrow(coords),length(pol_bands)*numfiles) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -se_60m_extracted<-matrix(NA, nrow(coords),length(pol_bands)*numfiles) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -se_80m_extracted<-matrix(NA, nrow(coords),length(pol_bands)*numfiles) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands - -colnames(se_10m_extracted)<-col_names -colnames(se_20m_extracted)<-col_names -colnames(se_40m_extracted)<-col_names -colnames(se_60m_extracted)<-col_names -colnames(se_80m_extracted)<-col_names - -coords<-Sr1@coords -for(i in 1:numfiles){ - for(j in 1:2){ - for(k in 1:nrow(coords)){ - - filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[j]), pattern=".tif" ,recursive=F)) - inpath<-file.path(palsar_inpath,pol_bands[j],filelist[i]) - rast<-raster(inpath) - - if(as.numeric(substr(projection(rast),17,18)) == as.numeric(substr(projection(Sr1),17,18))){ - +se_10m_extracted <- matrix(NA, nrow(coords), length(pol_bands) * numfiles) # df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +se_20m_extracted <- matrix(NA, nrow(coords), length(pol_bands) * numfiles) # df to store extracted palsar values. 
nrow=number of coordinates being extracted. ncol=# of pol_bands +se_40m_extracted <- matrix(NA, nrow(coords), length(pol_bands) * numfiles) # df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +se_60m_extracted <- matrix(NA, nrow(coords), length(pol_bands) * numfiles) # df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +se_80m_extracted <- matrix(NA, nrow(coords), length(pol_bands) * numfiles) # df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands + +colnames(se_10m_extracted) <- col_names +colnames(se_20m_extracted) <- col_names +colnames(se_40m_extracted) <- col_names +colnames(se_60m_extracted) <- col_names +colnames(se_80m_extracted) <- col_names + +coords <- Sr1@coords +for (i in 1:numfiles) { + for (j in 1:2) { + for (k in 1:nrow(coords)) { + filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[j]), pattern = ".tif", recursive = F)) + inpath <- file.path(palsar_inpath, pol_bands[j], filelist[i]) + rast <- raster(inpath) + + if (as.numeric(substr(projection(rast), 17, 18)) == as.numeric(substr(projection(Sr1), 17, 18))) { ################################ - ##calibration data from WLEF plots 10m BUFFER STANDARD DEVIATION + ## calibration data from WLEF plots 10m BUFFER STANDARD DEVIATION ################################ - radius<-10 + radius <- 10 # ROI_sd(radius,coords,k) - buffext<-as.vector(disc(radius=radius, centre=coords[k,])) ##10m is the smallest buffer size that overlaps >1 cell - ext<-extent(c(buffext[[2]],buffext[[3]])) - cellnums<-cellsFromExtent(rast,ext) - - cols<-seq(j,ncol(se_10m_extracted),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) + buffext <- as.vector(disc(radius = radius, centre = coords[k, ])) ## 10m is the smallest buffer size that overlaps >1 cell + ext <- extent(c(buffext[[2]], buffext[[3]])) + cellnums <- cellsFromExtent(rast, ext) + + cols <- seq(j, ncol(se_10m_extracted), by = 2) # columns to be filled with palsar data (if j is odd=HH, if j is even=HV) # sd_20m_extracted[k,cols[i]]<- sd_ROI - se_10m_extracted[k,cols[i]]<- sd(extract(rast,cellnums))/sqrt(length(cellnums)) + se_10m_extracted[k, cols[i]] <- sd(extract(rast, cellnums)) / sqrt(length(cellnums)) ################################ - ##calibration data from WLEF plots 20m BUFFER STANDARD DEVIATION + ## calibration data from WLEF plots 20m BUFFER STANDARD DEVIATION ################################ - radius<-20 + radius <- 20 # ROI_sd(radius,coords,k) - buffext<-as.vector(disc(radius=radius, centre=coords[k,])) ##10m is the smallest buffer size that overlaps >1 cell - ext<-extent(c(buffext[[2]],buffext[[3]])) - cellnums<-cellsFromExtent(rast,ext) - - cols<-seq(j,ncol(se_20m_extracted),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) + buffext <- as.vector(disc(radius = radius, centre = coords[k, ])) ## 10m is the smallest buffer size that overlaps >1 cell + ext <- extent(c(buffext[[2]], buffext[[3]])) + cellnums <- cellsFromExtent(rast, ext) + + cols <- seq(j, ncol(se_20m_extracted), by = 2) # columns to be filled with palsar data (if j is odd=HH, if j is even=HV) # sd_20m_extracted[k,cols[i]]<- sd_ROI - se_20m_extracted[k,cols[i]]<- sd(extract(rast,cellnums))/sqrt(length(cellnums)) - + se_20m_extracted[k, cols[i]] <- sd(extract(rast, cellnums)) / sqrt(length(cellnums)) + ################################ - ##calibration data from WLEF plots 40m BUFFER STANDARD DEVIATION + ## 
calibration data from WLEF plots 40m BUFFER STANDARD DEVIATION ################################ - radius<-40 + radius <- 40 # ROI_sd(radius,coords,k) - buffext<-as.vector(disc(radius=radius, centre=coords[k,])) ##10m is the smallest buffer size that overlaps >1 cell - ext<-extent(c(buffext[[2]],buffext[[3]])) - cellnums<-cellsFromExtent(rast,ext) - - cols<-seq(j,ncol(se_40m_extracted),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) + buffext <- as.vector(disc(radius = radius, centre = coords[k, ])) ## 10m is the smallest buffer size that overlaps >1 cell + ext <- extent(c(buffext[[2]], buffext[[3]])) + cellnums <- cellsFromExtent(rast, ext) + + cols <- seq(j, ncol(se_40m_extracted), by = 2) # columns to be filled with palsar data (if j is odd=HH, if j is even=HV) # sd_20m_extracted[k,cols[i]]<- sd_ROI - se_40m_extracted[k,cols[i]]<- sd(extract(rast,cellnums))/sqrt(length(cellnums)) - + se_40m_extracted[k, cols[i]] <- sd(extract(rast, cellnums)) / sqrt(length(cellnums)) + ################################ - ##calibration data from WLEF plots 60m BUFFER STANDARD DEVIATION + ## calibration data from WLEF plots 60m BUFFER STANDARD DEVIATION ################################ - radius<-60 + radius <- 60 # ROI_sd(radius,coords,k) - buffext<-as.vector(disc(radius=radius, centre=coords[k,])) ##10m is the smallest buffer size that overlaps >1 cell - ext<-extent(c(buffext[[2]],buffext[[3]])) - cellnums<-cellsFromExtent(rast,ext) - - cols<-seq(j,ncol(se_60m_extracted),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) + buffext <- as.vector(disc(radius = radius, centre = coords[k, ])) ## 10m is the smallest buffer size that overlaps >1 cell + ext <- extent(c(buffext[[2]], buffext[[3]])) + cellnums <- cellsFromExtent(rast, ext) + + cols <- seq(j, ncol(se_60m_extracted), by = 2) # columns to be filled with palsar data (if j is odd=HH, if j is even=HV) # sd_20m_extracted[k,cols[i]]<- sd_ROI - se_60m_extracted[k,cols[i]]<- sd(extract(rast,cellnums))/sqrt(length(cellnums)) - + se_60m_extracted[k, cols[i]] <- sd(extract(rast, cellnums)) / sqrt(length(cellnums)) + ################################ - ##calibration data from WLEF plots 80m BUFFER STANDARD DEVIATION + ## calibration data from WLEF plots 80m BUFFER STANDARD DEVIATION ################################ - radius<-80 + radius <- 80 # ROI_sd(radius,coords,k) - buffext<-as.vector(disc(radius=radius, centre=coords[k,])) ##10m is the smallest buffer size that overlaps >1 cell - ext<-extent(c(buffext[[2]],buffext[[3]])) - cellnums<-cellsFromExtent(rast,ext) - - cols<-seq(j,ncol(se_80m_extracted),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) + buffext <- as.vector(disc(radius = radius, centre = coords[k, ])) ## 10m is the smallest buffer size that overlaps >1 cell + ext <- extent(c(buffext[[2]], buffext[[3]])) + cellnums <- cellsFromExtent(rast, ext) + + cols <- seq(j, ncol(se_80m_extracted), by = 2) # columns to be filled with palsar data (if j is odd=HH, if j is even=HV) # sd_20m_extracted[k,cols[i]]<- sd_ROI - se_80m_extracted[k,cols[i]]<- sd(extract(rast,cellnums))/sqrt(length(cellnums)) - - print(paste("i=",i,sep="")) - print(paste("j=",j,sep="")) - print(paste("k=",k,sep="")) - + se_80m_extracted[k, cols[i]] <- sd(extract(rast, cellnums)) / sqrt(length(cellnums)) + + print(paste("i=", i, sep = "")) + print(paste("j=", j, sep = "")) + print(paste("k=", k, sep = "")) } } } } -colors<-c("red","blue","green","orange","cyan","black","magenta") -par(new=F) 
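## A hedged sketch of the per-plot standard error computed in the loops above, using the
## legacy raster/sp API that this script already assumes. The file path, point, and radius
## are placeholders. The original builds a square extent around spatstat::disc() and pulls
## cells with cellsFromExtent(); this sketch instead uses extract()'s own circular buffer,
## which is a slightly different (and simpler) region of interest.
library(raster) # also attaches sp

rast <- raster("path/to/palsar_scene_HV.tif") # hypothetical PALSAR scene
pts <- SpatialPoints(cbind(297973, 5088571), proj4string = CRS(projection(rast)))
radius <- 40 # buffer radius in map units (m)

## extract() with a buffer and no fun returns one vector of cell values per point;
## the standard error is then sd / sqrt(n) over those cells.
vals <- extract(rast, pts, buffer = radius)
se <- sapply(vals, function(v) sd(v, na.rm = TRUE) / sqrt(sum(!is.na(v))))
se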
-par(mfrow=c(1,1)) -for(j in 1:nrow(coords)){ - plot(date.time,se_20m_extracted[j,evens],xlab="scn date",ylim=c(0,5),ylab="STDERR of extracted PALSAR returns (HV, gamma (dB))",main="WLEF Plots (n=609)",type="n") - lines(date.time,se_20m_extracted[j,evens],ylim=c(0,5),type="b") - par(new=TRUE) +colors <- c("red", "blue", "green", "orange", "cyan", "black", "magenta") +par(new = F) +par(mfrow = c(1, 1)) +for (j in 1:nrow(coords)) { + plot(date.time, se_20m_extracted[j, evens], xlab = "scn date", ylim = c(0, 5), ylab = "STDERR of extracted PALSAR returns (HV, gamma (dB))", main = "WLEF Plots (n=609)", type = "n") + lines(date.time, se_20m_extracted[j, evens], ylim = c(0, 5), type = "b") + par(new = TRUE) } -par(mfrow=c(1,2)) +par(mfrow = c(1, 2)) -for(i in 1:nrow(coords)){ - plot(c(10,20,40,60,80),c(se_10m_extracted[i,1],se_20m_extracted[i,1],se_40m_extracted[i,1], se_60m_extracted[i,1], se_80m_extracted[i,1]), xlim=c(10,80),ylim=c(0,0.07), ylab="",xlab="",type="n", axes=FALSE) - lines(c(10,20,40,60,80),c(se_10m_extracted[i,1],se_20m_extracted[i,1],se_40m_extracted[i,1], se_60m_extracted[i,1], se_80m_extracted[i,1]), type="b") - par(new=TRUE) +for (i in 1:nrow(coords)) { + plot(c(10, 20, 40, 60, 80), c(se_10m_extracted[i, 1], se_20m_extracted[i, 1], se_40m_extracted[i, 1], se_60m_extracted[i, 1], se_80m_extracted[i, 1]), xlim = c(10, 80), ylim = c(0, 0.07), ylab = "", xlab = "", type = "n", axes = FALSE) + lines(c(10, 20, 40, 60, 80), c(se_10m_extracted[i, 1], se_20m_extracted[i, 1], se_40m_extracted[i, 1], se_60m_extracted[i, 1], se_80m_extracted[i, 1]), type = "b") + par(new = TRUE) } -par(new=T) -plot(c(10,20,40,60,80),c(se_10m_extracted[i,1],se_20m_extracted[i,1],se_40m_extracted[i,1], se_60m_extracted[i,1], se_80m_extracted[i,1]), xlim=c(10,80),ylim=c(0,0.07), xlab="plot radius (m)",ylab="S.E. of extracted HH returns (gamma)",type="n") +par(new = T) +plot(c(10, 20, 40, 60, 80), c(se_10m_extracted[i, 1], se_20m_extracted[i, 1], se_40m_extracted[i, 1], se_60m_extracted[i, 1], se_80m_extracted[i, 1]), xlim = c(10, 80), ylim = c(0, 0.07), xlab = "plot radius (m)", ylab = "S.E. of extracted HH returns (gamma)", type = "n") -for(i in 1:nrow(coords)){ - plot(c(10,20,40,60,80),c(se_10m_extracted[i,2],se_20m_extracted[i,2],se_40m_extracted[i,2], se_60m_extracted[i,2], se_80m_extracted[i,2]), xlim=c(10,80),ylim=c(0,0.025), ylab="",xlab="",type="n", axes=FALSE) - lines(c(10,20,40,60,80),c(se_10m_extracted[i,2],se_20m_extracted[i,2],se_40m_extracted[i,2], se_60m_extracted[i,2], se_80m_extracted[i,2]), type="b") - par(new=TRUE) +for (i in 1:nrow(coords)) { + plot(c(10, 20, 40, 60, 80), c(se_10m_extracted[i, 2], se_20m_extracted[i, 2], se_40m_extracted[i, 2], se_60m_extracted[i, 2], se_80m_extracted[i, 2]), xlim = c(10, 80), ylim = c(0, 0.025), ylab = "", xlab = "", type = "n", axes = FALSE) + lines(c(10, 20, 40, 60, 80), c(se_10m_extracted[i, 2], se_20m_extracted[i, 2], se_40m_extracted[i, 2], se_60m_extracted[i, 2], se_80m_extracted[i, 2]), type = "b") + par(new = TRUE) } -par(new=T) -plot(c(10,20,40,60,80),c(se_10m_extracted[i,1],se_20m_extracted[i,1],se_40m_extracted[i,1], se_60m_extracted[i,1], se_80m_extracted[i,1]), xlab="plot radius (m)",ylab="S.E. of extracted HV returns (gamma)",type="n") +par(new = T) +plot(c(10, 20, 40, 60, 80), c(se_10m_extracted[i, 1], se_20m_extracted[i, 1], se_40m_extracted[i, 1], se_60m_extracted[i, 1], se_80m_extracted[i, 1]), xlab = "plot radius (m)", ylab = "S.E. 
of extracted HV returns (gamma)", type = "n") diff --git a/modules/data.remote/inst/scripts/old/Park_Falls_Runway.R b/modules/data.remote/inst/scripts/old/Park_Falls_Runway.R index dfe44fcf50b..34e1b23c31b 100644 --- a/modules/data.remote/inst/scripts/old/Park_Falls_Runway.R +++ b/modules/data.remote/inst/scripts/old/Park_Falls_Runway.R @@ -1,95 +1,96 @@ -coords<-data.frame(rbind(c(699557.00, 5091807.00), - c(699578.00 ,5091855.00), - c(699580.00, 5091908.00), - c(699579.00, 5091984.00), - c(699504.00, 5091962.00), - c(699505.00, 5091999.00), - c(699507.00, 5092040.00))) -Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) - - -calib_inpath <-"/Users/hardimanb/Desktop/data.remote/biometry" ##For Mac +coords <- data.frame(rbind( + c(699557.00, 5091807.00), + c(699578.00, 5091855.00), + c(699580.00, 5091908.00), + c(699579.00, 5091984.00), + c(699504.00, 5091962.00), + c(699505.00, 5091999.00), + c(699507.00, 5092040.00) +)) +Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) + + +calib_inpath <- "/Users/hardimanb/Desktop/data.remote/biometry" ## For Mac # calib_inpath <-"/home/bhardima/pecan/modules/data.remote/biometry" -calib_infile <-read.csv(file.path(calib_inpath,"biometry_trimmed.csv"), sep="\t", header=T) #WLEF plots -# +calib_infile <- read.csv(file.path(calib_inpath, "biometry_trimmed.csv"), sep = "\t", header = T) # WLEF plots +# # coords<-data.frame(calib_infile$easting[1:50],calib_infile$northing[1:50]) #eastings and northings (UTM Zone 15N NAD83) -# +# # ##Convert to class=SpatialPoints for palsar extraction (this is used for creating kml files and will be used later on) # Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=utm +zone=15 +a=6378137 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) -# +# -palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote/palsar_scenes/geo_corrected_single_gamma_db") ##For Mac +palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote/palsar_scenes/geo_corrected_single_gamma_db") ## For Mac # palsar_inpath <- file.path("/home/bhardima/pecan/modules/data.remote/palsar_scenes/Link_to_cheas/geo_corrected_single_gamma") -file.info<-read.table(file="/Users/hardimanb/Desktop/data.remote/output/metadata/output_metadata.csv",header=T,sep="\t") ##For Mac +file.info <- read.table(file = "/Users/hardimanb/Desktop/data.remote/output/metadata/output_metadata.csv", header = T, sep = "\t") ## For Mac # file.info<-read.table(file="/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv",header=T,sep="\t") -date.time<-as.vector(substr(file.info$scndate,1,8)) -date.time<-as.Date(date.time,"%Y%m%d") -col_names<-c(rbind(paste(date.time, "HH",sep="_"),paste(date.time, "HV",sep="_"))) - -pol_bands<-c("HH", "HV") -numfiles<-length(date.time) - -extracted_10m<-matrix(NA, nrow(coords),length(pol_bands)*numfiles) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -extracted_20m<-matrix(NA, nrow(coords),length(pol_bands)*numfiles) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -extracted_40m<-matrix(NA, nrow(coords),length(pol_bands)*numfiles) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands -extracted_60m<-matrix(NA, nrow(coords),length(pol_bands)*numfiles) #df to store extracted palsar values. 
nrow=number of coordinates being extracted. ncol=# of pol_bands -extracted_80m<-matrix(NA, nrow(coords),length(pol_bands)*numfiles) #df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands - -colnames(extracted_10m)<-col_names -colnames(extracted_20m)<-col_names -colnames(extracted_40m)<-col_names -colnames(extracted_60m)<-col_names -colnames(extracted_80m)<-col_names - -for(i in 1:numfiles){ - for(j in 1:2){ - - filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[j]), pattern=".tif" ,recursive=F)) - inpath<-file.path(palsar_inpath,pol_bands[j],filelist[i]) - rast<-raster(inpath) - +date.time <- as.vector(substr(file.info$scndate, 1, 8)) +date.time <- as.Date(date.time, "%Y%m%d") +col_names <- c(rbind(paste(date.time, "HH", sep = "_"), paste(date.time, "HV", sep = "_"))) + +pol_bands <- c("HH", "HV") +numfiles <- length(date.time) + +extracted_10m <- matrix(NA, nrow(coords), length(pol_bands) * numfiles) # df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +extracted_20m <- matrix(NA, nrow(coords), length(pol_bands) * numfiles) # df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +extracted_40m <- matrix(NA, nrow(coords), length(pol_bands) * numfiles) # df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +extracted_60m <- matrix(NA, nrow(coords), length(pol_bands) * numfiles) # df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands +extracted_80m <- matrix(NA, nrow(coords), length(pol_bands) * numfiles) # df to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands + +colnames(extracted_10m) <- col_names +colnames(extracted_20m) <- col_names +colnames(extracted_40m) <- col_names +colnames(extracted_60m) <- col_names +colnames(extracted_80m) <- col_names + +for (i in 1:numfiles) { + for (j in 1:2) { + filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[j]), pattern = ".tif", recursive = F)) + inpath <- file.path(palsar_inpath, pol_bands[j], filelist[i]) + rast <- raster(inpath) + ################################ - ##golf plots 10m BUFFER MEAN + ## golf plots 10m BUFFER MEAN ################################ - data_10m<-extract(rast, Sr1, method="simple",buffer=10, small=T, fun=mean) - cols<-seq(j,ncol(extracted_10m),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) - extracted_10m[,cols[i]]<-data_10m - + data_10m <- extract(rast, Sr1, method = "simple", buffer = 10, small = T, fun = mean) + cols <- seq(j, ncol(extracted_10m), by = 2) # columns to be filled with palsar data (if j is odd=HH, if j is even=HV) + extracted_10m[, cols[i]] <- data_10m + ############################### - #golf plots 20m BUFFER MEAN + # golf plots 20m BUFFER MEAN ############################### - data_20m<-extract(rast, Sr1, method="simple",buffer=20, small=T, fun=mean) - cols<-seq(j,ncol(extracted),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) - extracted_20m[,cols[i]]<-data_20m - + data_20m <- extract(rast, Sr1, method = "simple", buffer = 20, small = T, fun = mean) + cols <- seq(j, ncol(extracted), by = 2) # columns to be filled with palsar data (if j is odd=HH, if j is even=HV) + extracted_20m[, cols[i]] <- data_20m + ################################ - ##golf plots 40m BUFFER MEAN + ## golf plots 40m BUFFER MEAN ################################ - 
data_40m<-extract(rast, Sr1, method="simple",buffer=40, small=T, fun=mean) - cols<-seq(j,ncol(extracted_40m),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) - extracted_40m[,cols[i]]<-data_40m - + data_40m <- extract(rast, Sr1, method = "simple", buffer = 40, small = T, fun = mean) + cols <- seq(j, ncol(extracted_40m), by = 2) # columns to be filled with palsar data (if j is odd=HH, if j is even=HV) + extracted_40m[, cols[i]] <- data_40m + ################################ - ##golf plots 60m BUFFER MEAN + ## golf plots 60m BUFFER MEAN ################################ - data_60m<-extract(rast, Sr1, method="simple",buffer=60, small=T, fun=mean) - cols<-seq(j,ncol(extracted_60m),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) - extracted_60m[,cols[i]]<-data_60m - + data_60m <- extract(rast, Sr1, method = "simple", buffer = 60, small = T, fun = mean) + cols <- seq(j, ncol(extracted_60m), by = 2) # columns to be filled with palsar data (if j is odd=HH, if j is even=HV) + extracted_60m[, cols[i]] <- data_60m + ################################ - ##golf plots 80m BUFFER MEAN + ## golf plots 80m BUFFER MEAN ################################ - data_80m<-extract(rast, Sr1, method="simple",buffer=80, small=T, fun=mean) - cols<-seq(j,ncol(extracted_80m),by=2) #columns to be filled with palsar data (if j is odd=HH, if j is even=HV) - extracted_80m[,cols[i]]<-data_80m - - - print(paste("i=",i,sep="")) - print(paste("j=",j,sep="")) + data_80m <- extract(rast, Sr1, method = "simple", buffer = 80, small = T, fun = mean) + cols <- seq(j, ncol(extracted_80m), by = 2) # columns to be filled with palsar data (if j is odd=HH, if j is even=HV) + extracted_80m[, cols[i]] <- data_80m + + + print(paste("i=", i, sep = "")) + print(paste("j=", j, sep = "")) } } @@ -98,43 +99,43 @@ for(i in 1:numfiles){ # write.table(extracted_40m,file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_40m.csv",quote=F,sep="\t",eol="\r\n", row.names=F,col.names=T) # write.table(extracted_60m,file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_60m.csv",quote=F,sep="\t",eol="\r\n", row.names=F,col.names=T) # write.table(extracted_80m,file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_80m.csv",quote=F,sep="\t",eol="\r\n", row.names=F,col.names=T) -# +# # extracted_10m <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_10m.csv",sep="\t", header=T) # extracted_20m <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_20m.csv",sep="\t", header=T) # extracted_40m <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_40m.csv",sep="\t", header=T) # extracted_60m <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_60m.csv",sep="\t", header=T) # extracted_80m <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_80m.csv",sep="\t", header=T) -par(new=F) -par(mfrow=c(1,1)) -for(i in 1:nrow(coords)){ - plot(c(10,20,40,60,80),c(extracted_10m[i,1],extracted_20m[i,1],extracted_40m[i,1], extracted_60m[i,1], extracted_80m[i,1]), xlim=c(10,80),ylim=c(-16,0), xlab="plot radius (m)",ylab="MEAN of extracted PALSAR returns (HH, gamma (dB))",type="n") - lines(c(10,20,40,60,80),c(extracted_10m[i,1],extracted_20m[i,1],extracted_40m[i,1], extracted_60m[i,1], extracted_80m[i,1]), type="b") - par(new=TRUE) +par(new = F) +par(mfrow = c(1, 1)) +for (i in 1:nrow(coords)) { + plot(c(10, 20, 40, 60, 80), 
c(extracted_10m[i, 1], extracted_20m[i, 1], extracted_40m[i, 1], extracted_60m[i, 1], extracted_80m[i, 1]), xlim = c(10, 80), ylim = c(-16, 0), xlab = "plot radius (m)", ylab = "MEAN of extracted PALSAR returns (HH, gamma (dB))", type = "n") + lines(c(10, 20, 40, 60, 80), c(extracted_10m[i, 1], extracted_20m[i, 1], extracted_40m[i, 1], extracted_60m[i, 1], extracted_80m[i, 1]), type = "b") + par(new = TRUE) } -par(new=F) -par(mfrow=c(1,1)) -for(i in 1:nrow(coords)){ - plot(c(col_names[evens]),c(extracted_10m[i,2],extracted_20m[i,2],extracted_40m[i,2], extracted_60m[i,2], extracted_80m[i,2]), xlim=c(10,80),ylim=c(-18,0), xlab="plot radius (m)",ylab="MEAN of extracted PALSAR returns (HV, gamma (dB))",type="n") - lines(c(10,20,40,60,80),c(extracted_10m[i,2],extracted_20m[i,2],extracted_40m[i,2], extracted_60m[i,2], extracted_80m[i,2]), type="b") - par(new=TRUE) +par(new = F) +par(mfrow = c(1, 1)) +for (i in 1:nrow(coords)) { + plot(c(col_names[evens]), c(extracted_10m[i, 2], extracted_20m[i, 2], extracted_40m[i, 2], extracted_60m[i, 2], extracted_80m[i, 2]), xlim = c(10, 80), ylim = c(-18, 0), xlab = "plot radius (m)", ylab = "MEAN of extracted PALSAR returns (HV, gamma (dB))", type = "n") + lines(c(10, 20, 40, 60, 80), c(extracted_10m[i, 2], extracted_20m[i, 2], extracted_40m[i, 2], extracted_60m[i, 2], extracted_80m[i, 2]), type = "b") + par(new = TRUE) } -evens<-seq(2,ncol(extracted_10m),by=2) -odds<-seq(1,ncol(extracted_10m),by=2) +evens <- seq(2, ncol(extracted_10m), by = 2) +odds <- seq(1, ncol(extracted_10m), by = 2) -##plot time series -colors<-c("red","blue","green","orange","cyan","black","magenta") +## plot time series +colors <- c("red", "blue", "green", "orange", "cyan", "black", "magenta") -par(new=F) -par(mfrow=c(1,1)) -for(j in 1:nrow(coords)){ - plot(date.time,extracted_20m[j,evens],ylim=c(-26,-5),xlab="scn date",ylab="MEAN of extracted PALSAR returns (HV, gamma (dB))",main="Runways",type="n") - lines(date.time,extracted_20m[j,evens],ylim=c(-26,-5),col=colors[j],type="b") - par(new=TRUE) +par(new = F) +par(mfrow = c(1, 1)) +for (j in 1:nrow(coords)) { + plot(date.time, extracted_20m[j, evens], ylim = c(-26, -5), xlab = "scn date", ylab = "MEAN of extracted PALSAR returns (HV, gamma (dB))", main = "Runways", type = "n") + lines(date.time, extracted_20m[j, evens], ylim = c(-26, -5), col = colors[j], type = "b") + par(new = TRUE) } -colnames(extracted_10m[,evens])<-date.time -colnames(extracted_10m[,odds])<-date.time -boxplot(extracted_20m[,evens],xlab="scn date",ylab="MEAN of extracted PALSAR returns (HV, gamma (dB))",main="WLEF: mean HV (gamma (dB), 10m buffer)",type="n") +colnames(extracted_10m[, evens]) <- date.time +colnames(extracted_10m[, odds]) <- date.time +boxplot(extracted_20m[, evens], xlab = "scn date", ylab = "MEAN of extracted PALSAR returns (HV, gamma (dB))", main = "WLEF: mean HV (gamma (dB), 10m buffer)", type = "n") diff --git a/modules/data.remote/inst/scripts/old/ROI_sd.R b/modules/data.remote/inst/scripts/old/ROI_sd.R index 089f84ea7e6..e170c12d1ea 100644 --- a/modules/data.remote/inst/scripts/old/ROI_sd.R +++ b/modules/data.remote/inst/scripts/old/ROI_sd.R @@ -1,13 +1,11 @@ -ROI_sd<-function(radius,coords,k){ - +ROI_sd <- function(radius, coords, k) { + buffext <- as.vector(disc(radius = radius, centre = coords[k, ])) ## 10m is the smallest buffer size that overlaps >1 cell + ext <- extent(c(buffext[[2]], buffext[[3]])) + cellnums <- cellsFromExtent(rast, ext) + # xyFromCell(rast, cellnums, spatial=T) + + sd_ROI <- suppressMessages(sd(extract(rast, 
cellnums))) - buffext<-as.vector(disc(radius=radius, centre=coords[k,])) ##10m is the smallest buffer size that overlaps >1 cell - ext<-extent(c(buffext[[2]],buffext[[3]])) - cellnums<-cellsFromExtent(rast,ext) -# xyFromCell(rast, cellnums, spatial=T) - - sd_ROI<-suppressMessages(sd(extract(rast,cellnums))) - return(sd_ROI) -} \ No newline at end of file +} diff --git a/modules/data.remote/inst/scripts/old/Raster_snapshots.R b/modules/data.remote/inst/scripts/old/Raster_snapshots.R index 77ec5ff67e9..ee5bc419daf 100644 --- a/modules/data.remote/inst/scripts/old/Raster_snapshots.R +++ b/modules/data.remote/inst/scripts/old/Raster_snapshots.R @@ -1,20 +1,20 @@ -palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote/palsar_scenes/geo_corrected_single_gamma") ##For Mac -pol_bands<-c("HH", "HV") +palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote/palsar_scenes/geo_corrected_single_gamma") ## For Mac +pol_bands <- c("HH", "HV") -filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern=".tif" ,recursive=F)) +filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern = ".tif", recursive = F)) -inpath<-file.path(palsar_inpath,pol_bands[2],filelist[19]) -rast<-raster(inpath) +inpath <- file.path(palsar_inpath, pol_bands[2], filelist[19]) +rast <- raster(inpath) -coords<-data.frame(297973.08,5088571.03) -Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) -coords<-Sr1@coords +coords <- data.frame(297973.08, 5088571.03) +Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) +coords <- Sr1@coords -radius<-875 -buffext<-as.vector(disc(radius=radius, centre=coords[1,])) ##10m is the smallest buffer size that overlaps >1 cell -ext<-extent(c(buffext[[2]],buffext[[3]])) +radius <- 875 +buffext <- as.vector(disc(radius = radius, centre = coords[1, ])) ## 10m is the smallest buffer size that overlaps >1 cell +ext <- extent(c(buffext[[2]], buffext[[3]])) -image(crop(rast,ext),col=gray(1:255/255)) +image(crop(rast, ext), col = gray(1:255 / 255)) @@ -23,15 +23,15 @@ image(crop(rast,ext),col=gray(1:255/255)) -disturbance_inpath <-"/Users/hardimanb/Desktop/data.remote/biometry" ##For Mac +disturbance_inpath <- "/Users/hardimanb/Desktop/data.remote/biometry" ## For Mac # disturbance_inpath <-"/home/bhardima/pecan/modules/data.remote/biometry" -disturbance_infile <-read.csv(file.path(disturbance_inpath,"Cheas_coordinates_disturbance_year.csv"), sep=",", header=T) #disturbance plots +disturbance_infile <- read.csv(file.path(disturbance_inpath, "Cheas_coordinates_disturbance_year.csv"), sep = ",", header = T) # disturbance plots -disturbance_coords<-data.frame(cbind(-1*disturbance_infile$dec_lon,disturbance_infile$dec_lat)) -dist_df<-data.frame(disturbance_infile$distyr) +disturbance_coords <- data.frame(cbind(-1 * disturbance_infile$dec_lon, disturbance_infile$dec_lat)) +dist_df <- data.frame(disturbance_infile$distyr) -coordinates(dist_df)<-disturbance_coords #lat and lon (dec deg) +coordinates(dist_df) <- disturbance_coords # lat and lon (dec deg) -##Convert to class=SpatialPoints for palsar extraction (this is used for creating kml files and will be used later on) -disturbance_Sr1<- SpatialPoints(dist_df,CRS(as.character(NA))) +## Convert to class=SpatialPoints for palsar extraction (this is used for creating kml files and will be used later on) +disturbance_Sr1 <- SpatialPoints(dist_df, 
CRS(as.character(NA))) diff --git a/modules/data.remote/inst/scripts/old/build_params_table.R b/modules/data.remote/inst/scripts/old/build_params_table.R index 20db1023bcd..c4d4f7de430 100644 --- a/modules/data.remote/inst/scripts/old/build_params_table.R +++ b/modules/data.remote/inst/scripts/old/build_params_table.R @@ -1,31 +1,26 @@ ###### -##Author Brady S. Hardiman +## Author Brady S. Hardiman ## 04/25/2013 -##path, directory +## path, directory setwd("/home/bhardima/pecan/modules/data.remote/palsar_scenes/Link_to_cheas/uncorrected") -num_folders <-length(as.vector(dir())) -filelist <- as.vector(list.dirs(path=getwd(), recursive=F)) +num_folders <- length(as.vector(dir())) +filelist <- as.vector(list.dirs(path = getwd(), recursive = F)) -##create metadata file (csv) -#file.create(file.path(outpath, "palsar_metadata.csv"), overwrite=F, showWarnings=T) ##not sure about overwriting (will we need to? Should we?) and this configuration does not seem to generate any actual warnings when overwriting extant files. +## create metadata file (csv) +# file.create(file.path(outpath, "palsar_metadata.csv"), overwrite=F, showWarnings=T) ##not sure about overwriting (will we need to? Should we?) and this configuration does not seem to generate any actual warnings when overwriting extant files. -header <- c("scnid","scndate", "scnUTMzone", "scnpix","scncf","scn_pix2coord_a11","scn_pix2coord_a12","scn_pix2coord_a13","scn_pix2coord_a14","scn_pix2coord_a21","scn_pix2coord_a22","scn_pix2coord_a23","scn_pix2coord_a24","scn_coord2pix_b11","scn_coord2pix_b12","scn_coord2pix_b13","scn_coord2pix_b14","scn_coord2pix_b21","scn_coord2pix_b22","scn_coord2pix_b23","scn_coord2pix_b24","scn_coord2pix_a0","scn_coord2pix_a1","scn_coord2pix_a2","scn_coord2pix_a3","scn_coord2pix_a4","scn_coord2pix_a5","scn_coord2pix_a6","scn_coord2pix_a7","scn_coord2pix_a8","scn_coord2pix_a9","scn_coord2pix_b0","scn_coord2pix_b1","scn_coord2pix_b2","scn_coord2pix_b3","scn_coord2pix_b4","scn_coord2pix_b5","scn_coord2pix_b6","scn_coord2pix_b7","scn_coord2pix_b8","scn_coord2pix_b9","scn_nwlat","scn_nwlon","scn_nelat","scn_nelon","scn_swlat","scn_swlon","scn_selat","scn_selon","scn_centlat","scn_centlon") -output <- matrix(data=NA, nrow=num_folders+1, ncol=length(header), byrow=T) -output[1,] <-header +header <- c("scnid", "scndate", "scnUTMzone", "scnpix", "scncf", "scn_pix2coord_a11", "scn_pix2coord_a12", "scn_pix2coord_a13", "scn_pix2coord_a14", "scn_pix2coord_a21", "scn_pix2coord_a22", "scn_pix2coord_a23", "scn_pix2coord_a24", "scn_coord2pix_b11", "scn_coord2pix_b12", "scn_coord2pix_b13", "scn_coord2pix_b14", "scn_coord2pix_b21", "scn_coord2pix_b22", "scn_coord2pix_b23", "scn_coord2pix_b24", "scn_coord2pix_a0", "scn_coord2pix_a1", "scn_coord2pix_a2", "scn_coord2pix_a3", "scn_coord2pix_a4", "scn_coord2pix_a5", "scn_coord2pix_a6", "scn_coord2pix_a7", "scn_coord2pix_a8", "scn_coord2pix_a9", "scn_coord2pix_b0", "scn_coord2pix_b1", "scn_coord2pix_b2", "scn_coord2pix_b3", "scn_coord2pix_b4", "scn_coord2pix_b5", "scn_coord2pix_b6", "scn_coord2pix_b7", "scn_coord2pix_b8", "scn_coord2pix_b9", "scn_nwlat", "scn_nwlon", "scn_nelat", "scn_nelon", "scn_swlat", "scn_swlon", "scn_selat", "scn_selon", "scn_centlat", "scn_centlon") +output <- matrix(data = NA, nrow = num_folders + 1, ncol = length(header), byrow = T) +output[1, ] <- header -for (i in 1:num_folders){ - inpath <-filelist[i] +for (i in 1:num_folders) { + inpath <- filelist[i] outpath <- "/home/bhardima/pecan/modules/data.remote/" - - ##function to extract palsar metadata, input to function 
is path of directory containing palsar folders. This needs to be able to loop ove all folders in thet directory. - output[(i+1),] <- extract_palsar_metadata_function(inpath) -} - -write.table(output,file="/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv",quote=F,sep="\t",eol="\r\n", row.names=F,col.names=F) - - - + ## function to extract palsar metadata, input to function is path of directory containing palsar folders. This needs to be able to loop ove all folders in thet directory. + output[(i + 1), ] <- extract_palsar_metadata_function(inpath) +} +write.table(output, file = "/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", quote = F, sep = "\t", eol = "\r\n", row.names = F, col.names = F) diff --git a/modules/data.remote/inst/scripts/old/build_polygons.R b/modules/data.remote/inst/scripts/old/build_polygons.R index 15320a0342f..e6654e4c1a7 100644 --- a/modules/data.remote/inst/scripts/old/build_polygons.R +++ b/modules/data.remote/inst/scripts/old/build_polygons.R @@ -1,15 +1,14 @@ -##Author: Brady S. Hardiman 04/30/2013 +## Author: Brady S. Hardiman 04/30/2013 -inpath <-"/home/bhardima/pecan/modules/data.remote/biometry" +inpath <- "/home/bhardima/pecan/modules/data.remote/biometry" -infile <-read.csv(Sys.glob(file.path(inpath,"*.csv")), header=T) #colClasses=c("character","factor", "character", "numeric","numeric","numeric","numeric","numeric")) +infile <- read.csv(Sys.glob(file.path(inpath, "*.csv")), header = T) # colClasses=c("character","factor", "character", "numeric","numeric","numeric","numeric","numeric")) -# for (i in 1:length(infile)){ - - coords <-cbind(infile$easting,infile$northing) - - Sr1<- SpatialPoints(coords,CRS("+proj=utm +zone=16 +a=6378137 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0")) +# for (i in 1:length(infile)){ + +coords <- cbind(infile$easting, infile$northing) + +Sr1 <- SpatialPoints(coords, CRS("+proj=utm +zone=16 +a=6378137 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0")) # Srs1<- Polygons(list(Sr1),"sr1") # SpP<-SpatialPolygons(list(Srs1)) - \ No newline at end of file diff --git a/modules/data.remote/inst/scripts/old/curve_fitting_07282013.R b/modules/data.remote/inst/scripts/old/curve_fitting_07282013.R index 800a42facb1..eab4735eb32 100644 --- a/modules/data.remote/inst/scripts/old/curve_fitting_07282013.R +++ b/modules/data.remote/inst/scripts/old/curve_fitting_07282013.R @@ -1,174 +1,178 @@ -extracted_40m <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_40m.csv",sep="\t", header=T) -file.info<-read.table(file="/Users/hardimanb/Desktop/data.remote/output/metadata/output_metadata.csv",header=T,sep="\t") ##For Mac -wlef_abg<-read.csv("/Users/hardimanb/Desktop/data.remote/biometry/biometry_trimmed.csv", sep=",", header=T) +extracted_40m <- read.table(file = "/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_40m.csv", sep = "\t", header = T) +file.info <- read.table(file = "/Users/hardimanb/Desktop/data.remote/output/metadata/output_metadata.csv", header = T, sep = "\t") ## For Mac +wlef_abg <- read.csv("/Users/hardimanb/Desktop/data.remote/biometry/biometry_trimmed.csv", sep = ",", header = T) -odds<-seq(1,ncol(extracted_40m),by=2) -evens<-seq(2,ncol(extracted_40m),by=2) -date.time<-as.vector(substr(file.info$scndate,1,8)) -col_names<-c(rbind(paste(date.time, "HH",sep="_"),paste(date.time, "HV",sep="_"))) -HHscn.dates<-as.Date(substr(col_names[odds],1,8),"%Y%m%d") -HVscn.dates<-as.Date(substr(col_names[evens],1,8),"%Y%m%d") 
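## Aside, for illustration only: the odd/even column bookkeeping above works because
## c(rbind(a, b)) interleaves two equal-length vectors element by element, so the HH and HV
## names alternate in the same order as the extracted columns. Minimal sketch with made-up
## dates (demo_dates is hypothetical, not a variable from these scripts):
demo_dates <- c("20070709", "20080711")
c(rbind(paste(demo_dates, "HH", sep = "_"), paste(demo_dates, "HV", sep = "_")))
## returns "20070709_HH" "20070709_HV" "20080711_HH" "20080711_HV"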
+odds <- seq(1, ncol(extracted_40m), by = 2) +evens <- seq(2, ncol(extracted_40m), by = 2) +date.time <- as.vector(substr(file.info$scndate, 1, 8)) +col_names <- c(rbind(paste(date.time, "HH", sep = "_"), paste(date.time, "HV", sep = "_"))) +HHscn.dates <- as.Date(substr(col_names[odds], 1, 8), "%Y%m%d") +HVscn.dates <- as.Date(substr(col_names[evens], 1, 8), "%Y%m%d") -HH_wlef<-extracted_40m[,odds] -colnames(HH_wlef)<-date.time -HV_wlef<-extracted_40m[,evens] -colnames(HV_wlef)<-date.time +HH_wlef <- extracted_40m[, odds] +colnames(HH_wlef) <- date.time +HV_wlef <- extracted_40m[, evens] +colnames(HV_wlef) <- date.time -HH.calib <-HH_wlef[,1] -HV.calib <-HV_wlef[,1] +HH.calib <- HH_wlef[, 1] +HV.calib <- HV_wlef[, 1] -par(mfrow=c(1,2)) -scatter.smooth(wlef_abg$ABG_biomass,HH.calib,col="#CCCCCC",xlab="ABG_biomass",ylab="HH (gamma (dB))",main=date.time[1]) -scatter.smooth(wlef_abg$ABG_biomass,HV.calib,col="#CCCCCC",xlab="ABG_biomass",ylab="HV (gamma (dB))",main=date.time[1]) +par(mfrow = c(1, 2)) +scatter.smooth(wlef_abg$ABG_biomass, HH.calib, col = "#CCCCCC", xlab = "ABG_biomass", ylab = "HH (gamma (dB))", main = date.time[1]) +scatter.smooth(wlef_abg$ABG_biomass, HV.calib, col = "#CCCCCC", xlab = "ABG_biomass", ylab = "HV (gamma (dB))", main = date.time[1]) -par(mfrow=c(1,1)) -scatter.smooth(wlef_abg$ABG_biomass,HV.calib,col="#CCCCCC",xlim=c(0,100),xlab="ABG_biomass",ylab="HV (gamma (dB))",main=date.time[1]) +par(mfrow = c(1, 1)) +scatter.smooth(wlef_abg$ABG_biomass, HV.calib, col = "#CCCCCC", xlim = c(0, 100), xlab = "ABG_biomass", ylab = "HV (gamma (dB))", main = date.time[1]) ############################################################################################### -##Funtion to estimate likelihood +## Funtion to estimate likelihood ############################################################################################### -k<-100 ##biomass (x) value at maximum HV (y) value -HVmax<- 0.14 -sd<-sd(HV.calib) +k <- 100 ## biomass (x) value at maximum HV (y) value +HVmax <- 0.14 +sd <- sd(HV.calib) -params<-c(k,HVmax,sd) +params <- c(k, HVmax, sd) -y<-HV.calib -x<-wlef_abg$ABG_biomass -biomass<-x -sel = which(x>0) -x = x[sel];y=y[sel] +y <- HV.calib +x <- wlef_abg$ABG_biomass +biomass <- x +sel <- which(x > 0) +x <- x[sel] +y <- y[sel] -ll.monod(params,x,y) +ll.monod(params, x, y) -##Fits a monod function to ABG biomass and HV palsar returns -fit1 = optim(par=params,ll.monod,x=x,y=y) +## Fits a monod function to ABG biomass and HV palsar returns +fit1 <- optim(par = params, ll.monod, x = x, y = y) fit1 -params = fit1$par -par(new=F) -par(mfrow=c(1,1)) -plot(x,y,ylim=c(min(y),max(y)),xlim=c(0,100),xlab="ABG Biomass (Mg/ha)",ylab="HV (gamma (dB))",main="WLEF_HV Monod Function") -xseq = seq(min(x),max(x),length=1000) -lines(xseq,params[2]*xseq/(xseq+params[1]),col=2,lwd=3) +params <- fit1$par +par(new = F) +par(mfrow = c(1, 1)) +plot(x, y, ylim = c(min(y), max(y)), xlim = c(0, 100), xlab = "ABG Biomass (Mg/ha)", ylab = "HV (gamma (dB))", main = "WLEF_HV Monod Function") +xseq <- seq(min(x), max(x), length = 1000) +lines(xseq, params[2] * xseq / (xseq + params[1]), col = 2, lwd = 3) # lines(cbind(x,y),col=3,lwd=3) ############################################################################################### -##Funtion to estimate likelihood for michaelis-menten +## Funtion to estimate likelihood for michaelis-menten ############################################################################################### -a<- 0.10 -b<-100 -sd<-sd(HV.calib) -params<-c(a,b,sd) - -y<-HV.calib 
-x<-wlef_abg$ABG_biomass -biomass<-x -sel = which(x>0) -x = x[sel];y=y[sel] - -ll.micmen(params,x,y) - -##Fits a michaelis-menten function to ABG biomass and HV palsar returns -fit1 = optim(par=params,ll.micmen,x=x,y=y) +a <- 0.10 +b <- 100 +sd <- sd(HV.calib) +params <- c(a, b, sd) + +y <- HV.calib +x <- wlef_abg$ABG_biomass +biomass <- x +sel <- which(x > 0) +x <- x[sel] +y <- y[sel] + +ll.micmen(params, x, y) + +## Fits a michaelis-menten function to ABG biomass and HV palsar returns +fit1 <- optim(par = params, ll.micmen, x = x, y = y) fit1 -params = fit1$par -xseq = seq(min(x),max(x),length=1000) -par(new=F) +params <- fit1$par +xseq <- seq(min(x), max(x), length = 1000) +par(new = F) -par(mfrow=c(1,1)) -plot(x,y,ylim=c(min(y),max(y)),xlim=c(0,100),xlab="ABG Biomass (Mg/ha)",ylab="HV (gamma (dB))",main="WLEF_HV Michaelis-Menten Function") -lines(cbind(xseq,(params[1]*xseq)/(params[2]+xseq)),col=2,lwd=3) +par(mfrow = c(1, 1)) +plot(x, y, ylim = c(min(y), max(y)), xlim = c(0, 100), xlab = "ABG Biomass (Mg/ha)", ylab = "HV (gamma (dB))", main = "WLEF_HV Michaelis-Menten Function") +lines(cbind(xseq, (params[1] * xseq) / (params[2] + xseq)), col = 2, lwd = 3) # lines(cbind(x,y),col=3,lwd=3) ############################################################################################### -##Funtion to estimate likelihood for Holling Type III +## Funtion to estimate likelihood for Holling Type III ############################################################################################### -a<- 0.14 -b<-100 -sd<-sd(HV.calib) -params<-c(a,b,sd) - -y<-HV.calib -x<-wlef_abg$ABG_biomass -biomass<-x -sel = which(x>0) -x = x[sel];y=y[sel] - -ll.holling3(params,x,y) - -##Fits a Holling Type III function to ABG biomass and HV palsar returns -fit1 = optim(par=params,ll.holling3,x=x,y=y) +a <- 0.14 +b <- 100 +sd <- sd(HV.calib) +params <- c(a, b, sd) + +y <- HV.calib +x <- wlef_abg$ABG_biomass +biomass <- x +sel <- which(x > 0) +x <- x[sel] +y <- y[sel] + +ll.holling3(params, x, y) + +## Fits a Holling Type III function to ABG biomass and HV palsar returns +fit1 <- optim(par = params, ll.holling3, x = x, y = y) fit1 -params = fit1$par -xseq = seq(min(x),max(x),length=1000) -par(new=F) +params <- fit1$par +xseq <- seq(min(x), max(x), length = 1000) +par(new = F) -par(mfrow=c(1,1)) -plot(x,y,ylim=c(min(y),max(y)),xlim=c(0,100),xlab="ABG Biomass (Mg/ha)",ylab="HV (gamma (dB))",main="WLEF_HV Holling Type III Function") -lines(cbind(xseq,(params[1]*xseq^2)/(params[2]^2+xseq^2)),col=2,lwd=3) +par(mfrow = c(1, 1)) +plot(x, y, ylim = c(min(y), max(y)), xlim = c(0, 100), xlab = "ABG Biomass (Mg/ha)", ylab = "HV (gamma (dB))", main = "WLEF_HV Holling Type III Function") +lines(cbind(xseq, (params[1] * xseq^2) / (params[2]^2 + xseq^2)), col = 2, lwd = 3) # lines(cbind(x,y),col=3,lwd=3) ############################################################################################### -##Funtion to estimate likelihood for Holling Type IV +## Funtion to estimate likelihood for Holling Type IV ############################################################################################### -a<- 0.14 -b<-100 -c<-1 -sd<-sd(HV.calib) -params<-c(a,b,c,sd) - -y<-HV.calib -x<-wlef_abg$ABG_biomass -biomass<-x -sel = which(x>0) -x = x[sel];y=y[sel] - -ll.holling4(params,x,y) - -##Fits a Holling Type IV function to ABG biomass and HV palsar returns -fit1 = optim(par=params,ll.holling4,x=x,y=y) +a <- 0.14 +b <- 100 +c <- 1 +sd <- sd(HV.calib) +params <- c(a, b, c, sd) + +y <- HV.calib +x <- wlef_abg$ABG_biomass +biomass <- x +sel 
<- which(x > 0) +x <- x[sel] +y <- y[sel] + +ll.holling4(params, x, y) + +## Fits a Holling Type IV function to ABG biomass and HV palsar returns +fit1 <- optim(par = params, ll.holling4, x = x, y = y) fit1 -params = fit1$par -xseq = seq(min(x),max(x),length=1000) -par(new=F) +params <- fit1$par +xseq <- seq(min(x), max(x), length = 1000) +par(new = F) -par(mfrow=c(1,1)) -plot(x,y,ylim=c(min(y),max(y)),xlim=c(0,100), xlab="ABG Biomass (Mg/ha)",ylab="HV (gamma (dB))",main="WLEF_HV Holling Type IV Function") -lines(cbind(xseq,(params[1]*xseq^2)/(params[2]+params[3]*xseq+xseq^2)),col=2,lwd=3) +par(mfrow = c(1, 1)) +plot(x, y, ylim = c(min(y), max(y)), xlim = c(0, 100), xlab = "ABG Biomass (Mg/ha)", ylab = "HV (gamma (dB))", main = "WLEF_HV Holling Type IV Function") +lines(cbind(xseq, (params[1] * xseq^2) / (params[2] + params[3] * xseq + xseq^2)), col = 2, lwd = 3) # lines(cbind(x,y),col=3,lwd=3) ############################################################################################### -##Multiple linear regression ala Antonarakis 2011 +## Multiple linear regression ala Antonarakis 2011 ############################################################################################### -x<-HV.calib -y<-(wlef_abg$ABG_biomass)^0.5 ##Alex's eqn used sqrt(biomass) -z<-HH.calib +x <- HV.calib +y <- (wlef_abg$ABG_biomass)^0.5 ## Alex's eqn used sqrt(biomass) +z <- HH.calib -HVfit<- lm(y~x) +HVfit <- lm(y ~ x) anova(HVfit) -coeff<-coefficients(lm(y~x)) -xseq = seq(min(x),max(x),length=1000) -par(mfrow=c(1,1)) -plot(x,y,ylim=c(min(y),max(y)),ylab="ABG Biomass (Mg/ha)",xlab="HV (gamma (dB))",main="WLEF_HV vs ABG Biomass") -lines(cbind(xseq,coeff[2]*xseq+coeff[1]),col=2,lwd=3) +coeff <- coefficients(lm(y ~ x)) +xseq <- seq(min(x), max(x), length = 1000) +par(mfrow = c(1, 1)) +plot(x, y, ylim = c(min(y), max(y)), ylab = "ABG Biomass (Mg/ha)", xlab = "HV (gamma (dB))", main = "WLEF_HV vs ABG Biomass") +lines(cbind(xseq, coeff[2] * xseq + coeff[1]), col = 2, lwd = 3) -HVfitres<-residuals(HVfit) +HVfitres <- residuals(HVfit) -HHfitresid<-lm(HVfitres ~ z) -anova(HHfitresid) ##So HH explains a significant fraction of residual variation in (ABG biomass vs HV) +HHfitresid <- lm(HVfitres ~ z) +anova(HHfitresid) ## So HH explains a significant fraction of residual variation in (ABG biomass vs HV) summary(HHfitresid) -HHfit <- lm(y ~ z) -lmfit <- lm(y ~ x + z) +HHfit <- lm(y ~ z) +lmfit <- lm(y ~ x + z) summary(lmfit) -layout(matrix(c(1,2,3,4),2,2)) # optional 4 graphs/page +layout(matrix(c(1, 2, 3, 4), 2, 2)) # optional 4 graphs/page plot(lmfit) -AIC(lmfit,HVfit,HHfit) +AIC(lmfit, HVfit, HHfit) -##This section forces the monod function to fit the intercept. It does worse than the regular monod. +## This section forces the monod function to fit the intercept. It does worse than the regular monod. 
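## Aside, for illustration only: ll.monod and the other ll.* objectives are called above but
## not defined in this script. Assuming they follow the same pattern as the rect.hyperbola()
## helper defined later in this patch (Gaussian residuals around the fitted curve, returning a
## negative log-likelihood for optim() to minimize), ll.monod would look roughly like this:
ll.monod <- function(params, x, y) {
  k <- params[1] # biomass at which the curve reaches half of HVmax
  HVmax <- params[2] # asymptote of the HV return
  sd <- params[3] # residual standard deviation
  pred <- HVmax * x / (x + k) # Monod (saturating) curve, matching the lines() call above
  -sum(dnorm(y, mean = pred, sd = sd, log = TRUE)) # negative log-likelihood
}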
# params2 = c(50,0.7,0.2,1) # fit2 = optim(par=params2,ll.monod2,x=x,y=y) # fit2 @@ -176,17 +180,17 @@ AIC(lmfit,HVfit,HHfit) # lines(xseq,params2[2]*xseq/(xseq+params2[1])+params2[3],col=4,lwd=3) # lines(lowess(x,y),col=5,lwd=3) -bin.size = 25 -xbin = seq(0,450,bin.size) -bin = findInterval(x,xbin) -bin.mu = tapply(y,bin,mean,na.rm=TRUE) -bin.sd = tapply(y,bin,sd,na.rm=TRUE) -points(xbin[sort(as.numeric(names(bin.mu)))]+bin.size/2,bin.mu,col="orange",cex=3,pch=18) -points(xbin[sort(as.numeric(names(bin.mu)))]+bin.size/2,bin.mu+bin.sd,col="orange",cex=3,pch="_") -points(xbin[sort(as.numeric(names(bin.mu)))]+bin.size/2,bin.mu-bin.sd,col="orange",cex=3,pch="_") - -biomass<-loess.smooth(wlef_abg$ABG_biomass,HV_signal[,1])$x -HVvals<-loess.smooth(wlef_abg$ABG_biomass,HV_signal[,1])$y -par(mfrow=c(1,1)) -plot(cbind(biomass,HVvals)) -plot(loess.smooth(wlef_abg$ABG_biomass,HV_signal[,1])) \ No newline at end of file +bin.size <- 25 +xbin <- seq(0, 450, bin.size) +bin <- findInterval(x, xbin) +bin.mu <- tapply(y, bin, mean, na.rm = TRUE) +bin.sd <- tapply(y, bin, sd, na.rm = TRUE) +points(xbin[sort(as.numeric(names(bin.mu)))] + bin.size / 2, bin.mu, col = "orange", cex = 3, pch = 18) +points(xbin[sort(as.numeric(names(bin.mu)))] + bin.size / 2, bin.mu + bin.sd, col = "orange", cex = 3, pch = "_") +points(xbin[sort(as.numeric(names(bin.mu)))] + bin.size / 2, bin.mu - bin.sd, col = "orange", cex = 3, pch = "_") + +biomass <- loess.smooth(wlef_abg$ABG_biomass, HV_signal[, 1])$x +HVvals <- loess.smooth(wlef_abg$ABG_biomass, HV_signal[, 1])$y +par(mfrow = c(1, 1)) +plot(cbind(biomass, HVvals)) +plot(loess.smooth(wlef_abg$ABG_biomass, HV_signal[, 1])) diff --git a/modules/data.remote/inst/scripts/old/curve_fitting_07292013.R b/modules/data.remote/inst/scripts/old/curve_fitting_07292013.R index c301a75b2e4..e8847940e41 100644 --- a/modules/data.remote/inst/scripts/old/curve_fitting_07292013.R +++ b/modules/data.remote/inst/scripts/old/curve_fitting_07292013.R @@ -1,165 +1,169 @@ -extracted_40m <- read.table(file="/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_40m.csv",sep="\t", header=T) -file.info<-read.table(file="/Users/hardimanb/Desktop/data.remote/output/metadata/output_metadata.csv",header=T,sep="\t") ##For Mac -wlef_abg<-read.csv("/Users/hardimanb/Desktop/data.remote/biometry/biometry_trimmed.csv", sep=",", header=T) +extracted_40m <- read.table(file = "/Users/hardimanb/Desktop/data.remote/output/data/WLEF_extracted_40m.csv", sep = "\t", header = T) +file.info <- read.table(file = "/Users/hardimanb/Desktop/data.remote/output/metadata/output_metadata.csv", header = T, sep = "\t") ## For Mac +wlef_abg <- read.csv("/Users/hardimanb/Desktop/data.remote/biometry/biometry_trimmed.csv", sep = ",", header = T) -odds<-seq(1,ncol(extracted_40m),by=2) -evens<-seq(2,ncol(extracted_40m),by=2) -date.time<-as.vector(substr(file.info$scndate,1,8)) -col_names<-c(rbind(paste(date.time, "HH",sep="_"),paste(date.time, "HV",sep="_"))) -HHscn.dates<-as.Date(substr(col_names[odds],1,8),"%Y%m%d") -HVscn.dates<-as.Date(substr(col_names[evens],1,8),"%Y%m%d") +odds <- seq(1, ncol(extracted_40m), by = 2) +evens <- seq(2, ncol(extracted_40m), by = 2) +date.time <- as.vector(substr(file.info$scndate, 1, 8)) +col_names <- c(rbind(paste(date.time, "HH", sep = "_"), paste(date.time, "HV", sep = "_"))) +HHscn.dates <- as.Date(substr(col_names[odds], 1, 8), "%Y%m%d") +HVscn.dates <- as.Date(substr(col_names[evens], 1, 8), "%Y%m%d") -HH_wlef<-extracted_40m[,odds] -colnames(HH_wlef)<-date.time 
-HV_wlef<-extracted_40m[,evens] -colnames(HV_wlef)<-date.time +HH_wlef <- extracted_40m[, odds] +colnames(HH_wlef) <- date.time +HV_wlef <- extracted_40m[, evens] +colnames(HV_wlef) <- date.time -HH.calib <-HH_wlef[,1] -HV.calib <-HV_wlef[,1] +HH.calib <- HH_wlef[, 1] +HV.calib <- HV_wlef[, 1] -par(mfrow=c(1,2)) -scatter.smooth(wlef_abg$ABG_biomass,HH.calib,col="#CCCCCC",xlab="ABG_biomass",ylab="HH (gamma (dB))",main=date.time[1]) -scatter.smooth(wlef_abg$ABG_biomass,HV.calib,col="#CCCCCC",xlab="ABG_biomass",ylab="HV (gamma (dB))",main=date.time[1]) +par(mfrow = c(1, 2)) +scatter.smooth(wlef_abg$ABG_biomass, HH.calib, col = "#CCCCCC", xlab = "ABG_biomass", ylab = "HH (gamma (dB))", main = date.time[1]) +scatter.smooth(wlef_abg$ABG_biomass, HV.calib, col = "#CCCCCC", xlab = "ABG_biomass", ylab = "HV (gamma (dB))", main = date.time[1]) -par(mfrow=c(1,1)) -scatter.smooth(wlef_abg$ABG_biomass,HV.calib,col="#CCCCCC",xlab="ABG_biomass",ylab="HV (gamma (dB))",main=date.time[1]) +par(mfrow = c(1, 1)) +scatter.smooth(wlef_abg$ABG_biomass, HV.calib, col = "#CCCCCC", xlab = "ABG_biomass", ylab = "HV (gamma (dB))", main = date.time[1]) ############################################################################################### -##Funtion to estimate likelihood +## Funtion to estimate likelihood ############################################################################################### -k<-10 ##biomass (x) value at maximum HV (y) value -HVmax<-0.14 -sd<-sd(HV.calib) +k <- 10 ## biomass (x) value at maximum HV (y) value +HVmax <- 0.14 +sd <- sd(HV.calib) -params<-c(k,HVmax,0.05,sd) +params <- c(k, HVmax, 0.05, sd) -y<-HV.calib -x<-(wlef_abg$ABG_biomass)^0.5 -biomass<-x -sel = which(x>0) -x = x[sel];y=y[sel] +y <- HV.calib +x <- (wlef_abg$ABG_biomass)^0.5 +biomass <- x +sel <- which(x > 0) +x <- x[sel] +y <- y[sel] -ll.monod2(params,x,y) +ll.monod2(params, x, y) -##Fits a monod function to ABG biomass and HV palsar returns -fit.monod2 = optim(par=params,ll.monod2,x=x,y=y) +## Fits a monod function to ABG biomass and HV palsar returns +fit.monod2 <- optim(par = params, ll.monod2, x = x, y = y) fit.monod2 -aic.monod2<- -2*fit.monod2$value + 2*(length(params)-1) - -params = fit1$par -par(new=F) -par(mfrow=c(1,1)) -plot(x,y,ylim=c(min(y),max(y)),xlab="ABG Biomass (Mg/ha)",ylab="HV (gamma (dB))",main="WLEF_HV Monod Function") -xseq = seq(min(x),max(x),length=1000) -lines(xseq,params[2]*xseq/(xseq+params[1])+params[3],col=2,lwd=3) +aic.monod2 <- -2 * fit.monod2$value + 2 * (length(params) - 1) + +params <- fit1$par +par(new = F) +par(mfrow = c(1, 1)) +plot(x, y, ylim = c(min(y), max(y)), xlab = "ABG Biomass (Mg/ha)", ylab = "HV (gamma (dB))", main = "WLEF_HV Monod Function") +xseq <- seq(min(x), max(x), length = 1000) +lines(xseq, params[2] * xseq / (xseq + params[1]) + params[3], col = 2, lwd = 3) # lines(cbind(x,y),col=3,lwd=3) -par(new=F) -par(mfrow=c(1,1)) -fit.holl2 = optim(params,ll.holling2,x=x,y=y) -aic.holl2<- -2*fit.holl2$value + 2*(length(params)-1) -params = fit2$par -lines(xseq,params[1]*xseq/(1+xseq*params[2])+params[3],col=3,lwd=3) - -par(new=F) -par(mfrow=c(1,1)) -fit.mono = optim(params,ll.mono,x=x,y=y) -aic.mono<- -2*fit.mono2$value + 2*(length(params)-1) -params = fit3$par -lines(xseq,params[1]*(1-exp(-xseq*params[2]))+params[3],col=4,lwd=3) - -par(new=F) -par(mfrow=c(1,1)) -fit.nrh = optim(c(0.1,3,0.8,-15,1),ll.nrh,x=x,y=y) -aic.nrh<- -2*fit.nrh$value + 2*(length(params)-1) -params = fit4$par 
-lines(xseq,1/(2*params[3])*(params[1]*xseq+params[2]-sqrt((params[1]*xseq+params[2])^2-4*params[3]*params[2]*params[1]*xseq))+params[4],col=5,lwd=3) +par(new = F) +par(mfrow = c(1, 1)) +fit.holl2 <- optim(params, ll.holling2, x = x, y = y) +aic.holl2 <- -2 * fit.holl2$value + 2 * (length(params) - 1) +params <- fit2$par +lines(xseq, params[1] * xseq / (1 + xseq * params[2]) + params[3], col = 3, lwd = 3) + +par(new = F) +par(mfrow = c(1, 1)) +fit.mono <- optim(params, ll.mono, x = x, y = y) +aic.mono <- -2 * fit.mono2$value + 2 * (length(params) - 1) +params <- fit3$par +lines(xseq, params[1] * (1 - exp(-xseq * params[2])) + params[3], col = 4, lwd = 3) + +par(new = F) +par(mfrow = c(1, 1)) +fit.nrh <- optim(c(0.1, 3, 0.8, -15, 1), ll.nrh, x = x, y = y) +aic.nrh <- -2 * fit.nrh$value + 2 * (length(params) - 1) +params <- fit4$par +lines(xseq, 1 / (2 * params[3]) * (params[1] * xseq + params[2] - sqrt((params[1] * xseq + params[2])^2 - 4 * params[3] * params[2] * params[1] * xseq)) + params[4], col = 5, lwd = 3) ############################################################################################### -##Funtion to estimate likelihood for michaelis-menten +## Funtion to estimate likelihood for michaelis-menten ############################################################################################### -a<- -12 -b<-100 -sd<-sd(HV.calib) -params<-c(a,b,sd) - -y<-HV.calib -x<-wlef_abg$ABG_biomass -biomass<-x -sel = which(x>0) -x = x[sel];y=y[sel] - -ll.micmen(params,x,y) - -##Fits a michaelis-menten function to ABG biomass and HV palsar returns -fit1 = optim(par=params,ll.micmen,x=x,y=y) +a <- -12 +b <- 100 +sd <- sd(HV.calib) +params <- c(a, b, sd) + +y <- HV.calib +x <- wlef_abg$ABG_biomass +biomass <- x +sel <- which(x > 0) +x <- x[sel] +y <- y[sel] + +ll.micmen(params, x, y) + +## Fits a michaelis-menten function to ABG biomass and HV palsar returns +fit1 <- optim(par = params, ll.micmen, x = x, y = y) fit1 -params = fit1$par -xseq = seq(min(x),max(x),length=1000) -par(new=F) +params <- fit1$par +xseq <- seq(min(x), max(x), length = 1000) +par(new = F) -par(mfrow=c(1,1)) -plot(x,y,ylim=c(min(y),max(y)),xlab="ABG Biomass (Mg/ha)",ylab="HV (gamma (dB))",main="WLEF_HV Michaelis-Menten Function") -lines(cbind(xseq,(params[1]*xseq)/(params[2]+xseq)),col=2,lwd=3) +par(mfrow = c(1, 1)) +plot(x, y, ylim = c(min(y), max(y)), xlab = "ABG Biomass (Mg/ha)", ylab = "HV (gamma (dB))", main = "WLEF_HV Michaelis-Menten Function") +lines(cbind(xseq, (params[1] * xseq) / (params[2] + xseq)), col = 2, lwd = 3) # lines(cbind(x,y),col=3,lwd=3) ############################################################################################### -##Funtion to estimate likelihood for Holling Type III +## Funtion to estimate likelihood for Holling Type III ############################################################################################### -a<- -12 -b<-100 -sd<-sd(HV.calib) -params<-c(a,b,sd) - -y<-HV.calib -x<-wlef_abg$ABG_biomass -biomass<-x -sel = which(x>0) -x = x[sel];y=y[sel] - -ll.holling3(params,x,y) - -##Fits a Holling Type III function to ABG biomass and HV palsar returns -fit1 = optim(par=params,ll.micmen,x=x,y=y) +a <- -12 +b <- 100 +sd <- sd(HV.calib) +params <- c(a, b, sd) + +y <- HV.calib +x <- wlef_abg$ABG_biomass +biomass <- x +sel <- which(x > 0) +x <- x[sel] +y <- y[sel] + +ll.holling3(params, x, y) + +## Fits a Holling Type III function to ABG biomass and HV palsar returns +fit1 <- optim(par = params, ll.micmen, x = x, y = y) fit1 -params = fit1$par -xseq = 
seq(min(x),max(x),length=1000) -par(new=F) +params <- fit1$par +xseq <- seq(min(x), max(x), length = 1000) +par(new = F) -par(mfrow=c(1,1)) -plot(x,y,ylim=c(min(y),max(y)),xlab="ABG Biomass (Mg/ha)",ylab="HV (gamma (dB))",main="WLEF_HV Holling Type III Function") -lines(cbind(xseq,(params[1]*xseq^2)/(params[2]^2+xseq^2)),col=2,lwd=3) +par(mfrow = c(1, 1)) +plot(x, y, ylim = c(min(y), max(y)), xlab = "ABG Biomass (Mg/ha)", ylab = "HV (gamma (dB))", main = "WLEF_HV Holling Type III Function") +lines(cbind(xseq, (params[1] * xseq^2) / (params[2]^2 + xseq^2)), col = 2, lwd = 3) # lines(cbind(x,y),col=3,lwd=3) ############################################################################################### -##Funtion to estimate likelihood for Holling Type IV +## Funtion to estimate likelihood for Holling Type IV ############################################################################################### -a<- -12 -b<-100 -sd<-sd(HV.calib) -params<-c(a,b,sd) - -y<-HV.calib -x<-wlef_abg$ABG_biomass -biomass<-x -sel = which(x>0) -x = x[sel];y=y[sel] - -ll.holling4(params,x,y) - -##Fits a Holling Type IV function to ABG biomass and HV palsar returns -fit1 = optim(par=params,ll.holling4,x=x,y=y) +a <- -12 +b <- 100 +sd <- sd(HV.calib) +params <- c(a, b, sd) + +y <- HV.calib +x <- wlef_abg$ABG_biomass +biomass <- x +sel <- which(x > 0) +x <- x[sel] +y <- y[sel] + +ll.holling4(params, x, y) + +## Fits a Holling Type IV function to ABG biomass and HV palsar returns +fit1 <- optim(par = params, ll.holling4, x = x, y = y) fit1 -params = fit1$par -xseq = seq(min(x),max(x),length=1000) -par(new=F) +params <- fit1$par +xseq <- seq(min(x), max(x), length = 1000) +par(new = F) -par(mfrow=c(1,1)) -plot(x,y,ylim=c(min(y),max(y)),xlab="ABG Biomass (Mg/ha)",ylab="HV (gamma (dB))",main="WLEF_HV Michaelis-Menten Function") -lines(cbind(xseq,(params[1]*xseq^2)/(params[2]^2+xseq^2)),col=2,lwd=3) +par(mfrow = c(1, 1)) +plot(x, y, ylim = c(min(y), max(y)), xlab = "ABG Biomass (Mg/ha)", ylab = "HV (gamma (dB))", main = "WLEF_HV Michaelis-Menten Function") +lines(cbind(xseq, (params[1] * xseq^2) / (params[2]^2 + xseq^2)), col = 2, lwd = 3) # lines(cbind(x,y),col=3,lwd=3) -##This section forces the monod function to fit the intercept. It does worse than the regular monod. +## This section forces the monod function to fit the intercept. It does worse than the regular monod. 
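## Aside, for illustration only: on the AIC bookkeeping above -- optim() minimizes its
## objective, so if the ll.* functions return a negative log-likelihood (the convention used by
## the rect.hyperbola() helper later in this patch), fit$value is already -logLik and
## AIC = 2 * fit$value + 2 * npar. A small helper along those lines, counting every fitted
## parameter including the residual sd (the script's own AIC lines count one fewer):
aic_from_optim <- function(fit) {
  2 * fit$value + 2 * length(fit$par)
}
## e.g. sapply(list(monod2 = fit.monod2, holling2 = fit.holl2), aic_from_optim)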
# params2 = c(50,0.7,0.2,1) # fit2 = optim(par=params2,ll.monod2,x=x,y=y) # fit2 @@ -167,17 +171,17 @@ lines(cbind(xseq,(params[1]*xseq^2)/(params[2]^2+xseq^2)),col=2,lwd=3) # lines(xseq,params2[2]*xseq/(xseq+params2[1])+params2[3],col=4,lwd=3) # lines(lowess(x,y),col=5,lwd=3) -bin.size = 25 -xbin = seq(0,450,bin.size) -bin = findInterval(x,xbin) -bin.mu = tapply(y,bin,mean,na.rm=TRUE) -bin.sd = tapply(y,bin,sd,na.rm=TRUE) -points(xbin[sort(as.numeric(names(bin.mu)))]+bin.size/2,bin.mu,col="orange",cex=3,pch=18) -points(xbin[sort(as.numeric(names(bin.mu)))]+bin.size/2,bin.mu+bin.sd,col="orange",cex=3,pch="_") -points(xbin[sort(as.numeric(names(bin.mu)))]+bin.size/2,bin.mu-bin.sd,col="orange",cex=3,pch="_") - -biomass<-loess.smooth(wlef_abg$ABG_biomass,HV_signal[,1])$x -HVvals<-loess.smooth(wlef_abg$ABG_biomass,HV_signal[,1])$y -par(mfrow=c(1,1)) -plot(cbind(biomass,HVvals)) -plot(loess.smooth(wlef_abg$ABG_biomass,HV_signal[,1])) \ No newline at end of file +bin.size <- 25 +xbin <- seq(0, 450, bin.size) +bin <- findInterval(x, xbin) +bin.mu <- tapply(y, bin, mean, na.rm = TRUE) +bin.sd <- tapply(y, bin, sd, na.rm = TRUE) +points(xbin[sort(as.numeric(names(bin.mu)))] + bin.size / 2, bin.mu, col = "orange", cex = 3, pch = 18) +points(xbin[sort(as.numeric(names(bin.mu)))] + bin.size / 2, bin.mu + bin.sd, col = "orange", cex = 3, pch = "_") +points(xbin[sort(as.numeric(names(bin.mu)))] + bin.size / 2, bin.mu - bin.sd, col = "orange", cex = 3, pch = "_") + +biomass <- loess.smooth(wlef_abg$ABG_biomass, HV_signal[, 1])$x +HVvals <- loess.smooth(wlef_abg$ABG_biomass, HV_signal[, 1])$y +par(mfrow = c(1, 1)) +plot(cbind(biomass, HVvals)) +plot(loess.smooth(wlef_abg$ABG_biomass, HV_signal[, 1])) diff --git a/modules/data.remote/inst/scripts/old/extract_UNDERC_points.R b/modules/data.remote/inst/scripts/old/extract_UNDERC_points.R index 60ca710d737..9490d25a4a0 100644 --- a/modules/data.remote/inst/scripts/old/extract_UNDERC_points.R +++ b/modules/data.remote/inst/scripts/old/extract_UNDERC_points.R @@ -1,171 +1,164 @@ -##this script will access a palsar file on my local machine to extract radar returns from a polygon -##associated with my UNDERC plot +## this script will access a palsar file on my local machine to extract radar returns from a polygon +## associated with my UNDERC plot -##Author: Brady S. Hardiman 04/30/2013 +## Author: Brady S. 
Hardiman 04/30/2013 -##load required pkgs and libraries +## load required pkgs and libraries require(rgdal) library(proj4) library(raster) library(sp) -scn_metadata <- read.table(file="/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", header=T, sep="\t") +scn_metadata <- read.table(file = "/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", header = T, sep = "\t") -##if running this as a function dir can/should be an argument +## if running this as a function dir can/should be an argument setwd("/home/bhardima/pecan/modules/data.remote/palsar_scenes/UNDERC/") -filelist <- as.vector(list.dirs(path=getwd() ,recursive=F)) +filelist <- as.vector(list.dirs(path = getwd(), recursive = F)) -##read in polygons correspoding to plots with biomass ground-data +## read in polygons correspoding to plots with biomass ground-data -#read in table here -#convert coordinates to UTM (to match PALSAR files) +# read in table here +# convert coordinates to UTM (to match PALSAR files) # x <- c(-89.55009302960, -89.55042213000, -89.54882422370, -89.54913190390) # y <- c(46.25113458880, 46.25338996770, 46.25120674830, 46.25345157750) # xy<- cbind(x,y) -# -# ptransform(xy,'+proj=longlat +ellps=sphere','+proj=merc +ellps=sphere') -head<-c('scnID', 'scnDate', 'cellsw', 'cellnw', 'cellne', 'cellse', 'HHsw', 'HHnw', 'HHne', 'HHse', 'HVsw', 'HVnw', 'HVne', 'HVse') -output<-matrix(NA,length(filelist)+1,length(head)) -output[1,]<-head -output[2:(length(filelist)+1),1]<-as.character(scn_metadata$scnid) -output[2:(length(filelist)+1),2]<-as.character(scn_metadata$scndate) - - - -for (i in 1:length(filelist)){ - - inpath <-filelist[i] - - ##read in PALSAR bands (HH and HV polarizations) - scnHH <- Sys.glob(file.path(inpath,"IMG-HH-*-H1.5_UA")) - scnHV <- Sys.glob(file.path(inpath,"IMG-HV-*-H1.5_UA")) +# +# ptransform(xy,'+proj=longlat +ellps=sphere','+proj=merc +ellps=sphere') +head <- c("scnID", "scnDate", "cellsw", "cellnw", "cellne", "cellse", "HHsw", "HHnw", "HHne", "HHse", "HVsw", "HVnw", "HVne", "HVse") +output <- matrix(NA, length(filelist) + 1, length(head)) +output[1, ] <- head +output[2:(length(filelist) + 1), 1] <- as.character(scn_metadata$scnid) +output[2:(length(filelist) + 1), 2] <- as.character(scn_metadata$scndate) + + + +for (i in 1:length(filelist)) { + inpath <- filelist[i] + + ## read in PALSAR bands (HH and HV polarizations) + scnHH <- Sys.glob(file.path(inpath, "IMG-HH-*-H1.5_UA")) + scnHV <- Sys.glob(file.path(inpath, "IMG-HV-*-H1.5_UA")) rasterHH <- raster(scnHH) rasterHV <- raster(scnHV) - - ##UNDERC plot corner coordinates - #decdeg_coords <-rbind(c(46.25113458880, -89.55009302960), c(46.25338996770, -89.55042213000),c(46.25345157750, -89.54913190390), c(46.25120674830, -89.54882422370), c(46.25113458880, -89.55009302960)) ##Dec degrees - ##Pomeroy Lake zero test - decdeg_coords <-rbind( + + ## UNDERC plot corner coordinates + # decdeg_coords <-rbind(c(46.25113458880, -89.55009302960), c(46.25338996770, -89.55042213000),c(46.25345157750, -89.54913190390), c(46.25120674830, -89.54882422370), c(46.25113458880, -89.55009302960)) ##Dec degrees + ## Pomeroy Lake zero test + decdeg_coords <- rbind( c(46.275724, -89.581100), c(46.278254, -89.581100), c(46.278254, -89.573647), - c(46.275724, -89.573647)) - - - - swN <-decdeg_coords[1,1] #southwest corner easting=swE, southwest corner northing=swN - nwN <-decdeg_coords[2,1] - neN <-decdeg_coords[3,1] - seN <-decdeg_coords[4,1] - - swE <-decdeg_coords[1,2] - nwE <-decdeg_coords[2,2] - neE 
<-decdeg_coords[3,2] - seE <-decdeg_coords[4,2] - - a0 <- scn_metadata$scn_coord2pix_a0[scn_metadata$scnid==scn_metadata$scnid[i]] - a1 <- scn_metadata$scn_coord2pix_a1[scn_metadata$scnid==scn_metadata$scnid[i]] - a2 <- scn_metadata$scn_coord2pix_a2[scn_metadata$scnid==scn_metadata$scnid[i]] - a3 <- scn_metadata$scn_coord2pix_a3[scn_metadata$scnid==scn_metadata$scnid[i]] - a4 <- scn_metadata$scn_coord2pix_a4[scn_metadata$scnid==scn_metadata$scnid[i]] - a5 <- scn_metadata$scn_coord2pix_a5[scn_metadata$scnid==scn_metadata$scnid[i]] - a6 <- scn_metadata$scn_coord2pix_a6[scn_metadata$scnid==scn_metadata$scnid[i]] - a7 <- scn_metadata$scn_coord2pix_a7[scn_metadata$scnid==scn_metadata$scnid[i]] - a8 <- scn_metadata$scn_coord2pix_a8[scn_metadata$scnid==scn_metadata$scnid[i]] - a9 <- scn_metadata$scn_coord2pix_a9[scn_metadata$scnid==scn_metadata$scnid[i]] - b0 <- scn_metadata$scn_coord2pix_b0[scn_metadata$scnid==scn_metadata$scnid[i]] - b1 <- scn_metadata$scn_coord2pix_b1[scn_metadata$scnid==scn_metadata$scnid[i]] - b2 <- scn_metadata$scn_coord2pix_b2[scn_metadata$scnid==scn_metadata$scnid[i]] - b3 <- scn_metadata$scn_coord2pix_b3[scn_metadata$scnid==scn_metadata$scnid[i]] - b4 <- scn_metadata$scn_coord2pix_b4[scn_metadata$scnid==scn_metadata$scnid[i]] - b5 <- scn_metadata$scn_coord2pix_b5[scn_metadata$scnid==scn_metadata$scnid[i]] - b6 <- scn_metadata$scn_coord2pix_b6[scn_metadata$scnid==scn_metadata$scnid[i]] - b7 <- scn_metadata$scn_coord2pix_b7[scn_metadata$scnid==scn_metadata$scnid[i]] - b8 <- scn_metadata$scn_coord2pix_b8[scn_metadata$scnid==scn_metadata$scnid[i]] - b9 <- scn_metadata$scn_coord2pix_b9[scn_metadata$scnid==scn_metadata$scnid[i]] - - ##l=line p=pixel - sw_p <- a0 + a1*swN + a2*swE + a3*swN*swE + a4*swN^2 + a5*swE^2 + a6*swN^2*swE + a7*swN*swE^2 + a8*swN^3 + a9*swE^3 - nw_p <- a0 + a1*nwN + a2*nwE + a3*nwN*nwE + a4*nwN^2 + a5*nwE^2 + a6*nwN^2*nwE + a7*nwN*nwE^2 + a8*nwN^3 + a9*nwE^3 - ne_p <- a0 + a1*neN + a2*neE + a3*neN*neE + a4*neN^2 + a5*neE^2 + a6*neN^2*neE + a7*neN*neE^2 + a8*neN^3 + a9*neE^3 - se_p <- a0 + a1*seN + a2*seE + a3*seN*seE + a4*seN^2 + a5*seE^2 + a6*seN^2*seE + a7*seN*seE^2 + a8*seN^3 + a9*seE^3 - - sw_l <- b0 + b1*swN + b2*swE + b3*swN*swE + b4*swN^2 + b5*swE^2 + b6*swN^2*swE + b7*swN*swE^2 + b8*swN^3 + b9*swE^3 - nw_l <- b0 + b1*nwN + b2*nwE + b3*nwN*nwE + b4*nwN^2 + b5*nwE^2 + b6*nwN^2*nwE + b7*nwN*nwE^2 + b8*nwN^3 + b9*nwE^3 - ne_l <- b0 + b1*neN + b2*neE + b3*neN*neE + b4*neN^2 + b5*neE^2 + b6*neN^2*neE + b7*neN*neE^2 + b8*neN^3 + b9*neE^3 - se_l <- b0 + b1*seN + b2*seE + b3*seN*seE + b4*seN^2 + b5*seE^2 + b6*seN^2*seE + b7*seN*seE^2 + b8*seN^3 + b9*seE^3 - - lpcoords <-rbind(c(sw_l, sw_p), c(nw_l, nw_p),c(ne_l, ne_p), c(se_l, se_p)) ##l=line p=pixel - - HHcells<-extract(rasterHH, lpcoords, method='simple',cellnumbers=T) - HVcells<-extract(rasterHV, lpcoords, method='simple',cellnumbers=T) - - cells <- HHcells[,1] - - output[i+1,3]<-cells[1] - output[i+1,4]<-cells[2] - output[i+1,5]<-cells[3] - output[i+1,6]<-cells[4] - - LUT<-cbind(unique(rowFromCell(rasterHH,cells[1:length(cells)])),unique(colFromCell(rasterHH,cells[1:length(cells)]))) - - HH<-vector(mode='numeric',length(rows)) - HV<-vector(mode='numeric',length(rows)) - - for(j in 1:nrow(LUT)){ - HH[j]<-rasterHH[LUT[j,1],LUT[j,2]] - HV[j]<-rasterHV[LUT[j,1],LUT[j,2]] + c(46.275724, -89.573647) + ) + + + + swN <- decdeg_coords[1, 1] # southwest corner easting=swE, southwest corner northing=swN + nwN <- decdeg_coords[2, 1] + neN <- decdeg_coords[3, 1] + seN <- decdeg_coords[4, 1] + + swE <- 
decdeg_coords[1, 2] + nwE <- decdeg_coords[2, 2] + neE <- decdeg_coords[3, 2] + seE <- decdeg_coords[4, 2] + + a0 <- scn_metadata$scn_coord2pix_a0[scn_metadata$scnid == scn_metadata$scnid[i]] + a1 <- scn_metadata$scn_coord2pix_a1[scn_metadata$scnid == scn_metadata$scnid[i]] + a2 <- scn_metadata$scn_coord2pix_a2[scn_metadata$scnid == scn_metadata$scnid[i]] + a3 <- scn_metadata$scn_coord2pix_a3[scn_metadata$scnid == scn_metadata$scnid[i]] + a4 <- scn_metadata$scn_coord2pix_a4[scn_metadata$scnid == scn_metadata$scnid[i]] + a5 <- scn_metadata$scn_coord2pix_a5[scn_metadata$scnid == scn_metadata$scnid[i]] + a6 <- scn_metadata$scn_coord2pix_a6[scn_metadata$scnid == scn_metadata$scnid[i]] + a7 <- scn_metadata$scn_coord2pix_a7[scn_metadata$scnid == scn_metadata$scnid[i]] + a8 <- scn_metadata$scn_coord2pix_a8[scn_metadata$scnid == scn_metadata$scnid[i]] + a9 <- scn_metadata$scn_coord2pix_a9[scn_metadata$scnid == scn_metadata$scnid[i]] + b0 <- scn_metadata$scn_coord2pix_b0[scn_metadata$scnid == scn_metadata$scnid[i]] + b1 <- scn_metadata$scn_coord2pix_b1[scn_metadata$scnid == scn_metadata$scnid[i]] + b2 <- scn_metadata$scn_coord2pix_b2[scn_metadata$scnid == scn_metadata$scnid[i]] + b3 <- scn_metadata$scn_coord2pix_b3[scn_metadata$scnid == scn_metadata$scnid[i]] + b4 <- scn_metadata$scn_coord2pix_b4[scn_metadata$scnid == scn_metadata$scnid[i]] + b5 <- scn_metadata$scn_coord2pix_b5[scn_metadata$scnid == scn_metadata$scnid[i]] + b6 <- scn_metadata$scn_coord2pix_b6[scn_metadata$scnid == scn_metadata$scnid[i]] + b7 <- scn_metadata$scn_coord2pix_b7[scn_metadata$scnid == scn_metadata$scnid[i]] + b8 <- scn_metadata$scn_coord2pix_b8[scn_metadata$scnid == scn_metadata$scnid[i]] + b9 <- scn_metadata$scn_coord2pix_b9[scn_metadata$scnid == scn_metadata$scnid[i]] + + ## l=line p=pixel + sw_p <- a0 + a1 * swN + a2 * swE + a3 * swN * swE + a4 * swN^2 + a5 * swE^2 + a6 * swN^2 * swE + a7 * swN * swE^2 + a8 * swN^3 + a9 * swE^3 + nw_p <- a0 + a1 * nwN + a2 * nwE + a3 * nwN * nwE + a4 * nwN^2 + a5 * nwE^2 + a6 * nwN^2 * nwE + a7 * nwN * nwE^2 + a8 * nwN^3 + a9 * nwE^3 + ne_p <- a0 + a1 * neN + a2 * neE + a3 * neN * neE + a4 * neN^2 + a5 * neE^2 + a6 * neN^2 * neE + a7 * neN * neE^2 + a8 * neN^3 + a9 * neE^3 + se_p <- a0 + a1 * seN + a2 * seE + a3 * seN * seE + a4 * seN^2 + a5 * seE^2 + a6 * seN^2 * seE + a7 * seN * seE^2 + a8 * seN^3 + a9 * seE^3 + + sw_l <- b0 + b1 * swN + b2 * swE + b3 * swN * swE + b4 * swN^2 + b5 * swE^2 + b6 * swN^2 * swE + b7 * swN * swE^2 + b8 * swN^3 + b9 * swE^3 + nw_l <- b0 + b1 * nwN + b2 * nwE + b3 * nwN * nwE + b4 * nwN^2 + b5 * nwE^2 + b6 * nwN^2 * nwE + b7 * nwN * nwE^2 + b8 * nwN^3 + b9 * nwE^3 + ne_l <- b0 + b1 * neN + b2 * neE + b3 * neN * neE + b4 * neN^2 + b5 * neE^2 + b6 * neN^2 * neE + b7 * neN * neE^2 + b8 * neN^3 + b9 * neE^3 + se_l <- b0 + b1 * seN + b2 * seE + b3 * seN * seE + b4 * seN^2 + b5 * seE^2 + b6 * seN^2 * seE + b7 * seN * seE^2 + b8 * seN^3 + b9 * seE^3 + + lpcoords <- rbind(c(sw_l, sw_p), c(nw_l, nw_p), c(ne_l, ne_p), c(se_l, se_p)) ## l=line p=pixel + + HHcells <- extract(rasterHH, lpcoords, method = "simple", cellnumbers = T) + HVcells <- extract(rasterHV, lpcoords, method = "simple", cellnumbers = T) + + cells <- HHcells[, 1] + + output[i + 1, 3] <- cells[1] + output[i + 1, 4] <- cells[2] + output[i + 1, 5] <- cells[3] + output[i + 1, 6] <- cells[4] + + LUT <- cbind(unique(rowFromCell(rasterHH, cells[1:length(cells)])), unique(colFromCell(rasterHH, cells[1:length(cells)]))) + + HH <- vector(mode = "numeric", length(rows)) + HV <- vector(mode = "numeric", 
length(rows)) + + for (j in 1:nrow(LUT)) { + HH[j] <- rasterHH[LUT[j, 1], LUT[j, 2]] + HV[j] <- rasterHV[LUT[j, 1], LUT[j, 2]] } - output[i+1,7] <- HH[1] - output[i+1,8] <- HH[2] - output[i+1,9] <- HH[3] - output[i+1,10]<- HH[4] - - output[i+1,11] <-HV[1] - output[i+1,12] <-HV[2] - output[i+1,13] <-HV[3] - output[i+1,14] <-HV[4] - + output[i + 1, 7] <- HH[1] + output[i + 1, 8] <- HH[2] + output[i + 1, 9] <- HH[3] + output[i + 1, 10] <- HH[4] + + output[i + 1, 11] <- HV[1] + output[i + 1, 12] <- HV[2] + output[i + 1, 13] <- HV[3] + output[i + 1, 14] <- HV[4] } -dates<-as.date(as.character(substr(output[2:nrow(output),2],1,8)),order='ymd') -max_return<-vector() -##To plot an 8 panel time series (line plot) of each of 4 coordinate's HH and HV returns -par(mfcol=c(4,2)) -for(l in 1:8){ - max_return[l]<-max(as.numeric(as.character(output[2:nrow(output),l+6])),na.rm=T) - plot(dates,as.numeric(as.character(output[2:nrow(output),l+6])), type="n",xlab='Date', ylab=output[1,l+6], ylim=c(0,max(max_return))) - lines(dates,as.numeric(as.character(output[2:nrow(output),l+6])), type="b") +dates <- as.date(as.character(substr(output[2:nrow(output), 2], 1, 8)), order = "ymd") +max_return <- vector() +## To plot an 8 panel time series (line plot) of each of 4 coordinate's HH and HV returns +par(mfcol = c(4, 2)) +for (l in 1:8) { + max_return[l] <- max(as.numeric(as.character(output[2:nrow(output), l + 6])), na.rm = T) + plot(dates, as.numeric(as.character(output[2:nrow(output), l + 6])), type = "n", xlab = "Date", ylab = output[1, l + 6], ylim = c(0, max(max_return))) + lines(dates, as.numeric(as.character(output[2:nrow(output), l + 6])), type = "b") } -par(mfcol=c(1,1)) -plot(dates,as.numeric(as.character(output[2:nrow(output),7])), col='red', ylim=c(0,max(max_return)),pch=19,ps=20,type="n") -title(main='HH') -points(dates,as.numeric(as.character(output[2:nrow(output),7])), col='red', pch=19,ps=20) -points(dates,as.numeric(as.character(output[2:nrow(output),8])), col='green', pch=19,ps=20) -points(dates,as.numeric(as.character(output[2:nrow(output),9])), col='blue', pch=19,ps=20) -points(dates,as.numeric(as.character(output[2:nrow(output),10])), col='black', pch=19,ps=20) - -par(mfcol=c(1,1)) -plot(dates,as.numeric(as.character(output[2:nrow(output),7])), col='red', ylim=c(0,max(max_return)),pch=19,ps=20,type="n") -title(main='HV') -points(dates,as.numeric(as.character(output[2:nrow(output),11])), col='red', pch=19,ps=20) -points(dates,as.numeric(as.character(output[2:nrow(output),12])), col='green', pch=19,ps=20) -points(dates,as.numeric(as.character(output[2:nrow(output),13])), col='blue', pch=19,ps=20) -points(dates,as.numeric(as.character(output[2:nrow(output),14])), col='black', pch=19,ps=20) - -# -# +par(mfcol = c(1, 1)) +plot(dates, as.numeric(as.character(output[2:nrow(output), 7])), col = "red", ylim = c(0, max(max_return)), pch = 19, ps = 20, type = "n") +title(main = "HH") +points(dates, as.numeric(as.character(output[2:nrow(output), 7])), col = "red", pch = 19, ps = 20) +points(dates, as.numeric(as.character(output[2:nrow(output), 8])), col = "green", pch = 19, ps = 20) +points(dates, as.numeric(as.character(output[2:nrow(output), 9])), col = "blue", pch = 19, ps = 20) +points(dates, as.numeric(as.character(output[2:nrow(output), 10])), col = "black", pch = 19, ps = 20) + +par(mfcol = c(1, 1)) +plot(dates, as.numeric(as.character(output[2:nrow(output), 7])), col = "red", ylim = c(0, max(max_return)), pch = 19, ps = 20, type = "n") +title(main = "HV") +points(dates, 
as.numeric(as.character(output[2:nrow(output), 11])), col = "red", pch = 19, ps = 20) +points(dates, as.numeric(as.character(output[2:nrow(output), 12])), col = "green", pch = 19, ps = 20) +points(dates, as.numeric(as.character(output[2:nrow(output), 13])), col = "blue", pch = 19, ps = 20) +points(dates, as.numeric(as.character(output[2:nrow(output), 14])), col = "black", pch = 19, ps = 20) + +# +# # write.table(output,file="/home/bhardima/pecan/modules/data.remote/output/data/output_point-value-timeseries.csv",quote=F,sep="\t",eol="\r\n", row.names=F,col.names=F) # path="/home/bhardima/pecan/modules/data.remote/output/data/output_point-value-timeseries.csv" # output<-read.delim(path, header=F, sep="\t", fill=T) -# +# # values1){ - WLEF_extracted_48m[((i-1)*nrow(spcheasFIA[fia.in.rast]@coords)+1) : ((i-1)*nrow(spcheasFIA[fia.in.rast]@coords)+nrow(spcheasFIA[fia.in.rast]@coords)),1:7]<-all_48 + HH_data_48m <- extract(HH_rast, spcheasFIA[fia.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # this step is very slow + HV_data_48m <- extract(HV_rast, spcheasFIA[fia.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # this step is very slow + + filename <- matrix(substr(as.character(HV_filelist[i]), 1, 15), nrow = length(HH_data_48m), ncol = 1) + palsar_date <- matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid == filename[1]]), 1, 8), "%Y%m%d")), nrow = length(HH_data_48m), ncol = 1) + + all_48 <- cbind(filename, palsar_date, spcheasFIA[fia.in.rast]@coords, biomass[fia.in.rast], HH_data_48m, HV_data_48m) + if (i == 1) { + WLEF_extracted_48m[i:nrow(spcheasFIA[fia.in.rast]@coords), ] <- all_48 + } else if (i > 1) { + WLEF_extracted_48m[((i - 1) * nrow(spcheasFIA[fia.in.rast]@coords) + 1):((i - 1) * nrow(spcheasFIA[fia.in.rast]@coords) + nrow(spcheasFIA[fia.in.rast]@coords)), 1:7] <- all_48 } ############################### - #calibration data from FIA plots 60m BUFFER MEAN + # calibration data from FIA plots 60m BUFFER MEAN ############################### - HH_data_60m<-extract(HH_rast, spcheasFIA[fia.in.rast], method="simple",buffer=60, small=T, fun=mean) #this step is very slow - HV_data_60m<-extract(HV_rast, spcheasFIA[fia.in.rast], method="simple",buffer=60, small=T, fun=mean) #this step is very slow - - filename<-matrix(substr(as.character(HV_filelist[i]),1,15),nrow=length(HH_data_60m),ncol=1) - colnames(filename)<-"scnid" - palsar_date<-matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid==filename[1]]),1,8),"%Y%m%d")),nrow=length(HH_data_60m),ncol=1) - colnames(palsar_date)<-"scndate" - - all_60<-cbind(filename,palsar_date,spcheasFIA[fia.in.rast]@coords,biomass[fia.in.rast],HH_data_60m,HV_data_60m) - if(i==1){ - WLEF_extracted_60m[i : nrow(spcheasFIA[fia.in.rast]@coords),]<-all_60 - }else if(i>1){ - WLEF_extracted_60m[((i-1)*nrow(spcheasFIA[fia.in.rast]@coords)+1) : ((i-1)*nrow(spcheasFIA[fia.in.rast]@coords)+nrow(spcheasFIA[fia.in.rast]@coords)),1:7]<-all_60 + HH_data_60m <- extract(HH_rast, spcheasFIA[fia.in.rast], method = "simple", buffer = 60, small = T, fun = mean) # this step is very slow + HV_data_60m <- extract(HV_rast, spcheasFIA[fia.in.rast], method = "simple", buffer = 60, small = T, fun = mean) # this step is very slow + + filename <- matrix(substr(as.character(HV_filelist[i]), 1, 15), nrow = length(HH_data_60m), ncol = 1) + colnames(filename) <- "scnid" + palsar_date <- matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid == filename[1]]), 1, 8), "%Y%m%d")), nrow = 
length(HH_data_60m), ncol = 1) + colnames(palsar_date) <- "scndate" + + all_60 <- cbind(filename, palsar_date, spcheasFIA[fia.in.rast]@coords, biomass[fia.in.rast], HH_data_60m, HV_data_60m) + if (i == 1) { + WLEF_extracted_60m[i:nrow(spcheasFIA[fia.in.rast]@coords), ] <- all_60 + } else if (i > 1) { + WLEF_extracted_60m[((i - 1) * nrow(spcheasFIA[fia.in.rast]@coords) + 1):((i - 1) * nrow(spcheasFIA[fia.in.rast]@coords) + nrow(spcheasFIA[fia.in.rast]@coords)), 1:7] <- all_60 } - - print(paste("i=",i,sep="")) + + print(paste("i=", i, sep = "")) # print(nrow(spcheasFIA[fia.in.rast]@coords)) # print(paste("j=",j,sep="")) # } } # extracted_48m<-na.omit(extracted_48m) # extracted_60m<-na.omit(extracted_60m) -WLEF_dat48<-extracted_48m #Create working copy of data (so that I don't need to re-extract if I screw up the data) -WLEF_dat60<-extracted_60m #Create working copy of data (so that I don't need to re-extract if I screw up the data) -dat48<-extracted_48m #Create working copy of data (so that I don't need to re-extract if I screw up the data) -dat60<-extracted_60m #Create working copy of data (so that I don't need to re-extract if I screw up the data) +WLEF_dat48 <- extracted_48m # Create working copy of data (so that I don't need to re-extract if I screw up the data) +WLEF_dat60 <- extracted_60m # Create working copy of data (so that I don't need to re-extract if I screw up the data) +dat48 <- extracted_48m # Create working copy of data (so that I don't need to re-extract if I screw up the data) +dat60 <- extracted_60m # Create working copy of data (so that I don't need to re-extract if I screw up the data) -colnames(dat48)<-c("scnid","scndate", "UTM.lat", "UTM.lon", "biomass","HH.sigma.48", "HV.sigma.48") -colnames(dat60)<-c("scnid","scndate", "UTM.lat", "UTM.lon", "biomass","HH.sigma.60", "HV.sigma.60") +colnames(dat48) <- c("scnid", "scndate", "UTM.lat", "UTM.lon", "biomass", "HH.sigma.48", "HV.sigma.48") +colnames(dat60) <- c("scnid", "scndate", "UTM.lat", "UTM.lon", "biomass", "HH.sigma.60", "HV.sigma.60") -extracted_48m[,1]<-as.character(extracted_48m[,1]) -extracted_48m[,2]<-as.Date(extracted_48m[,2],"%Y-%M-%d") -extracted_48m[,3:7]<- as.numeric(as.character(extracted_48m[,3:7])) +extracted_48m[, 1] <- as.character(extracted_48m[, 1]) +extracted_48m[, 2] <- as.Date(extracted_48m[, 2], "%Y-%M-%d") +extracted_48m[, 3:7] <- as.numeric(as.character(extracted_48m[, 3:7])) -HH_48<-as.numeric(dat48[,6]) -HV_48<-as.numeric(dat48[,7]) +HH_48 <- as.numeric(dat48[, 6]) +HV_48 <- as.numeric(dat48[, 7]) -HH_60<-as.numeric(dat60[,6]) -HV_60<-as.numeric(dat60[,7]) +HH_60 <- as.numeric(dat60[, 6]) +HV_60 <- as.numeric(dat60[, 7]) -##QC plots -par(mfrow=c(1,2)) #checking coordinate alignment of different extraction buffer sizes -plot(dat48[,3],dat60[,3]) -plot(dat48[,4],dat60[,4]) +## QC plots +par(mfrow = c(1, 2)) # checking coordinate alignment of different extraction buffer sizes +plot(dat48[, 3], dat60[, 3]) +plot(dat48[, 4], dat60[, 4]) -par(mfrow=c(1,2)) # checking extracted backscatter values for extraction buffers (should bedifferent but not TOO different) -plot(HH_48,HH_60,xlab="48m",ylab="60m",main="HH") -plot(HV_48,HV_60,xlab="48m",ylab="60m",main="HV") +par(mfrow = c(1, 2)) # checking extracted backscatter values for extraction buffers (should bedifferent but not TOO different) +plot(HH_48, HH_60, xlab = "48m", ylab = "60m", main = "HH") +plot(HV_48, HV_60, xlab = "48m", ylab = "60m", main = "HV") -##Data Exploration -par(mfrow=c(2,2)) 
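## Aside, for illustration only: the row indexing used above fills one block of
## n = nrow(spcheasFIA[fia.in.rast]@coords) rows per scene, i.e. rows (i - 1) * n + 1 through
## i * n. The same bookkeeping written as a small helper (block_rows is a hypothetical name,
## not used by the script):
block_rows <- function(i, n) ((i - 1) * n + 1):(i * n)
## e.g. WLEF_extracted_48m[block_rows(i, n), ] <- all_48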
-plot(dat48[,5],HH_48/HV_48,xlab="biomass",ylab="HH/HV",main="48m") -plot(dat60[,5],HH_60/HV_60,xlab="biomass",ylab="HH/HV",main="60m") +## Data Exploration +par(mfrow = c(2, 2)) +plot(dat48[, 5], HH_48 / HV_48, xlab = "biomass", ylab = "HH/HV", main = "48m") +plot(dat60[, 5], HH_60 / HV_60, xlab = "biomass", ylab = "HH/HV", main = "60m") -plot(dat48[,5],HH_48*HV_48,xlab="biomass",ylab="HHxHV",main="48m") -plot(dat60[,5],HH_60*HV_60,xlab="biomass",ylab="HHxHV",main="60m") +plot(dat48[, 5], HH_48 * HV_48, xlab = "biomass", ylab = "HHxHV", main = "48m") +plot(dat60[, 5], HH_60 * HV_60, xlab = "biomass", ylab = "HHxHV", main = "60m") -par(mfrow=c(1,2)) -plot(dat48[,5],(HH_48-HV_48)/(HH_48+HV_48),xlab="biomass",ylab="(HH-HV)/(HH+HV)",main="48m") -plot(dat60[,5],(HH_60-HV_60)/(HH_60+HV_60),xlab="biomass",ylab="(HH-HV)/(HH+HV)",main="60m") +par(mfrow = c(1, 2)) +plot(dat48[, 5], (HH_48 - HV_48) / (HH_48 + HV_48), xlab = "biomass", ylab = "(HH-HV)/(HH+HV)", main = "48m") +plot(dat60[, 5], (HH_60 - HV_60) / (HH_60 + HV_60), xlab = "biomass", ylab = "(HH-HV)/(HH+HV)", main = "60m") ######################################################## ######################################################## ## Curve fitting -param_est<-matrix(NA,nrow=length(spp)*length(trt),ncol=length(trt)+4) #columns for spp, trt, and 3 params,+ AIC -param_est[,1]<-spp -param_est[,2]<-trt - -colnames(param_est)<-c("spp","trt","Pmax","ki","sd","AIC") - -par(mfrow=c(length(spp),length(trt))) -for(j in 1:length(trt)){ - for(i in 1:length(spp)){ - - y<-data_pts$photo[data_pts$TRT== trt[j] & data_pts$spp== spp[i]] - x<-data_pts$par [data_pts$TRT== trt[j] & data_pts$spp== spp[i]] - - Pmax<-mean(data_pts$photo[data_pts$spp==spp[i] & data_pts$TRT==trt[j] & data_pts$par>1500]) - ki<- round(mean(data_pts$par[data_pts$photo > (Pmax/2)-0.5 & data_pts$photo < (Pmax/2)+0.5])) - sd<-sd(data_pts$photo[data_pts$spp==spp[i] & data_pts$TRT==trt[j]]) - - params<-c(Pmax,ki,sd) - - rect.hyperbola <- function(params,x,y){ - Pmax<-params[1] - ki<-params[2] - sd<-params[3] - - photo_pred<-(Pmax*x)/(ki+x) - LL<- -sum(dnorm(y,photo_pred,sd,log=TRUE)) +param_est <- matrix(NA, nrow = length(spp) * length(trt), ncol = length(trt) + 4) # columns for spp, trt, and 3 params,+ AIC +param_est[, 1] <- spp +param_est[, 2] <- trt + +colnames(param_est) <- c("spp", "trt", "Pmax", "ki", "sd", "AIC") + +par(mfrow = c(length(spp), length(trt))) +for (j in 1:length(trt)) { + for (i in 1:length(spp)) { + y <- data_pts$photo[data_pts$TRT == trt[j] & data_pts$spp == spp[i]] + x <- data_pts$par[data_pts$TRT == trt[j] & data_pts$spp == spp[i]] + + Pmax <- mean(data_pts$photo[data_pts$spp == spp[i] & data_pts$TRT == trt[j] & data_pts$par > 1500]) + ki <- round(mean(data_pts$par[data_pts$photo > (Pmax / 2) - 0.5 & data_pts$photo < (Pmax / 2) + 0.5])) + sd <- sd(data_pts$photo[data_pts$spp == spp[i] & data_pts$TRT == trt[j]]) + + params <- c(Pmax, ki, sd) + + rect.hyperbola <- function(params, x, y) { + Pmax <- params[1] + ki <- params[2] + sd <- params[3] + + photo_pred <- (Pmax * x) / (ki + x) + LL <- -sum(dnorm(y, photo_pred, sd, log = TRUE)) return(LL) - } #function - - fit.recthyp = optim(par=params,rect.hyperbola,x=x,y=y) + } # function + + fit.recthyp <- optim(par = params, rect.hyperbola, x = x, y = y) fit.recthyp - aic.recthyp<- -2*fit.recthyp$value + 2*length(params) - - params = c(fit.recthyp$par,aic.recthyp) #this par means paramaters, not light level - param_est[param_est[,1]==spp[i] & param_est[,2]==trt[j],3:6]<-params - xseq = seq(0,max(x),length=1000) - - 
plot(x,y,xlab="Q (PAR)",ylab="A",main=paste(spp[i], "treatment=",trt[j]), - xlim=c(min(data_pts$par),max(data_pts$par)), - ylim=c(min(data_pts$photo),max(data_pts$photo)), - pch=16,col="#CCCCCC") - abline(a=0,b=0) - lines(cbind(xseq,(params[1]*xseq)/(params[2]+xseq)),lwd=3) #closed circles and solid line for CTRL - }#for j looping over trt -}#for i looping over spp + aic.recthyp <- -2 * fit.recthyp$value + 2 * length(params) + + params <- c(fit.recthyp$par, aic.recthyp) # this par means paramaters, not light level + param_est[param_est[, 1] == spp[i] & param_est[, 2] == trt[j], 3:6] <- params + xseq <- seq(0, max(x), length = 1000) + + plot(x, y, + xlab = "Q (PAR)", ylab = "A", main = paste(spp[i], "treatment=", trt[j]), + xlim = c(min(data_pts$par), max(data_pts$par)), + ylim = c(min(data_pts$photo), max(data_pts$photo)), + pch = 16, col = "#CCCCCC" + ) + abline(a = 0, b = 0) + lines(cbind(xseq, (params[1] * xseq) / (params[2] + xseq)), lwd = 3) # closed circles and solid line for CTRL + } # for j looping over trt +} # for i looping over spp param_est -#diagnotics? - - +# diagnotics? diff --git a/modules/data.remote/inst/scripts/old/extract_palsar_metadata_function.R b/modules/data.remote/inst/scripts/old/extract_palsar_metadata_function.R index 9971e1fa1c7..9360f991c93 100644 --- a/modules/data.remote/inst/scripts/old/extract_palsar_metadata_function.R +++ b/modules/data.remote/inst/scripts/old/extract_palsar_metadata_function.R @@ -1,150 +1,151 @@ -extract_palsar_metadata_function <-function(inpath){ - -###### -##Author Brady S. Hardiman -## 04/24/2013 +extract_palsar_metadata_function <- function(inpath) { + ###### + ## Author Brady S. Hardiman + ## 04/24/2013 -##Parameters I (Metadata) will be extracted from these files: -workreport <- read.delim(file.path(inpath,"workreport"), header=F, sep="=", fill=T) + ## Parameters I (Metadata) will be extracted from these files: + workreport <- read.delim(file.path(inpath, "workreport"), header = F, sep = "=", fill = T) -##scene ID -scnid <- as.character(workreport$V2[grep("Scs_SceneID",workreport$V1)]) + ## scene ID + scnid <- as.character(workreport$V2[grep("Scs_SceneID", workreport$V1)]) -##Parameters II (Metadata) will be extracted from these files: -txtfile <- read.delim(file.path(inpath, paste(scnid,".LED.txt",sep="")),header=F, sep="\t", fill=T) + ## Parameters II (Metadata) will be extracted from these files: + txtfile <- read.delim(file.path(inpath, paste(scnid, ".LED.txt", sep = "")), header = F, sep = "\t", fill = T) -##scene centroid date time (Format is YYYYMMDD HH:MM:SS.SSS) -scndate <- as.character(workreport$V2[grep("Img_SceneCenterDateTime",workreport$V1)]) + ## scene centroid date time (Format is YYYYMMDD HH:MM:SS.SSS) + scndate <- as.character(workreport$V2[grep("Img_SceneCenterDateTime", workreport$V1)]) -##scene UTM zone -scnUTMzone <- as.numeric(as.character(workreport$V2[grep("Pds_UTM_ZoneNo",workreport$V1)])) + ## scene UTM zone + scnUTMzone <- as.numeric(as.character(workreport$V2[grep("Pds_UTM_ZoneNo", workreport$V1)])) -##scene Pixel spacing (m) -scnpix <- as.numeric(as.character(workreport$V2[grep("Pds_PixelSpacing",workreport$V1)])) + ## scene Pixel spacing (m) + scnpix <- as.numeric(as.character(workreport$V2[grep("Pds_PixelSpacing", workreport$V1)])) -##scene calibration factor -## Note: Level 1.5 s0 = 10*log10 + CF where DN is the pixel value of level 1.5 products -## and CF is calibration factor -scncf_raw <- as.matrix(txtfile$V1[grep("Calibration factor", txtfile$V1)],nrow=2) -library(stringr) -scncf <- 
as.numeric(str_extract_all(scncf_raw[1,1],"\\(?[0-9,.-]+\\)?")[[1]]) + ## scene calibration factor + ## Note: Level 1.5 s0 = 10*log10 + CF where DN is the pixel value of level 1.5 products + ## and CF is calibration factor + scncf_raw <- as.matrix(txtfile$V1[grep("Calibration factor", txtfile$V1)], nrow = 2) + library(stringr) + scncf <- as.numeric(str_extract_all(scncf_raw[1, 1], "\\(?[0-9,.-]+\\)?")[[1]]) -## Coefficients to convert a line & pixel to a projection reference -## To convert a line (L) and pixel (P) position to the map projection frame of reference, say (E, N) where: -## E = A11 + A12*L + A13*P + A14*L*P -## N = A21 + A22*L + A23*P + A24*L*P -options(digits=8) -scn_pix2coord_a11 <- as.numeric(as.character(txtfile$V2[grep("a11", txtfile$V1)])) -scn_pix2coord_a12 <- as.numeric(as.character(txtfile$V2[grep("a12", txtfile$V1)])) -scn_pix2coord_a13 <- as.numeric(as.character(txtfile$V2[grep("a13", txtfile$V1)])) -scn_pix2coord_a14 <- as.numeric(as.character(txtfile$V2[grep("a14", txtfile$V1)])) -scn_pix2coord_a21 <- as.numeric(as.character(txtfile$V2[grep("a21", txtfile$V1)])) -scn_pix2coord_a22 <- as.numeric(as.character(txtfile$V2[grep("a22", txtfile$V1)])) -scn_pix2coord_a23 <- as.numeric(as.character(txtfile$V2[grep("a23", txtfile$V1)])) -scn_pix2coord_a24 <- as.numeric(as.character(txtfile$V2[grep("a24", txtfile$V1)])) + ## Coefficients to convert a line & pixel to a projection reference + ## To convert a line (L) and pixel (P) position to the map projection frame of reference, say (E, N) where: + ## E = A11 + A12*L + A13*P + A14*L*P + ## N = A21 + A22*L + A23*P + A24*L*P + options(digits = 8) + scn_pix2coord_a11 <- as.numeric(as.character(txtfile$V2[grep("a11", txtfile$V1)])) + scn_pix2coord_a12 <- as.numeric(as.character(txtfile$V2[grep("a12", txtfile$V1)])) + scn_pix2coord_a13 <- as.numeric(as.character(txtfile$V2[grep("a13", txtfile$V1)])) + scn_pix2coord_a14 <- as.numeric(as.character(txtfile$V2[grep("a14", txtfile$V1)])) + scn_pix2coord_a21 <- as.numeric(as.character(txtfile$V2[grep("a21", txtfile$V1)])) + scn_pix2coord_a22 <- as.numeric(as.character(txtfile$V2[grep("a22", txtfile$V1)])) + scn_pix2coord_a23 <- as.numeric(as.character(txtfile$V2[grep("a23", txtfile$V1)])) + scn_pix2coord_a24 <- as.numeric(as.character(txtfile$V2[grep("a24", txtfile$V1)])) -##Coefficients to convert a projection reference to a line & pixel -## To convert from the map projection (E, N) of the pixel at the upper left to line (L) and pixel (P) position in the image, say (L, P) where: corner and (E, N) show alongitude (deg.) 
and a latitude -## L = B11 + B12*E + B13*N + B14*E*N -## P = B21 + B22*E + B23*N + B24*E*N -scn_coord2pix_b11 <- as.numeric(as.character(txtfile$V2[grep("b11", txtfile$V1)])) -scn_coord2pix_b12 <- as.numeric(as.character(txtfile$V2[grep("b12", txtfile$V1)])) -scn_coord2pix_b13 <- as.numeric(as.character(txtfile$V2[grep("b13", txtfile$V1)])) -scn_coord2pix_b14 <- as.numeric(as.character(txtfile$V2[grep("b14", txtfile$V1)])) -scn_coord2pix_b21 <- as.numeric(as.character(txtfile$V2[grep("b21", txtfile$V1)])) -scn_coord2pix_b22 <- as.numeric(as.character(txtfile$V2[grep("b22", txtfile$V1)])) -scn_coord2pix_b23 <- as.numeric(as.character(txtfile$V2[grep("b23", txtfile$V1)])) -scn_coord2pix_b24 <- as.numeric(as.character(txtfile$V2[grep("b24", txtfile$V1)])) + ## Coefficients to convert a projection reference to a line & pixel + ## To convert from the map projection (E, N) of the pixel at the upper left to line (L) and pixel (P) position in the image, say (L, P) where: corner and (E, N) show alongitude (deg.) and a latitude + ## L = B11 + B12*E + B13*N + B14*E*N + ## P = B21 + B22*E + B23*N + B24*E*N + scn_coord2pix_b11 <- as.numeric(as.character(txtfile$V2[grep("b11", txtfile$V1)])) + scn_coord2pix_b12 <- as.numeric(as.character(txtfile$V2[grep("b12", txtfile$V1)])) + scn_coord2pix_b13 <- as.numeric(as.character(txtfile$V2[grep("b13", txtfile$V1)])) + scn_coord2pix_b14 <- as.numeric(as.character(txtfile$V2[grep("b14", txtfile$V1)])) + scn_coord2pix_b21 <- as.numeric(as.character(txtfile$V2[grep("b21", txtfile$V1)])) + scn_coord2pix_b22 <- as.numeric(as.character(txtfile$V2[grep("b22", txtfile$V1)])) + scn_coord2pix_b23 <- as.numeric(as.character(txtfile$V2[grep("b23", txtfile$V1)])) + scn_coord2pix_b24 <- as.numeric(as.character(txtfile$V2[grep("b24", txtfile$V1)])) -##More coordinates to convert dec degree LatLon to line and pixel -##P = a0+a1*N+a2*E+a3*N*E+a4*N^2+a5*E^2+a6*N^2*E+a7*N*E^2+a8*N^3+a9*E^3 -##L = b0+b1*N+b2*E+b3*N*E+b4*N^2+b5*E^2+b6*N^2*E+b7*N*E^2+b8*N^3+b9*E^3 + ## More coordinates to convert dec degree LatLon to line and pixel + ## P = a0+a1*N+a2*E+a3*N*E+a4*N^2+a5*E^2+a6*N^2*E+a7*N*E^2+a8*N^3+a9*E^3 + ## L = b0+b1*N+b2*E+b3*N*E+b4*N^2+b5*E^2+b6*N^2*E+b7*N*E^2+b8*N^3+b9*E^3 -scn_coord2pix_a0 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.0.", txtfile$V1)])) -scn_coord2pix_a1 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.1.", txtfile$V1)])) -scn_coord2pix_a2 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.2.", txtfile$V1)])) -scn_coord2pix_a3 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.3.", txtfile$V1)])) -scn_coord2pix_a4 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.4.", txtfile$V1)])) -scn_coord2pix_a5 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.5.", txtfile$V1)])) -scn_coord2pix_a6 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.6.", txtfile$V1)])) -scn_coord2pix_a7 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.7.", txtfile$V1)])) -scn_coord2pix_a8 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.8.", txtfile$V1)])) -scn_coord2pix_a9 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.9.", txtfile$V1)])) -scn_coord2pix_b0 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.0.", txtfile$V1)])) -scn_coord2pix_b1 <- 
as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.1.", txtfile$V1)])) -scn_coord2pix_b2 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.2.", txtfile$V1)])) -scn_coord2pix_b3 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.3.", txtfile$V1)])) -scn_coord2pix_b4 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.4.", txtfile$V1)])) -scn_coord2pix_b5 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.5.", txtfile$V1)])) -scn_coord2pix_b6 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.6.", txtfile$V1)])) -scn_coord2pix_b7 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.7.", txtfile$V1)])) -scn_coord2pix_b8 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.8.", txtfile$V1)])) -scn_coord2pix_b9 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.9.", txtfile$V1)])) - -##Extract scene corner and center coordinates (UTM) -scn_nwlat <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneLeftTopLatitude",workreport$V1)])) -scn_nwlon <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneLeftTopLongitude",workreport$V1)])) -scn_nelat <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneRightTopLatitude",workreport$V1)])) -scn_nelon <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneRightTopLongitude",workreport$V1)])) -scn_swlat <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneLeftBottomLatitude",workreport$V1)])) -scn_swlon <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneLeftBottomLongitude",workreport$V1)])) -scn_selat <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneRightBottomLatitude",workreport$V1)])) -scn_selon <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneRightBottomLongitude",workreport$V1)])) -scn_centlat <-as.numeric(as.character(workreport$V2[grep("Img_ImageSceneCenterLatitude",workreport$V1)])) -scn_centlon <-as.numeric(as.character(workreport$V2[grep("Img_ImageSceneCenterLongitude",workreport$V1)])) + scn_coord2pix_a0 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.0.", txtfile$V1)])) + scn_coord2pix_a1 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.1.", txtfile$V1)])) + scn_coord2pix_a2 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.2.", txtfile$V1)])) + scn_coord2pix_a3 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.3.", txtfile$V1)])) + scn_coord2pix_a4 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.4.", txtfile$V1)])) + scn_coord2pix_a5 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.5.", txtfile$V1)])) + scn_coord2pix_a6 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.6.", txtfile$V1)])) + scn_coord2pix_a7 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.7.", txtfile$V1)])) + scn_coord2pix_a8 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.8.", txtfile$V1)])) + scn_coord2pix_a9 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..a.9.", txtfile$V1)])) + scn_coord2pix_b0 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.0.", txtfile$V1)])) + scn_coord2pix_b1 <- as.numeric(as.character(txtfile$V3[grep("Map to 
line/sample coefficients..b.1.", txtfile$V1)])) + scn_coord2pix_b2 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.2.", txtfile$V1)])) + scn_coord2pix_b3 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.3.", txtfile$V1)])) + scn_coord2pix_b4 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.4.", txtfile$V1)])) + scn_coord2pix_b5 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.5.", txtfile$V1)])) + scn_coord2pix_b6 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.6.", txtfile$V1)])) + scn_coord2pix_b7 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.7.", txtfile$V1)])) + scn_coord2pix_b8 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.8.", txtfile$V1)])) + scn_coord2pix_b9 <- as.numeric(as.character(txtfile$V3[grep("Map to line/sample coefficients..b.9.", txtfile$V1)])) + ## Extract scene corner and center coordinates (UTM) + scn_nwlat <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneLeftTopLatitude", workreport$V1)])) + scn_nwlon <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneLeftTopLongitude", workreport$V1)])) + scn_nelat <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneRightTopLatitude", workreport$V1)])) + scn_nelon <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneRightTopLongitude", workreport$V1)])) + scn_swlat <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneLeftBottomLatitude", workreport$V1)])) + scn_swlon <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneLeftBottomLongitude", workreport$V1)])) + scn_selat <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneRightBottomLatitude", workreport$V1)])) + scn_selon <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneRightBottomLongitude", workreport$V1)])) + scn_centlat <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneCenterLatitude", workreport$V1)])) + scn_centlon <- as.numeric(as.character(workreport$V2[grep("Img_ImageSceneCenterLongitude", workreport$V1)])) -scn_vector <- c(scnid, - scndate, - scnUTMzone, - scnpix, - scncf, - scn_pix2coord_a11, - scn_pix2coord_a12, - scn_pix2coord_a13, - scn_pix2coord_a14, - scn_pix2coord_a21, - scn_pix2coord_a22, - scn_pix2coord_a23, - scn_pix2coord_a24, - scn_coord2pix_b11, - scn_coord2pix_b12, - scn_coord2pix_b13, - scn_coord2pix_b14, - scn_coord2pix_b21, - scn_coord2pix_b22, - scn_coord2pix_b23, - scn_coord2pix_b24, - scn_coord2pix_a0, - scn_coord2pix_a1, - scn_coord2pix_a2, - scn_coord2pix_a3, - scn_coord2pix_a4, - scn_coord2pix_a5, - scn_coord2pix_a6, - scn_coord2pix_a7, - scn_coord2pix_a8, - scn_coord2pix_a9, - scn_coord2pix_b0, - scn_coord2pix_b1, - scn_coord2pix_b2, - scn_coord2pix_b3, - scn_coord2pix_b4, - scn_coord2pix_b5, - scn_coord2pix_b6, - scn_coord2pix_b7, - scn_coord2pix_b8, - scn_coord2pix_b9, - scn_nwlat, - scn_nwlon, - scn_nelat, - scn_nelon, - scn_swlat, - scn_swlon, - scn_selat, - scn_selon, - scn_centlat, - scn_centlon) -return(scn_vector) -} \ No newline at end of file + scn_vector <- c( + scnid, + scndate, + scnUTMzone, + scnpix, + scncf, + scn_pix2coord_a11, + scn_pix2coord_a12, + scn_pix2coord_a13, + scn_pix2coord_a14, + scn_pix2coord_a21, + scn_pix2coord_a22, + scn_pix2coord_a23, + scn_pix2coord_a24, + scn_coord2pix_b11, + scn_coord2pix_b12, + scn_coord2pix_b13, + scn_coord2pix_b14, + scn_coord2pix_b21, + scn_coord2pix_b22, + scn_coord2pix_b23, + 
scn_coord2pix_b24, + scn_coord2pix_a0, + scn_coord2pix_a1, + scn_coord2pix_a2, + scn_coord2pix_a3, + scn_coord2pix_a4, + scn_coord2pix_a5, + scn_coord2pix_a6, + scn_coord2pix_a7, + scn_coord2pix_a8, + scn_coord2pix_a9, + scn_coord2pix_b0, + scn_coord2pix_b1, + scn_coord2pix_b2, + scn_coord2pix_b3, + scn_coord2pix_b4, + scn_coord2pix_b5, + scn_coord2pix_b6, + scn_coord2pix_b7, + scn_coord2pix_b8, + scn_coord2pix_b9, + scn_nwlat, + scn_nwlon, + scn_nelat, + scn_nelon, + scn_swlat, + scn_swlon, + scn_selat, + scn_selon, + scn_centlat, + scn_centlon + ) + + return(scn_vector) +} diff --git a/modules/data.remote/inst/scripts/old/extract_polygons_function.R b/modules/data.remote/inst/scripts/old/extract_polygons_function.R index 6b9ec937f31..d6d614c9474 100644 --- a/modules/data.remote/inst/scripts/old/extract_polygons_function.R +++ b/modules/data.remote/inst/scripts/old/extract_polygons_function.R @@ -1,9 +1,9 @@ -##this script will access a palsar file on my local machine to extract radar returns from a polygon -##associated with my UNDERC plot +## this script will access a palsar file on my local machine to extract radar returns from a polygon +## associated with my UNDERC plot -##Author: Brady S. Hardiman 04/30/2013 +## Author: Brady S. Hardiman 04/30/2013 -##load required pkgs and libraries +## load required pkgs and libraries require(rgdal) library(proj4) library(raster) @@ -11,85 +11,84 @@ library(sp) library(spatstat) library(maptools) -scn_metadata <- read.table(file="/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", header=T, sep="\t") +scn_metadata <- read.table(file = "/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", header = T, sep = "\t") -##if running this as a function dir can/should be an argument +## if running this as a function dir can/should be an argument setwd("/home/bhardima/pecan/modules/data.remote/palsar_scenes/UNDERC/") -filelist <- as.vector(list.dirs(path=getwd() ,recursive=F)) +filelist <- as.vector(list.dirs(path = getwd(), recursive = F)) -##read in polygons correspoding to plots with biomass ground-data +## read in polygons correspoding to plots with biomass ground-data -#read in table here -#convert coordinates to UTM (to match PALSAR files) +# read in table here +# convert coordinates to UTM (to match PALSAR files) # x <- c(-89.55009302960, -89.55042213000, -89.54882422370, -89.54913190390) # y <- c(46.25113458880, 46.25338996770, 46.25120674830, 46.25345157750) # xy<- cbind(x,y) -# -# ptransform(xy,'+proj=longlat +ellps=sphere','+proj=merc +ellps=sphere') -for (i in 1:length(filelist)){ - - inpath <-filelist[i] - - ##read in PALSAR bands (HH and HV polarizations) - scnHH <- Sys.glob(file.path(inpath,"*_HH.tif")) - scnHV <- Sys.glob(file.path(inpath,"*_HV.tif")) +# +# ptransform(xy,'+proj=longlat +ellps=sphere','+proj=merc +ellps=sphere') +for (i in 1:length(filelist)) { + inpath <- filelist[i] + + ## read in PALSAR bands (HH and HV polarizations) + scnHH <- Sys.glob(file.path(inpath, "*_HH.tif")) + scnHV <- Sys.glob(file.path(inpath, "*_HV.tif")) rasterHH <- raster(scnHH) rasterHV <- raster(scnHV) - par(mfrow=(c(1,1))) + par(mfrow = (c(1, 1))) image(rasterHV) # image(rasterHH) - + # rasterHH[96,1111] # rasterHV[96,1111] - - ##overlay polygon - #overlay(scnHH scnHVpolygon filename) - #see also: Extract values from Raster objects + + ## overlay polygon + # overlay(scnHH scnHVpolygon filename) + # see also: Extract values from Raster objects # coords <-rbind(c(303438,5125112), 
c(303421,5125363),c(303521,5125367), c(303536,5125117), c(303438,5125112)) ##UTM - - ##UNDERC plot corner coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. - decdeg_coords <-rbind(c(46.25113458880, -89.55009302960), c(46.25338996770, -89.55042213000),c(46.25345157750, -89.54913190390), c(46.25120674830, -89.54882422370), c(46.25113458880, -89.55009302960)) ##Dec degrees - - ##Twin Island Lake bounding box coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. -# decdeg_coords <-rbind( -# c(46.217102,-89.601745), -# c(46.231731,-89.601745), -# c(46.231731,-89.581994), -# c(46.217102,-89.581994), -# c(46.217102,-89.601745)) - - swN <-decdeg_coords[1,1] #southwest corner easting=swE, southwest corner northing=swN - nwN <-decdeg_coords[2,1] - neN <-decdeg_coords[3,1] - seN <-decdeg_coords[4,1] - - swE <-decdeg_coords[1,2] - nwE <-decdeg_coords[2,2] - neE <-decdeg_coords[3,2] - seE <-decdeg_coords[4,2] - - a0 <- scn_metadata$scn_coord2pix_a0[scn_metadata$scnid==scn_metadata$scnid[i]] - a1 <- scn_metadata$scn_coord2pix_a1[scn_metadata$scnid==scn_metadata$scnid[i]] - a2 <- scn_metadata$scn_coord2pix_a2[scn_metadata$scnid==scn_metadata$scnid[i]] - a3 <- scn_metadata$scn_coord2pix_a3[scn_metadata$scnid==scn_metadata$scnid[i]] - a4 <- scn_metadata$scn_coord2pix_a4[scn_metadata$scnid==scn_metadata$scnid[i]] - a5 <- scn_metadata$scn_coord2pix_a5[scn_metadata$scnid==scn_metadata$scnid[i]] - a6 <- scn_metadata$scn_coord2pix_a6[scn_metadata$scnid==scn_metadata$scnid[i]] - a7 <- scn_metadata$scn_coord2pix_a7[scn_metadata$scnid==scn_metadata$scnid[i]] - a8 <- scn_metadata$scn_coord2pix_a8[scn_metadata$scnid==scn_metadata$scnid[i]] - a9 <- scn_metadata$scn_coord2pix_a9[scn_metadata$scnid==scn_metadata$scnid[i]] - b0 <- scn_metadata$scn_coord2pix_b0[scn_metadata$scnid==scn_metadata$scnid[i]] - b1 <- scn_metadata$scn_coord2pix_b1[scn_metadata$scnid==scn_metadata$scnid[i]] - b2 <- scn_metadata$scn_coord2pix_b2[scn_metadata$scnid==scn_metadata$scnid[i]] - b3 <- scn_metadata$scn_coord2pix_b3[scn_metadata$scnid==scn_metadata$scnid[i]] - b4 <- scn_metadata$scn_coord2pix_b4[scn_metadata$scnid==scn_metadata$scnid[i]] - b5 <- scn_metadata$scn_coord2pix_b5[scn_metadata$scnid==scn_metadata$scnid[i]] - b6 <- scn_metadata$scn_coord2pix_b6[scn_metadata$scnid==scn_metadata$scnid[i]] - b7 <- scn_metadata$scn_coord2pix_b7[scn_metadata$scnid==scn_metadata$scnid[i]] - b8 <- scn_metadata$scn_coord2pix_b8[scn_metadata$scnid==scn_metadata$scnid[i]] - b9 <- scn_metadata$scn_coord2pix_b9[scn_metadata$scnid==scn_metadata$scnid[i]] - + + ## UNDERC plot corner coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. + decdeg_coords <- rbind(c(46.25113458880, -89.55009302960), c(46.25338996770, -89.55042213000), c(46.25345157750, -89.54913190390), c(46.25120674830, -89.54882422370), c(46.25113458880, -89.55009302960)) ## Dec degrees + + ## Twin Island Lake bounding box coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. 
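## ---------------------------------------------------------------------------
## Editorial sketch, not part of the patched script: the third-order polynomial
## quoted in extract_palsar_metadata_function.R above and expanded term by term
## further down (sw_p, sw_l, ...), wrapped as a helper. coef_a and coef_b stand
## for the ten a0..a9 and b0..b9 coefficients pulled from scn_metadata; the
## helper name is hypothetical.
latlon_to_pixel_line <- function(N, E, coef_a, coef_b) {
  terms <- c(1, N, E, N * E, N^2, E^2, N^2 * E, N * E^2, N^3, E^3)
  c(pixel = sum(coef_a * terms), line = sum(coef_b * terms))
}
## e.g. latlon_to_pixel_line(swN, swE,
##                           c(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9),
##                           c(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9))
## reproduces the sw_p / sw_l values computed below.
## ---------------------------------------------------------------------------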
+ # decdeg_coords <-rbind( + # c(46.217102,-89.601745), + # c(46.231731,-89.601745), + # c(46.231731,-89.581994), + # c(46.217102,-89.581994), + # c(46.217102,-89.601745)) + + swN <- decdeg_coords[1, 1] # southwest corner easting=swE, southwest corner northing=swN + nwN <- decdeg_coords[2, 1] + neN <- decdeg_coords[3, 1] + seN <- decdeg_coords[4, 1] + + swE <- decdeg_coords[1, 2] + nwE <- decdeg_coords[2, 2] + neE <- decdeg_coords[3, 2] + seE <- decdeg_coords[4, 2] + + a0 <- scn_metadata$scn_coord2pix_a0[scn_metadata$scnid == scn_metadata$scnid[i]] + a1 <- scn_metadata$scn_coord2pix_a1[scn_metadata$scnid == scn_metadata$scnid[i]] + a2 <- scn_metadata$scn_coord2pix_a2[scn_metadata$scnid == scn_metadata$scnid[i]] + a3 <- scn_metadata$scn_coord2pix_a3[scn_metadata$scnid == scn_metadata$scnid[i]] + a4 <- scn_metadata$scn_coord2pix_a4[scn_metadata$scnid == scn_metadata$scnid[i]] + a5 <- scn_metadata$scn_coord2pix_a5[scn_metadata$scnid == scn_metadata$scnid[i]] + a6 <- scn_metadata$scn_coord2pix_a6[scn_metadata$scnid == scn_metadata$scnid[i]] + a7 <- scn_metadata$scn_coord2pix_a7[scn_metadata$scnid == scn_metadata$scnid[i]] + a8 <- scn_metadata$scn_coord2pix_a8[scn_metadata$scnid == scn_metadata$scnid[i]] + a9 <- scn_metadata$scn_coord2pix_a9[scn_metadata$scnid == scn_metadata$scnid[i]] + b0 <- scn_metadata$scn_coord2pix_b0[scn_metadata$scnid == scn_metadata$scnid[i]] + b1 <- scn_metadata$scn_coord2pix_b1[scn_metadata$scnid == scn_metadata$scnid[i]] + b2 <- scn_metadata$scn_coord2pix_b2[scn_metadata$scnid == scn_metadata$scnid[i]] + b3 <- scn_metadata$scn_coord2pix_b3[scn_metadata$scnid == scn_metadata$scnid[i]] + b4 <- scn_metadata$scn_coord2pix_b4[scn_metadata$scnid == scn_metadata$scnid[i]] + b5 <- scn_metadata$scn_coord2pix_b5[scn_metadata$scnid == scn_metadata$scnid[i]] + b6 <- scn_metadata$scn_coord2pix_b6[scn_metadata$scnid == scn_metadata$scnid[i]] + b7 <- scn_metadata$scn_coord2pix_b7[scn_metadata$scnid == scn_metadata$scnid[i]] + b8 <- scn_metadata$scn_coord2pix_b8[scn_metadata$scnid == scn_metadata$scnid[i]] + b9 <- scn_metadata$scn_coord2pix_b9[scn_metadata$scnid == scn_metadata$scnid[i]] + # B11 <- scn_metadata$scn_pix2coord_b11[scn_metadata$scnid==scn_metadata$scnid[i]] # B12 <- scn_metadata$scn_pix2coord_b12[scn_metadata$scnid==scn_metadata$scnid[i]] # B13 <- scn_metadata$scn_pix2coord_b13[scn_metadata$scnid==scn_metadata$scnid[i]] @@ -98,115 +97,107 @@ for (i in 1:length(filelist)){ # B22 <- scn_metadata$scn_pix2coord_b22[scn_metadata$scnid==scn_metadata$scnid[i]] # B23 <- scn_metadata$scn_pix2coord_b23[scn_metadata$scnid==scn_metadata$scnid[i]] # B24 <- scn_metadata$scn_pix2coord_b24[scn_metadata$scnid==scn_metadata$scnid[i]] - + # scn_metadata$scnUTMzone[scn_metadata$scnid==scn_metadata$scnid[i]] - + # sw_l= B11 + B12*swE + B13*swN + B14*swE*swN # nw_l= B11 + B12*nwE + B13*nwN + B14*nwE*nwN # ne_l= B11 + B12*neE + B13*neN + B14*neE*neN - # se_l= B11 + B12*seE + B13*seN + B14*seE*seN - # + # se_l= B11 + B12*seE + B13*seN + B14*seE*seN + # # sw_p= B21 + B22*swE + B23*swN + B24*swE*swN # nw_p= B21 + B22*nwE + B23*nwN + B24*nwE*nwN # ne_p= B21 + B22*neE + B23*neN + B24*neE*neN # se_p= B21 + B22*seE + B23*seN + B24*seE*seN - - ##l=line p=pixel - sw_p <- a0 + a1*swN + a2*swE + a3*swN*swE + a4*swN^2 + a5*swE^2 + a6*swN^2*swE + a7*swN*swE^2 + a8*swN^3 + a9*swE^3 - nw_p <- a0 + a1*nwN + a2*nwE + a3*nwN*nwE + a4*nwN^2 + a5*nwE^2 + a6*nwN^2*nwE + a7*nwN*nwE^2 + a8*nwN^3 + a9*nwE^3 - ne_p <- a0 + a1*neN + a2*neE + a3*neN*neE + a4*neN^2 + a5*neE^2 + a6*neN^2*neE + a7*neN*neE^2 + a8*neN^3 + 
a9*neE^3 - se_p <- a0 + a1*seN + a2*seE + a3*seN*seE + a4*seN^2 + a5*seE^2 + a6*seN^2*seE + a7*seN*seE^2 + a8*seN^3 + a9*seE^3 - - sw_l <- b0 + b1*swN + b2*swE + b3*swN*swE + b4*swN^2 + b5*swE^2 + b6*swN^2*swE + b7*swN*swE^2 + b8*swN^3 + b9*swE^3 - nw_l <- b0 + b1*nwN + b2*nwE + b3*nwN*nwE + b4*nwN^2 + b5*nwE^2 + b6*nwN^2*nwE + b7*nwN*nwE^2 + b8*nwN^3 + b9*nwE^3 - ne_l <- b0 + b1*neN + b2*neE + b3*neN*neE + b4*neN^2 + b5*neE^2 + b6*neN^2*neE + b7*neN*neE^2 + b8*neN^3 + b9*neE^3 - se_l <- b0 + b1*seN + b2*seE + b3*seN*seE + b4*seN^2 + b5*seE^2 + b6*seN^2*seE + b7*seN*seE^2 + b8*seN^3 + b9*seE^3 - - lpcoords <-rbind(c(sw_l, sw_p), c(nw_l, nw_p),c(ne_l, ne_p), c(se_l, se_p), c(sw_l, sw_p)) ##l=line p=pixel - - Sr1<- Polygon(lpcoords) - Srs1<- Polygons(list(Sr1),"sr1") - SpP<-SpatialPolygons(list(Srs1)) - #SpatialPolygons(lpcorners) - plotarea<-SpP@polygons[[1]]@area - + + ## l=line p=pixel + sw_p <- a0 + a1 * swN + a2 * swE + a3 * swN * swE + a4 * swN^2 + a5 * swE^2 + a6 * swN^2 * swE + a7 * swN * swE^2 + a8 * swN^3 + a9 * swE^3 + nw_p <- a0 + a1 * nwN + a2 * nwE + a3 * nwN * nwE + a4 * nwN^2 + a5 * nwE^2 + a6 * nwN^2 * nwE + a7 * nwN * nwE^2 + a8 * nwN^3 + a9 * nwE^3 + ne_p <- a0 + a1 * neN + a2 * neE + a3 * neN * neE + a4 * neN^2 + a5 * neE^2 + a6 * neN^2 * neE + a7 * neN * neE^2 + a8 * neN^3 + a9 * neE^3 + se_p <- a0 + a1 * seN + a2 * seE + a3 * seN * seE + a4 * seN^2 + a5 * seE^2 + a6 * seN^2 * seE + a7 * seN * seE^2 + a8 * seN^3 + a9 * seE^3 + + sw_l <- b0 + b1 * swN + b2 * swE + b3 * swN * swE + b4 * swN^2 + b5 * swE^2 + b6 * swN^2 * swE + b7 * swN * swE^2 + b8 * swN^3 + b9 * swE^3 + nw_l <- b0 + b1 * nwN + b2 * nwE + b3 * nwN * nwE + b4 * nwN^2 + b5 * nwE^2 + b6 * nwN^2 * nwE + b7 * nwN * nwE^2 + b8 * nwN^3 + b9 * nwE^3 + ne_l <- b0 + b1 * neN + b2 * neE + b3 * neN * neE + b4 * neN^2 + b5 * neE^2 + b6 * neN^2 * neE + b7 * neN * neE^2 + b8 * neN^3 + b9 * neE^3 + se_l <- b0 + b1 * seN + b2 * seE + b3 * seN * seE + b4 * seN^2 + b5 * seE^2 + b6 * seN^2 * seE + b7 * seN * seE^2 + b8 * seN^3 + b9 * seE^3 + + lpcoords <- rbind(c(sw_l, sw_p), c(nw_l, nw_p), c(ne_l, ne_p), c(se_l, se_p), c(sw_l, sw_p)) ## l=line p=pixel + + Sr1 <- Polygon(lpcoords) + Srs1 <- Polygons(list(Sr1), "sr1") + SpP <- SpatialPolygons(list(Srs1)) + # SpatialPolygons(lpcorners) + plotarea <- SpP@polygons[[1]]@area + # utmext <- extent(303438,303521,5125112,5125367) - #extract(rasterHH, SpP, method='simple', buffer=NULL, small=T, fun=mean, df=T) - HHcells<-as.data.frame(extract(rasterHH, SpP, method='simple',cellnumbers=T)[[1]]) - HVcells<-as.data.frame(extract(rasterHV, SpP, method='simple',cellnumbers=T)[[1]]) -# par(mfrow=c(1,1)) -# plot(HHcells$value,HVcells$value) - - HHcells$cell==HVcells$cell ##check to make sure HH and HV are aligned - + # extract(rasterHH, SpP, method='simple', buffer=NULL, small=T, fun=mean, df=T) + HHcells <- as.data.frame(extract(rasterHH, SpP, method = "simple", cellnumbers = T)[[1]]) + HVcells <- as.data.frame(extract(rasterHV, SpP, method = "simple", cellnumbers = T)[[1]]) + # par(mfrow=c(1,1)) + # plot(HHcells$value,HVcells$value) + + HHcells$cell == HVcells$cell ## check to make sure HH and HV are aligned + cells <- HHcells$cell # r<-rasterFromCells(rasterHH,cells) - #r@extent ##useful, but I'm not using this yet - rows<-unique(rowFromCell(rasterHH,cells)) - cols<-unique(colFromCell(rasterHH,cells)) - HH<-matrix(NA,length(rows),length(cols)) - HV<-matrix(NA,length(rows),length(cols)) - -####### -##This step is slow and inelegant. 
I am filling each cell in an m x n matrix - for(j in 1:length(rows)){ - for(k in 1:length(cols)){ - HH[j,k]<-rasterHH[rows[j],cols[k]] - HV[j,k]<-rasterHV[rows[j],cols[k]] - #print(c("k=",100-(100*(k/length(cols))))) + # r@extent ##useful, but I'm not using this yet + rows <- unique(rowFromCell(rasterHH, cells)) + cols <- unique(colFromCell(rasterHH, cells)) + HH <- matrix(NA, length(rows), length(cols)) + HV <- matrix(NA, length(rows), length(cols)) + + ####### + ## This step is slow and inelegant. I am filling each cell in an m x n matrix + for (j in 1:length(rows)) { + for (k in 1:length(cols)) { + HH[j, k] <- rasterHH[rows[j], cols[k]] + HV[j, k] <- rasterHV[rows[j], cols[k]] + # print(c("k=",100-(100*(k/length(cols))))) } - print(c("j=",100-(100*(j/length(cols))))) + print(c("j=", 100 - (100 * (j / length(cols))))) } -image(HH, col=gray(1:255/255)) -#overlay(HH,HV, fun=function(HH,HV){return(HH*HV)} ) - dates<-as.date(as.character(substr(output[2:nrow(output),2],1,8)),order='ymd') -# jet.colors <- colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan", "#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000")) - - - par(mfrow=c(1,3)) - image(HH, col=gray(1:255/255)) #, xlim=c(swE,seE), ylim=c(swN,nwN)) - title(main=c(as.character(dates[i]),'HH')) - image(HV, col=gray(1:255/255)) #, xlim=c(swE,seE), ylim=c(swN,nwN)) - title(main=c(as.character(dates[i]),'HV')) - image(HH-HV, col=gray(1:255/255)) - title(main=c(as.character(dates[i]),'HH-HV')) - + image(HH, col = gray(1:255 / 255)) + # overlay(HH,HV, fun=function(HH,HV){return(HH*HV)} ) + dates <- as.date(as.character(substr(output[2:nrow(output), 2], 1, 8)), order = "ymd") + # jet.colors <- colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan", "#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000")) + + + par(mfrow = c(1, 3)) + image(HH, col = gray(1:255 / 255)) # , xlim=c(swE,seE), ylim=c(swN,nwN)) + title(main = c(as.character(dates[i]), "HH")) + image(HV, col = gray(1:255 / 255)) # , xlim=c(swE,seE), ylim=c(swN,nwN)) + title(main = c(as.character(dates[i]), "HV")) + image(HH - HV, col = gray(1:255 / 255)) + title(main = c(as.character(dates[i]), "HH-HV")) + scn_metadata$scnid[i] } # image(xyLayer(HH,HV),asp=1) -# +# # cbind(1:ncell(r), getValues(r)) -# +# # nrow = sum(diff(HH$cell)>1)+1 # #ncol = ceiling(length(HH$value)/nrow) # ncol=1;myrow = 1; mycol=1 -# for(i in 1:length(HH$value)){ +# for(i in 1:length(HH$value)){ # if(c(diff(HH$cell),1)[i]>1){ # ncol = max(c(ncol,mycol)) # myrow = myrow + 1 # mycol = 1 # } else{ # mycol = mycol + 1 -# } +# } # } # HHmat = matrix(NA,nrow,ncol) # myrow = 1; mycol=1 -# for(i in 1:length(HH$value)){ +# for(i in 1:length(HH$value)){ # HHmat[myrow,mycol] = HH$value[i] # if(c(diff(HH$cell),1)[i]>1){ # myrow = myrow + 1 # mycol = 1 # } else{ # mycol = mycol + 1 -# } +# } # } # image(HHmat) - - - - - - - - diff --git a/modules/data.remote/inst/scripts/old/extract_polygons_function_v05092013.R b/modules/data.remote/inst/scripts/old/extract_polygons_function_v05092013.R index 53f8de02179..229bb1e7b96 100644 --- a/modules/data.remote/inst/scripts/old/extract_polygons_function_v05092013.R +++ b/modules/data.remote/inst/scripts/old/extract_polygons_function_v05092013.R @@ -1,44 +1,44 @@ -##this script will access a palsar file on my local machine to extract radar returns from a polygon -##associated with my UNDERC plot +## this script will access a palsar file on my local machine to extract radar returns from a polygon +## associated with my UNDERC plot -##I'm going to try doing this with rgdal instead of raster 
package since that seems to be producing nonsense +## I'm going to try doing this with rgdal instead of raster package since that seems to be producing nonsense -##Author: Brady S. Hardiman 04/30/2013 +## Author: Brady S. Hardiman 04/30/2013 -##load required pkgs and libraries +## load required pkgs and libraries require(rgdal) library(sp) -scn_metadata <- read.table(file="/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", header=T, sep="\t") +scn_metadata <- read.table(file = "/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", header = T, sep = "\t") -##if running this as a function dir can/should be an argument +## if running this as a function dir can/should be an argument setwd("/home/bhardima/pecan/modules/data.remote/palsar_scenes/UNDERC/") -filelist <- as.vector(list.dirs(path=getwd() ,recursive=F)) -# for (i in 1:length(filelist)){ - i=1 - inpath <-filelist[i] +filelist <- as.vector(list.dirs(path = getwd(), recursive = F)) +# for (i in 1:length(filelist)){ +i <- 1 +inpath <- filelist[i] -scnHH <- Sys.glob(file.path(inpath,"*_HH.tif")) -scnHV <- Sys.glob(file.path(inpath,"*_HV.tif")) +scnHH <- Sys.glob(file.path(inpath, "*_HH.tif")) +scnHV <- Sys.glob(file.path(inpath, "*_HV.tif")) -kml <- attributes(readOGR(Sys.glob(file.path(inpath,"*.kml")),'Layer #0')) ##read info from kml -bbox <- kml$bbox +kml <- attributes(readOGR(Sys.glob(file.path(inpath, "*.kml")), "Layer #0")) ## read info from kml +bbox <- kml$bbox CRS(kml$proj4string) -proj <- CRS(" +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +towgs84=0,0,0 ") -# +proj <- CRS(" +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +towgs84=0,0,0 ") +# # rasterHH <- raster(scnHH) # rasterHV <- raster(scnHV) -HH<-readGDAL(scnHH) ##alt: use readGDAL? +HH <- readGDAL(scnHH) ## alt: use readGDAL? # spTransform(HH,CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +towgs84=0,0,0 ")) - -HV<-GDAL.open(scnHV) -#displayDataset(x, offset=c(0, 0), region.dim=dim(x), reduction = 1,band = 1, col = NULL, reset.par = TRUE, max.dim = 500, ...) +HV <- GDAL.open(scnHV) -#getColorTable(HH, band = 1) +# displayDataset(x, offset=c(0, 0), region.dim=dim(x), reduction = 1,band = 1, col = NULL, reset.par = TRUE, max.dim = 500, ...) 
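## ---------------------------------------------------------------------------
## Editorial sketch, not part of the patched script: applying the calibration
## factor documented in extract_palsar_metadata_function.R above. That comment
## gives the level 1.5 relation only as "s0 = 10*log10 + CF"; the usual full
## form, sigma0 (dB) = 10 * log10(DN^2) + CF, is assumed here. The helper name
## and the idea of pulling CF from the metadata CSV are illustrative only.
dn_to_sigma0 <- function(dn, cf) {
  10 * log10(dn^2) + cf # backscatter in dB from a level 1.5 pixel amplitude
}
## e.g. sigma0_HH <- dn_to_sigma0(HH$band1, cf), with cf taken from the scene's
## calibration factor recorded by extract_palsar_metadata_function.R.
## ---------------------------------------------------------------------------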
+ +# getColorTable(HH, band = 1) @@ -46,24 +46,22 @@ HV<-GDAL.open(scnHV) dim(HH) trim(HH) projectRaster(HH) -hh<-getRasterTable(HH, band = 1, offset = c(140,650),region.dim = c(250,250)) ##origin is at bottom right (se corner) -#region.dim = (0.03*dim(HH)) -x<-unique(hh$x) -y<-unique(hh$y) -hh_mat<-matrix(NA,length(x),length(y),) -for(j in 1:length(x)){ - for(k in 1:length(y)){ - hh_mat[j,k]<-hh$band1[hh$y==y[k] & hh$x==x[j]] - #hv_mat[j,k]<-hv$band1[hv$x==x[j] & hv$y==y[k]] +hh <- getRasterTable(HH, band = 1, offset = c(140, 650), region.dim = c(250, 250)) ## origin is at bottom right (se corner) +# region.dim = (0.03*dim(HH)) +x <- unique(hh$x) +y <- unique(hh$y) +hh_mat <- matrix(NA, length(x), length(y), ) +for (j in 1:length(x)) { + for (k in 1:length(y)) { + hh_mat[j, k] <- hh$band1[hh$y == y[k] & hh$x == x[j]] + # hv_mat[j,k]<-hv$band1[hv$x==x[j] & hv$y==y[k]] } - print(c("j=",100-(100*(j/length(x))))) + print(c("j=", 100 - (100 * (j / length(x))))) } -image(hh_mat, col=gray(1:255/255)) +image(hh_mat, col = gray(1:255 / 255)) plot(density(hh_mat)) -plot(hh[1:10,1],hh[1:10,2]) -plot(hh[seq(1,nrow(hh),by=100),1],hh[seq(1,nrow(hh),by=100),3]) -plot(hh[seq(1,nrow(hh),by=100),2],hh[seq(1,nrow(hh),by=100),3]) - - +plot(hh[1:10, 1], hh[1:10, 2]) +plot(hh[seq(1, nrow(hh), by = 100), 1], hh[seq(1, nrow(hh), by = 100), 3]) +plot(hh[seq(1, nrow(hh), by = 100), 2], hh[seq(1, nrow(hh), by = 100), 3]) diff --git a/modules/data.remote/inst/scripts/old/extract_polygons_function_v05282013.R b/modules/data.remote/inst/scripts/old/extract_polygons_function_v05282013.R index bf70ccdbd24..375006c00eb 100644 --- a/modules/data.remote/inst/scripts/old/extract_polygons_function_v05282013.R +++ b/modules/data.remote/inst/scripts/old/extract_polygons_function_v05282013.R @@ -1,125 +1,127 @@ -##this script will access a palsar file on my local machine to extract radar returns from a polygon -##associated with my UNDERC plot +## this script will access a palsar file on my local machine to extract radar returns from a polygon +## associated with my UNDERC plot -##---------------------------------------------------------------------------------------------------- -##Prior to running this script, use MapReady to create GeoTiffs for each band in each scene -##---------------------------------------------------------------------------------------------------- +## ---------------------------------------------------------------------------------------------------- +## Prior to running this script, use MapReady to create GeoTiffs for each band in each scene +## ---------------------------------------------------------------------------------------------------- -##Author: Brady S. Hardiman 04/30/2013 +## Author: Brady S. 
Hardiman 04/30/2013 -##load required pkgs and libraries +## load required pkgs and libraries require(rgdal) library(raster) library(sp) -scn_metadata <- read.table(file="/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", header=T, sep="\t") - - -##if running this as a function dir can/should be an argument -setwd("/home/bhardima/pecan/modules/data.remote/palsar_scenes/Link_to_cheas/") ##This goes to a shortcut which redirects to the location of the palsar files -filelist <- as.vector(list.dirs(path=getwd() ,recursive=F)) - - -for (i in 1:length(filelist)){ -inpath <-filelist[i] - -kml <- attributes(readOGR(Sys.glob(file.path(inpath,"*.kml")),'Layer #0')) ##read info from kml -kml$proj4string -bbox <- kml$bbox - -scnHH <- Sys.glob(file.path(inpath,"*_HH.tif")) -scnHV <- Sys.glob(file.path(inpath,"*_HV.tif")) - -# scnHH <- Sys.glob(file.path(inpath,"ALPSRP071200910-H1.5_UA-zone15_HH.tif")) - -# scnHH <- Sys.glob(file.path(inpath,"IMG-HH-*")) -# scnHV <- Sys.glob(file.path(inpath,"IMG-HV-*")) - -HH<-raster(scnHH) -HV<-raster(scnHV) -dataType(HH) -trim(HH) - -##UNDERC plot corner coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. -#decdeg_coords <-rbind(c(303438.40, 5125111.76), c(303421.10, 5125363.16),c(303520.75, 5125366.81), c(303536.45, 5125116.63), c(303438.40, 5125111.76)) ##Dec degrees - -#Twin Island Lake bounding box coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. -# coords <-rbind( -# c(299333.08,5121459.90), -# c(299386.41,5123085.28), -# c(300909.31,5123035.49), -# c(300856.39,5121410.11), -# c(299333.08,5121459.90)) - -# ##Twin Island Lake bounding box coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. 
-# decdeg_coords <-rbind( -# c(46.217102,-89.601745), -# c(46.231731,-89.601745), -# c(46.231731,-89.581994), -# c(46.217102,-89.581994), -# c(46.217102,-89.601745)) - -##Pomeroy Lake zero test (POINTS) -coords <-rbind( - c(301137.43, 5127921.18), - c(301146.58, 5128202.28), - c(301720.76, 5128183.60), - c(301711.63, 5127902.50)) - -##Pomeroy Lake zero test (POLYGON) -coords <-rbind( - c(301137.43, 5127921.18), - c(301146.58, 5128202.28), - c(301720.76, 5128183.60), - c(301711.63, 5127902.50), - c(301137.43, 5127921.18)) - -##Pomeroy Lake zero test (WERIRD POLYGON) -coords <-rbind( - c(300432.54,5127593.85), - c(300564.37,5128034.23), - c(300830.18,5128032.83), - c(300856.44,5127709.75), - c(300432.54,5127593.85)) - -# swN <-decdeg_coords[1,1] #southwest corner easting=swE, southwest corner northing=swN -# nwN <-decdeg_coords[2,1] -# neN <-decdeg_coords[3,1] -# seN <-decdeg_coords[4,1] -# -# swE <-decdeg_coords[1,2] -# nwE <-decdeg_coords[2,2] -# neE <-decdeg_coords[3,2] -# seE <-decdeg_coords[4,2] - -##----------------------------------------------------------------------------------------- -## add offset to compensate for lack of registration in PALSAR scenes -##---------------------------------------------------------------------------------------- -offset <- 600 - -coords <- cbind(coords[,1]-offset,coords[,2]) - - - -Sr1<- Polygon(coords) -Srs1<- Polygons(list(Sr1),"sr1") -SpP<-SpatialPolygons(list(Srs1)) -#SpatialPolygons(lpcorners) -plotarea<-SpP@polygons[[1]]@area - -hh_crop <- crop(HH, SpP) -hv_crop <- crop(HV, SpP) - -date<-as.character(scn_metadata$scndate[i]) - -par(mfrow=c(1,2)) -image(hh_crop, main=paste("HH",date, sep=" "), xlab="Easting (m)",ylab="Northing (m)", col=gray(1:255/255)) -image(hv_crop, main=paste("HV",date, sep=" "), xlab="Easting (m)",ylab="Northing (m)", col=gray(1:255/255)) - -hh_vals<-extract(HH, bbox, method='simple', cellnumbers=T) - -hh_vals<-extract(HH, coords, method='simple', cellnumbers=T) -hv_vals<-extract(HV, coords, method='simple', cellnumbers=T) - -} \ No newline at end of file +scn_metadata <- read.table(file = "/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", header = T, sep = "\t") + + +## if running this as a function dir can/should be an argument +setwd("/home/bhardima/pecan/modules/data.remote/palsar_scenes/Link_to_cheas/") ## This goes to a shortcut which redirects to the location of the palsar files +filelist <- as.vector(list.dirs(path = getwd(), recursive = F)) + + +for (i in 1:length(filelist)) { + inpath <- filelist[i] + + kml <- attributes(readOGR(Sys.glob(file.path(inpath, "*.kml")), "Layer #0")) ## read info from kml + kml$proj4string + bbox <- kml$bbox + + scnHH <- Sys.glob(file.path(inpath, "*_HH.tif")) + scnHV <- Sys.glob(file.path(inpath, "*_HV.tif")) + + # scnHH <- Sys.glob(file.path(inpath,"ALPSRP071200910-H1.5_UA-zone15_HH.tif")) + + # scnHH <- Sys.glob(file.path(inpath,"IMG-HH-*")) + # scnHV <- Sys.glob(file.path(inpath,"IMG-HV-*")) + + HH <- raster(scnHH) + HV <- raster(scnHV) + dataType(HH) + trim(HH) + + ## UNDERC plot corner coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. + # decdeg_coords <-rbind(c(303438.40, 5125111.76), c(303421.10, 5125363.16),c(303520.75, 5125366.81), c(303536.45, 5125116.63), c(303438.40, 5125111.76)) ##Dec degrees + + # Twin Island Lake bounding box coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. 
+ # coords <-rbind( + # c(299333.08,5121459.90), + # c(299386.41,5123085.28), + # c(300909.31,5123035.49), + # c(300856.39,5121410.11), + # c(299333.08,5121459.90)) + + # ##Twin Island Lake bounding box coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. + # decdeg_coords <-rbind( + # c(46.217102,-89.601745), + # c(46.231731,-89.601745), + # c(46.231731,-89.581994), + # c(46.217102,-89.581994), + # c(46.217102,-89.601745)) + + ## Pomeroy Lake zero test (POINTS) + coords <- rbind( + c(301137.43, 5127921.18), + c(301146.58, 5128202.28), + c(301720.76, 5128183.60), + c(301711.63, 5127902.50) + ) + + ## Pomeroy Lake zero test (POLYGON) + coords <- rbind( + c(301137.43, 5127921.18), + c(301146.58, 5128202.28), + c(301720.76, 5128183.60), + c(301711.63, 5127902.50), + c(301137.43, 5127921.18) + ) + + ## Pomeroy Lake zero test (WERIRD POLYGON) + coords <- rbind( + c(300432.54, 5127593.85), + c(300564.37, 5128034.23), + c(300830.18, 5128032.83), + c(300856.44, 5127709.75), + c(300432.54, 5127593.85) + ) + + # swN <-decdeg_coords[1,1] #southwest corner easting=swE, southwest corner northing=swN + # nwN <-decdeg_coords[2,1] + # neN <-decdeg_coords[3,1] + # seN <-decdeg_coords[4,1] + # + # swE <-decdeg_coords[1,2] + # nwE <-decdeg_coords[2,2] + # neE <-decdeg_coords[3,2] + # seE <-decdeg_coords[4,2] + + ## ----------------------------------------------------------------------------------------- + ## add offset to compensate for lack of registration in PALSAR scenes + ## ---------------------------------------------------------------------------------------- + offset <- 600 + + coords <- cbind(coords[, 1] - offset, coords[, 2]) + + + + Sr1 <- Polygon(coords) + Srs1 <- Polygons(list(Sr1), "sr1") + SpP <- SpatialPolygons(list(Srs1)) + # SpatialPolygons(lpcorners) + plotarea <- SpP@polygons[[1]]@area + + hh_crop <- crop(HH, SpP) + hv_crop <- crop(HV, SpP) + + date <- as.character(scn_metadata$scndate[i]) + + par(mfrow = c(1, 2)) + image(hh_crop, main = paste("HH", date, sep = " "), xlab = "Easting (m)", ylab = "Northing (m)", col = gray(1:255 / 255)) + image(hv_crop, main = paste("HV", date, sep = " "), xlab = "Easting (m)", ylab = "Northing (m)", col = gray(1:255 / 255)) + + hh_vals <- extract(HH, bbox, method = "simple", cellnumbers = T) + + hh_vals <- extract(HH, coords, method = "simple", cellnumbers = T) + hv_vals <- extract(HV, coords, method = "simple", cellnumbers = T) +} diff --git a/modules/data.remote/inst/scripts/old/extract_polygons_function_v06252013.R b/modules/data.remote/inst/scripts/old/extract_polygons_function_v06252013.R index c5312d9c386..c1e9036bde0 100644 --- a/modules/data.remote/inst/scripts/old/extract_polygons_function_v06252013.R +++ b/modules/data.remote/inst/scripts/old/extract_polygons_function_v06252013.R @@ -1,129 +1,129 @@ -##this script will access a palsar file on my local machine to extract radar returns from a polygon -##associated with my UNDERC plot +## this script will access a palsar file on my local machine to extract radar returns from a polygon +## associated with my UNDERC plot -##---------------------------------------------------------------------------------------------------- -##Prior to running this script, use MapReady to create GeoTiffs for each band in each scene -##---------------------------------------------------------------------------------------------------- +## ---------------------------------------------------------------------------------------------------- +## Prior to running this script, 
use MapReady to create GeoTiffs for each band in each scene +## ---------------------------------------------------------------------------------------------------- -##Author: Brady S. Hardiman 04/30/2013 +## Author: Brady S. Hardiman 04/30/2013 -##load required pkgs and libraries +## load required pkgs and libraries require(rgdal) library(raster) library(sp) -scn_metadata <- read.table(file="/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", header=T, sep="\t") +scn_metadata <- read.table(file = "/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", header = T, sep = "\t") -##if running this as a function dir can/should be an argument +## if running this as a function dir can/should be an argument working_path <- "/home/bhardima/pecan/modules/data.remote/palsar_scenes/Link_to_cheas/geo_corrected_single_gamma/HV" setwd(working_path) # Sys.glob(file.path(working_path,"*.tif")) ##This goes to a shortcut which redirects to the location of the palsar files # filelist <- as.vector(list.dirs(path=getwd() ,recursive=F)) -filelist <- as.vector(list.files(path=getwd(), pattern=".tif" ,recursive=F)) - -for (i in 1:length(filelist)){ -HH_inpath <-file.path(getwd(),filelist[i]) -HV_inpath <-file.path(getwd(),filelist[i]) - -# kml <- attributes(readOGR(Sys.glob(file.path(inpath,"*.kml")),'Layer #0')) ##read info from kml -# kml$proj4string -# bbox <- kml$bbox - -# scnHH <- Sys.glob(file.path(inpath,"*_HH.tif")) -# scnHV <- Sys.glob(file.path(inpath,"*_HV.tif")) - -# scnHH <- Sys.glob(file.path(inpath,"ALPSRP071200910-H1.5_UA-zone15_HH.tif")) - -# scnHH <- Sys.glob(file.path(inpath,"IMG-HH-*")) -# scnHV <- Sys.glob(file.path(inpath,"IMG-HV-*")) -# scn_dual<-raster(inpath) - -HH<-raster(HH_inpath) -HV<-raster(HV_inpath) -dataType(HH) -trim(HH) - -##UNDERC plot corner coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. -#decdeg_coords <-rbind(c(303438.40, 5125111.76), c(303421.10, 5125363.16),c(303520.75, 5125366.81), c(303536.45, 5125116.63), c(303438.40, 5125111.76)) ##Dec degrees - -# ## Twin Island Lake bounding box coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. - coords <-rbind( - c(299333.08,5121459.90), - c(299386.41,5123085.28), - c(300909.31,5123035.49), - c(300856.39,5121410.11), - c(299333.08,5121459.90)) - -##Twin Island Lake bounding box coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. 
-# decdeg_coords <-rbind( -# c(46.217102,-89.601745), -# c(46.231731,-89.601745), -# c(46.231731,-89.581994), -# c(46.217102,-89.581994), -# c(46.217102,-89.601745)) - -# ##Pomeroy Lake zero test (POINTS) -# coords <-rbind( -# c(301137.43, 5127921.18), -# c(301146.58, 5128202.28), -# c(301720.76, 5128183.60), -# c(301711.63, 5127902.50)) -# -# ##Pomeroy Lake zero test (POLYGON) -# coords <-rbind( -# c(301137.43, 5127921.18), -# c(301146.58, 5128202.28), -# c(301720.76, 5128183.60), -# c(301711.63, 5127902.50), -# c(301137.43, 5127921.18)) -# -# ##Pomeroy Lake zero test (WERIRD POLYGON) -# coords <-rbind( -# c(300432.54,5127593.85), -# c(300564.37,5128034.23), -# c(300830.18,5128032.83), -# c(300856.44,5127709.75), -# c(300432.54,5127593.85)) - -# swN <-decdeg_coords[1,1] #southwest corner easting=swE, southwest corner northing=swN -# nwN <-decdeg_coords[2,1] -# neN <-decdeg_coords[3,1] -# seN <-decdeg_coords[4,1] -# -# swE <-decdeg_coords[1,2] -# nwE <-decdeg_coords[2,2] -# neE <-decdeg_coords[3,2] -# seE <-decdeg_coords[4,2] - -##----------------------------------------------------------------------------------------- -## add offset to compensate for lack of registration in PALSAR scenes -##---------------------------------------------------------------------------------------- -# offset <- 600 -# -# coords <- cbind(coords[,1]-offset,coords[,2]) - - - -Sr1<- Polygon(coords) -Srs1<- Polygons(list(Sr1),"sr1") -SpP<-SpatialPolygons(list(Srs1)) -#SpatialPolygons(lpcorners) -plotarea<-SpP@polygons[[1]]@area - -hh_crop <- crop(HH, SpP) -hv_crop <- crop(HV, SpP) - -date<-as.character(scn_metadata$scndate[i]) - -par(mfrow=c(1,2)) -image(hh_crop, main=paste("HH",date, sep=" "), xlab="Easting (m)",ylab="Northing (m)", col=gray(1:255/255)) -image(hv_crop, main=paste("HV",date, sep=" "), xlab="Easting (m)",ylab="Northing (m)", col=gray(1:255/255)) - -hh_vals<-extract(HH, bbox, method='simple', cellnumbers=T) - -hh_vals<-extract(HH, coords, method='simple', cellnumbers=T) -hv_vals<-extract(HV, coords, method='simple', cellnumbers=T) - -} \ No newline at end of file +filelist <- as.vector(list.files(path = getwd(), pattern = ".tif", recursive = F)) + +for (i in 1:length(filelist)) { + HH_inpath <- file.path(getwd(), filelist[i]) + HV_inpath <- file.path(getwd(), filelist[i]) + + # kml <- attributes(readOGR(Sys.glob(file.path(inpath,"*.kml")),'Layer #0')) ##read info from kml + # kml$proj4string + # bbox <- kml$bbox + + # scnHH <- Sys.glob(file.path(inpath,"*_HH.tif")) + # scnHV <- Sys.glob(file.path(inpath,"*_HV.tif")) + + # scnHH <- Sys.glob(file.path(inpath,"ALPSRP071200910-H1.5_UA-zone15_HH.tif")) + + # scnHH <- Sys.glob(file.path(inpath,"IMG-HH-*")) + # scnHV <- Sys.glob(file.path(inpath,"IMG-HV-*")) + # scn_dual<-raster(inpath) + + HH <- raster(HH_inpath) + HV <- raster(HV_inpath) + dataType(HH) + trim(HH) + + ## UNDERC plot corner coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. + # decdeg_coords <-rbind(c(303438.40, 5125111.76), c(303421.10, 5125363.16),c(303520.75, 5125366.81), c(303536.45, 5125116.63), c(303438.40, 5125111.76)) ##Dec degrees + + # ## Twin Island Lake bounding box coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. + coords <- rbind( + c(299333.08, 5121459.90), + c(299386.41, 5123085.28), + c(300909.31, 5123035.49), + c(300856.39, 5121410.11), + c(299333.08, 5121459.90) + ) + + ## Twin Island Lake bounding box coordinates (clockwise from lower left [ie, se]). 
First coordinate is repeated to close the Polygon. + # decdeg_coords <-rbind( + # c(46.217102,-89.601745), + # c(46.231731,-89.601745), + # c(46.231731,-89.581994), + # c(46.217102,-89.581994), + # c(46.217102,-89.601745)) + + # ##Pomeroy Lake zero test (POINTS) + # coords <-rbind( + # c(301137.43, 5127921.18), + # c(301146.58, 5128202.28), + # c(301720.76, 5128183.60), + # c(301711.63, 5127902.50)) + # + # ##Pomeroy Lake zero test (POLYGON) + # coords <-rbind( + # c(301137.43, 5127921.18), + # c(301146.58, 5128202.28), + # c(301720.76, 5128183.60), + # c(301711.63, 5127902.50), + # c(301137.43, 5127921.18)) + # + # ##Pomeroy Lake zero test (WERIRD POLYGON) + # coords <-rbind( + # c(300432.54,5127593.85), + # c(300564.37,5128034.23), + # c(300830.18,5128032.83), + # c(300856.44,5127709.75), + # c(300432.54,5127593.85)) + + # swN <-decdeg_coords[1,1] #southwest corner easting=swE, southwest corner northing=swN + # nwN <-decdeg_coords[2,1] + # neN <-decdeg_coords[3,1] + # seN <-decdeg_coords[4,1] + # + # swE <-decdeg_coords[1,2] + # nwE <-decdeg_coords[2,2] + # neE <-decdeg_coords[3,2] + # seE <-decdeg_coords[4,2] + + ## ----------------------------------------------------------------------------------------- + ## add offset to compensate for lack of registration in PALSAR scenes + ## ---------------------------------------------------------------------------------------- + # offset <- 600 + # + # coords <- cbind(coords[,1]-offset,coords[,2]) + + + + Sr1 <- Polygon(coords) + Srs1 <- Polygons(list(Sr1), "sr1") + SpP <- SpatialPolygons(list(Srs1)) + # SpatialPolygons(lpcorners) + plotarea <- SpP@polygons[[1]]@area + + hh_crop <- crop(HH, SpP) + hv_crop <- crop(HV, SpP) + + date <- as.character(scn_metadata$scndate[i]) + + par(mfrow = c(1, 2)) + image(hh_crop, main = paste("HH", date, sep = " "), xlab = "Easting (m)", ylab = "Northing (m)", col = gray(1:255 / 255)) + image(hv_crop, main = paste("HV", date, sep = " "), xlab = "Easting (m)", ylab = "Northing (m)", col = gray(1:255 / 255)) + + hh_vals <- extract(HH, bbox, method = "simple", cellnumbers = T) + + hh_vals <- extract(HH, coords, method = "simple", cellnumbers = T) + hv_vals <- extract(HV, coords, method = "simple", cellnumbers = T) +} diff --git a/modules/data.remote/inst/scripts/old/extract_polygons_function_v06262013.R b/modules/data.remote/inst/scripts/old/extract_polygons_function_v06262013.R index f76e0a60952..13f340e65b3 100644 --- a/modules/data.remote/inst/scripts/old/extract_polygons_function_v06262013.R +++ b/modules/data.remote/inst/scripts/old/extract_polygons_function_v06262013.R @@ -1,161 +1,156 @@ -##this script will access a palsar file on my local machine to extract radar returns from a polygon -##associated with my UNDERC plot +## this script will access a palsar file on my local machine to extract radar returns from a polygon +## associated with my UNDERC plot -##---------------------------------------------------------------------------------------------------- -##Prior to running this script, use MapReady to create GeoTiffs for each band in each scene -##---------------------------------------------------------------------------------------------------- +## ---------------------------------------------------------------------------------------------------- +## Prior to running this script, use MapReady to create GeoTiffs for each band in each scene +## ---------------------------------------------------------------------------------------------------- -##Author: Brady S. Hardiman 04/30/2013 +## Author: Brady S. 
Hardiman 04/30/2013 -##load required pkgs and libraries +## load required pkgs and libraries require(rgdal) library(raster) library(sp) -scn_metadata <- read.table(file="/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", header=T, sep="\t") +scn_metadata <- read.table(file = "/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", header = T, sep = "\t") -##if running this as a function dir can/should be an argument +## if running this as a function dir can/should be an argument inpath <- file.path("/home/bhardima/pecan/modules/data.remote/palsar_scenes/Link_to_cheas/geo_corrected_single_gamma") # setwd(inpath) # Sys.glob(file.path(working_path,"*.tif")) ##This goes to a shortcut which redirects to the location of the palsar files # filelist <- as.vector(list.dirs(path=getwd() ,recursive=F)) -# for (i in 1:length(filelist)){ - -for (i in 2:4){ - HH_filelist <- as.vector(list.files(file.path(inpath, "HH"), pattern=".tif" ,recursive=F)) - HH_inpath <-file.path(inpath,"HH", HH_filelist[i]) - - HV_filelist <- as.vector(list.files(file.path(inpath, "HV"), pattern=".tif" ,recursive=F)) - HV_inpath <-file.path(inpath,"HV", HV_filelist[i]) - -# kml <- attributes(readOGR(Sys.glob(file.path(inpath,"*.kml")),'Layer #0')) ##read info from kml -# kml$proj4string -# bbox <- kml$bbox - -# scnHH <- Sys.glob(file.path(inpath,"*_HH.tif")) -# scnHV <- Sys.glob(file.path(inpath,"*_HV.tif")) - -# scnHH <- Sys.glob(file.path(inpath,"ALPSRP071200910-H1.5_UA-zone15_HH.tif")) - -# scnHH <- Sys.glob(file.path(inpath,"IMG-HH-*")) -# scnHV <- Sys.glob(file.path(inpath,"IMG-HV-*")) -# scn_dual<-raster(inpath) - -HH<-raster(HH_inpath) -HV<-raster(HV_inpath) -# dataType(HH) -# trim(HH) - -##UNDERC plot corner coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. - coords <-rbind( - c(303438.40, 5125111.76), - c(303421.10, 5125363.16), - c(303520.75, 5125366.81), - c(303536.45, 5125116.63), - c(303438.40, 5125111.76)) - - -# ## Twin Island Lake bounding box coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. -# coords <-rbind( -# c(299333.08,5121459.90), -# c(299386.41,5123085.28), -# c(300909.31,5123035.49), -# c(300856.39,5121410.11), -# c(299333.08,5121459.90)) - -##Twin Island Lake bounding box coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. 
-# decdeg_coords <-rbind( -# c(46.217102,-89.601745), -# c(46.231731,-89.601745), -# c(46.231731,-89.581994), -# c(46.217102,-89.581994), -# c(46.217102,-89.601745)) - -# ##Pomeroy Lake zero test (POINTS) -# coords <-rbind( -# c(301137.43, 5127921.18), -# c(301146.58, 5128202.28), -# c(301720.76, 5128183.60), -# c(301711.63, 5127902.50)) -# -# ##Pomeroy Lake zero test (POLYGON) -# coords <-rbind( -# c(301137.43, 5127921.18), -# c(301146.58, 5128202.28), -# c(301720.76, 5128183.60), -# c(301711.63, 5127902.50), -# c(301137.43, 5127921.18)) -# -# ##Pomeroy Lake zero test (WERIRD POLYGON) -# coords <-rbind( -# c(300432.54,5127593.85), -# c(300564.37,5128034.23), -# c(300830.18,5128032.83), -# c(300856.44,5127709.75), -# c(300432.54,5127593.85)) - -# ##Lake Laura zero test (POLYGON) -# coords <-rbind( -# c(310951.00,5103162.00), -# c(310951.00,5103477.00), -# c(311328.00,5103477.00), -# c(311328.00,5103162.00), -# c(310951.00,5103162.00)) - -# swN <-decdeg_coords[1,1] #southwest corner easting=swE, southwest corner northing=swN -# nwN <-decdeg_coords[2,1] -# neN <-decdeg_coords[3,1] -# seN <-decdeg_coords[4,1] -# -# swE <-decdeg_coords[1,2] -# nwE <-decdeg_coords[2,2] -# neE <-decdeg_coords[3,2] -# seE <-decdeg_coords[4,2] - -##----------------------------------------------------------------------------------------- -## add offset to compensate for lack of registration in PALSAR scenes -##---------------------------------------------------------------------------------------- -# offset <- 600 -# -# coords <- cbind(coords[,1]-offset,coords[,2]) - - - -Sr1<- Polygon(coords) -Srs1<- Polygons(list(Sr1),"sr1") -SpP<-SpatialPolygons(list(Srs1)) -#SpatialPolygons(lpcorners) -plotarea<-SpP@polygons[[1]]@area - +# for (i in 1:length(filelist)){ + +for (i in 2:4) { + HH_filelist <- as.vector(list.files(file.path(inpath, "HH"), pattern = ".tif", recursive = F)) + HH_inpath <- file.path(inpath, "HH", HH_filelist[i]) + + HV_filelist <- as.vector(list.files(file.path(inpath, "HV"), pattern = ".tif", recursive = F)) + HV_inpath <- file.path(inpath, "HV", HV_filelist[i]) + + # kml <- attributes(readOGR(Sys.glob(file.path(inpath,"*.kml")),'Layer #0')) ##read info from kml + # kml$proj4string + # bbox <- kml$bbox + + # scnHH <- Sys.glob(file.path(inpath,"*_HH.tif")) + # scnHV <- Sys.glob(file.path(inpath,"*_HV.tif")) + + # scnHH <- Sys.glob(file.path(inpath,"ALPSRP071200910-H1.5_UA-zone15_HH.tif")) + + # scnHH <- Sys.glob(file.path(inpath,"IMG-HH-*")) + # scnHV <- Sys.glob(file.path(inpath,"IMG-HV-*")) + # scn_dual<-raster(inpath) + + HH <- raster(HH_inpath) + HV <- raster(HV_inpath) + # dataType(HH) + # trim(HH) + + ## UNDERC plot corner coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. + coords <- rbind( + c(303438.40, 5125111.76), + c(303421.10, 5125363.16), + c(303520.75, 5125366.81), + c(303536.45, 5125116.63), + c(303438.40, 5125111.76) + ) + + + # ## Twin Island Lake bounding box coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. + # coords <-rbind( + # c(299333.08,5121459.90), + # c(299386.41,5123085.28), + # c(300909.31,5123035.49), + # c(300856.39,5121410.11), + # c(299333.08,5121459.90)) + + ## Twin Island Lake bounding box coordinates (clockwise from lower left [ie, se]). First coordinate is repeated to close the Polygon. 
+ # decdeg_coords <-rbind( + # c(46.217102,-89.601745), + # c(46.231731,-89.601745), + # c(46.231731,-89.581994), + # c(46.217102,-89.581994), + # c(46.217102,-89.601745)) + + # ##Pomeroy Lake zero test (POINTS) + # coords <-rbind( + # c(301137.43, 5127921.18), + # c(301146.58, 5128202.28), + # c(301720.76, 5128183.60), + # c(301711.63, 5127902.50)) + # + # ##Pomeroy Lake zero test (POLYGON) + # coords <-rbind( + # c(301137.43, 5127921.18), + # c(301146.58, 5128202.28), + # c(301720.76, 5128183.60), + # c(301711.63, 5127902.50), + # c(301137.43, 5127921.18)) + # + # ##Pomeroy Lake zero test (WERIRD POLYGON) + # coords <-rbind( + # c(300432.54,5127593.85), + # c(300564.37,5128034.23), + # c(300830.18,5128032.83), + # c(300856.44,5127709.75), + # c(300432.54,5127593.85)) + + # ##Lake Laura zero test (POLYGON) + # coords <-rbind( + # c(310951.00,5103162.00), + # c(310951.00,5103477.00), + # c(311328.00,5103477.00), + # c(311328.00,5103162.00), + # c(310951.00,5103162.00)) + + # swN <-decdeg_coords[1,1] #southwest corner easting=swE, southwest corner northing=swN + # nwN <-decdeg_coords[2,1] + # neN <-decdeg_coords[3,1] + # seN <-decdeg_coords[4,1] + # + # swE <-decdeg_coords[1,2] + # nwE <-decdeg_coords[2,2] + # neE <-decdeg_coords[3,2] + # seE <-decdeg_coords[4,2] + + ## ----------------------------------------------------------------------------------------- + ## add offset to compensate for lack of registration in PALSAR scenes + ## ---------------------------------------------------------------------------------------- + # offset <- 600 + # + # coords <- cbind(coords[,1]-offset,coords[,2]) + + + + Sr1 <- Polygon(coords) + Srs1 <- Polygons(list(Sr1), "sr1") + SpP <- SpatialPolygons(list(Srs1)) + # SpatialPolygons(lpcorners) + plotarea <- SpP@polygons[[1]]@area + hh_crop <- crop(HH, SpP) hv_crop <- crop(HV, SpP) - - date<-as.character(scn_metadata$scndate[i]) - - par(mfrow=c(1,2)) - image(hh_crop, main=paste("HH",date, sep=" "), xlab="Easting (m)",ylab="Northing (m)", col=gray(1:255/255)) - image(hv_crop, main=paste("HV",date, sep=" "), xlab="Easting (m)",ylab="Northing (m)", col=gray(1:255/255)) - -values<-cbind(values, as.vector(extract(HH, SpP)[[1]]), as.vector(extract(HV, SpP)[[1]])) - -# hh_vals<-extract(HH, coords, method='simple', cellnumbers=T) -# hv_vals<-extract(HV, coords, method='simple', cellnumbers=T) - -} - -par(mfrow=c(2,3)) -plot(values[,1],values[,3]) -plot(values[,1],values[,5]) -plot(values[,3],values[,5]) -plot(values[,2],values[,4]) -plot(values[,2],values[,6]) -plot(values[,4],values[,6]) + date <- as.character(scn_metadata$scndate[i]) + par(mfrow = c(1, 2)) + image(hh_crop, main = paste("HH", date, sep = " "), xlab = "Easting (m)", ylab = "Northing (m)", col = gray(1:255 / 255)) + image(hv_crop, main = paste("HV", date, sep = " "), xlab = "Easting (m)", ylab = "Northing (m)", col = gray(1:255 / 255)) + values <- cbind(values, as.vector(extract(HH, SpP)[[1]]), as.vector(extract(HV, SpP)[[1]])) + # hh_vals<-extract(HH, coords, method='simple', cellnumbers=T) + # hv_vals<-extract(HV, coords, method='simple', cellnumbers=T) +} +par(mfrow = c(2, 3)) +plot(values[, 1], values[, 3]) +plot(values[, 1], values[, 5]) +plot(values[, 3], values[, 5]) +plot(values[, 2], values[, 4]) +plot(values[, 2], values[, 6]) +plot(values[, 4], values[, 6]) diff --git a/modules/data.remote/inst/scripts/old/ll.monod.R b/modules/data.remote/inst/scripts/old/ll.monod.R index ff0e2ac550c..a76cd71b5b3 100644 --- a/modules/data.remote/inst/scripts/old/ll.monod.R +++ 
b/modules/data.remote/inst/scripts/old/ll.monod.R @@ -1,61 +1,61 @@ -ll.monod <- function(params,x,y){ -## x<-HV_signal -## y<-wlef_abg$ABG_biomass - k<-params[1] - HVmax<-params[2] - sd<-params[3] - HVpred<-HVmax*(x/(x+k)) - - LL<- -sum(dnorm(y,HVpred,sd,log=TRUE)) +ll.monod <- function(params, x, y) { + ## x<-HV_signal + ## y<-wlef_abg$ABG_biomass + k <- params[1] + HVmax <- params[2] + sd <- params[3] + HVpred <- HVmax * (x / (x + k)) + + LL <- -sum(dnorm(y, HVpred, sd, log = TRUE)) return(LL) } -ll.monod2 <- function(params,x,y){ +ll.monod2 <- function(params, x, y) { ## x<-HV_signal ## y<-wlef_abg$ABG_biomass - k<-params[1] - HVmax<-params[2] + k <- params[1] + HVmax <- params[2] int <- params[3] - sd<-params[4] - HVpred<-HVmax*(x/(x+k))+int - - LL<- -sum(dnorm(y,HVpred,sd,log=TRUE)) + sd <- params[4] + HVpred <- HVmax * (x / (x + k)) + int + + LL <- -sum(dnorm(y, HVpred, sd, log = TRUE)) return(LL) } -ll.micmen <- function(params,x,y){ +ll.micmen <- function(params, x, y) { ## x<-HV_signal ## y<-wlef_abg$ABG_biomass - a<-params[1] - b<-params[2] - sd<-params[3] - HVpred<-(a*x)/(b+x) - - LL<- -sum(dnorm(y,HVpred,sd,log=TRUE)) + a <- params[1] + b <- params[2] + sd <- params[3] + HVpred <- (a * x) / (b + x) + + LL <- -sum(dnorm(y, HVpred, sd, log = TRUE)) return(LL) } -ll.holling3 <- function(params,x,y){ +ll.holling3 <- function(params, x, y) { ## x<-HV_signal ## y<-wlef_abg$ABG_biomass - a<-params[1] - b<-params[2] - sd<-params[3] - HVpred<-(a*x^2)/(b^2+x^2) - - LL<- -sum(dnorm(y,HVpred,sd,log=TRUE)) + a <- params[1] + b <- params[2] + sd <- params[3] + HVpred <- (a * x^2) / (b^2 + x^2) + + LL <- -sum(dnorm(y, HVpred, sd, log = TRUE)) return(LL) } -ll.holling4 <- function(params,x,y){ +ll.holling4 <- function(params, x, y) { ## x<-HV_signal ## y<-wlef_abg$ABG_biomass - a<-params[1] - b<-params[2] - c<-params[3] - sd<-params[4] - HVpred<-(a*x^2)/(b+ c*x+x^2) - - LL<- -sum(dnorm(y,HVpred,sd,log=TRUE)) + a <- params[1] + b <- params[2] + c <- params[3] + sd <- params[4] + HVpred <- (a * x^2) / (b + c * x + x^2) + + LL <- -sum(dnorm(y, HVpred, sd, log = TRUE)) return(LL) } diff --git a/modules/data.remote/inst/scripts/old/ll.monod_v07292013.R b/modules/data.remote/inst/scripts/old/ll.monod_v07292013.R index e5fbcbb3639..ac0f216c081 100644 --- a/modules/data.remote/inst/scripts/old/ll.monod_v07292013.R +++ b/modules/data.remote/inst/scripts/old/ll.monod_v07292013.R @@ -1,102 +1,102 @@ -ll.monod <- function(params,x,y){ -## x<-HV_signal -## y<-wlef_abg$ABG_biomass - k<-params[1] - HVmax<-params[2] - sd<-params[3] - HVpred<-HVmax*(x/(x+k)) - - LL<- -sum(dnorm(y,HVpred,sd,log=TRUE)) +ll.monod <- function(params, x, y) { + ## x<-HV_signal + ## y<-wlef_abg$ABG_biomass + k <- params[1] + HVmax <- params[2] + sd <- params[3] + HVpred <- HVmax * (x / (x + k)) + + LL <- -sum(dnorm(y, HVpred, sd, log = TRUE)) return(LL) } -ll.monod2 <- function(params,x,y){ +ll.monod2 <- function(params, x, y) { ## x<-HV_signal ## y<-wlef_abg$ABG_biomass - k<-params[1] - HVmax<-params[2] + k <- params[1] + HVmax <- params[2] int <- params[3] - sd<-params[4] - HVpred<-HVmax*(x/(x+k))+int - - LL<- -sum(dnorm(y,HVpred,sd,log=TRUE)) + sd <- params[4] + HVpred <- HVmax * (x / (x + k)) + int + + LL <- -sum(dnorm(y, HVpred, sd, log = TRUE)) return(LL) } -ll.micmen <- function(params,x,y){ +ll.micmen <- function(params, x, y) { ## x<-HV_signal ## y<-wlef_abg$ABG_biomass - a<-params[1] - b<-params[2] - sd<-params[3] - HVpred<-(a*x)/(b+x) - - LL<- -sum(dnorm(y,HVpred,sd,log=TRUE)) + a <- params[1] + b <- params[2] + sd <- 
params[3] + HVpred <- (a * x) / (b + x) + + LL <- -sum(dnorm(y, HVpred, sd, log = TRUE)) return(LL) } -ll.mono <- function(params,x,y){ +ll.mono <- function(params, x, y) { ## x<-HV_signal ## y<-wlef_abg$ABG_biomass - a<-params[1] - b<-params[2] + a <- params[1] + b <- params[2] int <- params[3] - sd<-params[4] - HVpred<-a*(1-exp(-b*x))+int - - LL<- -sum(dnorm(y,HVpred,sd,log=TRUE)) + sd <- params[4] + HVpred <- a * (1 - exp(-b * x)) + int + + LL <- -sum(dnorm(y, HVpred, sd, log = TRUE)) return(LL) } -ll.nrh <- function(params,x,y){ +ll.nrh <- function(params, x, y) { ## x<-HV_signal ## y<-wlef_abg$ABG_biomass - a<-params[1] - b<-params[2] + a <- params[1] + b <- params[2] theta <- params[3] int <- params[4] - sd<-params[5] - HVpred<-1/(2*theta)*(a*x+b-sqrt((a*x+b)^2-4*theta*a*b*x))+int - - LL<- -sum(dnorm(y,HVpred,sd,log=TRUE)) + sd <- params[5] + HVpred <- 1 / (2 * theta) * (a * x + b - sqrt((a * x + b)^2 - 4 * theta * a * b * x)) + int + + LL <- -sum(dnorm(y, HVpred, sd, log = TRUE)) return(LL) } -ll.holling2 <- function(params,x,y){ +ll.holling2 <- function(params, x, y) { ## x<-HV_signal ## y<-wlef_abg$ABG_biomass - a<-params[1] - b<-params[2] + a <- params[1] + b <- params[2] int <- params[3] - sd<-params[4] - HVpred<-(a*x)/(1+b*x)+int - - LL<- -sum(dnorm(y,HVpred,sd,log=TRUE)) + sd <- params[4] + HVpred <- (a * x) / (1 + b * x) + int + + LL <- -sum(dnorm(y, HVpred, sd, log = TRUE)) return(LL) } -ll.holling3 <- function(params,x,y){ +ll.holling3 <- function(params, x, y) { ## x<-HV_signal ## y<-wlef_abg$ABG_biomass - a<-params[1] - b<-params[2] - sd<-params[3] - HVpred<-(a*x^2)/(b^2+x^2) - - LL<- -sum(dnorm(y,HVpred,sd,log=TRUE)) + a <- params[1] + b <- params[2] + sd <- params[3] + HVpred <- (a * x^2) / (b^2 + x^2) + + LL <- -sum(dnorm(y, HVpred, sd, log = TRUE)) return(LL) } -ll.holling4 <- function(params,x,y){ +ll.holling4 <- function(params, x, y) { ## x<-HV_signal ## y<-wlef_abg$ABG_biomass - a<-params[1] - b<-params[2] - c<-params[3] - sd<-params[4] - HVpred<-(a*x^2)/(b+(c*x)+x^2) - - LL<- -sum(dnorm(y,HVpred,sd,log=TRUE)) + a <- params[1] + b <- params[2] + c <- params[3] + sd <- params[4] + HVpred <- (a * x^2) / (b + (c * x) + x^2) + + LL <- -sum(dnorm(y, HVpred, sd, log = TRUE)) return(LL) -} \ No newline at end of file +} diff --git a/modules/data.remote/inst/scripts/old/read_binary.R b/modules/data.remote/inst/scripts/old/read_binary.R index bcddaf40b7c..e272709b21a 100644 --- a/modules/data.remote/inst/scripts/old/read_binary.R +++ b/modules/data.remote/inst/scripts/old/read_binary.R @@ -1,12 +1,12 @@ ## Open binary file setwd("/home/bhardima/pecan/modules/data.remote/palsar_scenes/UNDERC/ALPSRP185270910-L1.5") -bin.file=file("LED-ALPSRP185270910-H1.5_UA","rb") +bin.file <- file("LED-ALPSRP185270910-H1.5_UA", "rb") ## read the first 1024 bytes as a raw data: -raw<-readBin(bin.file,what="raw",size=1,n=30) +raw <- readBin(bin.file, what = "raw", size = 1, n = 30) ## read 10 integer values -int.values<-readBin( bin.file, integer(),10) +int.values <- readBin(bin.file, integer(), 10) ## close file -close(bin.file) \ No newline at end of file +close(bin.file) diff --git a/modules/data.remote/inst/scripts/palsar_extractor.R b/modules/data.remote/inst/scripts/palsar_extractor.R index fdb49bf43b8..bc5b3015041 100644 --- a/modules/data.remote/inst/scripts/palsar_extractor.R +++ b/modules/data.remote/inst/scripts/palsar_extractor.R @@ -1,5 +1,4 @@ -palsar.extractor<-function(kml,fia,leaf.off,plot_ext){ - +palsar.extractor <- function(kml, fia, leaf.off, plot_ext) { library(sp) 
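+  ## NOTE (added summary; inferred from the flag checks and path blocks further down, not original documentation):
+  ## the hard-coded paths below assume a `machine` flag defined in the calling environment
+  ## (1 = Brady's Mac paths, 2 = Brady's Linux paths), so a hypothetical call might look like:
+  ##   machine <- 2
+  ##   extracted_48m <- palsar.extractor(kml = 0, fia = 1, leaf.off = 0, plot_ext = 1)
+  ## where kml = 1 would export the extraction coordinates as KML, fia = 1 selects the fuzzed
+  ## Wisconsin FIA plots (otherwise the WLEF biometry plots), leaf.off = 1 indicates leaf-off
+  ## (December) scenes are included, and plot_ext = 1 draws each scene extent with the coordinates overlaid.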
require(rgdal) library(raster) @@ -13,192 +12,197 @@ palsar.extractor<-function(kml,fia,leaf.off,plot_ext){ library(reshape) library(rjags) library(R2HTML) - - coord.set<-c("WLEF", "FIA") - - if(machine==2){ #Brady's Linux paths - metadata<- read.csv("/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", sep="\t", header=T) ##for Brady's Linux - palsar_inpath <- file.path("/home/bhardima/Desktop/cheas/geo_corrected_single_sigma") ##location of PALSAR raw files - calib_inpath <-"/home/bhardima/pecan/modules/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ##location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter - outpath <- file.path("/home/bhardima/pecan/modules/data.remote/output/data") ##For saving - } - if(machine==1){ #Brady's Mac paths - metadata<- read.csv("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/metadata/output_metadata.csv", sep="\t", header=T) ##location of PALSAR metadata table - palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/palsar_scenes/geo_corrected_single_sigma") ##location of PALSAR raw files - calib_inpath <-"/Users/hardimanb/Desktop/data.remote(Andys_Copy)/biometry" ##location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter - outpath <- file.path("/Users/hardimanb/Dropbox/PALSAR_Biomass_Study/data") ##For saving + + coord.set <- c("WLEF", "FIA") + + if (machine == 2) { # Brady's Linux paths + metadata <- read.csv("/home/bhardima/pecan/modules/data.remote/output/metadata/output_metadata.csv", sep = "\t", header = T) ## for Brady's Linux + palsar_inpath <- file.path("/home/bhardima/Desktop/cheas/geo_corrected_single_sigma") ## location of PALSAR raw files + calib_inpath <- "/home/bhardima/pecan/modules/data.remote/biometry/Link_to_Forest_Biomass_Calibration_Coords" ## location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter + outpath <- file.path("/home/bhardima/pecan/modules/data.remote/output/data") ## For saving } - + if (machine == 1) { # Brady's Mac paths + metadata <- read.csv("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/output/metadata/output_metadata.csv", sep = "\t", header = T) ## location of PALSAR metadata table + palsar_inpath <- file.path("/Users/hardimanb/Desktop/data.remote(Andys_Copy)/palsar_scenes/geo_corrected_single_sigma") ## location of PALSAR raw files + calib_inpath <- "/Users/hardimanb/Desktop/data.remote(Andys_Copy)/biometry" ## location of file containing (FIA) plot coords and biomass values for calibrating PALSAR backscatter + outpath <- file.path("/Users/hardimanb/Dropbox/PALSAR_Biomass_Study/data") ## For saving + } + ################################ ## Read in coordinate data for calibration of PALSAR backscatter returns - ## Uses PALSAR metadata file to determine geographic extent of available PALSAR data and crops extraction coord set + ## Uses PALSAR metadata file to determine geographic extent of available PALSAR data and crops extraction coord set ## to match PALSAR extent. Reprojects extraction coords to match PALSAR geotiffs. 
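+  ## In outline, the cropping described above is done below as (sketch only, using sp; the names
+  ## pts_ll and keep are illustrative, not part of the script):
+  ##   pts_ll <- spTransform(Sr1, CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"))
+  ##   keep   <- !is.na(over(pts_ll, ChEAS_PLASAR_extent))   # TRUE where a plot falls inside the PALSAR extent
+  ##   cheas.coords <- Sr1@coords[keep, ]                    # only these coordinates are extracted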
################################ - if(fia==1){ #EXTRACTS FROM FIA COORDINATES - calib_infile <-read.csv(file.path(calib_inpath,"wi-biomass-fuzzed.csv"), sep=",", header=T) #Wisconsin FIA plots - coords<-data.frame(calib_infile$FUZZED_LON,calib_infile$FUZZED_LAT) #lon and lat (ChEAS: UTM Zone 15N NAD83) - Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - - latlon<-data.frame(calib_infile$FUZZED_LAT,calib_infile$FUZZED_LON) - spdf.latlon <- SpatialPointsDataFrame(Sr1, latlon) #convert to class="SpatialPointsDataFrame" for export as kml - if(kml==1){writeOGR(spdf.latlon, layer=1, "WI_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) + if (fia == 1) { # EXTRACTS FROM FIA COORDINATES + calib_infile <- read.csv(file.path(calib_inpath, "wi-biomass-fuzzed.csv"), sep = ",", header = T) # Wisconsin FIA plots + coords <- data.frame(calib_infile$FUZZED_LON, calib_infile$FUZZED_LAT) # lon and lat (ChEAS: UTM Zone 15N NAD83) + Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + + latlon <- data.frame(calib_infile$FUZZED_LAT, calib_infile$FUZZED_LON) + spdf.latlon <- SpatialPointsDataFrame(Sr1, latlon) # convert to class="SpatialPointsDataFrame" for export as kml + if (kml == 1) { + writeOGR(spdf.latlon, layer = 1, "WI_FIA.kml", driver = "KML") # export as kml (this puts in in the Home folder) } - }else{#EXTRACTS FROM WLEF COORDINATES - calib_infile <-read.csv(file.path(calib_inpath,"biometry_trimmed.csv"), sep=",", header=T) #WLEF plots - calib_infile<-aggregate(calib_infile, list(calib_infile[,1]), mean) ##This will give errors, but these can be safely ignored - calib_infile$plot<-calib_infile$Group.1 - calib_infile<-cbind(calib_infile[,2],calib_infile[,5:9]) - colnames(calib_infile)<-c("plot","easting","northing","adult_density","sapling_density","ABG_biomass") - - coords<-data.frame(calib_infile$easting,calib_infile$northing) #eastings and northings (ChEAS: UTM Zone 15N NAD83) - Sr1<- SpatialPoints(coords,proj4string=CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) - wlef<-data.frame(paste(calib_infile$plot,calib_infile$subplot,sep="_")) + } else { # EXTRACTS FROM WLEF COORDINATES + calib_infile <- read.csv(file.path(calib_inpath, "biometry_trimmed.csv"), sep = ",", header = T) # WLEF plots + calib_infile <- aggregate(calib_infile, list(calib_infile[, 1]), mean) ## This will give errors, but these can be safely ignored + calib_infile$plot <- calib_infile$Group.1 + calib_infile <- cbind(calib_infile[, 2], calib_infile[, 5:9]) + colnames(calib_infile) <- c("plot", "easting", "northing", "adult_density", "sapling_density", "ABG_biomass") + + coords <- data.frame(calib_infile$easting, calib_infile$northing) # eastings and northings (ChEAS: UTM Zone 15N NAD83) + Sr1 <- SpatialPoints(coords, proj4string = CRS("+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")) + wlef <- data.frame(paste(calib_infile$plot, calib_infile$subplot, sep = "_")) epsg4326String <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs") - Sr1_4google <- spTransform(Sr1,epsg4326String) #class=SpatialPoints - Sr1_4google <- SpatialPointsDataFrame(Sr1_4google, wlef) #convert to class="SpatialPointsDataFrame" for export as kml - if(kml==1){writeOGR(Sr1_4google, layer=1, "WLEF.kml", driver="KML") #export as kml (this puts in in the Home folder) + Sr1_4google <- spTransform(Sr1, epsg4326String) # class=SpatialPoints + Sr1_4google <- 
SpatialPointsDataFrame(Sr1_4google, wlef) # convert to class="SpatialPointsDataFrame" for export as kml + if (kml == 1) { + writeOGR(Sr1_4google, layer = 1, "WLEF.kml", driver = "KML") # export as kml (this puts in in the Home folder) } } - + ## corner coords for cheas domain based on avaialable PALSAR data. (Maybe switch to bounding.box.xy()? ) - ChEAS_PLASAR_extent <-rbind(cbind(min(metadata$scn_nwlon),max(metadata$scn_nwlat)), - cbind(max(metadata$scn_nelon),max(metadata$scn_nelat)), - cbind(max(metadata$scn_selon),min(metadata$scn_selat)), - cbind(min(metadata$scn_swlon),min(metadata$scn_swlat)), - cbind(min(metadata$scn_nwlon),max(metadata$scn_nwlat))) - - ChEAS_PLASAR_extent<- Polygon(ChEAS_PLASAR_extent) #spatial polygon from cheas-palsar extent - Srs1<- Polygons(list(ChEAS_PLASAR_extent),"ChEAS_PLASAR_extent") #spatial polygons (plural) - ChEAS_PLASAR_extent<-SpatialPolygons(list(Srs1),proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - - Sr1<-spTransform(Sr1,CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - coords.in.cheas<-as.vector(over(Sr1,ChEAS_PLASAR_extent)) #subset of plots that falls within Cheas-PALSAR extent - + ChEAS_PLASAR_extent <- rbind( + cbind(min(metadata$scn_nwlon), max(metadata$scn_nwlat)), + cbind(max(metadata$scn_nelon), max(metadata$scn_nelat)), + cbind(max(metadata$scn_selon), min(metadata$scn_selat)), + cbind(min(metadata$scn_swlon), min(metadata$scn_swlat)), + cbind(min(metadata$scn_nwlon), max(metadata$scn_nwlat)) + ) + + ChEAS_PLASAR_extent <- Polygon(ChEAS_PLASAR_extent) # spatial polygon from cheas-palsar extent + Srs1 <- Polygons(list(ChEAS_PLASAR_extent), "ChEAS_PLASAR_extent") # spatial polygons (plural) + ChEAS_PLASAR_extent <- SpatialPolygons(list(Srs1), proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + + Sr1 <- spTransform(Sr1, CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + coords.in.cheas <- as.vector(over(Sr1, ChEAS_PLASAR_extent)) # subset of plots that falls within Cheas-PALSAR extent + # FIA.in.cheas[is.na(FIA.in.cheas)]<-0 #replace na's with 0's for indexing - coords.in.cheas[is.na(coords.in.cheas)]<-0 #replace na's with 0's for indexing - - ##Biomass source data - if(fia==1){ - biomass<-calib_infile[as.logical(coords.in.cheas),4] #for FIA - } else{ - biomass<-calib_infile[as.logical(coords.in.cheas),'ABG_biomass'] #for WLEF + coords.in.cheas[is.na(coords.in.cheas)] <- 0 # replace na's with 0's for indexing + + ## Biomass source data + if (fia == 1) { + biomass <- calib_infile[as.logical(coords.in.cheas), 4] # for FIA + } else { + biomass <- calib_infile[as.logical(coords.in.cheas), "ABG_biomass"] # for WLEF } - + ## Subset extraction coords that fall within PALSAR observation area - cheas.coords<-Sr1@coords[coords.in.cheas==1,] ##subset of coords that falls within Cheas-PALSAR extent - spcheascoords <- SpatialPoints(cheas.coords,proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) - - ##Plot-IDs; will be used later on for generating time series of backscatter values - if(fia==1){ - plot<-seq(1,nrow(cheas.coords),1) #for FIA NOTE: Add in FIA plot unique identifiers if available - } else{ - plot<-calib_infile[as.logical(coords.in.cheas),'plot'] #for WLEF + cheas.coords <- Sr1@coords[coords.in.cheas == 1, ] ## subset of coords that falls within Cheas-PALSAR extent + spcheascoords <- SpatialPoints(cheas.coords, proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")) + + ## Plot-IDs; will be used later on for generating time series of backscatter 
values + if (fia == 1) { + plot <- seq(1, nrow(cheas.coords), 1) # for FIA NOTE: Add in FIA plot unique identifiers if available + } else { + plot <- calib_infile[as.logical(coords.in.cheas), "plot"] # for WLEF } - - # writeOGR(cheasFIA, layer=1, "cheas_FIA.kml", driver="KML") #export as kml (this puts in in the Home folder) - + + # writeOGR(cheasFIA, layer=1, "cheas_FIA.kml", driver="KML") #export as kml (this puts it in the Home folder) + ################################ ## Begin extracting PALSAR values at FIA plot coordinates ################################ - pol_bands<-c("HH", "HV") - numfiles<-length(list.files(file.path(palsar_inpath, pol_bands[1]), pattern=".tif" ,recursive=F)) - + pol_bands <- c("HH", "HV") + numfiles <- length(list.files(file.path(palsar_inpath, pol_bands[1]), pattern = ".tif", recursive = F)) + # lake_extracted<-matrix(NA, nrow(lake_coords),length(pol_bands)*numfiles) # disturbance_extracted_40m<-matrix(NA, nrow(disturbance_coords),length(pol_bands)*numfiles) - # + # # colnames(lake_extracted)<-col_names # colnames(disturbance_extracted)<-col_names # colnames(disturbance_extracted_40m)<-col_names - - extracted_48m<-matrix(nrow=0, ncol=10) #matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands - - #Start file of scene extent figures - pdf(paste(outpath,"/",coord.set[fia+1], "_SceneExtent_with_plot_overlay.pdf",sep=""),width = 6, height = 6, paper='special') - - for(i in 1:numfiles){ - HH_filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[1]), pattern=".tif" ,recursive=F)) - HH_inpath<-file.path(palsar_inpath, pol_bands[1],HH_filelist[i]) - HH_rast<-raster(HH_inpath) - - HV_filelist<-as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern=".tif" ,recursive=F)) - HV_inpath<-file.path(palsar_inpath, pol_bands[2],HV_filelist[i]) - HV_rast<-raster(HV_inpath) - + + extracted_48m <- matrix(nrow = 0, ncol = 10) # matrix to store extracted palsar values. nrow=number of coordinates being extracted. ncol=# of pol_bands + + # Start file of scene extent figures + pdf(paste(outpath, "/", coord.set[fia + 1], "_SceneExtent_with_plot_overlay.pdf", sep = ""), width = 6, height = 6, paper = "special") + + for (i in 1:numfiles) { + HH_filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[1]), pattern = ".tif", recursive = F)) + HH_inpath <- file.path(palsar_inpath, pol_bands[1], HH_filelist[i]) + HH_rast <- raster(HH_inpath) + + HV_filelist <- as.vector(list.files(file.path(palsar_inpath, pol_bands[2]), pattern = ".tif", recursive = F)) + HV_inpath <- file.path(palsar_inpath, pol_bands[2], HV_filelist[i]) + HV_rast <- raster(HV_inpath) + ################################################ - ## Check each extraction coordinate to see if it falls inside the bounding box of this palsar scene. ## Only extract the ones that do. ## NOTE: The bounding box for the PALSAR scene will be larger than the PALSAR scene itself (due to a tilted orbital path). ## This means that the extraction loop will include some number of palsar scenes that have all zeros for the backscatter values. ## These zeros are truncated in post processing, prior to curve fitting.
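+    ## (Sketch of the test applied below: the scene footprint is rebuilt as a SpatialPolygons object from the
+    ##  raster's xmin/xmax/ymin/ymax, coordinates are kept where over(spcheascoords, scn.extent) is not NA,
+    ##  and scenes with no matching coordinates are skipped with `next`.)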
################################################ - scnid<-substr(as.character(HV_filelist[i]),1,15) - - ##create data.frame from raster corner coords by querying metadata - ##NOTE: I multiply the lon coord by -1 to make it work with the 'longlat' projection - pals.ext<-Polygon(rbind( - c(xmin(HH_rast),ymin(HH_rast)), - c(xmin(HH_rast),ymax(HH_rast)), - c(xmax(HH_rast),ymax(HH_rast)), - c(xmax(HH_rast),ymin(HH_rast)), - c(xmin(HH_rast),ymin(HH_rast)))) - - - ##make spatial polygon from raster extent - pals.ext.poly<- Polygons(list(pals.ext),"pals.ext") #spatial polygons (plural) - scn.extent<-SpatialPolygons(list(pals.ext.poly),proj4string=CRS(CRSargs(HH_rast@crs))) - - scn.extent<- spTransform(scn.extent,HH_rast@crs) - spcheascoords<-spTransform(spcheascoords,HH_rast@crs) #Convert coords being extracted to CRS of PALSAR raster files - - coords.in.rast<-over(spcheascoords,scn.extent) #extraction coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene - coords.in.rast[is.na(coords.in.rast)]<-0 #replace na's with 0's for indexing - if(max(coords.in.rast)!=1){ #jump to next palsar file if no extraction coordinates fall within this one + scnid <- substr(as.character(HV_filelist[i]), 1, 15) + + ## create data.frame from raster corner coords by querying metadata + ## NOTE: I multiply the lon coord by -1 to make it work with the 'longlat' projection + pals.ext <- Polygon(rbind( + c(xmin(HH_rast), ymin(HH_rast)), + c(xmin(HH_rast), ymax(HH_rast)), + c(xmax(HH_rast), ymax(HH_rast)), + c(xmax(HH_rast), ymin(HH_rast)), + c(xmin(HH_rast), ymin(HH_rast)) + )) + + + ## make spatial polygon from raster extent + pals.ext.poly <- Polygons(list(pals.ext), "pals.ext") # spatial polygons (plural) + scn.extent <- SpatialPolygons(list(pals.ext.poly), proj4string = CRS(CRSargs(HH_rast@crs))) + + scn.extent <- spTransform(scn.extent, HH_rast@crs) + spcheascoords <- spTransform(spcheascoords, HH_rast@crs) # Convert coords being extracted to CRS of PALSAR raster files + + coords.in.rast <- over(spcheascoords, scn.extent) # extraction coords (already filtered to fall within the ChEAS domain) that fall within this palsar scene + coords.in.rast[is.na(coords.in.rast)] <- 0 # replace na's with 0's for indexing + if (max(coords.in.rast) != 1) { # jump to next palsar file if no extraction coordinates fall within this one next } - coords.in.rast<-as.logical(coords.in.rast) - + coords.in.rast <- as.logical(coords.in.rast) + ################################ - ##calibration PASLAR data from extraction coords (mean of pixels w/in 48m buffer radius) + ## calibration PASLAR data from extraction coords (mean of pixels w/in 48m buffer radius) ################################ - HH_data_48m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=mean) #Extract backscatter values from all coords in this scn. This step is very slow - HV_data_48m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=mean) #Extract backscatter values from all coords in this scn. 
This step is very slow - - #extract SE's also - #Get cell numbers of pixels within buffer of each set of coords - buff.dim.list<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, cellnumbers=TRUE) - #number of pixles in each buffer - ncells<-matrix(unlist(lapply(buff.dim.list,dim)),nrow=2)[1,] - #Extract stdev of cell values in buffer, divide by sqrt(n) - HHse_data_48m<-extract(HH_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=sd)/sqrt(ncells) - HVse_data_48m<-extract(HV_rast, spcheascoords[coords.in.rast], method="simple",buffer=48, small=T, fun=sd)/sqrt(ncells) - - scnid<-matrix(substr(as.character(HV_filelist[i]),1,15),nrow=length(HH_data_48m),ncol=1) #vector of this scnid. length = number of coords in this scene - palsar_date<-matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid==scnid[1]]),1,8),"%Y%m%d")),nrow=length(HH_data_48m),ncol=1) # same as above for scn date - - ##cbind for output - if(fia==1){ - all_48<- cbind(scnid,palsar_date,plot,spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_48m,HV_data_48m,HHse_data_48m,HVse_data_48m) #for FIA (no plot identifiers) - } else{ - all_48<- cbind(scnid,palsar_date,as.character(calib_infile$plot[coords.in.rast]),spcheascoords[coords.in.rast]@coords,biomass[coords.in.rast],HH_data_48m,HV_data_48m,HHse_data_48m,HVse_data_48m) #for WLEF + HH_data_48m <- extract(HH_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # Extract backscatter values from all coords in this scn. This step is very slow + HV_data_48m <- extract(HV_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = mean) # Extract backscatter values from all coords in this scn. This step is very slow + + # extract SE's also + # Get cell numbers of pixels within buffer of each set of coords + buff.dim.list <- extract(HH_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, cellnumbers = TRUE) + # number of pixles in each buffer + ncells <- matrix(unlist(lapply(buff.dim.list, dim)), nrow = 2)[1, ] + # Extract stdev of cell values in buffer, divide by sqrt(n) + HHse_data_48m <- extract(HH_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = sd) / sqrt(ncells) + HVse_data_48m <- extract(HV_rast, spcheascoords[coords.in.rast], method = "simple", buffer = 48, small = T, fun = sd) / sqrt(ncells) + + scnid <- matrix(substr(as.character(HV_filelist[i]), 1, 15), nrow = length(HH_data_48m), ncol = 1) # vector of this scnid. 
length = number of coords in this scene + palsar_date <- matrix(as.character(as.Date(substr(as.character(metadata$scndate[metadata$scnid == scnid[1]]), 1, 8), "%Y%m%d")), nrow = length(HH_data_48m), ncol = 1) # same as above for scn date + + ## cbind for output + if (fia == 1) { + all_48 <- cbind(scnid, palsar_date, plot, spcheascoords[coords.in.rast]@coords, biomass[coords.in.rast], HH_data_48m, HV_data_48m, HHse_data_48m, HVse_data_48m) # for FIA (no plot identifiers) + } else { + all_48 <- cbind(scnid, palsar_date, as.character(calib_infile$plot[coords.in.rast]), spcheascoords[coords.in.rast]@coords, biomass[coords.in.rast], HH_data_48m, HV_data_48m, HHse_data_48m, HVse_data_48m) # for WLEF } - - extracted_48m<-rbind(extracted_48m,all_48) - - #Create figure showing palsar scn extent and overlay with plot coordinates - #NOTE:This was created to verify that zeros int he output of palsar data actually come from plots + + extracted_48m <- rbind(extracted_48m, all_48) + + # Create figure showing palsar scn extent and overlay with plot coordinates + # NOTE:This was created to verify that zeros int he output of palsar data actually come from plots # that fall outside of a particular scene - if(plot_ext==1){ + if (plot_ext == 1) { plot(extent(HV_rast)) - points(spcheascoords,pch=19,col="red") - points(spcheascoords[coords.in.rast],pch=19) - legend("top",legend=c("Inside","Outside"),pch=19,col=c("black", "red"),bty="n") - mtext(paste(unique(scnid), unique(palsar_date),sep=" ")) + points(spcheascoords, pch = 19, col = "red") + points(spcheascoords[coords.in.rast], pch = 19) + legend("top", legend = c("Inside", "Outside"), pch = 19, col = c("black", "red"), bty = "n") + mtext(paste(unique(scnid), unique(palsar_date), sep = " ")) } - - print(paste("i=",i,sep="")) + + print(paste("i=", i, sep = "")) print(scnid[1]) print(palsar_date[1]) } dev.off() return(extracted_48m) -}#function +} # function diff --git a/modules/data.remote/inst/scripts/palsar_plotter.R b/modules/data.remote/inst/scripts/palsar_plotter.R index 3e3513c4fc5..45c5ff48047 100644 --- a/modules/data.remote/inst/scripts/palsar_plotter.R +++ b/modules/data.remote/inst/scripts/palsar_plotter.R @@ -1,130 +1,135 @@ -palsar.plotter<-function(outpath,coord.set,fia){ - - dat48<-read.csv(paste(outpath,"/",coord.set[fia+1],"_dat48.csv",sep=""),header = TRUE) - - dat48$scnid<-as.character(dat48$scnid) - dat48$scndate<-as.Date(dat48$scndate,"%Y-%m-%d") - dat48$plot<-as.numeric(dat48$plot) - dat48$UTM.lat<- as.numeric(as.character(dat48$UTM.lat)) - dat48$UTM.lon<- as.numeric(as.character(dat48$UTM.lon)) - dat48$biomass<- as.numeric(as.character(dat48$biomass)) - dat48$HH.sigma.48<- as.numeric(as.character(dat48$HH.sigma.48)) - dat48$HV.sigma.48<- as.numeric(as.character(dat48$HV.sigma.48)) - dat48$year<-as.numeric(format(dat48$scndate,"%Y")) - dat48$month<-as.numeric(format(dat48$scndate,"%m")) - dat48$HHse_data_48m<- as.numeric(as.character(dat48$HHse_data_48m)) - dat48$HVse_data_48m<- as.numeric(as.character(dat48$HVse_data_48m)) - - #Generate PDF of raw data exploration - #NOTE: Some of these figures will not be relevant for the FIA dataset - pdf(paste(outpath,"/",coord.set[fia+1], "_ExtractionQCplots.pdf",sep=""),width = 6, height = 6, paper='special') - - #Plot boxplots of each scndate by year (HH) - par(mfrow=c(2,2)) - boxplot(dat48$HH.sigma.48[dat48$year==2007] ~ dat48$month[dat48$year==2007],xlab="month",main="2007 HH") - boxplot(dat48$HH.sigma.48[dat48$year==2008] ~ dat48$month[dat48$year==2008],xlab="month",main="2008 HH") - 
boxplot(dat48$HH.sigma.48[dat48$year==2009] ~ dat48$month[dat48$year==2009],xlab="month",main="2009 HH") - boxplot(dat48$HH.sigma.48[dat48$year==2010] ~ dat48$month[dat48$year==2010],xlab="month",main="2010 HH") - - #Plot boxplots of each scndate by year (HV) - par(mfrow=c(2,2)) - boxplot(dat48$HV.sigma.48[dat48$year==2007] ~ dat48$month[dat48$year==2007],xlab="month",main="2007 HV") - boxplot(dat48$HV.sigma.48[dat48$year==2008] ~ dat48$month[dat48$year==2008],xlab="month",main="2008 HV") - boxplot(dat48$HV.sigma.48[dat48$year==2009] ~ dat48$month[dat48$year==2009],xlab="month",main="2009 HV") - boxplot(dat48$HV.sigma.48[dat48$year==2010] ~ dat48$month[dat48$year==2010],xlab="month",main="2010 HV") - - #Plot comparing HH values of May, June, August 2007 - par(mfrow=c(1,3)) - plot(dat48$HH.sigma.48[dat48$month==5 & dat48$year==2007],dat48$HH.sigma.48[dat48$month==6 & dat48$year==2007], - xlab="05 HH",ylab='06 HH',main="may 2007 vs jun 2007") - fit1<-lm(dat48$HH.sigma.48[dat48$month==6 & dat48$year==2007] ~ dat48$HH.sigma.48[dat48$month==5 & dat48$year==2007]) - abline(0,1,lwd=2,lty=2,col="grey") - abline(fit1,lwd=2,lty=1,col="red") - plot(dat48$HH.sigma.48[dat48$month==5 & dat48$year==2007],dat48$HH.sigma.48[dat48$month==8 & dat48$year==2007], - xlab="05 HH",ylab='08 HH',main="may 2007 vs aug 2007") - fit2<-lm(dat48$HH.sigma.48[dat48$month==5 & dat48$year==2007] ~ dat48$HH.sigma.48[dat48$month==8 & dat48$year==2007]) - abline(0,1,lwd=2,lty=2,col="grey") - abline(fit1,lwd=2,lty=1,col="red") - plot(dat48$HH.sigma.48[dat48$month==6 & dat48$year==2007],dat48$HH.sigma.48[dat48$month==8 & dat48$year==2007], - xlab="06 HH",ylab='08 HH',main="jun 2007 vs aug 2007") - fit3<-lm(dat48$HH.sigma.48[dat48$month==6 & dat48$year==2007] ~ dat48$HH.sigma.48[dat48$month==8 & dat48$year==2007]) - abline(0,1,lwd=2,lty=2,col="grey") - abline(fit1,lwd=2,lty=1,col="red") - - #Plot comparing HV values of May, June, August 2007 - par(mfrow=c(1,3)) - plot(dat48$HV.sigma.48[dat48$month==5 & dat48$year==2007],dat48$HV.sigma.48[dat48$month==6 & dat48$year==2007], - xlab="05 HV",ylab='06 HV',main="may 2007 vs jun 2007") - fit1<-lm(dat48$HV.sigma.48[dat48$month==6 & dat48$year==2007] ~ dat48$HV.sigma.48[dat48$month==5 & dat48$year==2007]) - abline(0,1,lwd=2,lty=2,col="grey") - abline(fit1,lwd=2,lty=1,col="red") - plot(dat48$HV.sigma.48[dat48$month==5 & dat48$year==2007],dat48$HV.sigma.48[dat48$month==8 & dat48$year==2007], - xlab="05 HV",ylab='08 HV',main="may 2007 vs aug 2007") - fit2<-lm(dat48$HV.sigma.48[dat48$month==5 & dat48$year==2007] ~ dat48$HV.sigma.48[dat48$month==8 & dat48$year==2007]) - abline(0,1,lwd=2,lty=2,col="grey") - abline(fit1,lwd=2,lty=1,col="red") - plot(dat48$HV.sigma.48[dat48$month==6 & dat48$year==2007],dat48$HV.sigma.48[dat48$month==8 & dat48$year==2007], - xlab="06 HV",ylab='08 HV',main="jun 2007 vs aug 2007") - fit3<-lm(dat48$HV.sigma.48[dat48$month==6 & dat48$year==2007] ~ dat48$HV.sigma.48[dat48$month==8 & dat48$year==2007]) - abline(0,1,lwd=2,lty=2,col="grey") - abline(fit1,lwd=2,lty=1,col="red") - +palsar.plotter <- function(outpath, coord.set, fia) { + dat48 <- read.csv(paste(outpath, "/", coord.set[fia + 1], "_dat48.csv", sep = ""), header = TRUE) + + dat48$scnid <- as.character(dat48$scnid) + dat48$scndate <- as.Date(dat48$scndate, "%Y-%m-%d") + dat48$plot <- as.numeric(dat48$plot) + dat48$UTM.lat <- as.numeric(as.character(dat48$UTM.lat)) + dat48$UTM.lon <- as.numeric(as.character(dat48$UTM.lon)) + dat48$biomass <- as.numeric(as.character(dat48$biomass)) + dat48$HH.sigma.48 <- 
as.numeric(as.character(dat48$HH.sigma.48)) + dat48$HV.sigma.48 <- as.numeric(as.character(dat48$HV.sigma.48)) + dat48$year <- as.numeric(format(dat48$scndate, "%Y")) + dat48$month <- as.numeric(format(dat48$scndate, "%m")) + dat48$HHse_data_48m <- as.numeric(as.character(dat48$HHse_data_48m)) + dat48$HVse_data_48m <- as.numeric(as.character(dat48$HVse_data_48m)) + + # Generate PDF of raw data exploration + # NOTE: Some of these figures will not be relevant for the FIA dataset + pdf(paste(outpath, "/", coord.set[fia + 1], "_ExtractionQCplots.pdf", sep = ""), width = 6, height = 6, paper = "special") + + # Plot boxplots of each scndate by year (HH) + par(mfrow = c(2, 2)) + boxplot(dat48$HH.sigma.48[dat48$year == 2007] ~ dat48$month[dat48$year == 2007], xlab = "month", main = "2007 HH") + boxplot(dat48$HH.sigma.48[dat48$year == 2008] ~ dat48$month[dat48$year == 2008], xlab = "month", main = "2008 HH") + boxplot(dat48$HH.sigma.48[dat48$year == 2009] ~ dat48$month[dat48$year == 2009], xlab = "month", main = "2009 HH") + boxplot(dat48$HH.sigma.48[dat48$year == 2010] ~ dat48$month[dat48$year == 2010], xlab = "month", main = "2010 HH") + + # Plot boxplots of each scndate by year (HV) + par(mfrow = c(2, 2)) + boxplot(dat48$HV.sigma.48[dat48$year == 2007] ~ dat48$month[dat48$year == 2007], xlab = "month", main = "2007 HV") + boxplot(dat48$HV.sigma.48[dat48$year == 2008] ~ dat48$month[dat48$year == 2008], xlab = "month", main = "2008 HV") + boxplot(dat48$HV.sigma.48[dat48$year == 2009] ~ dat48$month[dat48$year == 2009], xlab = "month", main = "2009 HV") + boxplot(dat48$HV.sigma.48[dat48$year == 2010] ~ dat48$month[dat48$year == 2010], xlab = "month", main = "2010 HV") + + # Plot comparing HH values of May, June, August 2007 + par(mfrow = c(1, 3)) + plot(dat48$HH.sigma.48[dat48$month == 5 & dat48$year == 2007], dat48$HH.sigma.48[dat48$month == 6 & dat48$year == 2007], + xlab = "05 HH", ylab = "06 HH", main = "may 2007 vs jun 2007" + ) + fit1 <- lm(dat48$HH.sigma.48[dat48$month == 6 & dat48$year == 2007] ~ dat48$HH.sigma.48[dat48$month == 5 & dat48$year == 2007]) + abline(0, 1, lwd = 2, lty = 2, col = "grey") + abline(fit1, lwd = 2, lty = 1, col = "red") + plot(dat48$HH.sigma.48[dat48$month == 5 & dat48$year == 2007], dat48$HH.sigma.48[dat48$month == 8 & dat48$year == 2007], + xlab = "05 HH", ylab = "08 HH", main = "may 2007 vs aug 2007" + ) + fit2 <- lm(dat48$HH.sigma.48[dat48$month == 5 & dat48$year == 2007] ~ dat48$HH.sigma.48[dat48$month == 8 & dat48$year == 2007]) + abline(0, 1, lwd = 2, lty = 2, col = "grey") + abline(fit1, lwd = 2, lty = 1, col = "red") + plot(dat48$HH.sigma.48[dat48$month == 6 & dat48$year == 2007], dat48$HH.sigma.48[dat48$month == 8 & dat48$year == 2007], + xlab = "06 HH", ylab = "08 HH", main = "jun 2007 vs aug 2007" + ) + fit3 <- lm(dat48$HH.sigma.48[dat48$month == 6 & dat48$year == 2007] ~ dat48$HH.sigma.48[dat48$month == 8 & dat48$year == 2007]) + abline(0, 1, lwd = 2, lty = 2, col = "grey") + abline(fit1, lwd = 2, lty = 1, col = "red") + + # Plot comparing HV values of May, June, August 2007 + par(mfrow = c(1, 3)) + plot(dat48$HV.sigma.48[dat48$month == 5 & dat48$year == 2007], dat48$HV.sigma.48[dat48$month == 6 & dat48$year == 2007], + xlab = "05 HV", ylab = "06 HV", main = "may 2007 vs jun 2007" + ) + fit1 <- lm(dat48$HV.sigma.48[dat48$month == 6 & dat48$year == 2007] ~ dat48$HV.sigma.48[dat48$month == 5 & dat48$year == 2007]) + abline(0, 1, lwd = 2, lty = 2, col = "grey") + abline(fit1, lwd = 2, lty = 1, col = "red") + plot(dat48$HV.sigma.48[dat48$month == 5 & 
dat48$year == 2007], dat48$HV.sigma.48[dat48$month == 8 & dat48$year == 2007], + xlab = "05 HV", ylab = "08 HV", main = "may 2007 vs aug 2007" + ) + fit2 <- lm(dat48$HV.sigma.48[dat48$month == 5 & dat48$year == 2007] ~ dat48$HV.sigma.48[dat48$month == 8 & dat48$year == 2007]) + abline(0, 1, lwd = 2, lty = 2, col = "grey") + abline(fit1, lwd = 2, lty = 1, col = "red") + plot(dat48$HV.sigma.48[dat48$month == 6 & dat48$year == 2007], dat48$HV.sigma.48[dat48$month == 8 & dat48$year == 2007], + xlab = "06 HV", ylab = "08 HV", main = "jun 2007 vs aug 2007" + ) + fit3 <- lm(dat48$HV.sigma.48[dat48$month == 6 & dat48$year == 2007] ~ dat48$HV.sigma.48[dat48$month == 8 & dat48$year == 2007]) + abline(0, 1, lwd = 2, lty = 2, col = "grey") + abline(fit1, lwd = 2, lty = 1, col = "red") + ####################################### - #### ### + #### ### ####################################### - - - #Plot scene frequency by year, month - par(mfrow=c(1,2)) - hist(dat48$year,freq=TRUE,main="By year") - hist(dat48$month,freq=TRUE,main="By month") - + + + # Plot scene frequency by year, month + par(mfrow = c(1, 2)) + hist(dat48$year, freq = TRUE, main = "By year") + hist(dat48$month, freq = TRUE, main = "By month") + # par(mfrow=c(1,1)) # hist(dat48$scndate,freq=T,100,xaxt="n") # axis(1, dat48$scndate, format(dat48$scndate, "%b %Y"), cex.axis = .7) - - par(mfrow=c(1,3)) - hist(dat48$biomass,main=paste(coord.set[fia+1],"biomass",sep=" ")) - hist(dat48$HH.sigma.48,main=paste(coord.set[fia+1],"HH",sep=" ")) - hist(dat48$HV.sigma.48,main=paste(coord.set[fia+1],"HV",sep=" ")) - - #Figure showing color-coded density plots of the data - Lab.palette <- colorRampPalette(c("white","violet","blue","green","yellow","orange", "red"), space = "Lab") - par(mfrow=c(1,3)) - smoothScatter(dat48$HV.sigma.48,dat48$HH.sigma.48,nbin=256,colramp = Lab.palette,xlab="HV",ylab="HH") - smoothScatter(dat48$biomass,dat48$HH.sigma.48,nbin=256,colramp = Lab.palette,xlab="biomass",ylab="HH",main="Density") - smoothScatter(dat48$biomass,dat48$HV.sigma.48,nbin=256,colramp = Lab.palette,ylim=c(0,max(dat48$HH.sigma.48)),xlab="biomass",ylab="HV") - - #Figure of biomass vs backscatter color-coded by year of scndate - par(mfrow=c(1,2)) - scatter.smooth(dat48$biomass,dat48$HH.sigma.48,cex=0,xlab="biomass",ylab="HH",main="48m",col="grey") - points(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2007],col=1,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2008],col=2,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2009],col=3,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col=4,cex=0.5) - legend("topright",pch=1,legend=c(2007,2008,2009,2010), cex=0.7,pt.cex=0.5,col=1:4,bty="n",xjust=1) - scatter.smooth(dat48$biomass,dat48$HV.sigma.48,cex=0,xlab="biomass",ylab="HV",main="48m",col="grey") - points(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2007],col=1,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2008],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2008],col=2,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2009],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2009],col=3,cex=0.5) - points(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],col=4,cex=0.5) - 
legend("topright",pch=1,legend=c(2007,2008,2009,2010), cex=0.7,pt.cex=0.5,col=1:4,bty="n",xjust=1) + + par(mfrow = c(1, 3)) + hist(dat48$biomass, main = paste(coord.set[fia + 1], "biomass", sep = " ")) + hist(dat48$HH.sigma.48, main = paste(coord.set[fia + 1], "HH", sep = " ")) + hist(dat48$HV.sigma.48, main = paste(coord.set[fia + 1], "HV", sep = " ")) + + # Figure showing color-coded density plots of the data + Lab.palette <- colorRampPalette(c("white", "violet", "blue", "green", "yellow", "orange", "red"), space = "Lab") + par(mfrow = c(1, 3)) + smoothScatter(dat48$HV.sigma.48, dat48$HH.sigma.48, nbin = 256, colramp = Lab.palette, xlab = "HV", ylab = "HH") + smoothScatter(dat48$biomass, dat48$HH.sigma.48, nbin = 256, colramp = Lab.palette, xlab = "biomass", ylab = "HH", main = "Density") + smoothScatter(dat48$biomass, dat48$HV.sigma.48, nbin = 256, colramp = Lab.palette, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "biomass", ylab = "HV") + + # Figure of biomass vs backscatter color-coded by year of scndate + par(mfrow = c(1, 2)) + scatter.smooth(dat48$biomass, dat48$HH.sigma.48, cex = 0, xlab = "biomass", ylab = "HH", main = "48m", col = "grey") + points(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2007], col = 1, cex = 0.5) + points(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2008], col = 2, cex = 0.5) + points(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2009], col = 3, cex = 0.5) + points(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = 4, cex = 0.5) + legend("topright", pch = 1, legend = c(2007, 2008, 2009, 2010), cex = 0.7, pt.cex = 0.5, col = 1:4, bty = "n", xjust = 1) + scatter.smooth(dat48$biomass, dat48$HV.sigma.48, cex = 0, xlab = "biomass", ylab = "HV", main = "48m", col = "grey") + points(dat48$biomass[format(dat48$scndate, "%Y") == 2007], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2007], col = 1, cex = 0.5) + points(dat48$biomass[format(dat48$scndate, "%Y") == 2008], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2008], col = 2, cex = 0.5) + points(dat48$biomass[format(dat48$scndate, "%Y") == 2009], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2009], col = 3, cex = 0.5) + points(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], col = 4, cex = 0.5) + legend("topright", pch = 1, legend = c(2007, 2008, 2009, 2010), cex = 0.7, pt.cex = 0.5, col = 1:4, bty = "n", xjust = 1) # scatter.smooth(dat60$biomass,dat60$HV.sigma.60,xlab="biomass",ylab="HV",main="60m",col="grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60,xlab="biomass",ylab="HV",main="60m",col="grey") - - #Figure showing ratio and product of backscatter bands vs biomass - par(mfrow=c(1,2)) - scatter.smooth(dat48$biomass,dat48$HV.sigma.48/dat48$HH.sigma.48,xlab="biomass",ylab="HV/HH",main="48m",col="grey") + + # Figure showing ratio and product of backscatter bands vs biomass + par(mfrow = c(1, 2)) + scatter.smooth(dat48$biomass, dat48$HV.sigma.48 / dat48$HH.sigma.48, xlab = "biomass", ylab = "HV/HH", main = "48m", col = "grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60/dat60$HV.sigma.60,xlab="biomass",ylab="HV/HV",main="60m",col="grey") - scatter.smooth(dat48$biomass,dat48$HH.sigma.48*dat48$HV.sigma.48,xlab="biomass",ylab="HHxHV",main="48m",col="grey") + scatter.smooth(dat48$biomass, dat48$HH.sigma.48 * 
dat48$HV.sigma.48, xlab = "biomass", ylab = "HHxHV", main = "48m", col = "grey") # scatter.smooth(dat60$biomass,dat60$HV.sigma.60*dat60$HV.sigma.60,xlab="biomass",ylab="HVxHV",main="60m",col="grey") - - #Plot NDVI-style ratio of the two backscatter bands - par(mfrow=c(1,1)) - scatter.smooth(dat48$biomass,(dat48$HH.sigma.48-dat48$HV.sigma.48)/(dat48$HH.sigma.48+dat48$HV.sigma.48),xlab="biomass",ylab="(HH-HV)/(HH+HV)",main="48m", col="gray") + + # Plot NDVI-style ratio of the two backscatter bands + par(mfrow = c(1, 1)) + scatter.smooth(dat48$biomass, (dat48$HH.sigma.48 - dat48$HV.sigma.48) / (dat48$HH.sigma.48 + dat48$HV.sigma.48), xlab = "biomass", ylab = "(HH-HV)/(HH+HV)", main = "48m", col = "gray") # scatter.smooth(dat60$biomass,(dat60$HV.sigma.60-dat60$HV.sigma.60)/(dat60$HV.sigma.60+dat60$HV.sigma.60),xlab="biomass",ylab="(HV-HV)/(HV+HV)",main="60m", col="gray") - - #Figure illustrating the effect of seasonality on backscatter values (non-growing season values tend to be much higher) + + # Figure illustrating the effect of seasonality on backscatter values (non-growing season values tend to be much higher) # NOTE: Due to significant effect of season, and poor replication in non-growing season scndates, we restrict analysis to growing season only # par(mfrow=c(4,2),mar=c(4,4,2,2)) # scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2007],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2007],col="grey",xlab="biomass",ylab="HH",main="2007") @@ -139,33 +144,33 @@ palsar.plotter<-function(outpath,coord.set,fia){ # scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HV",main="2010") # points(dat48$biomass[format(dat48$scndate,"%m")>10],dat48$HV.sigma.48[format(dat48$scndate,"%m")>10],col="red",xlab="biomass",ylab="HV",main="2010") # legend("topright",pch=1,legend=c("!Dec","Dec"), cex=0.7,pt.cex=0.5,col=c("grey","red"),bty="n",xjust=1) - - par(mfrow=c(1,2)) - plot(dat48$scndate,dat48$HH.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HH") - plot(dat48$scndate,dat48$HV.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HV") - mtext("On same scale", side=3, line=-2, outer=TRUE, cex=1, font=2) - - par(mfrow=c(1,2)) - plot(dat48$scndate,dat48$HH.sigma.48,ylim=c(0,max(dat48$HH.sigma.48)),xlab="Date",ylab="HH") - plot(dat48$scndate,dat48$HV.sigma.48,ylim=c(0,max(dat48$HV.sigma.48)),xlab="Date",ylab="HV") - mtext("By Date", side=3, line=-2, outer=TRUE, cex=1, font=2) - - par(mfrow=c(1,2)) - plot(dat48$scndate[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],xlab="2010",ylab="HH") - plot(dat48$scndate[format(dat48$scndate,"%Y")==2010],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010],xlab="2010",ylab="HV") - mtext("2010 only", side=3, line=-3, outer=TRUE, cex=1, font=2) - - #Plots demonstrating the effects of including DEC palsar scndate - if(leaf.off==1){ #Only plots if leaf off period is included - par(mfrow=c(2,2)) - scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010 only") - points(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10]) - scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010],col="grey",xlab="biomass",ylab="HH",main="2010 only") - 
points(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")>10]) - scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],dat48$HH.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],col="grey",xlab="biomass",ylab="HH",main="2010 only,Dec. removed") - scatter.smooth(dat48$biomass[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],dat48$HV.sigma.48[format(dat48$scndate,"%Y")==2010 & format(dat48$scndate,"%m")<11],col="grey",xlab="biomass",ylab="HV",main="2010 only,Dec. removed") + + par(mfrow = c(1, 2)) + plot(dat48$scndate, dat48$HH.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HH") + plot(dat48$scndate, dat48$HV.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HV") + mtext("On same scale", side = 3, line = -2, outer = TRUE, cex = 1, font = 2) + + par(mfrow = c(1, 2)) + plot(dat48$scndate, dat48$HH.sigma.48, ylim = c(0, max(dat48$HH.sigma.48)), xlab = "Date", ylab = "HH") + plot(dat48$scndate, dat48$HV.sigma.48, ylim = c(0, max(dat48$HV.sigma.48)), xlab = "Date", ylab = "HV") + mtext("By Date", side = 3, line = -2, outer = TRUE, cex = 1, font = 2) + + par(mfrow = c(1, 2)) + plot(dat48$scndate[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], xlab = "2010", ylab = "HH") + plot(dat48$scndate[format(dat48$scndate, "%Y") == 2010], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010], xlab = "2010", ylab = "HV") + mtext("2010 only", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + + # Plots demonstrating the effects of including DEC palsar scndate + if (leaf.off == 1) { # Only plots if leaf off period is included + par(mfrow = c(2, 2)) + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only") + points(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10]) + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only") + points(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") > 10]) + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], dat48$HH.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], col = "grey", xlab = "biomass", ylab = "HH", main = "2010 only,Dec. removed") + scatter.smooth(dat48$biomass[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], dat48$HV.sigma.48[format(dat48$scndate, "%Y") == 2010 & format(dat48$scndate, "%m") < 11], col = "grey", xlab = "biomass", ylab = "HV", main = "2010 only,Dec. 
removed") } - + # #Plot individual time series of HH for each coordinate set # par(new=FALSE, mfrow=c(1,1)) # plot(format(dat48$scndate,"%Y"),dat48$HH.sigma.48,col="grey",cex=0.5, @@ -185,194 +190,200 @@ palsar.plotter<-function(outpath,coord.set,fia){ # lines(cbind(c(2007,2008,2009,2010),date.plot.HVmean[i,]),col=i) # par(new=T) # } - - #breaks biomass data into quantiles each containing ~5% of the data - bind.bio<-tapply(dat48$biomass, cut(dat48$biomass, breaks= round(quantile(dat48$biomass,probs = seq(0, 1, 0.05))) ),mean) + + # breaks biomass data into quantiles each containing ~5% of the data + bind.bio <- tapply(dat48$biomass, cut(dat48$biomass, breaks = round(quantile(dat48$biomass, probs = seq(0, 1, 0.05)))), mean) # bind.HH<-tapply(dat48$HH.sigma.48,cut(dat48$HH.sigma.48,breaks=dat48$HH.sigma.48[round(quantile(dat48$biomass,probs = seq(0, 1, 0.05)))] ),mean) # bind.HV<-tapply(dat48$HV.sigma.48,cut(dat48$HV.sigma.48,breaks=dat48$HV.sigma.48[round(quantile(dat48$biomass,probs = seq(0, 1, 0.05)))] ),mean) - - cuts<-matrix(unlist(strsplit(names(bind.bio),",")),ncol=2,byrow=T) - for(i in 1:nrow(cuts)){ - for(j in 1:ncol(cuts)){ - cuts[i,j]<-strsplit(gsub("[^[:alnum:] ]", "", cuts[i,j]), " +")[[1]] + + cuts <- matrix(unlist(strsplit(names(bind.bio), ",")), ncol = 2, byrow = T) + for (i in 1:nrow(cuts)) { + for (j in 1:ncol(cuts)) { + cuts[i, j] <- strsplit(gsub("[^[:alnum:] ]", "", cuts[i, j]), " +")[[1]] } } - cuts<-cbind(as.numeric(cuts[,1]),as.numeric(cuts[,2])) - par(mfrow=c(1,2)) - scatter.smooth(dat48$biomass,dat48$HH.sigma.48,col="grey",pch=".",xlab="Biomass",ylab="HH",main="") - for(i in 1:nrow(cuts)){ - points(mean(dat48$biomass[dat48$biomass>=cuts[i,1] & dat48$biomass=cuts[i,1] & dat48$biomass= cuts[i, 1] & dat48$biomass < cuts[i, 2]]), + mean(dat48$HH.sigma.48[dat48$biomass >= cuts[i, 1] & dat48$biomass < cuts[i, 2]]) + ) } - legend("bottomleft",lty=c(1,NA),pch=c(NA,1),legend=c("Loess Curve","Bin Mean"),bty="n") - - scatter.smooth(dat48$biomass,dat48$HV.sigma.48,col="grey",pch=".",xlab="Biomass",ylab="HV",main="") - for(i in 1:nrow(cuts)){ - points(mean(dat48$biomass[dat48$biomass>=cuts[i,1] & dat48$biomass=cuts[i,1] & dat48$biomass= cuts[i, 1] & dat48$biomass < cuts[i, 2]]), + mean(dat48$HV.sigma.48[dat48$biomass >= cuts[i, 1] & dat48$biomass < cuts[i, 2]]) + ) + } + legend("bottomleft", lty = c(1, NA), pch = c(NA, 1), legend = c("Loess Curve", "Bin Mean"), bty = "n") + mtext("Bins each contain 5% of the data", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + + # Figures showing example of variation in backscatter on a single scndate (1st scndate) + par(mfrow = c(2, 2)) + scatter.smooth(dat48$biomass[dat48$scndate == unique(dat48$scndate)[1]], dat48$HH.sigma.48[dat48$scndate == unique(dat48$scndate)[1]], col = "grey", pch = 19, cex = 0.5, xlab = "Biomass (Mg/ha)", ylab = "HH (sigma naught)") + bplot.xy(dat48$biomass[dat48$scndate == unique(dat48$scndate)[1]], dat48$HH.sigma.48[dat48$scndate == unique(dat48$scndate)[1]], N = 15, xlab = "Biomass (Mg/ha)", ylab = "HH (sigma naught)") + + scatter.smooth(dat48$biomass[dat48$scndate == unique(dat48$scndate)[1]], dat48$HV.sigma.48[dat48$scndate == unique(dat48$scndate)[1]], col = "grey", pch = 19, cex = 0.5, xlab = "Biomass (Mg/ha)", ylab = "HV (sigma naught)") + bplot.xy(dat48$biomass[dat48$scndate == unique(dat48$scndate)[1]], dat48$HV.sigma.48[dat48$scndate == unique(dat48$scndate)[1]], N = 15, xlab = "Biomass (Mg/ha)", ylab = "HV (sigma naught)") + mtext(unique(dat48$scndate)[1], side = 3, line = -3, outer = TRUE, cex = 
1, font = 2) + + # Figures showing example of variation in backscatter for a each plot + par(new = T, mfrow = c(1, 1)) + plot(dat48$scndate, dat48$HH.sigma.48, ylim = c(min(dat48$HH.sigma.48), max(dat48$HH.sigma.48)), xaxt = "n", type = "n", col = i, ylab = "HH (sigma naught)", xlab = "") + for (i in unique(dat48$plot)) { + lines(dat48$scndate[dat48$plot == unique(dat48$plot)[i]], dat48$HH.sigma.48[dat48$plot == unique(dat48$plot)[i]], ylim = c(min(dat48$HH.sigma.48), max(dat48$HH.sigma.48)), xaxt = "n", type = "b", col = i, ylab = "HH (sigma naught)", xlab = "") + + par(new = F) } - legend("bottomleft",lty=c(1,NA),pch=c(NA,1),legend=c("Loess Curve","Bin Mean"),bty="n") - mtext("Bins each contain 5% of the data", side=3, line=-3, outer=TRUE, cex=1, font=2) - - #Figures showing example of variation in backscatter on a single scndate (1st scndate) - par(mfrow=c(2,2)) - scatter.smooth(dat48$biomass[dat48$scndate==unique(dat48$scndate)[1]], dat48$HH.sigma.48[dat48$scndate==unique(dat48$scndate)[1]],col="grey",pch=19,cex=0.5,xlab="Biomass (Mg/ha)",ylab="HH (sigma naught)") - bplot.xy(dat48$biomass[dat48$scndate==unique(dat48$scndate)[1]], dat48$HH.sigma.48[dat48$scndate==unique(dat48$scndate)[1]],N=15,xlab="Biomass (Mg/ha)",ylab="HH (sigma naught)") - - scatter.smooth(dat48$biomass[dat48$scndate==unique(dat48$scndate)[1]], dat48$HV.sigma.48[dat48$scndate==unique(dat48$scndate)[1]],col="grey",pch=19,cex=0.5,xlab="Biomass (Mg/ha)",ylab="HV (sigma naught)") - bplot.xy(dat48$biomass[dat48$scndate==unique(dat48$scndate)[1]], dat48$HV.sigma.48[dat48$scndate==unique(dat48$scndate)[1]],N=15,xlab="Biomass (Mg/ha)",ylab="HV (sigma naught)") - mtext(unique(dat48$scndate)[1], side=3, line=-3, outer=TRUE, cex=1, font=2) - - #Figures showing example of variation in backscatter for a each plot - par(new=T, mfrow=c(1,1)) - plot(dat48$scndate, dat48$HH.sigma.48,ylim=c(min(dat48$HH.sigma.48),max(dat48$HH.sigma.48)),xaxt="n",type="n",col=i,ylab="HH (sigma naught)",xlab="") - for(i in unique(dat48$plot)){ - lines(dat48$scndate[dat48$plot==unique(dat48$plot)[i]], dat48$HH.sigma.48[dat48$plot==unique(dat48$plot)[i]],ylim=c(min(dat48$HH.sigma.48),max(dat48$HH.sigma.48)),xaxt="n",type="b",col=i,ylab="HH (sigma naught)",xlab="") - - par(new=F) - } - lines(tapply(dat48$HH.sigma.48,dat48$scndate,mean),col="black",lwd=3,type="b") - axis.Date(side = 1, dat48$scndate[dat48$plot==unique(dat48$plot)[i]], format = "%Y-%m",las=2) - - par(mfrow=c(1,1)) - plot(dat48$scndate, dat48$HV.sigma.48,ylim=c(min(dat48$HV.sigma.48),max(dat48$HV.sigma.48)),xaxt="n",type="n",col=i,ylab="HV (sigma naught)",xlab="") - for(i in unique(dat48$plot)){ - lines(dat48$scndate[dat48$plot==unique(dat48$plot)[i]], dat48$HV.sigma.48[dat48$plot==unique(dat48$plot)[i]],ylim=c(min(dat48$HV.sigma.48),max(dat48$HV.sigma.48)),xaxt="n",type="b",col=i,ylab="HV (sigma naught)",xlab="") - par(new=F) + lines(tapply(dat48$HH.sigma.48, dat48$scndate, mean), col = "black", lwd = 3, type = "b") + axis.Date(side = 1, dat48$scndate[dat48$plot == unique(dat48$plot)[i]], format = "%Y-%m", las = 2) + + par(mfrow = c(1, 1)) + plot(dat48$scndate, dat48$HV.sigma.48, ylim = c(min(dat48$HV.sigma.48), max(dat48$HV.sigma.48)), xaxt = "n", type = "n", col = i, ylab = "HV (sigma naught)", xlab = "") + for (i in unique(dat48$plot)) { + lines(dat48$scndate[dat48$plot == unique(dat48$plot)[i]], dat48$HV.sigma.48[dat48$plot == unique(dat48$plot)[i]], ylim = c(min(dat48$HV.sigma.48), max(dat48$HV.sigma.48)), xaxt = "n", type = "b", col = i, ylab = "HV (sigma naught)", xlab = "") + 
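# A minimal sketch of the cut()-label parsing used for the biomass bins above, with a
# hypothetical label value: cut() names each bin like "(12,48]", and splitting on the comma
# before stripping punctuation recovers the numeric break points stored in `cuts`.
lbl <- "(12,48]"                                  # example of a bin name produced by cut()
parts <- strsplit(lbl, ",")[[1]]                  # "(12"  "48]"
as.numeric(gsub("[^[:alnum:] ]", "", parts))      # 12 48 -- lower and upper break points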
par(new = F) } - axis.Date(side = 1, dat48$scndate[dat48$plot==unique(dat48$plot)[i]], format = "%Y-%m",las=2) -# mtext(paste("Plot",unique(dat48$plot)[1],sep=" "), side=3, line=-3, outer=TRUE, cex=1, font=2) - - - #Figure showing within-plot variation in backscatter values for each scn date - colors=rainbow(length(unique(dat48$scndate))) - par(mfrow=c(1,3)) - plot(dat48$biomass,dat48$HH.sigma.48,type="n",xlab="Biomass",ylab="HH") - for(d in as.character(unique(dat48$scndate))){ - for(p in unique(dat48$plot)){ + axis.Date(side = 1, dat48$scndate[dat48$plot == unique(dat48$plot)[i]], format = "%Y-%m", las = 2) + # mtext(paste("Plot",unique(dat48$plot)[1],sep=" "), side=3, line=-3, outer=TRUE, cex=1, font=2) + + + # Figure showing within-plot variation in backscatter values for each scn date + colors <- rainbow(length(unique(dat48$scndate))) + par(mfrow = c(1, 3)) + plot(dat48$biomass, dat48$HH.sigma.48, type = "n", xlab = "Biomass", ylab = "HH") + for (d in as.character(unique(dat48$scndate))) { + for (p in unique(dat48$plot)) { # lines(dat48$biomass[dat48$plot==p],dat48$HH.sigma.48[dat48$plot==p],col="grey") - x<-dat48$biomass[dat48$plot==p & dat48$scndate==d] - y<-dat48$HH.sigma.48[dat48$plot==p & dat48$scndate==d] - points(x,y,pch=19,cex=0.5,col=colors[as.character(unique(dat48$scndate))==d]) - se<-dat48$HHse_data_48m[dat48$plot==p & dat48$scndate==d] - arrows(x, y-se, x, y+se, length=0.05, angle=90, code=3,col=colors[as.character(unique(dat48$scndate))==d]) + x <- dat48$biomass[dat48$plot == p & dat48$scndate == d] + y <- dat48$HH.sigma.48[dat48$plot == p & dat48$scndate == d] + points(x, y, pch = 19, cex = 0.5, col = colors[as.character(unique(dat48$scndate)) == d]) + se <- dat48$HHse_data_48m[dat48$plot == p & dat48$scndate == d] + arrows(x, y - se, x, y + se, length = 0.05, angle = 90, code = 3, col = colors[as.character(unique(dat48$scndate)) == d]) } } - - plot(dat48$biomass,dat48$HV.sigma.48,type="n",xlab="Biomass",ylab="HV") - for(d in as.character(unique(dat48$scndate))){ - for(p in unique(dat48$plot)){ + + plot(dat48$biomass, dat48$HV.sigma.48, type = "n", xlab = "Biomass", ylab = "HV") + for (d in as.character(unique(dat48$scndate))) { + for (p in unique(dat48$plot)) { # lines(dat48$biomass[dat48$plot==p],dat48$HV.sigma.48[dat48$plot==p],col="grey") - x<-dat48$biomass[dat48$plot==p & dat48$scndate==d] - y<-dat48$HV.sigma.48[dat48$plot==p & dat48$scndate==d] - points(x,y,pch=19,cex=0.5,col=colors[as.character(unique(dat48$scndate))==d]) - se<-dat48$HVse_data_48m[dat48$plot==p & dat48$scndate==d] - arrows(x, y-se, x, y+se, length=0.05, angle=90, code=3,col=colors[as.character(unique(dat48$scndate))==d]) + x <- dat48$biomass[dat48$plot == p & dat48$scndate == d] + y <- dat48$HV.sigma.48[dat48$plot == p & dat48$scndate == d] + points(x, y, pch = 19, cex = 0.5, col = colors[as.character(unique(dat48$scndate)) == d]) + se <- dat48$HVse_data_48m[dat48$plot == p & dat48$scndate == d] + arrows(x, y - se, x, y + se, length = 0.05, angle = 90, code = 3, col = colors[as.character(unique(dat48$scndate)) == d]) } } - plot(dat48$biomass,dat48$HV.sigma.48,type="n",xaxt="n",yaxt="n",xlab="",ylab="",bty="n") - legend("center",pch=19,col=colors,legend=unique(dat48$scndate),bty="n") - mtext("Between-scene, within-plot variation", side=3, line=-2, outer=TRUE, cex=1, font=2) - - #Figure showing temporal variability for each plot in biomass-vs-backscatter space - par(mfrow=c(1,1)) - plot(dat48$biomass,dat48$HH.sigma.48,pch="",xlab="Biomass",ylab="HH") - for(p in unique(dat48$plot)){ - 
lines(dat48$biomass[dat48$plot==p],dat48$HH.sigma.48[dat48$plot==p],col="grey") - x<-mean(dat48$biomass[dat48$plot==p]) - y<-mean(dat48$HH.sigma.48[dat48$plot==p]) - se<-sd(dat48$HH.sigma.48[dat48$plot==p])/sqrt(length(dat48$HH.sigma.48[dat48$plot==p])) - arrows(x, y-se, x, y+se, length=0.05, angle=90, code=3) - points(x,y) + plot(dat48$biomass, dat48$HV.sigma.48, type = "n", xaxt = "n", yaxt = "n", xlab = "", ylab = "", bty = "n") + legend("center", pch = 19, col = colors, legend = unique(dat48$scndate), bty = "n") + mtext("Between-scene, within-plot variation", side = 3, line = -2, outer = TRUE, cex = 1, font = 2) + + # Figure showing temporal variability for each plot in biomass-vs-backscatter space + par(mfrow = c(1, 1)) + plot(dat48$biomass, dat48$HH.sigma.48, pch = "", xlab = "Biomass", ylab = "HH") + for (p in unique(dat48$plot)) { + lines(dat48$biomass[dat48$plot == p], dat48$HH.sigma.48[dat48$plot == p], col = "grey") + x <- mean(dat48$biomass[dat48$plot == p]) + y <- mean(dat48$HH.sigma.48[dat48$plot == p]) + se <- sd(dat48$HH.sigma.48[dat48$plot == p]) / sqrt(length(dat48$HH.sigma.48[dat48$plot == p])) + arrows(x, y - se, x, y + se, length = 0.05, angle = 90, code = 3) + points(x, y) } - legend("topright",lty=c(1,1,NA),pch=c(NA,NA,1),col=c("grey","black","black"),legend=c("Range","SE","Mean"),bty="n") - - plot(dat48$biomass,dat48$HV.sigma.48,pch="",xlab="Biomass",ylab="HV") - for(p in unique(dat48$plot)){ - lines(dat48$biomass[dat48$plot==p],dat48$HV.sigma.48[dat48$plot==p],col="grey") - x<-mean(dat48$biomass[dat48$plot==p]) - y<-mean(dat48$HV.sigma.48[dat48$plot==p]) - se<-sd(dat48$HV.sigma.48[dat48$plot==p])/sqrt(length(dat48$HV.sigma.48[dat48$plot==p])) - arrows(x, y-se, x, y+se, length=0.05, angle=90, code=3) - points(x,y) + legend("topright", lty = c(1, 1, NA), pch = c(NA, NA, 1), col = c("grey", "black", "black"), legend = c("Range", "SE", "Mean"), bty = "n") + + plot(dat48$biomass, dat48$HV.sigma.48, pch = "", xlab = "Biomass", ylab = "HV") + for (p in unique(dat48$plot)) { + lines(dat48$biomass[dat48$plot == p], dat48$HV.sigma.48[dat48$plot == p], col = "grey") + x <- mean(dat48$biomass[dat48$plot == p]) + y <- mean(dat48$HV.sigma.48[dat48$plot == p]) + se <- sd(dat48$HV.sigma.48[dat48$plot == p]) / sqrt(length(dat48$HV.sigma.48[dat48$plot == p])) + arrows(x, y - se, x, y + se, length = 0.05, angle = 90, code = 3) + points(x, y) } - legend("topright",lty=c(1,1,NA),pch=c(NA,NA,1),col=c("grey","black","black"),legend=c("Range","SE","Mean"),bty="n") - mtext("Between-scene variation", side=3, line=-2, outer=TRUE, cex=1, font=2) - - #Figure comparing within-plot variation (averaged over all scndates) to between-scene variation of plot means - par(mfrow=c(1,2)) - plot(dat48$HHse_data_48m,dat48$HHse_data_48m,type="n",ylab="Btwn scene SE of within-plot mean",xlab="Btwn scene mean of within-plot SE",main="HH") - abline(0,1,lwd=2,lty=2,col="grey") - for(p in unique(dat48$plot)){ - y<-mean(dat48$HHse_data_48m[dat48$plot==p]) - se<-sd(dat48$HHse_data_48m[dat48$plot==p])/sqrt(length(dat48$HHse_data_48m[dat48$plot==p])) - - x<-sd(dat48$HH.sigma.48[dat48$plot==p])/sqrt(length(dat48$HH.sigma.48[dat48$plot==p])) + legend("topright", lty = c(1, 1, NA), pch = c(NA, NA, 1), col = c("grey", "black", "black"), legend = c("Range", "SE", "Mean"), bty = "n") + mtext("Between-scene variation", side = 3, line = -2, outer = TRUE, cex = 1, font = 2) + + # Figure comparing within-plot variation (averaged over all scndates) to between-scene variation of plot means + par(mfrow = c(1, 2)) + 
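# The per-plot mean and standard error drawn point-by-point in the loops above can also be
# computed in one pass; a brief equivalent sketch assuming the same dat48 columns:
plot.mean <- tapply(dat48$HH.sigma.48, dat48$plot, mean)
plot.se <- tapply(dat48$HH.sigma.48, dat48$plot, function(v) sd(v) / sqrt(length(v)))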
plot(dat48$HHse_data_48m, dat48$HHse_data_48m, type = "n", ylab = "Btwn scene SE of within-plot mean", xlab = "Btwn scene mean of within-plot SE", main = "HH") + abline(0, 1, lwd = 2, lty = 2, col = "grey") + for (p in unique(dat48$plot)) { + y <- mean(dat48$HHse_data_48m[dat48$plot == p]) + se <- sd(dat48$HHse_data_48m[dat48$plot == p]) / sqrt(length(dat48$HHse_data_48m[dat48$plot == p])) + + x <- sd(dat48$HH.sigma.48[dat48$plot == p]) / sqrt(length(dat48$HH.sigma.48[dat48$plot == p])) # yse<-sd(dat48$HH.sigma.48[dat48$plot==p])/sqrt(length(dat48$HH.sigma.48[dat48$plot==p])) - - points(x,y,pch=19,cex=0.5) - arrows(x, y-se, x, y+se, length=0.05, angle=90, code=3) + + points(x, y, pch = 19, cex = 0.5) + arrows(x, y - se, x, y + se, length = 0.05, angle = 90, code = 3) } - plot(dat48$HVse_data_48m,dat48$HVse_data_48m,type="n",ylab="Btwn scene SE of within-plot mean",xlab="Btwn scene mean of within-plot SE",main="HV") - abline(0,1,lwd=2,lty=2,col="grey") - for(p in unique(dat48$plot)){ - y<-mean(dat48$HVse_data_48m[dat48$plot==p]) - se<-sd(dat48$HVse_data_48m[dat48$plot==p])/sqrt(length(dat48$HVse_data_48m[dat48$plot==p])) - - x<-sd(dat48$HV.sigma.48[dat48$plot==p])/sqrt(length(dat48$HV.sigma.48[dat48$plot==p])) + plot(dat48$HVse_data_48m, dat48$HVse_data_48m, type = "n", ylab = "Btwn scene SE of within-plot mean", xlab = "Btwn scene mean of within-plot SE", main = "HV") + abline(0, 1, lwd = 2, lty = 2, col = "grey") + for (p in unique(dat48$plot)) { + y <- mean(dat48$HVse_data_48m[dat48$plot == p]) + se <- sd(dat48$HVse_data_48m[dat48$plot == p]) / sqrt(length(dat48$HVse_data_48m[dat48$plot == p])) + + x <- sd(dat48$HV.sigma.48[dat48$plot == p]) / sqrt(length(dat48$HV.sigma.48[dat48$plot == p])) # yse<-sd(dat48$HV.sigma.48[dat48$plot==p])/sqrt(length(dat48$HV.sigma.48[dat48$plot==p])) - - points(x,y,pch=19,cex=0.5) - arrows(x, y-se, x, y+se, length=0.05, angle=90, code=3) + + points(x, y, pch = 19, cex = 0.5) + arrows(x, y - se, x, y + se, length = 0.05, angle = 90, code = 3) } - - #Figure showing across-date plot means with across-plot scndate means - colors=rainbow(length(unique(dat48$scndate))) - par(mfrow=c(1,2)) - plot(tapply(dat48$HH.sigma.48,dat48$plot,mean),xlab="Plot",ylab="Across-scene Mean HH") - for(d in as.character(unique(dat48$scndate))){ + + # Figure showing across-date plot means with across-plot scndate means + colors <- rainbow(length(unique(dat48$scndate))) + par(mfrow = c(1, 2)) + plot(tapply(dat48$HH.sigma.48, dat48$plot, mean), xlab = "Plot", ylab = "Across-scene Mean HH") + for (d in as.character(unique(dat48$scndate))) { # points(tapply(dat48$HH.sigma.48[dat48$scndate==d],dat48$plot[dat48$scndate==d],mean),col=colors[as.character(unique(dat48$scndate))==d]) - abline(h=mean(dat48$HH.sigma.48[dat48$scndate==d]),col=colors[as.character(unique(dat48$scndate))==d]) + abline(h = mean(dat48$HH.sigma.48[dat48$scndate == d]), col = colors[as.character(unique(dat48$scndate)) == d]) } - plot(tapply(dat48$HV.sigma.48,dat48$plot,mean),xlab="Plot",ylab="Across-scene Mean HV") - for(d in as.character(unique(dat48$scndate))){ + plot(tapply(dat48$HV.sigma.48, dat48$plot, mean), xlab = "Plot", ylab = "Across-scene Mean HV") + for (d in as.character(unique(dat48$scndate))) { # points(tapply(dat48$HV.sigma.48[dat48$scndate==d],dat48$plot[dat48$scndate==d],mean),col=colors[as.character(unique(dat48$scndate))==d]) - abline(h=mean(dat48$HV.sigma.48[dat48$scndate==d]),col=colors[as.character(unique(dat48$scndate))==d]) + abline(h = mean(dat48$HV.sigma.48[dat48$scndate == d]), col = 
colors[as.character(unique(dat48$scndate)) == d]) + } + + std.err <- function(x) { + sd(x) / sqrt(length(x)) } - - std.err<-function(x){ sd(x)/sqrt(length(x)) } - - #Figure showing between scene and between plot variation - x<-unique(dat48$scndate) - y<-tapply(dat48$HH.sigma.48,dat48$scndate,mean) - se<-tapply(dat48$HH.sigma.48,dat48$scndate,std.err) - par(mfrow=c(1,2)) - plot(x,y,ylim=c(min(y-se),max(y+se)),xlab="Scndate",ylab="HH") #scndate vs among-plot mean return - arrows(x, y-se, x, y+se, length=0.05, angle=90, code=3) #std.err bars are among-plot variation for each scndate - lines(unique(dat48$scndate),tapply(dat48$HH.sigma.48,dat48$scndate,mean)) - abline(h=mean(dat48$HH.sigma.48),col="grey",lwd=2) #mean return value for all scenes on all dates - mtext("Between-scndate variation", side=3, line=-3, outer=TRUE, cex=1, font=2) - - x<-unique(dat48$scndate) - y<-tapply(dat48$HV.sigma.48,dat48$scndate,mean) - se<-tapply(dat48$HV.sigma.48,dat48$scndate,std.err) - plot(x,y,ylim=c(min(y-se),max(y+se)),xlab="Scndate",ylab="HV") - arrows(x, y-se, x, y+se, length=0.05, angle=90, code=3) - lines(unique(dat48$scndate),tapply(dat48$HV.sigma.48,dat48$scndate,mean)) - abline(h=mean(dat48$HV.sigma.48),col="grey",lwd=2) - - #Figure - x<-unique(dat48$plot) - y<-tapply(dat48$HH.sigma.48,list(dat48$scndate,dat48$plot),mean) - se<-tapply(dat48$HH.sigma.48,list(dat48$scndate,dat48$plot),std.err) - plot(dat48$plot,dat48$HH.sigma.48,type="n") - for(p in unique(dat48$plot)){ - points(p,mean(dat48$HH.sigma.48[dat48$plot==p])) + + # Figure showing between scene and between plot variation + x <- unique(dat48$scndate) + y <- tapply(dat48$HH.sigma.48, dat48$scndate, mean) + se <- tapply(dat48$HH.sigma.48, dat48$scndate, std.err) + par(mfrow = c(1, 2)) + plot(x, y, ylim = c(min(y - se), max(y + se)), xlab = "Scndate", ylab = "HH") # scndate vs among-plot mean return + arrows(x, y - se, x, y + se, length = 0.05, angle = 90, code = 3) # std.err bars are among-plot variation for each scndate + lines(unique(dat48$scndate), tapply(dat48$HH.sigma.48, dat48$scndate, mean)) + abline(h = mean(dat48$HH.sigma.48), col = "grey", lwd = 2) # mean return value for all scenes on all dates + mtext("Between-scndate variation", side = 3, line = -3, outer = TRUE, cex = 1, font = 2) + + x <- unique(dat48$scndate) + y <- tapply(dat48$HV.sigma.48, dat48$scndate, mean) + se <- tapply(dat48$HV.sigma.48, dat48$scndate, std.err) + plot(x, y, ylim = c(min(y - se), max(y + se)), xlab = "Scndate", ylab = "HV") + arrows(x, y - se, x, y + se, length = 0.05, angle = 90, code = 3) + lines(unique(dat48$scndate), tapply(dat48$HV.sigma.48, dat48$scndate, mean)) + abline(h = mean(dat48$HV.sigma.48), col = "grey", lwd = 2) + + # Figure + x <- unique(dat48$plot) + y <- tapply(dat48$HH.sigma.48, list(dat48$scndate, dat48$plot), mean) + se <- tapply(dat48$HH.sigma.48, list(dat48$scndate, dat48$plot), std.err) + plot(dat48$plot, dat48$HH.sigma.48, type = "n") + for (p in unique(dat48$plot)) { + points(p, mean(dat48$HH.sigma.48[dat48$plot == p])) } - abline(h=mean(dat48$HH.sigma.48),col="grey",lwd=2) - - - + abline(h = mean(dat48$HH.sigma.48), col = "grey", lwd = 2) + + + # par(new=FALSE, mfrow=c(1,2)) # plot(dat48$biomass,dat48$HH.sigma.48,col="grey",pch=".",xlab="Binned Biomass",ylab="Binned HH") # points(bind.bio,bind.HH) @@ -380,7 +391,7 @@ palsar.plotter<-function(outpath,coord.set,fia){ # points(bind.bio,bind.HV) # points(dat48$biomass[dat48$biomass>=1 & dat48$biomass<=48],dat48$HV.sigma.48[dat48$biomass>=1 &dat48$biomass<=48],pch=".",col="red") # 
mtext("Bins each contain 5% of the data points", side=3, line=-3, outer=TRUE, cex=1, font=2) - + # #breaks data into even-length bins # bind.bio<-tapply(dat48$biomass, cut(dat48$biomass, breaks=seq(0, max(dat48$biomass), 0.05*max(dat48$biomass))),mean) # bind.HH<-tapply(dat48$HH.sigma.48,cut(dat48$HH.sigma.48,breaks=seq(0, max(dat48$HH.sigma.48), 0.05*max(dat48$HH.sigma.48))),mean) @@ -391,11 +402,10 @@ palsar.plotter<-function(outpath,coord.set,fia){ # plot(dat48$biomass,dat48$HV.sigma.48,col="grey",,pch=".",xlab="Binned Biomass",ylab="Binned HV") # points(bind.bio,bind.HV) # mtext("Bins each contain 5% of data range", side=3, line=-3, outer=TRUE, cex=1, font=2) - - par(mfrow=c(1,2)) - bplot.xy(dat48$biomass,dat48$HH.sigma.48,N=15,xlab="biomass",ylab="HH (sigma naught)") - bplot.xy(dat48$biomass,dat48$HV.sigma.48,N=15,xlab="biomass",ylab="HV (sigma naught)") - + + par(mfrow = c(1, 2)) + bplot.xy(dat48$biomass, dat48$HH.sigma.48, N = 15, xlab = "biomass", ylab = "HH (sigma naught)") + bplot.xy(dat48$biomass, dat48$HV.sigma.48, N = 15, xlab = "biomass", ylab = "HV (sigma naught)") + dev.off() - -}#end function \ No newline at end of file +} # end function diff --git a/modules/data.remote/inst/scripts/spp_cleanup.R b/modules/data.remote/inst/scripts/spp_cleanup.R index 5910553f6af..6b0690ada54 100644 --- a/modules/data.remote/inst/scripts/spp_cleanup.R +++ b/modules/data.remote/inst/scripts/spp_cleanup.R @@ -1,64 +1,63 @@ -spp.cleanup<-function(data){ +spp.cleanup <- function(data) { + data$Spp[data$Spp == "ABBA"] <- "ABBA" + data$Spp[data$Spp == "ACPE"] <- "ACPE" + data$Spp[data$Spp == "ACRU"] <- "ACRU" + data$Spp[data$Spp == "ACSA"] <- "ACSA3" + data$Spp[data$Spp == "ACSA4"] <- "ACSA3" + data$Spp[data$Spp == "ACSA5"] <- "ACSA3" + data$Spp[data$Spp == "ACSA6"] <- "ACSA3" + data$Spp[data$Spp == "acsa"] <- "ACSA3" + data$Spp[data$Spp == "ASCA3"] <- "ACSA3" + data$Spp[data$Spp == "ACSP"] <- "ACSP2" + data$Spp[data$Spp == "ACSP2"] <- "ACSP2" + data$Spp[data$Spp == "BEAL2"] <- "BEAL2" + data$Spp[data$Spp == "BEAL"] <- "BEAL2" + data$Spp[data$Spp == "BEAL2"] <- "BEAL2" + data$Spp[data$Spp == "BRAL2"] <- "BEAL2" + data$Spp[data$Spp == "BELE"] <- "BELE" + data$Spp[data$Spp == "BELI"] <- "BELE" + data$Spp[data$Spp == "BEPA"] <- "BEPA" + data$Spp[data$Spp == "FAGR"] <- "FAGR" + data$Spp[data$Spp == "FRAM2"] <- "FRAM2" + data$Spp[data$Spp == "FRAM"] <- "FRAM2" + data$Spp[data$Spp == "FRNI"] <- "FRNI" + data$Spp[data$Spp == "FRPE"] <- "FRPE" + data$Spp[data$Spp == "HAVI4"] <- "HAVI4" + data$Spp[data$Spp == "HAVI"] <- "HAVI4" + data$Spp[data$Spp == "OSVI"] <- "OSVI" + data$Spp[data$Spp == "PIGL"] <- "PIGL" + data$Spp[data$Spp == "PIMA"] <- "PIMA" + data$Spp[data$Spp == "PIAM"] <- "PIMA" + data$Spp[data$Spp == "PIRU"] <- "PIRU" + data$Spp[data$Spp == "PIRE"] <- "PIRE" + data$Spp[data$Spp == "PIER"] <- "PIRE" + data$Spp[data$Spp == "PIST"] <- "PIST" + data$Spp[data$Spp == "PSIT"] <- "PIST" + data$Spp[data$Spp == "POGR4"] <- "POGR4" + data$Spp[data$Spp == "POGR"] <- "POGR4" + data$Spp[data$Spp == "POTR5"] <- "POTR5" + data$Spp[data$Spp == "POTR"] <- "POTR5" + data$Spp[data$Spp == "PRPE2"] <- "PRPE2" + data$Spp[data$Spp == "PRPE"] <- "PRPE2" + data$Spp[data$Spp == "PRSE2"] <- "PRSE2" + data$Spp[data$Spp == "PRSE"] <- "PRSE2" + data$Spp[data$Spp == "QURU"] <- "QURU" + data$Spp[data$Spp == "THOC2"] <- "THOC2" + data$Spp[data$Spp == "THOC"] <- "THOC2" + data$Spp[data$Spp == "TIAM"] <- "TIAM" + data$Spp[data$Spp == "TIMA"] <- "TIAM" + data$Spp[data$Spp == "TSCA"] <- "TSCA" + data$Spp[data$Spp == "ULAM"] <- 
"ULAM" -data$Spp[data$Spp=="ABBA"]<-"ABBA" -data$Spp[data$Spp=="ACPE"]<-"ACPE" -data$Spp[data$Spp=="ACRU"]<-"ACRU" -data$Spp[data$Spp=="ACSA"]<-"ACSA3" - data$Spp[data$Spp=="ACSA4"]<-"ACSA3" - data$Spp[data$Spp=="ACSA5"]<-"ACSA3" - data$Spp[data$Spp=="ACSA6"]<-"ACSA3" - data$Spp[data$Spp=="acsa"]<-"ACSA3" - data$Spp[data$Spp=="ASCA3"]<-"ACSA3" -data$Spp[data$Spp=="ACSP"]<-"ACSP2" - data$Spp[data$Spp=="ACSP2"]<-"ACSP2" -data$Spp[data$Spp=="BEAL2"]<-"BEAL2" - data$Spp[data$Spp=="BEAL"]<-"BEAL2" - data$Spp[data$Spp=="BEAL2"]<-"BEAL2" - data$Spp[data$Spp=="BRAL2"]<-"BEAL2" -data$Spp[data$Spp=="BELE"]<-"BELE" - data$Spp[data$Spp=="BELI"]<-"BELE" -data$Spp[data$Spp=="BEPA"]<-"BEPA" -data$Spp[data$Spp=="FAGR"]<-"FAGR" -data$Spp[data$Spp=="FRAM2"]<-"FRAM2" - data$Spp[data$Spp=="FRAM"]<-"FRAM2" -data$Spp[data$Spp=="FRNI"]<-"FRNI" -data$Spp[data$Spp=="FRPE"]<-"FRPE" -data$Spp[data$Spp=="HAVI4"]<-"HAVI4" - data$Spp[data$Spp=="HAVI"]<-"HAVI4" -data$Spp[data$Spp=="OSVI"]<-"OSVI" -data$Spp[data$Spp=="PIGL"]<-"PIGL" -data$Spp[data$Spp=="PIMA"]<-"PIMA" - data$Spp[data$Spp=="PIAM"]<-"PIMA" -data$Spp[data$Spp=="PIRU"]<-"PIRU" -data$Spp[data$Spp=="PIRE"]<-"PIRE" - data$Spp[data$Spp=="PIER"]<-"PIRE" -data$Spp[data$Spp=="PIST"]<-"PIST" - data$Spp[data$Spp=="PSIT"]<-"PIST" -data$Spp[data$Spp=="POGR4"]<-"POGR4" - data$Spp[data$Spp=="POGR"]<-"POGR4" -data$Spp[data$Spp=="POTR5"]<-"POTR5" - data$Spp[data$Spp=="POTR"]<-"POTR5" -data$Spp[data$Spp=="PRPE2"]<-"PRPE2" - data$Spp[data$Spp=="PRPE"]<-"PRPE2" -data$Spp[data$Spp=="PRSE2"]<-"PRSE2" - data$Spp[data$Spp=="PRSE"]<-"PRSE2" -data$Spp[data$Spp=="QURU"]<-"QURU" -data$Spp[data$Spp=="THOC2"]<-"THOC2" -data$Spp[data$Spp=="THOC"]<-"THOC2" -data$Spp[data$Spp=="TIAM"]<-"TIAM" - data$Spp[data$Spp=="TIMA"]<-"TIAM" -data$Spp[data$Spp=="TSCA"]<-"TSCA" -data$Spp[data$Spp=="ULAM"]<-"ULAM" + data$Spp[data$Spp == "Missing Data"] <- "Missing_Data" + data$Spp[data$Spp == "MissingData"] <- "Missing_Data" + data$Spp[data$Spp == "UN"] <- "Missing_Data" + data$Spp[data$Spp == "un"] <- "Missing_Data" + data$Spp[grep("\\?", data$Spp)] <- "Missing_Data" + data$Spp[grep(" Spp", data$Spp)] <- "Missing_Data" + data$Spp[grep(" SPP", data$Spp)] <- "Missing_Data" + data$Spp[grep(" spp", data$Spp)] <- "Missing_Data" + data$Spp[data$Spp == ""] <- "Missing_Data" -data$Spp[data$Spp=="Missing Data"]<-"Missing_Data" -data$Spp[data$Spp=="MissingData"]<-"Missing_Data" -data$Spp[data$Spp=="UN"]<-"Missing_Data" -data$Spp[data$Spp=="un"]<-"Missing_Data" -data$Spp[grep("\\?",data$Spp)]<-"Missing_Data" -data$Spp[grep(" Spp",data$Spp)]<-"Missing_Data" -data$Spp[grep(" SPP",data$Spp)]<-"Missing_Data" -data$Spp[grep(" spp",data$Spp)]<-"Missing_Data" -data$Spp[data$Spp==""]<-"Missing_Data" - -return(data$Spp) + return(data$Spp) } diff --git a/modules/emulator/R/GaussProcess.R b/modules/emulator/R/GaussProcess.R index d0171e0de17..ace2e31db1e 100644 --- a/modules/emulator/R/GaussProcess.R +++ b/modules/emulator/R/GaussProcess.R @@ -19,28 +19,28 @@ ##' @param ... Additional arguments ##' ##' @author Michael Dietze -GaussProcess <- function(x, y, isotropic = TRUE, nugget = TRUE, method = "bayes", ngibbs = 5000, +GaussProcess <- function(x, y, isotropic = TRUE, nugget = TRUE, method = "bayes", ngibbs = 5000, burnin = 1000, thin = 1, jump.ic = c(1.1, 0.2), prior = "IG", mix = "joint", psi = NULL, zeroMean = FALSE, exclude = NULL, ...) 
{ ## isotropic <- FALSE;nugget<-FALSE;method='bayes';ngibbs <- 50; burnin <- 10;thin<- 1; ## jump.ic<-c(1.1,0.2); prior <- 'unif' - + ## library('dietze') - + if (burnin > ngibbs) { burnin <- floor(ngibbs * 0.25) } - + if (!(method %in% c("bayes", "MLE"))) { stop(cat(method, "not yet implemented")) } - + ## deal with repeated measures - x.full <- x - x.id <- groupid(x) + x.full <- x + x.id <- groupid(x) x.compact <- NULL - n.unique <- max(unique(x.id)) + n.unique <- max(unique(x.id)) n <- length(x) if (is.matrix(x)) { n <- nrow(x) @@ -57,10 +57,10 @@ GaussProcess <- function(x, y, isotropic = TRUE, nugget = TRUE, method = "bayes" y <- y[!duplicated(y)] ## stop('repeated measured detected, but nugget == FALSE') } - ##settings + ## settings ## isotropic <- (is.matrix(d) || (is.list(d) && length(d) == 1)) # isotropic -> correlation same in all directions - - ##calc distance matrix + + ## calc distance matrix d <- NULL if (isotropic) { d <- distance.matrix(x.compact, 2) @@ -71,28 +71,28 @@ GaussProcess <- function(x, y, isotropic = TRUE, nugget = TRUE, method = "bayes" if (!isotropic) { dim <- length(d) } - + ## IC and Priors mu <- mean(y) if (zeroMean) { mu <- 0 } - av <- bv <- 0.001 #nugget IG prior - aw <- bw <- 0.001 #covariance IG prior - ap <- bp <- 0.01 #spatial IG prior - mu.V0 <- 10 ^ ceiling(log10(stats::var(y)) + 2) #mean prior variance - tauw <- tauv <- stats::var(y) * 0.5 + av <- bv <- 0.001 # nugget IG prior + aw <- bw <- 0.001 # covariance IG prior + ap <- bp <- 0.01 # spatial IG prior + mu.V0 <- 10^ceiling(log10(stats::var(y)) + 2) # mean prior variance + tauw <- tauv <- stats::var(y) * 0.5 if (is.null(psi)) { psi <- rep(1, dim) } - S <- calcSpatialCov(d, psi, tauw) #spatial covariance - Tinv <- diag(1 / tauv, n) - W <- y - mu #spatial random effects - nseq <- seq_len(n.unique) - W.full <- W[x.id] + S <- calcSpatialCov(d, psi, tauw) # spatial covariance + Tinv <- diag(1 / tauv, n) + W <- y - mu # spatial random effects + nseq <- seq_len(n.unique) + W.full <- W[x.id] id.count <- as.vector(table(x.id)) - X <- matrix(rep(1, n.unique), n.unique, 1) - + X <- matrix(rep(1, n.unique), n.unique, 1) + ## maximum likelihood if (zeroMean) { parm <- c(tauw, psi) @@ -126,39 +126,40 @@ GaussProcess <- function(x, y, isotropic = TRUE, nugget = TRUE, method = "bayes" } } ## function (theta, d, nugget, myY, maxval = Inf) - + if (method == "MLE") { - return(list(method = method, tauw = tauw, tauv = tauv, mu = mu, psi = psi, - nugget = nugget, isotropic = isotropic, d = d, x.id = x.id, - x.compact = x.compact, y = y, mle = nmin, zeroMean = zeroMean)) + return(list( + method = method, tauw = tauw, tauv = tauv, mu = mu, psi = psi, + nugget = nugget, isotropic = isotropic, d = d, x.id = x.id, + x.compact = x.compact, y = y, mle = nmin, zeroMean = zeroMean + )) } - + ## Storage - samp <- seq(burnin, ngibbs, thin) - nsamp <- length(samp) - tauwjump <- jump(jump.ic[1]) - psijump <- jump(jump.ic[2]) + samp <- seq(burnin, ngibbs, thin) + nsamp <- length(samp) + tauwjump <- jump(jump.ic[1]) + psijump <- jump(jump.ic[2]) if (mix == "each") { psijump <- mvjump(ic = jump.ic[2], dim = ncol(x)) } - tauwgibbs <- matrix(NA, ngibbs, 1) #spatial var - psigibbs <- matrix(NA, ngibbs, dim) #spatial corr - mugibbs <- rep(NA, nsamp) #mean - Wgibbs <- tauvgibbs <- NULL + tauwgibbs <- matrix(NA, ngibbs, 1) # spatial var + psigibbs <- matrix(NA, ngibbs, dim) # spatial corr + mugibbs <- rep(NA, nsamp) # mean + Wgibbs <- tauvgibbs <- NULL if (nugget) { - Wgibbs <- matrix(NA, nsamp, n.unique) #spatial random effects - 
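# A small usage sketch for the GaussProcess() signature above, with toy inputs (X, Y, and the
# settings are hypothetical, not taken from the patch). Internally the spatial covariance is
# the exponential kernel S[i, j] = tauw * exp(-psi * d[i, j]) over the distance matrix d
# built by distance.matrix().
X <- lhc(rbind(c(0, 1), c(0, 1)), 20)                        # 20-point Latin hypercube design in 2-D
Y <- apply(X, 1, function(p) sum(p^2)) + rnorm(20, 0, 0.05)  # noisy toy response
gp <- GaussProcess(X, Y, nugget = TRUE, method = "bayes", ngibbs = 2000, burnin = 500)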
tauvgibbs <- rep(NA, nsamp) #nugget var + Wgibbs <- matrix(NA, nsamp, n.unique) # spatial random effects + tauvgibbs <- rep(NA, nsamp) # nugget var } - + # reset spatial covariance for mle S <- calcSpatialCov(d, psi, tauw) - + ## progress bar progress_bar <- utils::txtProgressBar(min = 0, max = ngibbs, style = 3) - + ## Gibbs loop for (g in seq_len(ngibbs)) { - cc <- 1 ## draw W if (nugget) { @@ -169,8 +170,8 @@ GaussProcess <- function(x, y, isotropic = TRUE, nugget = TRUE, method = "bayes" W <- mvtnorm::rmvnorm(1, M %*% m, M) W.full <- W } else { - ##method 1, draw W's individually - ##for(i in 1:n.unique){ + ## method 1, draw W's individually + ## for(i in 1:n.unique){ ## sel <- nseq[nseq != i] ## u <- which(x.id == i) ## mubar <- S[i,sel] %*% solve(S[sel,sel]) %*% W[sel] @@ -178,38 +179,38 @@ GaussProcess <- function(x, y, isotropic = TRUE, nugget = TRUE, method = "bayes" ## M <- 1/(length(u)/tauv+Sbar) ## m <- sum(y[u]-mu)/tauv ## W[i] <- rnorm(1,M*m,M) - ##} - ##method 2, aggregate Y's - yagg <- tapply(y - mu, x.id, sum) - M <- solve(id.count * diag(1 / tauv, n.unique) + Sinv) - m <- diag(1 / tauv, n.unique) %*% yagg - W <- mvtnorm::rmvnorm(1, M %*% m, M) + ## } + ## method 2, aggregate Y's + yagg <- tapply(y - mu, x.id, sum) + M <- solve(id.count * diag(1 / tauv, n.unique) + Sinv) + m <- diag(1 / tauv, n.unique) %*% yagg + W <- mvtnorm::rmvnorm(1, M %*% m, M) W.full <- W[x.id] } } else { ## no nugget -- deterministic W <- W.full <- y - mu } - + cc <- 2 ## draw psi if (mix == "joint") { psistar <- exp(stats::rnorm(dim, log(psi), p(psijump))) Sstar <- calcSpatialCov(d, psistar, tauw) - ##anum.p <- try(sum(log(dinvgamma(psistar,ap,bp))) + dmvnorm(as.vector(W),rep(0,n.unique),Sstar,log=TRUE),TRUE) - ##aden.p <- sum(log(dinvgamma(psi,ap,bp))) + dmvnorm(as.vector(W),rep(0,n.unique),S,log=TRUE) anum.p <- aden.p <- 0 ## inproper uniform prior + ## anum.p <- try(sum(log(dinvgamma(psistar,ap,bp))) + dmvnorm(as.vector(W),rep(0,n.unique),Sstar,log=TRUE),TRUE) + ## aden.p <- sum(log(dinvgamma(psi,ap,bp))) + dmvnorm(as.vector(W),rep(0,n.unique),S,log=TRUE) anum.p <- aden.p <- 0 ## inproper uniform prior if (prior == "IG") { anum.p <- sum(ldinvgamma(psistar, ap, bp)) aden.p <- sum(ldinvgamma(psi, ap, bp)) } anum.p <- try(mvtnorm::dmvnorm(as.vector(W), rep(0, n.unique), Sstar, log = TRUE) + anum.p, TRUE) aden.p <- mvtnorm::dmvnorm(as.vector(W), rep(0, n.unique), S, log = TRUE) + aden.p - if (is.numeric(anum.p) && - is.finite(anum.p) && - exp(anum.p - aden.p) > stats::runif(1) && - min(psistar) > 0) { + if (is.numeric(anum.p) && + is.finite(anum.p) && + exp(anum.p - aden.p) > stats::runif(1) && + min(psistar) > 0) { psi <- psistar - S <- Sstar + S <- Sstar } psigibbs[g, ] <- psi psijump <- stats::update(psijump, psigibbs) @@ -218,18 +219,18 @@ GaussProcess <- function(x, y, isotropic = TRUE, nugget = TRUE, method = "bayes" psistar <- psi for (i in seq_len(dim)) { psistar[i] <- exp(stats::rnorm(1, log(psi), p(psijump)[i])) - Sstar <- calcSpatialCov(d, psistar, tauw) - anum.p <- aden.p <- 0 ## inproper uniform prior + Sstar <- calcSpatialCov(d, psistar, tauw) + anum.p <- aden.p <- 0 ## inproper uniform prior if (prior == "IG") { anum.p <- sum(ldinvgamma(psistar, ap, bp)) aden.p <- sum(ldinvgamma(psi, ap, bp)) } anum.p <- try(mvtnorm::dmvnorm(as.vector(W), rep(0, n.unique), Sstar, log = TRUE) + anum.p, TRUE) aden.p <- mvtnorm::dmvnorm(as.vector(W), rep(0, n.unique), S, log = TRUE) + aden.p - if (is.numeric(anum.p) && - is.finite(anum.p) && - exp(anum.p - aden.p) > stats::runif(1) && - min(psistar) > 0) 
{ + if (is.numeric(anum.p) && + is.finite(anum.p) && + exp(anum.p - aden.p) > stats::runif(1) && + min(psistar) > 0) { psi <- psistar S <- Sstar } else { @@ -239,34 +240,34 @@ GaussProcess <- function(x, y, isotropic = TRUE, nugget = TRUE, method = "bayes" psigibbs[g, ] <- psi psijump <- stats::update(psijump, psigibbs) } - + cc <- 3 ## draw tauw taustar <- exp(stats::rnorm(1, log(tauw), p(tauwjump))) - Sstar <- calcSpatialCov(d, psi, taustar) - anum <- aden <- 0 + Sstar <- calcSpatialCov(d, psi, taustar) + anum <- aden <- 0 if (prior == "IG") { anum <- ldinvgamma(taustar, aw, bw) aden <- ldinvgamma(tauw, aw, bw) } anum <- try(anum + mvtnorm::dmvnorm(as.vector(W), rep(0, n.unique), Sstar, log = TRUE)) aden <- aden + mvtnorm::dmvnorm(as.vector(W), rep(0, n.unique), S, log = TRUE) - if (is.numeric(anum) && - is.finite(anum) && - exp(anum - aden) > stats::runif(1)) { + if (is.numeric(anum) && + is.finite(anum) && + exp(anum - aden) > stats::runif(1)) { tauw <- taustar S <- Sstar } tauwgibbs[g, ] <- tauw tauwjump <- stats::update(tauwjump, tauwgibbs) - + cc <- 4 ## draw tauv if (nugget) { - tauv <- MCMCpack::rinvgamma(1, av + n / 2, bv + 0.5 * sum((y - rep(mu, n) - W.full) ^ 2)) + tauv <- MCMCpack::rinvgamma(1, av + n / 2, bv + 0.5 * sum((y - rep(mu, n) - W.full)^2)) Tinv <- diag(1 / tauv, n) } - + cc <- 5 ## draw mu if (zeroMean) { @@ -279,11 +280,11 @@ GaussProcess <- function(x, y, isotropic = TRUE, nugget = TRUE, method = "bayes" } else { Sinv <- solve(S) M <- solve(t(X) %*% Sinv %*% X + 1 / mu.V0) - m <- t(X) %*% Sinv %*% y ##[1:10] + m <- t(X) %*% Sinv %*% y ## [1:10] mu <- stats::rnorm(1, M * m, M) } } - + cc <- 6 ## store if (g %in% samp) { @@ -295,12 +296,14 @@ GaussProcess <- function(x, y, isotropic = TRUE, nugget = TRUE, method = "bayes" Wgibbs[i, ] <- W } } - utils::setTxtProgressBar(progress_bar , g) + utils::setTxtProgressBar(progress_bar, g) } close(progress_bar) - - return(list(method = method, tauwjump = tauwjump, tauw = tauwgibbs, - psijump = psijump, psi = psigibbs, mu = mugibbs, tauv = tauvgibbs, - W = Wgibbs, nugget = nugget, isotropic = isotropic, d = d, samp = samp, - x.id = x.id, x.compact = x.compact, y = y, zeroMean = zeroMean)) + + return(list( + method = method, tauwjump = tauwjump, tauw = tauwgibbs, + psijump = psijump, psi = psigibbs, mu = mugibbs, tauv = tauvgibbs, + W = Wgibbs, nugget = nugget, isotropic = isotropic, d = d, samp = samp, + x.id = x.id, x.compact = x.compact, y = y, zeroMean = zeroMean + )) } # GaussProcess diff --git a/modules/emulator/R/arate.R b/modules/emulator/R/arate.R index 9287b7a406a..6517821ac9d 100644 --- a/modules/emulator/R/arate.R +++ b/modules/emulator/R/arate.R @@ -1,7 +1,7 @@ ##' Acceptance rate ##' ##' @name arate -##' @title arate +##' @title arate ##' @export ##' ##' @param x vector of MCMC samples diff --git a/modules/emulator/R/calcSpatialCov.R b/modules/emulator/R/calcSpatialCov.R index 02dcf58b560..836ec5bd7be 100644 --- a/modules/emulator/R/calcSpatialCov.R +++ b/modules/emulator/R/calcSpatialCov.R @@ -1,9 +1,9 @@ ##' @name calcSpatialCov -##' @title calcSpatialCov +##' @title calcSpatialCov ##' @export ##' ##' @param x either a spatial distance matrix or a list of component spatial distance matrices ##' @param ... Additional arguments -##' +##' ##' @author Michael Dietze calcSpatialCov <- function(x, ...) 
UseMethod("calcSpatialCov", x) diff --git a/modules/emulator/R/calcSpatialCov.list.R b/modules/emulator/R/calcSpatialCov.list.R index 88e8f397777..a08a2fb20ae 100644 --- a/modules/emulator/R/calcSpatialCov.list.R +++ b/modules/emulator/R/calcSpatialCov.list.R @@ -1,34 +1,34 @@ ##' Currently assumes an exponential spatial dependency -##' +##' ##' can make gaussian by passing squared distance matrix -##' +##' ##' @name calcSpatialCov.list -##' @title calcSpatialCov.list +##' @title calcSpatialCov.list ##' @export ##' ##' @param d list of component spatial distance matrices ##' @param psi spatial corr ##' @param tau spatial var -##' +##' ##' @author Michael Dietze calcSpatialCov.list <- function(d, psi, tau) { - m <- length(d) + m <- length(d) nl <- nrow(d[[1]]) - H <- matrix(0, nl, nl) + H <- matrix(0, nl, nl) if (length(psi) == 1 && m > 1) { ## apply same psi to all directions psi <- rep(psi, m) } - - #for(i in 1:nl){ + + # for(i in 1:nl){ # for(j in i:nl){ # tmp <- 0 # for(k in 1:m){tmp <- tmp - psi[k]*d[[k]][i,j]} # H[i,j] <- tau*exp(tmp) # H[j,i] <- H[i,j] # } - #} - + # } + for (k in seq_len(m)) { H <- H - psi[k] * d[[k]] } diff --git a/modules/emulator/R/calcSpatialCov.matrix.R b/modules/emulator/R/calcSpatialCov.matrix.R index 9334cf8bb69..29710990811 100644 --- a/modules/emulator/R/calcSpatialCov.matrix.R +++ b/modules/emulator/R/calcSpatialCov.matrix.R @@ -1,19 +1,19 @@ ##' Currently assumes an exponential spatial dependency -##' +##' ##' can make gaussian by passing squared distance matrix -##' +##' ##' @name calcSpatialCov.matrix -##' @title calcSpatialCov.matrix +##' @title calcSpatialCov.matrix ##' @export ##' ##' @param d spatial distance matrix ##' @param psi spatial corr ##' @param tau spatial var -##' +##' ##' @author Michael Dietze calcSpatialCov.matrix <- function(d, psi, tau) { nl <- nrow(d) - H <- matrix(0, nl, nl) + H <- matrix(0, nl, nl) for (i in seq_len(nl)) { # for(j in 1:nl){ H[i,j] <- tau*exp(-psi*d[i,j]) } for (j in seq_len(nl)) { diff --git a/modules/emulator/R/distance.R b/modules/emulator/R/distance.R index c46f7b009ba..36d8b287e02 100644 --- a/modules/emulator/R/distance.R +++ b/modules/emulator/R/distance.R @@ -6,7 +6,7 @@ ##' @param power exponent used for calculating distance, default value of 2 = Pythagorean distance ##' ##' @return dst -##' +##' ##' @author Michael Dietze distance <- function(x, power = 1) { dst <- list() diff --git a/modules/emulator/R/distance.matrix.R b/modules/emulator/R/distance.matrix.R index 2ee97b87e44..bd789467d89 100644 --- a/modules/emulator/R/distance.matrix.R +++ b/modules/emulator/R/distance.matrix.R @@ -5,16 +5,16 @@ ##' @param x matrix of locations in physical or parameter space ##' @param power exponent used for calculating distance, default value of 2 = Pythagorean distance ##' @param dim dimenstion -##' +##' ##' @return d -##' +##' ##' @author Michael Dietze distance.matrix <- function(x, power = 1, dim = 2) { n <- nrow(x) d <- matrix(0, n, n) for (i in seq_len(n)) { for (j in seq_len(n)) { - d[i, j] <- sum((x[i, ] - x[j, ]) ^ power) + d[i, j] <- sum((x[i, ] - x[j, ])^power) # d[i,j] <- 0 for(k in 1:dim){ d[i,j] <- d[i,j] + (x[i,k]-x[j,k])^power } } } diff --git a/modules/emulator/R/distance12.matrix.R b/modules/emulator/R/distance12.matrix.R index 20d3a3c4990..b810f22aa4f 100644 --- a/modules/emulator/R/distance12.matrix.R +++ b/modules/emulator/R/distance12.matrix.R @@ -5,16 +5,16 @@ ##' @param x matrix of locations in parameter space ##' @param n1 number of rows in the original dataset ##' @param power exponent used 
for calculating distance, default value of 2 = Pythagorean distance -##' +##' ##' @return d -##' +##' ##' @author Michael Dietze distance12.matrix <- function(x, n1, power = 1) { n <- nrow(x) d <- matrix(0, n, n - n1) sel <- (n1 + 1):n for (i in seq_len(n)) { - d[i, ] <- (x[i, 1] - x[sel, 1]) ^ power + (x[i, 2] - x[sel, 2]) ^ power + d[i, ] <- (x[i, 1] - x[sel, 1])^power + (x[i, 2] - x[sel, 2])^power } return(d) } # distance12.matrix diff --git a/modules/emulator/R/gp_mle.R b/modules/emulator/R/gp_mle.R index f32ddd51ef4..b8c3eab8c6a 100644 --- a/modules/emulator/R/gp_mle.R +++ b/modules/emulator/R/gp_mle.R @@ -7,19 +7,18 @@ ##' @param nugget allows additional error in Y rather than fix interpolation to go through points ##' @param myY vector of observed data ##' @param maxval maximum value -##' +##' ##' @return val -##' +##' ##' @author Michael Dietze gp_mle <- function(theta, d, nugget, myY, maxval = Inf) { - ## get parms - mu <- theta[1] + mu <- theta[1] tauw <- theta[2] if (tauw <= 0) { return(maxval) } - i <- 3 + i <- 3 tauv <- 0 if (tauv < 0) { return(maxval) @@ -36,9 +35,9 @@ gp_mle <- function(theta, d, nugget, myY, maxval = Inf) { return(maxval) } n <- length(myY) - + S <- calcSpatialCov(d, phi, tauw) - + val <- try(-sum(mvtnorm::dmvnorm(myY, rep(mu, n), S + diag(tauv, n), log = TRUE))) if (!is.numeric(val)) { return(maxval) @@ -54,16 +53,15 @@ gp_mle <- function(theta, d, nugget, myY, maxval = Inf) { ##' zero mean version -##' @title gp_mle2 +##' @title gp_mle2 ##' @export -##' +##' ##' @param theta proposed parameter vector: [mu, tauw, tauv, phi1...phiK] ##' @param d spatial distance matrix ##' @param nugget allows additional error in Y rather than fix interpolation to go through points ##' @param myY vector of observed data ##' @param maxval maximum value gp_mle2 <- function(theta, d, nugget, myY, maxval = Inf) { - ## get parms tauw <- theta[1] if (tauw <= 0) { @@ -86,9 +84,9 @@ gp_mle2 <- function(theta, d, nugget, myY, maxval = Inf) { return(maxval) } n <- length(myY) - + S <- calcSpatialCov(d, phi, tauw) - + val <- try(-sum(mvtnorm::dmvnorm(myY, rep(0, n), S + diag(tauv, n), log = TRUE))) if (!is.numeric(val)) { return(maxval) diff --git a/modules/emulator/R/groupid.R b/modules/emulator/R/groupid.R index 04f0df8b600..624d5322100 100644 --- a/modules/emulator/R/groupid.R +++ b/modules/emulator/R/groupid.R @@ -1,16 +1,15 @@ ##' @name groupid ##' @title groupid ##' @export -##' +##' ##' @param x matrix of parameter values ##' @author Michael Dietze groupid <- function(x) { - if (is.null(ncol(x))) { ## | ncol(x) == 1){ return(seq_along(x)) } - + n <- nrow(x) v <- rep(NA, n) j <- 1 diff --git a/modules/emulator/R/jump.R b/modules/emulator/R/jump.R index fb8de44e4cc..d65e8281655 100644 --- a/modules/emulator/R/jump.R +++ b/modules/emulator/R/jump.R @@ -5,16 +5,16 @@ ##' @param ic optional data vector ##' @param rate target acceptance rate ##' @param ... Addtional arguments -##' +##' ##' @author Michael Dietze jump <- function(ic = 0, rate = 0.4, ...) 
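# gp_mle() above is a plain objective function (negative log-likelihood), so it can be handed
# to a generic optimiser. A hedged sketch for the no-nugget, isotropic case; the parameter
# packing c(mu, tauw, phi) and the objects y and d (response and distance matrix) are assumed.
theta0 <- c(mean(y), var(y), 1)                               # starting mu, tauw, phi
fit <- optim(theta0, gp_mle, d = d, nugget = FALSE, myY = y)  # fit$par holds the MLE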
{ return(methods::new("jump", history = ic, arate = 0, target = rate)) } # jump ##' multivariate version -##' @title mvjump +##' @title mvjump ##' @export -##' +##' ##' @param ic optional data vector ##' @param rate target acceptance rate ##' @param nc NetCDF object containing target variable diff --git a/modules/emulator/R/ldinvgamma.R b/modules/emulator/R/ldinvgamma.R index 48a484d6168..655ee9123ca 100644 --- a/modules/emulator/R/ldinvgamma.R +++ b/modules/emulator/R/ldinvgamma.R @@ -1,21 +1,20 @@ ##' Log-dinvgamma, based on MCMCpack -##' +##' ##' @name ldinvgamma ##' @title ldinvgamma ##' @export ##' ##' @param x vector of quantiles ##' @param shape,scale shape and scale parameters for the inverse Gamma distribution -##' +##' ##' @return log.density -##' +##' ##' @author Michael Dietze ldinvgamma <- function(x, shape, scale = 1) { - if (shape <= 0 | scale <= 0) { - stop("Shape or scale parameter negative in dinvgamma().\n") - } - alpha <- shape - beta <- scale - return(alpha * log(beta) - lgamma(alpha) - (alpha + 1) * log(x) - (beta / x)) + if (shape <= 0 | scale <= 0) { + stop("Shape or scale parameter negative in dinvgamma().\n") + } + alpha <- shape + beta <- scale + return(alpha * log(beta) - lgamma(alpha) - (alpha + 1) * log(x) - (beta / x)) } # ldinvgamma - diff --git a/modules/emulator/R/lhc.R b/modules/emulator/R/lhc.R index b1883fa0260..0f8f67a4685 100644 --- a/modules/emulator/R/lhc.R +++ b/modules/emulator/R/lhc.R @@ -1,25 +1,24 @@ ##' Latin Hyper Cube -##' +##' ##' Simple uniform sampling with LHC permutation -##' +##' ##' @name lhc ##' @title lhc ##' @export ##' ##' @param x <- list (n.dim x 2) ##' @param n.samp number of samples -##' +##' ##' @author Michael Dietze lhc <- function(x, n.samp) { - n.dim <- nrow(x) - samp <- permute <- matrix(stats::runif(n.dim * n.samp), n.dim, n.samp) - for (i in seq_len(n.dim)) { - permute[i, ] <- order(permute[i, ]) - } - for (i in seq_len(n.dim)) { - myseq <- seq(x[i, 1], x[i, 2], length = n.samp + 1) - samp[i, ] <- stats::runif(n.samp, myseq[permute[i, ]], myseq[permute[i, ] + 1]) - } - return(t(samp)) + n.dim <- nrow(x) + samp <- permute <- matrix(stats::runif(n.dim * n.samp), n.dim, n.samp) + for (i in seq_len(n.dim)) { + permute[i, ] <- order(permute[i, ]) + } + for (i in seq_len(n.dim)) { + myseq <- seq(x[i, 1], x[i, 2], length = n.samp + 1) + samp[i, ] <- stats::runif(n.samp, myseq[permute[i, ]], myseq[permute[i, ] + 1]) + } + return(t(samp)) } # lhc - diff --git a/modules/emulator/R/nderiv.R b/modules/emulator/R/nderiv.R index 485f645f6f9..75067aaca9e 100644 --- a/modules/emulator/R/nderiv.R +++ b/modules/emulator/R/nderiv.R @@ -1,7 +1,7 @@ ##' @name nderiv ##' @title nderiv ##' @export -##' +##' ##' @param x Name of variable to plot on X axis ##' @param y Name of variable to plot on Y axis ##' @return der @@ -19,7 +19,7 @@ nderiv <- function(x, y) { return(der) } for (i in 2:(n - 1)) { - der[i] <- (y[i + 1] - y[i - 1])/(x[i + 1] - x[i - 1]) + der[i] <- (y[i + 1] - y[i - 1]) / (x[i + 1] - x[i - 1]) } return(der) } # nderiv diff --git a/modules/emulator/R/p.R b/modules/emulator/R/p.R index efffeaeb6c4..307e6790122 100644 --- a/modules/emulator/R/p.R +++ b/modules/emulator/R/p.R @@ -1,8 +1,8 @@ ##' @name p ##' @title p ##' @export -##' +##' ##' @param x jump distribution ##' @param ... Additional arguments -##' +##' p <- function(x, ...) 
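# Two quick sketches for helpers restyled above, using made-up values: ldinvgamma() evaluates
# the log inverse-gamma density, and lhc() draws a stratified Latin-hypercube design over the
# parameter ranges given one row per dimension.
ldinvgamma(2, shape = 3, scale = 1)        # log density of an IG(3, 1) at x = 2
ranges <- rbind(c(0, 1), c(5, 10))         # lower and upper bound for each of two parameters
design <- lhc(ranges, 10)                  # 10 x 2 design matrix, one column per parameter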
UseMethod("p", x) diff --git a/modules/emulator/R/p.jump.R b/modules/emulator/R/p.jump.R index 39983217787..3b460ca9390 100644 --- a/modules/emulator/R/p.jump.R +++ b/modules/emulator/R/p.jump.R @@ -1,9 +1,9 @@ ##' @name p.jump ##' @title p.jump ##' @export -##' +##' ##' @param jmp jump parameter -##' +##' ##' @author Michael Dietze p.jump <- function(jmp) { n <- length(attr(jmp, "history")) @@ -13,9 +13,9 @@ p.jump <- function(jmp) { ##' @name p.mvjump ##' @title p.mvjump ##' @export -##' +##' ##' @param jmp jump parameter -##' +##' p.mvjump <- function(jmp) { n <- nrow(attr(jmp, "history")) return(attr(jmp, "history")[n, ]) diff --git a/modules/emulator/R/plot.jump.R b/modules/emulator/R/plot.jump.R index 170d9e3d22c..4919516f1a9 100644 --- a/modules/emulator/R/plot.jump.R +++ b/modules/emulator/R/plot.jump.R @@ -1,20 +1,22 @@ ##' @name plot.jump ##' @title plot.jump ##' @export plot.jump -##' +##' ##' @param jmp jump parameter -##' +##' ##' @author Michael Dietze plot.jump <- function(jmp) { graphics::par(mfrow = c(1, 2)) - plot(attr(jmp, "history"), - ylab = "Jump Parameter", - main = "Jump Parameter") + plot(attr(jmp, "history"), + ylab = "Jump Parameter", + main = "Jump Parameter" + ) graphics::abline(h = mean(attr(jmp, "history"), na.rm = TRUE)) - plot(attr(jmp, "arate"), - main = "Acceptance Rate", - ylim = c(0, 1), - ylab = "Acceptance Rate") + plot(attr(jmp, "arate"), + main = "Acceptance Rate", + ylim = c(0, 1), + ylab = "Acceptance Rate" + ) graphics::abline(h = mean(attr(jmp, "arate"), na.rm = TRUE)) graphics::abline(h = attr(jmp, "target"), col = 2) } # plot.jump @@ -33,18 +35,21 @@ plot.mvjump <- function(jmp) { plot( attr(jmp, "history")[, 1], ylab = "Jump Parameter", - main = "Jump Parameter") + main = "Jump Parameter" + ) graphics::abline(h = mean(attr(jmp, "history")[, 1], na.rm = TRUE)) graphics::text( 0.9 * length(attr(jmp, "history")[, 1]), min(attr(jmp, "history")[, 1]) + 0.8 * (max(attr(jmp, "history")[, 1]) - min(attr(jmp, "history")[, 1])), - paste("mean=", mean(attr(jmp, "history")[, 1]))) + paste("mean=", mean(attr(jmp, "history")[, 1])) + ) plot( attr(jmp, "arate"), ylab = "Acceptance Rate", main = "Acceptance Rate", - ylim = c(0, 1)) + ylim = c(0, 1) + ) graphics::abline(h = attr(jmp, "target")) graphics::abline(h = mean(attr(jmp, "arate"), na.rm = TRUE), col = 2) } # plot.mvjump diff --git a/modules/emulator/R/predict.GP.R b/modules/emulator/R/predict.GP.R index f230e0edb20..9942e974223 100644 --- a/modules/emulator/R/predict.GP.R +++ b/modules/emulator/R/predict.GP.R @@ -1,16 +1,15 @@ ##' @name predict.GP ##' @title predict.GP ##' @export -##' +##' ##' @param gp Gaussian Process ##' @param xpred value of x where prediction should be made ##' @param cI credible interval ##' @param pI prediction interval ##' @param splinefcns spline functions -##' +##' ##' @author Michael Dietze predict.GP <- function(gp, xpred, cI = NULL, pI = NULL, splinefcns = NULL) { - npred <- length(xpred) if (is.matrix(xpred)) { npred <- nrow(xpred) @@ -18,13 +17,13 @@ predict.GP <- function(gp, xpred, cI = NULL, pI = NULL, splinefcns = NULL) { nugget <- gp$nugget isotropic <- gp$isotropic d <- gp$d - dim <- 1 #; if(!isotropic) dim <- length(d) + dim <- 1 # ; if(!isotropic) dim <- length(d) x <- gp$x.compact x.id <- gp$x.id n.unique <- length(gp$x.id) # npred <- npred-n.unique y <- gp$y - + dprime <- NULL if (isotropic) { dprime <- distance.matrix(rbind(xpred, x), 2) @@ -35,12 +34,12 @@ predict.GP <- function(gp, xpred, cI = NULL, pI = NULL, splinefcns = NULL) { dprime <- 
distance(rbind(xpred, x), 2) } } - + if (gp$method == "bayes") { samp <- gp$samp tauw <- coda::mcmc(gp$tauw[samp, ]) - psi <- coda::mcmc(gp$psi[samp, ]) - mu <- coda::mcmc(gp$mu) + psi <- coda::mcmc(gp$psi[samp, ]) + mu <- coda::mcmc(gp$mu) tauv <- W <- NULL if (nugget) { tauv <- coda::mcmc(gp$tauv) @@ -53,7 +52,7 @@ predict.GP <- function(gp, xpred, cI = NULL, pI = NULL, splinefcns = NULL) { tauw <- gp$tauw tauv <- gp$tauv } - + ## Krige w/o interval if ((is.null(cI) && is.null(pI)) || gp$method == "MLE") { psibar <- NULL @@ -69,17 +68,19 @@ predict.GP <- function(gp, xpred, cI = NULL, pI = NULL, splinefcns = NULL) { tauwbar <- stats::median(tauw) Sprime <- calcSpatialCov(dprime, psibar, tauwbar) S12 <- Sprime[1:(npred * dim), (npred * dim + 1):(n.unique + npred * dim)] - S22 <- Sprime[(npred * dim + 1):(n.unique + npred * dim), - (npred * dim + 1):(n.unique + npred * dim)] + S22 <- Sprime[ + (npred * dim + 1):(n.unique + npred * dim), + (npred * dim + 1):(n.unique + npred * dim) + ] S22inv <- solve(S22) if (gp$zeroMean) { ey <- eyprime <- 0 } else { - ey <- eyprime <- stats::median(mu) #mean(y) + ey <- eyprime <- stats::median(mu) # mean(y) } ybar <- tapply(y, x.id, mean) yprime <- eyprime + S12 %*% S22inv %*% (ybar - ey) - + if (!is.null(splinefcns)) { ## add trend surface back on for (i in seq_len(nrow(xpred))) { @@ -92,10 +93,10 @@ predict.GP <- function(gp, xpred, cI = NULL, pI = NULL, splinefcns = NULL) { } yprime <- yprime + y.trend } - + return(yprime) } - + ### Credible and prediction intervals nsamp <- length(samp) # cInt <- pInt <- matrix(NA,nsamp,npred*dim) @@ -124,7 +125,7 @@ predict.GP <- function(gp, xpred, cI = NULL, pI = NULL, splinefcns = NULL) { S12 <- Sprime[1:(npred), (npred + 1):(n.unique + npred)] S11 <- Sprime[1:(npred), 1:(npred)] Sbar <- S11 - S12 %*% S22inv %*% t(S12) - + y.trend <- 0 if (!is.null(splinefcns)) { ## add trend surface back on @@ -137,19 +138,19 @@ predict.GP <- function(gp, xpred, cI = NULL, pI = NULL, splinefcns = NULL) { y.trend[i] <- y0 + sum(f - y0) } } - + if (nugget) { - Wprime <- mvtnorm::rmvnorm(1, S12 %*% S22inv %*% (W[i, ]), Sbar) + Wprime <- mvtnorm::rmvnorm(1, S12 %*% S22inv %*% (W[i, ]), Sbar) cInt[j, ] <- mu[i] + Wprime + y.trend pInt[j, ] <- stats::rnorm(npred * dim, cInt[j, ], sqrt(tauv1)) } else { cInt[j, ] <- mu[i] + S12 %*% S22inv %*% (y - mu[i]) + y.trend - mypred <- try(mvtnorm::rmvnorm(1, cInt[j, ], Sbar), silent = TRUE) ##wrap to prevent eigen failure + mypred <- try(mvtnorm::rmvnorm(1, cInt[j, ], Sbar), silent = TRUE) ## wrap to prevent eigen failure if (is.numeric(mypred)) { pInt[j, ] <- mypred } } - utils::setTxtProgressBar(progress_bar , i) + utils::setTxtProgressBar(progress_bar, i) } close(progress_bar) cIntQuant <- pIntQuant <- NULL diff --git a/modules/emulator/R/predict.density.R b/modules/emulator/R/predict.density.R index e82ad0016da..12f94fad3f8 100644 --- a/modules/emulator/R/predict.density.R +++ b/modules/emulator/R/predict.density.R @@ -1,26 +1,26 @@ ##' Simple interpolation of a density object to new points -##' +##' ##' @name predict.density ##' @title predict.density ##' @export -##' +##' ##' @param den density object ##' @param xnew new x coordinate -##' +##' ##' @return ynew ##' ##' @author Michael Dietze predict.density <- function(den, xnew) { neval <- length(den$x) - nnew <- length(xnew) - ynew <- rep(NA, nnew) + nnew <- length(xnew) + ynew <- rep(NA, nnew) for (i in seq_len(nnew)) { j <- findInterval(xnew[i], den$x) if (j == 0 || j == neval) { - ynew[i] <- 0 ## don't extrapolate beyond range,set to 0 
+ ynew[i] <- 0 ## don't extrapolate beyond range,set to 0 } else { - ynew[i] <- den$y[j] + (den$y[j + 1] - den$y[j]) / - (den$x[j + 1] - den$x[j]) * + ynew[i] <- den$y[j] + (den$y[j + 1] - den$y[j]) / + (den$x[j + 1] - den$x[j]) * (xnew[i] - den$x[j]) } } diff --git a/modules/emulator/R/summarize.GP.R b/modules/emulator/R/summarize.GP.R index d84163ec90c..1a9ab7d6311 100644 --- a/modules/emulator/R/summarize.GP.R +++ b/modules/emulator/R/summarize.GP.R @@ -1,17 +1,17 @@ ##' @name summarize.GP ##' @title summarize.GP ##' @export -##' +##' ##' @param gp Gaussian Process ##' @param pdf_file filename you want figures written out to ##' @param txt_file filename you want figures written out to -##' +##' ##' @author Michael Dietze summarize.GP <- function(gp, pdf_file = NULL, txt_file = NULL) { - nugget <- gp$nugget + nugget <- gp$nugget isotropic <- gp$isotropic - d <- gp$d - samp <- gp$samp + d <- gp$d + samp <- gp$samp if (is.null(pdf_file)) { graphics::par(ask = TRUE) } else { @@ -20,13 +20,13 @@ summarize.GP <- function(gp, pdf_file = NULL, txt_file = NULL) { if (!is.null(txt_file)) { sink(txt_file) } - + plot(gp$tauwjump) # title('JUMP: TAUW') - + plot(gp$psijump) # title('JUMP: PSI') - + tauw <- coda::mcmc(gp$tauw[samp, ]) psi <- coda::mcmc(gp$psi[samp, ]) mu <- coda::mcmc(gp$mu) @@ -53,12 +53,12 @@ summarize.GP <- function(gp, pdf_file = NULL, txt_file = NULL) { graphics::title("PSI") plot(mu) graphics::title("MU") - + ## plot ACF graphics::par(mfrow = c(1, 1)) if (isotropic) { xseq <- seq(0, max(d) / 2, length = 100) - plot(xseq, mean(tauw) * exp(-mean(psi) * xseq ^ 2), type = "l") + plot(xseq, mean(tauw) * exp(-mean(psi) * xseq^2), type = "l") } else { ## anisotropic rng <- 0 @@ -70,9 +70,11 @@ summarize.GP <- function(gp, pdf_file = NULL, txt_file = NULL) { for (k in seq_len(dim)) { acorr[, k] <- exp(-mean(psi[, k]) * xseq^2) } - plot(0, 0, type = "n", xlim = c(0, rng/2), - ylim = c(0, max(acorr)), xlab = "Parameter Distance", - ylab = "Correlation") + plot(0, 0, + type = "n", xlim = c(0, rng / 2), + ylim = c(0, max(acorr)), xlab = "Parameter Distance", + ylab = "Correlation" + ) for (k in seq_len(dim)) { graphics::lines(xseq, acorr[, k], col = k) } diff --git a/modules/emulator/R/update.jump.R b/modules/emulator/R/update.jump.R index 5a06f9c0f96..4f178c1861d 100644 --- a/modules/emulator/R/update.jump.R +++ b/modules/emulator/R/update.jump.R @@ -1,26 +1,26 @@ ##' @name update.jump ##' @title update.jump ##' @export -##' +##' ##' @param jmp jump parameter ##' @param chain mcmc chain -##' +##' ##' @return jmp updated jump parameter -##' +##' ##' @author Michael Dietze update.jump <- function(jmp, chain) { ## check for valid typing if (is.null(jmp)) { stop("jump is NULL") } - + ## update counter cnt <- attr(jmp, "count") + 1 attr(jmp, "count") <- cnt clen <- attr(jmp, "clen") - + ## update jump parm - if (cnt%%clen == 0) { + if (cnt %% clen == 0) { a <- max(arate(chain[(cnt - clen + 1):cnt, ]), 1 / clen) l <- length(attr(jmp, "history")) j <- attr(jmp, "history")[l] @@ -31,9 +31,9 @@ update.jump <- function(jmp, chain) { } ## multivariate version -##' @title update.mvjump +##' @title update.mvjump ##' @export -##' +##' ##' @param jmp jump parameter ##' @param chain mcmc chain @@ -42,12 +42,12 @@ update.mvjump <- function(jmp, chain) { if (is.null(jmp)) { stop("jump is NULL") } - + ## update counter cnt <- attr(jmp, "count") + 1 attr(jmp, "count") <- cnt clen <- attr(jmp, "clen") - + ## update jump parm if (cnt %% clen == 0) { hnew <- rep(NA, ncol(chain)) diff --git 
a/modules/emulator/R/zzz.R b/modules/emulator/R/zzz.R index 05e060a1c62..58a3f35a247 100644 --- a/modules/emulator/R/zzz.R +++ b/modules/emulator/R/zzz.R @@ -1,12 +1,17 @@ ##' define a class for automatically tuning jump distributions ##' ##' @export -##' +##' ##' @author Michael Dietze -methods::setClass("jump", methods::representation(history = "numeric", count = "numeric", target = "numeric", - clen = "numeric", arate = "numeric"), - prototype = list(history = vector("numeric", 0), count = 0, target = 0.4, - clen = 100, arate = vector("numeric", 0))) +methods::setClass("jump", methods::representation( + history = "numeric", count = "numeric", target = "numeric", + clen = "numeric", arate = "numeric" +), +prototype = list( + history = vector("numeric", 0), count = 0, target = 0.4, + clen = 100, arate = vector("numeric", 0) +) +) ## chain = mcmc chain history = jump parm history count = counter to update jump parm ## target = target acceptance rate clen = update period (recompute when count > clen) methods::setIs("jump", "list") @@ -14,11 +19,16 @@ methods::setIs("jump", "list") ##' multivariate version of jump class ##' ##' @export -##' -methods::setClass("mvjump", methods::representation(history = "matrix", count = "numeric", target = "numeric", - clen = "numeric", arate = "numeric", mydim = "numeric"), - prototype = list(history = matrix(NA, 0, 0), count = 0, target = 0.4, - clen = 100, arate = vector("numeric", 0), mydim = 1)) +##' +methods::setClass("mvjump", methods::representation( + history = "matrix", count = "numeric", target = "numeric", + clen = "numeric", arate = "numeric", mydim = "numeric" +), +prototype = list( + history = matrix(NA, 0, 0), count = 0, target = 0.4, + clen = 100, arate = vector("numeric", 0), mydim = 1 +) +) ## history = jump parm history count = counter to update jump parm target = target ## acceptance rate clen = update period (recompute when count > clen) methods::setIs("mvjump", "list") diff --git a/modules/meta.analysis/R/approx.posterior.R b/modules/meta.analysis/R/approx.posterior.R index cb856c6d0d5..84a3dfb0952 100644 --- a/modules/meta.analysis/R/approx.posterior.R +++ b/modules/meta.analysis/R/approx.posterior.R @@ -1,7 +1,7 @@ ##' Approximate the posterior MCMC with a closed form pdf ##' ##' returns priors where posterior MCMC are missing -##' +##' ##' NOTE: this function is similar to PEcAn.priors::fit.dist ##' @title Approximate posterior ##' @param trait.mcmc meta analysis outputs @@ -10,7 +10,7 @@ ##' @param outdir directory in which to plot results ##' @param filename.flag text to be included in the posteriors.pdf filename to make unique ##' @return posteriors data frame, similar to priors, -##' but with closed form pdfs fit to meta-analysis results +##' but with closed form pdfs fit to meta-analysis results ##' @export ##' @author David LeBauer, Carl Davidson, Mike Dietze ##' @examples @@ -20,38 +20,36 @@ ##' approx.posterior(trait.mcmc, priors = prior.distns) ##' } approx.posterior <- function(trait.mcmc, priors, trait.data = NULL, outdir = NULL, filename.flag = "") { - ## initialization posteriors <- priors do.plot <- !is.null(outdir) if (do.plot == TRUE) { grDevices::pdf(file.path(outdir, paste("posteriors", filename.flag, ".pdf", sep = ""))) } - + ## loop over traits for (trait in names(trait.mcmc)) { - dat <- trait.mcmc[[trait]] vname <- colnames(dat[[1]]) if ("beta.o" %in% vname) { dat <- as.matrix(dat)[, "beta.o"] } - + pdist <- priors[trait, "distn"] pparm <- as.numeric(priors[trait, 2:3]) ptrait <- trait - + ## first determine the 
candidate set of models based on any range restrictions zerobound <- c("exp", "gamma", "lnorm", "weibull") if (pdist %in% "beta") { - m <- mean(dat) - v <- stats::var(dat) - k <- (1 - m)/m - a <- (k / ((1 + k) ^ 2 * v) - 1) / (1 + k) - b <- a * k + m <- mean(dat) + v <- stats::var(dat) + k <- (1 - m) / m + a <- (k / ((1 + k)^2 * v) - 1) / (1 + k) + b <- a * k fit <- try(suppressWarnings(MASS::fitdistr(dat, "beta", list(shape1 = a, shape2 = b))), silent = TRUE) - + if (do.plot) { x <- seq(0, 1, length = 1000) plot(stats::density(dat), col = 2, lwd = 2, main = trait) @@ -60,9 +58,10 @@ approx.posterior <- function(trait.mcmc, priors, trait.data = NULL, outdir = NUL } graphics::lines(x, stats::dbeta(x, fit$estimate[1], fit$estimate[2]), lwd = 2, type = "l") graphics::lines(x, stats::dbeta(x, pparm[1], pparm[2]), lwd = 3, type = "l", col = 3) - graphics::legend("topleft", - legend = c("data", "prior", "post", "approx"), - col = c("purple", 3, 2, 1), lwd = 2) + graphics::legend("topleft", + legend = c("data", "prior", "post", "approx"), + col = c("purple", 3, 2, 1), lwd = 2 + ) } posteriors[trait, "parama"] <- fit$estimate[1] posteriors[trait, "paramb"] <- fit$estimate[2] @@ -74,7 +73,7 @@ approx.posterior <- function(trait.mcmc, priors, trait.data = NULL, outdir = NUL fit[[2]] <- try(suppressWarnings(MASS::fitdistr(dat, "lognormal")), silent = TRUE) fit[[3]] <- try(suppressWarnings(MASS::fitdistr(dat, "weibull")), silent = TRUE) fit[[4]] <- try(suppressWarnings(MASS::fitdistr(dat, "normal")), silent = TRUE) - + if (!trait == "cuticular_cond") { fit[[5]] <- try(suppressWarnings(MASS::fitdistr(dat, "gamma")), silent = TRUE) dist.names <- c(dist.names, "gamma") @@ -82,10 +81,12 @@ approx.posterior <- function(trait.mcmc, priors, trait.data = NULL, outdir = NUL failfit.bool <- sapply(fit, class) == "try-error" fit[failfit.bool] <- NULL dist.names <- dist.names[!failfit.bool] - - fparm <- lapply(fit, function(x) { as.numeric(x$estimate) }) + + fparm <- lapply(fit, function(x) { + as.numeric(x$estimate) + }) fAIC <- lapply(fit, stats::AIC) - + bestfit <- which.min(fAIC) posteriors[ptrait, "distn"] <- dist.names[bestfit] posteriors[ptrait, "parama"] <- fit[[bestfit]]$estimate[1] @@ -107,42 +108,48 @@ approx.posterior <- function(trait.mcmc, priors, trait.data = NULL, outdir = NUL .dens_plot(posteriors, priors, ptrait, dat, trait, trait.data) } } - } ## end trait loop - + } ## end trait loop + if (do.plot) { grDevices::dev.off() } - + return(posteriors) } # approx.posterior .dens_plot <- function(posteriors, priors, ptrait, dat, trait, trait.data, - plot_quantiles = c(0.01, 0.99)) { + plot_quantiles = c(0.01, 0.99)) { f <- function(x) { - cl <- call(paste0("d", posteriors[ptrait, "distn"]), - x, - posteriors[ptrait, "parama"], - posteriors[ptrait, "paramb"]) + cl <- call( + paste0("d", posteriors[ptrait, "distn"]), + x, + posteriors[ptrait, "parama"], + posteriors[ptrait, "paramb"] + ) eval(cl) } # f fq <- function(x) { - cl <- call(paste0("q", priors[ptrait, "distn"]), - x, - priors[ptrait, "parama"], - priors[ptrait, "paramb"]) + cl <- call( + paste0("q", priors[ptrait, "distn"]), + x, + priors[ptrait, "parama"], + priors[ptrait, "paramb"] + ) eval(cl) } # fq fp <- function(x) { - cl <- call(paste0("d", priors[ptrait, "distn"]), - x, - priors[ptrait, "parama"], - priors[ptrait, "paramb"]) + cl <- call( + paste0("d", priors[ptrait, "distn"]), + x, + priors[ptrait, "parama"], + priors[ptrait, "paramb"] + ) eval(cl) } # fp - + qbounds <- fq(plot_quantiles) x <- seq(qbounds[1], qbounds[2], length = 
1000) rng <- range(dat) @@ -156,7 +163,8 @@ approx.posterior <- function(trait.mcmc, priors, trait.data = NULL, outdir = NUL } graphics::lines(x, f(x), lwd = 2, type = "l") graphics::lines(x, fp(x), lwd = 3, type = "l", col = 3) - graphics::legend("topleft", - legend = c("data", "prior", "post", "approx"), - col = c("purple", 3, 2, 1), lwd = 2) + graphics::legend("topleft", + legend = c("data", "prior", "post", "approx"), + col = c("purple", 3, 2, 1), lwd = 2 + ) } diff --git a/modules/meta.analysis/R/jagify.R b/modules/meta.analysis/R/jagify.R index 1f80121d31d..075b95da032 100644 --- a/modules/meta.analysis/R/jagify.R +++ b/modules/meta.analysis/R/jagify.R @@ -1,5 +1,5 @@ ##' -##' Convert queried data to format required by JAGS meta-analysis model +##' Convert queried data to format required by JAGS meta-analysis model ##' ##' @name jagify ##' @title Prepare trait data for JAGS meta-analysis @@ -9,19 +9,17 @@ ##' @export ##' @author David LeBauer jagify <- function(result, use_ghs = TRUE) { - - ## Create new column "trt_id" from column 'name'. Remove NAs. Assign treatments. ## Finally, summarize the results by calculating summary statistics from experimental replicates r <- result[!is.na(result$mean), ] r$trt_id <- r$name r <- transform.nas(r) - + # exclude greenhouse data unless requested otherwise - if(!use_ghs){ + if (!use_ghs) { r <- r[r$greenhouse != 1, ] } - + r <- PEcAn.DB::assign.treatments(r) r <- PEcAn.utils::summarize.result(r) r$stat <- as.numeric(r$stat) @@ -31,58 +29,62 @@ jagify <- function(result, use_ghs = TRUE) { r$ghs <- r$greenhouse r$site <- r$site_id r$trt_name <- r$name - - r <- r[, c("stat", "n", "site_id", "trt_id", "mean", "citation_id", "greenhouse", - "ghs", "treatment_id", "site", "trt_name")] - - #order by site_id and trt_id, but make sure "control" is the first trt of each site + + r <- r[, c( + "stat", "n", "site_id", "trt_id", "mean", "citation_id", "greenhouse", + "ghs", "treatment_id", "site", "trt_name" + )] + + # order by site_id and trt_id, but make sure "control" is the first trt of each site uniq <- setdiff(unique(r$trt_id), "control") r$trt_id <- factor(r$trt_id, levels = c("control", uniq[order(uniq)])) r <- r[order(r$site_id, r$trt_id), ] - - #add beta.trt index associated with each trt_id (performed in single.MA, replicated here for matching purposes) + + # add beta.trt index associated with each trt_id (performed in single.MA, replicated here for matching purposes) r$trt_num <- as.integer(factor(r$trt_id, levels = unique(r$trt_id))) - + if (length(r$stat[!is.na(r$stat) & r$stat <= 0]) > 0) { varswithbadstats <- unique(result$vname[which(r$stat <= 0)]) citationswithbadstats <- unique(r$citation_id[which(r$stat <= 0)]) - - PEcAn.logger::logger.warn("there are implausible values of SE: SE <= 0 \n", - "for", varswithbadstats, - "result from citation", citationswithbadstats, "\n", - "SE <=0 set to NA \n") + + PEcAn.logger::logger.warn( + "there are implausible values of SE: SE <= 0 \n", + "for", varswithbadstats, + "result from citation", citationswithbadstats, "\n", + "SE <=0 set to NA \n" + ) r$stat[r$stat <= 0] <- NA } - + rename_jags_columns(r) } # jagify # ==================================================================================================# ##' Function to remove NA values from database queries -##' +##' ##' Transform NA values in data exported from BETYdb -##' +##' ##' @name transform.nas ##' @param data input data -##' -##' @return A data frame NAs sensibly replaced +##' +##' @return A data frame NAs sensibly replaced 
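For intuition about the treatment re-indexing that jagify() performs just above (and that test.jagify.R exercises further below), a minimal standalone sketch in base R, using invented treatment labels rather than real BETYdb data:

  trt <- c("zzz", "control", "aaa")                          # hypothetical trt_id values
  uniq <- setdiff(unique(trt), "control")                    # every non-control treatment
  f <- factor(trt, levels = c("control", uniq[order(uniq)])) # same level-building as jagify()
  as.integer(f)                                              # 3 1 2: "control" is always index 1

The only point is that the factor levels are rebuilt so "control" maps to treatment index 1 regardless of its alphabetical position.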
transform.nas <- function(data) { - #set stat to NA if 0 (uncertainties can only asymptotically approach 0) + # set stat to NA if 0 (uncertainties can only asymptotically approach 0) data$stat[data$stat == 0] <- NA - + # control defaults to 1 data$control[is.na(data$control)] <- 1 - + # site defaults to 0 TODO assign different site for each citation - dsl data$site_id[is.na(data$site_id)] <- 0 - + # greenhouse defaults to false (0) data$greenhouse[is.na(data$greenhouse)] <- 0 - + # number of observations defaults to 2 for statistics, 1 otherwise data$n[is.na(data$n)] <- 1 data$n[data$n == 1 & !is.na(data$stat)] <- 2 - + return(data) } # transform.nas diff --git a/modules/meta.analysis/R/meta.analysis.R b/modules/meta.analysis/R/meta.analysis.R index dfae26db4fa..e1e806fcacc 100644 --- a/modules/meta.analysis/R/meta.analysis.R +++ b/modules/meta.analysis/R/meta.analysis.R @@ -40,12 +40,12 @@ ##' values = list(pft))[[1]] ##' traits <- c("SLA", "Vcmax") ##' trait_string <- paste(shQuote(traits), collapse = ",") -##' +##' ##' # Load traits and priors from BETY ##' species <- PEcAn.DB::query.pft_species(pft, con = con) ##' trait.data <- PEcAn.DB::query.traits(species[["id"]], c("SLA", "Vcmax"), con = con) ##' prior.distns <- PEcAn.DB::query.priors(pft_id, trait_string, con = con) -##' +##' ##' # Pre-process data ##' jagged.data <- lapply(trait.data, PEcAn.MA::jagify) ##' taupriors <- list(tauA = 0.01, @@ -60,27 +60,27 @@ pecan.ma <- function(trait.data, prior.distns, random = FALSE, overdispersed = TRUE, logfile = file.path(outdir, "meta-analysis.log)"), verbose = TRUE) { - - mcmc.object <- list() # initialize output list of mcmc objects for each trait + mcmc.object <- list() # initialize output list of mcmc objects for each trait mcmc.mat <- list() ## Set inputs for jags.model() j.chains <- 4 - j.iter <- as.numeric(j.iter) # Added by SPS 08.27.2013. issue #1803 + j.iter <- as.numeric(j.iter) # Added by SPS 08.27.2013. 
issue #1803 ## log the mcmc chain parameters if (!is.null(logfile)) { sink(file = file.path(outdir, "meta-analysis.log"), split = TRUE) on.exit(sink(NULL), add = TRUE) } if (verbose) { - cat(paste0("Each meta-analysis will be run with: \n", j.iter, - " total iterations,\n", j.chains, - " chains, \n", "a burnin of ", j.iter / 2, " samples,\n", - ", \nthus the total number of samples will be ", j.chains * (j.iter / 2), "\n")) + cat(paste0( + "Each meta-analysis will be run with: \n", j.iter, + " total iterations,\n", j.chains, + " chains, \n", "a burnin of ", j.iter / 2, " samples,\n", + ", \nthus the total number of samples will be ", j.chains * (j.iter / 2), "\n" + )) } for (trait.name in names(trait.data)) { - prior <- prior.distns[trait.name, c("distn", "parama", "paramb", "n")] if (verbose) { @@ -90,44 +90,52 @@ pecan.ma <- function(trait.data, prior.distns, writeLines(paste("------------------------------------------------")) } data <- trait.data[[trait.name]] - data <- data[, which(!colnames(data) %in% c("cite", "trait_id", "se", - "greenhouse", "site_id", "treatment_id", "trt_name", "trt_num"))] ## remove citation and other unneeded columns + data <- data[, which(!colnames(data) %in% c( + "cite", "trait_id", "se", + "greenhouse", "site_id", "treatment_id", "trt_name", "trt_num" + ))] ## remove citation and other unneeded columns ## check for excess missing data if (all(is.na(data[["obs.prec"]]))) { - PEcAn.logger::logger.warn("NO ERROR STATS PROVIDED\n Check meta-analysis Model Convergence", - "and consider turning off Random Effects by", - "setting FALSE", - "in your pecan.xml settings file ") + PEcAn.logger::logger.warn( + "NO ERROR STATS PROVIDED\n Check meta-analysis Model Convergence", + "and consider turning off Random Effects by", + "setting FALSE", + "in your pecan.xml settings file " + ) } if (!random) { data[["site"]] <- rep(1, nrow(data)) - data[["trt"]] <- rep(0, nrow(data)) + data[["trt"]] <- rep(0, nrow(data)) } # print out some data summaries to check if (verbose) { - writeLines(paste0("prior for ", trait.name, " + writeLines(paste0( + "prior for ", trait.name, " (using R parameterization):\n", prior$distn, - "(", prior$parama, ", ", prior$paramb, ")")) - writeLines(paste("data max:", max(data$Y, na.rm = TRUE), - "\ndata min:", min(data$Y, na.rm = TRUE), - "\nmean:", signif(mean(data$Y, na.rm = TRUE), 3), - "\nn:", length(data$Y))) + "(", prior$parama, ", ", prior$paramb, ")" + )) + writeLines(paste( + "data max:", max(data$Y, na.rm = TRUE), + "\ndata min:", min(data$Y, na.rm = TRUE), + "\nmean:", signif(mean(data$Y, na.rm = TRUE), 3), + "\nn:", length(data$Y) + )) writeLines("stem plot of data points") writeLines(paste(graphics::stem(data$Y))) if (any(!is.na(data$obs.prec)) && all(!is.infinite(data$obs.prec))) { writeLines("stem plot of obs.prec:") - writeLines(paste(graphics::stem(data[["obs.prec"]] ^ 2))) + writeLines(paste(graphics::stem(data[["obs.prec"]]^2))) } else { writeLines(paste("no estimates of SD for", trait.name)) } } - jag.model.file <- file.path(outdir, paste0(trait.name, ".model.bug")) # file to store model + jag.model.file <- file.path(outdir, paste0(trait.name, ".model.bug")) # file to store model ## run the meta-analysis in JAGS @@ -145,7 +153,7 @@ pecan.ma <- function(trait.data, prior.distns, if (verbose) { print(summary(jags.out)) } - + jags.out.trunc <- stats::window(jags.out, start = j.iter / 2) mcmc.object[[trait.name]] <- jags.out.trunc diff --git a/modules/meta.analysis/R/meta.analysis.summary.R 
b/modules/meta.analysis/R/meta.analysis.summary.R index 7e28c29ce7b..44427144df0 100644 --- a/modules/meta.analysis/R/meta.analysis.summary.R +++ b/modules/meta.analysis/R/meta.analysis.summary.R @@ -21,41 +21,43 @@ pecan.ma.summary <- function(mcmc.object, pft, outdir, threshold = 1.2, gg = FAL fail <- rep(FALSE, length(mcmc.object)) names(fail) <- names(mcmc.object) not.converged <- data.frame() - + sink(file = file.path(outdir, "meta-analysis.log"), append = TRUE, split = TRUE) for (trait in names(mcmc.object)) { - if (gg) { if (!requireNamespace("ggmcmc", quietly = TRUE)) { PEcAn.logger::logger.severe( "Can't find package 'ggmcmc',", "needed by `PEcAn.MA::meta.analysis.summary()` when `gg = TRUE`.", - "Please install it and try again.") + "Please install it and try again." + ) } } ## new diagnostic plots. But very slow & !any(grepl('^gg', dir(outdir)))){ if (gg) { if (coda::is.mcmc.list(mcmc.object[[trait]])) { ggplot2::theme_set(ggplot2::theme_bw()) - ggmcmc::ggmcmc(ggmcmc::ggs(mcmc.object[[trait]]), - plot = c("ggs_density", "ggs_traceplot", "ggs_autocorrelation", "ggs_Rhat", "ggs_geweke"), - file.path(outdir, paste0("gg.ma.summaryplots.", trait, ".pdf"))) + ggmcmc::ggmcmc(ggmcmc::ggs(mcmc.object[[trait]]), + plot = c("ggs_density", "ggs_traceplot", "ggs_autocorrelation", "ggs_Rhat", "ggs_geweke"), + file.path(outdir, paste0("gg.ma.summaryplots.", trait, ".pdf")) + ) } } - + ## reordering maparms so that beta.o etc not sent to end .maparms <- names(mcmc.object[[trait]][1, ][1][[1]]) - .parms <- c("beta.o", "thetaSD", "trtSD", "ySD") - maparms <- .maparms[c(which(.maparms %in% .parms), which(!.maparms %in% .parms))] - + .parms <- c("beta.o", "thetaSD", "trtSD", "ySD") + maparms <- .maparms[c(which(.maparms %in% .parms), which(!.maparms %in% .parms))] + ## plots for mcmc diagnosis grDevices::pdf(file.path(outdir, paste0("ma.summaryplots.", trait, ".pdf"))) for (i in maparms) { plot(mcmc.object[[trait]][, i], - trace = FALSE, - density = TRUE, - main = paste("summary plots of", i, "for", pft, trait)) + trace = FALSE, + density = TRUE, + main = paste("summary plots of", i, "for", pft, trait) + ) graphics::box(lwd = 2) plot(mcmc.object[[trait]][, i], density = FALSE) graphics::box(lwd = 2) @@ -68,25 +70,29 @@ pecan.ma.summary <- function(mcmc.object, pft, outdir, threshold = 1.2, gg = FAL grDevices::dev.off() ## G-R diagnostics to ensure convergence - gd <- coda::gelman.diag(mcmc.object[[trait]]) - mpsrf <- round(gd$mpsrf, digits = 3) + gd <- coda::gelman.diag(mcmc.object[[trait]]) + mpsrf <- round(gd$mpsrf, digits = 3) if (mpsrf < threshold) { - PEcAn.logger::logger.info(paste("JAGS model converged for", pft, trait, - "\nGD MPSRF = ", mpsrf, "\n")) + PEcAn.logger::logger.info(paste( + "JAGS model converged for", pft, trait, + "\nGD MPSRF = ", mpsrf, "\n" + )) } else { not.converged <- rbind(not.converged, data.frame(pft = pft, trait = trait, mpsrf = mpsrf)) - PEcAn.logger::logger.info(paste("JAGS model did not converge for", pft, trait, - "\nGD MPSRF = ", mpsrf, "\n")) + PEcAn.logger::logger.info(paste( + "JAGS model did not converge for", pft, trait, + "\nGD MPSRF = ", mpsrf, "\n" + )) fail[trait] <- TRUE } } # trait-loop ends - + if (any(fail)) { PEcAn.logger::logger.warn("JAGS model failed to converge for one or more trait. 
Discarding samples.") for (i in seq_len(nrow(not.converged))) { with(not.converged[i, ], PEcAn.logger::logger.info(paste(pft, trait, "MPSRF = ", mpsrf))) } - mcmc.object[fail] <- NULL #discard samples + mcmc.object[fail] <- NULL # discard samples } sink() return(mcmc.object) diff --git a/modules/meta.analysis/R/meta.analysis.write.model.R b/modules/meta.analysis/R/meta.analysis.write.model.R index db3a6e03ba5..048dff16824 100644 --- a/modules/meta.analysis/R/meta.analysis.write.model.R +++ b/modules/meta.analysis/R/meta.analysis.write.model.R @@ -5,7 +5,7 @@ ##' @name write.ma.model ##' @title write.ma.model ##' @param modelfile model template file (ma.model.template.R) -##' @param outfile file name of model created +##' @param outfile file name of model created ##' @param reg.model structure of regression model ##' @param pr.dist A string representing the root distribution name used by R, e.g. 'norm', 'lnorm', 'gamma', 'beta', etc. ##' @param pr.param.a first parameter value accepted by \code{pr.dist} @@ -14,13 +14,12 @@ ##' @param trt.n number of distinct treatments in data ##' @param site.n number of distinct sites in data ##' @param ghs.n = 1 if only non-greenhouse or greenhouse studies included, 2 if both -##' @param tauA parameter a for gamma prior on precision +##' @param tauA parameter a for gamma prior on precision ##' @param tauB parameter b for gamma prior on precision ##' @return Nothing, but as a side effect, the model is written ##' @author David LeBauer and Mike Dietze. -write.ma.model <- function(modelfile, outfile, reg.model, pr.dist, pr.param.a, pr.param.b, n, +write.ma.model <- function(modelfile, outfile, reg.model, pr.dist, pr.param.a, pr.param.b, n, trt.n, site.n, ghs.n, tauA, tauB) { - model.text <- scan(file = modelfile, what = "character", sep = "@") ## chose an uncommon separator in order to capture whole lines model.text <- gsub("%_%", "", model.text) @@ -33,7 +32,7 @@ write.ma.model <- function(modelfile, outfile, reg.model, pr.dist, pr.param.a, p model.text <- gsub("LENGTHG", site.n, model.text) model.text <- gsub("TAUA", format(signif(tauA, 2), scientific = FALSE), model.text) model.text <- gsub("TAUB", format(signif(tauB, 2), scientific = FALSE), model.text) - + if (ghs.n == 1) { model.text <- gsub("\\#GGG", "\\#", model.text) } diff --git a/modules/meta.analysis/R/rename_jags_columns.R b/modules/meta.analysis/R/rename_jags_columns.R index dd6676c307f..7a02213a7d0 100644 --- a/modules/meta.analysis/R/rename_jags_columns.R +++ b/modules/meta.analysis/R/rename_jags_columns.R @@ -6,7 +6,6 @@ ##' @export ##' @author David LeBauer rename_jags_columns <- function(data) { - # Change variable names and calculate obs.prec within data frame # Swap column names; needed for downstream function pecan.ma() colnames(data)[colnames(data) %in% c("greenhouse", "ghs")] <- c("ghs", "greenhouse") @@ -16,18 +15,21 @@ rename_jags_columns <- function(data) { n <- NULL trt_id <- NULL citation_id <- NULL - transformed <- transform(data, - Y = mean, - se = stat, - obs.prec = 1 / (sqrt(n) * stat) ^2, - trt = trt_id, - cite = citation_id) - + transformed <- transform(data, + Y = mean, + se = stat, + obs.prec = 1 / (sqrt(n) * stat)^2, + trt = trt_id, + cite = citation_id + ) + # Subset data frame - selected <- subset(transformed, select = c('Y', 'n', 'site', 'trt', 'ghs', 'obs.prec', - 'se', 'cite', - "greenhouse", "site_id", "treatment_id", "trt_name", "trt_num")) # add original # original versions of greenhouse, site_id, treatment_id, trt_name + selected <- subset(transformed, select = 
c( + "Y", "n", "site", "trt", "ghs", "obs.prec", + "se", "cite", + "greenhouse", "site_id", "treatment_id", "trt_name", "trt_num" + )) # add original # original versions of greenhouse, site_id, treatment_id, trt_name # Return subset data frame return(selected) } -##=============================================================================# +## =============================================================================# diff --git a/modules/meta.analysis/R/run.meta.analysis.R b/modules/meta.analysis/R/run.meta.analysis.R index 360d3453a0a..d5f7cf86d0f 100644 --- a/modules/meta.analysis/R/run.meta.analysis.R +++ b/modules/meta.analysis/R/run.meta.analysis.R @@ -1,65 +1,64 @@ - run.meta.analysis.pft <- function(pft, iterations, random = TRUE, threshold = 1.2, dbfiles, dbcon, use_ghs = TRUE, update = FALSE) { # check to see if get.trait was executed - if (!file.exists(file.path(pft$outdir, "trait.data.Rdata")) || - !file.exists(file.path(pft$outdir, "prior.distns.Rdata"))) { + if (!file.exists(file.path(pft$outdir, "trait.data.Rdata")) || + !file.exists(file.path(pft$outdir, "prior.distns.Rdata"))) { PEcAn.logger::logger.severe("Could not find output from get.trait for", pft$name) return(NA) } - + # check to see if run.meta.analysis can be skipped - if (file.exists(file.path(pft$outdir, "trait.mcmc.Rdata")) && - file.exists(file.path(pft$outdir, "post.distns.Rdata")) && - update != TRUE) { + if (file.exists(file.path(pft$outdir, "trait.mcmc.Rdata")) && + file.exists(file.path(pft$outdir, "post.distns.Rdata")) && + update != TRUE) { PEcAn.logger::logger.info("Assuming get.trait copied results already") return(pft) } - + # make sure there is a posteriorid if (is.null(pft$posteriorid)) { PEcAn.logger::logger.severe("Make sure to pass in pft list from get.trait. 
Missing posteriorid for", pft$name) return(NA) } - + # make sure random and use_ghs is logical, and threshold is numeric # when someone re-reads xml and continues from meta.analysis these can cause bugs (especially the threshold bug is very subtle) - random <- as.logical(random) - use_ghs <- as.logical(use_ghs) + random <- as.logical(random) + use_ghs <- as.logical(use_ghs) threshold <- as.numeric(threshold) - + # get list of existing files so they get ignored saving old.files <- list.files(path = pft$outdir) - + PEcAn.logger::logger.info("-------------------------------------------------------------------") PEcAn.logger::logger.info(" Running meta.analysis for PFT:", pft$name) PEcAn.logger::logger.info("-------------------------------------------------------------------") - + ## Load trait data for PFT trait_env <- new.env() load(file.path(pft$outdir, "trait.data.Rdata"), envir = trait_env) prior_env <- new.env() load(file.path(pft$outdir, "prior.distns.Rdata"), envir = prior_env) - + if (length(trait_env$trait.data) == 0) { PEcAn.logger::logger.info("no trait data for PFT", pft$name, "\n so no meta-analysis will be performed") return(NA) } - + # create path where to store files pathname <- file.path(dbfiles, "posterior", pft$posteriorid) dir.create(pathname, showWarnings = FALSE, recursive = TRUE) - + ## Convert data to format expected by pecan.ma jagged.data <- lapply(trait_env$trait.data, PEcAn.MA::jagify, use_ghs = use_ghs) - + ## Save the jagged.data object, replaces previous madata.Rdata object ## First 6 columns are equivalent and direct inputs into the meta-analysis save(jagged.data, file = file.path(pft$outdir, "jagged.data.Rdata")) - - if(!use_ghs){ + + if (!use_ghs) { # check if any data left after excluding greenhouse all_trait_check <- sapply(jagged.data, nrow) - if(any(all_trait_check == 0)){ + if (any(all_trait_check == 0)) { nodat <- which(all_trait_check == 0) jagged.data[nodat] <- NULL PEcAn.logger::logger.info("No more data left after excluding greenhouse data for the following traits:", paste(names(all_trait_check)[nodat], collapse = ", ")) @@ -68,7 +67,6 @@ run.meta.analysis.pft <- function(pft, iterations, random = TRUE, threshold = 1. check_consistent <- function(data.median, prior, trait, msg_var, perr = 5e-04, pwarn = 0.025) { - p.data <- p.point.in.prior(point = data.median, prior = prior) if (p.data <= 1 - perr & p.data >= perr) { @@ -85,64 +83,65 @@ run.meta.analysis.pft <- function(pft, iterations, random = TRUE, threshold = 1. 
return(1) } - + ## Check that data is consistent with prior for (trait in names(jagged.data)) { - data.median <- stats::median(jagged.data[[trait]][ , 'Y']) - prior <- prior_env$prior.distns[trait, ] - check <- check_consistent(data.median, prior, trait, "data") + data.median <- stats::median(jagged.data[[trait]][, "Y"]) + prior <- prior_env$prior.distns[trait, ] + check <- check_consistent(data.median, prior, trait, "data") if (is.na(check)) { return(NA) } } - + ## Average trait data - trait.average <- sapply(jagged.data, function(x) mean(x$Y, na.rm = TRUE) ) - + trait.average <- sapply(jagged.data, function(x) mean(x$Y, na.rm = TRUE)) + ## Set gamma distribution prior tau_value <- 0.01 prior.variances <- as.data.frame(rep(1, nrow(prior_env$prior.distns))) row.names(prior.variances) <- row.names(prior_env$prior.distns) - prior.variances[names(trait.average), ] <- 0.001 * trait.average ^ 2 + prior.variances[names(trait.average), ] <- 0.001 * trait.average^2 prior.variances["seedling_mortality", 1] <- 1 taupriors <- list(tauA = tau_value, tauB = apply(prior.variances, 1, function(x) min(tau_value, x))) - + ### Run the meta-analysis trait.mcmc <- pecan.ma(jagged.data, - prior_env$prior.distns, - taupriors, - j.iter = iterations, - outdir = pft$outdir, - random = random) - + prior_env$prior.distns, + taupriors, + j.iter = iterations, + outdir = pft$outdir, + random = random + ) + ### Check that meta-analysis posteriors are consistent with priors for (trait in names(trait.mcmc)) { post.median <- stats::median(as.matrix(trait.mcmc[[trait]][, "beta.o"])) - prior <- prior_env$prior.distns[trait, ] + prior <- prior_env$prior.distns[trait, ] check <- check_consistent(post.median, prior, trait, "data") if (is.na(check)) { return(NA) } } - + ### Generate summaries and diagnostics, discard samples if trait failed to converge trait.mcmc <- pecan.ma.summary(trait.mcmc, pft$name, pft$outdir, threshold) - + ### Save the meta.analysis output save(trait.mcmc, file = file.path(pft$outdir, "trait.mcmc.Rdata")) - + post.distns <- approx.posterior(trait.mcmc, prior_env$prior.distns, jagged.data, pft$outdir) dist_MA_path <- file.path(pft$outdir, "post.distns.MA.Rdata") save(post.distns, file = dist_MA_path) dist_path <- file.path(pft$outdir, "post.distns.Rdata") - + # Symlink to post.distns.Rdata (no 'MA' identifier) if (file.exists(dist_path)) { file.remove(dist_path) } file.symlink(dist_MA_path, dist_path) - + ### save and store in database all results except those that were there already for (file in list.files(path = pft$outdir)) { # Skip file if it was there already, or if it's a symlink (like the post.distns.Rdata link above) @@ -155,7 +154,7 @@ run.meta.analysis.pft <- function(pft, iterations, random = TRUE, threshold = 1. } } # run.meta.analysis.pft -##--------------------------------------------------------------------------------------------------## +## --------------------------------------------------------------------------------------------------## ##' Run meta analysis ##' ##' This will use the following items from settings: @@ -178,13 +177,15 @@ run.meta.analysis.pft <- function(pft, iterations, random = TRUE, threshold = 1. 
##' and post.distns.Rdata, respectively ##' @export ##' @author Shawn Serbin, David LeBauer -run.meta.analysis <- function(pfts, iterations, random = TRUE, threshold = 1.2, dbfiles, database, use_ghs = TRUE , update = FALSE) { +run.meta.analysis <- function(pfts, iterations, random = TRUE, threshold = 1.2, dbfiles, database, use_ghs = TRUE, update = FALSE) { # process all pfts dbcon <- PEcAn.DB::db.open(database) on.exit(PEcAn.DB::db.close(dbcon), add = TRUE) - result <- lapply(pfts, run.meta.analysis.pft, iterations = iterations, random = random, - threshold = threshold, dbfiles = dbfiles, dbcon = dbcon, use_ghs = use_ghs, update = update) + result <- lapply(pfts, run.meta.analysis.pft, + iterations = iterations, random = random, + threshold = threshold, dbfiles = dbfiles, dbcon = dbcon, use_ghs = use_ghs, update = update + ) } # run.meta.analysis.R ## ==================================================================================================# #' Run meta-analysis on all PFTs in a (list of) PEcAn settings @@ -198,38 +199,40 @@ runModule.run.meta.analysis <- function(settings) { pfts <- list() pft.names <- character(0) for (i in seq_along(settings)) { - pfts.i <- settings[[i]]$pfts + pfts.i <- settings[[i]]$pfts pft.names.i <- sapply(pfts.i, function(x) x$name) - ind <- which(pft.names.i %in% setdiff(pft.names.i, pft.names)) - pfts <- c(pfts, pfts.i[ind]) - pft.names <- sapply(pfts, function(x) x$name) + ind <- which(pft.names.i %in% setdiff(pft.names.i, pft.names)) + pfts <- c(pfts, pfts.i[ind]) + pft.names <- sapply(pfts, function(x) x$name) } - - PEcAn.logger::logger.info(paste0("Running meta-analysis on all PFTs listed by any Settings object in the list: ", - paste(pft.names, collapse = ", "))) - + + PEcAn.logger::logger.info(paste0( + "Running meta-analysis on all PFTs listed by any Settings object in the list: ", + paste(pft.names, collapse = ", ") + )) + iterations <- settings$meta.analysis$iter - random <- settings$meta.analysis$random.effects$on - use_ghs <- settings$meta.analysis$random.effects$use_ghs - threshold <- settings$meta.analysis$threshold - dbfiles <- settings$database$dbfiles - database <- settings$database$bety + random <- settings$meta.analysis$random.effects$on + use_ghs <- settings$meta.analysis$random.effects$use_ghs + threshold <- settings$meta.analysis$threshold + dbfiles <- settings$database$dbfiles + database <- settings$database$bety run.meta.analysis(pfts, iterations, random, threshold, dbfiles, database, use_ghs) } else if (PEcAn.settings::is.Settings(settings)) { - pfts <- settings$pfts + pfts <- settings$pfts iterations <- settings$meta.analysis$iter - random <- settings$meta.analysis$random.effects$on - use_ghs <- settings$meta.analysis$random.effects$use_ghs - threshold <- settings$meta.analysis$threshold - dbfiles <- settings$database$dbfiles - database <- settings$database$bety + random <- settings$meta.analysis$random.effects$on + use_ghs <- settings$meta.analysis$random.effects$use_ghs + threshold <- settings$meta.analysis$threshold + dbfiles <- settings$database$dbfiles + database <- settings$database$bety run.meta.analysis(pfts, iterations, random, threshold, dbfiles, database, use_ghs, update = settings$meta.analysis$update) } else { stop("runModule.run.meta.analysis only works with Settings or MultiSettings") } } # runModule.run.meta.analysis -##--------------------------------------------------------------------------------------------------# +## 
--------------------------------------------------------------------------------------------------# ##' compare point to prior distribution ##' ##' used to compare data to prior, meta analysis posterior to prior @@ -239,7 +242,9 @@ runModule.run.meta.analysis <- function(settings) { ##' @return result of `p(point, parama, paramb)` ##' @author David LeBauer p.point.in.prior <- function(point, prior) { - out <- do.call(paste0("p", prior$distn), - list(point, prior$parama, prior$paramb)) + out <- do.call( + paste0("p", prior$distn), + list(point, prior$parama, prior$paramb) + ) return(out) } # p.point.in.prior diff --git a/modules/meta.analysis/R/single.MA.R b/modules/meta.analysis/R/single.MA.R index 89377fcb039..8b50c29503e 100644 --- a/modules/meta.analysis/R/single.MA.R +++ b/modules/meta.analysis/R/single.MA.R @@ -1,4 +1,3 @@ - ##' Individual Meta-analysis ##' ##' Individual meta-analysis for a specific trait and PFT is run by the function @@ -22,20 +21,24 @@ single.MA <- function(data, j.chains, j.iter, tauA, tauB, prior, jag.model.file, overdispersed = TRUE) { ## Convert R distributions to JAGS distributions - jagsprior <- PEcAn.utils::r2bugs.distributions(prior) - jagsprior <- jagsprior[, c("distn", "parama", "paramb", "n")] + jagsprior <- PEcAn.utils::r2bugs.distributions(prior) + jagsprior <- jagsprior[, c("distn", "parama", "paramb", "n")] colnames(jagsprior) <- c("distn", "a", "b", "n") - colnames(prior) <- c("distn", "a", "b", "n") + colnames(prior) <- c("distn", "a", "b", "n") # determine what factors to include in meta-analysis - model.parms <- list(ghs = length(unique(data$ghs)), - site = length(unique(data$site)), - trt = length(unique(data$trt))) + model.parms <- list( + ghs = length(unique(data$ghs)), + site = length(unique(data$site)), + trt = length(unique(data$trt)) + ) # define regression model - reg.parms <- list(ghs = "beta.ghs[ghs[k]]", # beta.o will be included by default - site = "beta.site[site[k]]", - trt = "beta.trt[trt[k]]") + reg.parms <- list( + ghs = "beta.ghs[ghs[k]]", # beta.o will be included by default + site = "beta.site[site[k]]", + trt = "beta.trt[trt[k]]" + ) # making sure ghs and trt are factor data$ghs <- as.factor(data$ghs) @@ -75,28 +78,36 @@ single.MA <- function(data, j.chains, j.iter, tauA, tauB, prior, jag.model.file, ### Write JAGS bug file based on user settings and default bug file write.ma.model (modelfile = ### paste(settings$pecanDir,'rscripts/ma.model.template.bug',sep=''), - write.ma.model(modelfile = modelfile, - outfile = jag.model.file, - reg.model = reg.model, - jagsprior$distn, jagsprior$a, jagsprior$b, - n = length(data$Y), - trt.n = model.parms[["trt"]], - site.n = model.parms[["site"]], - ghs.n = model.parms[["ghs"]], - tauA = tauA, tauB = tauB) + write.ma.model( + modelfile = modelfile, + outfile = jag.model.file, + reg.model = reg.model, + jagsprior$distn, jagsprior$a, jagsprior$b, + n = length(data$Y), + trt.n = model.parms[["trt"]], + site.n = model.parms[["site"]], + ghs.n = model.parms[["ghs"]], + tauA = tauA, tauB = tauB + ) if (overdispersed == TRUE) { ## overdispersed chains j.inits <- function(chain) { - list(beta.o = do.call(paste("q", prior$dist, sep = ""), - list(chain * 1 / (j.chains + 1), prior$a, prior$b)), - .RNG.seed = chain, - .RNG.name = "base::Mersenne-Twister") + list( + beta.o = do.call( + paste("q", prior$dist, sep = ""), + list(chain * 1 / (j.chains + 1), prior$a, prior$b) + ), + .RNG.seed = chain, + .RNG.name = "base::Mersenne-Twister" + ) } } else if (overdispersed == FALSE) { ## chains fixed at data 
mean - used if above code does not converge ## invalidates assumptions about convergence, e.g. Gelman-Rubin diagnostic - j.inits <- function(chain) { list(beta.o = mean(data$Y)) } + j.inits <- function(chain) { + list(beta.o = mean(data$Y)) + } } j.model <- rjags::jags.model( diff --git a/modules/meta.analysis/R/zzz.R b/modules/meta.analysis/R/zzz.R index c9394eef533..9f7fce91795 100644 --- a/modules/meta.analysis/R/zzz.R +++ b/modules/meta.analysis/R/zzz.R @@ -5,15 +5,16 @@ # need when calling base generics (in particular, it provides # as.matrix.mcmc.list) but it does not export them, so we can't use @importFrom # to pull in only the subset we need. -# +# # @param libname,pkgname Not used; present for historical reasons # @return nothing # .onLoad <- function(libname, pkgname) { - if(!requireNamespace("coda", quietly = TRUE)){ - PEcAn.logger::logger.severe( - "coda is not installed, but is needed by PEcAn.MA.", - "Try running `install.packages('coda')`") - } - invisible() + if (!requireNamespace("coda", quietly = TRUE)) { + PEcAn.logger::logger.severe( + "coda is not installed, but is needed by PEcAn.MA.", + "Try running `install.packages('coda')`" + ) + } + invisible() } diff --git a/modules/meta.analysis/tests/testthat/test.approx.posterior.R b/modules/meta.analysis/tests/testthat/test.approx.posterior.R index 5be5ee4153b..9ef0e7b0247 100644 --- a/modules/meta.analysis/tests/testthat/test.approx.posterior.R +++ b/modules/meta.analysis/tests/testthat/test.approx.posterior.R @@ -4,9 +4,13 @@ load("data/trait.mcmc.RData") load("data/prior.distns.RData") test_that("test data are as expected", { - expect_equal(names(trait.mcmc), - c("quantum_efficiency", "leaf_respiration_rate_m2", - "stomatal_slope.BB", "SLA", "Vcmax")) + expect_equal( + names(trait.mcmc), + c( + "quantum_efficiency", "leaf_respiration_rate_m2", + "stomatal_slope.BB", "SLA", "Vcmax" + ) + ) expect_is(trait.mcmc, "list") expect_is(trait.mcmc[[1]], "mcmc.list") expect_is(prior.distns, "data.frame") diff --git a/modules/meta.analysis/tests/testthat/test.jagify.R b/modules/meta.analysis/tests/testthat/test.jagify.R index 50949a7a5d7..cdd4d397a4f 100644 --- a/modules/meta.analysis/tests/testthat/test.jagify.R +++ b/modules/meta.analysis/tests/testthat/test.jagify.R @@ -1,21 +1,21 @@ - test_that("jagify correctly assigns treatment index of 1 to all control treatments, regardless of alphabetical order", { ## generate test data; controls assigned to early alphabet and late alphabet trt names - testresult <- data.frame(citation_id = 1, - site_id = rep(1:2, each = 5), - name = rep(letters[1:5],2), - trt_id = as.character(rep(letters[1:5],2)), - control = c(1, rep(0,8), 1), - greenhouse = c(rep(0,5), rep(1,5)), - date = 1, - time = NA, - cultivar_id = 1, - specie_id = 1, - n = 2, - mean = sqrt(1:10), - stat = 1, - statname = "SE", - treatment_id = 1:10 + testresult <- data.frame( + citation_id = 1, + site_id = rep(1:2, each = 5), + name = rep(letters[1:5], 2), + trt_id = as.character(rep(letters[1:5], 2)), + control = c(1, rep(0, 8), 1), + greenhouse = c(rep(0, 5), rep(1, 5)), + date = 1, + time = NA, + cultivar_id = 1, + specie_id = 1, + n = 2, + mean = sqrt(1:10), + stat = 1, + statname = "SE", + treatment_id = 1:10 ) jagged.data <- jagify(testresult) diff --git a/modules/meta.analysis/tests/testthat/test.run.meta.analysis.R b/modules/meta.analysis/tests/testthat/test.run.meta.analysis.R index 1f9c9c9a3fd..6f6b93b294d 100644 --- a/modules/meta.analysis/tests/testthat/test.run.meta.analysis.R +++ 
b/modules/meta.analysis/tests/testthat/test.run.meta.analysis.R @@ -1,13 +1,13 @@ test_that("`runModule.run.meta.analysis` throws an error for incorrect input", { - expect_error(runModule.run.meta.analysis('test'), "only works with Settings or MultiSettings") + expect_error(runModule.run.meta.analysis("test"), "only works with Settings or MultiSettings") }) test_that("`run.meta.analysis` able to call run.meta.analysis.pft for each pft in the input list", { mocked_res <- mockery::mock(1, cycle = TRUE) - mockery::stub(run.meta.analysis, 'run.meta.analysis.pft', mocked_res) - mockery::stub(run.meta.analysis, 'PEcAn.DB::db.open', 1) - mockery::stub(run.meta.analysis, 'PEcAn.DB::db.close', 1) - pfts <- list('ebifarm.salix', 'temperate.coniferous') + mockery::stub(run.meta.analysis, "run.meta.analysis.pft", mocked_res) + mockery::stub(run.meta.analysis, "PEcAn.DB::db.open", 1) + mockery::stub(run.meta.analysis, "PEcAn.DB::db.close", 1) + pfts <- list("ebifarm.salix", "temperate.coniferous") run.meta.analysis(pfts = pfts, iterations = 1, dbfiles = NULL, database = NULL) mockery::expect_called(mocked_res, 2) args <- mockery::mock_args(mocked_res) @@ -25,9 +25,9 @@ test_that("`run.meta.analysis.pft` throws an error if it cannot find output from test_that("`run.meta.analysis.pft` throws an error for missing posteriorid", { pft <- list(outdir = "test", name = "ebifarm.salix") - mockery::stub(run.meta.analysis.pft, 'file.exists', TRUE) + mockery::stub(run.meta.analysis.pft, "file.exists", TRUE) expect_error( run.meta.analysis.pft(pft = pft, iterations = 1, dbfiles = NULL, dbcon = NULL, update = TRUE), "Missing posteriorid" ) -}) \ No newline at end of file +}) diff --git a/modules/photosynthesis/R/Licor.QC.R b/modules/photosynthesis/R/Licor.QC.R index 7b4106e7964..c25b4cc03e0 100644 --- a/modules/photosynthesis/R/Licor.QC.R +++ b/modules/photosynthesis/R/Licor.QC.R @@ -6,35 +6,38 @@ ##' @param curve Whether to do Quality Control by examining the 'ACi' curve, the 'AQ' curve, or both ##' @param tol Code automatically tries to separate ACi and AQ curves in the same dataset by detecting the 'reference' condition for light and CO2 respectively. This is the relative error around the mode in that detection. 
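To make the tol argument concrete, a rough standalone sketch of the mode-plus-tolerance filter applied below, with invented PAR values and a crude table-based mode in place of the package's estimate_mode():

  PARi <- c(1500, 1498, 1503, 1500, 100, 400, 800, 1200)     # hypothetical light levels
  ref <- as.numeric(names(which.max(table(round(PARi)))))    # stand-in for the reference (modal) level
  tol <- 0.05
  sel <- which(abs(PARi - ref) / ref < tol)                  # rows treated as the A-Ci subset
  PARi[sel]                                                  # 1500 1498 1503 1500

Points outside the tolerance band are assumed to belong to the other response curve and are plotted in grey as "other" rather than being quality-checked.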
Licor_QC <- function(dat, curve = c("ACi", "AQ"), tol = 0.05) { - if (!("QC" %in% names(dat))) { dat$QC <- rep(0, nrow(dat)) } pos <- c(0.1, 0.9) - + status <- c("red", "black", "blue", "grey") if ("aci" %in% tolower(curve)) { - ## filter out A-Q curve points ref <- estimate_mode(dat$PARi) if (ref < 1) { return(NULL) } sel <- which(abs(dat$PARi - ref) / ref < tol) - - ulhc <- c(sum(rev(pos) * range(dat$Ci[sel], na.rm = TRUE)), - sum(pos * range(dat$Photo[sel], na.rm = TRUE))) ## upper left hand corner - + + ulhc <- c( + sum(rev(pos) * range(dat$Ci[sel], na.rm = TRUE)), + sum(pos * range(dat$Photo[sel], na.rm = TRUE)) + ) ## upper left hand corner + ## reference plot - plot(dat$Ci[sel], dat$Photo[sel], - col = status[dat$QC[sel] + 2], - pch = 20, cex = 2, - main = paste("CLICK ON OUTLIERS\n", dat$fname[1])) + plot(dat$Ci[sel], dat$Photo[sel], + col = status[dat$QC[sel] + 2], + pch = 20, cex = 2, + main = paste("CLICK ON OUTLIERS\n", dat$fname[1]) + ) graphics::points(dat$Ci[-sel], dat$Photo[-sel], col = status[4], pch = 20, cex = 2) graphics::text(ulhc[1], ulhc[2], "FAIL\nALL", col = "red") - graphics::legend("bottomright", legend = c("fail", "unchecked", "pass", "other"), - col = status, pch = 18, cex = 1.5, bty = "n") - + graphics::legend("bottomright", + legend = c("fail", "unchecked", "pass", "other"), + col = status, pch = 18, cex = 1.5, bty = "n" + ) + ## flag outliers flag <- graphics::identify(c(dat$Ci[sel], ulhc[1]), c(dat$Photo[sel], ulhc[2])) if (length(flag) > 0) { @@ -45,46 +48,52 @@ Licor_QC <- function(dat, curve = c("ACi", "AQ"), tol = 0.05) { } } dat$QC[sel[dat$QC[sel] == 0]] <- 1 - + ## Updated plot - plot(dat$Ci[sel], dat$Photo[sel], - col = status[dat$QC[sel] + 2], - pch = 20, cex = 2, - main = paste("UPDATED", dat$fname[1], "\nclick to undo Outliers")) + plot(dat$Ci[sel], dat$Photo[sel], + col = status[dat$QC[sel] + 2], + pch = 20, cex = 2, + main = paste("UPDATED", dat$fname[1], "\nclick to undo Outliers") + ) graphics::points(dat$Ci[-sel], dat$Photo[-sel], col = status[4], pch = 20, cex = 2) - graphics::legend("bottomright", legend = c("fail", "unchecked", "pass"), - col = status, pch = 18, cex = 1.5, bty = "n") - + graphics::legend("bottomright", + legend = c("fail", "unchecked", "pass"), + col = status, pch = 18, cex = 1.5, bty = "n" + ) + flag <- graphics::identify(dat$Ci[sel], dat$Photo[sel]) ## undo outliers if (length(flag) > 0) { dat$QC[sel[flag]] <- 1 } - } - + if ("aq" %in% tolower(curve)) { - ## filter out A-Ci curve points ref <- estimate_mode(dat$CO2R) if (ref < 1) { return(NULL) } - sel <- which(abs(dat$CO2R - ref)/ref < tol) - - ulhc <- c(sum(rev(pos) * range(dat$PARi[sel], na.rm = TRUE)), - sum(pos * range(dat$Photo[sel], na.rm = TRUE))) ## upper left hand corner - + sel <- which(abs(dat$CO2R - ref) / ref < tol) + + ulhc <- c( + sum(rev(pos) * range(dat$PARi[sel], na.rm = TRUE)), + sum(pos * range(dat$Photo[sel], na.rm = TRUE)) + ) ## upper left hand corner + ## reference plot - plot(dat$PARi[sel], dat$Photo[sel], - col = status[dat$QC[sel] + 2], - pch = 20, cex = 2, - main = paste("CLICK ON OUTLIERS\n", dat$fname[1])) + plot(dat$PARi[sel], dat$Photo[sel], + col = status[dat$QC[sel] + 2], + pch = 20, cex = 2, + main = paste("CLICK ON OUTLIERS\n", dat$fname[1]) + ) graphics::points(dat$PARi[-sel], dat$Photo[-sel], col = status[4], pch = 20, cex = 2) graphics::text(ulhc[1], ulhc[2], "FAIL\nALL", col = "red") - graphics::legend("bottomright", legend = c("fail", "unchecked", "pass", "other"), - col = status, pch = 18, cex = 1.5, bty = "n") - + 
graphics::legend("bottomright", + legend = c("fail", "unchecked", "pass", "other"), + col = status, pch = 18, cex = 1.5, bty = "n" + ) + ## flag outliers flag <- graphics::identify(c(dat$PARi[sel], ulhc[1]), c(dat$Photo[sel], ulhc[2])) if (length(flag) > 0) { @@ -95,16 +104,19 @@ Licor_QC <- function(dat, curve = c("ACi", "AQ"), tol = 0.05) { } } dat$QC[sel[dat$QC[sel] == 0]] <- 1 - + ## updated plot - plot(dat$PARi[sel], dat$Photo[sel], - col = status[dat$QC[sel] + 2], - pch = 20, cex = 2, - main = paste("UPDATED", dat$fname[1], "\nclick to undo Outliers")) + plot(dat$PARi[sel], dat$Photo[sel], + col = status[dat$QC[sel] + 2], + pch = 20, cex = 2, + main = paste("UPDATED", dat$fname[1], "\nclick to undo Outliers") + ) graphics::points(dat$PARi[-sel], dat$Photo[-sel], col = status[4], pch = 20, cex = 2) - graphics::legend("bottomright", legend = c("fail", "unchecked", "pass"), - col = status, pch = 18, cex = 1.5, bty = "n") - + graphics::legend("bottomright", + legend = c("fail", "unchecked", "pass"), + col = status, pch = 18, cex = 1.5, bty = "n" + ) + ## undo outliers flag <- graphics::identify(dat$PARi[sel], dat$Photo[sel]) if (length(flag) > 0) { diff --git a/modules/photosynthesis/R/fitA.R b/modules/photosynthesis/R/fitA.R index 7a30af09b18..9b4647f3afe 100644 --- a/modules/photosynthesis/R/fitA.R +++ b/modules/photosynthesis/R/fitA.R @@ -3,29 +3,30 @@ ##' @author Mike Dietze ##' @author Xiaohui Feng ##' @export -##' +##' ##' @param flux.data data.frame of Licor data, concatenated by rows, and with a leading column 'fname' that is used to count the number of curves and match to covariates ##' @param cov.data data.frame of covariate data. Column names used in formulas ##' @param model list including at least 6 components: the fixed effects model for alpha (a.fixed) and Vcmax (V.fixed), the random effects for these (a.random, V.random), the variable used to match the gas-exchange and covariate data (match), and the number of MCMC interations (n.iter). Additional optional arguments: TPU = TRUE turns on TPU limitation; Temp == 'Bernacchi01' turns on the Bernacchi et al 2001 temperature correction. If this is turned on all parameters are estimated for 25C, otherwise no temperature correction is applied. Setting Temp = 'June2004' will turn on the June et al 2004 Funct Plant Biol temperature correction to Jmax. Note: these two corrections are not mutually exclusive, you can set Temp = c('June2004','Bernacchi2001') -##' +##' ##' Right now the fixed effects are specified as a string using the standard R lm formula syntax, but without the LHS variable (e.g. '~ SLA + chl + SLA:chl'). The tilde is optional. For random effects, the two options right now are just 'leaf' for leaf-level random effects and NULL. 'model' has a default that sets all effects to NULL (fit one curve to all data) and n.iter=1000. -##' +##' fitA <- function(flux.data, cov.data = NULL, model = NULL) { - - ## TO-DO: + ## TO-DO: ## Random effects using design matrix ## Model selection ## output variable selection: Pred Loss, WAIC? 
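Pulling the roxygen description above together, a minimal sketch of how the model argument to fitA() might be assembled; licor_dat and leaf_traits are hypothetical data frames, and the component names and option strings are taken from the documentation and defaults shown in this file:

  model <- list(
    a.fixed = "~ SLA",                       # fixed effects on alpha, lm-style formula with no LHS
    a.random = NULL,                         # no random effect on alpha
    V.fixed = "~ SLA + chl",                 # fixed effects on Vcmax
    V.random = "leaf",                       # leaf-level random effect on Vcmax
    match = "fname",                         # column linking flux.data rows to cov.data rows
    n.iter = 5000,                           # MCMC iterations
    TPU = TRUE,                              # optional: turn on TPU limitation
    Temp = c("Bernacchi01", "June2004")      # optional temperature corrections
  )
  # fit <- fitA(flux.data = licor_dat, cov.data = leaf_traits, model = model)

Leaving the fixed and random terms NULL keeps the default behaviour of fitting one curve to all data.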
## function to do: multiple response curves ## specify priors in model object ## integrate with meta-analysis - + if (is.null(model)) { - model <- list(a.fixed = NULL, a.random = NULL, V.fixed = NULL, V.random = NULL, - n.iter = 5000, match = "fname") + model <- list( + a.fixed = NULL, a.random = NULL, V.fixed = NULL, V.random = NULL, + n.iter = 5000, match = "fname" + ) } out.variables <- c("r0", "vmax0", "alpha0", "Jmax0", "cp0", "tau", "pmean", "pA") - + a.fixed <- model$a.fixed a.random <- model$a.random V.fixed <- model$V.fixed @@ -33,20 +34,20 @@ fitA <- function(flux.data, cov.data = NULL, model = NULL) { if (is.null(model$match)) { model$match <- "fname" } - + dat <- as.data.frame(flux.data) - - id <- dat[, model$match] - n.curves <- length(unique(id)) - curve.id <- as.numeric(as.factor(id)) + + id <- dat[, model$match] + n.curves <- length(unique(id)) + curve.id <- as.numeric(as.factor(id)) curve.code <- tapply(as.character(id), curve.id, unique) - + ## match between gas exchange data and covariates if (!is.null(cov.data)) { ord <- match(curve.code, as.character(cov.data[, model$match])) cov.data <- cov.data[ord, ] } - + ## Vcmax design matrix if (is.null(V.fixed)) { XV <- NULL @@ -57,15 +58,15 @@ fitA <- function(flux.data, cov.data = NULL, model = NULL) { if (length(grep("~", V.fixed)) == 0) { V.fixed <- paste("~", V.fixed) } - XV <- with(cov.data, model.matrix(formula(V.fixed))) + XV <- with(cov.data, model.matrix(formula(V.fixed))) XV.cols <- colnames(XV) XV.cols <- XV.cols[XV.cols != "(Intercept)"] - XV <- as.matrix(XV[, XV.cols]) + XV <- as.matrix(XV[, XV.cols]) colnames(XV) <- XV.cols Vcenter <- apply(XV, 2, mean, na.rm = TRUE) - XV <- t(t(XV) - Vcenter) + XV <- t(t(XV) - Vcenter) } - + ## alpha design matrix if (is.null(a.fixed)) { Xa <- NULL @@ -74,15 +75,15 @@ fitA <- function(flux.data, cov.data = NULL, model = NULL) { print("alpha formula provided but covariate data is absent:", a.fixed) } a.fixed <- ifelse(length(grep("~", a.fixed)) == 0, paste("~", a.fixed), a.fixed) - Xa <- with(cov.data, model.matrix(formula(a.fixed))) - Xa <- as.matrix(Xa[, -which(colnames(Xa) == "(Intercept)")]) + Xa <- with(cov.data, model.matrix(formula(a.fixed))) + Xa <- as.matrix(Xa[, -which(colnames(Xa) == "(Intercept)")]) acenter <- apply(Xa, 2, mean, na.rm = TRUE) - Xa <- t(t(Xa) - acenter) + Xa <- t(t(Xa) - acenter) } - + ## Define JAGS model - - my.model <- " + + my.model <- " model{ ## Priors @@ -125,14 +126,14 @@ To <- 35 ## Representative value, would benifit from spp calibration! ## Vcmax BETAS #RLEAF.V tau.Vleaf~dgamma(0.1,0.1) ## add random leaf effects -#RLEAF.V for(i in 1:nrep){ +#RLEAF.V for(i in 1:nrep){ #RLEAF.V Vleaf[i]~dnorm(0,tau.Vleaf) #RLEAF.V } ## alpha BETAs #RLEAF.A tau.Aleaf~dgamma(0.1,0.1) -#RLEAF.A for(i in 1:nrep){ +#RLEAF.A for(i in 1:nrep){ #RLEAF.A Aleaf[i]~dnorm(0,tau.Aleaf) #RLEAF.A } @@ -163,119 +164,127 @@ To <- 35 ## Representative value, would benifit from spp calibration! 
" -## prep data -sel <- seq_len(nrow(dat)) #which(dat$spp == s) -if("Tleaf" %in% names(dat)){ - if(max(dat$Tleaf) < 100){ # if Tleaf in C, convert to K - dat$Tleaf <- dat$Tleaf + 273.15 - } -} else if (!"Tleaf" %in% names(dat)) { + ## prep data + sel <- seq_len(nrow(dat)) # which(dat$spp == s) + if ("Tleaf" %in% names(dat)) { + if (max(dat$Tleaf) < 100) { # if Tleaf in C, convert to K + dat$Tleaf <- dat$Tleaf + 273.15 + } + } else if (!"Tleaf" %in% names(dat)) { dat$Tleaf <- 25 + 273.15 ## if no Tleaf, assume 25C in Kelvin - warning("No Leaf Temperature provided, setting to 25C\n", - "To change add a column named Tleaf to flux.data data frame") -} + warning( + "No Leaf Temperature provided, setting to 25C\n", + "To change add a column named Tleaf to flux.data data frame" + ) + } -mydat <- list(an = dat$Photo[sel], - pi = dat$Ci[sel], - q = dat$PARi[sel], - T = dat$Tleaf, - n = length(sel), Kc = 46, - Ko = 22000, - po = 21000, - rep = curve.id, - nrep = n.curves) -# Kc<-46 ## Michaelis constant CO2 (Pa) -# Ko<-33000 ## Michaelis constant O2 (Pa) -# po<-21000 ## partial pressure of O2 (Pa) - -## TPU Limitation -if ("TPU" %in% names(model)) { - if (model$TPU == TRUE) { - my.model <- gsub(pattern = "#TPU", " ", my.model) - out.variables <- c(out.variables, "tpu") + mydat <- list( + an = dat$Photo[sel], + pi = dat$Ci[sel], + q = dat$PARi[sel], + T = dat$Tleaf, + n = length(sel), Kc = 46, + Ko = 22000, + po = 21000, + rep = curve.id, + nrep = n.curves + ) + # Kc<-46 ## Michaelis constant CO2 (Pa) + # Ko<-33000 ## Michaelis constant O2 (Pa) + # po<-21000 ## partial pressure of O2 (Pa) + + ## TPU Limitation + if ("TPU" %in% names(model)) { + if (model$TPU == TRUE) { + my.model <- gsub(pattern = "#TPU", " ", my.model) + out.variables <- c(out.variables, "tpu") + } } -} -## Temperature scaling -Vformula <- NULL -if ("Temp" %in% names(model)) { - if ("Bernacchi01" %in% model$Temp) { - my.model <- gsub(pattern = "##B01", " ", my.model) + ## Temperature scaling + Vformula <- NULL + if ("Temp" %in% names(model)) { + if ("Bernacchi01" %in% model$Temp) { + my.model <- gsub(pattern = "##B01", " ", my.model) + } + if ("June2004" %in% model$Temp) { + my.model <- gsub(pattern = "##J04", " ", my.model) + } } - if ("June2004" %in% model$Temp) { - my.model <- gsub(pattern = "##J04", " ", my.model) + + ## VCmax Formulas + Vformula <- NULL + if ("leaf" %in% V.random) { + Vformula <- " + Vleaf[rep[i]]" + my.model <- gsub(pattern = "#RLEAF.V", " ", my.model) + out.variables <- c(out.variables, "tau.Vleaf") } -} -## VCmax Formulas -Vformula <- NULL -if ("leaf" %in% V.random) { - Vformula <- " + Vleaf[rep[i]]" - my.model <- gsub(pattern = "#RLEAF.V", " ", my.model) - out.variables <- c(out.variables, "tau.Vleaf") -} + if (!is.null(XV)) { + Vnames <- gsub(" ", "_", colnames(XV)) + Vformula <- paste( + Vformula, + paste0("+ betaV", Vnames, "*XV[rep[i],", seq_len(ncol(XV)), "]", collapse = " ") + ) + Vpriors <- paste0(" betaV", Vnames, "~dnorm(0,0.001)", collapse = "\n") + my.model <- sub(pattern = "## Vcmax BETAS", Vpriors, my.model) + mydat[["XV"]] <- XV + out.variables <- c(out.variables, paste0("betaV", Vnames)) + } + if (!is.null(Vformula)) { + my.model <- sub(pattern = "#VFORMULA", Vformula, my.model) + } -if (!is.null(XV)) { - Vnames <- gsub(" ", "_", colnames(XV)) - Vformula <- paste(Vformula, - paste0("+ betaV", Vnames, "*XV[rep[i],", seq_len(ncol(XV)), "]", collapse = " ")) - Vpriors <- paste0(" betaV", Vnames, "~dnorm(0,0.001)", collapse = "\n") - my.model <- sub(pattern = "## Vcmax BETAS", Vpriors, my.model) 
- mydat[["XV"]] <- XV - out.variables <- c(out.variables, paste0("betaV", Vnames)) -} -if (!is.null(Vformula)) { - my.model <- sub(pattern = "#VFORMULA", Vformula, my.model) -} - -## alpha Formulas -Aformula <- NULL -if ("leaf" %in% a.random) { - Aformula <- " + Aleaf[rep[i]]" - my.model <- gsub(pattern = "#RLEAF.A", "", my.model) - out.variables <- c(out.variables, "tau.Aleaf") -} + ## alpha Formulas + Aformula <- NULL + if ("leaf" %in% a.random) { + Aformula <- " + Aleaf[rep[i]]" + my.model <- gsub(pattern = "#RLEAF.A", "", my.model) + out.variables <- c(out.variables, "tau.Aleaf") + } -if (!is.null(Xa)) { - Anames <- gsub(" ", "_", colnames(Xa)) - Aformula <- paste(Aformula, paste0("+ betaA", Anames, "*Xa[rep[i],", 1:ncol(Xa), - "]", collapse = " ")) - apriors <- paste0("betaA", Anames, "~dnorm(0,0.001)", collapse = "\n") - my.model <- sub(pattern = "## alpha BETAs", apriors, my.model) - mydat[["Xa"]] <- Xa - out.variables <- c(out.variables, paste0("betaA", Anames)) -} -if (!is.null(Aformula)) { - my.model <- sub(pattern = "#AFORMULA", Aformula, my.model) -} + if (!is.null(Xa)) { + Anames <- gsub(" ", "_", colnames(Xa)) + Aformula <- paste(Aformula, paste0("+ betaA", Anames, "*Xa[rep[i],", 1:ncol(Xa), + "]", + collapse = " " + )) + apriors <- paste0("betaA", Anames, "~dnorm(0,0.001)", collapse = "\n") + my.model <- sub(pattern = "## alpha BETAs", apriors, my.model) + mydat[["Xa"]] <- Xa + out.variables <- c(out.variables, paste0("betaA", Anames)) + } + if (!is.null(Aformula)) { + my.model <- sub(pattern = "#AFORMULA", Aformula, my.model) + } -## Define initial conditions -init <- list() -init[[1]] <- list(r0 = 1.2, vmax0 = 39, alpha0 = 0.25, tau = 10, cp0 = 6, Jmax0 = 80) ## tau.Vleaf=30,beta1=4, beta2=1,beta5=3,tau.Vmon=10,tpu=10, -init[[2]] <- list(r0 = 1, vmax0 = 100, alpha0 = 0.2, tau = 20, cp0 = 4, Jmax0 = 150) ##tau.Vleaf=20,beta1=1,beta2=1,beta5=-1,tau.Vmon=20,tpu=13, -init[[3]] <- list(r0 = 2, vmax0 = 60, alpha0 = 0.28, tau = 20, cp0 = 5, Jmax0 = 60) ##tau.Vleaf=100,beta1=1,beta2=2,beta5=2,tau.Vmon=3,tpu=20, + ## Define initial conditions + init <- list() + init[[1]] <- list(r0 = 1.2, vmax0 = 39, alpha0 = 0.25, tau = 10, cp0 = 6, Jmax0 = 80) ## tau.Vleaf=30,beta1=4, beta2=1,beta5=3,tau.Vmon=10,tpu=10, + init[[2]] <- list(r0 = 1, vmax0 = 100, alpha0 = 0.2, tau = 20, cp0 = 4, Jmax0 = 150) ## tau.Vleaf=20,beta1=1,beta2=1,beta5=-1,tau.Vmon=20,tpu=13, + init[[3]] <- list(r0 = 2, vmax0 = 60, alpha0 = 0.28, tau = 20, cp0 = 5, Jmax0 = 60) ## tau.Vleaf=100,beta1=1,beta2=2,beta5=2,tau.Vmon=3,tpu=20, -mc3 <- rjags::jags.model(file = textConnection(my.model), data = mydat, inits = init, n.chains = 3) + mc3 <- rjags::jags.model(file = textConnection(my.model), data = mydat, inits = init, n.chains = 3) -mc3.out <- rjags::coda.samples(model = mc3, variable.names = out.variables, n.iter = model$n.iter) + mc3.out <- rjags::coda.samples(model = mc3, variable.names = out.variables, n.iter = model$n.iter) -## split output -out <- list(params = NULL, predict = NULL, model = my.model) -mfit <- as.matrix(mc3.out, chains = TRUE) -pred.cols <- union(grep("pA", colnames(mfit)), grep("pmean", colnames(mfit))) -chain.col <- which(colnames(mfit) == "CHAIN") -out$predict <- mat2mcmc.list(mfit[, c(chain.col, pred.cols)]) -out$params <- mat2mcmc.list(mfit[, -pred.cols]) -return(out) + ## split output + out <- list(params = NULL, predict = NULL, model = my.model) + mfit <- as.matrix(mc3.out, chains = TRUE) + pred.cols <- union(grep("pA", colnames(mfit)), grep("pmean", colnames(mfit))) + chain.col <- 
which(colnames(mfit) == "CHAIN") + out$predict <- mat2mcmc.list(mfit[, c(chain.col, pred.cols)]) + out$params <- mat2mcmc.list(mfit[, -pred.cols]) + return(out) } # fitA ##' @name read_Licor ##' @title read_Licor -##' +##' ##' @author Mike Dietze ##' @export -##' +##' ##' @param filename name of the file to read ##' @param sep file delimiter. defaults to tab ##' @param ... optional arguements forwarded to read.table @@ -287,11 +296,13 @@ read_Licor <- function(filename, sep = "\t", ...) { start <- grep(pattern = "OPEN", full) skip <- grep(pattern = "STARTOFDATA", full) for (i in rev(seq_along(start))) { - full <- full[-(start[i]:(skip[i] + 1 * (i > 1)))] # +1 is to deal with second header + full <- full[-(start[i]:(skip[i] + 1 * (i > 1)))] # +1 is to deal with second header } - full <- full[grep("\t", full)] ## skip timestamp lines - dat <- utils::read.table(textConnection(full), header = TRUE, blank.lines.skip = TRUE, - sep = sep, ...) + full <- full[grep("\t", full)] ## skip timestamp lines + dat <- utils::read.table(textConnection(full), + header = TRUE, blank.lines.skip = TRUE, + sep = sep, ... + ) fname <- rep(fbase, nrow(dat)) dat <- as.data.frame(cbind(fname, dat)) return(dat) diff --git a/modules/photosynthesis/R/plots.R b/modules/photosynthesis/R/plots.R index 2a09f4a4b25..af987c4d511 100644 --- a/modules/photosynthesis/R/plots.R +++ b/modules/photosynthesis/R/plots.R @@ -3,8 +3,9 @@ ##' @author Mike Dietze ##' @noRd ciEnvelope <- function(x, ylo, yhi, col = "lightgrey", ...) { - return(graphics::polygon(cbind(c(x, rev(x), x[1]), c(ylo, rev(yhi), ylo[1])), - col = col, border = NA, ...)) + return(graphics::polygon(cbind(c(x, rev(x), x[1]), c(ylo, rev(yhi), ylo[1])), + col = col, border = NA, ... + )) } # ciEnvelope ##' @name plot_photo @@ -17,89 +18,93 @@ ciEnvelope <- function(x, ylo, yhi, col = "lightgrey", ...) 
{ ##' @param byLeaf whether to plot fits on a leaf-by-leaf basis ##' @export plot_photo <- function(data, out, curve = c("ACi", "AQ"), tol = 0.05, byLeaf = TRUE) { - - params <- as.matrix(out$params) + params <- as.matrix(out$params) predict <- as.matrix(out$predict) - CI <- apply(predict, 2, stats::quantile, c(0.025, 0.5, 0.975)) - pmean <- CI[, grep("pmean", colnames(CI))] - pA <- CI[, grep("pA", colnames(CI))] - - ## determine whether the fit was done by leaf or not + CI <- apply(predict, 2, stats::quantile, c(0.025, 0.5, 0.975)) + pmean <- CI[, grep("pmean", colnames(CI))] + pA <- CI[, grep("pA", colnames(CI))] + + ## determine whether the fit was done by leaf or not ## byLeaf = length(grep('tau.',colnames(params))) > 0 || length(grep('beta',colnames(params)))>0 - + if (byLeaf) { - id <- data[, "fname"] - n.curves <- length(unique(id)) - curve.id <- as.numeric(as.factor(as.character(id))) + id <- data[, "fname"] + n.curves <- length(unique(id)) + curve.id <- as.numeric(as.factor(as.character(id))) curve.code <- tapply(as.character(id), curve.id, unique) } else { - n.curves <- 1 - curve.id <- rep(1, nrow(data)) + n.curves <- 1 + curve.id <- rep(1, nrow(data)) curve.code <- "COMBINED" } - + for (c in seq_len(n.curves)) { srow <- which(curve.id == c) dat <- data[srow, ] - + if ("aci" %in% tolower(curve)) { - ## filter out A-Q curve points ref <- estimate_mode(dat$PARi) if (ref < 1) { return(NULL) } - sel <- which(abs(dat$PARi - ref)/ref < tol) - + sel <- which(abs(dat$PARi - ref) / ref < tol) + if (length(sel) > 3) { sel <- sel[order(dat$Ci[sel])] - + ## reference plot - plot(dat$Ci[sel], dat$Photo[sel], - pch = 20, cex = 1.5, - xlab = "Ci (ppm)", ylab = "An (umol m-2 s-1)", - main = paste(curve.code[c], "A-Ci"), ylim = range(pmean)) - ciEnvelope(dat$Ci[sel], pA[1, srow[sel]], pA[3, srow[sel]], col = "grey90") #plot PI - ciEnvelope(dat$Ci[sel], pmean[1, srow[sel]], pmean[3, srow[sel]], col = "grey60") #plot CI - graphics::lines(dat$Ci[sel], pmean[2, srow[sel]], col = 2, lwd = 3) #model line - graphics::points(dat$Ci[sel], dat$Photo[sel], pch = 20, cex = 1.5) #licor data points + plot(dat$Ci[sel], dat$Photo[sel], + pch = 20, cex = 1.5, + xlab = "Ci (ppm)", ylab = "An (umol m-2 s-1)", + main = paste(curve.code[c], "A-Ci"), ylim = range(pmean) + ) + ciEnvelope(dat$Ci[sel], pA[1, srow[sel]], pA[3, srow[sel]], col = "grey90") # plot PI + ciEnvelope(dat$Ci[sel], pmean[1, srow[sel]], pmean[3, srow[sel]], col = "grey60") # plot CI + graphics::lines(dat$Ci[sel], pmean[2, srow[sel]], col = 2, lwd = 3) # model line + graphics::points(dat$Ci[sel], dat$Photo[sel], pch = 20, cex = 1.5) # licor data points graphics::points(dat$Ci[-sel], dat$Photo[-sel], col = "yellow", pch = 20) - graphics::legend("bottomright", legend = c("curve data", "other data", "mean", "CI", "PI"), - col = c(1, "yellow", 2, "grey60", "grey90"), - pch = 18, lty = 1, - lwd = 8, bty = "n") + graphics::legend("bottomright", + legend = c("curve data", "other data", "mean", "CI", "PI"), + col = c(1, "yellow", 2, "grey60", "grey90"), + pch = 18, lty = 1, + lwd = 8, bty = "n" + ) } else { print("No ACi data available") } } - + if ("aq" %in% tolower(curve)) { ## filter out A-Ci curve points ref <- estimate_mode(dat$CO2R) if (ref < 1) { return(NULL) } - sel <- which(abs(dat$CO2R - ref)/ref < tol) - + sel <- which(abs(dat$CO2R - ref) / ref < tol) + if (length(sel) > 3) { sel <- sel[order(dat$PARi[sel])] - + ## reference plot - plot(dat$PARi[sel], dat$Photo[sel], - pch = 20, cex = 1.5, - xlab = "PAR (umol m-2 s-1)", ylab = "An (umol m-2 
s-1)", main = paste(curve.code[c], "A-Q"), - ylim = range(pmean)) - ciEnvelope(dat$PARi[sel], pA[1, srow[sel]], pA[3, srow[sel]], col = "grey90") #plot PI - ciEnvelope(dat$PARi[sel], pmean[1, srow[sel]], pmean[3, srow[sel]], col = "grey60") #plot CI - graphics::lines(dat$PARi[sel], pmean[2, srow[sel]], col = 2, lwd = 3) #model line - graphics::points(dat$PARi[sel], dat$Photo[sel], pch = 20, cex = 1.5) #licor data points + plot(dat$PARi[sel], dat$Photo[sel], + pch = 20, cex = 1.5, + xlab = "PAR (umol m-2 s-1)", ylab = "An (umol m-2 s-1)", main = paste(curve.code[c], "A-Q"), + ylim = range(pmean) + ) + ciEnvelope(dat$PARi[sel], pA[1, srow[sel]], pA[3, srow[sel]], col = "grey90") # plot PI + ciEnvelope(dat$PARi[sel], pmean[1, srow[sel]], pmean[3, srow[sel]], col = "grey60") # plot CI + graphics::lines(dat$PARi[sel], pmean[2, srow[sel]], col = 2, lwd = 3) # model line + graphics::points(dat$PARi[sel], dat$Photo[sel], pch = 20, cex = 1.5) # licor data points graphics::points(dat$PARi[-sel], dat$Photo[-sel], col = "yellow", pch = 20) - graphics::legend("bottomright", legend = c("curve data", "other data", "mean", "CI", "PI"), - col = c(1, "yellow", 2, "grey60", "grey90"), - pch = 18, lty = 1, lwd = 8, bty = "n") + graphics::legend("bottomright", + legend = c("curve data", "other data", "mean", "CI", "PI"), + col = c(1, "yellow", 2, "grey60", "grey90"), + pch = 18, lty = 1, lwd = 8, bty = "n" + ) } else { print("No AQ data available") } - } ## end A-Q - } ## end loop over curves + } ## end A-Q + } ## end loop over curves } # plot_photo diff --git a/modules/photosynthesis/code/BB_model.R b/modules/photosynthesis/code/BB_model.R index aaf0610dc7f..58e7ddc0bae 100644 --- a/modules/photosynthesis/code/BB_model.R +++ b/modules/photosynthesis/code/BB_model.R @@ -1,12 +1,12 @@ ## BB_model_Wolz ## Programmer: Kevin Wolz ## Last Updated: 5/07/12 -## Support From: IB 509 Statistical Modeling Class, Professor Mike Dietze, T.A. Ryan Kelly, Student Xiaohui Feng +## Support From: IB 509 Statistical Modeling Class, Professor Mike Dietze, T.A. Ryan Kelly, Student Xiaohui Feng ## -## This script fits the Ball-Berry model for stomatal conductance to data of soybean leaves -## at ambient atmospheric conditions. The fit is done in a Bayesian context using the Markov Chain Monte Carlo -## method via interface with BUGS. Data was gather using a Licor-6400 from the ambient rings at SoyFace at the -## University of Illinois at Urbana-Champaign in Champaign, IL. +## This script fits the Ball-Berry model for stomatal conductance to data of soybean leaves +## at ambient atmospheric conditions. The fit is done in a Bayesian context using the Markov Chain Monte Carlo +## method via interface with BUGS. Data was gather using a Licor-6400 from the ambient rings at SoyFace at the +## University of Illinois at Urbana-Champaign in Champaign, IL. 
## load libraries library(R2WinBUGS) @@ -14,62 +14,62 @@ library(BRugs) library(plotrix) ## load data -dat = read.csv("Kdata_Project.csv", header=T) ## raw data to analyze -dat = dat[which(dat$id == 1),] ## select data at saturating PARi +dat <- read.csv("Kdata_Project.csv", header = T) ## raw data to analyze +dat <- dat[which(dat$id == 1), ] ## select data at saturating PARi -MLE = read.csv("param_compare.csv", header=T) ## parameters from MLE analysis for comparison +MLE <- read.csv("param_compare.csv", header = T) ## parameters from MLE analysis for comparison ## MCMC MODEL -my.model = function(){ - ## Parameter model - g0 ~ dlnorm(1e-10,10000) ## BB intercept prior (weak) - m ~ dnorm(10,0.1) ## BB slope prior (weak) - tau.BB ~ dgamma(0.1,0.00001) ## BB model precision prior (weak) - - for(i in 1:n){ - ## Process Model - pred.gs[i] <- g0 + m*an[i]*H[i]/ca[i] ## Ball-Berry model - - ## Data Model - gs[i] ~ dnorm(pred.gs[i], tau.BB) ## likelihood - pG[i] ~ dnorm(pred.gs[i], tau.BB) ## prediction - } +my.model <- function() { + ## Parameter model + g0 ~ dlnorm(1e-10, 10000) ## BB intercept prior (weak) + m ~ dnorm(10, 0.1) ## BB slope prior (weak) + tau.BB ~ dgamma(0.1, 0.00001) ## BB model precision prior (weak) + + for (i in 1:n) { + ## Process Model + pred.gs[i] <- g0 + m * an[i] * H[i] / ca[i] ## Ball-Berry model + + ## Data Model + gs[i] ~ dnorm(pred.gs[i], tau.BB) ## likelihood + pG[i] ~ dnorm(pred.gs[i], tau.BB) ## prediction + } } -write.model(my.model,"BBmodel.txt") ## save model to text file +write.model(my.model, "BBmodel.txt") ## save model to text file ## MCMC INITIALIZATION & DATA SELECTION init <- list() - init[[1]] <- list(m=8, g0=-0.01, tau.BB=2000) ## chain 1 initial conditions - init[[2]] <- list(m=10, g0=0, tau.BB=1000) ## chain 2 initial conditions - init[[3]] <- list(m=12, g0=0.1, tau.BB=1500) ## chain 3 initial conditions - -leaf.list = unique(dat$leaf) ## select leaf subset to test - -BBmcmc <- list() ## initialize output object - -## MCMC LOOP -for(s in leaf.list){ - sel = which(dat$leaf == s) ## pick leaf for this loop - - an = dat$Photo[sel] ## define net photosynthesis - ca = dat$CO2S[sel] ## define atmospheric [CO2] - H = dat$RH_S[sel]/100 ## define relative humidity - gs = dat$Cond[sel] ## define stomatal conductance - - mydat <- list(an=an, n=length(an), gs=gs, ca=ca, H=H) ## data list for current leaf - - BB <- openbugs(mydat, ## data - init, ## initial conditions - model.file = "BBmodel.txt", ## model - n.chains = 3, ## number of chains - n.iter = 50000, ## number of iterations - n.burnin = 10000, ## burn in - n.thin = 20, ## thin - parameters.to.save = c("g0", "m", "tau.BB", "pred.gs", "pG") ## parameters to save - ) - - BBmcmc[[s]] = BB ## save output object +init[[1]] <- list(m = 8, g0 = -0.01, tau.BB = 2000) ## chain 1 initial conditions +init[[2]] <- list(m = 10, g0 = 0, tau.BB = 1000) ## chain 2 initial conditions +init[[3]] <- list(m = 12, g0 = 0.1, tau.BB = 1500) ## chain 3 initial conditions + +leaf.list <- unique(dat$leaf) ## select leaf subset to test + +BBmcmc <- list() ## initialize output object + +## MCMC LOOP +for (s in leaf.list) { + sel <- which(dat$leaf == s) ## pick leaf for this loop + + an <- dat$Photo[sel] ## define net photosynthesis + ca <- dat$CO2S[sel] ## define atmospheric [CO2] + H <- dat$RH_S[sel] / 100 ## define relative humidity + gs <- dat$Cond[sel] ## define stomatal conductance + + mydat <- list(an = an, n = length(an), gs = gs, ca = ca, H = H) ## data list for current leaf + + BB <- openbugs(mydat, ## data + init, ## 
initial conditions + model.file = "BBmodel.txt", ## model + n.chains = 3, ## number of chains + n.iter = 50000, ## number of iterations + n.burnin = 10000, ## burn in + n.thin = 20, ## thin + parameters.to.save = c("g0", "m", "tau.BB", "pred.gs", "pG") ## parameters to save + ) + + BBmcmc[[s]] <- BB ## save output object } ## OUTPUT ANALYSIS @@ -79,168 +79,175 @@ mcmcg0.sd <- list() mcmcm.sd <- list() mcmctauBB <- list() mcmctauBB.sd <- list() -i = 1 ## counter +i <- 1 ## counter ## data summary -sink("BB_MCMC_Summary.txt") -for(j in leaf.list){ - print(j) - print(BBmcmc[[j]])} -sink() - -for(s in leaf.list){ - ## select data for each leaf - sel1 = which(dat$leaf == s) - - ## trace and density plots - pdf(paste("Leaf_",s,"_BB_Model_Trace.pdf",sep="")) - plot(as.mcmc.list(BBmcmc[[s]])) - dev.off() - - ## predictions vs measurements - gs = dat$Cond[sel1] - axismax = max(max(gs),max(BBmcmc[[s]]$mean$pred.gs)) - pdf(paste("Leaf_",s, "_BB_Pred-vs-Meas.pdf",sep="")) - plot(gs, - BBmcmc[[s]]$mean$pred.gs, - ylim = c(0,axismax), - xlim = c(0,axismax), - pch = 19, - main = "Predicted gs vs Measured gs", - xlab = "Measured gs (mol m-2 s-1)", - ylab = "Predicted gs (mol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - dev.off() - - ## mcmc means of each leaf - mcmcg0[i] = BBmcmc[[s]]$mean$g0 - mcmcm[i] = BBmcmc[[s]]$mean$m - mcmctauBB[i] = BBmcmc[[s]]$mean$tau.BB - mcmcg0.sd[i] = BBmcmc[[s]]$sd$g0 - mcmcm.sd[i] = BBmcmc[[s]]$sd$m - mcmctauBB.sd[i] = BBmcmc[[s]]$sd$tau.BB - i = i + 1 +sink("BB_MCMC_Summary.txt") +for (j in leaf.list) { + print(j) + print(BBmcmc[[j]]) +} +sink() + +for (s in leaf.list) { + ## select data for each leaf + sel1 <- which(dat$leaf == s) + + ## trace and density plots + pdf(paste("Leaf_", s, "_BB_Model_Trace.pdf", sep = "")) + plot(as.mcmc.list(BBmcmc[[s]])) + dev.off() + + ## predictions vs measurements + gs <- dat$Cond[sel1] + axismax <- max(max(gs), max(BBmcmc[[s]]$mean$pred.gs)) + pdf(paste("Leaf_", s, "_BB_Pred-vs-Meas.pdf", sep = "")) + plot(gs, + BBmcmc[[s]]$mean$pred.gs, + ylim = c(0, axismax), + xlim = c(0, axismax), + pch = 19, + main = "Predicted gs vs Measured gs", + xlab = "Measured gs (mol m-2 s-1)", + ylab = "Predicted gs (mol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + dev.off() + + ## mcmc means of each leaf + mcmcg0[i] <- BBmcmc[[s]]$mean$g0 + mcmcm[i] <- BBmcmc[[s]]$mean$m + mcmctauBB[i] <- BBmcmc[[s]]$mean$tau.BB + mcmcg0.sd[i] <- BBmcmc[[s]]$sd$g0 + mcmcm.sd[i] <- BBmcmc[[s]]$sd$m + mcmctauBB.sd[i] <- BBmcmc[[s]]$sd$tau.BB + i <- i + 1 } - mcmcg0 = as.numeric(mcmcg0) - mcmcm = as.numeric(mcmcm) - mcmctauBB = as.numeric(mcmctauBB) - mcmcg0.sd = as.numeric(mcmcg0.sd) - mcmcm.sd = as.numeric(mcmcm.sd) - mcmctauBB.sd = as.numeric(mcmctauBB.sd) - -output <- data.frame(mcmcm, mcmcm.sd, mcmcg0, mcmcg0.sd, mcmctauBB, mcmctauBB.sd) +mcmcg0 <- as.numeric(mcmcg0) +mcmcm <- as.numeric(mcmcm) +mcmctauBB <- as.numeric(mcmctauBB) +mcmcg0.sd <- as.numeric(mcmcg0.sd) +mcmcm.sd <- as.numeric(mcmcm.sd) +mcmctauBB.sd <- as.numeric(mcmctauBB.sd) + +output <- data.frame(mcmcm, mcmcm.sd, mcmcg0, mcmcg0.sd, mcmctauBB, mcmctauBB.sd) write.csv(output, file = "BB_output.csv") - ## MLE means of each leaf - MLEg0 = MLE$g0.nobound - MLEm = MLE$m.nobound - - ## compare mcmc means to MLE means for g0 - pdf(paste("Mean_Comparison-g0.pdf",sep="")) - plot(MLEg0, - mcmcg0, - ylim = c(-0.2,0.2), - xlim = c(-0.2,0.2), - pch = 19, - main = "MCMC g0 Means vs MLE g0 Means", - xlab = "MLE g0 (mol m-2 
s-1)", - ylab = "MCMC g0 (mol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(MLEg0,mcmcg0,mcmcg0.sd,add=TRUE) - dev.off() - - ## compare mcmc means to MLE means for m - pdf(paste("Mean_Comparison-m.pdf",sep="")) - plot(MLEm, - mcmcm, - ylim = c(5,20), - xlim = c(5,20), - pch = 19, - main = "MCMC m Means vs MLE m Means", - xlab = "MLE m", - ylab = "MCMC m", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(MLEm,mcmcm,mcmcm.sd,add=TRUE) - dev.off() +## MLE means of each leaf +MLEg0 <- MLE$g0.nobound +MLEm <- MLE$m.nobound + +## compare mcmc means to MLE means for g0 +pdf(paste("Mean_Comparison-g0.pdf", sep = "")) +plot(MLEg0, + mcmcg0, + ylim = c(-0.2, 0.2), + xlim = c(-0.2, 0.2), + pch = 19, + main = "MCMC g0 Means vs MLE g0 Means", + xlab = "MLE g0 (mol m-2 s-1)", + ylab = "MCMC g0 (mol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 +) +abline(0, 1, col = "dark green", lwd = 3) +plotCI(MLEg0, mcmcg0, mcmcg0.sd, add = TRUE) +dev.off() + +## compare mcmc means to MLE means for m +pdf(paste("Mean_Comparison-m.pdf", sep = "")) +plot(MLEm, + mcmcm, + ylim = c(5, 20), + xlim = c(5, 20), + pch = 19, + main = "MCMC m Means vs MLE m Means", + xlab = "MLE m", + ylab = "MCMC m", + cex.main = 1.6, + cex.lab = 1.4 +) +abline(0, 1, col = "dark green", lwd = 3) +plotCI(MLEm, mcmcm, mcmcm.sd, add = TRUE) +dev.off() ## Analysis with MCMC Object -for(s in leaf.list){ - ## select data for each leaf - sel2 = which(dat$leaf == s) - an = dat$Photo[sel2] ## define net photosynthesis - ca = dat$CO2S[sel2] ## define atmospheric [CO2] - H = dat$RH_S[sel2]/100 ## define relative humidity - gs = dat$Cond[sel2] ## define stomatal conductance - - ## convert output chains to mcmc objects - M <- coda::mcmc(BBmcmc[[s]]$sims.list$m) - G0 <- coda::mcmc(BBmcmc[[s]]$sims.list$g0) - T <- coda::mcmc(BBmcmc[[s]]$sims.list$tau.BB) - n <- coda::length(BBmcmc[[s]]$sims.list$g0) - - ## autocorrelation plots of each leaf - pdf(paste("Leaf_",s,"_BB_m_Autocorr.pdf",sep="")) - coda::autocorr.plot(M) - dev.off() - - pdf(paste("Leaf_",s,"_BB_g0_Autocorr.pdf",sep="")) - coda::autocorr.plot(G0) - dev.off() - - ## parameter correlation plots of each leaf - pdf(paste("Leaf_",s,"_BB_Param_Corr.pdf",sep="")) - plot(BBmcmc[[s]]$sims.list$g0, - BBmcmc[[s]]$sims.list$m, - pch = 19, - main = "BB Parameter Correlation", - xlab = "g0", - ylab = "m", - cex.main = 1.6, - cex.lab = 1.4) - dev.off() - - ## credible and prediction intervals - xpred <- seq(0,0.055,0.001) ## sequence of x values to make predictions at - npred <- length(xpred) - ypred <- matrix(NA,nrow=n,ncol=npred) ## storage for prediction interval - ycred <- matrix(NA,nrow=n,ncol=npred) ## storage for credible interval - - for(g in 1:n){ - Ey <- G0[g] + M[g]*xpred - ycred[g,] <- Ey - ypred[g,] <- rnorm(npred,Ey,sqrt(1/T[g])) - } - - ci <- apply(ycred,2,quantile,c(0.025,0.5,0.975))## credible interval and median - pi <- apply(ypred,2,quantile,c(0.025,0.975)) ## prediction interval - - pdf(paste("Leaf_",s,"_BB_Plot_Fit.pdf",sep="")) - plot(an*H/ca, - gs, - ylim = c(-0.5,1), - xlim = c(0,0.055), - pch = 19, - main = "BB Model Fit", - xlab = "A*H/Ca", - ylab = "Stomatal Conductance (gs) (mol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - - lines(xpred,ci[1,],col=3,lty=2) ## lower CI - lines(xpred,ci[2,],col=3,lwd=2) ## median - lines(xpred,ci[3,],col=3,lty=2) ## upper CI - lines(xpred,pi[1,],col=4,lty=2) ## lower PI - lines(xpred,pi[2,],col=4,lty=2) ## upper PI - - legend(0, 1, - c("MCMC Fit", "95% 
Credible Interval", "95% Predictive Interval"), - col = c(3,3,4), - lty = c(1, 2, 2)) - dev.off() +for (s in leaf.list) { + ## select data for each leaf + sel2 <- which(dat$leaf == s) + an <- dat$Photo[sel2] ## define net photosynthesis + ca <- dat$CO2S[sel2] ## define atmospheric [CO2] + H <- dat$RH_S[sel2] / 100 ## define relative humidity + gs <- dat$Cond[sel2] ## define stomatal conductance + + ## convert output chains to mcmc objects + M <- coda::mcmc(BBmcmc[[s]]$sims.list$m) + G0 <- coda::mcmc(BBmcmc[[s]]$sims.list$g0) + T <- coda::mcmc(BBmcmc[[s]]$sims.list$tau.BB) + n <- coda::length(BBmcmc[[s]]$sims.list$g0) + + ## autocorrelation plots of each leaf + pdf(paste("Leaf_", s, "_BB_m_Autocorr.pdf", sep = "")) + coda::autocorr.plot(M) + dev.off() + + pdf(paste("Leaf_", s, "_BB_g0_Autocorr.pdf", sep = "")) + coda::autocorr.plot(G0) + dev.off() + + ## parameter correlation plots of each leaf + pdf(paste("Leaf_", s, "_BB_Param_Corr.pdf", sep = "")) + plot(BBmcmc[[s]]$sims.list$g0, + BBmcmc[[s]]$sims.list$m, + pch = 19, + main = "BB Parameter Correlation", + xlab = "g0", + ylab = "m", + cex.main = 1.6, + cex.lab = 1.4 + ) + dev.off() + + ## credible and prediction intervals + xpred <- seq(0, 0.055, 0.001) ## sequence of x values to make predictions at + npred <- length(xpred) + ypred <- matrix(NA, nrow = n, ncol = npred) ## storage for prediction interval + ycred <- matrix(NA, nrow = n, ncol = npred) ## storage for credible interval + + for (g in 1:n) { + Ey <- G0[g] + M[g] * xpred + ycred[g, ] <- Ey + ypred[g, ] <- rnorm(npred, Ey, sqrt(1 / T[g])) + } + + ci <- apply(ycred, 2, quantile, c(0.025, 0.5, 0.975)) ## credible interval and median + pi <- apply(ypred, 2, quantile, c(0.025, 0.975)) ## prediction interval + + pdf(paste("Leaf_", s, "_BB_Plot_Fit.pdf", sep = "")) + plot(an * H / ca, + gs, + ylim = c(-0.5, 1), + xlim = c(0, 0.055), + pch = 19, + main = "BB Model Fit", + xlab = "A*H/Ca", + ylab = "Stomatal Conductance (gs) (mol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + + lines(xpred, ci[1, ], col = 3, lty = 2) ## lower CI + lines(xpred, ci[2, ], col = 3, lwd = 2) ## median + lines(xpred, ci[3, ], col = 3, lty = 2) ## upper CI + lines(xpred, pi[1, ], col = 4, lty = 2) ## lower PI + lines(xpred, pi[2, ], col = 4, lty = 2) ## upper PI + + legend(0, 1, + c("MCMC Fit", "95% Credible Interval", "95% Predictive Interval"), + col = c(3, 3, 4), + lty = c(1, 2, 2) + ) + dev.off() } diff --git a/modules/photosynthesis/code/C3_photomodel.R b/modules/photosynthesis/code/C3_photomodel.R index 2f0927ff56a..5f32a217cf1 100644 --- a/modules/photosynthesis/code/C3_photomodel.R +++ b/modules/photosynthesis/code/C3_photomodel.R @@ -1,12 +1,12 @@ ## C3_photomodel_Wolz ## Programmer: Kevin Wolz ## Last Updated: 4/11/12 -## Support From: IB 509 Statistical Modeling Class, Professor Mike Dietze, T.A. Ryan Kelly, Student Xiaohui Feng +## Support From: IB 509 Statistical Modeling Class, Professor Mike Dietze, T.A. Ryan Kelly, Student Xiaohui Feng ## -## This script fits the Farquhar model for photosynthesis to data of soybean leaves -## at ambient atmospheric conditions. The fit is done in a Bayesian context using the Markov Chain Monte Carlo -## method via interface with BUGS. Data was gather using a Licor-6400 from the ambient rings at SoyFace at the -## University of Illinois at Urbana-Champaign in Champaign, IL. +## This script fits the Farquhar model for photosynthesis to data of soybean leaves +## at ambient atmospheric conditions. 
The fit is done in a Bayesian context using the Markov Chain Monte Carlo +## method via interface with BUGS. Data was gather using a Licor-6400 from the ambient rings at SoyFace at the +## University of Illinois at Urbana-Champaign in Champaign, IL. ## load libraries library(R2WinBUGS) @@ -14,296 +14,303 @@ library(BRugs) library(plotrix) ## load data -dat=read.csv("Kdata_Project.csv", header=T) ## raw data to analyze -dat = dat[which(dat$id == 1),] ## select data at saturating PARi +dat <- read.csv("Kdata_Project.csv", header = T) ## raw data to analyze +dat <- dat[which(dat$id == 1), ] ## select data at saturating PARi -MLE = read.csv("param_compare.csv", header=T) ## parameters from MLE analysis for comparison +MLE <- read.csv("param_compare.csv", header = T) ## parameters from MLE analysis for comparison ## MCMC MODEL -my.model = function(){ - ## Parameter model - Jmax ~ dlnorm(log(100),log(25)) ## maximum electron transport rate prior - Vcmax ~ dlnorm(log(180),log(35)) ## maximum rubisco capacity prior - R ~ dlnorm(log(0.7),log(0.15)) ## leaf respiration prior - alpha ~ dnorm(log(0.8),log(0.1)) ## quantum yield (mol e-/mol photon) - tau.FvCB ~ dgamma(0.1,0.1) ## FvCB model precision prior (weak) - - for(i in 1:n){ - ## Process Model - q2[i] <- q[i]*alpha*phi*beta - J[i] <- (q2[i]+Jmax-sqrt((q2[i]+Jmax)*(q2[i]+Jmax)-4*theta*q2[i]*Jmax))/(2*theta) ## potential electron transport rate - - Aj[i] <- J[i] * (ci[i]-G.star)/(4.5*ci[i]+10.5*G.star) ## electron transport limited rate - Ac[i] <- Vcmax * (ci[i]-G.star)/(ci[i]+Kc*(1+po/Ko)) ## rubisco limited rate - - Adiagnostic[i] <- Aj[i] - Ac[i] - - pred.An[i] <- min(Aj[i], Ac[i]) - R ## Farquhar model - - ## Data Model - an[i] ~ dnorm(pred.An[i], tau.FvCB) ## likelihood - pA[i] ~ dnorm(pred.An[i], tau.FvCB) ## prediction - } +my.model <- function() { + ## Parameter model + Jmax ~ dlnorm(log(100), log(25)) ## maximum electron transport rate prior + Vcmax ~ dlnorm(log(180), log(35)) ## maximum rubisco capacity prior + R ~ dlnorm(log(0.7), log(0.15)) ## leaf respiration prior + alpha ~ dnorm(log(0.8), log(0.1)) ## quantum yield (mol e-/mol photon) + tau.FvCB ~ dgamma(0.1, 0.1) ## FvCB model precision prior (weak) + + for (i in 1:n) { + ## Process Model + q2[i] <- q[i] * alpha * phi * beta + J[i] <- (q2[i] + Jmax - sqrt((q2[i] + Jmax) * (q2[i] + Jmax) - 4 * theta * q2[i] * Jmax)) / (2 * theta) ## potential electron transport rate + + Aj[i] <- J[i] * (ci[i] - G.star) / (4.5 * ci[i] + 10.5 * G.star) ## electron transport limited rate + Ac[i] <- Vcmax * (ci[i] - G.star) / (ci[i] + Kc * (1 + po / Ko)) ## rubisco limited rate + + Adiagnostic[i] <- Aj[i] - Ac[i] + + pred.An[i] <- min(Aj[i], Ac[i]) - R ## Farquhar model + + ## Data Model + an[i] ~ dnorm(pred.An[i], tau.FvCB) ## likelihood + pA[i] ~ dnorm(pred.An[i], tau.FvCB) ## prediction + } } -write.model(my.model,"c3model.txt") ## save model to text file +write.model(my.model, "c3model.txt") ## save model to text file init <- list() - init[[1]] <- list(R=1, Vcmax=90, alpha=0.80, tau.FvCB=10, Jmax=165)#, theta=0.65) ## chain 1 initial conditions (black) - init[[2]] <- list(R=1.3, Vcmax=100, alpha=0.85, tau.FvCB=10, Jmax=170)#, theta=0.7) ## chain 2 initial conditions (red) - init[[3]] <- list(R=1.6, Vcmax=110, alpha=0.90, tau.FvCB=10, Jmax=175)#, theta=0.75) ## chain 3 initial conditions (green) +init[[1]] <- list(R = 1, Vcmax = 90, alpha = 0.80, tau.FvCB = 10, Jmax = 165) # , theta=0.65) ## chain 1 initial conditions (black) +init[[2]] <- list(R = 1.3, Vcmax = 100, alpha = 0.85, tau.FvCB = 10, Jmax 
= 170) # , theta=0.7) ## chain 2 initial conditions (red) +init[[3]] <- list(R = 1.6, Vcmax = 110, alpha = 0.90, tau.FvCB = 10, Jmax = 175) # , theta=0.75) ## chain 3 initial conditions (green) -leaf.list = unique(dat$leaf) ## select leaf subset to test +leaf.list <- unique(dat$leaf) ## select leaf subset to test -c3mcmc <- list() ## initialize output object +c3mcmc <- list() ## initialize output object ## MCMC LOOP -for(s in leaf.list){ - sel = which(dat$leaf == s) ## pick leaf for this loop - - an = dat$Photo[sel] ## define net photosynthesis - ci = dat$Ci[sel] ## define intracellular [CO2] - q = dat$PARi[sel] ## define incident radiation - - mydat <- list(an=an, ci=ci, q=q, n=length(an), Kc=400, Ko=275, po=210, phi=0.85, beta=0.5, theta=0.7, G.star=44.25) ## data for current leaf - - c3 <- openbugs(mydat, ## data - init, ## initial conditions - model.file = "c3model.txt", ## model - n.chains = 3, ## number of chains - n.iter = 100000, ## number of iterations - n.burnin = 50000, ## burn in - n.thin = 20, ## thin - parameters.to.save = c("R", "Vcmax", "alpha", "Jmax", "tau.FvCB", "pred.An", "pA", "Adiagnostic") ## parameters to save - ) - - c3mcmc[[s]] = c3 ## save output object - print(s) +for (s in leaf.list) { + sel <- which(dat$leaf == s) ## pick leaf for this loop + + an <- dat$Photo[sel] ## define net photosynthesis + ci <- dat$Ci[sel] ## define intracellular [CO2] + q <- dat$PARi[sel] ## define incident radiation + + mydat <- list(an = an, ci = ci, q = q, n = length(an), Kc = 400, Ko = 275, po = 210, phi = 0.85, beta = 0.5, theta = 0.7, G.star = 44.25) ## data for current leaf + + c3 <- openbugs(mydat, ## data + init, ## initial conditions + model.file = "c3model.txt", ## model + n.chains = 3, ## number of chains + n.iter = 100000, ## number of iterations + n.burnin = 50000, ## burn in + n.thin = 20, ## thin + parameters.to.save = c("R", "Vcmax", "alpha", "Jmax", "tau.FvCB", "pred.An", "pA", "Adiagnostic") ## parameters to save + ) + + c3mcmc[[s]] <- c3 ## save output object + print(s) } ## OUTPUT ANALYSIS c3mcmcV <- list() -c3mcmcJ <- list() -c3mcmcR <- list() -c3mcmcalpha <- list() -c3mcmctauFvCB <- list() +c3mcmcJ <- list() +c3mcmcR <- list() +c3mcmcalpha <- list() +c3mcmctauFvCB <- list() c3mcmcV.sd <- list() -c3mcmcJ.sd <- list() -c3mcmcR.sd <- list() -c3mcmcalpha.sd <- list() -c3mcmctauFvCB.sd <- list() +c3mcmcJ.sd <- list() +c3mcmcR.sd <- list() +c3mcmcalpha.sd <- list() +c3mcmctauFvCB.sd <- list() -i = 1 ## counter +i <- 1 ## counter ## data summary -sink("FvCB_MCMC_Summary.txt") -for(j in leaf.list){ - print(j) - print(c3mcmc[[j]])} -sink() - -for(s in leaf.list){ - ## select data for each leaf - sel1 = which(dat$leaf == s) - - ## trace and density plots - pdf(paste("Leaf_",s,"_FvCB_Model_Trace.pdf",sep="")) - plot(as.mcmc.list(c3mcmc[[s]])) - dev.off() - - ## predictions vs measurements - an = dat$Photo[sel1] - axismax = max(max(an),max(c3mcmc[[s]]$mean$pred.An)) - pdf(paste("Leaf_",s,"_FvCB_Pred-vs-Meas.pdf",sep="")) - plot(an, - c3mcmc[[s]]$mean$pred.An, - ylim = c(-5,60), - xlim = c(-5,60), - pch = 19, - main = "Predicted An vs measured An", - xlab = "Measured An (umol m-2 s-1)", - ylab = "Predicted An (umol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - dev.off() - - ## mcmc means of each leaf - c3mcmcV[i] = c3mcmc[[s]]$mean$Vcmax - c3mcmcJ[i] = c3mcmc[[s]]$mean$Jmax - c3mcmcR[i] = c3mcmc[[s]]$mean$R - c3mcmcalpha[i] = c3mcmc[[s]]$mean$alpha - c3mcmctauFvCB[i] = c3mcmc[[s]]$mean$tau.FvCB - - ## mcmc sd of each leaf - 
c3mcmcV.sd[i] = c3mcmc[[s]]$sd$Vcmax - c3mcmcJ.sd[i] = c3mcmc[[s]]$sd$Jmax - c3mcmcR.sd[i] = c3mcmc[[s]]$sd$R - c3mcmcalpha.sd[i] = c3mcmc[[s]]$sd$alpha - c3mcmctauFvCB.sd[i] = c3mcmc[[s]]$sd$tau.FvCB - i = i + 1 +sink("FvCB_MCMC_Summary.txt") +for (j in leaf.list) { + print(j) + print(c3mcmc[[j]]) } - - c3mcmcV = as.numeric(c3mcmcV) - c3mcmcJ = as.numeric(c3mcmcJ) - c3mcmcR = as.numeric(c3mcmcR) - c3mcmcalpha = as.numeric(c3mcmcalpha) - c3mcmctauFvCB = as.numeric(c3mcmctauFvCB) - - c3mcmcV.sd = as.numeric(c3mcmcV.sd) - c3mcmcJ.sd = as.numeric(c3mcmcJ.sd) - c3mcmcR.sd = as.numeric(c3mcmcR.sd) - c3mcmcalpha.sd = as.numeric(c3mcmcalpha.sd) - c3mcmctauFvCB.sd = as.numeric(c3mcmctauFvCB.sd) - +sink() + +for (s in leaf.list) { + ## select data for each leaf + sel1 <- which(dat$leaf == s) + + ## trace and density plots + pdf(paste("Leaf_", s, "_FvCB_Model_Trace.pdf", sep = "")) + plot(as.mcmc.list(c3mcmc[[s]])) + dev.off() + + ## predictions vs measurements + an <- dat$Photo[sel1] + axismax <- max(max(an), max(c3mcmc[[s]]$mean$pred.An)) + pdf(paste("Leaf_", s, "_FvCB_Pred-vs-Meas.pdf", sep = "")) + plot(an, + c3mcmc[[s]]$mean$pred.An, + ylim = c(-5, 60), + xlim = c(-5, 60), + pch = 19, + main = "Predicted An vs measured An", + xlab = "Measured An (umol m-2 s-1)", + ylab = "Predicted An (umol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + dev.off() + + ## mcmc means of each leaf + c3mcmcV[i] <- c3mcmc[[s]]$mean$Vcmax + c3mcmcJ[i] <- c3mcmc[[s]]$mean$Jmax + c3mcmcR[i] <- c3mcmc[[s]]$mean$R + c3mcmcalpha[i] <- c3mcmc[[s]]$mean$alpha + c3mcmctauFvCB[i] <- c3mcmc[[s]]$mean$tau.FvCB + + ## mcmc sd of each leaf + c3mcmcV.sd[i] <- c3mcmc[[s]]$sd$Vcmax + c3mcmcJ.sd[i] <- c3mcmc[[s]]$sd$Jmax + c3mcmcR.sd[i] <- c3mcmc[[s]]$sd$R + c3mcmcalpha.sd[i] <- c3mcmc[[s]]$sd$alpha + c3mcmctauFvCB.sd[i] <- c3mcmc[[s]]$sd$tau.FvCB + i <- i + 1 +} + +c3mcmcV <- as.numeric(c3mcmcV) +c3mcmcJ <- as.numeric(c3mcmcJ) +c3mcmcR <- as.numeric(c3mcmcR) +c3mcmcalpha <- as.numeric(c3mcmcalpha) +c3mcmctauFvCB <- as.numeric(c3mcmctauFvCB) + +c3mcmcV.sd <- as.numeric(c3mcmcV.sd) +c3mcmcJ.sd <- as.numeric(c3mcmcJ.sd) +c3mcmcR.sd <- as.numeric(c3mcmcR.sd) +c3mcmcalpha.sd <- as.numeric(c3mcmcalpha.sd) +c3mcmctauFvCB.sd <- as.numeric(c3mcmctauFvCB.sd) + output <- data.frame(c3mcmcV, c3mcmcV.sd, c3mcmcJ, c3mcmcJ.sd, c3mcmcalpha, c3mcmcalpha.sd, c3mcmcR, c3mcmcR.sd, c3mcmctauFvCB, c3mcmctauFvCB.sd) write.csv(output, file = "FvCB_output.csv") - ## MLE means of each leaf - MLEV = MLE$Katie.Vcmax - MLEJ = MLE$Katie.Jmax - - ## compare mcmc means to Manual means for Vcmax - pdf(paste("Mean_Comparison-Vcmax.pdf",sep="")) - plot(MLEV, - c3mcmcV, - ylim = c(50,150), - xlim = c(50,150), - pch = 19, - main = "MCMC Vcmax Means vs Manual Vcmax Fit", - xlab = "Manual Vcmax Fit (umol m-2 s-1)", - ylab = "MCMC Vcmax (umol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(MLEV,c3mcmcV,c3mcmcV.sd,add=TRUE) - dev.off() - - ## compare mcmc means to Manual means for Jmax - pdf(paste("Mean_Comparison-Jmax.pdf",sep="")) - plot(MLEJ, - c3mcmcJ, - ylim = c(100,250), - xlim = c(100,250), - pch = 19, - main = "MCMC Jmax Means vs Maunal Jmax Fit", - xlab = "Manual Jmax Fit (umol m-2 s-1)", - ylab = "MCMC Jmax (umol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(MLEJ,c3mcmcJ,c3mcmcJ.sd,add=TRUE) - dev.off() +## MLE means of each leaf +MLEV <- MLE$Katie.Vcmax +MLEJ <- MLE$Katie.Jmax + +## compare mcmc means to Manual 
means for Vcmax +pdf(paste("Mean_Comparison-Vcmax.pdf", sep = "")) +plot(MLEV, + c3mcmcV, + ylim = c(50, 150), + xlim = c(50, 150), + pch = 19, + main = "MCMC Vcmax Means vs Manual Vcmax Fit", + xlab = "Manual Vcmax Fit (umol m-2 s-1)", + ylab = "MCMC Vcmax (umol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 +) +abline(0, 1, col = "dark green", lwd = 3) +plotCI(MLEV, c3mcmcV, c3mcmcV.sd, add = TRUE) +dev.off() + +## compare mcmc means to Manual means for Jmax +pdf(paste("Mean_Comparison-Jmax.pdf", sep = "")) +plot(MLEJ, + c3mcmcJ, + ylim = c(100, 250), + xlim = c(100, 250), + pch = 19, + main = "MCMC Jmax Means vs Maunal Jmax Fit", + xlab = "Manual Jmax Fit (umol m-2 s-1)", + ylab = "MCMC Jmax (umol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 +) +abline(0, 1, col = "dark green", lwd = 3) +plotCI(MLEJ, c3mcmcJ, c3mcmcJ.sd, add = TRUE) +dev.off() ## Analysis with MCMC Object -Kc=400 -Ko=275 -po=210 -f=0.15 -phi=0.85 -beta=0.5 -theta=0.7 -G.star=44.25 - -for(s in leaf.list){ - ## select data for each leaf - sel2 = which(dat$leaf == s) - an = dat$Photo[sel2] ## define net photosynthesis - ci = dat$Ci[sel2] ## define intracellular [CO2] - q = dat$PARi[sel2] ## define incident radiation - - ## convert output chains to mcmc objects - r <- c3mcmc[[s]]$sims.list$R - v <- c3mcmc[[s]]$sims.list$Vcmax - a <- c3mcmc[[s]]$sims.list$alpha - j <- c3mcmc[[s]]$sims.list$Jmax - t <- c3mcmc[[s]]$sims.list$tau.FvCB - - R <- coda::mcmc(r) - V <- coda::mcmc(v) - A <- coda::mcmc(a) - J <- coda::mcmc(j) - T <- coda::mcmc(t) - n <- length(c3mcmc[[s]]$sims.list$R) - - df <- data.frame(r,v,a,j,g,t) - - ## parameter pairs plot of each leaf - jpeg(paste("Leaf_",s,"_FvCB_Params_Corr_Plot.jpeg",sep="")) - pairs(df,c("Rd","Vcmax","alpha","Jmax","Model Prec")) - dev.off() - - ## autocorrelation plots of each leaf - pdf(paste("Leaf_",s,"_FvCB_Vcmax_Autocorr.pdf",sep="")) - coda::autocorr.plot(V) - dev.off() - - pdf(paste("Leaf_",s,"_FvCB_Jmax_Autocorr.pdf",sep="")) - coda::autocorr.plot(J) - dev.off() - - ## parameter correlation plots of each leaf - pdf(paste("Leaf_",s,"_FvCB_Param_Corr.pdf",sep="")) - plot(c3mcmc[[s]]$sims.list$Vcmax, - c3mcmc[[s]]$sims.list$Jmax, - pch = 19, - main = "FvCB Parameter Correlation", - xlab = "Vcmax", - ylab = "Jmax", - cex.main = 1.6, - cex.lab = 1.4) - dev.off() - - ## credible and prediction intervals - sorted = sort.int(ci,index.return=TRUE) - index = sorted$ix - xpred <- ci[index] ## sequence of x values to make predictions at - npred <- length(xpred) - ypred <- matrix(NA,nrow=n,ncol=npred) ## storage for prediction interval - ycred <- matrix(NA,nrow=n,ncol=npred) ## storage for credible interval - - q2 <- matrix(NA,1,npred) - Jm <- matrix(NA,1,npred) - Aj <- matrix(NA,1,npred) - Ac <- matrix(NA,1,npred) - for(g in 1:n){ - q2[1,] <- q[index]*A[g]*(1-f)/2 - Jm[1,] <- (q2[1,]+J[g]-sqrt((q2[1,]+J[g])*(q2[1,]+J[g])-4*theta*q2[1,]*J[g]))/(2*theta) ## potential electron transport rate - - Aj[1,] <- Jm[1,] * (xpred-G.star)/(4.5*xpred+10.5*G[g]) ## electron transport limited rate - Ac[1,] <- V[g] * (xpred-G.star)/(xpred+Kc*(1+po/Ko)) ## rubisco limited rate - - Ey <- pmin(Aj[1,], Ac[1,]) - R[g] ## Farquhar model - ycred[g,] <- Ey - ypred[g,] <- rnorm(npred,Ey,sqrt(1/T[g])) - } - - credi <- apply(ycred,2,quantile,c(0.025,0.5,0.975))## credible interval and median - predi <- apply(ypred,2,quantile,c(0.025,0.975)) ## prediction interval - - pdf(paste("Leaf_",s,"_FvCB_Plot_Fit.pdf",sep="")) - plot(ci, - an, - ylim = c(0,35), - xlim = c(0,1300), - pch = 19, - main = "FvCB Model Fit", - xlab = 
"Intracellular [CO2] (Ci) (ppm)", - ylab = "Net Photosynthesis (An) (umol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - - lines(xpred,credi[1,],col=3,lty=2) ## lower CI - lines(xpred,credi[2,],col=3,lwd=2) ## median - lines(xpred,credi[3,],col=3,lty=2) ## upper CI - lines(xpred,predi[1,],col=4,lty=2) ## lower PI - lines(xpred,predi[2,],col=4,lty=2) ## upper PI - - legend(700, 5, - c("MCMC Fit", "95% Credible Interval", "95% Predictive Interval"), - col = c(3,3,4), - lty = c(1, 2, 2)) - dev.off() -} \ No newline at end of file +Kc <- 400 +Ko <- 275 +po <- 210 +f <- 0.15 +phi <- 0.85 +beta <- 0.5 +theta <- 0.7 +G.star <- 44.25 + +for (s in leaf.list) { + ## select data for each leaf + sel2 <- which(dat$leaf == s) + an <- dat$Photo[sel2] ## define net photosynthesis + ci <- dat$Ci[sel2] ## define intracellular [CO2] + q <- dat$PARi[sel2] ## define incident radiation + + ## convert output chains to mcmc objects + r <- c3mcmc[[s]]$sims.list$R + v <- c3mcmc[[s]]$sims.list$Vcmax + a <- c3mcmc[[s]]$sims.list$alpha + j <- c3mcmc[[s]]$sims.list$Jmax + t <- c3mcmc[[s]]$sims.list$tau.FvCB + + R <- coda::mcmc(r) + V <- coda::mcmc(v) + A <- coda::mcmc(a) + J <- coda::mcmc(j) + T <- coda::mcmc(t) + n <- length(c3mcmc[[s]]$sims.list$R) + + df <- data.frame(r, v, a, j, g, t) + + ## parameter pairs plot of each leaf + jpeg(paste("Leaf_", s, "_FvCB_Params_Corr_Plot.jpeg", sep = "")) + pairs(df, c("Rd", "Vcmax", "alpha", "Jmax", "Model Prec")) + dev.off() + + ## autocorrelation plots of each leaf + pdf(paste("Leaf_", s, "_FvCB_Vcmax_Autocorr.pdf", sep = "")) + coda::autocorr.plot(V) + dev.off() + + pdf(paste("Leaf_", s, "_FvCB_Jmax_Autocorr.pdf", sep = "")) + coda::autocorr.plot(J) + dev.off() + + ## parameter correlation plots of each leaf + pdf(paste("Leaf_", s, "_FvCB_Param_Corr.pdf", sep = "")) + plot(c3mcmc[[s]]$sims.list$Vcmax, + c3mcmc[[s]]$sims.list$Jmax, + pch = 19, + main = "FvCB Parameter Correlation", + xlab = "Vcmax", + ylab = "Jmax", + cex.main = 1.6, + cex.lab = 1.4 + ) + dev.off() + + ## credible and prediction intervals + sorted <- sort.int(ci, index.return = TRUE) + index <- sorted$ix + xpred <- ci[index] ## sequence of x values to make predictions at + npred <- length(xpred) + ypred <- matrix(NA, nrow = n, ncol = npred) ## storage for prediction interval + ycred <- matrix(NA, nrow = n, ncol = npred) ## storage for credible interval + + q2 <- matrix(NA, 1, npred) + Jm <- matrix(NA, 1, npred) + Aj <- matrix(NA, 1, npred) + Ac <- matrix(NA, 1, npred) + for (g in 1:n) { + q2[1, ] <- q[index] * A[g] * (1 - f) / 2 + Jm[1, ] <- (q2[1, ] + J[g] - sqrt((q2[1, ] + J[g]) * (q2[1, ] + J[g]) - 4 * theta * q2[1, ] * J[g])) / (2 * theta) ## potential electron transport rate + + Aj[1, ] <- Jm[1, ] * (xpred - G.star) / (4.5 * xpred + 10.5 * G[g]) ## electron transport limited rate + Ac[1, ] <- V[g] * (xpred - G.star) / (xpred + Kc * (1 + po / Ko)) ## rubisco limited rate + + Ey <- pmin(Aj[1, ], Ac[1, ]) - R[g] ## Farquhar model + ycred[g, ] <- Ey + ypred[g, ] <- rnorm(npred, Ey, sqrt(1 / T[g])) + } + + credi <- apply(ycred, 2, quantile, c(0.025, 0.5, 0.975)) ## credible interval and median + predi <- apply(ypred, 2, quantile, c(0.025, 0.975)) ## prediction interval + + pdf(paste("Leaf_", s, "_FvCB_Plot_Fit.pdf", sep = "")) + plot(ci, + an, + ylim = c(0, 35), + xlim = c(0, 1300), + pch = 19, + main = "FvCB Model Fit", + xlab = "Intracellular [CO2] (Ci) (ppm)", + ylab = "Net Photosynthesis (An) (umol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + + lines(xpred, credi[1, ], col = 3, lty = 2) ## lower CI 
+ lines(xpred, credi[2, ], col = 3, lwd = 2) ## median + lines(xpred, credi[3, ], col = 3, lty = 2) ## upper CI + lines(xpred, predi[1, ], col = 4, lty = 2) ## lower PI + lines(xpred, predi[2, ], col = 4, lty = 2) ## upper PI + + legend(700, 5, + c("MCMC Fit", "95% Credible Interval", "95% Predictive Interval"), + col = c(3, 3, 4), + lty = c(1, 2, 2) + ) + dev.off() +} diff --git a/modules/photosynthesis/code/C3_photomodel_XF.R b/modules/photosynthesis/code/C3_photomodel_XF.R index a9581f9b373..a4662fb9b71 100644 --- a/modules/photosynthesis/code/C3_photomodel_XF.R +++ b/modules/photosynthesis/code/C3_photomodel_XF.R @@ -1,131 +1,131 @@ library(R2WinBUGS) library(BRugs) -dat=read.csv("c3photosynthesis.csv",header=T) -#dat2=read.csv('c3covariates.csv',header=T) - -my.model = function(){ - Jmax ~ dlnorm(4.7,2.7) ## maximum electron transport rate prior - alpha~dnorm(0.25,100) ##quantum yield (mol electrons/mole photon) prior - vmax ~dlnorm(4.6,2.7) ## maximum rubisco capacity prior - r ~ dlnorm(0.75,1.56) ## leaf respiration prior - cp ~ dlnorm(1.9,2.7) ## CO2 compensation point prior - tau ~ dgamma(0.1,0.1) -# tpu~ dlnorm(3,2.8) ##tpu - -# tau.Vleaf~dgamma(0.1,0.1) ## add random leaf effects -# tau.Aleaf~dgamma(0.1,0.1) -# for(i in 1:nrep){ -# Vleaf[i]~dnorm(0,tau.Vleaf) -# Aleaf[i]~dnorm(0,tau.Aleaf) -# } -# beta1 ~ dnorm(0,0.1) ## chlorophyll effects -# beta2 ~ dnorm(0,0.1) ## SLA effects -# beta5 ~ dnorm(0,0.1) ## Vcmax effects - -# tau.Vmon~dgamma(0.01,0.01) ## add month effects on Vcmax, fixed effects -# Vmon[7]<-0 ## reference month -# Vmon[8]~ dnorm(0,tau.Vmon) -# Vmon[5]~ dnorm(0,tau.Vmon) -# Vmon[6]~ dnorm(0,tau.Vmon) -# Vmon[9]~ dnorm(0,tau.Vmon) -# Vmon[10]~ dnorm(0,tau.Vmon) -# Kc<-46 ## Michaelis constant CO2 (Pa) -# Ko<-33000 ## Michaelis constant O2 (Pa) -# po<-21000 ## partial pressure of O2 (Pa) -# k <- 0.21 ## Vo/Vc - for(i in 1:n){ -# al[i]<-((alpha+beta1*(chl[rep[i]]-chlave)+beta2*(sla[rep[i]]-slaave) -# +Aleaf[rep[i]])*q[i]/(sqrt(1+((alpha+Aleaf[rep[i]])*(alpha+ -# Aleaf[rep[i]])*q[i]*q[i])/(Jmax*Jmax))))*(pi[i]-cp)/(4*pi[i]+8*cp) ## electron transport limited with chlorophyll, SLA and random leaf effects turned on - - al[i]<-(alpha*q[i]/(sqrt(1+(alpha*alpha*q[i]*q[i])/(Jmax*Jmax))))*(pi[i]-cp)/(4*pi[i]+8*cp) ## electron transport limited without covariates - -# ae[i]<-(vmax+Vmon[month[i]]+beta5*(leafn[rep[i]]-leafnave) -# +Vleaf[rep[i]])*((pi[i]-cp)/(pi[i]+Kc*(1+po/Ko))) ## maximum rubisco limited with leaf N, month and random leaf effects turned on - - ae[i]<- vmax*(pi[i]-cp)/(pi[i]+Kc*(1+po/Ko)) ## maximum rubisco limited without covariates - -# ap[i]<-3*tpu ## phosphate limited - - prean[i]<-min(al[i], ae[i]) - r ## predicted net photosynthesis - an[i]~dnorm(prean[i],tau) ## likelihood - pA[i] ~ dnorm(prean[i],tau) ## prediction - } +dat <- read.csv("c3photosynthesis.csv", header = T) +# dat2=read.csv('c3covariates.csv',header=T) + +my.model <- function() { + Jmax ~ dlnorm(4.7, 2.7) ## maximum electron transport rate prior + alpha ~ dnorm(0.25, 100) ## quantum yield (mol electrons/mole photon) prior + vmax ~ dlnorm(4.6, 2.7) ## maximum rubisco capacity prior + r ~ dlnorm(0.75, 1.56) ## leaf respiration prior + cp ~ dlnorm(1.9, 2.7) ## CO2 compensation point prior + tau ~ dgamma(0.1, 0.1) + # tpu~ dlnorm(3,2.8) ##tpu + + # tau.Vleaf~dgamma(0.1,0.1) ## add random leaf effects + # tau.Aleaf~dgamma(0.1,0.1) + # for(i in 1:nrep){ + # Vleaf[i]~dnorm(0,tau.Vleaf) + # Aleaf[i]~dnorm(0,tau.Aleaf) + # } + # beta1 ~ dnorm(0,0.1) ## chlorophyll effects + # beta2 ~ dnorm(0,0.1) 
## SLA effects + # beta5 ~ dnorm(0,0.1) ## Vcmax effects + + # tau.Vmon~dgamma(0.01,0.01) ## add month effects on Vcmax, fixed effects + # Vmon[7]<-0 ## reference month + # Vmon[8]~ dnorm(0,tau.Vmon) + # Vmon[5]~ dnorm(0,tau.Vmon) + # Vmon[6]~ dnorm(0,tau.Vmon) + # Vmon[9]~ dnorm(0,tau.Vmon) + # Vmon[10]~ dnorm(0,tau.Vmon) + # Kc<-46 ## Michaelis constant CO2 (Pa) + # Ko<-33000 ## Michaelis constant O2 (Pa) + # po<-21000 ## partial pressure of O2 (Pa) + # k <- 0.21 ## Vo/Vc + for (i in 1:n) { + # al[i]<-((alpha+beta1*(chl[rep[i]]-chlave)+beta2*(sla[rep[i]]-slaave) + # +Aleaf[rep[i]])*q[i]/(sqrt(1+((alpha+Aleaf[rep[i]])*(alpha+ + # Aleaf[rep[i]])*q[i]*q[i])/(Jmax*Jmax))))*(pi[i]-cp)/(4*pi[i]+8*cp) ## electron transport limited with chlorophyll, SLA and random leaf effects turned on + + al[i] <- (alpha * q[i] / (sqrt(1 + (alpha * alpha * q[i] * q[i]) / (Jmax * Jmax)))) * (pi[i] - cp) / (4 * pi[i] + 8 * cp) ## electron transport limited without covariates + + # ae[i]<-(vmax+Vmon[month[i]]+beta5*(leafn[rep[i]]-leafnave) + # +Vleaf[rep[i]])*((pi[i]-cp)/(pi[i]+Kc*(1+po/Ko))) ## maximum rubisco limited with leaf N, month and random leaf effects turned on + + ae[i] <- vmax * (pi[i] - cp) / (pi[i] + Kc * (1 + po / Ko)) ## maximum rubisco limited without covariates + + # ap[i]<-3*tpu ## phosphate limited + + prean[i] <- min(al[i], ae[i]) - r ## predicted net photosynthesis + an[i] ~ dnorm(prean[i], tau) ## likelihood + pA[i] ~ dnorm(prean[i], tau) ## prediction + } } - write.model(my.model,"c3model.txt") - -init<-list() - init[[1]]<-list(r=1.2, vmax=39,alpha=0.25, tau=10, cp=6, Jmax=80) ## tau.Vleaf=30,beta1=4, beta2=1,beta5=3,tau.Vmon=10,tpu=10, - init[[2]]<-list(r=1, vmax=100, alpha=0.20, tau=20, cp=4, Jmax=150) ##tau.Vleaf=20,beta1=1,beta2=1,beta5=-1,tau.Vmon=20,tpu=13, - init[[3]]<-list(r=2, vmax=60, alpha=0.28, tau=20, cp=5,Jmax=60) ##tau.Vleaf=100,beta1=1,beta2=2,beta5=2,tau.Vmon=3,tpu=20, - - spp.list = unique(as.character(dat$id)) - - c3mcmc <- list() - - for(s in spp.list){ - sel = which(dat$id == s) - an=dat$Photo[sel] - pi=dat$Ci_Pa[sel] - q=dat$PARi[sel] -# rep=dat$rep[sel] ## turn on random leaf effects -# reps = unique(rep) -# rep = match(rep,reps) -# nrep = length(reps) -# month=dat$month[sel] ## month information - -## covariates data from dat2 -# sel2=which(dat2$id==s) -# chl=dat2$chl[sel2] -# leafn=dat2$leafn[sel2] -# sla=dat2$sla[sel2] -# rep2 = dat2$rep[sel2] -# rep2 = match(rep2,reps) -# rep3track[[s]]=rep2 -# chlave=mean(chl) -# leafnave=mean(leafn) -# slaave=mean(sla) - -## temperature compoent (Bernacchi et al. 
2001,2002,2003) -#parameter=as.character(c("Vmax","Jmax","Kc","Ko","cp","Rd")) -#c=c(26.36,17.71,35.98,12.38,11.19,18.71) -#Ha=c(65.33,43.9,80.99,23.72,24.46,46.39) -#R=8.314 -#value25=c(1,1,460,220,37,1) ## when include temperature component, the unit of Kc (umol mol-1) Ko (mmol mol-1) and cp (umol mol-1) in this equation need to be converted into Pascals -#for(i in 1:6){ -#y=value25[i]*exp(c[i]-(1000*Ha[i]/(R*(tleaf+273.15)))) -#} - - -mydat<-list(an=an, pi=pi, q=q,n=length(an),Kc=46,Ko=22000,po=21000) -## rep=rep,nrep=nrep,chl=chl,leafn=leafn, sla=sla, chlave=chlave,leafnave=leafnave,slaave=slaave,month=month - - mc3 <- openbugs(mydat, - init, - model.file="c3model.txt", - n.chains=3, - n.iter=1000, - n.burnin=500, - n.thin =25, - parameters.to.save=c("r","vmax","alpha","Jmax", "cp","tau", "prean", "pA") -##"beta1","beta2","beta5","Vmon","Vleaf","Aleaf","tau.Vmon","tau.Vleaf","tau.Aleaf", - ) - c3mcmc[[s]]=mc3 +write.model(my.model, "c3model.txt") + +init <- list() +init[[1]] <- list(r = 1.2, vmax = 39, alpha = 0.25, tau = 10, cp = 6, Jmax = 80) ## tau.Vleaf=30,beta1=4, beta2=1,beta5=3,tau.Vmon=10,tpu=10, +init[[2]] <- list(r = 1, vmax = 100, alpha = 0.20, tau = 20, cp = 4, Jmax = 150) ## tau.Vleaf=20,beta1=1,beta2=1,beta5=-1,tau.Vmon=20,tpu=13, +init[[3]] <- list(r = 2, vmax = 60, alpha = 0.28, tau = 20, cp = 5, Jmax = 60) ## tau.Vleaf=100,beta1=1,beta2=2,beta5=2,tau.Vmon=3,tpu=20, + +spp.list <- unique(as.character(dat$id)) + +c3mcmc <- list() + +for (s in spp.list) { + sel <- which(dat$id == s) + an <- dat$Photo[sel] + pi <- dat$Ci_Pa[sel] + q <- dat$PARi[sel] + # rep=dat$rep[sel] ## turn on random leaf effects + # reps = unique(rep) + # rep = match(rep,reps) + # nrep = length(reps) + # month=dat$month[sel] ## month information + + ## covariates data from dat2 + # sel2=which(dat2$id==s) + # chl=dat2$chl[sel2] + # leafn=dat2$leafn[sel2] + # sla=dat2$sla[sel2] + # rep2 = dat2$rep[sel2] + # rep2 = match(rep2,reps) + # rep3track[[s]]=rep2 + # chlave=mean(chl) + # leafnave=mean(leafn) + # slaave=mean(sla) + + ## temperature compoent (Bernacchi et al. 
2001,2002,2003) + # parameter=as.character(c("Vmax","Jmax","Kc","Ko","cp","Rd")) + # c=c(26.36,17.71,35.98,12.38,11.19,18.71) + # Ha=c(65.33,43.9,80.99,23.72,24.46,46.39) + # R=8.314 + # value25=c(1,1,460,220,37,1) ## when include temperature component, the unit of Kc (umol mol-1) Ko (mmol mol-1) and cp (umol mol-1) in this equation need to be converted into Pascals + # for(i in 1:6){ + # y=value25[i]*exp(c[i]-(1000*Ha[i]/(R*(tleaf+273.15)))) + # } + + + mydat <- list(an = an, pi = pi, q = q, n = length(an), Kc = 46, Ko = 22000, po = 21000) + ## rep=rep,nrep=nrep,chl=chl,leafn=leafn, sla=sla, chlave=chlave,leafnave=leafnave,slaave=slaave,month=month + + mc3 <- openbugs(mydat, + init, + model.file = "c3model.txt", + n.chains = 3, + n.iter = 1000, + n.burnin = 500, + n.thin = 25, + parameters.to.save = c("r", "vmax", "alpha", "Jmax", "cp", "tau", "prean", "pA") + ## "beta1","beta2","beta5","Vmon","Vleaf","Aleaf","tau.Vmon","tau.Vleaf","tau.Aleaf", + ) + c3mcmc[[s]] <- mc3 } ## make trace and density plots -for(s in spp.list){ -pdf(paste(s, " model trace.pdf",sep="")) -plot(as.mcmc.list(c3mcmc[[s]])) -dev.off() +for (s in spp.list) { + pdf(paste(s, " model trace.pdf", sep = "")) + plot(as.mcmc.list(c3mcmc[[s]])) + dev.off() } ## predictions vs measurements -sel1=which(dat$id==s) -an=dat$Photo[sel1] -plot(an,c3mcmc[[s]]$mean$prean, ylim=c(-5,40),xlim=c(-5,40), pch=19, -main="Predicted photosynthesis vs measured phtosynthesis", xlab="Measured An (umol m-2 s-1)", -ylab="Predicted An (umol m-2 s-1)",cex.main=1.6,cex.lab=1.4) -abline(0,1, col="dark green",lwd=3) - - +sel1 <- which(dat$id == s) +an <- dat$Photo[sel1] +plot(an, c3mcmc[[s]]$mean$prean, + ylim = c(-5, 40), xlim = c(-5, 40), pch = 19, + main = "Predicted photosynthesis vs measured phtosynthesis", xlab = "Measured An (umol m-2 s-1)", + ylab = "Predicted An (umol m-2 s-1)", cex.main = 1.6, cex.lab = 1.4 +) +abline(0, 1, col = "dark green", lwd = 3) diff --git a/modules/photosynthesis/code/FBB_cluster_processing.R b/modules/photosynthesis/code/FBB_cluster_processing.R index a05a14ffeaf..0d0b04d9758 100644 --- a/modules/photosynthesis/code/FBB_cluster_processing.R +++ b/modules/photosynthesis/code/FBB_cluster_processing.R @@ -1,50 +1,50 @@ ## Retreive & Analyze FBBmodel Results -##syntax: nohup env SPECIES=2 LEAF=4 R --vanilla < FBB_cluster_processing.R > log2-4 & +## syntax: nohup env SPECIES=2 LEAF=4 R --vanilla < FBB_cluster_processing.R > log2-4 & #****************** USER PARAMETERS *********************************** -species.id <- as.numeric(system("echo $SPECIES",intern=TRUE)) -leaf <- as.numeric(system("echo $LEAF",intern=TRUE)) -nchains <- 3 # as.numeric(system("echo $NCHAINS",intern=TRUE)) +species.id <- as.numeric(system("echo $SPECIES", intern = TRUE)) +leaf <- as.numeric(system("echo $LEAF", intern = TRUE)) +nchains <- 3 # as.numeric(system("echo $NCHAINS",intern=TRUE)) -niter = 100000 # number of MCMC iterations to compute +niter <- 100000 # number of MCMC iterations to compute -start = 10000 # number of burn-in iterations -end = niter # last iteration -thin = 50 # number of iterations to thin +start <- 10000 # number of burn-in iterations +end <- niter # last iteration +thin <- 50 # number of iterations to thin ## File parameters -filePath = "/home/wolz1/Biomath/" # "/Users/wolzy4u/Desktop/Biomath/" -datFile = "Kdata_Project.csv" # "Biomath_Processed_Data.csv" -saveDirec = "FBB_soyface_output/" # "FBB_output/" -saveDirDat = "FBB_soyface_output_dat/" # "FBB_output_dat/" - -setupFile = "FBB_setup.R" -funcFile = 
"FBB_functions.R" -multianalysisFile = "FBB_multi_chain_analysis.R" -comparisonFile = "FBB_compare.R" -paramcorrFile = "FBB_param_corr.R" -summaryFile = "FBB_summary.R" - -datFile = paste(filePath, datFile, sep="") -saveDirec = paste(filePath, saveDirec, sep="") -saveDirDat = paste(filePath, saveDirDat, sep="") -setupFile = paste(filePath, setupFile,sep="") -funcFile = paste(filePath, funcFile, sep="") -multianalysisFile = paste(filePath, multianalysisFile, sep="") -comparisonFile = paste(filePath, comparisonFile, sep="") -paramcorrFile = paste(filePath, paramcorrFile, sep="") -summaryFile = paste(filePath, summaryFile, sep="") +filePath <- "/home/wolz1/Biomath/" # "/Users/wolzy4u/Desktop/Biomath/" +datFile <- "Kdata_Project.csv" # "Biomath_Processed_Data.csv" +saveDirec <- "FBB_soyface_output/" # "FBB_output/" +saveDirDat <- "FBB_soyface_output_dat/" # "FBB_output_dat/" + +setupFile <- "FBB_setup.R" +funcFile <- "FBB_functions.R" +multianalysisFile <- "FBB_multi_chain_analysis.R" +comparisonFile <- "FBB_compare.R" +paramcorrFile <- "FBB_param_corr.R" +summaryFile <- "FBB_summary.R" + +datFile <- paste(filePath, datFile, sep = "") +saveDirec <- paste(filePath, saveDirec, sep = "") +saveDirDat <- paste(filePath, saveDirDat, sep = "") +setupFile <- paste(filePath, setupFile, sep = "") +funcFile <- paste(filePath, funcFile, sep = "") +multianalysisFile <- paste(filePath, multianalysisFile, sep = "") +comparisonFile <- paste(filePath, comparisonFile, sep = "") +paramcorrFile <- paste(filePath, paramcorrFile, sep = "") +summaryFile <- paste(filePath, summaryFile, sep = "") ## Toggles and parameters -plot.corr = TRUE # Whether or not to plot parameter correlations +plot.corr <- TRUE # Whether or not to plot parameter correlations ## Constants -O = 210 # [02] in ppt (millimol/mol) -Kc = 275 # Michaelis-Menton constant of RuBisCO for C02 -Ko = 400 # Michaelis-Menton constant of RuBisCO for O -phi = 0.85 # maximum dark-adapted quantum yield of PSII -beta = 0.5 # fraction of absorbed quanta that reasches PSII -theta = 0.7 # empirical curvature factor +O <- 210 # [02] in ppt (millimol/mol) +Kc <- 275 # Michaelis-Menton constant of RuBisCO for C02 +Ko <- 400 # Michaelis-Menton constant of RuBisCO for O +phi <- 0.85 # maximum dark-adapted quantum yield of PSII +beta <- 0.5 # fraction of absorbed quanta that reasches PSII +theta <- 0.7 # empirical curvature factor ## Run setup file source(setupFile) @@ -52,79 +52,81 @@ source(setupFile) ## Load functions (in separate file for convenience) source(funcFile) -Vcmax.allchains = matrix(NA, nrow=niter, ncol=nchains) -Jmax.allchains = matrix(NA, nrow=niter, ncol=nchains) -R.allchains = matrix(NA, nrow=niter, ncol=nchains) -Gstar.allchains = matrix(NA, nrow=niter, ncol=nchains) -alpha.allchains = matrix(NA, nrow=niter, ncol=nchains) -m.allchains = matrix(NA, nrow=niter, ncol=nchains) -g0.allchains = matrix(NA, nrow=niter, ncol=nchains) -tauBB.allchains = matrix(NA, nrow=niter, ncol=nchains) -tauF.allchains = matrix(NA, nrow=niter, ncol=nchains) -An.pred.allchains = matrix(NA, nrow=niter*nchains, ncol=npoints) -gs.pred.allchains = matrix(NA, nrow=niter*nchains, ncol=npoints) -DA.allchains = matrix(NA, nrow=niter, ncol=nchains) -Dgs.allchains = matrix(NA, nrow=niter, ncol=nchains) -pA.allchains = matrix(NA, nrow=niter*nchains, ncol=npoints) -pgs.allchains = matrix(NA, nrow=niter*nchains, ncol=npoints) +Vcmax.allchains <- matrix(NA, nrow = niter, ncol = nchains) +Jmax.allchains <- matrix(NA, nrow = niter, ncol = nchains) +R.allchains <- matrix(NA, nrow = niter, 
ncol = nchains) +Gstar.allchains <- matrix(NA, nrow = niter, ncol = nchains) +alpha.allchains <- matrix(NA, nrow = niter, ncol = nchains) +m.allchains <- matrix(NA, nrow = niter, ncol = nchains) +g0.allchains <- matrix(NA, nrow = niter, ncol = nchains) +tauBB.allchains <- matrix(NA, nrow = niter, ncol = nchains) +tauF.allchains <- matrix(NA, nrow = niter, ncol = nchains) +An.pred.allchains <- matrix(NA, nrow = niter * nchains, ncol = npoints) +gs.pred.allchains <- matrix(NA, nrow = niter * nchains, ncol = npoints) +DA.allchains <- matrix(NA, nrow = niter, ncol = nchains) +Dgs.allchains <- matrix(NA, nrow = niter, ncol = nchains) +pA.allchains <- matrix(NA, nrow = niter * nchains, ncol = npoints) +pgs.allchains <- matrix(NA, nrow = niter * nchains, ncol = npoints) #********************************************************************** ## EXTRACT DATA FOR EACH CHAIN AND COMPILE INTO ONE MATRIX PER VARIABLE -for(chain in 1:nchains){ -oldrunName = paste(species.id, leaf, chain, sep="-") -load(paste(saveDirDat,oldrunName,".Rdata",sep="")) - -Vcmax.allchains[,chain] = Vcmax.mcmc[,chain] -Jmax.allchains[,chain] = Jmax.mcmc[,chain] -R.allchains[,chain] = R.mcmc[,chain] - -if(exists('Gstar.mcmc') && !is.null(Gstar.mcmc)) { Gstar.allchains[,chain] = Gstar.mcmc[,chain] } else { - Gstar.allchains = NULL -} - -alpha.allchains[,chain] = alpha.mcmc[,chain] -m.allchains[,chain] = m.mcmc[,chain] -g0.allchains[,chain] = g0.mcmc[,chain] -tauBB.allchains[,chain] = tauBB.mcmc[,chain] -tauF.allchains[,chain] = tauF.mcmc[,chain] -DA.allchains[,chain] = DA.mcmc[,chain] -Dgs.allchains[,chain] = Dgs.mcmc[,chain] - -An.pred.allchains[((1+niter*(chain-1)):(niter+niter*(chain-1))),] = An.pred.mcmc[((1+niter*(chain-1)):(niter+niter*(chain-1))),] -gs.pred.allchains[((1+niter*(chain-1)):(niter+niter*(chain-1))),] = gs.pred.mcmc[((1+niter*(chain-1)):(niter+niter*(chain-1))),] - -pA.allchains[((1+niter*(chain-1)):(niter+niter*(chain-1))),] = pA.mcmc[((1+niter*(chain-1)):(niter+niter*(chain-1))),] -pgs.allchains[((1+niter*(chain-1)):(niter+niter*(chain-1))),] = pgs.mcmc[((1+niter*(chain-1)):(niter+niter*(chain-1))),] +for (chain in 1:nchains) { + oldrunName <- paste(species.id, leaf, chain, sep = "-") + load(paste(saveDirDat, oldrunName, ".Rdata", sep = "")) + + Vcmax.allchains[, chain] <- Vcmax.mcmc[, chain] + Jmax.allchains[, chain] <- Jmax.mcmc[, chain] + R.allchains[, chain] <- R.mcmc[, chain] + + if (exists("Gstar.mcmc") && !is.null(Gstar.mcmc)) { + Gstar.allchains[, chain] <- Gstar.mcmc[, chain] + } else { + Gstar.allchains <- NULL + } + + alpha.allchains[, chain] <- alpha.mcmc[, chain] + m.allchains[, chain] <- m.mcmc[, chain] + g0.allchains[, chain] <- g0.mcmc[, chain] + tauBB.allchains[, chain] <- tauBB.mcmc[, chain] + tauF.allchains[, chain] <- tauF.mcmc[, chain] + DA.allchains[, chain] <- DA.mcmc[, chain] + Dgs.allchains[, chain] <- Dgs.mcmc[, chain] + + An.pred.allchains[((1 + niter * (chain - 1)):(niter + niter * (chain - 1))), ] <- An.pred.mcmc[((1 + niter * (chain - 1)):(niter + niter * (chain - 1))), ] + gs.pred.allchains[((1 + niter * (chain - 1)):(niter + niter * (chain - 1))), ] <- gs.pred.mcmc[((1 + niter * (chain - 1)):(niter + niter * (chain - 1))), ] + + pA.allchains[((1 + niter * (chain - 1)):(niter + niter * (chain - 1))), ] <- pA.mcmc[((1 + niter * (chain - 1)):(niter + niter * (chain - 1))), ] + pgs.allchains[((1 + niter * (chain - 1)):(niter + niter * (chain - 1))), ] <- pgs.mcmc[((1 + niter * (chain - 1)):(niter + niter * (chain - 1))), ] } ## end of chain 
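Each chain's prediction draws occupy a contiguous row block of the stacked matrices, selected by `(1 + niter * (chain - 1)):(niter + niter * (chain - 1))`. A minimal sketch of that row-block indexing, using small illustrative dimensions and stand-in random values in place of the real per-chain MCMC output:

niter <- 4; nchains <- 3; npoints <- 2 # illustrative sizes only
An.pred.allchains <- matrix(NA, nrow = niter * nchains, ncol = npoints)
for (chain in 1:nchains) {
  rows <- (1 + niter * (chain - 1)):(niter + niter * (chain - 1)) # chain 1 -> rows 1:4, chain 2 -> 5:8, chain 3 -> 9:12
  An.pred.allchains[rows, ] <- matrix(rnorm(niter * npoints), niter, npoints) # stand-in for An.pred.mcmc[rows, ]
}
stopifnot(!anyNA(An.pred.allchains)) # every row block is filled exactly once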
#********************************************************************** -runName = paste(species.id, leaf, sep="-") -SaveLeafDir = paste(saveDirec, runName, "/", sep="") -saveFileDat = paste(SaveLeafDir, runName, ".Rdata", sep="") -saveFileSum = paste(SaveLeafDir, runName, "_summary.txt", sep="") -dir.create(SaveLeafDir,showWarnings=FALSE,recursive=TRUE) - -Vcmax.mcmc = Vcmax.allchains -Jmax.mcmc = Jmax.allchains -R.mcmc = R.allchains -Gstar.mcmc = Gstar.allchains -alpha.mcmc = alpha.allchains -m.mcmc = m.allchains -g0.mcmc = g0.allchains -tauBB.mcmc = tauBB.allchains -tauF.mcmc = tauF.allchains -DA.mcmc = DA.allchains -Dgs.mcmc = Dgs.allchains -An.pred.mcmc = An.pred.allchains -gs.pred.mcmc = gs.pred.allchains -pA.mcmc = pA.allchains -pgs.mcmc = pgs.allchains +runName <- paste(species.id, leaf, sep = "-") +SaveLeafDir <- paste(saveDirec, runName, "/", sep = "") +saveFileDat <- paste(SaveLeafDir, runName, ".Rdata", sep = "") +saveFileSum <- paste(SaveLeafDir, runName, "_summary.txt", sep = "") +dir.create(SaveLeafDir, showWarnings = FALSE, recursive = TRUE) + +Vcmax.mcmc <- Vcmax.allchains +Jmax.mcmc <- Jmax.allchains +R.mcmc <- R.allchains +Gstar.mcmc <- Gstar.allchains +alpha.mcmc <- alpha.allchains +m.mcmc <- m.allchains +g0.mcmc <- g0.allchains +tauBB.mcmc <- tauBB.allchains +tauF.mcmc <- tauF.allchains +DA.mcmc <- DA.allchains +Dgs.mcmc <- Dgs.allchains +An.pred.mcmc <- An.pred.allchains +gs.pred.mcmc <- gs.pred.allchains +pA.mcmc <- pA.allchains +pgs.mcmc <- pgs.allchains ## Run analysis -source(multianalysisFile,print.eval=TRUE) +source(multianalysisFile, print.eval = TRUE) ## Run parameter correlation -source(paramcorrFile,print.eval=TRUE) +source(paramcorrFile, print.eval = TRUE) ## Run summary creation -source(summaryFile,print.eval=TRUE) +source(summaryFile, print.eval = TRUE) diff --git a/modules/photosynthesis/code/FBB_compare.R b/modules/photosynthesis/code/FBB_compare.R index 18d86d49ed8..989b017414f 100644 --- a/modules/photosynthesis/code/FBB_compare.R +++ b/modules/photosynthesis/code/FBB_compare.R @@ -1,428 +1,455 @@ ## FBB_compare library(plotrix) -filePath = "/Users/wolzy4u/Desktop/Biomath/" #"/home/wolz1/Biomath/" -saveDirec = "FBB_soyface_output/" # "FBB_output/" -compareFile = "param_compare.csv" -saveDirec = paste(filePath, saveDirec, sep="") -compare = read.csv(paste(filePath,compareFile,sep=""), header=T) +filePath <- "/Users/wolzy4u/Desktop/Biomath/" # "/home/wolz1/Biomath/" +saveDirec <- "FBB_soyface_output/" # "FBB_output/" +compareFile <- "param_compare.csv" +saveDirec <- paste(filePath, saveDirec, sep = "") +compare <- read.csv(paste(filePath, compareFile, sep = ""), header = T) -compare.coupled.alone = TRUE -compare.coupled.manual = TRUE -compare.alone.manual = TRUE +compare.coupled.alone <- TRUE +compare.coupled.manual <- TRUE +compare.alone.manual <- TRUE ## Coupled FBB Model Mean & SD -Vcmax.leaf.mean = compare$coupled.Vcmax -Jmax.leaf.mean = compare$coupled.Jmax -alpha.leaf.mean = compare$coupled.alpha -R.leaf.mean = compare$coupled.R -Gstar.leaf.mean = compare$coupled.Gstar -tauF.leaf.mean = compare$coupled.tauF -m.leaf.mean = compare$coupled.m -g0.leaf.mean = compare$coupled.g0 -tauBB.leaf.mean = compare$coupled.tauBB -DIC.F.leaf.mean = compare$coupled.DIC.F -DIC.BB.leaf.mean = compare$coupled.DIC.BB +Vcmax.leaf.mean <- compare$coupled.Vcmax +Jmax.leaf.mean <- compare$coupled.Jmax +alpha.leaf.mean <- compare$coupled.alpha +R.leaf.mean <- compare$coupled.R +Gstar.leaf.mean <- compare$coupled.Gstar +tauF.leaf.mean <- compare$coupled.tauF +m.leaf.mean <- 
compare$coupled.m +g0.leaf.mean <- compare$coupled.g0 +tauBB.leaf.mean <- compare$coupled.tauBB +DIC.F.leaf.mean <- compare$coupled.DIC.F +DIC.BB.leaf.mean <- compare$coupled.DIC.BB + +Vcmax.leaf.sd <- compare$coupled.Vcmax.sd +Jmax.leaf.sd <- compare$coupled.Jmax.sd +alpha.leaf.sd <- compare$coupled.alpha.sd +R.leaf.sd <- compare$coupled.R.sd +Gstar.leaf.sd <- compare$coupled.Gstar.sd +tauF.leaf.sd <- compare$coupled.tauF.sd +m.leaf.sd <- compare$coupled.m.sd +g0.leaf.sd <- compare$coupled.g0.sd +tauBB.leaf.sd <- compare$coupled.tauBB.sd -Vcmax.leaf.sd = compare$coupled.Vcmax.sd -Jmax.leaf.sd = compare$coupled.Jmax.sd -alpha.leaf.sd = compare$coupled.alpha.sd -R.leaf.sd = compare$coupled.R.sd -Gstar.leaf.sd = compare$coupled.Gstar.sd -tauF.leaf.sd = compare$coupled.tauF.sd -m.leaf.sd = compare$coupled.m.sd -g0.leaf.sd = compare$coupled.g0.sd -tauBB.leaf.sd = compare$coupled.tauBB.sd - ## Uncoupled FBB Model Mean & SD -compare.V.alone = compare$alone.Vcmax -compare.J.alone = compare$alone.Jmax -compare.alpha.alone = compare$alone.alpha -compare.R.alone = compare$alone.R -compare.Gstar.alone = compare$alone.Gstar -compare.tauF.alone = compare$alone.tauF -compare.DIC.F.alone = compare$alone.DIC.F -compare.g0.alone = compare$alone.g0 -compare.m.alone = compare$alone.m -compare.tauBB.alone = compare$alone.tauBB -compare.DIC.BB.alone = compare$alone.DIC.BB - -sd.V.alone = compare$alone.Vcmax.sd -sd.J.alone = compare$alone.Jmax.sd -sd.alpha.alone = compare$alone.alpha.sd -sd.R.alone = compare$alone.R.sd -sd.Gstar.alone = compare$alone.Gstar.sd -sd.tauF.alone = compare$alone.tauF.sd -sd.g0.alone = compare$alone.g0.sd -sd.m.alone = compare$alone.m.sd -sd.tauBB.alone = compare$alone.tauBB.sd +compare.V.alone <- compare$alone.Vcmax +compare.J.alone <- compare$alone.Jmax +compare.alpha.alone <- compare$alone.alpha +compare.R.alone <- compare$alone.R +compare.Gstar.alone <- compare$alone.Gstar +compare.tauF.alone <- compare$alone.tauF +compare.DIC.F.alone <- compare$alone.DIC.F +compare.g0.alone <- compare$alone.g0 +compare.m.alone <- compare$alone.m +compare.tauBB.alone <- compare$alone.tauBB +compare.DIC.BB.alone <- compare$alone.DIC.BB + +sd.V.alone <- compare$alone.Vcmax.sd +sd.J.alone <- compare$alone.Jmax.sd +sd.alpha.alone <- compare$alone.alpha.sd +sd.R.alone <- compare$alone.R.sd +sd.Gstar.alone <- compare$alone.Gstar.sd +sd.tauF.alone <- compare$alone.tauF.sd +sd.g0.alone <- compare$alone.g0.sd +sd.m.alone <- compare$alone.m.sd +sd.tauBB.alone <- compare$alone.tauBB.sd ## Manual Fit Mean -compare.V.man = compare$Katie.Vcmax -compare.J.man = compare$Katie.Jmax -compare.g0.MLE = compare$g0.nobound -compare.m.MLE = compare$m.nobound +compare.V.man <- compare$Katie.Vcmax +compare.J.man <- compare$Katie.Jmax +compare.g0.MLE <- compare$g0.nobound +compare.m.MLE <- compare$m.nobound + + +if (compare.coupled.alone) { + ## compare coupled mcmc means to uncoupled means for Vcmax + lim <- min(compare.V.alone - sd.V.alone, Vcmax.leaf.mean - Vcmax.leaf.sd) - 10 + lim[2] <- max(compare.V.alone + sd.V.alone, Vcmax.leaf.mean + Vcmax.leaf.sd) + 10 + pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-Vcmax.pdf", sep = "")) + plot(compare.V.alone, + Vcmax.leaf.mean, + ylim = lim, + xlim = lim, + pch = 19, + main = "Coupled MCMC vs Non-Coupled MCMC Vcmax Means", + xlab = "Non-Coupled MCMC Vcmax (umol m-2 s-1)", + ylab = "Coupled MCMC Vcmax (umol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.V.alone, Vcmax.leaf.mean, Vcmax.leaf.sd, add = TRUE, lwd = 
1) + plotCI(compare.V.alone, Vcmax.leaf.mean, sd.V.alone, err = "x", add = TRUE, lwd = 1) + dev.off() + + ## compare coupled mcmc means to manual means for Jmax + lim <- min(compare.J.alone - sd.J.alone, Jmax.leaf.mean - Jmax.leaf.sd) - 10 + lim[2] <- max(compare.J.alone + sd.J.alone, Jmax.leaf.mean + Jmax.leaf.sd) + 10 + pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-Jmax.pdf", sep = "")) + plot(compare.J.alone, + Jmax.leaf.mean, + ylim = lim, + xlim = lim, + pch = 19, + main = "Coupled MCMC vs Non-Coupled MCMC Jmax Means", + xlab = "Non-Coupled MCMC Jmax (umol m-2 s-1)", + ylab = "Coupled MCMC Jmax (umol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.J.alone, Jmax.leaf.mean, Jmax.leaf.sd, add = TRUE, lwd = 1) + plotCI(compare.J.alone, Jmax.leaf.mean, sd.J.alone, err = "x", add = TRUE, lwd = 1) + dev.off() + ## compare coupled mcmc means to manual means for alpha + lim <- min(compare.alpha.alone - sd.alpha.alone, alpha.leaf.mean - alpha.leaf.sd) - 0.1 + lim[2] <- max(compare.alpha.alone + sd.alpha.alone, alpha.leaf.mean + alpha.leaf.sd) + 0.1 + pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-alpha.pdf", sep = "")) + plot(compare.alpha.alone, + alpha.leaf.mean, + ylim = lim, + xlim = lim, + pch = 19, + main = "Coupled MCMC vs Non-Coupled MCMC alpha Means", + xlab = "Non-Coupled MCMC alpha", + ylab = "Coupled MCMC alpha", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.alpha.alone, alpha.leaf.mean, alpha.leaf.sd, add = TRUE, lwd = 1) + plotCI(compare.alpha.alone, alpha.leaf.mean, + sd.alpha.alone, + err = "x", add = TRUE, lwd = 1 + ) + dev.off() -if(compare.coupled.alone) { - ## compare coupled mcmc means to uncoupled means for Vcmax - lim = min(compare.V.alone-sd.V.alone,Vcmax.leaf.mean-Vcmax.leaf.sd)-10 - lim[2] = max(compare.V.alone+sd.V.alone,Vcmax.leaf.mean+Vcmax.leaf.sd)+10 - pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-Vcmax.pdf",sep="")) - plot(compare.V.alone, - Vcmax.leaf.mean, - ylim = lim, - xlim = lim, - pch = 19, - main = "Coupled MCMC vs Non-Coupled MCMC Vcmax Means", - xlab = "Non-Coupled MCMC Vcmax (umol m-2 s-1)", - ylab = "Coupled MCMC Vcmax (umol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.V.alone,Vcmax.leaf.mean,Vcmax.leaf.sd,add=TRUE,lwd=1) - plotCI(compare.V.alone,Vcmax.leaf.mean,sd.V.alone,err="x",add=TRUE,lwd=1) - dev.off() + ## compare coupled mcmc means to manual means for R + lim <- min(compare.R.alone - sd.R.alone, R.leaf.mean - R.leaf.sd) - 0.1 + lim[2] <- max(compare.R.alone + sd.R.alone, R.leaf.mean + R.leaf.sd) + 0.1 + pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-R.pdf", sep = "")) + plot(compare.R.alone, + R.leaf.mean, + ylim = lim, + xlim = lim, + pch = 19, + main = "Coupled MCMC vs Non-Coupled MCMC R Means", + xlab = "Non-Coupled MCMC R (umol m-2 s-1)", + ylab = "Coupled MCMC R (umol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.R.alone, R.leaf.mean, R.leaf.sd, add = TRUE, lwd = 1) + plotCI(compare.R.alone, R.leaf.mean, sd.R.alone, err = "x", add = TRUE, lwd = 1) + dev.off() - ## compare coupled mcmc means to manual means for Jmax - lim = min(compare.J.alone-sd.J.alone,Jmax.leaf.mean-Jmax.leaf.sd)-10 - lim[2] = max(compare.J.alone+sd.J.alone,Jmax.leaf.mean+Jmax.leaf.sd)+10 - pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-Jmax.pdf",sep="")) - plot(compare.J.alone, - Jmax.leaf.mean, - ylim = lim, - xlim 
= lim, - pch = 19, - main = "Coupled MCMC vs Non-Coupled MCMC Jmax Means", - xlab = "Non-Coupled MCMC Jmax (umol m-2 s-1)", - ylab = "Coupled MCMC Jmax (umol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.J.alone,Jmax.leaf.mean,Jmax.leaf.sd,add=TRUE,lwd=1) - plotCI(compare.J.alone,Jmax.leaf.mean,sd.J.alone,err="x",add=TRUE,lwd=1) - dev.off() - - ## compare coupled mcmc means to manual means for alpha - lim = min(compare.alpha.alone-sd.alpha.alone,alpha.leaf.mean-alpha.leaf.sd)-0.1 - lim[2] = max(compare.alpha.alone+sd.alpha.alone,alpha.leaf.mean+alpha.leaf.sd)+0.1 - pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-alpha.pdf",sep="")) - plot(compare.alpha.alone, - alpha.leaf.mean, - ylim = lim, - xlim = lim, - pch = 19, - main = "Coupled MCMC vs Non-Coupled MCMC alpha Means", - xlab = "Non-Coupled MCMC alpha", - ylab = "Coupled MCMC alpha", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.alpha.alone,alpha.leaf.mean,alpha.leaf.sd,add=TRUE,lwd=1) - plotCI(compare.alpha.alone,alpha.leaf.mean, - sd.alpha.alone,err="x",add=TRUE,lwd=1) - dev.off() + ## compare coupled mcmc means to manual means for Gstar + lim <- min(compare.Gstar.alone - sd.Gstar.alone, Gstar.leaf.mean - Gstar.leaf.sd) - 5 + lim[2] <- max(compare.Gstar.alone + sd.Gstar.alone, Gstar.leaf.mean + Gstar.leaf.sd) + 5 + pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-Gstar.pdf", sep = "")) + plot(compare.Gstar.alone, + Gstar.leaf.mean, + ylim = lim, + xlim = lim, + pch = 19, + main = "Coupled MCMC vs Non-Coupled MCMC Gstar Means", + xlab = "Non-Coupled MCMC Gstar (umol mol-1)", + ylab = "Coupled MCMC Gstar (umol mol-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.Gstar.alone, Gstar.leaf.mean, Gstar.leaf.sd, add = TRUE, lwd = 1) + plotCI(compare.Gstar.alone, Gstar.leaf.mean, + sd.Gstar.alone, + err = "x", add = TRUE, lwd = 1 + ) + dev.off() - ## compare coupled mcmc means to manual means for R - lim = min(compare.R.alone-sd.R.alone,R.leaf.mean-R.leaf.sd)-0.1 - lim[2] = max(compare.R.alone+sd.R.alone,R.leaf.mean+R.leaf.sd)+0.1 - pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-R.pdf",sep="")) - plot(compare.R.alone, - R.leaf.mean, - ylim = lim, - xlim = lim, - pch = 19, - main = "Coupled MCMC vs Non-Coupled MCMC R Means", - xlab = "Non-Coupled MCMC R (umol m-2 s-1)", - ylab = "Coupled MCMC R (umol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.R.alone,R.leaf.mean,R.leaf.sd,add=TRUE,lwd=1) - plotCI(compare.R.alone,R.leaf.mean,sd.R.alone,err="x",add=TRUE,lwd=1) - dev.off() - - ## compare coupled mcmc means to manual means for Gstar - lim = min(compare.Gstar.alone-sd.Gstar.alone,Gstar.leaf.mean-Gstar.leaf.sd)-5 - lim[2] = max(compare.Gstar.alone+ sd.Gstar.alone,Gstar.leaf.mean+ Gstar.leaf.sd)+5 - pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-Gstar.pdf",sep="")) - plot(compare.Gstar.alone, - Gstar.leaf.mean, - ylim = lim, - xlim = lim, - pch = 19, - main = "Coupled MCMC vs Non-Coupled MCMC Gstar Means", - xlab = "Non-Coupled MCMC Gstar (umol mol-1)", - ylab = "Coupled MCMC Gstar (umol mol-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.Gstar.alone,Gstar.leaf.mean,Gstar.leaf.sd,add=TRUE,lwd=1) - plotCI(compare.Gstar.alone,Gstar.leaf.mean, - sd.Gstar.alone,err="x",add=TRUE,lwd=1) - dev.off() - - ## compare coupled mcmc means to manual means for tauF - lim = 
min(compare.tauF.alone-sd.tauF.alone,tauF.leaf.mean-tauF.leaf.sd)-5 - lim[2] = max(compare.tauF.alone+sd.tauF.alone,tauF.leaf.mean+tauF.leaf.sd)+5 - pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-tauF.pdf",sep="")) - plot(compare.tauF.alone, - tauF.leaf.mean, - ylim = lim, - xlim = lim, - pch = 19, - main = "Coupled MCMC vs Non-Coupled MCMC tauF Means", - xlab = "Non-Coupled MCMC tauF", - ylab = "Coupled MCMC tauF", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.tauF.alone,tauF.leaf.mean,tauF.leaf.sd,add=TRUE,lwd=1) - plotCI(compare.tauF.alone,tauF.leaf.mean, - sd.tauF.alone,err="x",add=TRUE,lwd=1) - dev.off() - - ## compare coupled mcmc means to manual means for DIC.F - lim = min(compare.DIC.F.alone,DIC.F.leaf.mean)-5 - lim[2] = max(compare.DIC.F.alone,DIC.F.leaf.mean)+5 - pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-DIC.F.pdf",sep="")) - plot(compare.DIC.F.alone, - DIC.F.leaf.mean, - ylim = lim, - xlim = lim, - pch = 19, - main = "Coupled MCMC vs Non-Coupled MCMC Farquhar DIC", - xlab = "Non-Coupled MCMC DIC", - ylab = "Coupled MCMC DIC", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - dev.off() - - ## compare coupled mcmc means to MLE means for g0 - lim = min(compare.g0.alone-sd.g0.alone,g0.leaf.mean-g0.leaf.sd)-0.1 - lim[2] = max(compare.g0.alone+sd.g0.alone,g0.leaf.mean+g0.leaf.sd)+0.1 - pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-g0.pdf",sep="")) - plot(compare.g0.alone, - g0.leaf.mean, - ylim = lim, - xlim = lim, - pch = 19, - main = "Coupled MCMC vs Non-Coupled MCMC g0 Means", - xlab = "Non-Coupled MCMC g0 (mol m-2 s-1)", - ylab = "Coupled MCMC g0 (mol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.g0.alone,g0.leaf.mean,g0.leaf.sd,add=TRUE,lwd=1) - plotCI(compare.g0.alone,g0.leaf.mean,sd.g0.alone,err="x",add=TRUE,lwd=1) - dev.off() + ## compare coupled mcmc means to manual means for tauF + lim <- min(compare.tauF.alone - sd.tauF.alone, tauF.leaf.mean - tauF.leaf.sd) - 5 + lim[2] <- max(compare.tauF.alone + sd.tauF.alone, tauF.leaf.mean + tauF.leaf.sd) + 5 + pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-tauF.pdf", sep = "")) + plot(compare.tauF.alone, + tauF.leaf.mean, + ylim = lim, + xlim = lim, + pch = 19, + main = "Coupled MCMC vs Non-Coupled MCMC tauF Means", + xlab = "Non-Coupled MCMC tauF", + ylab = "Coupled MCMC tauF", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.tauF.alone, tauF.leaf.mean, tauF.leaf.sd, add = TRUE, lwd = 1) + plotCI(compare.tauF.alone, tauF.leaf.mean, + sd.tauF.alone, + err = "x", add = TRUE, lwd = 1 + ) + dev.off() - ## compare coupled mcmc means to MLE means for m - lim = min(compare.m.alone-sd.m.alone,m.leaf.mean-m.leaf.sd)-1 - lim[2] = max(compare.m.alone+sd.m.alone,m.leaf.mean+m.leaf.sd)+1 - pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-m.pdf",sep="")) - plot(compare.m.alone, - m.leaf.mean, - ylim = lim, - xlim = lim, - pch = 19, - main = "Coupled MCMC vs Non-Coupled MCMC m Means", - xlab = "Non-Coupled MCMC m", - ylab = "Coupled MCMC m", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.m.alone,m.leaf.mean,m.leaf.sd,add=TRUE,lwd=1) - plotCI(compare.m.alone,m.leaf.mean,sd.m.alone,err="x",add=TRUE,lwd=1) - dev.off() - - ## compare coupled mcmc means to manual means for tauF - lim = min(compare.tauBB.alone-sd.tauBB.alone,tauBB.leaf.mean-tauBB.leaf.sd)-0.0001 - lim[2] = 
max(compare.tauBB.alone+sd.tauBB.alone,tauBB.leaf.mean+tauBB.leaf.sd)+0.0001 - pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-tauBB.pdf",sep="")) - plot(compare.tauBB.alone, - tauBB.leaf.mean, - ylim = lim, - xlim = lim, - pch = 19, - main = "Coupled MCMC vs Non-Coupled MCMC tauBB Means", - xlab = "Non-Coupled MCMC tauBB", - ylab = "Coupled MCMC tauBB", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.tauBB.alone,tauBB.leaf.mean,tauBB.leaf.sd,add=TRUE,lwd=1) - plotCI(compare.tauBB.alone,tauBB.leaf.mean, - sd.tauBB.alone,err="x",add=TRUE,lwd=1) - dev.off() - - ## compare coupled mcmc means to manual means for DIC.BB - lim = min(compare.DIC.BB.alone,DIC.BB.leaf.mean)-5 - lim[2] = max(compare.DIC.BB.alone,DIC.BB.leaf.mean)+5 - pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-DIC.BB.pdf",sep="")) - plot(compare.DIC.BB.alone, - DIC.BB.leaf.mean, - ylim = lim, - xlim = lim, - pch = 19, - main = "Coupled MCMC vs Non-Coupled MCMC Ball-Berry DIC", - xlab = "Non-Coupled MCMC DIC", - ylab = "Coupled MCMC DIC", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - dev.off() + ## compare coupled mcmc means to manual means for DIC.F + lim <- min(compare.DIC.F.alone, DIC.F.leaf.mean) - 5 + lim[2] <- max(compare.DIC.F.alone, DIC.F.leaf.mean) + 5 + pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-DIC.F.pdf", sep = "")) + plot(compare.DIC.F.alone, + DIC.F.leaf.mean, + ylim = lim, + xlim = lim, + pch = 19, + main = "Coupled MCMC vs Non-Coupled MCMC Farquhar DIC", + xlab = "Non-Coupled MCMC DIC", + ylab = "Coupled MCMC DIC", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + dev.off() + + ## compare coupled mcmc means to MLE means for g0 + lim <- min(compare.g0.alone - sd.g0.alone, g0.leaf.mean - g0.leaf.sd) - 0.1 + lim[2] <- max(compare.g0.alone + sd.g0.alone, g0.leaf.mean + g0.leaf.sd) + 0.1 + pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-g0.pdf", sep = "")) + plot(compare.g0.alone, + g0.leaf.mean, + ylim = lim, + xlim = lim, + pch = 19, + main = "Coupled MCMC vs Non-Coupled MCMC g0 Means", + xlab = "Non-Coupled MCMC g0 (mol m-2 s-1)", + ylab = "Coupled MCMC g0 (mol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.g0.alone, g0.leaf.mean, g0.leaf.sd, add = TRUE, lwd = 1) + plotCI(compare.g0.alone, g0.leaf.mean, sd.g0.alone, err = "x", add = TRUE, lwd = 1) + dev.off() + + ## compare coupled mcmc means to MLE means for m + lim <- min(compare.m.alone - sd.m.alone, m.leaf.mean - m.leaf.sd) - 1 + lim[2] <- max(compare.m.alone + sd.m.alone, m.leaf.mean + m.leaf.sd) + 1 + pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-m.pdf", sep = "")) + plot(compare.m.alone, + m.leaf.mean, + ylim = lim, + xlim = lim, + pch = 19, + main = "Coupled MCMC vs Non-Coupled MCMC m Means", + xlab = "Non-Coupled MCMC m", + ylab = "Coupled MCMC m", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.m.alone, m.leaf.mean, m.leaf.sd, add = TRUE, lwd = 1) + plotCI(compare.m.alone, m.leaf.mean, sd.m.alone, err = "x", add = TRUE, lwd = 1) + dev.off() + + ## compare coupled mcmc means to manual means for tauF + lim <- min(compare.tauBB.alone - sd.tauBB.alone, tauBB.leaf.mean - tauBB.leaf.sd) - 0.0001 + lim[2] <- max(compare.tauBB.alone + sd.tauBB.alone, tauBB.leaf.mean + tauBB.leaf.sd) + 0.0001 + pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-tauBB.pdf", sep = "")) + plot(compare.tauBB.alone, + tauBB.leaf.mean, + ylim = 
lim, + xlim = lim, + pch = 19, + main = "Coupled MCMC vs Non-Coupled MCMC tauBB Means", + xlab = "Non-Coupled MCMC tauBB", + ylab = "Coupled MCMC tauBB", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.tauBB.alone, tauBB.leaf.mean, tauBB.leaf.sd, add = TRUE, lwd = 1) + plotCI(compare.tauBB.alone, tauBB.leaf.mean, + sd.tauBB.alone, + err = "x", add = TRUE, lwd = 1 + ) + dev.off() + + ## compare coupled mcmc means to manual means for DIC.BB + lim <- min(compare.DIC.BB.alone, DIC.BB.leaf.mean) - 5 + lim[2] <- max(compare.DIC.BB.alone, DIC.BB.leaf.mean) + 5 + pdf(paste(saveDirec, "Coupled_vs_Alone_Comparison-DIC.BB.pdf", sep = "")) + plot(compare.DIC.BB.alone, + DIC.BB.leaf.mean, + ylim = lim, + xlim = lim, + pch = 19, + main = "Coupled MCMC vs Non-Coupled MCMC Ball-Berry DIC", + xlab = "Non-Coupled MCMC DIC", + ylab = "Coupled MCMC DIC", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + dev.off() } -if(compare.coupled.manual) { - ## compare coupled mcmc to manual fit for Vcmax - lim = min(compare.V.man,Vcmax.leaf.mean-Vcmax.leaf.sd)-10 - lim[2] = max(compare.V.man,Vcmax.leaf.mean+Vcmax.leaf.sd)+10 - pdf(paste(saveDirec, "Coupled_vs_Manual_Comparison-Vcmax.pdf",sep="")) - plot(compare.V.man, - Vcmax.leaf.mean, - ylim = lim, - xlim = lim, - pch = 19, - main = "Coupled MCMC vs Manually Fit Vcmax", - xlab = "Manual Vcmax Fit (umol m-2 s-1)", - ylab = "Coupled MCMC Vcmax (umol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.V.man,Vcmax.leaf.mean,Vcmax.leaf.sd,add=TRUE,lwd=1) - dev.off() +if (compare.coupled.manual) { + ## compare coupled mcmc to manual fit for Vcmax + lim <- min(compare.V.man, Vcmax.leaf.mean - Vcmax.leaf.sd) - 10 + lim[2] <- max(compare.V.man, Vcmax.leaf.mean + Vcmax.leaf.sd) + 10 + pdf(paste(saveDirec, "Coupled_vs_Manual_Comparison-Vcmax.pdf", sep = "")) + plot(compare.V.man, + Vcmax.leaf.mean, + ylim = lim, + xlim = lim, + pch = 19, + main = "Coupled MCMC vs Manually Fit Vcmax", + xlab = "Manual Vcmax Fit (umol m-2 s-1)", + ylab = "Coupled MCMC Vcmax (umol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.V.man, Vcmax.leaf.mean, Vcmax.leaf.sd, add = TRUE, lwd = 1) + dev.off() - ## compare coupled mcmc to manual fit for Jmax - lim = min(compare.J.man,Jmax.leaf.mean-Jmax.leaf.sd)-10 - lim[2] = max(compare.J.man,Jmax.leaf.mean+Jmax.leaf.sd)+10 - pdf(paste(saveDirec, "Coupled_vs_Manual_Comparison-Jmax.pdf",sep="")) - plot(compare.J.man, - Jmax.leaf.mean, - ylim = lim, - xlim = lim, - pch = 19, - main = "Coupled MCMC vs Manually Fit Jmax", - xlab = "Manual Jmax Fit (umol m-2 s-1)", - ylab = "Coupled MCMC Jmax (umol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.J.man,Jmax.leaf.mean,Jmax.leaf.sd,add=TRUE,lwd=1) - dev.off() + ## compare coupled mcmc to manual fit for Jmax + lim <- min(compare.J.man, Jmax.leaf.mean - Jmax.leaf.sd) - 10 + lim[2] <- max(compare.J.man, Jmax.leaf.mean + Jmax.leaf.sd) + 10 + pdf(paste(saveDirec, "Coupled_vs_Manual_Comparison-Jmax.pdf", sep = "")) + plot(compare.J.man, + Jmax.leaf.mean, + ylim = lim, + xlim = lim, + pch = 19, + main = "Coupled MCMC vs Manually Fit Jmax", + xlab = "Manual Jmax Fit (umol m-2 s-1)", + ylab = "Coupled MCMC Jmax (umol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.J.man, Jmax.leaf.mean, Jmax.leaf.sd, add = 
TRUE, lwd = 1) + dev.off() - ## compare coupled mcmc to MLE fit for g0 - lim = min(compare.g0.MLE,g0.leaf.mean-g0.leaf.sd)-0.1 - lim[2] = max(compare.g0.MLE,g0.leaf.mean+g0.leaf.sd)+0.1 - pdf(paste(saveDirec, "Coupled_vs_MLE_Comparison-g0.pdf",sep="")) - plot(compare.g0.MLE, - g0.leaf.mean, - ylim = lim, - xlim = lim, - pch = 19, - main = "Coupled MCMC vs MLE g0 Means", - xlab = "MLE g0 (mol m-2 s-1)", - ylab = "Coupled MCMC g0 (mol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.g0.MLE,g0.leaf.mean,g0.leaf.sd,add=TRUE,lwd=1) - dev.off() + ## compare coupled mcmc to MLE fit for g0 + lim <- min(compare.g0.MLE, g0.leaf.mean - g0.leaf.sd) - 0.1 + lim[2] <- max(compare.g0.MLE, g0.leaf.mean + g0.leaf.sd) + 0.1 + pdf(paste(saveDirec, "Coupled_vs_MLE_Comparison-g0.pdf", sep = "")) + plot(compare.g0.MLE, + g0.leaf.mean, + ylim = lim, + xlim = lim, + pch = 19, + main = "Coupled MCMC vs MLE g0 Means", + xlab = "MLE g0 (mol m-2 s-1)", + ylab = "Coupled MCMC g0 (mol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.g0.MLE, g0.leaf.mean, g0.leaf.sd, add = TRUE, lwd = 1) + dev.off() - ## compare coupled mcmc to MLE fit for m - lim = min(compare.m.MLE,m.leaf.mean-m.leaf.sd)-1 - lim[2] = max(compare.m.MLE,m.leaf.mean+m.leaf.sd)+1 - pdf(paste(saveDirec, "Coupled_vs_MLE_Comparison-m.pdf",sep="")) - plot(compare.m.MLE, - m.leaf.mean, - ylim = lim, - xlim = lim, - pch = 19, - main = "Coupled MCMC vs MLE m Means", - xlab = "MLE m", - ylab = "Coupled MCMC m", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.m.MLE,m.leaf.mean,m.leaf.sd,add=TRUE,lwd=1) - dev.off() + ## compare coupled mcmc to MLE fit for m + lim <- min(compare.m.MLE, m.leaf.mean - m.leaf.sd) - 1 + lim[2] <- max(compare.m.MLE, m.leaf.mean + m.leaf.sd) + 1 + pdf(paste(saveDirec, "Coupled_vs_MLE_Comparison-m.pdf", sep = "")) + plot(compare.m.MLE, + m.leaf.mean, + ylim = lim, + xlim = lim, + pch = 19, + main = "Coupled MCMC vs MLE m Means", + xlab = "MLE m", + ylab = "Coupled MCMC m", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.m.MLE, m.leaf.mean, m.leaf.sd, add = TRUE, lwd = 1) + dev.off() } -if(compare.alone.manual) { - ## compare decoupled mcmc to manual fit for Vcmax - lim = min(compare.V.man,compare.V.alone-sd.V.alone)-10 - lim[2] = max(compare.V.man,compare.V.alone+sd.V.alone)+10 - pdf(paste(saveDirec, "DEcoupled_vs_Manual_Comparison-Vcmax.pdf",sep="")) - plot(compare.V.man, - compare.V.alone, - ylim = lim, - xlim = lim, - pch = 19, - main = "DEcoupled MCMC vs Manually Fit Vcmax", - xlab = "Manual Vcmax Fit (umol m-2 s-1)", - ylab = "DEcoupled MCMC Vcmax (umol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.V.man,compare.V.alone,sd.V.alone,add=TRUE,lwd=1) - dev.off() +if (compare.alone.manual) { + ## compare decoupled mcmc to manual fit for Vcmax + lim <- min(compare.V.man, compare.V.alone - sd.V.alone) - 10 + lim[2] <- max(compare.V.man, compare.V.alone + sd.V.alone) + 10 + pdf(paste(saveDirec, "DEcoupled_vs_Manual_Comparison-Vcmax.pdf", sep = "")) + plot(compare.V.man, + compare.V.alone, + ylim = lim, + xlim = lim, + pch = 19, + main = "DEcoupled MCMC vs Manually Fit Vcmax", + xlab = "Manual Vcmax Fit (umol m-2 s-1)", + ylab = "DEcoupled MCMC Vcmax (umol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + 
plotCI(compare.V.man, compare.V.alone, sd.V.alone, add = TRUE, lwd = 1) + dev.off() - ## compare decoupled mcmc to manual fit for Jmax - lim = min(compare.J.man,compare.J.alone-sd.J.alone)-10 - lim[2] = max(compare.J.man,compare.J.alone+sd.J.alone)+10 - pdf(paste(saveDirec, "DEcoupled_vs_Manual_Comparison-Jmax.pdf",sep="")) - plot(compare.J.man, - compare.J.alone, - ylim = lim, - xlim = lim, - pch = 19, - main = "DEcoupled MCMC vs Manually Fit Jmax", - xlab = "Manual Jmax Fit (umol m-2 s-1)", - ylab = "DEcoupled MCMC Jmax (umol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.J.man,compare.J.alone,sd.J.alone,add=TRUE,lwd=1) - dev.off() + ## compare decoupled mcmc to manual fit for Jmax + lim <- min(compare.J.man, compare.J.alone - sd.J.alone) - 10 + lim[2] <- max(compare.J.man, compare.J.alone + sd.J.alone) + 10 + pdf(paste(saveDirec, "DEcoupled_vs_Manual_Comparison-Jmax.pdf", sep = "")) + plot(compare.J.man, + compare.J.alone, + ylim = lim, + xlim = lim, + pch = 19, + main = "DEcoupled MCMC vs Manually Fit Jmax", + xlab = "Manual Jmax Fit (umol m-2 s-1)", + ylab = "DEcoupled MCMC Jmax (umol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.J.man, compare.J.alone, sd.J.alone, add = TRUE, lwd = 1) + dev.off() - ## compare decoupled mcmc to MLE fit for g0 - lim = min(compare.g0.MLE,compare.g0.alone-sd.g0.alone)-0.1 - lim[2] = max(compare.g0.MLE,compare.g0.alone+sd.g0.alone)+0.1 - pdf(paste(saveDirec, "DEcoupled_vs_MLE_Comparison-g0.pdf",sep="")) - plot(compare.g0.MLE, - compare.g0.alone, - ylim = lim, - xlim = lim, - pch = 19, - main = "DEcoupled MCMC vs MLE g0 Means", - xlab = "MLE g0 (mol m-2 s-1)", - ylab = "DEcoupled MCMC g0 (mol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.g0.MLE,compare.g0.alone,sd.g0.alone,add=TRUE,lwd=1) - dev.off() + ## compare decoupled mcmc to MLE fit for g0 + lim <- min(compare.g0.MLE, compare.g0.alone - sd.g0.alone) - 0.1 + lim[2] <- max(compare.g0.MLE, compare.g0.alone + sd.g0.alone) + 0.1 + pdf(paste(saveDirec, "DEcoupled_vs_MLE_Comparison-g0.pdf", sep = "")) + plot(compare.g0.MLE, + compare.g0.alone, + ylim = lim, + xlim = lim, + pch = 19, + main = "DEcoupled MCMC vs MLE g0 Means", + xlab = "MLE g0 (mol m-2 s-1)", + ylab = "DEcoupled MCMC g0 (mol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.g0.MLE, compare.g0.alone, sd.g0.alone, add = TRUE, lwd = 1) + dev.off() - ## compare decoupled mcmc to MLE fit for m - lim = min(compare.m.MLE,compare.m.alone-sd.m.alone)-1 - lim[2] = max(compare.m.MLE,compare.m.alone+sd.m.alone)+1 - pdf(paste(saveDirec, "DEcoupled_vs_MLE_Comparison-m.pdf",sep="")) - plot(compare.m.MLE, - compare.m.alone, - ylim = lim, - xlim = lim, - pch = 19, - main = "DEcoupled MCMC vs MLE m Means", - xlab = "MLE m", - ylab = "DEcoupled MCMC m", - cex.main = 1.6, - cex.lab = 1.4) - abline(0, 1, col = "dark green", lwd = 3) - plotCI(compare.m.MLE,compare.m.alone,sd.m.alone,add=TRUE,lwd=1) - dev.off() -} + ## compare decoupled mcmc to MLE fit for m + lim <- min(compare.m.MLE, compare.m.alone - sd.m.alone) - 1 + lim[2] <- max(compare.m.MLE, compare.m.alone + sd.m.alone) + 1 + pdf(paste(saveDirec, "DEcoupled_vs_MLE_Comparison-m.pdf", sep = "")) + plot(compare.m.MLE, + compare.m.alone, + ylim = lim, + xlim = lim, + pch = 19, + main = "DEcoupled MCMC vs MLE m Means", + xlab = "MLE m", + ylab = "DEcoupled MCMC m", + 
cex.main = 1.6, + cex.lab = 1.4 + ) + abline(0, 1, col = "dark green", lwd = 3) + plotCI(compare.m.MLE, compare.m.alone, sd.m.alone, add = TRUE, lwd = 1) + dev.off() +} diff --git a/modules/photosynthesis/code/FBB_functions.R b/modules/photosynthesis/code/FBB_functions.R index 9e2a676cf85..7c73ba8f2fc 100644 --- a/modules/photosynthesis/code/FBB_functions.R +++ b/modules/photosynthesis/code/FBB_functions.R @@ -3,93 +3,129 @@ library(MCMCpack) ## truncated normal density -dtnorm = function(x,mu,sd,minX,maxX) { - dnorm(x,mu,sd,log=TRUE) - log(pnorm(maxX,mu,sd) - pnorm(minX,mu,sd)) } +dtnorm <- function(x, mu, sd, minX, maxX) { + dnorm(x, mu, sd, log = TRUE) - log(pnorm(maxX, mu, sd) - pnorm(minX, mu, sd)) +} ## truncated normal random number -rtnorm = function(n,mu,sd,minX,maxX) { - x = rnorm(n,mu,sd) - redo = which(xmaxX) - while(length(redo)>0) { - x[redo] = rnorm(length(redo),mu,sd) - redo = which(xmaxX) } - return(x) } +rtnorm <- function(n, mu, sd, minX, maxX) { + x <- rnorm(n, mu, sd) + redo <- which(x < minX | x > maxX) + while (length(redo) > 0) { + x[redo] <- rnorm(length(redo), mu, sd) + redo <- which(x < minX | x > maxX) + } + return(x) +} ## likelihood of A -llik.A = function(An.pred, tauF) { - sum(dnorm(An.obs, An.pred, sqrt(tauF), log=TRUE)) } +llik.A <- function(An.pred, tauF) { + sum(dnorm(An.obs, An.pred, sqrt(tauF), log = TRUE)) +} ## likelihood of gs -llik.gs = function(gs.pred, tauBB) { - sum(dnorm(gs.obs, gs.pred, sqrt(tauBB), log=TRUE)) } +llik.gs <- function(gs.pred, tauBB) { + sum(dnorm(gs.obs, gs.pred, sqrt(tauBB), log = TRUE)) +} ## Prior Distributions -prior.Vcmax = function(Vcmax) { dlnorm(Vcmax, prior.mu.Vcmax, prior.sd.Vcmax, log=TRUE) } -prior.Jmax = function(Jmax) { dlnorm(Jmax, prior.mu.Jmax, prior.sd.Jmax, log=TRUE) } -prior.R = function(R) { dlnorm(R, prior.mu.R, prior.sd.R, log=TRUE) } -prior.Gstar = function(Gstar) { dlnorm(Gstar, prior.mu.Gstar, prior.sd.Gstar, log=TRUE) } +prior.Vcmax <- function(Vcmax) { + dlnorm(Vcmax, prior.mu.Vcmax, prior.sd.Vcmax, log = TRUE) +} +prior.Jmax <- function(Jmax) { + dlnorm(Jmax, prior.mu.Jmax, prior.sd.Jmax, log = TRUE) +} +prior.R <- function(R) { + dlnorm(R, prior.mu.R, prior.sd.R, log = TRUE) +} +prior.Gstar <- function(Gstar) { + dlnorm(Gstar, prior.mu.Gstar, prior.sd.Gstar, log = TRUE) +} -#prior.alpha = function(alpha) { dunif(alpha,0,1) } -prior.alpha = function(alpha) { dlnorm(alpha, prior.mu.alpha, prior.sd.alpha, log=TRUE) } +# prior.alpha = function(alpha) { dunif(alpha,0,1) } +prior.alpha <- function(alpha) { + dlnorm(alpha, prior.mu.alpha, prior.sd.alpha, log = TRUE) +} -prior.m = function(m) { dnorm(m, prior.mu.m, prior.sd.m, log=TRUE) } -prior.g0 = function(g0) { dlnorm(g0, prior.mu.g0, prior.sd.g0, log=TRUE) } +prior.m <- function(m) { + dnorm(m, prior.mu.m, prior.sd.m, log = TRUE) +} +prior.g0 <- function(g0) { + dlnorm(g0, prior.mu.g0, prior.sd.g0, log = TRUE) +} ## Jump Distributions for Metropolis Sampled Paramters -jump.Vcmax = function(Vcmax) { rnorm(1,Vcmax,jumpSD.Vcmax) } -jump.Jmax = function(Jmax) { rnorm(1,Jmax,jumpSD.Jmax) } -jump.R = function(R) { rtnorm(1,R,jumpSD.R,R.lb,R.ub) } -jump.Gstar = function(Gstar) { rnorm(1,Gstar,jumpSD.Gstar) } -jump.alpha = function(alpha) { rtnorm(1,alpha,jumpSD.alpha,alpha.lb,alpha.ub) } -jump.m = function(m) { rnorm(1,m,jumpSD.m) } -#jump.g0 = function(g0) { rnorm(1,g0,jumpSD.g0) } -jump.g0 = function(g0) { rtnorm(1,g0,jumpSD.g0,g0.lb,g0.ub) } +jump.Vcmax <- function(Vcmax) { + rnorm(1, Vcmax, jumpSD.Vcmax) +} +jump.Jmax <- function(Jmax) { + rnorm(1, Jmax, 
jumpSD.Jmax) +} +jump.R <- function(R) { + rtnorm(1, R, jumpSD.R, R.lb, R.ub) +} +jump.Gstar <- function(Gstar) { + rnorm(1, Gstar, jumpSD.Gstar) +} +jump.alpha <- function(alpha) { + rtnorm(1, alpha, jumpSD.alpha, alpha.lb, alpha.ub) +} +jump.m <- function(m) { + rnorm(1, m, jumpSD.m) +} +# jump.g0 = function(g0) { rnorm(1,g0,jumpSD.g0) } +jump.g0 <- function(g0) { + rtnorm(1, g0, jumpSD.g0, g0.lb, g0.ub) +} ## Gibbs Sampling for Variances -gibbs.tauF = function(An.pred,tauF){ - shape = prior.s1.tauF+length(An.pred)/2 - rate = prior.s2.tauF+0.5*sum((An.pred-An.obs)^2) - rinvgamma(1,shape,rate) } +gibbs.tauF <- function(An.pred, tauF) { + shape <- prior.s1.tauF + length(An.pred) / 2 + rate <- prior.s2.tauF + 0.5 * sum((An.pred - An.obs)^2) + rinvgamma(1, shape, rate) +} + +gibbs.tauBB <- function(gs.pred, tauBB) { + shape <- prior.s1.tauBB + length(gs.pred) / 2 + rate <- prior.s2.tauBB + 0.5 * sum((gs.pred - gs.obs)^2) + rinvgamma(1, shape, rate) +} -gibbs.tauBB = function(gs.pred,tauBB){ - shape = prior.s1.tauBB+length(gs.pred)/2 - rate = prior.s2.tauBB+0.5*sum((gs.pred-gs.obs)^2) - rinvgamma(1,shape,rate) } - ## Farquhar-Ball Berry Optimization Functions -farquhar = function(Ci,Fparams,Q){ - Q2 <- Q*Fparams[5]*phi*beta - J <- (Q2+Fparams[2]-sqrt((Q2+Fparams[2])*(Q2+Fparams[2])-4*theta*Q2*Fparams[2]))/(2*theta) - Ac <- Fparams[1]*(Ci-Fparams[4])/(Ci+(Kc*(1+(O/Ko)))) - Aj <- J*(Ci-Fparams[4])/((4.5*Ci)+(10.5*Fparams[4])) - min(Aj,Ac) - Fparams[3] -} - - -ballberry = function(input,BBparams,Fparams,obs){ - Ci <- obs[1] - input[1]/input[2] - e1 <- farquhar(Ci,Fparams,obs[3]) - input[1] - e2 <- (BBparams[1] + BBparams[2]*input[1]*obs[2]/obs[1] - input[2])*100 - return(e1^2 + e2^2) -} - - -solve.model = function(Vcmax, Jmax, R, Gstar, alpha, m, g0){ - output = list() - for(i in 1:npoints){ # loop over data points - ic <- c(An.obs[i], gs.obs[i]) # take initial conditions from actual data - out <- optim(ic, # solve simultaneously for An.pred and gs.pred - ballberry, - BBparams = c(g0,m), # Ballberry params - Fparams = c(Vcmax,Jmax,R,Gstar,alpha), # Farquhar params - obs = c(Ca[i], H[i], Q[i])) # data - output$An.pred[i] = out$par[1] - output$gs.pred[i] = out$par[2] - } - return(output) +farquhar <- function(Ci, Fparams, Q) { + Q2 <- Q * Fparams[5] * phi * beta + J <- (Q2 + Fparams[2] - sqrt((Q2 + Fparams[2]) * (Q2 + Fparams[2]) - 4 * theta * Q2 * Fparams[2])) / (2 * theta) + Ac <- Fparams[1] * (Ci - Fparams[4]) / (Ci + (Kc * (1 + (O / Ko)))) + Aj <- J * (Ci - Fparams[4]) / ((4.5 * Ci) + (10.5 * Fparams[4])) + min(Aj, Ac) - Fparams[3] +} + + +ballberry <- function(input, BBparams, Fparams, obs) { + Ci <- obs[1] - input[1] / input[2] + e1 <- farquhar(Ci, Fparams, obs[3]) - input[1] + e2 <- (BBparams[1] + BBparams[2] * input[1] * obs[2] / obs[1] - input[2]) * 100 + return(e1^2 + e2^2) +} + + +solve.model <- function(Vcmax, Jmax, R, Gstar, alpha, m, g0) { + output <- list() + for (i in 1:npoints) { # loop over data points + ic <- c(An.obs[i], gs.obs[i]) # take initial conditions from actual data + out <- optim(ic, # solve simultaneously for An.pred and gs.pred + ballberry, + BBparams = c(g0, m), # Ballberry params + Fparams = c(Vcmax, Jmax, R, Gstar, alpha), # Farquhar params + obs = c(Ca[i], H[i], Q[i]) + ) # data + output$An.pred[i] <- out$par[1] + output$gs.pred[i] <- out$par[2] + } + return(output) } diff --git a/modules/photosynthesis/code/FBB_main.R b/modules/photosynthesis/code/FBB_main.R index b2aa8bdff54..7247cb34c19 100644 --- a/modules/photosynthesis/code/FBB_main.R +++ 
b/modules/photosynthesis/code/FBB_main.R @@ -1,108 +1,108 @@ ## Setup and Run FBBmodel -##syntax: nohup env SPECIES=1 LEAF=31 CHAIN=1 R --vanilla < FBB_main.R > log1-31-1 & -startTime = proc.time()[3] # Start timer +## syntax: nohup env SPECIES=1 LEAF=31 CHAIN=1 R --vanilla < FBB_main.R > log1-31-1 & +startTime <- proc.time()[3] # Start timer #****************** USER PARAMETERS *********************************** -species.id <- as.numeric(system("echo $SPECIES",intern=TRUE)) -leaf <- as.numeric(system("echo $LEAF",intern=TRUE)) -chain <- as.numeric(system("echo $CHAIN",intern=TRUE)) -niter = 100000 # number of MCMC iterations to compute -nchains = 3 # number of chains that will be used -progressEvery = 10000 # How frequent to print progress +species.id <- as.numeric(system("echo $SPECIES", intern = TRUE)) +leaf <- as.numeric(system("echo $LEAF", intern = TRUE)) +chain <- as.numeric(system("echo $CHAIN", intern = TRUE)) +niter <- 100000 # number of MCMC iterations to compute +nchains <- 3 # number of chains that will be used +progressEvery <- 10000 # How frequent to print progress ## File parameters -filePath = "/home/wolz1/Biomath/" # "/Users/wolzy4u/Desktop/Biomath/" -datFile = "Kdata_Project.csv" # "Biomath_Processed_Data.csv" -saveDirDat = "FBB_soyface_output_dat/" # "FBB_output_dat/" -setupFile = "FBB_setup.R" -funcFile = "FBB_functions.R" -mcmcFile = "FBB_mcmc.R" +filePath <- "/home/wolz1/Biomath/" # "/Users/wolzy4u/Desktop/Biomath/" +datFile <- "Kdata_Project.csv" # "Biomath_Processed_Data.csv" +saveDirDat <- "FBB_soyface_output_dat/" # "FBB_output_dat/" +setupFile <- "FBB_setup.R" +funcFile <- "FBB_functions.R" +mcmcFile <- "FBB_mcmc.R" -datFile = paste(filePath, datFile, sep="") -saveDirDat = paste(filePath, saveDirDat, sep="") -setupFile = paste(filePath, setupFile,sep="") -funcFile = paste(filePath, funcFile, sep="") -mcmcFile = paste(filePath, mcmcFile, sep="") +datFile <- paste(filePath, datFile, sep = "") +saveDirDat <- paste(filePath, saveDirDat, sep = "") +setupFile <- paste(filePath, setupFile, sep = "") +funcFile <- paste(filePath, funcFile, sep = "") +mcmcFile <- paste(filePath, mcmcFile, sep = "") -dir.create(saveDirDat,showWarnings=FALSE,recursive=TRUE) +dir.create(saveDirDat, showWarnings = FALSE, recursive = TRUE) ## Toggles and parameters -compute.pA = TRUE # Whether or not to compute pA -compute.pgs = TRUE # Whether or not to compute pgs -compute.DA = TRUE # Whether or not to compute DA -compute.Dgs = TRUE # Whether or not to compute Dgs -track.An.pred = TRUE # Whether or not to track An.pred (TRUE = yes) -track.gs.pred = TRUE # Whether or not to track gs.pred (TRUE = yes) - -sample.Vcmax = TRUE # Whether or not to sample Vcmax (TRUE = yes) - prior.mu.Vcmax = 4.61 - prior.sd.Vcmax = 0.32 -sample.Jmax = TRUE # Whether or not to sample Jmax (TRUE = yes) - prior.mu.Jmax = 5.16 - prior.sd.Jmax = 0.32 -sample.R = TRUE # Whether or not to sample R (TRUE = yes) - prior.mu.R = -0.69 - prior.sd.R = 1 - R.lb = 0 # lower bound on R - R.ub = 10 # upper bound on R -sample.Gstar = TRUE # Whether or not to sample Gstar (TRUE = yes) - prior.mu.Gstar = 3.75 - prior.sd.Gstar = 1 -sample.alpha = TRUE # Whether or not to sample alpha (TRUE = yes) - prior.mu.alpha = -0.15 - prior.sd.alpha = 0.1 - alpha.lb = 0 # lower bound on alpha - alpha.ub = 1 # upper bound on alpha -sample.m = TRUE # Whether or not to sample alpha (TRUE = yes) - prior.mu.m = 10 - prior.sd.m = 10 -sample.g0 = TRUE # Whether or not to sample alpha (TRUE = yes) - prior.mu.g0 = 0 - prior.sd.g0 = 0.1 -sample.tauF = TRUE 
# Whether or not to sample tauF (TRUE = yes) - prior.s1.tauF = 0.1 - prior.s2.tauF = 0.1 - tauF.lb = 0 # lower bound on tauF - tauF.ub = 100 # upper bound on tauF -sample.tauBB = TRUE # Whether or not to sample tauBB (TRUE = yes) - prior.s1.tauBB = 0.1 - prior.s2.tauBB = 0.00001 - tauBB.lb = 0 # lower bound on tauBB - tauBB.ub = 100 # upper bound on tauBB +compute.pA <- TRUE # Whether or not to compute pA +compute.pgs <- TRUE # Whether or not to compute pgs +compute.DA <- TRUE # Whether or not to compute DA +compute.Dgs <- TRUE # Whether or not to compute Dgs +track.An.pred <- TRUE # Whether or not to track An.pred (TRUE = yes) +track.gs.pred <- TRUE # Whether or not to track gs.pred (TRUE = yes) + +sample.Vcmax <- TRUE # Whether or not to sample Vcmax (TRUE = yes) +prior.mu.Vcmax <- 4.61 +prior.sd.Vcmax <- 0.32 +sample.Jmax <- TRUE # Whether or not to sample Jmax (TRUE = yes) +prior.mu.Jmax <- 5.16 +prior.sd.Jmax <- 0.32 +sample.R <- TRUE # Whether or not to sample R (TRUE = yes) +prior.mu.R <- -0.69 +prior.sd.R <- 1 +R.lb <- 0 # lower bound on R +R.ub <- 10 # upper bound on R +sample.Gstar <- TRUE # Whether or not to sample Gstar (TRUE = yes) +prior.mu.Gstar <- 3.75 +prior.sd.Gstar <- 1 +sample.alpha <- TRUE # Whether or not to sample alpha (TRUE = yes) +prior.mu.alpha <- -0.15 +prior.sd.alpha <- 0.1 +alpha.lb <- 0 # lower bound on alpha +alpha.ub <- 1 # upper bound on alpha +sample.m <- TRUE # Whether or not to sample alpha (TRUE = yes) +prior.mu.m <- 10 +prior.sd.m <- 10 +sample.g0 <- TRUE # Whether or not to sample alpha (TRUE = yes) +prior.mu.g0 <- 0 +prior.sd.g0 <- 0.1 +sample.tauF <- TRUE # Whether or not to sample tauF (TRUE = yes) +prior.s1.tauF <- 0.1 +prior.s2.tauF <- 0.1 +tauF.lb <- 0 # lower bound on tauF +tauF.ub <- 100 # upper bound on tauF +sample.tauBB <- TRUE # Whether or not to sample tauBB (TRUE = yes) +prior.s1.tauBB <- 0.1 +prior.s2.tauBB <- 0.00001 +tauBB.lb <- 0 # lower bound on tauBB +tauBB.ub <- 100 # upper bound on tauBB ## Initial conditions -Vcmax.ic = c(88.7,200,30) -Jmax.ic = c(144.8,300,50) -R.ic = c(0.6,2,0.01) -Gstar.ic = c(29.8,60,5) -alpha.ic = c(0.85,0.99,0.3) -m.ic = c(12.8,20,1) -g0.ic = c(0,1,-1) -tauF.ic = c(0.4,1,0.01) -tauBB.ic = c(0.033,0.05,0.001) +Vcmax.ic <- c(88.7, 200, 30) +Jmax.ic <- c(144.8, 300, 50) +R.ic <- c(0.6, 2, 0.01) +Gstar.ic <- c(29.8, 60, 5) +alpha.ic <- c(0.85, 0.99, 0.3) +m.ic <- c(12.8, 20, 1) +g0.ic <- c(0, 1, -1) +tauF.ic <- c(0.4, 1, 0.01) +tauBB.ic <- c(0.033, 0.05, 0.001) ## Jump SD -jumpSD.Vcmax = 5 -jumpSD.Jmax = 10 -jumpSD.R = 1 -jumpSD.Gstar = 5 -jumpSD.alpha = 0.1 -jumpSD.m = 3 -jumpSD.g0 = 0.05 -jumpSD.tauF = 0.01 -jumpSD.tauBB = 0.01 +jumpSD.Vcmax <- 5 +jumpSD.Jmax <- 10 +jumpSD.R <- 1 +jumpSD.Gstar <- 5 +jumpSD.alpha <- 0.1 +jumpSD.m <- 3 +jumpSD.g0 <- 0.05 +jumpSD.tauF <- 0.01 +jumpSD.tauBB <- 0.01 ## Constants -O = 210 # [02] in ppt (millimol/mol) -Kc = 275 # Michaelis-Menton constant of RuBisCO for C02 -Ko = 400 # Michaelis-Menton constant of RuBisCO for O -phi = 0.85 # maximum dark-adapted quantum yield of PSII -beta = 0.5 # fraction of absorbed quanta that reasches PSII -theta = 0.7 # empirical curvature factor +O <- 210 # [02] in ppt (millimol/mol) +Kc <- 275 # Michaelis-Menton constant of RuBisCO for C02 +Ko <- 400 # Michaelis-Menton constant of RuBisCO for O +phi <- 0.85 # maximum dark-adapted quantum yield of PSII +beta <- 0.5 # fraction of absorbed quanta that reasches PSII +theta <- 0.7 # empirical curvature factor #********************************************************************** ## FOR EACH 
LEAF - CHAIN -runName = paste(species.id, leaf, chain, sep="-") +runName <- paste(species.id, leaf, chain, sep = "-") ## Run setup file source(setupFile) @@ -111,23 +111,23 @@ source(setupFile) source(funcFile) ## Initial Conditions -Vcmax = Vcmax.ic[chain] # max velocity of carboxylation (micromol/m^2/s) -Jmax = Jmax.ic[chain] # max rate of electron transport (micromol/m^2/s) -R = R.ic[chain] # day respiration (micromol/m^2/s) -Gstar = Gstar.ic[chain] # CO2 compensation pt in the absense of dark resp -alpha = alpha.ic[chain] # absorbance of leaves (3.1) -m = m.ic[chain] # slope of Ball-Berry model -g0 = g0.ic[chain] # y-intercept of Ball-Berry model -tauF = tauF.ic[chain] # variance of Farquhar model -tauBB = tauBB.ic[chain] # variance of Ball-Berry model +Vcmax <- Vcmax.ic[chain] # max velocity of carboxylation (micromol/m^2/s) +Jmax <- Jmax.ic[chain] # max rate of electron transport (micromol/m^2/s) +R <- R.ic[chain] # day respiration (micromol/m^2/s) +Gstar <- Gstar.ic[chain] # CO2 compensation pt in the absense of dark resp +alpha <- alpha.ic[chain] # absorbance of leaves (3.1) +m <- m.ic[chain] # slope of Ball-Berry model +g0 <- g0.ic[chain] # y-intercept of Ball-Berry model +tauF <- tauF.ic[chain] # variance of Farquhar model +tauBB <- tauBB.ic[chain] # variance of Ball-Berry model ## Run MCMC -source(mcmcFile,print.eval=TRUE) +source(mcmcFile, print.eval = TRUE) -save.image(paste(saveDirDat, runName, ".Rdata", sep="")) +save.image(paste(saveDirDat, runName, ".Rdata", sep = "")) #********************************************************************** -elapsedTime = proc.time()[3] - startTime +elapsedTime <- proc.time()[3] - startTime print(elapsedTime) -efficiency = elapsedTime/niter -print(paste('Seconds/Iteration:', efficiency)) +efficiency <- elapsedTime / niter +print(paste("Seconds/Iteration:", efficiency)) diff --git a/modules/photosynthesis/code/FBB_mcmc.R b/modules/photosynthesis/code/FBB_mcmc.R index f322308adfe..882105a2170 100644 --- a/modules/photosynthesis/code/FBB_mcmc.R +++ b/modules/photosynthesis/code/FBB_mcmc.R @@ -1,209 +1,249 @@ ### MCMC Implementation # Space for tracked variables -if(sample.Vcmax) { Vcmax.mcmc = matrix(NA, nrow=niter, ncol=nchains) } -if(sample.Jmax) { Jmax.mcmc = matrix(NA, nrow=niter, ncol=nchains) } -if(sample.R) { R.mcmc = matrix(NA, nrow=niter, ncol=nchains) } -if(sample.Gstar) { Gstar.mcmc = matrix(NA, nrow=niter, ncol=nchains) } -if(sample.alpha) { alpha.mcmc = matrix(NA, nrow=niter, ncol=nchains) } -if(sample.m) { m.mcmc = matrix(NA, nrow=niter, ncol=nchains) } -if(sample.g0) { g0.mcmc = matrix(NA, nrow=niter, ncol=nchains) } -if(sample.tauBB) { tauBB.mcmc = matrix(NA, nrow=niter, ncol=nchains) } -if(sample.tauF) { tauF.mcmc = matrix(NA, nrow=niter, ncol=nchains) } -if(track.An.pred){An.pred.mcmc = matrix(NA, nrow=niter*nchains, ncol=npoints) } -if(track.gs.pred){gs.pred.mcmc = matrix(NA, nrow=niter*nchains, ncol=npoints) } -if(compute.DA) { DA.mcmc = matrix(NA, nrow=niter, ncol=nchains) } -if(compute.Dgs) { Dgs.mcmc = matrix(NA, nrow=niter, ncol=nchains) } -if(compute.pA) { pA.mcmc = matrix(NA, nrow=niter*nchains, ncol=npoints) } -if(compute.pgs) { pgs.mcmc = matrix(NA, nrow=niter*nchains, ncol=npoints) } - -my.model = solve.model(Vcmax, Jmax, R, Gstar, alpha, m, g0) - +if (sample.Vcmax) { + Vcmax.mcmc <- matrix(NA, nrow = niter, ncol = nchains) +} +if (sample.Jmax) { + Jmax.mcmc <- matrix(NA, nrow = niter, ncol = nchains) +} +if (sample.R) { + R.mcmc <- matrix(NA, nrow = niter, ncol = nchains) +} +if (sample.Gstar) { + Gstar.mcmc <- 
matrix(NA, nrow = niter, ncol = nchains) +} +if (sample.alpha) { + alpha.mcmc <- matrix(NA, nrow = niter, ncol = nchains) +} +if (sample.m) { + m.mcmc <- matrix(NA, nrow = niter, ncol = nchains) +} +if (sample.g0) { + g0.mcmc <- matrix(NA, nrow = niter, ncol = nchains) +} +if (sample.tauBB) { + tauBB.mcmc <- matrix(NA, nrow = niter, ncol = nchains) +} +if (sample.tauF) { + tauF.mcmc <- matrix(NA, nrow = niter, ncol = nchains) +} +if (track.An.pred) { + An.pred.mcmc <- matrix(NA, nrow = niter * nchains, ncol = npoints) +} +if (track.gs.pred) { + gs.pred.mcmc <- matrix(NA, nrow = niter * nchains, ncol = npoints) +} +if (compute.DA) { + DA.mcmc <- matrix(NA, nrow = niter, ncol = nchains) +} +if (compute.Dgs) { + Dgs.mcmc <- matrix(NA, nrow = niter, ncol = nchains) +} +if (compute.pA) { + pA.mcmc <- matrix(NA, nrow = niter * nchains, ncol = npoints) +} +if (compute.pgs) { + pgs.mcmc <- matrix(NA, nrow = niter * nchains, ncol = npoints) +} + +my.model <- solve.model(Vcmax, Jmax, R, Gstar, alpha, m, g0) + ## MCMC loop -for(b in 1:niter) { - - # Vcmax - Metropolis-Hastings Sampling - if(sample.Vcmax){ - Vcmax.new = jump.Vcmax(Vcmax) # jump to new Vcmax - my.model.new = solve.model( # solve coupled F & BB - Vcmax.new, Jmax, R, Gstar, alpha, m, g0) - p.Vcmax.new = llik.A(my.model.new$An.pred,tauF) + # new A liklihood - llik.gs(my.model.new$gs.pred,tauBB) + # new gs liklihood - prior.Vcmax(Vcmax.new) # new prior - p.Vcmax.old = llik.A(my.model$An.pred,tauF) + # old A liklihood - llik.gs(my.model$gs.pred,tauBB) + # old gs liklihood - prior.Vcmax(Vcmax) # old prior - accept = exp(p.Vcmax.new - p.Vcmax.old) # compute acceptrance ratio - if(accept > runif(1)) { # check for acceptance - Vcmax = Vcmax.new # replace Vcmax - my.model = my.model.new # replace A.pred & gs.pred - } - Vcmax.mcmc[b,chain] = Vcmax # save result of this iteration - } # End if(sample.Vcmax) - - - # Jmax - Metropolis-Hastings Sampling - if(sample.Jmax){ - Jmax.new = jump.Jmax(Jmax) # jump to new Jmax - my.model.new = solve.model( # solve coupled F & BB - Vcmax, Jmax.new, R, Gstar, alpha, m, g0) - p.Jmax.new = llik.A(my.model.new$An.pred,tauF) + # new A liklihood - llik.gs(my.model.new$gs.pred,tauBB) + # new gs liklihood - prior.Jmax(Jmax.new) # new prior - p.Jmax.old = llik.A(my.model$An.pred,tauF) + # old A liklihood - llik.gs(my.model$gs.pred,tauBB) + # old gs liklihood - prior.Jmax(Jmax) # old prior - accept = exp(p.Jmax.new - p.Jmax.old) # compute acceptrance ratio - if(accept > runif(1)) { # check for acceptance - Jmax = Jmax.new # replace Jmax - my.model = my.model.new # replace A.pred & gs.pred - } - Jmax.mcmc[b,chain] = Jmax # save result of this iteration - } # End if(sample.Jmax) - - - # R - Metropolis-Hastings Sampling - if(sample.R){ - R.new = jump.R(R) # jump to new R - my.model.new = solve.model( # solve coupled F & BB - Vcmax, Jmax, R.new, Gstar, alpha, m, g0) - p.R.new = llik.A(my.model.new$An.pred,tauF) + # new A liklihood - llik.gs(my.model.new$gs.pred,tauBB) + # new gs liklihood - prior.R(R.new) # new prior - p.R.old = llik.A(my.model$An.pred,tauF) + # old A liklihood - llik.gs(my.model$gs.pred,tauBB) + # old gs liklihood - prior.R(R) # old prior - jnew = dtnorm(R.new,R,jumpSD.R,0,100) # J(new|current) - jold = dtnorm(R,R.new,jumpSD.R,0,100) # J(current|new) - accept = exp((p.R.new-jnew) - (p.R.old-jold)) # compute acceptrance - if(accept > runif(1)) { # check for acceptance - R = R.new # replace R - my.model = my.model.new # replace A.pred & gs.pred - } - R.mcmc[b,chain] = R # save result of this iteration - } # 
End if(sample.R) - - - # Gstar - Metropolis-Hastings Sampling - if(sample.Gstar){ - Gstar.new = jump.Gstar(Gstar) # jump to new Gstar - my.model.new = solve.model( # solve coupled F & BB - Vcmax, Jmax, R, Gstar.new, alpha, m, g0) - p.Gstar.new = llik.A(my.model.new$An.pred,tauF) + # new A liklihood - llik.gs(my.model.new$gs.pred,tauBB) + # new gs liklihood - prior.Gstar(Gstar.new) # new prior - p.Gstar.old = llik.A(my.model$An.pred,tauF) + # old A liklihood - llik.gs(my.model$gs.pred,tauBB) + # old gs liklihood - prior.Gstar(Gstar) # old prior - accept = exp(p.Gstar.new - p.Gstar.old) # compute acceptrance ratio - if(accept > runif(1)) { # check for acceptance - Gstar = Gstar.new # replace Gstar - my.model = my.model.new # replace A.pred & gs.pred - } - Gstar.mcmc[b,chain] = Gstar # save result of this iteration - } # End if(sample.Gstar) - - - # alpha - Metropolis-Hastings Sampling - if(sample.alpha){ - alpha.new = jump.alpha(alpha) # jump to new alpha - my.model.new = solve.model( # solve coupled F & BB - Vcmax, Jmax, R, Gstar, alpha.new, m, g0) - p.alpha.new = llik.A(my.model.new$An.pred,tauF) + # new A liklihood - llik.gs(my.model.new$gs.pred,tauBB) + # new gs liklihood - prior.alpha(alpha.new) # new prior - p.alpha.old = llik.A(my.model$An.pred,tauF) + # old A liklihood - llik.gs(my.model$gs.pred,tauBB) + # old gs liklihood - prior.alpha(alpha) # old prior - jnew = dtnorm(alpha.new,alpha,jumpSD.alpha,0,1) # J(new|current) - jold = dtnorm(alpha,alpha.new,jumpSD.alpha,0,1) # J(current|new) - accept = exp((p.alpha.new-jnew) - (p.alpha.old-jold)) # compute acceptrance - if(accept > runif(1)) { # check for acceptance - alpha = alpha.new # replace alpha - my.model = my.model.new # replace A.pred & gs.pred - } - alpha.mcmc[b,chain] = alpha # save result of this iteration - } # End if(sample.alpha) - - - # m - Metropolis-Hastings Sampling - if(sample.m){ - m.new = jump.m(m) # jump to new m - my.model.new = solve.model( # solve coupled F & BB - Vcmax, Jmax, R, Gstar, alpha, m.new, g0) - p.m.new = llik.A(my.model.new$An.pred,tauF) + # new A liklihood - llik.gs(my.model.new$gs.pred,tauBB) + # new gs liklihood - prior.m(m.new) # new prior - p.m.old = llik.A(my.model$An.pred,tauF) + # old A liklihood - llik.gs(my.model$gs.pred,tauBB) + # old gs liklihood - prior.m(m) # old prior - accept = exp(p.m.new - p.m.old) # compute acceptrance ratio - if(accept > runif(1)) { # check for acceptance - m = m.new # replace m - my.model = my.model.new # replace A.pred & gs.pred - } - m.mcmc[b,chain] = m # save result of this iteration - } # End if(sample.m) - - - # g0 - Metropolis-Hastings Sampling - if(sample.g0){ - g0.new = jump.g0(g0) # jump to new g0 - my.model.new = solve.model( # solve coupled F & BB - Vcmax, Jmax, R, Gstar, alpha, m, g0.new) - p.g0.new = llik.A(my.model.new$An.pred,tauF) + # new A liklihood - llik.gs(my.model.new$gs.pred,tauBB) + # new gs liklihood - prior.g0(g0.new) # new prior - p.g0.old = llik.A(my.model$An.pred,tauF) + # old A liklihood - llik.gs(my.model$gs.pred,tauBB) + # old gs liklihood - prior.g0(g0) # old prior - accept = exp(p.g0.new - p.g0.old) # compute acceptrance ratio - if(accept > runif(1)) { # check for acceptance - g0 = g0.new # replace g0 - my.model = my.model.new # replace A.pred & gs.pred - } - g0.mcmc[b,chain] = g0 # save result of this iteration - } # End if(sample.g0) - - - # tauF - Gibbs Sampling - if(sample.tauF){ - tauF = gibbs.tauF(my.model$An.pred,tauF) - tauF.mcmc[b,chain] = tauF # save result of this iteration - } - - - # tauBB - Gibbs Sampling - 
if(sample.tauBB){ - tauBB = gibbs.tauBB(my.model$gs.pred,tauBB) - tauBB.mcmc[b,chain] = tauBB # save result of this iteration - } - - - - ## Compute PI - if(compute.pA) { - pA.mcmc[b+niter*(chain-1),] = rnorm(npoints,my.model$An.pred,sqrt(tauF)) - } - - if(compute.pgs) { - pgs.mcmc[b+niter*(chain-1),] = rnorm(npoints,my.model$gs.pred,sqrt(tauBB)) - } - - - ## Compute D (liklihood of data|params) - if(compute.DA) { DA.mcmc[b,chain] = -2*llik.A(my.model$An.pred,tauF) } - - if(compute.Dgs) { Dgs.mcmc[b,chain] = -2*llik.gs(my.model$gs.pred,tauBB) } - - - # save data model values - An.pred.mcmc[b+niter*(chain-1),] = my.model$An.pred # save result of this iteration - gs.pred.mcmc[b+niter*(chain-1),] = my.model$gs.pred # save result of this iteration - - - # Print Progress - if(b %% progressEvery == 0) { - print(paste("Species:",species.id,"Leaf:",leaf," Chain:",chain," Iterations:",b)) - } -} \ No newline at end of file +for (b in 1:niter) { + # Vcmax - Metropolis-Hastings Sampling + if (sample.Vcmax) { + Vcmax.new <- jump.Vcmax(Vcmax) # jump to new Vcmax + my.model.new <- solve.model( # solve coupled F & BB + Vcmax.new, Jmax, R, Gstar, alpha, m, g0 + ) + p.Vcmax.new <- llik.A(my.model.new$An.pred, tauF) + # new A liklihood + llik.gs(my.model.new$gs.pred, tauBB) + # new gs liklihood + prior.Vcmax(Vcmax.new) # new prior + p.Vcmax.old <- llik.A(my.model$An.pred, tauF) + # old A liklihood + llik.gs(my.model$gs.pred, tauBB) + # old gs liklihood + prior.Vcmax(Vcmax) # old prior + accept <- exp(p.Vcmax.new - p.Vcmax.old) # compute acceptrance ratio + if (accept > runif(1)) { # check for acceptance + Vcmax <- Vcmax.new # replace Vcmax + my.model <- my.model.new # replace A.pred & gs.pred + } + Vcmax.mcmc[b, chain] <- Vcmax # save result of this iteration + } # End if(sample.Vcmax) + + + # Jmax - Metropolis-Hastings Sampling + if (sample.Jmax) { + Jmax.new <- jump.Jmax(Jmax) # jump to new Jmax + my.model.new <- solve.model( # solve coupled F & BB + Vcmax, Jmax.new, R, Gstar, alpha, m, g0 + ) + p.Jmax.new <- llik.A(my.model.new$An.pred, tauF) + # new A liklihood + llik.gs(my.model.new$gs.pred, tauBB) + # new gs liklihood + prior.Jmax(Jmax.new) # new prior + p.Jmax.old <- llik.A(my.model$An.pred, tauF) + # old A liklihood + llik.gs(my.model$gs.pred, tauBB) + # old gs liklihood + prior.Jmax(Jmax) # old prior + accept <- exp(p.Jmax.new - p.Jmax.old) # compute acceptrance ratio + if (accept > runif(1)) { # check for acceptance + Jmax <- Jmax.new # replace Jmax + my.model <- my.model.new # replace A.pred & gs.pred + } + Jmax.mcmc[b, chain] <- Jmax # save result of this iteration + } # End if(sample.Jmax) + + + # R - Metropolis-Hastings Sampling + if (sample.R) { + R.new <- jump.R(R) # jump to new R + my.model.new <- solve.model( # solve coupled F & BB + Vcmax, Jmax, R.new, Gstar, alpha, m, g0 + ) + p.R.new <- llik.A(my.model.new$An.pred, tauF) + # new A liklihood + llik.gs(my.model.new$gs.pred, tauBB) + # new gs liklihood + prior.R(R.new) # new prior + p.R.old <- llik.A(my.model$An.pred, tauF) + # old A liklihood + llik.gs(my.model$gs.pred, tauBB) + # old gs liklihood + prior.R(R) # old prior + jnew <- dtnorm(R.new, R, jumpSD.R, 0, 100) # J(new|current) + jold <- dtnorm(R, R.new, jumpSD.R, 0, 100) # J(current|new) + accept <- exp((p.R.new - jnew) - (p.R.old - jold)) # compute acceptrance + if (accept > runif(1)) { # check for acceptance + R <- R.new # replace R + my.model <- my.model.new # replace A.pred & gs.pred + } + R.mcmc[b, chain] <- R # save result of this iteration + } # End if(sample.R) + + + # Gstar 
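# Sketch of the Metropolis-Hastings update pattern used in this loop (annotation
# only). Vcmax and Jmax use symmetric jumps, so the plain ratio exp(p.new - p.old)
# suffices; R (and alpha below) use truncated-normal jumps, so a Hastings
# correction is added. That correction is only valid if jnew/jold are *log* jump
# densities -- jump.* and dtnorm() are defined outside this file, so this is an
# assumption rather than a statement about the existing code.
mh_step <- function(theta, log_post, propose, log_jump = NULL) {
  theta_new <- propose(theta)
  log_r <- log_post(theta_new) - log_post(theta)
  if (!is.null(log_jump)) { # asymmetric proposal: add Hastings correction
    log_r <- log_r + log_jump(theta, theta_new) - log_jump(theta_new, theta)
  }
  if (log(runif(1)) < log_r) theta_new else theta # accept or keep current value
}
# log_jump(to, from) is the log density of proposing `to` given current value `from`.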
- Metropolis-Hastings Sampling + if (sample.Gstar) { + Gstar.new <- jump.Gstar(Gstar) # jump to new Gstar + my.model.new <- solve.model( # solve coupled F & BB + Vcmax, Jmax, R, Gstar.new, alpha, m, g0 + ) + p.Gstar.new <- llik.A(my.model.new$An.pred, tauF) + # new A liklihood + llik.gs(my.model.new$gs.pred, tauBB) + # new gs liklihood + prior.Gstar(Gstar.new) # new prior + p.Gstar.old <- llik.A(my.model$An.pred, tauF) + # old A liklihood + llik.gs(my.model$gs.pred, tauBB) + # old gs liklihood + prior.Gstar(Gstar) # old prior + accept <- exp(p.Gstar.new - p.Gstar.old) # compute acceptrance ratio + if (accept > runif(1)) { # check for acceptance + Gstar <- Gstar.new # replace Gstar + my.model <- my.model.new # replace A.pred & gs.pred + } + Gstar.mcmc[b, chain] <- Gstar # save result of this iteration + } # End if(sample.Gstar) + + + # alpha - Metropolis-Hastings Sampling + if (sample.alpha) { + alpha.new <- jump.alpha(alpha) # jump to new alpha + my.model.new <- solve.model( # solve coupled F & BB + Vcmax, Jmax, R, Gstar, alpha.new, m, g0 + ) + p.alpha.new <- llik.A(my.model.new$An.pred, tauF) + # new A liklihood + llik.gs(my.model.new$gs.pred, tauBB) + # new gs liklihood + prior.alpha(alpha.new) # new prior + p.alpha.old <- llik.A(my.model$An.pred, tauF) + # old A liklihood + llik.gs(my.model$gs.pred, tauBB) + # old gs liklihood + prior.alpha(alpha) # old prior + jnew <- dtnorm(alpha.new, alpha, jumpSD.alpha, 0, 1) # J(new|current) + jold <- dtnorm(alpha, alpha.new, jumpSD.alpha, 0, 1) # J(current|new) + accept <- exp((p.alpha.new - jnew) - (p.alpha.old - jold)) # compute acceptrance + if (accept > runif(1)) { # check for acceptance + alpha <- alpha.new # replace alpha + my.model <- my.model.new # replace A.pred & gs.pred + } + alpha.mcmc[b, chain] <- alpha # save result of this iteration + } # End if(sample.alpha) + + + # m - Metropolis-Hastings Sampling + if (sample.m) { + m.new <- jump.m(m) # jump to new m + my.model.new <- solve.model( # solve coupled F & BB + Vcmax, Jmax, R, Gstar, alpha, m.new, g0 + ) + p.m.new <- llik.A(my.model.new$An.pred, tauF) + # new A liklihood + llik.gs(my.model.new$gs.pred, tauBB) + # new gs liklihood + prior.m(m.new) # new prior + p.m.old <- llik.A(my.model$An.pred, tauF) + # old A liklihood + llik.gs(my.model$gs.pred, tauBB) + # old gs liklihood + prior.m(m) # old prior + accept <- exp(p.m.new - p.m.old) # compute acceptrance ratio + if (accept > runif(1)) { # check for acceptance + m <- m.new # replace m + my.model <- my.model.new # replace A.pred & gs.pred + } + m.mcmc[b, chain] <- m # save result of this iteration + } # End if(sample.m) + + + # g0 - Metropolis-Hastings Sampling + if (sample.g0) { + g0.new <- jump.g0(g0) # jump to new g0 + my.model.new <- solve.model( # solve coupled F & BB + Vcmax, Jmax, R, Gstar, alpha, m, g0.new + ) + p.g0.new <- llik.A(my.model.new$An.pred, tauF) + # new A liklihood + llik.gs(my.model.new$gs.pred, tauBB) + # new gs liklihood + prior.g0(g0.new) # new prior + p.g0.old <- llik.A(my.model$An.pred, tauF) + # old A liklihood + llik.gs(my.model$gs.pred, tauBB) + # old gs liklihood + prior.g0(g0) # old prior + accept <- exp(p.g0.new - p.g0.old) # compute acceptrance ratio + if (accept > runif(1)) { # check for acceptance + g0 <- g0.new # replace g0 + my.model <- my.model.new # replace A.pred & gs.pred + } + g0.mcmc[b, chain] <- g0 # save result of this iteration + } # End if(sample.g0) + + + # tauF - Gibbs Sampling + if (sample.tauF) { + tauF <- gibbs.tauF(my.model$An.pred, tauF) + tauF.mcmc[b, chain] <- tauF # save result 
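# gibbs.tauF() and gibbs.tauBB() are defined elsewhere in the module, so their
# internals are not shown in this diff. As a hedged sketch only, the standard
# conjugate update for the precision of a Normal likelihood with a Gamma prior
# would be (y, mu, s1, s2 are hypothetical argument names for the observations,
# model predictions, and prior shape/rate):
gibbs_precision <- function(y, mu, s1 = 0.001, s2 = 0.001) {
  rgamma(1, shape = s1 + length(y) / 2, rate = s2 + sum((y - mu)^2) / 2)
}
# Note that the prediction-interval draws below call rnorm(..., sqrt(tauF)), i.e.
# tauF is used there as a variance; if so, the stored value would be the
# reciprocal of a precision drawn as above. This is an observation, not a fix.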
of this iteration + } + + + # tauBB - Gibbs Sampling + if (sample.tauBB) { + tauBB <- gibbs.tauBB(my.model$gs.pred, tauBB) + tauBB.mcmc[b, chain] <- tauBB # save result of this iteration + } + + + + ## Compute PI + if (compute.pA) { + pA.mcmc[b + niter * (chain - 1), ] <- rnorm(npoints, my.model$An.pred, sqrt(tauF)) + } + + if (compute.pgs) { + pgs.mcmc[b + niter * (chain - 1), ] <- rnorm(npoints, my.model$gs.pred, sqrt(tauBB)) + } + + + ## Compute D (liklihood of data|params) + if (compute.DA) { + DA.mcmc[b, chain] <- -2 * llik.A(my.model$An.pred, tauF) + } + + if (compute.Dgs) { + Dgs.mcmc[b, chain] <- -2 * llik.gs(my.model$gs.pred, tauBB) + } + + + # save data model values + An.pred.mcmc[b + niter * (chain - 1), ] <- my.model$An.pred # save result of this iteration + gs.pred.mcmc[b + niter * (chain - 1), ] <- my.model$gs.pred # save result of this iteration + + + # Print Progress + if (b %% progressEvery == 0) { + print(paste("Species:", species.id, "Leaf:", leaf, " Chain:", chain, " Iterations:", b)) + } +} diff --git a/modules/photosynthesis/code/FBB_multi_chain_analysis.R b/modules/photosynthesis/code/FBB_multi_chain_analysis.R index 8c38b772482..32200b25d8a 100644 --- a/modules/photosynthesis/code/FBB_multi_chain_analysis.R +++ b/modules/photosynthesis/code/FBB_multi_chain_analysis.R @@ -1,425 +1,444 @@ ### FBB_analysis ## make variable to store acceptance stats -accept = list() +accept <- list() ## select indices of interations to use in analysis -keep = seq(start,end,by=thin) +keep <- seq(start, end, by = thin) ## Plot tracked plottable parameters. Otherwise, save them as NULL. ## Parameter Vcmax -if(exists('Vcmax.mcmc') && !is.null(Vcmax.mcmc)) { - mcmc.Vcmax = mcmc.list( - coda::mcmc(Vcmax.mcmc[keep,1],start,end,thin), - coda::mcmc(Vcmax.mcmc[keep,2],start,end,thin), - coda::mcmc(Vcmax.mcmc[keep,3],start,end,thin)) - pdf(paste(SaveLeafDir, runName, "_plot.Vcmax.pdf",sep="")) - plot(mcmc.Vcmax, main="Vcmax") +if (exists("Vcmax.mcmc") && !is.null(Vcmax.mcmc)) { + mcmc.Vcmax <- mcmc.list( + coda::mcmc(Vcmax.mcmc[keep, 1], start, end, thin), + coda::mcmc(Vcmax.mcmc[keep, 2], start, end, thin), + coda::mcmc(Vcmax.mcmc[keep, 3], start, end, thin) + ) + pdf(paste(SaveLeafDir, runName, "_plot.Vcmax.pdf", sep = "")) + plot(mcmc.Vcmax, main = "Vcmax") dev.off() - accept$Vcmax = sum((diff(Vcmax.mcmc)!=0))/(niter*nchains-1) + accept$Vcmax <- sum((diff(Vcmax.mcmc) != 0)) / (niter * nchains - 1) } else { - Vcmax.mcmc = NULL - accept$Vcmax = NULL + Vcmax.mcmc <- NULL + accept$Vcmax <- NULL } ## Parameter Jmax -if(exists('Jmax.mcmc') && !is.null(Jmax.mcmc)) { - mcmc.Jmax = mcmc.list( - coda::mcmc(Jmax.mcmc[keep,1],start,end,thin), - coda::mcmc(Jmax.mcmc[keep,2],start,end,thin), - coda::mcmc(Jmax.mcmc[keep,3],start,end,thin)) - pdf(paste(SaveLeafDir, runName, "_plot.Jmax.pdf",sep="")) - plot(mcmc.Jmax, main="Jmax") +if (exists("Jmax.mcmc") && !is.null(Jmax.mcmc)) { + mcmc.Jmax <- mcmc.list( + coda::mcmc(Jmax.mcmc[keep, 1], start, end, thin), + coda::mcmc(Jmax.mcmc[keep, 2], start, end, thin), + coda::mcmc(Jmax.mcmc[keep, 3], start, end, thin) + ) + pdf(paste(SaveLeafDir, runName, "_plot.Jmax.pdf", sep = "")) + plot(mcmc.Jmax, main = "Jmax") dev.off() - accept$Jmax = sum((diff(Jmax.mcmc)!=0))/(niter*nchains-1) + accept$Jmax <- sum((diff(Jmax.mcmc) != 0)) / (niter * nchains - 1) } else { - Jmax.mcmc = NULL - accept$Jmax = NULL + Jmax.mcmc <- NULL + accept$Jmax <- NULL } ## Parameter R -if(exists('R.mcmc') && !is.null(R.mcmc)) { - mcmc.R = mcmc.list( - coda::mcmc(R.mcmc[keep,1],start,end,thin), - 
coda::mcmc(R.mcmc[keep,2],start,end,thin), - coda::mcmc(R.mcmc[keep,3],start,end,thin)) - pdf(paste(SaveLeafDir, runName, "_plot.R.pdf",sep="")) - plot(mcmc.R, main="R") +if (exists("R.mcmc") && !is.null(R.mcmc)) { + mcmc.R <- mcmc.list( + coda::mcmc(R.mcmc[keep, 1], start, end, thin), + coda::mcmc(R.mcmc[keep, 2], start, end, thin), + coda::mcmc(R.mcmc[keep, 3], start, end, thin) + ) + pdf(paste(SaveLeafDir, runName, "_plot.R.pdf", sep = "")) + plot(mcmc.R, main = "R") dev.off() - accept$R = sum((diff(R.mcmc)!=0))/(niter*nchains-1) + accept$R <- sum((diff(R.mcmc) != 0)) / (niter * nchains - 1) } else { - R.mcmc = NULL - accept$R = NULL + R.mcmc <- NULL + accept$R <- NULL } ## Parameter Gstar -if(exists('Gstar.mcmc') && !is.null(Gstar.mcmc)) { - mcmc.Gstar = mcmc.list( - coda::mcmc(Gstar.mcmc[keep,1],start,end,thin), - coda::mcmc(Gstar.mcmc[keep,2],start,end,thin), - coda::mcmc(Gstar.mcmc[keep,3],start,end,thin)) - pdf(paste(SaveLeafDir, runName, "_plot.Gstar.pdf",sep="")) - plot(mcmc.Gstar, main="Gstar") +if (exists("Gstar.mcmc") && !is.null(Gstar.mcmc)) { + mcmc.Gstar <- mcmc.list( + coda::mcmc(Gstar.mcmc[keep, 1], start, end, thin), + coda::mcmc(Gstar.mcmc[keep, 2], start, end, thin), + coda::mcmc(Gstar.mcmc[keep, 3], start, end, thin) + ) + pdf(paste(SaveLeafDir, runName, "_plot.Gstar.pdf", sep = "")) + plot(mcmc.Gstar, main = "Gstar") dev.off() - accept$Gstar = sum((diff(Gstar.mcmc)!=0))/(niter*nchains-1) + accept$Gstar <- sum((diff(Gstar.mcmc) != 0)) / (niter * nchains - 1) } else { - Gstar.mcmc = NULL - accept$Gstar = NULL + Gstar.mcmc <- NULL + accept$Gstar <- NULL } ## Parameter alpha -if(exists('alpha.mcmc') && !is.null(alpha.mcmc)) { - mcmc.alpha = mcmc.list( - coda::mcmc(alpha.mcmc[keep,1],start,end,thin), - coda::mcmc(alpha.mcmc[keep,2],start,end,thin), - coda::mcmc(alpha.mcmc[keep,3],start,end,thin)) - pdf(paste(SaveLeafDir, runName, "_plot.alpha.pdf",sep="")) - plot(mcmc.alpha, main="alpha") +if (exists("alpha.mcmc") && !is.null(alpha.mcmc)) { + mcmc.alpha <- mcmc.list( + coda::mcmc(alpha.mcmc[keep, 1], start, end, thin), + coda::mcmc(alpha.mcmc[keep, 2], start, end, thin), + coda::mcmc(alpha.mcmc[keep, 3], start, end, thin) + ) + pdf(paste(SaveLeafDir, runName, "_plot.alpha.pdf", sep = "")) + plot(mcmc.alpha, main = "alpha") dev.off() - accept$alpha = sum((diff(alpha.mcmc)!=0))/(niter*nchains-1) + accept$alpha <- sum((diff(alpha.mcmc) != 0)) / (niter * nchains - 1) } else { - alpha.mcmc = NULL - accept$alpha = NULL + alpha.mcmc <- NULL + accept$alpha <- NULL } ## Parameter m -if(exists('m.mcmc') && !is.null(m.mcmc)) { - mcmc.m = mcmc.list( - coda::mcmc(m.mcmc[keep,1],start,end,thin), - coda::mcmc(m.mcmc[keep,2],start,end,thin), - coda::mcmc(m.mcmc[keep,3],start,end,thin)) - pdf(paste(SaveLeafDir, runName, "_plot.m.pdf",sep="")) - plot(mcmc.m, main="m") +if (exists("m.mcmc") && !is.null(m.mcmc)) { + mcmc.m <- mcmc.list( + coda::mcmc(m.mcmc[keep, 1], start, end, thin), + coda::mcmc(m.mcmc[keep, 2], start, end, thin), + coda::mcmc(m.mcmc[keep, 3], start, end, thin) + ) + pdf(paste(SaveLeafDir, runName, "_plot.m.pdf", sep = "")) + plot(mcmc.m, main = "m") dev.off() - accept$m = sum((diff(m.mcmc)!=0))/(niter*nchains-1) + accept$m <- sum((diff(m.mcmc) != 0)) / (niter * nchains - 1) } else { - m.mcmc = NULL - accept$m = NULL + m.mcmc <- NULL + accept$m <- NULL } ## Parameter g0 -if(exists('g0.mcmc') && !is.null(g0.mcmc)) { - mcmc.g0 = mcmc.list( - coda::mcmc(g0.mcmc[keep,1],start,end,thin), - coda::mcmc(g0.mcmc[keep,2],start,end,thin), - coda::mcmc(g0.mcmc[keep,3],start,end,thin)) 
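# The mcmc.list constructions above hard-code three chains even though nchains is
# configurable elsewhere; a chain-count-agnostic helper would be (sketch only,
# with optional coda convergence diagnostics the original script does not compute):
as_mcmc_list <- function(x, keep, start, end, thin) {
  coda::mcmc.list(lapply(seq_len(ncol(x)), function(ch) {
    coda::mcmc(x[keep, ch], start = start, end = end, thin = thin)
  }))
}
# e.g. mcmc.Vcmax <- as_mcmc_list(Vcmax.mcmc, keep, start, end, thin)
# coda::gelman.diag(mcmc.Vcmax)   # potential scale reduction factor (R-hat)
# coda::effectiveSize(mcmc.Vcmax) # effective sample size across chains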
- pdf(paste(SaveLeafDir, runName, "_plot.g0.pdf",sep="")) - plot(mcmc.g0, main="g0") +if (exists("g0.mcmc") && !is.null(g0.mcmc)) { + mcmc.g0 <- mcmc.list( + coda::mcmc(g0.mcmc[keep, 1], start, end, thin), + coda::mcmc(g0.mcmc[keep, 2], start, end, thin), + coda::mcmc(g0.mcmc[keep, 3], start, end, thin) + ) + pdf(paste(SaveLeafDir, runName, "_plot.g0.pdf", sep = "")) + plot(mcmc.g0, main = "g0") dev.off() - accept$g0 = sum((diff(g0.mcmc)!=0))/(niter*nchains-1) + accept$g0 <- sum((diff(g0.mcmc) != 0)) / (niter * nchains - 1) } else { - g0.mcmc = NULL - accept$g0 = NULL + g0.mcmc <- NULL + accept$g0 <- NULL } ## Parameter tauF -if(exists('tauF.mcmc') && !is.null(tauF.mcmc)) { - mcmc.tauF = mcmc.list( - coda::mcmc(tauF.mcmc[keep,1],start,end,thin), - coda::mcmc(tauF.mcmc[keep,2],start,end,thin), - coda::mcmc(tauF.mcmc[keep,3],start,end,thin)) - pdf(paste(SaveLeafDir, runName, "_plot.tauF.pdf",sep="")) - plot(mcmc.tauF, main="tauF") +if (exists("tauF.mcmc") && !is.null(tauF.mcmc)) { + mcmc.tauF <- mcmc.list( + coda::mcmc(tauF.mcmc[keep, 1], start, end, thin), + coda::mcmc(tauF.mcmc[keep, 2], start, end, thin), + coda::mcmc(tauF.mcmc[keep, 3], start, end, thin) + ) + pdf(paste(SaveLeafDir, runName, "_plot.tauF.pdf", sep = "")) + plot(mcmc.tauF, main = "tauF") dev.off() } else { - tauF.mcmc = NULL + tauF.mcmc <- NULL } ## Parameter tauBB -if(exists('tauBB.mcmc') && !is.null(tauBB.mcmc)) { - mcmc.tauBB = mcmc.list( - coda::mcmc(tauBB.mcmc[keep,1],start,end,thin), - coda::mcmc(tauBB.mcmc[keep,2],start,end,thin), - coda::mcmc(tauBB.mcmc[keep,3],start,end,thin)) - pdf(paste(SaveLeafDir, runName, "_plot.tauBB.pdf",sep="")) - plot(mcmc.tauBB, main="tauBB") +if (exists("tauBB.mcmc") && !is.null(tauBB.mcmc)) { + mcmc.tauBB <- mcmc.list( + coda::mcmc(tauBB.mcmc[keep, 1], start, end, thin), + coda::mcmc(tauBB.mcmc[keep, 2], start, end, thin), + coda::mcmc(tauBB.mcmc[keep, 3], start, end, thin) + ) + pdf(paste(SaveLeafDir, runName, "_plot.tauBB.pdf", sep = "")) + plot(mcmc.tauBB, main = "tauBB") dev.off() } else { - tauBB.mcmc = NULL + tauBB.mcmc <- NULL } ## Parameter An.pred -if(exists('An.pred.mcmc') && !is.null(An.pred.mcmc)) { - An.ave = An.pred.mcmc[1:niter,] - if(nchains>1){ - for(c in 2:nchains){ - An.ave = (An.ave+An.pred.mcmc[(1+niter*(c-1)):(niter*c),])/2 +if (exists("An.pred.mcmc") && !is.null(An.pred.mcmc)) { + An.ave <- An.pred.mcmc[1:niter, ] + if (nchains > 1) { + for (c in 2:nchains) { + An.ave <- (An.ave + An.pred.mcmc[(1 + niter * (c - 1)):(niter * c), ]) / 2 } } - mcmc.An.pred = coda::mcmc(An.ave[keep,],start,end,thin) - for(a in 1:npoints) { - pdf(paste(SaveLeafDir, runName, "_plot.An.pred", a, ".pdf",sep="")) - plot(mcmc.An.pred[,a], main=paste("An.pred[",a,"]",sep="")) + mcmc.An.pred <- coda::mcmc(An.ave[keep, ], start, end, thin) + for (a in 1:npoints) { + pdf(paste(SaveLeafDir, runName, "_plot.An.pred", a, ".pdf", sep = "")) + plot(mcmc.An.pred[, a], main = paste("An.pred[", a, "]", sep = "")) dev.off() } } else { - An.pred.mcmc = NULL + An.pred.mcmc <- NULL } ## Parameter gs.pred -if(exists('gs.pred.mcmc') && !is.null(gs.pred.mcmc)) { - gs.ave = gs.pred.mcmc[1:niter,] - if(nchains>1){ - for(c in 2:nchains){ - gs.ave = (gs.ave+gs.pred.mcmc[(1+niter*(c-1)):(niter*c),])/2 +if (exists("gs.pred.mcmc") && !is.null(gs.pred.mcmc)) { + gs.ave <- gs.pred.mcmc[1:niter, ] + if (nchains > 1) { + for (c in 2:nchains) { + gs.ave <- (gs.ave + gs.pred.mcmc[(1 + niter * (c - 1)):(niter * c), ]) / 2 } } - mcmc.gs.pred = coda::mcmc(gs.ave[keep,],start,end,thin) - for(a in 1:npoints) { - 
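# Observation on the chain averaging above (no change made): updating the running
# average as (ave + chain_c) / 2 gives each newly added chain the same total
# weight as all previous chains combined, so for nchains > 2 the chains are not
# equally weighted. An equal-weight mean over the stacked prediction matrix could
# be computed as (sketch, hypothetical helper name):
chain_mean <- function(pred, niter, nchains) {
  blocks <- lapply(seq_len(nchains), function(ch) pred[(1 + niter * (ch - 1)):(niter * ch), ])
  Reduce(`+`, blocks) / nchains
}
# e.g. An.ave <- chain_mean(An.pred.mcmc, niter, nchains)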
pdf(paste(SaveLeafDir, runName, "_plot.gs.pred", a, ".pdf",sep="")) - plot(mcmc.gs.pred[,a], main=paste("gs.pred[",a,"]",sep="")) + mcmc.gs.pred <- coda::mcmc(gs.ave[keep, ], start, end, thin) + for (a in 1:npoints) { + pdf(paste(SaveLeafDir, runName, "_plot.gs.pred", a, ".pdf", sep = "")) + plot(mcmc.gs.pred[, a], main = paste("gs.pred[", a, "]", sep = "")) dev.off() } } else { - gs.pred.mcmc = NULL + gs.pred.mcmc <- NULL } ## Get summary of all variables -summary.Vcmax = summary(mcmc.Vcmax) -summary.Jmax = summary(mcmc.Jmax) -summary.R = summary(mcmc.R) -summary.Gstar = summary(mcmc.Gstar) -summary.alpha = summary(mcmc.alpha) -summary.m = summary(mcmc.m) -summary.g0 = summary(mcmc.g0) -summary.tauF = summary(mcmc.tauF) -summary.tauBB = summary(mcmc.tauBB) -summary.An.pred = summary(mcmc.An.pred) -summary.gs.pred = summary(mcmc.gs.pred) +summary.Vcmax <- summary(mcmc.Vcmax) +summary.Jmax <- summary(mcmc.Jmax) +summary.R <- summary(mcmc.R) +summary.Gstar <- summary(mcmc.Gstar) +summary.alpha <- summary(mcmc.alpha) +summary.m <- summary(mcmc.m) +summary.g0 <- summary(mcmc.g0) +summary.tauF <- summary(mcmc.tauF) +summary.tauBB <- summary(mcmc.tauBB) +summary.An.pred <- summary(mcmc.An.pred) +summary.gs.pred <- summary(mcmc.gs.pred) ## Compute means & intervals for all variables -mean.Vcmax = summary.Vcmax$statistics[1] -mean.Jmax = summary.Jmax$statistics[1] -mean.R = summary.R$statistics[1] -mean.Gstar = summary.Gstar$statistics[1] -mean.alpha = summary.alpha$statistics[1] -mean.m = summary.m$statistics[1] -mean.g0 = summary.g0$statistics[1] -mean.tauF = summary.tauF$statistics[1] -mean.tauBB = summary.tauBB$statistics[1] - -sd.Vcmax = summary.Vcmax$statistics[2] -sd.Jmax = summary.Jmax$statistics[2] -sd.R = summary.R$statistics[2] -sd.Gstar = summary.Gstar$statistics[2] -sd.alpha = summary.alpha$statistics[2] -sd.m = summary.m$statistics[2] -sd.g0 = summary.g0$statistics[2] -sd.tauF = summary.tauF$statistics[2] -sd.tauBB = summary.tauBB$statistics[2] - -q25.Vcmax = summary.Vcmax$quantiles[1] -q25.Jmax = summary.Jmax$quantiles[1] -q25.R = summary.R$quantiles[1] -q25.Gstar = summary.Gstar$quantiles[1] -q25.alpha = summary.alpha$quantiles[1] -q25.m = summary.m$quantiles[1] -q25.g0 = summary.g0$quantiles[1] -q25.tauF = summary.tauF$quantiles[1] -q25.tauBB = summary.tauBB$quantiles[1] - - -q975.Vcmax = summary.Vcmax$quantiles[5] -q975.Jmax = summary.Jmax$quantiles[5] -q975.R = summary.R$quantiles[5] -q975.Gstar = summary.Gstar$quantiles[5] -q975.alpha = summary.alpha$quantiles[5] -q975.m = summary.m$quantiles[5] -q975.g0 = summary.g0$quantiles[5] -q975.tauF = summary.tauF$quantiles[5] -q975.tauBB = summary.tauBB$quantiles[5] - -mean.pred.An = numeric() -mean.pred.gs = numeric() -sd.pred.An = numeric() -sd.pred.gs = numeric() -q25.pred.An = numeric() -q25.pred.gs = numeric() -q975.pred.An = numeric() -q975.pred.gs = numeric() - -for(p in 1:npoints){ - mean.pred.An[p] = summary.An.pred$statistics[p,1] - mean.pred.gs[p] = summary.gs.pred$statistics[p,1] - sd.pred.An[p] = summary.An.pred$statistics[p,2] - sd.pred.gs[p] = summary.gs.pred$statistics[p,2] - q25.pred.An[p] = summary.An.pred$quantiles[p,1] - q25.pred.gs[p] = summary.gs.pred$quantiles[p,1] - q975.pred.An[p] = summary.An.pred$quantiles[p,5] - q975.pred.gs[p] = summary.gs.pred$quantiles[p,5] +mean.Vcmax <- summary.Vcmax$statistics[1] +mean.Jmax <- summary.Jmax$statistics[1] +mean.R <- summary.R$statistics[1] +mean.Gstar <- summary.Gstar$statistics[1] +mean.alpha <- summary.alpha$statistics[1] +mean.m <- summary.m$statistics[1] +mean.g0 <- 
summary.g0$statistics[1] +mean.tauF <- summary.tauF$statistics[1] +mean.tauBB <- summary.tauBB$statistics[1] + +sd.Vcmax <- summary.Vcmax$statistics[2] +sd.Jmax <- summary.Jmax$statistics[2] +sd.R <- summary.R$statistics[2] +sd.Gstar <- summary.Gstar$statistics[2] +sd.alpha <- summary.alpha$statistics[2] +sd.m <- summary.m$statistics[2] +sd.g0 <- summary.g0$statistics[2] +sd.tauF <- summary.tauF$statistics[2] +sd.tauBB <- summary.tauBB$statistics[2] + +q25.Vcmax <- summary.Vcmax$quantiles[1] +q25.Jmax <- summary.Jmax$quantiles[1] +q25.R <- summary.R$quantiles[1] +q25.Gstar <- summary.Gstar$quantiles[1] +q25.alpha <- summary.alpha$quantiles[1] +q25.m <- summary.m$quantiles[1] +q25.g0 <- summary.g0$quantiles[1] +q25.tauF <- summary.tauF$quantiles[1] +q25.tauBB <- summary.tauBB$quantiles[1] + + +q975.Vcmax <- summary.Vcmax$quantiles[5] +q975.Jmax <- summary.Jmax$quantiles[5] +q975.R <- summary.R$quantiles[5] +q975.Gstar <- summary.Gstar$quantiles[5] +q975.alpha <- summary.alpha$quantiles[5] +q975.m <- summary.m$quantiles[5] +q975.g0 <- summary.g0$quantiles[5] +q975.tauF <- summary.tauF$quantiles[5] +q975.tauBB <- summary.tauBB$quantiles[5] + +mean.pred.An <- numeric() +mean.pred.gs <- numeric() +sd.pred.An <- numeric() +sd.pred.gs <- numeric() +q25.pred.An <- numeric() +q25.pred.gs <- numeric() +q975.pred.An <- numeric() +q975.pred.gs <- numeric() + +for (p in 1:npoints) { + mean.pred.An[p] <- summary.An.pred$statistics[p, 1] + mean.pred.gs[p] <- summary.gs.pred$statistics[p, 1] + sd.pred.An[p] <- summary.An.pred$statistics[p, 2] + sd.pred.gs[p] <- summary.gs.pred$statistics[p, 2] + q25.pred.An[p] <- summary.An.pred$quantiles[p, 1] + q25.pred.gs[p] <- summary.gs.pred$quantiles[p, 1] + q975.pred.An[p] <- summary.An.pred$quantiles[p, 5] + q975.pred.gs[p] <- summary.gs.pred$quantiles[p, 5] } ## predictions vs measurements -axismin = min(min(An.obs),min(mean.pred.An)) -axismax = max(max(An.obs),max(mean.pred.An)) -pdf(paste(SaveLeafDir, runName, "_An_Pred-vs-Meas.pdf",sep="")) +axismin <- min(min(An.obs), min(mean.pred.An)) +axismax <- max(max(An.obs), max(mean.pred.An)) +pdf(paste(SaveLeafDir, runName, "_An_Pred-vs-Meas.pdf", sep = "")) plot(An.obs, - mean.pred.An, - ylim = c(axismin,axismax), - xlim = c(axismin,axismax), - pch = 19, - main = "Predicted An vs Measured An", - xlab = "Measured An (umol m-2 s-1)", - ylab = "Predicted An (umol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) + mean.pred.An, + ylim = c(axismin, axismax), + xlim = c(axismin, axismax), + pch = 19, + main = "Predicted An vs Measured An", + xlab = "Measured An (umol m-2 s-1)", + ylab = "Predicted An (umol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 +) abline(0, 1, col = "dark green", lwd = 3) dev.off() -axismin = min(min(gs.obs),min(mean.pred.gs)) -axismax = max(max(gs.obs),max(mean.pred.gs)) -pdf(paste(SaveLeafDir, runName, "_gs_Pred-vs-Meas.pdf",sep="")) +axismin <- min(min(gs.obs), min(mean.pred.gs)) +axismax <- max(max(gs.obs), max(mean.pred.gs)) +pdf(paste(SaveLeafDir, runName, "_gs_Pred-vs-Meas.pdf", sep = "")) plot(gs.obs, - mean.pred.gs, - ylim = c(axismin,axismax), - xlim = c(axismin,axismax), - pch = 19, - main = "Predicted gs vs Measured gs", - xlab = "Measured gs (mol m-2 s-1)", - ylab = "Predicted gs (mol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) + mean.pred.gs, + ylim = c(axismin, axismax), + xlim = c(axismin, axismax), + pch = 19, + main = "Predicted gs vs Measured gs", + xlab = "Measured gs (mol m-2 s-1)", + ylab = "Predicted gs (mol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 +) abline(0, 1, col = 
"dark green", lwd = 3) dev.off() ## autocorrelation plots of each leaf -pdf(paste(SaveLeafDir, runName, "_Autocorr_Vcmax.pdf",sep="")) -coda::autocorr.plot(mcmc.Vcmax, main="Vcmax Autocorrelation") +pdf(paste(SaveLeafDir, runName, "_Autocorr_Vcmax.pdf", sep = "")) +coda::autocorr.plot(mcmc.Vcmax, main = "Vcmax Autocorrelation") dev.off() -pdf(paste(SaveLeafDir, runName, "_Autocorr_Jmax.pdf",sep="")) -coda::autocorr.plot(mcmc.Jmax, main="Jmax Autocorrelation") +pdf(paste(SaveLeafDir, runName, "_Autocorr_Jmax.pdf", sep = "")) +coda::autocorr.plot(mcmc.Jmax, main = "Jmax Autocorrelation") dev.off() ## Farquhar CI and PI -if(exists('An.pred.mcmc') && !is.null(An.pred.mcmc)) { - pdf(paste(SaveLeafDir, runName, "_model_Farquhar.pdf",sep="")) - ci.pred = Ca-mean.pred.An/mean.pred.gs - cimax = max(q975.pred.An) - cimin = min(q25.pred.An) - pi.An <- apply(pA.mcmc,2,quantile,c(0.025,0.5,0.975)) - pimax = max(pi.An[3,]) - pimin = min(pi.An[1,]) - ylim = min((sign(pimin)*0.75*abs(pimin)),(sign(cimin)*0.75*abs(cimin))) - ylim[2] = max((sign(pimax)*1.25*abs(pimax)),(sign(cimax)*1.25*abs(cimax))) - xlim = 0 - xlim[2] = max(Ci)+100 - +if (exists("An.pred.mcmc") && !is.null(An.pred.mcmc)) { + pdf(paste(SaveLeafDir, runName, "_model_Farquhar.pdf", sep = "")) + ci.pred <- Ca - mean.pred.An / mean.pred.gs + cimax <- max(q975.pred.An) + cimin <- min(q25.pred.An) + pi.An <- apply(pA.mcmc, 2, quantile, c(0.025, 0.5, 0.975)) + pimax <- max(pi.An[3, ]) + pimin <- min(pi.An[1, ]) + ylim <- min((sign(pimin) * 0.75 * abs(pimin)), (sign(cimin) * 0.75 * abs(cimin))) + ylim[2] <- max((sign(pimax) * 1.25 * abs(pimax)), (sign(cimax) * 1.25 * abs(cimax))) + xlim <- 0 + xlim[2] <- max(Ci) + 100 + plot(Ci, - An.obs, - ylim = ylim, - xlim = xlim, - pch = 19, - main = "Farquhar Coupled Model Fit", - xlab = "Intracellular [CO2] (Ci) (ppm)", - ylab = "Net Photosynthesis (An) (umol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - lines(ci.pred[index],mean.pred.An[index],col=3,lwd=2) - lines(ci.pred[index],q25.pred.An[index],col=3,lty=2) - lines(ci.pred[index],q975.pred.An[index],col=3,lty=2) - lines(ci.pred[index],pi.An[1,index],col=4,lty=2) - lines(ci.pred[index],pi.An[3,index],col=4,lty=2) - - legend(700, (ylim[1]+10), - c("MCMC Fit", "95% Credible Interval", "95% Predictive Interval"), - col = c(3,3,4), - lty = c(1, 2, 2)) - + An.obs, + ylim = ylim, + xlim = xlim, + pch = 19, + main = "Farquhar Coupled Model Fit", + xlab = "Intracellular [CO2] (Ci) (ppm)", + ylab = "Net Photosynthesis (An) (umol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + lines(ci.pred[index], mean.pred.An[index], col = 3, lwd = 2) + lines(ci.pred[index], q25.pred.An[index], col = 3, lty = 2) + lines(ci.pred[index], q975.pred.An[index], col = 3, lty = 2) + lines(ci.pred[index], pi.An[1, index], col = 4, lty = 2) + lines(ci.pred[index], pi.An[3, index], col = 4, lty = 2) + + legend(700, (ylim[1] + 10), + c("MCMC Fit", "95% Credible Interval", "95% Predictive Interval"), + col = c(3, 3, 4), + lty = c(1, 2, 2) + ) + dev.off() } ## Ball-Berry CI and PI -if(exists('gs.pred.mcmc') && !is.null(gs.pred.mcmc)) { - pdf(paste(SaveLeafDir, runName, "_model_BallBerry.pdf",sep="")) - BBx = mean.pred.An*H/Ca - cimax = max(q975.pred.gs) - cimin = min(q25.pred.gs) - pi.gs <- apply(pgs.mcmc,2,quantile,c(0.025,0.5,0.975)) - pimax = max(pi.gs[3,]) - pimin = min(pi.gs[1,]) - ylim = min((sign(pimin)*0.75*abs(pimin)),(sign(cimin)*0.75*abs(cimin))) - ylim[2] = max((sign(pimax)*1.25*abs(pimax)),(sign(cimax)*1.25*abs(cimax))) - xlim = min(An.obs*H/Ca) - 0.005 - xlim[2] = 
max(An.obs*H/Ca) + 0.005 - - plot(An.obs*H/Ca, - gs.obs, - ylim = ylim, - xlim = xlim, - pch = 19, - main = "Ball-Berry Coupled Model Fit", - xlab = "A*H/Ca", - ylab = "Stomatal Conductance (gs) (mol m-2 s-1)", - cex.main = 1.6, - cex.lab = 1.4) - - sorted = sort.int(An.obs*H/Ca,index.return=TRUE) - BBx.index = sorted$ix - - lines(BBx[BBx.index],mean.pred.gs[BBx.index],col=3,lwd=2) - lines(BBx[BBx.index],q25.pred.gs[BBx.index],col=3,lty=2) - lines(BBx[BBx.index],q975.pred.gs[BBx.index],col=3,lty=2) - lines(BBx[BBx.index],pi.gs[1, BBx.index],col=4,lty=2) - lines(BBx[BBx.index],pi.gs[3, BBx.index],col=4,lty=2) - - legend(0, ylim[2], - c("MCMC Fit", "95% Credible Interval", "95% Predictive Interval"), - col = c(3,3,4), - lty = c(1, 2, 2)) - +if (exists("gs.pred.mcmc") && !is.null(gs.pred.mcmc)) { + pdf(paste(SaveLeafDir, runName, "_model_BallBerry.pdf", sep = "")) + BBx <- mean.pred.An * H / Ca + cimax <- max(q975.pred.gs) + cimin <- min(q25.pred.gs) + pi.gs <- apply(pgs.mcmc, 2, quantile, c(0.025, 0.5, 0.975)) + pimax <- max(pi.gs[3, ]) + pimin <- min(pi.gs[1, ]) + ylim <- min((sign(pimin) * 0.75 * abs(pimin)), (sign(cimin) * 0.75 * abs(cimin))) + ylim[2] <- max((sign(pimax) * 1.25 * abs(pimax)), (sign(cimax) * 1.25 * abs(cimax))) + xlim <- min(An.obs * H / Ca) - 0.005 + xlim[2] <- max(An.obs * H / Ca) + 0.005 + + plot(An.obs * H / Ca, + gs.obs, + ylim = ylim, + xlim = xlim, + pch = 19, + main = "Ball-Berry Coupled Model Fit", + xlab = "A*H/Ca", + ylab = "Stomatal Conductance (gs) (mol m-2 s-1)", + cex.main = 1.6, + cex.lab = 1.4 + ) + + sorted <- sort.int(An.obs * H / Ca, index.return = TRUE) + BBx.index <- sorted$ix + + lines(BBx[BBx.index], mean.pred.gs[BBx.index], col = 3, lwd = 2) + lines(BBx[BBx.index], q25.pred.gs[BBx.index], col = 3, lty = 2) + lines(BBx[BBx.index], q975.pred.gs[BBx.index], col = 3, lty = 2) + lines(BBx[BBx.index], pi.gs[1, BBx.index], col = 4, lty = 2) + lines(BBx[BBx.index], pi.gs[3, BBx.index], col = 4, lty = 2) + + legend(0, ylim[2], + c("MCMC Fit", "95% Credible Interval", "95% Predictive Interval"), + col = c(3, 3, 4), + lty = c(1, 2, 2) + ) + dev.off() } ## DIC calculation -if(exists('DA.mcmc')) { - An.pred.mean = colMeans(mcmc.An.pred,1) - tauF.mean = mean(tauF.mcmc) - - DthetaBar = -2*llik.A(An.pred.mean,tauF.mean) - Dbar = mean(DA.mcmc[keep,]) - DIC.An = 2*Dbar - DthetaBar -} else { - DA.mcmc=NULL +if (exists("DA.mcmc")) { + An.pred.mean <- colMeans(mcmc.An.pred, 1) + tauF.mean <- mean(tauF.mcmc) + + DthetaBar <- -2 * llik.A(An.pred.mean, tauF.mean) + Dbar <- mean(DA.mcmc[keep, ]) + DIC.An <- 2 * Dbar - DthetaBar +} else { + DA.mcmc <- NULL } -if(exists('Dgs.mcmc')) { - gs.pred.mean = colMeans(mcmc.gs.pred,1) - tauBB.mean = mean(tauBB.mcmc) - - DthetaBar = -2*llik.gs(gs.pred.mean,tauBB.mean) - Dbar = mean(Dgs.mcmc[keep,]) - DIC.gs = 2*Dbar - DthetaBar -} else { - Dgs.mcmc=NULL +if (exists("Dgs.mcmc")) { + gs.pred.mean <- colMeans(mcmc.gs.pred, 1) + tauBB.mean <- mean(tauBB.mcmc) + + DthetaBar <- -2 * llik.gs(gs.pred.mean, tauBB.mean) + Dbar <- mean(Dgs.mcmc[keep, ]) + DIC.gs <- 2 * Dbar - DthetaBar +} else { + Dgs.mcmc <- NULL } ## parameter pairs plot -df = data.frame(Vcmax.mcmc[keep,1], Jmax.mcmc[keep,1], R.mcmc[keep,1], Gstar.mcmc[keep,1], alpha.mcmc[keep,1], m.mcmc[keep,1], g0.mcmc[keep,1], tauF.mcmc[keep,1], tauBB.mcmc[keep,1]) -jpeg(paste(SaveLeafDir, runName, "_Params_Corr_Plot.jpeg",sep="")) -pairs(df,c("Vcmax","Jmax","R","Gamma*","alpha","m","g0","F Var","BB Var")) -dev.off() +df <- data.frame(Vcmax.mcmc[keep, 1], Jmax.mcmc[keep, 1], R.mcmc[keep, 
1], Gstar.mcmc[keep, 1], alpha.mcmc[keep, 1], m.mcmc[keep, 1], g0.mcmc[keep, 1], tauF.mcmc[keep, 1], tauBB.mcmc[keep, 1]) +jpeg(paste(SaveLeafDir, runName, "_Params_Corr_Plot.jpeg", sep = "")) +pairs(df, c("Vcmax", "Jmax", "R", "Gamma*", "alpha", "m", "g0", "F Var", "BB Var")) +dev.off() ## set remaining calculated values to NULL if they weren't tracked -if(!exists('pA.mcmc')) { pA.mcmc=NULL } -if(!exists('pgs.mcmc')) { pgs.mcmc=NULL } +if (!exists("pA.mcmc")) { + pA.mcmc <- NULL +} +if (!exists("pgs.mcmc")) { + pgs.mcmc <- NULL +} ## Save output data -save(Vcmax.mcmc,Jmax.mcmc,R.mcmc,Gstar.mcmc,alpha.mcmc,m.mcmc,g0.mcmc,tauF.mcmc,tauBB.mcmc,An.pred.mcmc,gs.pred.mcmc,dat,niter,DIC.An,DIC.gs, file=saveFileDat) +save(Vcmax.mcmc, Jmax.mcmc, R.mcmc, Gstar.mcmc, alpha.mcmc, m.mcmc, g0.mcmc, tauF.mcmc, tauBB.mcmc, An.pred.mcmc, gs.pred.mcmc, dat, niter, DIC.An, DIC.gs, file = saveFileDat) diff --git a/modules/photosynthesis/code/FBB_param_corr.R b/modules/photosynthesis/code/FBB_param_corr.R index 3cb11f4e73b..21d6996b34b 100644 --- a/modules/photosynthesis/code/FBB_param_corr.R +++ b/modules/photosynthesis/code/FBB_param_corr.R @@ -1,50 +1,54 @@ ## FBB_param_corr ## parameter correlation plots of each leaf -if(plot.corr){ -pdf(paste(SaveLeafDir, runName, "_param_corr_Vcmax_Jmax.pdf",sep="")) - plot(Vcmax.mcmc[keep,1], - Jmax.mcmc[keep,1], - pch = 19, - main = "Vcmax & Jmax Correlation", - xlab = "Vcmax", - ylab = "Jmax", - xlim = c(50,100), - cex.main = 1.6, - cex.lab = 1.4) - dev.off() - -pdf(paste(SaveLeafDir, runName, "_param_corr_m_Vcmax.pdf",sep="")) - plot(Vcmax.mcmc[keep,1], - m.mcmc[keep,1], - pch = 19, - main = "Vcmax & m Correlation", - xlab = "Vcmax", - ylab = "m", - xlim = c(50,100), - cex.main = 1.6, - cex.lab = 1.4) - dev.off() +if (plot.corr) { + pdf(paste(SaveLeafDir, runName, "_param_corr_Vcmax_Jmax.pdf", sep = "")) + plot(Vcmax.mcmc[keep, 1], + Jmax.mcmc[keep, 1], + pch = 19, + main = "Vcmax & Jmax Correlation", + xlab = "Vcmax", + ylab = "Jmax", + xlim = c(50, 100), + cex.main = 1.6, + cex.lab = 1.4 + ) + dev.off() -pdf(paste(SaveLeafDir, runName, "_param_corr_m_Jmax.pdf",sep="")) - plot(Jmax.mcmc[keep,1], - m.mcmc[keep,1], - pch = 19, - main = "Jmax & m Correlation", - xlab = "Jmax", - ylab = "m", - cex.main = 1.6, - cex.lab = 1.4) - dev.off() - -pdf(paste(SaveLeafDir, runName, "_param_corr_m_g0.pdf",sep="")) - plot(g0.mcmc[keep,1], - m.mcmc[keep,1], - pch = 19, - main = "g0 & m Correlation", - xlab = "g0", - ylab = "m", - cex.main = 1.6, - cex.lab = 1.4) - dev.off() + pdf(paste(SaveLeafDir, runName, "_param_corr_m_Vcmax.pdf", sep = "")) + plot(Vcmax.mcmc[keep, 1], + m.mcmc[keep, 1], + pch = 19, + main = "Vcmax & m Correlation", + xlab = "Vcmax", + ylab = "m", + xlim = c(50, 100), + cex.main = 1.6, + cex.lab = 1.4 + ) + dev.off() + + pdf(paste(SaveLeafDir, runName, "_param_corr_m_Jmax.pdf", sep = "")) + plot(Jmax.mcmc[keep, 1], + m.mcmc[keep, 1], + pch = 19, + main = "Jmax & m Correlation", + xlab = "Jmax", + ylab = "m", + cex.main = 1.6, + cex.lab = 1.4 + ) + dev.off() + + pdf(paste(SaveLeafDir, runName, "_param_corr_m_g0.pdf", sep = "")) + plot(g0.mcmc[keep, 1], + m.mcmc[keep, 1], + pch = 19, + main = "g0 & m Correlation", + xlab = "g0", + ylab = "m", + cex.main = 1.6, + cex.lab = 1.4 + ) + dev.off() } diff --git a/modules/photosynthesis/code/FBB_setup.R b/modules/photosynthesis/code/FBB_setup.R index 80054e2ddc3..9553f1d7295 100644 --- a/modules/photosynthesis/code/FBB_setup.R +++ b/modules/photosynthesis/code/FBB_setup.R @@ -1,21 +1,21 @@ ## FBBmodel setup # load data 
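# Reference note on the DIC calculation in FBB_multi_chain_analysis.R above
# (standard definition, no change to the code): with Dbar the posterior mean
# deviance and D(theta.bar) the deviance at the posterior means,
#   pD  = Dbar - D(theta.bar)            # effective number of parameters
#   DIC = Dbar + pD = 2 * Dbar - D(theta.bar)
# which is exactly the form DIC.An <- 2 * Dbar - DthetaBar used in the script.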
-data = read.csv(datFile, header=T) +data <- read.csv(datFile, header = T) # select which leaf to analyze -dat = data[which(data$id == species.id),] -dat = dat[which(dat$leaf == leaf),] -npoints = nrow(dat) +dat <- data[which(data$id == species.id), ] +dat <- dat[which(dat$leaf == leaf), ] +npoints <- nrow(dat) # define data variables -An.obs = dat$Photo # observed An -gs.obs = dat$Cond # observed gs -Ca = dat$CO2S # atmospheric [C02] -H = dat$RH_S/100 # realtive humidity -Q = dat$PARi # PAR -Ci = Ca-An.obs/gs.obs # intracellular [CO2] +An.obs <- dat$Photo # observed An +gs.obs <- dat$Cond # observed gs +Ca <- dat$CO2S # atmospheric [C02] +H <- dat$RH_S / 100 # realtive humidity +Q <- dat$PARi # PAR +Ci <- Ca - An.obs / gs.obs # intracellular [CO2] # determine proper sorting of measurements -sorted = sort.int(Ca,index.return=TRUE) -index = sorted$ix \ No newline at end of file +sorted <- sort.int(Ca, index.return = TRUE) +index <- sorted$ix diff --git a/modules/photosynthesis/code/FBB_summary.R b/modules/photosynthesis/code/FBB_summary.R index ab2c5e635cf..e326ace9297 100644 --- a/modules/photosynthesis/code/FBB_summary.R +++ b/modules/photosynthesis/code/FBB_summary.R @@ -1,38 +1,38 @@ ## FBB_summary sink(saveFileSum) -print(paste("LEAF",leaf)) -print(paste("Iterations:",niter)) -print(paste("Start:",start)) -print(paste("End:",end)) -print(paste("Thin:",thin)) -print(paste("Used Iterations:",(end-start+1)*nchains/thin)) +print(paste("LEAF", leaf)) +print(paste("Iterations:", niter)) +print(paste("Start:", start)) +print(paste("End:", end)) +print(paste("Thin:", thin)) +print(paste("Used Iterations:", (end - start + 1) * nchains / thin)) print("---------------------------------------------") print("MEAN, SD, q2.5, q97.5") -print(paste("Vcmax:",mean.Vcmax, sd.Vcmax, q25.Vcmax, q975.Vcmax, sep=",")) -print(paste("Jmax:",mean.Jmax, sd.Jmax, q25.Jmax, q975.Jmax, sep=",")) -print(paste("R:",mean.R, sd.R, q25.R, q975.R, sep=",")) -print(paste("Gstar:",mean.Gstar, sd.Gstar, q25.Gstar, q975.Gstar, sep=",")) -print(paste("alpha:",mean.alpha, sd.alpha, q25.alpha, q975.alpha, sep=",")) -print(paste("m:",mean.m, sd.m, q25.m, q975.m, sep=",")) -print(paste("g0:",mean.g0, sd.g0, q25.g0, q975.g0, sep=",")) -print(paste("tauF:",mean.tauF, sd.tauF, q25.tauF, q975.tauF, sep=",")) -print(paste("tauBB:",mean.tauBB, sd.tauBB, q25.tauBB, q975.tauBB, sep=",")) +print(paste("Vcmax:", mean.Vcmax, sd.Vcmax, q25.Vcmax, q975.Vcmax, sep = ",")) +print(paste("Jmax:", mean.Jmax, sd.Jmax, q25.Jmax, q975.Jmax, sep = ",")) +print(paste("R:", mean.R, sd.R, q25.R, q975.R, sep = ",")) +print(paste("Gstar:", mean.Gstar, sd.Gstar, q25.Gstar, q975.Gstar, sep = ",")) +print(paste("alpha:", mean.alpha, sd.alpha, q25.alpha, q975.alpha, sep = ",")) +print(paste("m:", mean.m, sd.m, q25.m, q975.m, sep = ",")) +print(paste("g0:", mean.g0, sd.g0, q25.g0, q975.g0, sep = ",")) +print(paste("tauF:", mean.tauF, sd.tauF, q25.tauF, q975.tauF, sep = ",")) +print(paste("tauBB:", mean.tauBB, sd.tauBB, q25.tauBB, q975.tauBB, sep = ",")) print("---------------------------------------------") print("PREDICTED VALUES") -print(paste("An-mean:",mean.pred.An)) -print(paste("gs-mean:",mean.pred.gs)) -print(paste("An-sd:",sd.pred.An)) -print(paste("gs-sd:",sd.pred.gs)) -print(paste("An-q25:",q25.pred.An)) -print(paste("gs-q25:",q25.pred.gs)) -print(paste("An-q975:",q975.pred.An)) -print(paste("gs-q975:",q975.pred.gs)) +print(paste("An-mean:", mean.pred.An)) +print(paste("gs-mean:", mean.pred.gs)) +print(paste("An-sd:", sd.pred.An)) 
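# Worked example of the Ci back-calculation in FBB_setup.R above (illustrative
# numbers only): with Ca = 400 ppm, An = 20 umol m-2 s-1 and gs = 0.25 mol m-2 s-1,
#   Ci = Ca - An / gs = 400 - 20 / 0.25 = 320 ppm.
# This is a simple inversion of the diffusion equation and ignores
# boundary-layer conductance.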
+print(paste("gs-sd:", sd.pred.gs)) +print(paste("An-q25:", q25.pred.An)) +print(paste("gs-q25:", q25.pred.gs)) +print(paste("An-q975:", q975.pred.An)) +print(paste("gs-q975:", q975.pred.gs)) print("---------------------------------------------") -print(paste("Farquhar DIC:",DIC.An)) -print(paste("Ball-Berry DIC:",DIC.gs)) +print(paste("Farquhar DIC:", DIC.An)) +print(paste("Ball-Berry DIC:", DIC.gs)) print("---------------------------------------------") print("Acceptrance Rates") print(accept) sink() -summary = data.frame(mean.Vcmax, sd.Vcmax, mean.Jmax, sd.Jmax, mean.R, sd.R, mean.Gstar, sd.Gstar, mean.alpha, sd.alpha, mean.m, sd.m, mean.g0, sd.g0, mean.tauF, sd.tauF, mean.tauBB, sd.tauBB, DIC.An, DIC.gs) -write.csv(summary,file=paste(SaveLeafDir, runName, "_summary.csv", sep="")) \ No newline at end of file +summary <- data.frame(mean.Vcmax, sd.Vcmax, mean.Jmax, sd.Jmax, mean.R, sd.R, mean.Gstar, sd.Gstar, mean.alpha, sd.alpha, mean.m, sd.m, mean.g0, sd.g0, mean.tauF, sd.tauF, mean.tauBB, sd.tauBB, DIC.An, DIC.gs) +write.csv(summary, file = paste(SaveLeafDir, runName, "_summary.csv", sep = "")) diff --git a/modules/photosynthesis/code/serbin.simple.code/Process_LiCor_GE_ACi_Data.R b/modules/photosynthesis/code/serbin.simple.code/Process_LiCor_GE_ACi_Data.R index 20df0d092ca..fc4f9fc2c40 100644 --- a/modules/photosynthesis/code/serbin.simple.code/Process_LiCor_GE_ACi_Data.R +++ b/modules/photosynthesis/code/serbin.simple.code/Process_LiCor_GE_ACi_Data.R @@ -3,75 +3,75 @@ # Estimate Vcmax and Jmax using Farquhar equation optimization # # --- Uses DEoptim global parameter optimization -# --- Outputs Rd, Vcmax, and Jmax (when a full curve is availible) estimated parameters +# --- Outputs Rd, Vcmax, and Jmax (when a full curve is availible) estimated parameters # in a single output .csv file with assocaited information # --- Output A-Ci fit diagnostic figures and DEoptim parameter trace plots # -# --- Last updated: 8.22.2013 BY SPS +# --- Last updated: 8.22.2013 BY SPS #################################################################################################### #---------------- Close all devices and delete all variables. -------------------------------------# -rm(list=ls(all=TRUE)) # clear workspace -graphics.off() # close any open graphics -closeAllConnections() # close any open connections to files +rm(list = ls(all = TRUE)) # clear workspace +graphics.off() # close any open graphics +closeAllConnections() # close any open connections to files #--------------------------------------------------------------------------------------------------# #---------------- *User defined settings.* --------------------------------------------------------# ### Location of R scripts. Needed for Farquhar model optimization. Contains functions. -r.functions <- '/Users/serbin/Data/Dropbox/Soybean_Aphid_Project/R_Scripts/' +r.functions <- "/Users/serbin/Data/Dropbox/Soybean_Aphid_Project/R_Scripts/" -### Input LI6400 dataset. First define location of file (i.e. directory). -in.dir <- '/Users/serbin/Data/Dropbox/Soybean_Aphid_Project/Data/LI6400_Data/' -dataset <- 'Compiled_data_2012_ACi_only.v2.csv' +### Input LI6400 dataset. First define location of file (i.e. directory). 
+in.dir <- "/Users/serbin/Data/Dropbox/Soybean_Aphid_Project/Data/LI6400_Data/" +dataset <- "Compiled_data_2012_ACi_only.v2.csv" ### Define input file to be processed -ge.data <- read.table(paste(in.dir,"/",dataset,sep=""), header=T,sep=",") -summary(ge.data) ## Summary of dataset +ge.data <- read.table(paste(in.dir, "/", dataset, sep = ""), header = T, sep = ",") +summary(ge.data) ## Summary of dataset -### Main output directory -out.dir <- ('/Users/serbin/Data/Dropbox/Soybean_Aphid_Project/Data/Processed_LI6400_Data/Processed_2012_Data/') -if (! file.exists(out.dir)) dir.create(out.dir,recursive=TRUE) +### Main output directory +out.dir <- ("/Users/serbin/Data/Dropbox/Soybean_Aphid_Project/Data/Processed_LI6400_Data/Processed_2012_Data/") +if (!file.exists(out.dir)) dir.create(out.dir, recursive = TRUE) # *********************************** QA/QC Options *********************************** ### Vcmax to Jmax Ci cutoff. -Vcmax.cutoff <- 220 #ppm Options: E.g. 180, 200, 220, 250 -Jmax.cutoff <- 450 #ppm Options: E.g. 400, 450, 500 +Vcmax.cutoff <- 220 # ppm Options: E.g. 180, 200, 220, 250 +Jmax.cutoff <- 450 # ppm Options: E.g. 400, 450, 500 ### Sample QC checks -Cond.cutoff <- 0.08 ## Throw out observations with Cond < cutoff. E.g. 0.08 -Ci.cutoff <- c(0,2000) ## Throw out observations with Ci out of bounds -Tleaf.cutoff <- 1.6 ## How much Tleaf variation to accept in curve data. E.g. 1 +Cond.cutoff <- 0.08 ## Throw out observations with Cond < cutoff. E.g. 0.08 +Ci.cutoff <- c(0, 2000) ## Throw out observations with Ci out of bounds +Tleaf.cutoff <- 1.6 ## How much Tleaf variation to accept in curve data. E.g. 1 # would allow variation of 1 degree around the mean Tleaf # *********************************** QA/QC Options *********************************** # ********************************** DEoptim Options ********************************** ### DEoptim options. Controls the parameter search space for each DEoptim iteration. -lower.bound <- c(5,-5,5) ## Lower bound of Vcmax, Rd, & Jmax -upper.bound <- c(800,50,800) ## Upper bound of Vcmax, Rd, & Jmax -max.iters <- 1000 ## Max iterations -RMSE.min <- 0.1 ## Min value of RMSE to be reached (VTR) during -NP <- 100 ## Number of population members. For many problems it is best to set - ## NP to be at least 10 times the length of the parameter vector. -DWF <- 0.8 ## Differential weighting factor from interval [0,2]. Default to 0.8. -CR <- 0.6 ## Crossover probability from interval [0,1]. Default to 0.5. -# DEoptim optimization. DEoptim will stop when either +lower.bound <- c(5, -5, 5) ## Lower bound of Vcmax, Rd, & Jmax +upper.bound <- c(800, 50, 800) ## Upper bound of Vcmax, Rd, & Jmax +max.iters <- 1000 ## Max iterations +RMSE.min <- 0.1 ## Min value of RMSE to be reached (VTR) during +NP <- 100 ## Number of population members. For many problems it is best to set +## NP to be at least 10 times the length of the parameter vector. +DWF <- 0.8 ## Differential weighting factor from interval [0,2]. Default to 0.8. +CR <- 0.6 ## Crossover probability from interval [0,1]. Default to 0.5. +# DEoptim optimization. DEoptim will stop when either # 1) RMSE=RMSE.min or 2) max iters are reached. -# Should be sufficiently small, e.g. 0.5. Set to +# Should be sufficiently small, e.g. 0.5. Set to # a low number to ensure convergance (e.g. 0.0001) # or if unsure. 
This option can speed up convergence -#see ?DEoptim +# see ?DEoptim # ********************************** DEoptim Options ********************************** # -------------------------------- Temperature adjustment settings --------------------------------- # Taken from Bernacchi et al. 2001. Plant, Cell and Environment. 24, 253-259 # -# & Long, S. P. and C. J. Bernacchi. 2003. Gas exchange measurements, what can -# they tell us about the underlying limitations to photosynthesis? Procedures +# & Long, S. P. and C. J. Bernacchi. 2003. Gas exchange measurements, what can +# they tell us about the underlying limitations to photosynthesis? Procedures # and sources of error. Journal of Experimental Botany 54:2393-2401. # # & Medlyn et al., 2002. Plant, Cell and Environment. 25, 1167-1179 @@ -79,35 +79,37 @@ CR <- 0.6 ## Crossover probability from interval [0,1]. # & Bernacchi et al., 2013. Plant, Cell and Environment. # # -#R <- 0.008314472 ## Ideal gas constant -R <- 8.314 ## Ideal gas constant. J mol-1 K-1 -Oxygen <- 210 ## Oxygen value (ubar) -#Oxygen <- 21 -Kc25 <- 404.9 ## umol m-1 -#Ekc <- 79.430 ## kJ mol-1 -Ekc <- 79430 -Ko25 <- 278.4 ## mmol m-1 -#Eko <- 36.380 ## kJ mol-1 -Eko <- 36380 -Gstar25 <- 42.75 ## umol m-1 -#EGstar <- 37.830 ## kJ mol-1 -EGstar <- 37830 -mm.constants <- list(R=R,Oxygen=Oxygen,Kc25=Kc25,Ekc=Ekc,Ko25=Ko25,Eko=Eko,Gstar25=Gstar25, - EGstar=EGstar) -rm(R,Oxygen,Kc25,Ekc,Ko25,Eko,Gstar25,EGstar) +# R <- 0.008314472 ## Ideal gas constant +R <- 8.314 ## Ideal gas constant. J mol-1 K-1 +Oxygen <- 210 ## Oxygen value (ubar) +# Oxygen <- 21 +Kc25 <- 404.9 ## umol m-1 +# Ekc <- 79.430 ## kJ mol-1 +Ekc <- 79430 +Ko25 <- 278.4 ## mmol m-1 +# Eko <- 36.380 ## kJ mol-1 +Eko <- 36380 +Gstar25 <- 42.75 ## umol m-1 +# EGstar <- 37.830 ## kJ mol-1 +EGstar <- 37830 +mm.constants <- list( + R = R, Oxygen = Oxygen, Kc25 = Kc25, Ekc = Ekc, Ko25 = Ko25, Eko = Eko, Gstar25 = Gstar25, + EGstar = EGstar +) +rm(R, Oxygen, Kc25, Ekc, Ko25, Eko, Gstar25, EGstar) #--------------------------------------------------------------------------------------------------# -#==================================================================================================# +# ==================================================================================================# # End of user defined options. Start of program. 
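# Worked example of the Arrhenius-style temperature scaling these constants feed
# into further below (illustrative numbers only): at Tleaf = 30 C, TleafK = 303.15 K,
#   Kc = Kc25 * exp(Ekc * (TleafK - 298.15) / (298.15 * R * TleafK))
#      = 404.9 * exp(79430 * 5 / (298.15 * 8.314 * 303.15))
#      ~ 404.9 * 1.70 ~ 687   (same units as Kc25)
# Ko and Gstar are scaled the same way with their own activation energies.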
-#==================================================================================================# +# ==================================================================================================# ###################################### START SCRIPT ################################################ -print('**************************************************') -print('**************** STARTING SCRIPT *****************') -print('**************************************************') -print(' ') +print("**************************************************") +print("**************** STARTING SCRIPT *****************") +print("**************************************************") +print(" ") #################################################################################################### @@ -115,42 +117,44 @@ library(DEoptim) #--------------------------------------------------------------------------------------------------# # Load utils and Farquhar functions for data QA/QC and optimization -source(paste(r.functions,'/','photo.processing.functions.R',sep="")) +source(paste(r.functions, "/", "photo.processing.functions.R", sep = "")) #--------------------------------------------------------------------------------------------------# #--------------------------------------------------------------------------------------------------# ### Setup output dirs -dlm = .Platform$file.sep # <--- What is the platform specific delimiter? -aci.fig.dir = paste(out.dir,dlm,"ACi_Diagnostics",sep="") -if (! file.exists(aci.fig.dir)) dir.create(aci.fig.dir) +dlm <- .Platform$file.sep # <--- What is the platform specific delimiter? +aci.fig.dir <- paste(out.dir, dlm, "ACi_Diagnostics", sep = "") +if (!file.exists(aci.fig.dir)) dir.create(aci.fig.dir) #--------------------------------------------------------------------------------------------------# #--------------------------------------------------------------------------------------------------# # Apply data QA/QC functions -#data <- ge.data #For debugging -ge.data.qc <- data.qc(data=ge.data,out.dir=out.dir,Cond.cutoff=Cond.cutoff,Ci.cutoff=Ci.cutoff, - Tleaf.cutoff=Tleaf.cutoff) - -out.qc.ge.data <- data.frame(ge.data.qc$Sample.Info,ge.data.qc$GE.data) -write.csv(out.qc.ge.data,file=paste(out.dir,"/","QC_GE_Data.csv",sep=""),row.names=FALSE) -rm(out.qc.ge.data,ge.data) +# data <- ge.data #For debugging +ge.data.qc <- data.qc( + data = ge.data, out.dir = out.dir, Cond.cutoff = Cond.cutoff, Ci.cutoff = Ci.cutoff, + Tleaf.cutoff = Tleaf.cutoff +) + +out.qc.ge.data <- data.frame(ge.data.qc$Sample.Info, ge.data.qc$GE.data) +write.csv(out.qc.ge.data, file = paste(out.dir, "/", "QC_GE_Data.csv", sep = ""), row.names = FALSE) +rm(out.qc.ge.data, ge.data) #--------------------------------------------------------------------------------------------------# #--------------------------------------------------------------------------------------------------# ### Apply temperature adjustments to MM constants -loc <- match(c("CI","PRESS"),toupper(names(ge.data.qc$GE.data))) -ge.data.qc$GE.data$Ci_Pcorr = ge.data.qc$GE.data[,loc[1]]*ge.data.qc$GE.data[,loc[2]]*0.01 ## Pressure Adj. 
of Ci -loc <- match("TLEAF",toupper(names(ge.data.qc$GE.data))) -TleafK <- ge.data.qc$GE.data[,loc[1]]+273.15 ## Leaf temperature -ge.data.qc$GE.data$Kc <- mm.constants$Kc25*exp(mm.constants$Ekc*(TleafK-298.15)/ - (298.15*mm.constants$R*TleafK)) ## Kc - based on Medlyn et al -ge.data.qc$GE.data$Ko <- mm.constants$Ko25*exp(mm.constants$Eko*(TleafK-298.15)/ - (298.15*mm.constants$R*TleafK)) ## Ko - based on Medlyn et al -ge.data.qc$GE.data$Gstar <- mm.constants$Gstar25*exp(mm.constants$EGstar*(TleafK-298.15)/ - (298.15*mm.constants$R*TleafK)) ## Gamma star. Medlyn et al +loc <- match(c("CI", "PRESS"), toupper(names(ge.data.qc$GE.data))) +ge.data.qc$GE.data$Ci_Pcorr <- ge.data.qc$GE.data[, loc[1]] * ge.data.qc$GE.data[, loc[2]] * 0.01 ## Pressure Adj. of Ci +loc <- match("TLEAF", toupper(names(ge.data.qc$GE.data))) +TleafK <- ge.data.qc$GE.data[, loc[1]] + 273.15 ## Leaf temperature +ge.data.qc$GE.data$Kc <- mm.constants$Kc25 * exp(mm.constants$Ekc * (TleafK - 298.15) / + (298.15 * mm.constants$R * TleafK)) ## Kc - based on Medlyn et al +ge.data.qc$GE.data$Ko <- mm.constants$Ko25 * exp(mm.constants$Eko * (TleafK - 298.15) / + (298.15 * mm.constants$R * TleafK)) ## Ko - based on Medlyn et al +ge.data.qc$GE.data$Gstar <- mm.constants$Gstar25 * exp(mm.constants$EGstar * (TleafK - 298.15) / + (298.15 * mm.constants$R * TleafK)) ## Gamma star. Medlyn et al #--------------------------------------------------------------------------------------------------# @@ -160,166 +164,174 @@ samples <- unique(ge.data.qc$Sample.Info) ### Get data names data.names <- names(ge.data.qc$GE.data) -remove.nms <- match(c("PRESS","PARI","CO2REF","PHOTO","CI","CI_PCORR","KC","KO","GSTAR"),toupper(data.names)) +remove.nms <- match(c("PRESS", "PARI", "CO2REF", "PHOTO", "CI", "CI_PCORR", "KC", "KO", "GSTAR"), toupper(data.names)) data.names <- data.names[-remove.nms] data.names # What to summarize -index <- within(ge.data.qc$GE.data, indx <- as.numeric(interaction(ge.data.qc$Sample.Info, - drop=TRUE,lex.order=TRUE))) -samples <- data.frame(samples,Index=unique(index$indx)) -samples <- samples[order(samples$Index),] -row.names(samples) <- seq(len=nrow(samples)) -samples <- samples[,-match("Index",names(samples))] +index <- within(ge.data.qc$GE.data, indx <- as.numeric(interaction(ge.data.qc$Sample.Info, + drop = TRUE, lex.order = TRUE +))) +samples <- data.frame(samples, Index = unique(index$indx)) +samples <- samples[order(samples$Index), ] +row.names(samples) <- seq(len = nrow(samples)) +samples <- samples[, -match("Index", names(samples))] ### Obs stats -means <- aggregate(.~index$indx,data=index,mean) +means <- aggregate(. 
~ index$indx, data = index, mean)
 means <- means[data.names]
 #--------------------------------------------------------------------------------------------------#
 #--------------------------------------------------------------------------------------------------#
 ### Fit Farquhar model to data
-data <- data.frame(ge.data.qc$Sample.Info,ge.data.qc$GE.data)
+data <- data.frame(ge.data.qc$Sample.Info, ge.data.qc$GE.data)
 
 # Setup outputs
-Rd <- array(data=NA,dim=dim(samples)[1])
-Vcmax <- array(data=NA,dim=dim(samples)[1])
-Jmax <- array(data=NA,dim=dim(samples)[1])
-RMSE.DEoptim <- array(data=NA,dim=dim(samples)[1])
-RMSE.photo <- array(data=NA,dim=dim(samples)[1])
+Rd <- array(data = NA, dim = dim(samples)[1])
+Vcmax <- array(data = NA, dim = dim(samples)[1])
+Jmax <- array(data = NA, dim = dim(samples)[1])
+RMSE.DEoptim <- array(data = NA, dim = dim(samples)[1])
+RMSE.photo <- array(data = NA, dim = dim(samples)[1])
 
 # i = 122
 # i = 44
 # i=2
 # Main outer loop
 system.time(for (i in 1:dim(samples)[1]) {
-#system.time(for (i in 1:10) {
-  sub.data <- merge(data,samples[i,],by=names(samples[i,]))
-  sub.data = sub.data[order(sub.data$Ci_Pcorr),]
+  # system.time(for (i in 1:10) {
+  sub.data <- merge(data, samples[i, ], by = names(samples[i, ]))
+  sub.data <- sub.data[order(sub.data$Ci_Pcorr), ]
   print("--- Processing Sample: ")
-  print(paste(as.vector(unlist(samples[i,])),collapse=" "))
-  
+  print(paste(as.vector(unlist(samples[i, ])), collapse = " "))
+
   # Determine what level of GE processing
-  chk2 <- length(which(sub.data$Ci_Pcorr<Vcmax.cutoff))
-  chk3 <- length(which(sub.data$Ci_Pcorr>Jmax.cutoff))
-  
+  chk2 <- length(which(sub.data$Ci_Pcorr < Vcmax.cutoff))
+  chk3 <- length(which(sub.data$Ci_Pcorr > Jmax.cutoff))
+
   # Check if there are enough obs. Remove curves with to few Vcmax/Jmax samples
-  if (dim(sub.data)[1]<3 | chk2<3 & chk3<3){
+  if (dim(sub.data)[1] < 3 | chk2 < 3 & chk3 < 3) {
     Rd[i] <- -9999
     Vcmax[i] <- -9999
     Jmax[i] <- -9999
     RMSE.DEoptim[i] <- -9999
     RMSE.photo[i] <- -9999
-    
   } else {
     Ci <- sub.data$Ci_Pcorr
     Oxy <- mm.constants$Oxygen
     Kc <- sub.data$Kc
     Ko <- sub.data$Ko
     Gstar <- sub.data$Gstar
-    Km <- Kc*(1+(Oxy/Ko))
-    Photo <- sub.data[,match("PHOTO",toupper(names(sub.data)))] # Find photo data
+    Km <- Kc * (1 + (Oxy / Ko))
+    Photo <- sub.data[, match("PHOTO", toupper(names(sub.data)))] # Find photo data
     f.model <- NA # Type variable for defining plots
-    
+
     # Vcmax and Jmax
-    if (chk2>=3 & chk3>=3){
+    if (chk2 >= 3 & chk3 >= 3) {
       f.model <- 1
-      fit <- DEoptim(ACi.full,lower=lower.bound, upper=upper.bound, DEoptim.control(NP=NP,
-                    F=DWF,CR=CR, itermax=max.iters, VTR=RMSE.min, strategy=2, trace=FALSE,
-                    storepopfrom = 1,storepopfreq = 5))
+      fit <- DEoptim(ACi.full, lower = lower.bound, upper = upper.bound, DEoptim.control(
+        NP = NP,
+        F = DWF, CR = CR, itermax = max.iters, VTR = RMSE.min, strategy = 2, trace = FALSE,
+        storepopfrom = 1, storepopfreq = 5
+      ))
       tempout <- data.frame(fit$optim)
-      Vcmax[i] <- tempout[1,1]
-      Rd[i] <- tempout[2,1]
-      Jmax[i] <- tempout[3,1]
+      Vcmax[i] <- tempout[1, 1]
+      Rd[i] <- tempout[2, 1]
+      Jmax[i] <- tempout[3, 1]
       RMSE.DEoptim[i] <- fit$optim$bestval
-      fitted_photo = pmin(((Vcmax[i]*(Ci-Gstar))/(Ci+Km))-Rd[i],
-                          ((Jmax[i]*(Ci-Gstar))/((4.5*Ci)+(10.5*Gstar)))-Rd[i])
-      residuals = fitted_photo-Photo
-      RMSE.photo[i] = sqrt(mean((residuals)^2))
-      
-      ### Display info to console
-      print(paste("Vcmax: ",round(Vcmax[i],2)))
-      print(paste("Jmax: ",round(Jmax[i],2)))
-      print(paste("Rd: ",round(Rd[i],4)))
-      print(paste("DEoptim RMSE: ",round(RMSE.DEoptim[i],2)))
-      print(paste("Photo RMSE: ",round(RMSE.photo[i],2)))
      print("-----------------------------------------------------------")
      flush.console()
-      
+      
fitted_photo <- pmin( + ((Vcmax[i] * (Ci - Gstar)) / (Ci + Km)) - Rd[i], + ((Jmax[i] * (Ci - Gstar)) / ((4.5 * Ci) + (10.5 * Gstar))) - Rd[i] + ) + residuals <- fitted_photo - Photo + RMSE.photo[i] <- sqrt(mean((residuals)^2)) + + ### Display info to console + print(paste("Vcmax: ", round(Vcmax[i], 2))) + print(paste("Jmax: ", round(Jmax[i], 2))) + print(paste("Rd: ", round(Rd[i], 4))) + print(paste("DEoptim RMSE: ", round(RMSE.DEoptim[i], 2))) + print(paste("Photo RMSE: ", round(RMSE.photo[i], 2))) print("-----------------------------------------------------------") flush.console() - + # Vcmax only - } else if (chk2>=3 & chk3<3) { + } else if (chk2 >= 3 & chk3 < 3) { f.model <- 2 - fit <- DEoptim(ACi.rubisco,lower=lower.bound[1:2], upper=upper.bound[1:2], DEoptim.control(NP=NP, - F=DWF,CR=CR, itermax=max.iters, VTR=RMSE.min, strategy=2, trace=FALSE, - storepopfrom = 1,storepopfreq = 5)) + fit <- DEoptim(ACi.rubisco, lower = lower.bound[1:2], upper = upper.bound[1:2], DEoptim.control( + NP = NP, + F = DWF, CR = CR, itermax = max.iters, VTR = RMSE.min, strategy = 2, trace = FALSE, + storepopfrom = 1, storepopfreq = 5 + )) tempout <- data.frame(fit$optim) - Vcmax[i] <- tempout[1,1] - Rd[i] <- tempout[2,1] + Vcmax[i] <- tempout[1, 1] + Rd[i] <- tempout[2, 1] Jmax[i] <- -9999 RMSE.DEoptim[i] <- fit$optim$bestval - fitted_photo = ((Vcmax[i]*(Ci-Gstar))/(Ci+Km))-Rd[i] - residuals = fitted_photo-Photo - RMSE.photo[i] = sqrt(mean((residuals)^2)) - - ### Display info to console - print(paste("Vcmax: ",round(Vcmax[i],2))) - print(paste("Jmax: ",round(Jmax[i],2))) - print(paste("Rd: ",round(Rd[i],4))) - print(paste("DEoptim RMSE: ",round(RMSE.DEoptim[i],2))) - print(paste("Photo RMSE: ",round(RMSE.photo[i],2))) + fitted_photo <- ((Vcmax[i] * (Ci - Gstar)) / (Ci + Km)) - Rd[i] + residuals <- fitted_photo - Photo + RMSE.photo[i] <- sqrt(mean((residuals)^2)) + + ### Display info to console + print(paste("Vcmax: ", round(Vcmax[i], 2))) + print(paste("Jmax: ", round(Jmax[i], 2))) + print(paste("Rd: ", round(Rd[i], 4))) + print(paste("DEoptim RMSE: ", round(RMSE.DEoptim[i], 2))) + print(paste("Photo RMSE: ", round(RMSE.photo[i], 2))) print("-----------------------------------------------------------") flush.console() - + # Jmax only - } else if (chk2<3 & chk3>=3){ + } else if (chk2 < 3 & chk3 >= 3) { f.model <- 3 - fit <- DEoptim(ACi.rubp,lower=lower.bound[3:2], upper=upper.bound[3:2], DEoptim.control(NP=NP, - F=DWF,CR=CR, itermax=max.iters, VTR=RMSE.min, strategy=2, trace=FALSE, - storepopfrom = 1,storepopfreq = 5)) + fit <- DEoptim(ACi.rubp, lower = lower.bound[3:2], upper = upper.bound[3:2], DEoptim.control( + NP = NP, + F = DWF, CR = CR, itermax = max.iters, VTR = RMSE.min, strategy = 2, trace = FALSE, + storepopfrom = 1, storepopfreq = 5 + )) tempout <- data.frame(fit$optim) Vcmax[i] <- -9999 - Jmax[i] <- tempout[1,1] - Rd[i] <- tempout[2,1] + Jmax[i] <- tempout[1, 1] + Rd[i] <- tempout[2, 1] RMSE.DEoptim[i] <- fit$optim$bestval - fitted_photo = ((Jmax[i]*(Ci-Gstar))/((4.5*Ci)+(10.5*Gstar)))-Rd[i] - residuals = fitted_photo-Photo - RMSE.photo[i] = sqrt(mean((residuals)^2)) - - ### Display info to console - print(paste("Vcmax: ",round(Vcmax[i],2))) - print(paste("Jmax: ",round(Jmax[i],2))) - print(paste("Rd: ",round(Rd[i],4))) - print(paste("DEoptim RMSE: ",round(RMSE.DEoptim[i],2))) - print(paste("Photo RMSE: ",round(RMSE.photo[i],2))) + fitted_photo <- ((Jmax[i] * (Ci - Gstar)) / ((4.5 * Ci) + (10.5 * Gstar))) - Rd[i] + residuals <- fitted_photo - Photo + RMSE.photo[i] <- sqrt(mean((residuals)^2)) + 
+      ### Display info to console
+      print(paste("Vcmax: ", round(Vcmax[i], 2)))
+      print(paste("Jmax: ", round(Jmax[i], 2)))
+      print(paste("Rd: ", round(Rd[i], 4)))
+      print(paste("DEoptim RMSE: ", round(RMSE.DEoptim[i], 2)))
+      print(paste("Photo RMSE: ", round(RMSE.photo[i], 2)))
      print("-----------------------------------------------------------")
      flush.console()
-      
-  } # End processing if/else
-  
+  } # End processing if/else
+
  ### Generate fit diagnostics
-  #if (length(which(sub.data$Ci_Pcorr<Vcmax.cutoff))>=3){
-  if (chk2 >=3 | chk3 >=3){
-    sample.name <- paste(unlist(samples[i,]),collapse="_")
-    sample.name <- gsub(pattern="/","-",sample.name)
-    params <- data.frame(Vcmax=Vcmax[i],Jmax=Jmax[i],Rd=Rd[i],RMSE.photo[i])
-    plot.ge.fit(type="A-Ci",sub.data,fit,params,aci.fig.dir,sample.name,f.model)
+  # if (length(which(sub.data$Ci_Pcorr<Vcmax.cutoff))>=3){
+  if (chk2 >= 3 | chk3 >= 3) {
+    sample.name <- paste(unlist(samples[i, ]), collapse = "_")
+    sample.name <- gsub(pattern = "/", "-", sample.name)
+    params <- data.frame(Vcmax = Vcmax[i], Jmax = Jmax[i], Rd = Rd[i], RMSE.photo[i])
+    plot.ge.fit(type = "A-Ci", sub.data, fit, params, aci.fig.dir, sample.name, f.model)
    rm(f.model)
  }
-  
  } # End total observations if/else check
-  rm(chk2,chk3)
-}) #End of outer main loop
+  rm(chk2, chk3)
+}) # End of outer main loop
 #--------------------------------------------------------------------------------------------------#
 #--------------------------------------------------------------------------------------------------#
 ### Generate output
-output.data <- data.frame(samples,means,Vcmax=Vcmax,Jmax=Jmax,Rd=Rd,RMSE.DEoptim=RMSE.DEoptim,
-                          RMSE.photo=RMSE.photo)
-output.dataset <- paste(strsplit(dataset,".csv"),".processed.csv",sep="")
-write.csv(output.data,file=paste(out.dir,"/",output.dataset,sep=""),row.names=FALSE)
-#rm(list=ls(all=TRUE)) # clear workspace
+output.data <- data.frame(samples, means,
  Vcmax = Vcmax, Jmax = Jmax, Rd = Rd, RMSE.DEoptim = RMSE.DEoptim,
  RMSE.photo = RMSE.photo
+)
+output.dataset <- paste(strsplit(dataset, ".csv"), ".processed.csv", sep = "")
+write.csv(output.data, file = paste(out.dir, "/", output.dataset, sep = ""), row.names = FALSE)
+# rm(list=ls(all=TRUE)) # clear workspace
 #--------------------------------------------------------------------------------------------------#
diff --git a/modules/photosynthesis/code/serbin.simple.code/photo.processing.functions.R b/modules/photosynthesis/code/serbin.simple.code/photo.processing.functions.R
index 75eb801bb5b..14a508c5e33 100644
--- a/modules/photosynthesis/code/serbin.simple.code/photo.processing.functions.R
+++ b/modules/photosynthesis/code/serbin.simple.code/photo.processing.functions.R
@@ -11,117 +11,122 @@
 ##' @param Cond.cutoff cutoff for low conductance. set previously
 ##' @param Ci.cutoff cutoff for nonsensical Cis
 ##' @param Tleaf.cutoff cutoff for individual Tleaf variation from mean
-##' 
+##'
 ##' @author Shawn P. Serbin
-##' 
-data.qc <- function(data=NULL,out.dir=NULL,Cond.cutoff=NULL,Ci.cutoff=NULL,
-                    Tleaf.cutoff=NULL){
-  
+##'
+data.qc <- function(data = NULL, out.dir = NULL, Cond.cutoff = NULL, Ci.cutoff = NULL,
+                    Tleaf.cutoff = NULL) {
  ### Remove samples not passing initial QC
-  loc <- match("QC",toupper(names(data)))
-  remove <- which(data[loc]==1)
-  if(length(remove)>0){
-    data <- data[-remove,]
+  loc <- match("QC", toupper(names(data)))
+  remove <- which(data[loc] == 1)
+  if (length(remove) > 0) {
+    data <- data[-remove, ]
  }
-  rm(loc,remove)
-  
+  rm(loc, remove)
+
  ### Remove QC and comments columns (if exists)
-  pattern <- c("QC","COMMENTS")
+  pattern <- c("QC", "COMMENTS")
  x <- toupper(names(data))
-  remove <- match(pattern,x)
-  if (length(remove)>0){
-    data <- data[,-remove]
+  remove <- match(pattern, x)
+  if (length(remove) > 0) {
+    data <- data[, -remove]
  }
-  rm(pattern,x,remove)
-  
+  rm(pattern, x, remove)
+
  ### Find data columns
-  pattern <- c("Tair","Tleaf","deltaT","RH_R","RH_S","PRESS","PARi","CO2Ref","PHOTO","COND","Ci")
+  pattern <- c("Tair", "Tleaf", "deltaT", "RH_R", "RH_S", "PRESS", "PARi", "CO2Ref", "PHOTO", "COND", "Ci")
  pattern <- toupper(pattern)
  x <- toupper(names(data))
-  keep <- match(pattern,x)
-  
+  keep <- match(pattern, x)
+
  ### Extract sample info
-  sample.info <- data[,-keep]
-  data <- data[,keep]
-  
+  sample.info <- data[, -keep]
+  data <- data[, keep]
+
  # Clean up sample info, remove any white spaces
-  temp <- as.data.frame(lapply(sample.info,gsub,pattern=" ",replacement=""))
+  temp <- as.data.frame(lapply(sample.info, gsub, pattern = " ", replacement = ""))
  sample.info <- temp
-  rm(pattern,x,keep,temp)
-  
+  rm(pattern, x, keep, temp)
+
  ### Apply QC filters to data
  dims <- dim(data)
-  loc <- match(c("COND","CI"),toupper(names(data)))
-  cond.check <- which(data[,loc[1]]<Cond.cutoff)
-  ci.check <- which(data[,loc[2]]<Ci.cutoff[1] | data[,loc[2]]>Ci.cutoff[2])
-  all.check <- which(data[,loc[2]]<Ci.cutoff[1] | data[,loc[2]]>Ci.cutoff[2] | data[,loc[1]]<Cond.cutoff)
-  #nms <- unique(sample.info[check1,])
-  nms1 <- sample.info[cond.check,]
-  vals1 <- data[cond.check,loc[1]]
-  nms2 <- sample.info[ci.check,]
-  vals2 <- data[ci.check,loc[2]]
-  
+  loc <- match(c("COND", "CI"), toupper(names(data)))
+  cond.check <- which(data[, loc[1]] < Cond.cutoff)
+  ci.check <- which(data[, loc[2]] < Ci.cutoff[1] | data[, loc[2]] > Ci.cutoff[2])
+  all.check <- which(data[, loc[2]] < Ci.cutoff[1] | data[, loc[2]] > Ci.cutoff[2] | data[, loc[1]] < Cond.cutoff)
+  # nms <- unique(sample.info[check1,])
+  nms1 <- sample.info[cond.check, ]
+  vals1 <- data[cond.check, loc[1]]
+  nms2 <- sample.info[ci.check, ]
+  vals2 <- data[ci.check, loc[2]]
+
  # Tleaf check
-  if (!is.null(Tleaf.cutoff)){
-    temp.data <- data.frame(sample.info,data)
+  if (!is.null(Tleaf.cutoff)) {
+    temp.data <- data.frame(sample.info, data)
    # Create unique index for each sample group
-    temp.data.index <- within(temp.data, indx <- as.numeric(interaction(sample.info,
-                                                                        drop=TRUE,lex.order=TRUE)))
-    #Mean.Tleaf <- aggregate(Tleaf~indx,data=temp.data.index,mean)
-    #Stdev.Tleaf <- aggregate(Tleaf~indx,data=temp.data.index,sd)
-    #names(Mean.Tleaf) <- c("indx","Mean.Tleaf")
-    #names(Stdev.Tleaf) <- c("indx","Stdev.Tleaf")
-    #aggregate(Tleaf~indx,data=temp.data.index, FUN = function(x) quantile(x, probs = c(0.05,0.95)))
-    stats <- data.frame(Mean.Tleaf=aggregate(Tleaf~indx,data=temp.data.index,mean),
-                        Stdev.Tleaf=aggregate(Tleaf~indx,data=temp.data.index,sd),
-                        CI05=aggregate(Tleaf~indx,data=temp.data.index,
-                                       FUN = function(x) quantile(x, probs = 0.05)),
-                        CI95=aggregate(Tleaf~indx,data=temp.data.index,
-                                       FUN = function(x) quantile(x, probs = 0.95)))
-    stats <- stats[,-c(3,5,7)]
-    names(stats) <- c("indx","Mean.Tleaf","Stdev.Tleaf","L05.Tleaf","U95.Tleaf")
-    #temp.data2 <- merge(temp.data.index,Mean.Tleaf,by="indx",sort=F) # Preserve original order
-    temp.data2 <- merge(temp.data.index,stats,by="indx",sort=F) # Preserve original order
-    loc <- match(c("INDX"),toupper(names(temp.data2)))
-    temp.data2 <- 
temp.data2[,-loc[1]] - loc <- match(c("TLEAF","MEAN.TLEAF"),toupper(names(temp.data2))) - Tleaf.check <- which(temp.data2[,loc[1]] < temp.data2[,loc[2]]-Tleaf.cutoff | - temp.data2[,loc[1]] > temp.data2[,loc[2]]+Tleaf.cutoff) - nms3 <- sample.info[Tleaf.check,] - vals3 <- temp.data2[Tleaf.check,loc[1]] - vals4 <- temp.data2[Tleaf.check,loc[2]] - temp3 <- data.frame(nms3,Mean.Tleaf=vals4,Tleaf=vals3) - + temp.data.index <- within(temp.data, indx <- as.numeric(interaction(sample.info, + drop = TRUE, lex.order = TRUE + ))) + # Mean.Tleaf <- aggregate(Tleaf~indx,data=temp.data.index,mean) + # Stdev.Tleaf <- aggregate(Tleaf~indx,data=temp.data.index,sd) + # names(Mean.Tleaf) <- c("indx","Mean.Tleaf") + # names(Stdev.Tleaf) <- c("indx","Stdev.Tleaf") + # aggregate(Tleaf~indx,data=temp.data.index, FUN = function(x) quantile(x, probs = c(0.05,0.95))) + stats <- data.frame( + Mean.Tleaf = aggregate(Tleaf ~ indx, data = temp.data.index, mean), + Stdev.Tleaf = aggregate(Tleaf ~ indx, data = temp.data.index, sd), + CI05 = aggregate(Tleaf ~ indx, + data = temp.data.index, + FUN = function(x) quantile(x, probs = 0.05) + ), + CI95 = aggregate(Tleaf ~ indx, + data = temp.data.index, + FUN = function(x) quantile(x, probs = 0.95) + ) + ) + stats <- stats[, -c(3, 5, 7)] + names(stats) <- c("indx", "Mean.Tleaf", "Stdev.Tleaf", "L05.Tleaf", "U95.Tleaf") + # temp.data2 <- merge(temp.data.index,Mean.Tleaf,by="indx",sort=F) # Preserve original order + temp.data2 <- merge(temp.data.index, stats, by = "indx", sort = F) # Preserve original order + loc <- match(c("INDX"), toupper(names(temp.data2))) + temp.data2 <- temp.data2[, -loc[1]] + loc <- match(c("TLEAF", "MEAN.TLEAF"), toupper(names(temp.data2))) + Tleaf.check <- which(temp.data2[, loc[1]] < temp.data2[, loc[2]] - Tleaf.cutoff | + temp.data2[, loc[1]] > temp.data2[, loc[2]] + Tleaf.cutoff) + nms3 <- sample.info[Tleaf.check, ] + vals3 <- temp.data2[Tleaf.check, loc[1]] + vals4 <- temp.data2[Tleaf.check, loc[2]] + temp3 <- data.frame(nms3, Mean.Tleaf = vals4, Tleaf = vals3) + # update all check flags - all.check <- unique(c(all.check,Tleaf.check)) + all.check <- unique(c(all.check, Tleaf.check)) } # end if ### Output QA/QC info - temp1 <- data.frame(nms1,Cond=vals1) - temp2 <- data.frame(nms2,Ci=vals2) + temp1 <- data.frame(nms1, Cond = vals1) + temp2 <- data.frame(nms2, Ci = vals2) - if (dim(temp1)[1]>0){ - write.csv(temp1,file=paste(out.dir,"/","Failed_QC_Conductance_Check.csv",sep=""),row.names=FALSE) + if (dim(temp1)[1] > 0) { + write.csv(temp1, file = paste(out.dir, "/", "Failed_QC_Conductance_Check.csv", sep = ""), row.names = FALSE) } - if (dim(temp2)[1]>0){ - write.csv(temp2,file=paste(out.dir,"/","Failed_QC_Ci_Cutoff_Check.csv",sep=""),row.names=FALSE) + if (dim(temp2)[1] > 0) { + write.csv(temp2, file = paste(out.dir, "/", "Failed_QC_Ci_Cutoff_Check.csv", sep = ""), row.names = FALSE) } - if (dim(temp3)[1]>0){ - write.csv(temp3,file=paste(out.dir,"/","Failed_QC_Temperature_Cutoff_Check.csv",sep=""),row.names=FALSE) + if (dim(temp3)[1] > 0) { + write.csv(temp3, file = paste(out.dir, "/", "Failed_QC_Temperature_Cutoff_Check.csv", sep = ""), row.names = FALSE) } - + ### Remove bad data - if (length(all.check>0)){ - data <- data[-all.check,] - sample.info <- sample.info[-all.check,] + if (length(all.check > 0)) { + data <- data[-all.check, ] + sample.info <- sample.info[-all.check, ] } - row.names(data) <- seq(len=nrow(data)) - row.names(sample.info) <- seq(len=nrow(sample.info)) - 
rm(dims,loc,cond.check,ci.check,all.check,nms1,vals1,nms2,vals2,temp1,temp2,Ci.cutoff,Cond.cutoff)
-  
+  row.names(data) <- seq(len = nrow(data))
+  row.names(sample.info) <- seq(len = nrow(sample.info))
+  rm(dims, loc, cond.check, ci.check, all.check, nms1, vals1, nms2, vals2, temp1, temp2, Ci.cutoff, Cond.cutoff)
+
  ### Return QC data and sample info
-  return(list(Sample.Info=sample.info,GE.data=data))
-  
+  return(list(Sample.Info = sample.info, GE.data = data))
 }
 #--------------------------------------------------------------------------------------------------#
@@ -129,45 +134,45 @@ data.qc <- function(data=NULL,out.dir=NULL,Cond.cutoff=NULL,Ci.cutoff=NULL,
 #--------------------------------------------------------------------------------------------------#
 ##'
 ##' Farquhar functions
-##' 
+##'
 ##' @author Shawn P. Serbin
-##' 
+##'
 # Rd and Vcmax only
 ACi.rubisco <- function(x) {
-  keep <- which(CiJmax.cutoff)
-  Jmax <- x[1] ## Jmax param
-  Rd <- x[2] ## Resp param
-  
-  Aj <- Jmax*(Ci[keep]-Gstar[keep])/((4.5*Ci[keep])+(10.5*Gstar[keep]))
-  Assim <- pmin(Aj)-Rd
-  RMSE <- sqrt(mean((Photo[keep]-Assim)^2)) ## RMSE cost function
+ACi.rubp <- function(x) {
+  keep <- which(Ci > Jmax.cutoff)
+  Jmax <- x[1] ## Jmax param
+  Rd <- x[2] ## Resp param
+
+  Aj <- Jmax * (Ci[keep] - Gstar[keep]) / ((4.5 * Ci[keep]) + (10.5 * Gstar[keep]))
+  Assim <- pmin(Aj) - Rd
+  RMSE <- sqrt(mean((Photo[keep] - Assim)^2)) ## RMSE cost function
   return(RMSE)
 }
 
 # Vcmax, Rd, and Jmax
 ACi.full <- function(x) {
-  Vcmax <- x[1] ## Vcmax param
-  Rd <- x[2] ## Resp param
-  Jmax <- x[3] ## Jmax
-  
-  Ac <- ifelse(Ci<=Vcmax.cutoff,(Vcmax*(Ci-Gstar)/(Ci + Km))-Rd,9999)
-  inter <- ifelse((Ci>Vcmax.cutoff & Ci<Jmax.cutoff),Photo,9999)
-  Aj <- ifelse(Ci>=Jmax.cutoff,(Jmax*(Ci-Gstar)/((4.5*Ci)+(10.5*Gstar)))-Rd,9999)
-  #Aj <- ifelse(Ci>=Jmax.cutoff,(Jmax*(Ci-Gstar)/((4.5*Ci)+(10.5*Gstar))),9999)
-  Assim <- pmin(Ac,Aj,inter)
-  RMSE <- sqrt(mean((Photo-Assim)^2)) ## RMSE cost function
+  Vcmax <- x[1] ## Vcmax param
+  Rd <- x[2] ## Resp param
+  Jmax <- x[3] ## Jmax
+
+  Ac <- ifelse(Ci <= Vcmax.cutoff, (Vcmax * (Ci - Gstar) / (Ci + Km)) - Rd, 9999)
+  inter <- ifelse((Ci > Vcmax.cutoff & Ci < Jmax.cutoff), Photo, 9999)
+  Aj <- ifelse(Ci >= Jmax.cutoff, (Jmax * (Ci - Gstar) / ((4.5 * Ci) + (10.5 * Gstar))) - Rd, 9999)
+  # Aj <- ifelse(Ci>=Jmax.cutoff,(Jmax*(Ci-Gstar)/((4.5*Ci)+(10.5*Gstar))),9999)
+  Assim <- pmin(Ac, Aj, inter)
+  RMSE <- sqrt(mean((Photo - Assim)^2)) ## RMSE cost function
   return(RMSE)
 }
@@ -180,18 +185,18 @@ A.Q <- function(x) {
 #--------------------------------------------------------------------------------------------------#
 ##'
-##' GE Diagnostic plots 
+##' GE Diagnostic plots
 ##' @param type which type of data to invert. A-Ci or A-Q
 ##' @param data A-Ci data used for model fit
 ##' @param DEoptim.output DEoptim output structure
-##' @param params 
+##' @param params
 ##' @param outdir output directory for A-Ci diagnostic figures
 ##' @param file output filename
 ##' @param f.model full, vcmax, or jmax
-##' 
+##'
 ##' @author Shawn P.
Serbin -##' -plot.ge.fit <- function(type=c("A-Ci","A-Q"),data,DEoptim.output,params,outdir,file,f.model){ +##' +plot.ge.fit <- function(type = c("A-Ci", "A-Q"), data, DEoptim.output, params, outdir, file, f.model) { type <- match.arg(type) sep <- .Platform$file.sep param.length <- length(DEoptim.output$optim$bestmem) @@ -199,104 +204,129 @@ plot.ge.fit <- function(type=c("A-Ci","A-Q"),data,DEoptim.output,params,outdir,f ### Plot params cexaxis <- 1.2 cexlab <- 1.4 - - if (type=="A-Ci"){ - loc1 <- match(c("CI","PHOTO"),toupper(names(data))) - + + if (type == "A-Ci") { + loc1 <- match(c("CI", "PHOTO"), toupper(names(data))) + # Figures - pdf(paste(outdir,sep,file,".pdf",sep=""),height=8,width=10) - plot(data[,loc1[1]],data[,loc1[2]],pch=21,bg="grey70",cex=3,cex.axis=cexaxis,xlim=c(0,range(data[,loc1[1]])[2]), - ylim=c(0,range(data[,loc1[2]])[2]),cex.lab=cexlab,xlab="Ci",ylab="Photo",main=paste(file)) - box(lwd=2.2) - + pdf(paste(outdir, sep, file, ".pdf", sep = ""), height = 8, width = 10) + plot(data[, loc1[1]], data[, loc1[2]], + pch = 21, bg = "grey70", cex = 3, cex.axis = cexaxis, xlim = c(0, range(data[, loc1[1]])[2]), + ylim = c(0, range(data[, loc1[2]])[2]), cex.lab = cexlab, xlab = "Ci", ylab = "Photo", main = paste(file) + ) + box(lwd = 2.2) + Oxygen <- mm.constants$Oxygen - loc2 <- match(c("KC","KO","GSTAR","TLEAF"),toupper(names(data))) - Kc <- mean(data[,loc2[1]]) - Ko <- mean(data[,loc2[2]]) - Gstar <- mean(data[,loc2[3]]) - - plot.x <- (data[,loc1[1]]-Gstar)/(data[,loc1[1]]+(Kc*(1+Oxygen/Ko))) - plot(plot.x,data[,loc1[2]],pch=21,bg="grey70",cex=3,cex.axis=cexaxis,xlim=c(0,range(plot.x)[2]), - ylim=c(0,range(data[,loc1[2]])[2]),cex.lab=cexlab,xlab="Ci-Gstar/Ci+Km",ylab="Photo",main=paste(file)) - box(lwd=2.2) + loc2 <- match(c("KC", "KO", "GSTAR", "TLEAF"), toupper(names(data))) + Kc <- mean(data[, loc2[1]]) + Ko <- mean(data[, loc2[2]]) + Gstar <- mean(data[, loc2[3]]) + + plot.x <- (data[, loc1[1]] - Gstar) / (data[, loc1[1]] + (Kc * (1 + Oxygen / Ko))) + plot(plot.x, data[, loc1[2]], + pch = 21, bg = "grey70", cex = 3, cex.axis = cexaxis, xlim = c(0, range(plot.x)[2]), + ylim = c(0, range(data[, loc1[2]])[2]), cex.lab = cexlab, xlab = "Ci-Gstar/Ci+Km", ylab = "Photo", main = paste(file) + ) + box(lwd = 2.2) # DEoptim trace plots - if (f.model==1){ - par(mfrow=c(3,1),mar=c(4,4.1,1,2)) #b, l, t, r - plot(DEoptim.output$member$bestmemit[,1],pch=21,bg="dark grey",col="dark grey", - cex=1.2,xlab="Iteration",ylab="Vcmax",cex.axis=cexaxis,cex.lab=cexlab) - lines(DEoptim.output$member$bestmemit[,1],lty=2,lwd=1.8) - box(lwd=2.2) - plot(DEoptim.output$member$bestmemit[,2],pch=21,bg="dark grey",col="dark grey", - cex=1.2,xlab="Iteration",ylab="Rd",cex.axis=cexaxis,cex.lab=cexlab) - lines(DEoptim.output$member$bestmemit[,2],lty=2,lwd=1.8) - box(lwd=2.2) - plot(DEoptim.output$member$bestmemit[,3],pch=21,bg="dark grey",col="dark grey", - cex=1.2,xlab="Iteration",ylab="Jmax",cex.axis=cexaxis,cex.lab=cexlab) - lines(DEoptim.output$member$bestmemit[,3],lty=2,lwd=1.8) - box(lwd=2.2) - } else if (f.model==2){ - par(mfrow=c(3,1),mar=c(4,4.1,1,2)) #b, l, t, r - plot(DEoptim.output$member$bestmemit[,1],pch=21,bg="dark grey",col="dark grey", - cex=1.2,xlab="Iteration",ylab="Vcmax",cex.axis=cexaxis,cex.lab=cexlab) - lines(DEoptim.output$member$bestmemit[,1],lty=2,lwd=1.8) - box(lwd=2.2) - plot(DEoptim.output$member$bestmemit[,2],pch=21,bg="dark grey",col="dark grey", - cex=1.2,xlab="Iteration",ylab="Rd",cex.axis=cexaxis,cex.lab=cexlab) - lines(DEoptim.output$member$bestmemit[,2],lty=2,lwd=1.8) - 
box(lwd=2.2) - } else if (f.model==3){ - par(mfrow=c(3,1),mar=c(4,4.1,1,2)) #b, l, t, r - plot(DEoptim.output$member$bestmemit[,1],pch=21,bg="dark grey",col="dark grey", - cex=1.2,xlab="Iteration",ylab="Jmax",cex.axis=cexaxis,cex.lab=cexlab) - lines(DEoptim.output$member$bestmemit[,1],lty=2,lwd=1.8) - box(lwd=2.2) - plot(DEoptim.output$member$bestmemit[,2],pch=21,bg="dark grey",col="dark grey", - cex=1.2,xlab="Iteration",ylab="Rd",cex.axis=cexaxis,cex.lab=cexlab) - lines(DEoptim.output$member$bestmemit[,2],lty=2,lwd=1.8) - box(lwd=2.2) + if (f.model == 1) { + par(mfrow = c(3, 1), mar = c(4, 4.1, 1, 2)) # b, l, t, r + plot(DEoptim.output$member$bestmemit[, 1], + pch = 21, bg = "dark grey", col = "dark grey", + cex = 1.2, xlab = "Iteration", ylab = "Vcmax", cex.axis = cexaxis, cex.lab = cexlab + ) + lines(DEoptim.output$member$bestmemit[, 1], lty = 2, lwd = 1.8) + box(lwd = 2.2) + plot(DEoptim.output$member$bestmemit[, 2], + pch = 21, bg = "dark grey", col = "dark grey", + cex = 1.2, xlab = "Iteration", ylab = "Rd", cex.axis = cexaxis, cex.lab = cexlab + ) + lines(DEoptim.output$member$bestmemit[, 2], lty = 2, lwd = 1.8) + box(lwd = 2.2) + plot(DEoptim.output$member$bestmemit[, 3], + pch = 21, bg = "dark grey", col = "dark grey", + cex = 1.2, xlab = "Iteration", ylab = "Jmax", cex.axis = cexaxis, cex.lab = cexlab + ) + lines(DEoptim.output$member$bestmemit[, 3], lty = 2, lwd = 1.8) + box(lwd = 2.2) + } else if (f.model == 2) { + par(mfrow = c(3, 1), mar = c(4, 4.1, 1, 2)) # b, l, t, r + plot(DEoptim.output$member$bestmemit[, 1], + pch = 21, bg = "dark grey", col = "dark grey", + cex = 1.2, xlab = "Iteration", ylab = "Vcmax", cex.axis = cexaxis, cex.lab = cexlab + ) + lines(DEoptim.output$member$bestmemit[, 1], lty = 2, lwd = 1.8) + box(lwd = 2.2) + plot(DEoptim.output$member$bestmemit[, 2], + pch = 21, bg = "dark grey", col = "dark grey", + cex = 1.2, xlab = "Iteration", ylab = "Rd", cex.axis = cexaxis, cex.lab = cexlab + ) + lines(DEoptim.output$member$bestmemit[, 2], lty = 2, lwd = 1.8) + box(lwd = 2.2) + } else if (f.model == 3) { + par(mfrow = c(3, 1), mar = c(4, 4.1, 1, 2)) # b, l, t, r + plot(DEoptim.output$member$bestmemit[, 1], + pch = 21, bg = "dark grey", col = "dark grey", + cex = 1.2, xlab = "Iteration", ylab = "Jmax", cex.axis = cexaxis, cex.lab = cexlab + ) + lines(DEoptim.output$member$bestmemit[, 1], lty = 2, lwd = 1.8) + box(lwd = 2.2) + plot(DEoptim.output$member$bestmemit[, 2], + pch = 21, bg = "dark grey", col = "dark grey", + cex = 1.2, xlab = "Iteration", ylab = "Rd", cex.axis = cexaxis, cex.lab = cexlab + ) + lines(DEoptim.output$member$bestmemit[, 2], lty = 2, lwd = 1.8) + box(lwd = 2.2) } # A-Ci diagnostic fit fig - plotCi = seq(-2,1800,2) + plotCi <- seq(-2, 1800, 2) Oxygen <- mm.constants$Oxygen - loc2 <- match(c("KC","KO","GSTAR","TLEAF"),toupper(names(data))) - Kc <- mean(data[,loc2[1]]) - Ko <- mean(data[,loc2[2]]) - Gstar <- mean(data[,loc2[3]]) - Tleaf <- mean(data[,loc2[4]]) + loc2 <- match(c("KC", "KO", "GSTAR", "TLEAF"), toupper(names(data))) + Kc <- mean(data[, loc2[1]]) + Ko <- mean(data[, loc2[2]]) + Gstar <- mean(data[, loc2[3]]) + Tleaf <- mean(data[, loc2[4]]) Vcmax.plot <- params[[1]] Jmax.plot <- params[[2]] - Rd.plot <-params[[3]] + Rd.plot <- params[[3]] RMSE.plot <- params[[4]] - - par(mfrow=c(1,1),mar=c(5,5,2,1)) # bot, left - ylim <- range(data[,loc1[2]]) - plot(data[,loc1[1]],data[,loc1[2]], main=paste(file), xlab="Ci", ylab="Photo", cex.lab=2,cex=1.8, - xlim=c(1.1,1500),ylim=c(0,ylim[2]+3)) - legend("bottomright",legend=c(paste("Tleaf 
=",round(Tleaf,2)),paste("Vcmax =",round(Vcmax.plot,2)), - paste("Jmax = ",round(Jmax.plot,2)),paste("Rd = ",round(Rd.plot,4)), - paste("RMSE = ",round(RMSE.plot,2))), - bty="n",cex=2) - legend("topleft",legend=c("Rubisco","RuBP","Photo"),lty=c(2,2,1), - col=c("dark blue","dark red","dark grey"),bty="n",lwd=6.3,seg.len=3.5,cex=1.5) - if (Vcmax.plot!=-9999){ - lines(plotCi,(Vcmax.plot*(plotCi-Gstar)/(plotCi+(Kc*(1+Oxygen/Ko))))-Rd.plot,lwd=5,col="dark blue",lty=2) + + par(mfrow = c(1, 1), mar = c(5, 5, 2, 1)) # bot, left + ylim <- range(data[, loc1[2]]) + plot(data[, loc1[1]], data[, loc1[2]], + main = paste(file), xlab = "Ci", ylab = "Photo", cex.lab = 2, cex = 1.8, + xlim = c(1.1, 1500), ylim = c(0, ylim[2] + 3) + ) + legend("bottomright", + legend = c( + paste("Tleaf =", round(Tleaf, 2)), paste("Vcmax =", round(Vcmax.plot, 2)), + paste("Jmax = ", round(Jmax.plot, 2)), paste("Rd = ", round(Rd.plot, 4)), + paste("RMSE = ", round(RMSE.plot, 2)) + ), + bty = "n", cex = 2 + ) + legend("topleft", + legend = c("Rubisco", "RuBP", "Photo"), lty = c(2, 2, 1), + col = c("dark blue", "dark red", "dark grey"), bty = "n", lwd = 6.3, seg.len = 3.5, cex = 1.5 + ) + if (Vcmax.plot != -9999) { + lines(plotCi, (Vcmax.plot * (plotCi - Gstar) / (plotCi + (Kc * (1 + Oxygen / Ko)))) - Rd.plot, lwd = 5, col = "dark blue", lty = 2) } - if (Jmax.plot!=-9999){ - lines(plotCi,((Jmax.plot*(plotCi-Gstar))/((4.5*plotCi)+(10.5*Gstar)))-Rd.plot,lwd=5,col="dark red",lty=2) + if (Jmax.plot != -9999) { + lines(plotCi, ((Jmax.plot * (plotCi - Gstar)) / ((4.5 * plotCi) + (10.5 * Gstar))) - Rd.plot, lwd = 5, col = "dark red", lty = 2) } - if (Vcmax.plot!=-9999 & Jmax.plot!=-9999){ - lines(plotCi,pmin(Vcmax.plot*(plotCi-Gstar)/(plotCi+(Kc*(1+Oxygen/Ko))), - (Jmax.plot*(plotCi-Gstar))/((4.5*plotCi)+(10.5*Gstar)))-Rd.plot,col="dark grey",lwd=2.0) + if (Vcmax.plot != -9999 & Jmax.plot != -9999) { + lines(plotCi, pmin( + Vcmax.plot * (plotCi - Gstar) / (plotCi + (Kc * (1 + Oxygen / Ko))), + (Jmax.plot * (plotCi - Gstar)) / ((4.5 * plotCi) + (10.5 * Gstar)) + ) - Rd.plot, col = "dark grey", lwd = 2.0) } - box(lwd=2.2) + box(lwd = 2.2) dev.off() - - } else if (type=="A-Q"){ + } else if (type == "A-Q") { print("Not Yet Implemented") } # End A-Ci / A-Q if/else - - } # End of function -#--------------------------------------------------------------------------------------------------# \ No newline at end of file +#--------------------------------------------------------------------------------------------------# diff --git a/modules/photosynthesis/code/test.fitA.R b/modules/photosynthesis/code/test.fitA.R index 6b08f901b45..33abe5073ba 100644 --- a/modules/photosynthesis/code/test.fitA.R +++ b/modules/photosynthesis/code/test.fitA.R @@ -1,53 +1,52 @@ ## development testing of fitA - setwd("~/Dropbox/Dietze_Lab_Undergrads/JAM - Xsite/UROP/") - in.folder = "raw" - pattern = "JAM_B" - cov.file = NULL#'c3covariates.txt' - - - ## Read Photosynthetic gas exchange data - filenames <- list.files(in.folder,pattern=pattern, full.names=TRUE) - master = lapply(filenames, read_Licor) - save(master,file="master.RData") - - ## run QA/QC checks - for(i in 1:length(master)){ - master[[i]] = Licor_QC(master[[i]]) - save(master,file="master.RData") - } - - ## Merge licor data - dat<-do.call("rbind", master) - dat = dat[-which(dat$QC < 1),] ## remove both unchecked points and those that fail QC - - - ## Read Covariate Data - if(!is.null(cov.file)){ - if(file.exists){ - cov.data=read.table(cov.file,header=TRUE) #v2 has NA filled-in - } else { - 
print("Covariate file does not exist",cov.file) - cov.file=cov.data=NULL - } +setwd("~/Dropbox/Dietze_Lab_Undergrads/JAM - Xsite/UROP/") +in.folder <- "raw" +pattern <- "JAM_B" +cov.file <- NULL #' c3covariates.txt' + + +## Read Photosynthetic gas exchange data +filenames <- list.files(in.folder, pattern = pattern, full.names = TRUE) +master <- lapply(filenames, read_Licor) +save(master, file = "master.RData") + +## run QA/QC checks +for (i in 1:length(master)) { + master[[i]] <- Licor_QC(master[[i]]) + save(master, file = "master.RData") +} + +## Merge licor data +dat <- do.call("rbind", master) +dat <- dat[-which(dat$QC < 1), ] ## remove both unchecked points and those that fail QC + + +## Read Covariate Data +if (!is.null(cov.file)) { + if (file.exists) { + cov.data <- read.table(cov.file, header = TRUE) # v2 has NA filled-in + } else { + print("Covariate file does not exist", cov.file) + cov.file <- cov.data <- NULL } - -fit = fitA(dat) - -A.model = list(a.fixed=NULL,a.random=NULL,V.fixed=NULL,V.random=NULL,n.iter=5000,match="fname") - -fit = fitA(dat,cov.data,A.model) - -plot(fit$params) ## MCMC diagnostic plots -summary(fit$params) ## parameter estimates - - ## predicted vs observed plot - mstats = summary(fit$predict) - pmean = mstats$statistics[grep("pmean",rownames(mstats$statistics)),1] - plot(pmean,dat$Photo,pch="+") - abline(0,1,col=2,lwd=2) - - -plot_photo(dat,fit) - \ No newline at end of file +} + +fit <- fitA(dat) + +A.model <- list(a.fixed = NULL, a.random = NULL, V.fixed = NULL, V.random = NULL, n.iter = 5000, match = "fname") + +fit <- fitA(dat, cov.data, A.model) + +plot(fit$params) ## MCMC diagnostic plots +summary(fit$params) ## parameter estimates + +## predicted vs observed plot +mstats <- summary(fit$predict) +pmean <- mstats$statistics[grep("pmean", rownames(mstats$statistics)), 1] +plot(pmean, dat$Photo, pch = "+") +abline(0, 1, col = 2, lwd = 2) + + +plot_photo(dat, fit) diff --git a/modules/priors/R/plots.R b/modules/priors/R/plots.R index 6687b5e8456..dfa695f3077 100644 --- a/modules/priors/R/plots.R +++ b/modules/priors/R/plots.R @@ -1,4 +1,4 @@ -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# ##' Plots a prior density from a parameterized probability distribution ##' ##' @param prior.density data frame containing columns x and y @@ -22,7 +22,7 @@ plot_prior.density <- function(prior.density, base.plot = NULL, prior.color = "b } # plot_prior.density -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# ##' Add posterior density to a plot ##' ##' @param posterior.density data frame containing columns x and y @@ -41,7 +41,7 @@ plot_posterior.density <- function(posterior.density, base.plot = NULL) { } # plot_posterior.density -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# ##' Plot prior density and data ##' ##' @param priordata observations to be plotted as points @@ -61,43 +61,46 @@ priorfig <- function(priordata = NA, priordensity = NA, trait = NA, xlim = "auto if (isTRUE(xlim == "auto")) { x.breaks <- pretty(c(signif(priordensity$x, 2)), 4) - xlim <- range(x.breaks) + xlim <- range(x.breaks) } 
else { x.breaks <- pretty(signif(xlim, 2), 4) - xlim <- range(c(x.breaks, xlim)) + xlim <- range(c(x.breaks, xlim)) } - priorfigure <- ggplot() + theme_bw() + + priorfigure <- ggplot() + + theme_bw() + scale_x_continuous(limits = xlim, breaks = x.breaks, name = trait$units) + scale_y_continuous(breaks = NULL) + labs(title = trait$figid) + - theme(panel.grid.major = element_blank(), - panel.grid.minor = element_blank(), - axis.text.y = element_blank(), ## hide y axis label - axis.text.x = element_text(size = fontsize), - axis.title.y = element_blank(), ## hide y axis label - axis.title.x = element_text(size = fontsize * 0.9), - plot.title = element_text(size = fontsize * 1.1)) + theme( + panel.grid.major = element_blank(), + panel.grid.minor = element_blank(), + axis.text.y = element_blank(), ## hide y axis label + axis.text.x = element_text(size = fontsize), + axis.title.y = element_blank(), ## hide y axis label + axis.title.x = element_text(size = fontsize * 0.9), + plot.title = element_text(size = fontsize * 1.1) + ) if (is.data.frame(priordata)) { - priordata <- subset(priordata, subset = !is.na(x)) - dx <- with(priordata, min(abs(diff(x)[diff(x) != 0]))) + priordata <- subset(priordata, subset = !is.na(x)) + dx <- with(priordata, min(abs(diff(x)[diff(x) != 0]))) ## add jitter to separate equal values priordata$x <- priordata$x + stats::runif(length(priordata$x), -dx / 2, dx / 2) - rug <- geom_rug(data = priordata, aes(x)) + rug <- geom_rug(data = priordata, aes(x)) priorfigure <- priorfigure + rug } if (is.data.frame(priordensity[1])) { - dens.line <- geom_line(data = priordensity, aes(x, y)) - qpts <- get.quantiles.from.density(priordensity) - dens.ci <- geom_point(data = qpts, aes(x, y)) + dens.line <- geom_line(data = priordensity, aes(x, y)) + qpts <- get.quantiles.from.density(priordensity) + dens.ci <- geom_point(data = qpts, aes(x, y)) priorfigure <- priorfigure + dens.line + dens.ci } return(priorfigure) } # priorfig -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# ##' Plot trait density and data ##' ##' @param trait dataframe with id, figid and units of the trait to be plotted @@ -132,22 +135,22 @@ plot_trait <- function(trait, x.lim = NULL, y.lim = NULL, logx = FALSE) { - if (!requireNamespace("PEcAn.visualization", quietly = TRUE)) { PEcAn.logger::logger.severe( "plot_trait requires package `PEcAn.visualization`,", - "but it is not installed. Please install it and try again.") + "but it is not installed. Please install it and try again." 
+ ) } ## Determine plot components plot_posterior <- !is.null(posterior.sample) - plot_prior <- !is.null(prior) - plot_data <- !is.null(trait.df) + plot_prior <- !is.null(prior) + plot_data <- !is.null(trait.df) - if(plot_data) trait.df <- PEcAn.MA::jagify(trait.df) + if (plot_data) trait.df <- PEcAn.MA::jagify(trait.df) if (plot_prior) { - prior.color <- ifelse(plot_posterior, "grey", "black") + prior.color <- ifelse(plot_posterior, "grey", "black") prior.density <- create.density.df(distribution = prior) prior.density <- prior.density[prior.density$x > 0, ] } else { @@ -174,7 +177,8 @@ plot_trait <- function(trait, x.ticks <- pretty(c(0, x.lim[2])) - base.plot <- ggplot2::ggplot() + theme_bw() + base.plot <- ggplot2::ggplot() + + theme_bw() if (plot_prior) { base.plot <- plot_prior.density(prior.density, base.plot = base.plot, prior.color = prior.color) } @@ -189,22 +193,24 @@ plot_trait <- function(trait, geom_segment(aes(x = min(x.ticks), xend = max(x.ticks), y = 0, yend = 0)) + scale_x_continuous(limits = range(x.ticks), breaks = x.ticks, name = trait$units) + labs(title = trait$figid) + - theme(axis.text.x = element_text(size = fontsize$axis), - axis.text.y = element_blank(), - axis.title.x = element_text(size = fontsize$axis), - axis.title.y = element_blank(), - axis.ticks.y = element_blank(), - axis.line.y = element_blank(), - legend.position = "none", - plot.title = element_text(size = fontsize$title), - panel.grid.major = element_blank(), - panel.grid.minor = element_blank(), - panel.border = element_blank()) + theme( + axis.text.x = element_text(size = fontsize$axis), + axis.text.y = element_blank(), + axis.title.x = element_text(size = fontsize$axis), + axis.title.y = element_blank(), + axis.ticks.y = element_blank(), + axis.line.y = element_blank(), + legend.position = "none", + plot.title = element_text(size = fontsize$title), + panel.grid.major = element_blank(), + panel.grid.minor = element_blank(), + panel.border = element_blank() + ) return(trait.plot) } # plot_trait -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# ##' Plot probability density and data ##' ##' @export @@ -215,23 +221,25 @@ plot_trait <- function(trait, ##' @author David LeBauer ##' @return outputs plots in outdir/sensitivity.analysis.pdf file plot_densities <- function(density.plot_inputs, outdir, ...) { - trait.samples <- density.plot_inputs$trait.samples - trait.df <- density.plot_inputs$trait.df + trait.samples <- density.plot_inputs$trait.samples + trait.df <- density.plot_inputs$trait.df prior.trait.samples <- density.plot_inputs$trait.df traits <- names(trait.samples) grDevices::pdf(paste0(outdir, "trait.densities.pdf"), height = 12, width = 20) for (trait in traits) { - density.plot <- plot_density(trait.sample = trait.samples[, trait], - trait.df = trait.df[[trait]], ...) + density.plot <- plot_density( + trait.sample = trait.samples[, trait], + trait.df = trait.df[[trait]], ... + ) print(density.plot) } grDevices::dev.off() } # plot_densities -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# #' Get the quantiles from prior density #' #' Finds quantiles on a density data frame @@ -241,13 +249,13 @@ plot_densities <- function(density.plot_inputs, outdir, ...) 
{ #' @author David LeBauer #' @export #' @examples -#' prior.df <- create.density.df(distribution = list('norm',0,1)) +#' prior.df <- create.density.df(distribution = list("norm", 0, 1)) #' get.quantiles.from.density(prior.df) #' samp.df <- create.density.df(samps = rnorm(100)) #' get.quantiles.from.density(samp.df) get.quantiles.from.density <- function(density.df, quantiles = c(0.025, 0.5, 0.975)) { colnames(density.df) <- c("prior.x", "dens.x") - density.df$prob.x <- density.df$dens.x / sum(density.df$dens.x) + density.df$prob.x <- density.df$dens.x / sum(density.df$dens.x) qi <- sapply(quantiles, function(x) which.min(abs(cumsum(density.df$prob.x) - x))) qs <- density.df[qi, c("prior.x", "dens.x")] colnames(qs) <- c("x", "y") diff --git a/modules/priors/R/priors.R b/modules/priors/R/priors.R index 75d73874acc..7286c05c562 100644 --- a/modules/priors/R/priors.R +++ b/modules/priors/R/priors.R @@ -1,6 +1,6 @@ ##' Fit a distribution to data ##' -##' @title Fit distribution to data +##' @title Fit distribution to data ##' @param trait.data data for distribution ##' @param trait name of trait to fit. ##' One of "tt", "sla", "rrr", "q" @@ -9,9 +9,8 @@ ##' @return best fit distribution ##' @export ##' @author David LeBauer -fit.dist <- function(trait.data, trait = colnames(trait.data), +fit.dist <- function(trait.data, trait = colnames(trait.data), dists = c("weibull", "lognormal", "gamma"), n = NULL) { - if (inherits(trait.data, "data.frame")) { trait.data <- trait.data[, 1] } @@ -22,57 +21,67 @@ fit.dist <- function(trait.data, trait = colnames(trait.data), if ("f" %in% dists) { print(trait) if (trait == "tt") { - a[["f"]] <- suppressWarnings(MASS::fitdistr(trait.data, "f", - start = list(df1 = 100, df2 = 200))) + a[["f"]] <- suppressWarnings(MASS::fitdistr(trait.data, "f", + start = list(df1 = 100, df2 = 200) + )) } else if (trait == "sla") { - a[["f"]] <- suppressWarnings(MASS::fitdistr(trait.data, "f", - start = list(df1 = 6, df2 = 1))) + a[["f"]] <- suppressWarnings(MASS::fitdistr(trait.data, "f", + start = list(df1 = 6, df2 = 1) + )) } else if (trait == "rrr") { - a[["f"]] <- suppressWarnings(MASS::fitdistr(trait.data, "f", - start = list(df1 = 6, df2 = 1))) + a[["f"]] <- suppressWarnings(MASS::fitdistr(trait.data, "f", + start = list(df1 = 6, df2 = 1) + )) } else if (trait == "q") { - a[["f"]] <- suppressWarnings(MASS::fitdistr(trait.data, "f", - start = list(df1 = 1, df2 = 2))) + a[["f"]] <- suppressWarnings(MASS::fitdistr(trait.data, "f", + start = list(df1 = 1, df2 = 2) + )) } else { PEcAn.logger::logger.severe(paste(trait, "not supported!")) } } if ("beta" %in% dists) { - a[["beta"]] <- suppressWarnings(MASS::fitdistr(trait.data, "beta", - start = list(shape1 = 2, shape2 = 1))) + a[["beta"]] <- suppressWarnings(MASS::fitdistr(trait.data, "beta", + start = list(shape1 = 2, shape2 = 1) + )) } aicvalues <- lapply(a, stats::AIC) result <- t(sapply( dists, - function(x) cbind( - t(PEcAn.utils::tabnum(a[[x]]$estimate)), - signif(aicvalues[[x]])) + function(x) { + cbind( + t(PEcAn.utils::tabnum(a[[x]]$estimate)), + signif(aicvalues[[x]]) + ) + } )) colnames(result) <- c("a", "b", "AIC") print(result) bestfitdist <- names(which.min(aicvalues)) parms <- PEcAn.utils::tabnum(a[[bestfitdist]]$estimate) - dist <- ifelse(bestfitdist == "normal", "norm", bestfitdist) - return(data.frame(distribution = dist, - a = as.numeric(parms[1]), - b = as.numeric(parms[2]), - n = ifelse(is.null(n), length(trait.data), n))) + dist <- ifelse(bestfitdist == "normal", "norm", bestfitdist) + return(data.frame( + 
distribution = dist, + a = as.numeric(parms[1]), + b = as.numeric(parms[2]), + n = ifelse(is.null(n), length(trait.data), n) + )) } # fit.dist #--------------------------------------------------------------------------------------------------# ##' Prior fitting function for optimization ##' -##' This function is used within `DEoptim` to parameterize a distribution to the -##' central tendency and confidence interval of a parameter. +##' This function is used within `DEoptim` to parameterize a distribution to the +##' central tendency and confidence interval of a parameter. ##' This function is not very robust; currently it needs to be tweaked when distributions ##' require starting values (e.g. beta, f) -##' @title prior.fn +##' @title prior.fn ##' @param parms target for optimization -##' @param x vector with c(lcl, ucl, ct) lcl / ucl = confidence limits, ct = entral tendency +##' @param x vector with c(lcl, ucl, ct) lcl / ucl = confidence limits, ct = entral tendency ##' @param alpha quantile at which lcl/ucl are estimated (e.g. for a 95\% CI, alpha = 0.5) -##' @param distn named distribution, one of 'lnorm', 'gamma', 'weibull', 'beta'; support for other distributions not currently implemented -##' @param central.tendency one of 'mode', 'median', and 'mean' +##' @param distn named distribution, one of 'lnorm', 'gamma', 'weibull', 'beta'; support for other distributions not currently implemented +##' @param central.tendency one of 'mode', 'median', and 'mean' ##' @param trait name of trait, can be used for exceptions (currently used for trait == 'q') ##' @export ##' @return parms @@ -128,7 +137,7 @@ prior.fn <- function(parms, x, alpha, distn, central.tendency = NULL, trait = NU if (is.null(central.tendency)) { ct <- x[3] } else if (central.tendency == "median") { - ct <- parms[2] * log(2) ^ (1 / parms[1]) + ct <- parms[2] * log(2)^(1 / parms[1]) } else if (central.tendency == "mean") { ct <- parms[2] * gamma(1 + 1 / parms[2]) } else if (central.tendency == "mode") { @@ -152,9 +161,9 @@ prior.fn <- function(parms, x, alpha, distn, central.tendency = NULL, trait = NU } else if (central.tendency == "mean") { ct <- a / (a + b) } else if (central.tendency == "median") { - ct <- stats::qbeta(0.5, a, b) ## median + ct <- stats::qbeta(0.5, a, b) ## median } else if (central.tendency == "mode") { - ct <- ifelse(a > 1 & b > 1, (a - 1) / (a + b - 2), 0) ## mode + ct <- ifelse(a > 1 & b > 1, (a - 1) / (a + b - 2), 0) ## mode } } return(sum(abs(c(lcl, ucl, ct) - x))) @@ -164,7 +173,7 @@ prior.fn <- function(parms, x, alpha, distn, central.tendency = NULL, trait = NU #--------------------------------------------------------------------------------------------------# ##' Take n random samples from prior ##' -##' @title Sample from prior +##' @title Sample from prior ##' @param distn name of distribution, e.g. 
"norm", "pois" ##' @param parama first parameter for distn call ##' @param paramb second parameter for distn call @@ -193,16 +202,16 @@ pr.samp <- function(distn, parama, paramb, n) { ##' \dontrun{ ##' # return 1st through 99th quantile of standard normal distribution: ##' PEcAn.priors::get.sample( -##' prior = data.frame(distn = 'norm', parama = 0, paramb = 1), +##' prior = data.frame(distn = 'norm', parama = 0, paramb = 1), ##' p = 1:99/100) ##' # return 100 random samples from standard normal distribution: ##' PEcAn.priors::get.sample( -##' prior = data.frame(distn = 'norm', parama = 0, paramb = 1), +##' prior = data.frame(distn = 'norm', parama = 0, paramb = 1), ##' n = 100) ##' } ##' @export get.sample <- function(prior, n = NULL, p = NULL) { - if(!is.null(p)){ + if (!is.null(p)) { if (as.character(prior$distn) %in% c("exp", "pois", "geom")) { ## one parameter distributions return(do.call(paste0("q", prior$distn), list(p, prior$parama))) @@ -224,7 +233,7 @@ get.sample <- function(prior, n = NULL, p = NULL) { #--------------------------------------------------------------------------------------------------# ##' Calculates density at n points across the range of a parameter ##' -##' For a distribution and parameters, return the density for values ranging from alpha to 1-alpha +##' For a distribution and parameters, return the density for values ranging from alpha to 1-alpha ##' @title Calculate densities ##' @param distn distribution ##' @param parama parameter @@ -237,20 +246,26 @@ get.sample <- function(prior, n = NULL, p = NULL) { pr.dens <- function(distn, parama, paramb, n = 1000, alpha = 0.001) { alpha <- ifelse(alpha < 0.5, alpha, 1 - alpha) n <- ifelse(alpha == 0.5, 1, n) - range.x <- do.call(paste("q", distn, sep = ""), - list(c(alpha, 1 - alpha), parama, paramb)) + range.x <- do.call( + paste("q", distn, sep = ""), + list(c(alpha, 1 - alpha), parama, paramb) + ) seq.x <- seq(from = range.x[1], to = range.x[2], length.out = n) - dens.df <- data.frame(x = seq.x, - y = do.call(paste("d", distn, sep = ""), - list(seq.x, parama, paramb))) + dens.df <- data.frame( + x = seq.x, + y = do.call( + paste("d", distn, sep = ""), + list(seq.x, parama, paramb) + ) + ) return(dens.df) } # pr.dens -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# ##' Create Density Data Frame from Sample ##' -##' Returns a data frame from `stats::density` function +##' Returns a data frame from `stats::density` function ##' ##' @param samps a vector of samples from a distribution ##' @param zero.bounded logical: Restrict density distribution to nonnegative values? @@ -268,7 +283,7 @@ pr.dens <- function(distn, parama, paramb, n = 1000, alpha = 0.001) { ##' plot(prior.df) ##' samp.df <- create.density.df(samps = rnorm(100)) ##' lines(samp.df) -create.density.df <- function(samps = NULL, zero.bounded = FALSE, distribution = NULL, +create.density.df <- function(samps = NULL, zero.bounded = FALSE, distribution = NULL, n = 1000, ...) 
{ samp.exists <- !is.null(samps) dist.exists <- !is.null(distribution) @@ -286,7 +301,7 @@ create.density.df <- function(samps = NULL, zero.bounded = FALSE, distribution = } density.df <- with(new.density, data.frame(x = x, y = y)) } - + if (dist.exists) { density.df <- do.call(pr.dens, c(distribution[1:3])) } diff --git a/modules/priors/tests/testthat/test.priors.R b/modules/priors/tests/testthat/test.priors.R index 495e8447935..b4aca95c292 100644 --- a/modules/priors/tests/testthat/test.priors.R +++ b/modules/priors/tests/testthat/test.priors.R @@ -1,29 +1,38 @@ -test_that("pr.dens works",{ - +test_that("pr.dens works", { ## pr.dens() - expect_that(nrow(pr.dens('norm', 0, 1, n=10, alpha=0.5)), - equals(1)) - expect_that(nrow(pr.dens('norm', 0, 10, n=10, alpha=0.5)), - equals(1)) # function should set n=1 when alpha = 0.5 - expect_that(nrow(pr.dens('norm', 0, 10, n=10, alpha=0.4)), - equals(10)) - expect_that(sum(pr.dens('norm', 0, 10, n=10, alpha=0.4)$x), - equals(0)) + expect_that( + nrow(pr.dens("norm", 0, 1, n = 10, alpha = 0.5)), + equals(1) + ) + expect_that( + nrow(pr.dens("norm", 0, 10, n = 10, alpha = 0.5)), + equals(1) + ) # function should set n=1 when alpha = 0.5 + expect_that( + nrow(pr.dens("norm", 0, 10, n = 10, alpha = 0.4)), + equals(10) + ) + expect_that( + sum(pr.dens("norm", 0, 10, n = 10, alpha = 0.4)$x), + equals(0) + ) }) -test_that("pr.samp works",{ - +test_that("pr.samp works", { ## pr.samp() - expect_that(length(pr.samp('norm', 0, 1, 2)), - equals(2)) - expect_that(pr.samp('norm', 0, 1, 1) < 100, - is_true()) - + expect_that( + length(pr.samp("norm", 0, 1, 2)), + equals(2) + ) + expect_that( + pr.samp("norm", 0, 1, 1) < 100, + is_true() + ) }) test_that("create.density.df works on both stated distribution and samples", { - prior.df <- create.density.df(distribution = list('norm', 0, 1), n = 1000) - samp.df <- create.density.df(samps = stats::qnorm(1:100/101), n = 1000) + prior.df <- create.density.df(distribution = list("norm", 0, 1), n = 1000) + samp.df <- create.density.df(samps = stats::qnorm(1:100 / 101), n = 1000) expect_equal(colnames(prior.df), colnames(samp.df)) expect_equal(dim(prior.df), dim(samp.df)) expect_equal(colnames(prior.df), c("x", "y")) @@ -32,14 +41,14 @@ test_that("create.density.df works on both stated distribution and samples", { test_that("get.quantiles.from.density works", { - samp.df <- create.density.df(samps = stats::qnorm(1:100/101), n = 1000) + samp.df <- create.density.df(samps = stats::qnorm(1:100 / 101), n = 1000) test.q <- get.quantiles.from.density(samp.df, quantiles = c(0.25, 0.5, 0.75)) expect_is(test.q, "data.frame") expect_equal(signif(test.q$x, 3), c(-0.711, -0.00337, 0.705)) expect_equal(signif(test.q$y, 3), c(0.304, 0.381, 0.305)) - expect_equal(dim(test.q), c(3,2)) + expect_equal(dim(test.q), c(3, 2)) }) -test_that("plot_prior.density returns ggplot object",{ - expect_is(plot_prior.density(pr.dens('norm', 0, 1)), "ggplot") +test_that("plot_prior.density returns ggplot object", { + expect_is(plot_prior.density(pr.dens("norm", 0, 1)), "ggplot") }) diff --git a/modules/rtm/R/bayestools.R b/modules/rtm/R/bayestools.R index 5a680b6e1be..019a58c76f8 100644 --- a/modules/rtm/R/bayestools.R +++ b/modules/rtm/R/bayestools.R @@ -47,20 +47,20 @@ bt_check_convergence <- function(samples, threshold = 1.1, use_CI = TRUE, use_mp i <- ifelse(use_CI, 2, 1) gelman <- try(BayesianTools::gelmanDiagnostics(samples)) if (inherits(gelman, "try-error")) { - message('Error trying to calculate gelman diagnostic. 
Assuming no convergence') + message("Error trying to calculate gelman diagnostic. Assuming no convergence") return(FALSE) } if (use_mpsrf) { - gelman_vec <- c(gelman$psrf[,i], mpsrf = gelman$mpsrf) + gelman_vec <- c(gelman$psrf[, i], mpsrf = gelman$mpsrf) } else { - gelman_vec <- gelman$psrf[,i] + gelman_vec <- gelman$psrf[, i] } exceeds <- gelman_vec > threshold if (any(exceeds)) { exceeds_vec <- gelman_vec[exceeds] - exceeds_char <- sprintf('%s: %.2f', names(exceeds_vec), exceeds_vec) - exceeds_str <- paste(exceeds_char, collapse = '; ') - message('The following parameters exceed threshold: ', exceeds_str) + exceeds_char <- sprintf("%s: %.2f", names(exceeds_vec), exceeds_vec) + exceeds_str <- paste(exceeds_char, collapse = "; ") + message("The following parameters exceed threshold: ", exceeds_str) return(FALSE) } else { return(TRUE) @@ -73,25 +73,25 @@ bt_check_convergence <- function(samples, threshold = 1.1, use_CI = TRUE, use_mp #' @inheritParams prospect #' @export prospect_bt_prior <- function(version, custom_prior = list()) { - col_names <- c('param_name', 'distn', 'parama', 'paramb', 'lower') - prior_default_list <- list( - N = list('N', 'norm', 1.4, 0.8, 1), - Cab = list('Cab', 'lnorm', log(40), 0.9, 0), - Car = list('Car', 'lnorm', log(10), 1.1, 0), - Canth = list('Canth', 'lnorm', log(10), 1.1, 0), - Cbrown = list('Cbrown', 'lnorm', log(1), 1.1, 0), - Cw = list('Cw', 'lnorm', log(0.01), 1, 0), - Cm = list('Cm', 'lnorm', log(0.009), 1, 0), - residual = list('residual', 'lnorm', log(0.001), 2.5, 0) - ) - prior_list <- modifyList(prior_default_list, custom_prior) - prior_df_all <- do.call(rbind.data.frame, prior_list) - colnames(prior_df_all) <- col_names - default_params <- defparam(paste0('prospect_', tolower(version))) - use_names <- c(names(default_params), 'residual') - prior_df <- prior_df_all[prior_df_all[['param_name']] %in% use_names,] - prior <- PEcAn.assim.batch::pda.create.btprior(prior_df) - return(prior) + col_names <- c("param_name", "distn", "parama", "paramb", "lower") + prior_default_list <- list( + N = list("N", "norm", 1.4, 0.8, 1), + Cab = list("Cab", "lnorm", log(40), 0.9, 0), + Car = list("Car", "lnorm", log(10), 1.1, 0), + Canth = list("Canth", "lnorm", log(10), 1.1, 0), + Cbrown = list("Cbrown", "lnorm", log(1), 1.1, 0), + Cw = list("Cw", "lnorm", log(0.01), 1, 0), + Cm = list("Cm", "lnorm", log(0.009), 1, 0), + residual = list("residual", "lnorm", log(0.001), 2.5, 0) + ) + prior_list <- modifyList(prior_default_list, custom_prior) + prior_df_all <- do.call(rbind.data.frame, prior_list) + colnames(prior_df_all) <- col_names + default_params <- defparam(paste0("prospect_", tolower(version))) + use_names <- c(names(default_params), "residual") + prior_df <- prior_df_all[prior_df_all[["param_name"]] %in% use_names, ] + prior <- PEcAn.assim.batch::pda.create.btprior(prior_df) + return(prior) } #' Perform Bayesian inversion using BayesianTools package @@ -127,21 +127,20 @@ prospect_bt_prior <- function(version, custom_prior = list()) { #' See the BayesianTools sampler documentation for what can go in the `BayesianTools` settings lists. #' @param observed Vector of observations. Ignored if `loglike` is not `NULL`. #' @param model Function called by log-likelihood. Must be `function(params)` -#' and return a vector equal to `length(observed)` or `nrow(observed)`. Ignored +#' and return a vector equal to `length(observed)` or `nrow(observed)`. Ignored #' if `loglike` is not `NULL`. #' @param prior BayesianTools prior object. 
#' @param custom_settings Nested settings list. See Details. -#' @param loglike Custom log likelihood function. If `NULL`, use [rtm_loglike()] +#' @param loglike Custom log likelihood function. If `NULL`, use [rtm_loglike()] #' with provided `observed` and `model`. #' @export invert_bt <- function(observed, model, prior, custom_settings = list(), loglike = NULL) { - default_settings <- list( common = list(), init = list(iterations = 10000), loop = list(iterations = 2000), other = list( - sampler = 'DEzs', + sampler = "DEzs", use_mpsrf = FALSE, min_samp = 5000, max_iter = 1e6, @@ -157,8 +156,10 @@ invert_bt <- function(observed, model, prior, custom_settings = list(), loglike for (s in seq_along(default_settings)) { s_name <- names(default_settings)[s] if (s_name %in% names(custom_settings)) { - settings[[s_name]] <- modifyList(default_settings[[s_name]], - custom_settings[[s_name]]) + settings[[s_name]] <- modifyList( + default_settings[[s_name]], + custom_settings[[s_name]] + ) } else { settings[[s_name]] <- default_settings[[s_name]] } @@ -167,13 +168,13 @@ invert_bt <- function(observed, model, prior, custom_settings = list(), loglike settings <- default_settings } - use_mpsrf <- settings[['other']][['use_mpsrf']] - min_samp <- settings[['other']][['min_samp']] - lag.max <- settings[['other']][['lag.max']] - max_iter <- settings[['other']][['max_iter']] - save_progress <- settings[['other']][['save_progress']] - threshold <- settings[['other']][['threshold']] - verbose_loglike <- settings[['other']][['verbose_loglike']] + use_mpsrf <- settings[["other"]][["use_mpsrf"]] + min_samp <- settings[["other"]][["min_samp"]] + lag.max <- settings[["other"]][["lag.max"]] + max_iter <- settings[["other"]][["max_iter"]] + save_progress <- settings[["other"]][["save_progress"]] + threshold <- settings[["other"]][["threshold"]] + verbose_loglike <- settings[["other"]][["verbose_loglike"]] if (!is.null(save_progress)) { # `file.create` returns FALSE if target directory doesn't exist. @@ -189,7 +190,7 @@ invert_bt <- function(observed, model, prior, custom_settings = list(), loglike stop("One of the parameters must be `residual`.") } } - nparams <- length(test_samp[param_names != 'residual']) + nparams <- length(test_samp[param_names != "residual"]) if (is.null(loglike)) { loglike <- rtm_loglike( nparams = nparams, @@ -207,16 +208,16 @@ invert_bt <- function(observed, model, prior, custom_settings = list(), loglike ) - init_settings <- modifyList(settings[['common']], settings[['init']]) + init_settings <- modifyList(settings[["common"]], settings[["init"]]) stop_iter <- init_settings[["iterations"]] if (is.null(stop_iter)) { stop_iter <- 10000 - warning('init_settings$iterations is not set. Using ', stop_iter, '.') + warning("init_settings$iterations is not set. 
Using ", stop_iter, ".") } - message('Running initial ', stop_iter, ' iterations.') + message("Running initial ", stop_iter, " iterations.") samples <- BayesianTools::runMCMC( bayesianSetup = setup, - sampler = settings[['other']][['sampler']], + sampler = settings[["other"]][["sampler"]], settings = init_settings ) if (!is.null(save_progress)) { @@ -224,24 +225,26 @@ invert_bt <- function(observed, model, prior, custom_settings = list(), loglike } converged <- bt_check_convergence(samples = samples, threshold = threshold, use_mpsrf = use_mpsrf) - loop_settings <- modifyList(settings[['common']], settings[['loop']]) + loop_settings <- modifyList(settings[["common"]], settings[["loop"]]) - next_iter <- loop_settings[['iterations']] + next_iter <- loop_settings[["iterations"]] if (is.null(next_iter)) { next_iter <- 2000 - warning('loop_settings$iterations is not set. Using ', next_iter, '.') + warning("loop_settings$iterations is not set. Using ", next_iter, ".") } while (!(converged && enough_samples)) { start_iter <- stop_iter + 1 stop_iter <- stop_iter + next_iter if (start_iter > max_iter) { - warning('Next start iteration (', start_iter, ') greater than maximum iteration count (', max_iter, ') ', - 'but convergence has not been achieved. ', - 'Terminating sampling and returning results as is.') + warning( + "Next start iteration (", start_iter, ") greater than maximum iteration count (", max_iter, ") ", + "but convergence has not been achieved. ", + "Terminating sampling and returning results as is." + ) break } - message('Running ', next_iter, ' more iterations (', start_iter, ' to ', stop_iter, ').') + message("Running ", next_iter, " more iterations (", start_iter, " to ", stop_iter, ").") samples <- BayesianTools::runMCMC(samples, sampler = sampler, settings = loop_settings) if (!is.null(save_progress)) { saveRDS(object = samples, file = save_progress) @@ -249,22 +252,27 @@ invert_bt <- function(observed, model, prior, custom_settings = list(), loglike converged <- bt_check_convergence(samples = samples, threshold = threshold, use_mpsrf = use_mpsrf) if (converged) { coda_samples <- BayesianTools::getSample(samples, coda = TRUE) - burned_samples <- PEcAn.assim.batch::autoburnin(coda_samples, threshold = threshold, - return.burnin = TRUE, method = 'gelman.plot') + burned_samples <- PEcAn.assim.batch::autoburnin(coda_samples, + threshold = threshold, + return.burnin = TRUE, method = "gelman.plot" + ) if (burned_samples$burnin == 1) { - message('PEcAn.assim.batch::autoburnin reports convergence has not been achieved. ', - 'Resuming sampling.') + message( + "PEcAn.assim.batch::autoburnin reports convergence has not been achieved. ", + "Resuming sampling." + ) converged <- FALSE next } n_samples <- coda::niter(burned_samples$samples) enough_samples <- n_samples > min_samp if (!enough_samples) { - message(n_samples, ' samples after burnin is less than target ', min_samp, - '. Resuming sampling.') + message( + n_samples, " samples after burnin is less than target ", min_samp, + ". Resuming sampling." + ) } } } return(samples) } - diff --git a/modules/rtm/R/cbind.spectra.R b/modules/rtm/R/cbind.spectra.R index 77965f30d4c..4a50938c8a6 100644 --- a/modules/rtm/R/cbind.spectra.R +++ b/modules/rtm/R/cbind.spectra.R @@ -1,6 +1,6 @@ #' Combine spectra by wavelength #' -#' @param ... Spectra to combine +#' @param ... Spectra to combine #' @export cbind.spectra <- function(...) { dots <- list(...) 
diff --git a/modules/rtm/R/check.convergence.R b/modules/rtm/R/check.convergence.R index 4ab6a543e62..190cb4c859f 100644 --- a/modules/rtm/R/check.convergence.R +++ b/modules/rtm/R/check.convergence.R @@ -1,8 +1,8 @@ #' Check convergence of multiple MCMC chains -#' -#' Uses Gelman multivariate Gelman-Rubin diagnostic to check if +#' +#' Uses Gelman multivariate Gelman-Rubin diagnostic to check if #' multiple MCMC chains have converged -#' @param jags_out mcmc.list object (from coda package) containing +#' @param jags_out mcmc.list object (from coda package) containing #' samples from MCMC chains. #' @param threshold Gelman-Rubin diagnostic parameter threshold. Default = 1.1 #' @param verbose If TRUE, print convergence result. Default = TRUE @@ -10,12 +10,12 @@ #' @return List length 3 containing the following: #' * convergence: Logical. Whether or not convergence was achieved. #' * diagnostics: Numerical value of Gelman-Rubin diagnostics for each parameter and multivariate diagnostic -#' * error: Logical. Whether or not an error occured in the Gelman-Rubin calculation. +#' * error: Logical. Whether or not an error occured in the Gelman-Rubin calculation. #' @export check.convergence <- function(jags_out, threshold = 1.1, verbose = TRUE, - ...){ + ...) { if (!coda::is.mcmc.list(jags_out)) { stop("Input needs to be of class 'mcmc.list'") } @@ -43,7 +43,9 @@ check.convergence <- function(jags_out, print(msg) } } - return(list(converged = converged, - diagnostics = diagnostics, - error = error)) + return(list( + converged = converged, + diagnostics = diagnostics, + error = error + )) } diff --git a/modules/rtm/R/defparam.R b/modules/rtm/R/defparam.R index 8cc841db154..6471b5f6808 100644 --- a/modules/rtm/R/defparam.R +++ b/modules/rtm/R/defparam.R @@ -1,5 +1,5 @@ #' Get default parameters -#' +#' #' Extract default parameter values from `model.list` #' @param modname Model name. Must match `modname` in `model.list` #' @return Named vector of default parameter values @@ -7,11 +7,11 @@ defparam <- function(modname) { data(model.list) model.list$modname <- trimws(model.list$modname) - p.raw <- model.list[model.list$modname == modname, "par.default"] + p.raw <- model.list[model.list$modname == modname, "par.default"] p.split <- strsplit(trimws(as.character(p.raw)), " ")[[1]] p.names <- lapply(p.split, function(x) gsub("=.*", "", x)) - p.vals <- lapply(p.split, function(x) as.numeric(gsub(".*=", "", x))) - p.out <- unlist(p.vals) + p.vals <- lapply(p.split, function(x) as.numeric(gsub(".*=", "", x))) + p.out <- unlist(p.vals) names(p.out) <- unlist(p.names) p.out } # defparam diff --git a/modules/rtm/R/edr.wrapper.R b/modules/rtm/R/edr.wrapper.R index 321dafa451a..7eef2fa044d 100644 --- a/modules/rtm/R/edr.wrapper.R +++ b/modules/rtm/R/edr.wrapper.R @@ -56,7 +56,6 @@ EDR <- function(img_path, stderr = TRUE, verbose_error = TRUE, ...) 
{ - ed2in_path <- normalizePath(ed2in_path, mustWork = TRUE) # Write ED2 config.xml file @@ -69,17 +68,21 @@ EDR <- function(img_path, new.config.path <- file.path(output.path, "config.xml") PREFIX_XML <- '\n\n' - XML::saveXML(xml, file = new.config.path, indent=TRUE, prefix = PREFIX_XML) + XML::saveXML(xml, file = new.config.path, indent = TRUE, prefix = PREFIX_XML) # Generate input files files_list <- file.path( output.path, - c("lengths.dat", + c( + "lengths.dat", "reflect_par.dat", "reflect_nir.dat", - "trans_par.dat", "trans_nir.dat") + "trans_par.dat", "trans_nir.dat" + ) + ) + names(files_list) <- c( + "lengths", "reflect_par", "reflect_nir", + "trans_par", "trans_nir" ) - names(files_list) <- c("lengths", "reflect_par", "reflect_nir", - "trans_par", "trans_nir") file.create(files_list) write_dat <- function(value, file) { @@ -117,15 +120,19 @@ EDR <- function(img_path, # Multi-PFT settings if (length(spectra_list) != length(trait.values)) { - stop("Spectral data and trait.values do not have same length. ", - "Spectral data length: ", length(spectra_list), - "trait.values length: ", length(trait.values)) + stop( + "Spectral data and trait.values do not have same length. ", + "Spectral data length: ", length(spectra_list), + "trait.values length: ", length(trait.values) + ) } pft_names <- names(trait.values) if (any(!names(spectra_list) %in% pft_names)) { - stop("Spectral data and trait.values do not have same PFT names. ", - "Spectral data names: ", names(spectra_list), - "trait.values names: ", pft_names) + stop( + "Spectral data and trait.values do not have same PFT names. ", + "Spectral data names: ", names(spectra_list), + "trait.values names: ", pft_names + ) } data(pftmapping, package = "PEcAn.ED2") npft <- length(pft_names) @@ -177,7 +184,7 @@ EDR <- function(img_path, # Analyze output albedo <- get.EDR.output(output.path) # Optionally, clean up all generated files - if(clean) { + if (clean) { delete.files <- file.remove(files_list) # NOTE that currently, not all files are deleted (e.g. history file, copied ED2IN) if (!delete.files) { @@ -216,13 +223,15 @@ EDR.preprocess.history <- function(history.path, output.path, datetime, history. ) # Extract date and time - day <- strftime(datetime, "%d", tz = "UTC") - month <- strftime(datetime, "%m", tz = "UTC") - year <- strftime(datetime, "%Y", tz = "UTC") + day <- strftime(datetime, "%d", tz = "UTC") + month <- strftime(datetime, "%m", tz = "UTC") + year <- strftime(datetime, "%Y", tz = "UTC") time.history <- strftime(datetime, "%H%M%S", tz = "UTC") # Locate history file - history.search <- sprintf("%1$s-S-%2$s-%3$s-%4$s", - history.prefix, year, month, day) + history.search <- sprintf( + "%1$s-S-%2$s-%3$s-%4$s", + history.prefix, year, month, day + ) history.name <- list.files(history.path, history.search) history.full.path <- file.path(history.path, history.name) if (length(history.name) > 1) { @@ -232,7 +241,7 @@ EDR.preprocess.history <- function(history.path, output.path, datetime, history. 
stop("No history files found") } # Copy and rename history file - history.new.name <- gsub('([[:digit:]]{6})', time.history, history.name) + history.new.name <- gsub("([[:digit:]]{6})", time.history, history.name) history.new.path <- file.path(output.path, history.new.name) history.copy <- file.copy(history.full.path, history.new.path, overwrite = FALSE) if (!history.copy) { diff --git a/modules/rtm/R/fortran.datamodule.R b/modules/rtm/R/fortran.datamodule.R index 2e6eb1bd6ee..94977f4738b 100644 --- a/modules/rtm/R/fortran.datamodule.R +++ b/modules/rtm/R/fortran.datamodule.R @@ -1,10 +1,10 @@ #' List to FORTRAN data module -#' +#' #' Convert R list to a Fortran `/data/` module block #' @author Alexey Shiklomanov #' @details For models with large constants (e.g. absorption features in the #' PROSPECT model), it may be preferable to store these in FORTRAN90 -#' modules. However, manually creating and formatting these files is +#' modules. However, manually creating and formatting these files is #' tedious. This script allows you to automatically generate module files #' from R lists. It automatically interprets the object lengths as array #' dimensions (only vectors are supported right now -- higher dimension @@ -16,22 +16,26 @@ #' @param modname Name of the module. We suggest the format 'MOD_yourmodname'. #' @param fname Output file name. Defaults to 'yourmodname.f90' #' @examples -#' w <- 3.2 -#' x <- 1:5 -#' y <- 6:15 -#' z <- seq(exp(1), pi, length.out=42) -#' l <- list(x=x, y=y, z=z) ## NOTE that names must be explicitly declared -#' l.types <- c('real','integer', 'real*4', 'real*8') -#' fortran_data_module(l, l.types, 'testmod', -#' file.path(tempdir(), "testmod.f90")) +#' w <- 3.2 +#' x <- 1:5 +#' y <- 6:15 +#' z <- seq(exp(1), pi, length.out = 42) +#' l <- list(x = x, y = y, z = z) ## NOTE that names must be explicitly declared +#' l.types <- c("real", "integer", "real*4", "real*8") +#' fortran_data_module( +#' l, l.types, "testmod", +#' file.path(tempdir(), "testmod.f90") +#' ) #' -#' x <- runif(10) -#' y <- rnorm(10) -#' z <- rgamma(10, 3) -#' d <- data.frame(x,y,z) ## NOTE that data.frames are just named lists -#' d.types <- rep('real*8', ncol(d)) -#' fortran_data_module(d, d.types, 'random', -#' file.path(tempdir(), "random.f90")) +#' x <- runif(10) +#' y <- rnorm(10) +#' z <- rgamma(10, 3) +#' d <- data.frame(x, y, z) ## NOTE that data.frames are just named lists +#' d.types <- rep("real*8", ncol(d)) +#' fortran_data_module( +#' d, d.types, "random", +#' file.path(tempdir(), "random.f90") +#' ) #' @export fortran_data_module <- function(dat, types, modname, fname = paste0(modname, ".f90")) { if (!is.list(dat)) { @@ -42,8 +46,8 @@ fortran_data_module <- function(dat, types, modname, fname = paste0(modname, ".f } obj.names <- names(dat) ld <- length(dat) - - write.strings <- list() + + write.strings <- list() write.strings[1] <- paste0("MODULE", modname) write.strings[2] <- "implicit.none" write.strings[3] <- "integer :: i" @@ -53,12 +57,12 @@ fortran_data_module <- function(dat, types, modname, fname = paste0(modname, ".f j <- j + 1 write.strings[j] <- sprintf("%s, dimension(%d) :: %s", types[i], length(d), obj.names[i]) } - + for (i in seq_len(ld)) { - d <- dat[[i]] - dc <- sprintf("%g", d) - d10 <- length(d)%/%10 - if (length(d)%%10 != 0) { + d <- dat[[i]] + dc <- sprintf("%g", d) + d10 <- length(d) %/% 10 + if (length(d) %% 10 != 0) { d10 <- d10 + 1 } dmin <- 1 @@ -77,7 +81,7 @@ fortran_data_module <- function(dat, types, modname, fname = paste0(modname, ".f } j <- j + 1 
write.strings[j] <- sprintf("END MODULE %s", modname) - + write("! Automatically generated module", fname) for (s in write.strings) { write(s, fname, append = TRUE) diff --git a/modules/rtm/R/generate-rsr.R b/modules/rtm/R/generate-rsr.R index bea2fc6fa13..96f368d1e72 100644 --- a/modules/rtm/R/generate-rsr.R +++ b/modules/rtm/R/generate-rsr.R @@ -1,46 +1,46 @@ #' Generate relative spectral response (RSR) matrix based on FWHM data -#' +#' #' @param wavelength Vector of average band widths, as reported in FWHM data. -#' @param fwhm Vector of full-width half maximum (FWHM) bandwidths, as reported +#' @param fwhm Vector of full-width half maximum (FWHM) bandwidths, as reported #' in FWHM data. rsr.from.fwhm <- function(wavelength, fwhm) { sigma <- fwhm / 2 / abs(qnorm(0.25)) - rsr <- t(sapply(400:2500, dnorm, wavelength, sigma)) + rsr <- t(sapply(400:2500, dnorm, wavelength, sigma)) rownames(rsr) <- 400:2500 rsr <- cbind(400:2500 - 399, rsr) colnames(rsr) <- c("index", sprintf("B%s", seq_along(fwhm))) colsums <- colSums(rsr) - lt1 <- which(colsums < 0.99) + lt1 <- which(colsums < 0.99) rsr.sub <- rsr[, -lt1] return(rsr.sub) } # rsr.from.fwhm #' Trim RSR matrix to wavelength limits -#' +#' #' @param rsr RSR matrix #' @param wl.min Minimum wavelength (inclusive, default = 400) #' @param wl.max Maximum wavelength (inclusive, default = 2500) trim.rsr <- function(rsr, wl.min = 400, wl.max = 2500) { - inds.rsr <- as.logical((rsr[, "Wavelength"] >= wl.min) * - (rsr[, "Wavelength"] <= wl.max)) - rsr.sub <- rsr[inds.rsr, ] - rownames(rsr.sub) <- rsr.sub[, "Wavelength"] + inds.rsr <- as.logical((rsr[, "Wavelength"] >= wl.min) * + (rsr[, "Wavelength"] <= wl.max)) + rsr.sub <- rsr[inds.rsr, ] + rownames(rsr.sub) <- rsr.sub[, "Wavelength"] colnames(rsr.sub)[1] <- "index" - rsr.sub[, "index"] <- rsr.sub[, "index"] - 399 + rsr.sub[, "index"] <- rsr.sub[, "index"] - 399 return(rsr.sub) } # trim.rsr # Generate RSR matrices for all sensors and return as list -# -# Only needs to be called when updating these funcitons with new +# +# Only needs to be called when updating these funcitons with new # data generate.rsr.all <- function(path.to.licor = NULL) { data(raw.sensor.data) - rsr.aviris.ng <- with(fwhm.aviris.ng, rsr.from.fwhm(Wavelength, fwhm)) + rsr.aviris.ng <- with(fwhm.aviris.ng, rsr.from.fwhm(Wavelength, fwhm)) rsr.aviris.classic <- with(fwhm.aviris.classic, rsr.from.fwhm(avg, fwhm)) - rsr.hyperion <- with(fwhm.hyperion, rsr.from.fwhm(avg, rng)) + rsr.hyperion <- with(fwhm.hyperion, rsr.from.fwhm(avg, rng)) rsr.chris.proba <- with(bandwidth.chrisproba, rsr.from.fwhm(Mid, Max - Min)) rsr.landsat5 <- trim.rsr(rsr.landsat5) rsr.landsat7 <- trim.rsr(rsr.landsat7) @@ -53,33 +53,35 @@ generate.rsr.all <- function(path.to.licor = NULL) { } else { rsr.licro <- NA } - sensor.rsr <- list(aviris.ng = rsr.aviris.ng, - aviris.classic = rsr.aviris.classic, - hyperion = rsr.hyperion, - chris.proba = rsr.chris.proba, - landsat5 = rsr.landsat5, - landsat7 = rsr.landsat7, - landsat8 = rsr.landsat8, - modis = rsr.modis, - viirs = rsr.viirs, - avhrr = rsr.avhrr, - licor = rsr.licor) + sensor.rsr <- list( + aviris.ng = rsr.aviris.ng, + aviris.classic = rsr.aviris.classic, + hyperion = rsr.hyperion, + chris.proba = rsr.chris.proba, + landsat5 = rsr.landsat5, + landsat7 = rsr.landsat7, + landsat8 = rsr.landsat8, + modis = rsr.modis, + viirs = rsr.viirs, + avhrr = rsr.avhrr, + licor = rsr.licor + ) return(sensor.rsr) } # generate.rsr.all #' Read and process RSR data from directory -#' +#' #' @param dir.path Directory containing 
RSR data #' @param type Type of sensor. Options are: landsat, avhrr read.rsr.folder <- function(dir.path, type) { - type <- tolower(type) - flist <- list.files(dir.path) - nbands <- length(flist) + type <- tolower(type) + flist <- list.files(dir.path) + nbands <- length(flist) bandnames <- gsub("(.*)[.]csv", "\\1", flist) band.list <- list() for (i in seq_len(nbands)) { - fpath <- file.path(dir.path, flist[i]) + fpath <- file.path(dir.path, flist[i]) raw.input <- read.csv(fpath) if (type == "landsat") { out.dat <- raw.input[, 1:2] diff --git a/modules/rtm/R/generate.noise.R b/modules/rtm/R/generate.noise.R index 73db93975e1..70a05e13278 100644 --- a/modules/rtm/R/generate.noise.R +++ b/modules/rtm/R/generate.noise.R @@ -1,13 +1,13 @@ #' Generate autocorrelated spectral noise -#' +#' #' @param n Length of output vector (default = 2101) #' @param sigma Gaussian noise standard deviation (default=1e-4) #' @param fw Filter width. Will be coerced to an odd number if even (default = 201). #' @param fsd Scaling factor for filter standard deviation (default = 6) #' @export generate.noise <- function(n = 2101, sigma = 1e-04, fw = 201, fsd = 6) { - if (fw%%2 == 0) { - fw <- fw + 1 # fw must be odd + if (fw %% 2 == 0) { + fw <- fw + 1 # fw must be odd } f.in <- seq_len(fw) f.raw <- dnorm(f.in, median(f.in), fw / fsd) diff --git a/modules/rtm/R/gpm.R b/modules/rtm/R/gpm.R index 9f3d1382e21..501d816b6bf 100644 --- a/modules/rtm/R/gpm.R +++ b/modules/rtm/R/gpm.R @@ -12,6 +12,6 @@ generalized_plate_model <- function(k, refractive, N) { length(N) == 1 ) RT <- matrix(0, 2101, 2) - outlist <- .Fortran("gpm", k, refractive, N, RT, PACKAGE="PEcAnRTM") + outlist <- .Fortran("gpm", k, refractive, N, RT, PACKAGE = "PEcAnRTM") spectra(outlist[[4]], 400:2500) } diff --git a/modules/rtm/R/helpers.R b/modules/rtm/R/helpers.R index 65c13cb88ed..074652901b9 100644 --- a/modules/rtm/R/helpers.R +++ b/modules/rtm/R/helpers.R @@ -1,4 +1,4 @@ -if (requireNamespace('PEcAn.logger')) { +if (requireNamespace("PEcAn.logger")) { stop <- PEcAn.logger::logger.severe warning <- PEcAn.logger::logger.warn message <- PEcAn.logger::logger.info diff --git a/modules/rtm/R/invert.auto.R b/modules/rtm/R/invert.auto.R index 87968717ac7..d0e3cfea1e4 100644 --- a/modules/rtm/R/invert.auto.R +++ b/modules/rtm/R/invert.auto.R @@ -14,19 +14,19 @@ #' @param parallel.output Filename (or '' for stdout) for printing parallel #' outputs. Use with caution. Default = `'/dev/null'`. #' @inheritParams invert.custom -#' +#' #' @details #' Parameters specific to `invert.auto` are described here. #' For the remaining parameters, see [invert.custom()]. -#' +#' #' * `model` -- The model to be inverted. This should be an R function that #' takes \code{params} as input and returns one column of \code{observed} #' (nrows should be the same). Constants should be implicitly included here. #' #' * `nchains` -- Number of independent chains. -#' +#' #' * `inits.function` -- Function for generating initial conditions. -#' +#' #' * `ngibbs.max` -- Maximum number of total iterations (per chain). DEFAULT = 5e6 #' #' * `ngibbs.min` -- Minimum number of total iterations (per chain). DEFAULT = 5000. 
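Illustrative sketch only, not applied by this patch: the invert.auto()-specific entries documented above are supplied through a single invert.options list, and entries left out are filled with defaults by the code in the next hunk; `obs` and `my_rtm` are hypothetical placeholders.

## Illustrative sketch only -- not applied by this patch.
io <- list(
  model          = function(params) my_rtm(params),  # must return one column of 'obs'
  nchains        = 3,
  inits.function = function() rlnorm(5, 0, 0.5),
  ngibbs.max     = 1e5,
  ngibbs.min     = 5000
  # plus the invert.custom() entries (inits, prior.function, ...) documented later in this patch
)
fit <- invert.auto(observed = obs, invert.options = io, parallel = TRUE, parallel.cores = 2)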
@@ -52,11 +52,10 @@ invert.auto <- function(observed, invert.options, return.samples = TRUE, save.samples = NULL, - quiet=FALSE, - parallel=TRUE, - parallel.cores=NULL, - parallel.output = '/dev/null') { - + quiet = FALSE, + parallel = TRUE, + parallel.cores = NULL, + parallel.output = "/dev/null") { if (parallel == TRUE) { testForPackage("parallel") } else { @@ -66,26 +65,34 @@ invert.auto <- function(observed, invert.options, ngibbs.max <- invert.options$ngibbs.max if (is.null(ngibbs.max)) { ngibbs.max <- 1e6 - message("ngibbs.max not provided. ", - "Setting default to ", ngibbs.max) + message( + "ngibbs.max not provided. ", + "Setting default to ", ngibbs.max + ) } ngibbs.min <- invert.options$ngibbs.min if (is.null(ngibbs.min)) { ngibbs.min <- 5000 - message("ngibbs.min not provided. ", - "Setting default to ", ngibbs.min) + message( + "ngibbs.min not provided. ", + "Setting default to ", ngibbs.min + ) } ngibbs.step <- invert.options$ngibbs.step if (is.null(ngibbs.step)) { ngibbs.step <- 1000 - message("ngibbs.step not provided. ", - "Setting default to ", ngibbs.step) + message( + "ngibbs.step not provided. ", + "Setting default to ", ngibbs.step + ) } nchains <- invert.options$nchains if (is.null(nchains)) { nchains <- 3 - message("nchains not provided. ", - "Setting default to ", nchains) + message( + "nchains not provided. ", + "Setting default to ", nchains + ) } inits.function <- invert.options$inits.function if (is.null(inits.function)) { @@ -93,8 +100,10 @@ invert.auto <- function(observed, invert.options, } if (is.null(invert.options$do.lsq)) { invert.options$do.lsq <- FALSE - message("do.lsq not provided. ", - "Setting default to ", invert.options$do.lsq) + message( + "do.lsq not provided. ", + "Setting default to ", invert.options$do.lsq + ) } if (invert.options$do.lsq) { testForPackage("minpack.lm") @@ -102,20 +111,26 @@ invert.auto <- function(observed, invert.options, iter_conv_check <- invert.options$iter_conv_check if (is.null(iter_conv_check)) { iter_conv_check <- 15000 - message("iter_conv_check not provided. ", - "Setting default to ", iter_conv_check) + message( + "iter_conv_check not provided. ", + "Setting default to ", iter_conv_check + ) } threshold <- invert.options$threshold if (is.null(threshold)) { threshold <- 1.1 - message("threshold not provided. ", - "Setting default to ", threshold) + message( + "threshold not provided. ", + "Setting default to ", threshold + ) } calculate.burnin <- invert.options$calculate.burnin if (is.null(calculate.burnin)) { calculate.burnin <- TRUE - message("calculate.burnin not provided. ", - "Setting default to ", calculate.burnin) + message( + "calculate.burnin not provided. ", + "Setting default to ", calculate.burnin + ) } # Set up cluster for parallel execution @@ -128,9 +143,13 @@ invert.auto <- function(observed, invert.options, if (!is.numeric(parallel.cores) | parallel.cores %% 1 != 0) { stop("Invalid argument to 'parallel.cores'. Must be integer or NULL") } else if (parallel.cores > maxcores) { - warning(sprintf("Requested %1$d cores but only %2$d cores available. ", - parallel.cores, maxcores), - "Using only available cores.") + warning( + sprintf( + "Requested %1$d cores but only %2$d cores available. ", + parallel.cores, maxcores + ), + "Using only available cores." + ) parallel.cores <- maxcores } } @@ -141,28 +160,34 @@ invert.auto <- function(observed, invert.options, # Otherwise, chains may start on same seed and end up identical. 
parallel::clusterSetRNGStream(cl) - message(sprintf("Running %d chains in parallel. ", nchains), - "Progress bar unavailable") + message( + sprintf("Running %d chains in parallel. ", nchains), + "Progress bar unavailable" + ) } # Create inversion function to be passed to parLapply invert.function <- function(x) { invert.options$inits <- x$inits invert.options$resume <- x$resume - samps <- invert.custom(observed = observed, - invert.options = invert.options, - quiet = quiet, - return.resume = TRUE, - runID = x$runID) + samps <- invert.custom( + observed = observed, + invert.options = invert.options, + quiet = quiet, + return.resume = TRUE, + runID = x$runID + ) return(samps) } runID_list <- seq_len(nchains) inputs <- list() for (i in seq_len(nchains)) { - inputs[[i]] <- list(runID = runID_list[i], - inits = inits.function(), - resume = NULL) + inputs[[i]] <- list( + runID = runID_list[i], + inits = inits.function(), + resume = NULL + ) } # Do initialization step if provided @@ -190,52 +215,62 @@ invert.auto <- function(observed, invert.options, } } - resume <- lapply(output.list, '[[', 'resume') - out <- process_output(output.list = output.list, - iter_conv_check = iter_conv_check, - save.samples = save.samples, - threshold = threshold, - calculate.burnin = calculate.burnin) + resume <- lapply(output.list, "[[", "resume") + out <- process_output( + output.list = output.list, + iter_conv_check = iter_conv_check, + save.samples = save.samples, + threshold = threshold, + calculate.burnin = calculate.burnin + ) # Loop until convergence (skipped if finished == TRUE) invert.options$ngibbs <- ngibbs.step while (!out$finished & i.ngibbs < ngibbs.max) { if (!quiet) { - message(sprintf("Running iterations %d to %d", i.ngibbs, - i.ngibbs + ngibbs.step)) + message(sprintf( + "Running iterations %d to %d", i.ngibbs, + i.ngibbs + ngibbs.step + )) } inits <- lapply(out$samples, getLastRow) inputs <- list() for (i in seq_len(nchains)) { - inputs[[i]] <- list(runID = runID_list[i], - inits = inits[[i]], - resume = resume[[i]]) + inputs[[i]] <- list( + runID = runID_list[i], + inits = inits[[i]], + resume = resume[[i]] + ) } if (parallel) { output.list <- parallel::parLapply(cl, inputs, invert.function) } else { output.list <- list() for (i in seq_along(inputs)) { - message(sprintf('Running chain %d of %d', i, nchains)) + message(sprintf("Running chain %d of %d", i, nchains)) output.list[[i]] <- invert.function(inputs[[i]]) } } i.ngibbs <- i.ngibbs + ngibbs.step - resume <- lapply(output.list, '[[', 'resume') - out <- process_output(output.list = output.list, - prev_out = out, - iter_conv_check = iter_conv_check, - save.samples = save.samples, - threshold = threshold, - calculate.burnin = calculate.burnin) + resume <- lapply(output.list, "[[", "resume") + out <- process_output( + output.list = output.list, + prev_out = out, + iter_conv_check = iter_conv_check, + save.samples = save.samples, + threshold = threshold, + calculate.burnin = calculate.burnin + ) } if (i.ngibbs > ngibbs.max & !out$finished) { - warning("Convergence was not achieved, and max iterations exceeded. ", - "Returning results as 'NA'.") + warning( + "Convergence was not achieved, and max iterations exceeded. ", + "Returning results as 'NA'." 
+ ) } if (!return.samples) { - out$samples <- c('Samples not returned' = NA) + out$samples <- c("Samples not returned" = NA) } return(out) } # invert.auto @@ -265,7 +300,6 @@ process_output <- function(output.list, save.samples, threshold, calculate.burnin) { - samples.current <- lapply(output.list, "[[", "results") deviance_list.current <- lapply(output.list, "[[", "deviance") n_eff_list.current <- lapply(output.list, "[[", "n_eff") @@ -280,9 +314,12 @@ process_output <- function(output.list, } else { out$samples <- combineChains(prev_out$samples, samples.current) out$deviance_list <- mapply(c, prev_out$deviance_list, - deviance_list.current, SIMPLIFY = F) + deviance_list.current, + SIMPLIFY = F + ) out$n_eff_list <- mapply(c, prev_out$n_eff_list, n_eff_list.current, - SIMPLIFY = F) + SIMPLIFY = F + ) } rm(prev_out) @@ -291,11 +328,12 @@ process_output <- function(output.list, } out$nsamp <- coda::niter(out$samples) - nburn <- min(floor(out$nsamp/2), iter_conv_check) + nburn <- min(floor(out$nsamp / 2), iter_conv_check) burned_samples <- window(out$samples, start = nburn) check_initial <- check.convergence(burned_samples, - threshold = threshold, - autoburnin = FALSE) + threshold = threshold, + autoburnin = FALSE + ) if (check_initial$error) { warning("Could not calculate Gelman diag. Assuming no convergence.") out$finished <- FALSE @@ -309,11 +347,13 @@ process_output <- function(output.list, message("Passed initial convergence check.") } if (calculate.burnin) { - burn <- PEcAn.assim.batch::autoburnin(out$samples, return.burnin = TRUE, method = 'gelman.plot') + burn <- PEcAn.assim.batch::autoburnin(out$samples, return.burnin = TRUE, method = "gelman.plot") out$burnin <- burn$burnin if (out$burnin == 1) { - message("Robust convergence check in autoburnin failed. ", - "Resuming sampling.") + message( + "Robust convergence check in autoburnin failed. ", + "Resuming sampling." + ) out$finished <- FALSE return(out) } else { @@ -321,8 +361,10 @@ process_output <- function(output.list, out$results <- summary_simple(do.call(rbind, burn$samples)) } } else { - message("Skipping robust convergece check (autoburnin) because ", - "calculate.burnin == FALSE.") + message( + "Skipping robust convergece check (autoburnin) because ", + "calculate.burnin == FALSE." + ) out$burnin <- nburn out$results <- summary_simple(do.call(rbind, burned_samples)) } diff --git a/modules/rtm/R/invert.custom.R b/modules/rtm/R/invert.custom.R index 2eef1dfa089..b15bd966f65 100644 --- a/modules/rtm/R/invert.custom.R +++ b/modules/rtm/R/invert.custom.R @@ -1,56 +1,56 @@ #' Bayesian inversion of a model -#' -#' Performs an inversion of an arbitrary model using a modified -#' Metropolis Hastings algorithm with block sampling. This may be slightly -#' slower than the implementation in Fortran, but is much more customizable, as +#' +#' Performs an inversion of an arbitrary model using a modified +#' Metropolis Hastings algorithm with block sampling. This may be slightly +#' slower than the implementation in Fortran, but is much more customizable, as #' the model can be any R function. -#' @param observed Vector, matrix, or data frame (coerced to matrix) of -#' observed values. For spectral data, wavelengths are rows and spectra are +#' @param observed Vector, matrix, or data frame (coerced to matrix) of +#' observed values. For spectral data, wavelengths are rows and spectra are #' columns. Dimensions must align with the output of `model`. #' @param invert.options R list object containing inversion settings. See details. 
#' @param quiet Suppress progress bar and status messages. Default=FALSE -#' @param return.resume If `TRUE`, return results as list that includes current -#' Jump distribution (useful for continuing an ongoing run) and acceptance +#' @param return.resume If `TRUE`, return results as list that includes current +#' Jump distribution (useful for continuing an ongoing run) and acceptance #' rate. Default = `FALSE`. #' @param runID Run-unique ID. Useful for parallel runs. Default=NULL #' @details #' `inversion.options` contains the following: -#' +#' #' * `inits` -- Vector of initial values of model parameters to be inverted. #' #' * `ngibbs` -- Number of MCMC iterations #' #' * `prior.function` -- Function for use as prior. -#' Should take a vector of parameters as input and return a single value -- the +#' Should take a vector of parameters as input and return a single value -- the #' sum of their log-densities -- as output. #' #' * `param.mins` -- Vector of minimum values for inversion parameters -#' +#' #' * `param.maxs` -- Vector of minimum values for inversion parameters #' #' * `model` -- The model to be inverted. -#' This should be an R function that takes `params` and `runID` as input and +#' This should be an R function that takes `params` and `runID` as input and #' returns one column of `observed` (nrows should be the same). #' Constants should be implicitly included here. #' -#' * `adapt` -- Number of steps for adapting covariance matrix (i.e. adapt +#' * `adapt` -- Number of steps for adapting covariance matrix (i.e. adapt #' every 'n' steps). Default=100 -#' +#' #' * `adj_min` -- Minimum threshold for rescaling Jump standard deviation. #' Default = 0.1. -#' -#' * `target` -- Target acceptance rate. Default=0.234, based on recommendation +#' +#' * `target` -- Target acceptance rate. Default=0.234, based on recommendation #' for multivariate block sampling in Haario et al. 2001 -#' -#' * `do.lsq` -- Perform least squares optimization first (see `invert.lsq`), +#' +#' * `do.lsq` -- Perform least squares optimization first (see `invert.lsq`), #' and use outputs to initialize Metropolis Hastings. #' This may improve mixing time, but risks getting caught in a local minimum. #' Default=FALSE #' #' * `catch_error` -- If `TRUE` (default), wrap model in `tryCatch` to prevent sampling termination on model execution error. -#' @references -#' * Haario, Heikki; Saksman, Eero; Tamminen, Johanna. An adaptive Metropolis -#' algorithm. Bernoulli 7 (2001), no. 2, 223--242. +#' @references +#' * Haario, Heikki; Saksman, Eero; Tamminen, Johanna. An adaptive Metropolis +#' algorithm. Bernoulli 7 (2001), no. 2, 223--242. #' http://projecteuclid.org/euclid.bj/1080222083. 
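Illustrative sketch only, not applied by this patch: the invert.options entries listed in the details above, assembled for a hypothetical invert.custom() call; `obs_refl` is a placeholder and the prior reuses the PROSPECT-5 defaults from prior.R (restyled later in this patch).

## Illustrative sketch only -- not applied by this patch.
opts <- list(
  inits          = c(N = 1.4, Cab = 40, Car = 10, Cw = 0.01, Cm = 0.009),
  ngibbs         = 10000,
  prior.function = with(prior.defaultvals.prospect(sd.inflate = 3),
                        priorfunc.prospect(mu, sigma)),      # sum of log prior densities
  model          = function(params, runID) prospect(params, 5)[, 1],
  param.mins     = c(1, 0, 0, 0, 0),
  do.lsq         = FALSE,
  catch_error    = TRUE
)
out <- invert.custom(observed = obs_refl, invert.options = opts, return.resume = TRUE)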
#' @export invert.custom <- function(observed, invert.options, @@ -64,26 +64,32 @@ invert.custom <- function(observed, invert.options, n_obs <- nspec * nwl need_opts <- c("inits", "prior.function", "model") - available_defaults <- c("param.mins", "param.maxs", "adapt", - "adj_min", "target", "do.lsq", "catch_error") + available_defaults <- c( + "param.mins", "param.maxs", "adapt", + "adj_min", "target", "do.lsq", "catch_error" + ) have.invert.options <- names(invert.options) match_need <- need_opts %in% have.invert.options if (any(!match_need)) { error.msg <- paste("Missing the following invert.options:", - paste(need_opts[!match_need], - collapse=" "), - "Try modifying a default.invert.options() object", - sep = "\n") + paste(need_opts[!match_need], + collapse = " " + ), + "Try modifying a default.invert.options() object", + sep = "\n" + ) stop(error.msg) } match_default <- available_defaults %in% have.invert.options if (any(!match_default)) { msg <- paste("Using the following default options:", - paste(available_defaults[!match_default], - collapse=" "), - sep = "\n") + paste(available_defaults[!match_default], + collapse = " " + ), + sep = "\n" + ) message(msg) } @@ -151,13 +157,16 @@ invert.custom <- function(observed, invert.options, # Set up inversion if (do.lsq) { - fit <- invert.lsq(observed, inits, model, - lower = param.mins, upper = param.maxs) + fit <- invert.lsq(observed, inits, model, + lower = param.mins, upper = param.maxs + ) inits <- fit$par } if (!all(diag(init.Jump) > 0)) { - warning("Passed init.Jump matrix with zero values on diagonals. ", - "Reverting to default initial Jump matrix") + warning( + "Passed init.Jump matrix with zero values on diagonals. ", + "Reverting to default initial Jump matrix" + ) init.Jump <- NULL } if (is.null(init.Jump)) { @@ -198,25 +207,28 @@ invert.custom <- function(observed, invert.options, } # Precalculate quantities for first inversion step - sigma2 <- init_sigma ^ 2 - tau <- 1/sigma2 - PrevSpec <- tryCatch({ + sigma2 <- init_sigma^2 + tau <- 1 / sigma2 + PrevSpec <- tryCatch( + { model(inits, runID) - }, error = function(e) { + }, + error = function(e) { print(e) stop("Initial model execution hit an error") - }) + } + ) PrevError <- PrevSpec - observed PrevSS <- sum(PrevError * PrevError) PrevPrior <- prior.function(inits) n_eff <- neff(PrevError) - logLL_term1 <- -0.5 * n_obs * log(sigma2 * n_obs/n_eff) - Prev_logLL_term2 <- -0.5 * tau * n_eff/n_obs * PrevSS + logLL_term1 <- -0.5 * n_obs * log(sigma2 * n_obs / n_eff) + Prev_logLL_term2 <- -0.5 * tau * n_eff / n_obs * PrevSS PrevLL <- logLL_term1 + Prev_logLL_term2 # Sampling loop for (ng in seq_len(ngibbs)) { - if (!quiet) { + if (!quiet) { setTxtProgressBar(pb, ng) } if (ng %% adapt < 1) { @@ -253,10 +265,10 @@ invert.custom <- function(observed, invert.options, if (samp) { if (catch_error) { TrySpec <- try(model(tvec, runID)) - if (inherits(TrySpec, "try-error")) { - warning("Model hit an error. Skipping to next iteration") - samp <- FALSE - } + if (inherits(TrySpec, "try-error")) { + warning("Model hit an error. 
Skipping to next iteration") + samp <- FALSE + } } else { TrySpec <- model(tvec, runID) } @@ -274,10 +286,10 @@ invert.custom <- function(observed, invert.options, if (samp) { TryError <- TrySpec - observed TrySS <- sum(TryError * TryError) - Try_logLL_term2 <- -0.5 * tau * n_eff/n_obs * TrySS + Try_logLL_term2 <- -0.5 * tau * n_eff / n_obs * TrySS TryLL <- logLL_term1 + Try_logLL_term2 TryPost <- TryLL + TryPrior - Prev_logLL_term2 <- -0.5 * tau * n_eff/n_obs * PrevSS + Prev_logLL_term2 <- -0.5 * tau * n_eff / n_obs * PrevSS PrevLL <- logLL_term1 + Prev_logLL_term2 PrevPost <- PrevLL + PrevPrior a <- exp(TryPost - PrevPost) @@ -290,32 +302,35 @@ invert.custom <- function(observed, invert.options, PrevSS <- TrySS PrevPrior <- TryPrior n_eff <- neff(PrevError) - logLL_scale <- n_eff/n_obs + logLL_scale <- n_eff / n_obs ar <- ar + 1 } } results[ng, 1:npars] <- inits deviance_store[ng] <- -2 * PrevLL n_eff_store[ng] <- n_eff - rp1 <- tau_0 + n_obs/2 - rp2 <- tau_0 + PrevSS/2 + rp1 <- tau_0 + n_obs / 2 + rp2 <- tau_0 + PrevSS / 2 tau <- rgamma(1, rp1, rp2) - sigma2 <- 1/tau + sigma2 <- 1 / tau sigma <- sqrt(sigma2) results[ng, npars + 1] <- sigma - logLL_term1 <- -0.5 * n_obs * log(sigma2 * n_obs/n_eff) + logLL_term1 <- -0.5 * n_obs * log(sigma2 * n_obs / n_eff) } if (!quiet) { close(pb) } - out <- list(results = results, - deviance = deviance_store, - n_eff = n_eff_store) + out <- list( + results = results, + deviance = deviance_store, + n_eff = n_eff_store + ) if (return.resume) { - out <- append(out, list(resume = list(jump = Jump, - ar = ar, - sigma = sigma))) + out <- append(out, list(resume = list( + jump = Jump, + ar = ar, + sigma = sigma + ))) } return(out) } - diff --git a/modules/rtm/R/invert.lsq.R b/modules/rtm/R/invert.lsq.R index ef5e2e49a47..2bad3676fc2 100644 --- a/modules/rtm/R/invert.lsq.R +++ b/modules/rtm/R/invert.lsq.R @@ -1,14 +1,14 @@ #' Least squares model inversion -#' -#' Performs a least-squares inversion of an arbitrary radiative transfer model -#' (passed as an R function). The inversion attempts to minimize the sum of -#' residual least squares between modeled and observed spectra via the -#' Levenberg-Marquardt algorithm (`nls.lm` function from the `minpack.lm` +#' +#' Performs a least-squares inversion of an arbitrary radiative transfer model +#' (passed as an R function). The inversion attempts to minimize the sum of +#' residual least squares between modeled and observed spectra via the +#' Levenberg-Marquardt algorithm (`nls.lm` function from the `minpack.lm` #' package). #' @author Alexey Shiklomanov #' @param observed Vector of observations (e.g. a reflectance spectrum). #' @param inits Vector of initial conditions for the parameters. -#' @param model An R function that calls the RTM and returns the error to be +#' @param model An R function that calls the RTM and returns the error to be #' minimized. Be sure to include constants here. #' @param lower Lower bounds on parameters (default=NULL, which means -Inf). #' @param upper Upper bounds on parameters (default=NULL, which means +Inf). diff --git a/modules/rtm/R/neff.R b/modules/rtm/R/neff.R index 0417f9306b5..a45e374d763 100644 --- a/modules/rtm/R/neff.R +++ b/modules/rtm/R/neff.R @@ -10,22 +10,22 @@ neff <- function(x, ...) 
{ #' @export neff.default <- function(x, lag.max = NULL, min_rho = 0.1) { - x_use <- x[!is.na(x)] - nx <- length(x_use) - if (is.null(lag.max)) { - # Same as in the ACF function - lag.max <- floor(10 * log10(nx)) - } - rho_all <- .Call(stats:::C_acf, x_use, lag.max, TRUE) - rho <- rho_all[-1] - too_small <- rho < min_rho - if (any(too_small)) { - rho <- rho[seq_len(which(too_small)[1])] - } - nrho <- length(rho) - tau <- 1 + 2 * sum((1 - seq_len(nrho) / nx) * rho) - n_eff <- nx / tau - return(n_eff) + x_use <- x[!is.na(x)] + nx <- length(x_use) + if (is.null(lag.max)) { + # Same as in the ACF function + lag.max <- floor(10 * log10(nx)) + } + rho_all <- .Call(stats:::C_acf, x_use, lag.max, TRUE) + rho <- rho_all[-1] + too_small <- rho < min_rho + if (any(too_small)) { + rho <- rho[seq_len(which(too_small)[1])] + } + nrho <- length(rho) + tau <- 1 + 2 * sum((1 - seq_len(nrho) / nx) * rho) + n_eff <- nx / tau + return(n_eff) } #' @export @@ -36,8 +36,8 @@ neff.matrix <- function(x, ...) { # Calculate max ACF lag from correlation power analysis corr_max_lag <- function(nx, r = 0.1, sig.level = 0.05, power = 0.95, ...) { - testForPackage('pwr') - power_analysis <- pwr::pwr.r.test(n = NULL, r = r, sig.level = sig.level, power = power, ...) - nlag <- ceiling(nx - power_analysis$n) - return(nlag) + testForPackage("pwr") + power_analysis <- pwr::pwr.r.test(n = NULL, r = r, sig.level = sig.level, power = power, ...) + nlag <- ceiling(nx - power_analysis$n) + return(nlag) } diff --git a/modules/rtm/R/params2edr.R b/modules/rtm/R/params2edr.R index 46204576c28..58222be87c1 100644 --- a/modules/rtm/R/params2edr.R +++ b/modules/rtm/R/params2edr.R @@ -2,31 +2,31 @@ #' #' Creates a nested list whose components are suitable for passing to EDR. #' -#' If `prospect = TRUE`, parameters prefixed with `prospect_` are passed to -#' [prospect] with the specified `version`, and other parameters are passed to +#' If `prospect = TRUE`, parameters prefixed with `prospect_` are passed to +#' [prospect] with the specified `version`, and other parameters are passed to #' `trait.values`. #' -#' The regular expression defining the separation is greedy, i.e. -#' `temperate.Early_Hardwood.SLA` will separate into `temperate.Early_Hardwood` -#' and `SLA` (assuming the default `sep = "."`). Therefore, it is crucial that -#' trait names not contain any `sep` characters (neither ED nor PROSPECT -#' parameters should anyway). If this is a problem, use an alternate separator +#' The regular expression defining the separation is greedy, i.e. +#' `temperate.Early_Hardwood.SLA` will separate into `temperate.Early_Hardwood` +#' and `SLA` (assuming the default `sep = "."`). Therefore, it is crucial that +#' trait names not contain any `sep` characters (neither ED nor PROSPECT +#' parameters should anyway). If this is a problem, use an alternate separator #' (e.g. `|`). #' -#' Note that using `sep = "."` allows this function to directly invert the -#' default behavior of `unlist`. That is, calling -#' `unlist(params2edr(params, prospect = FALSE)$trait.values)` will return the input vector of -#' trait values. This makes `unlist` a convenient way to go from a +#' Note that using `sep = "."` allows this function to directly invert the +#' default behavior of `unlist`. That is, calling +#' `unlist(params2edr(params, prospect = FALSE)$trait.values)` will return the input vector of +#' trait values. This makes `unlist` a convenient way to go from a #' `trait.values` list to a properly formatted `params` vector. 
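Illustrative sketch only, not applied by this patch: the name-splitting and unlist() round trip described in the paragraph above, using made-up PFT and trait names in the `temperate.Early_Hardwood.SLA` pattern.

## Illustrative sketch only -- not applied by this patch.
params <- c(
  "temperate.Early_Hardwood.SLA"   = 35,
  "temperate.Early_Hardwood.Vcmax" = 60,
  "temperate.Late_Hardwood.SLA"    = 28
)
split_up <- params2edr(params, sep = ".", prospect = FALSE)
names(split_up$trait.values)   # "temperate.Early_Hardwood" "temperate.Late_Hardwood"
unlist(split_up$trait.values)  # recovers the original 'params' vector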
#' -#' Because unused ED parameters in the `config.xml` are ignored, the PROSPECT -#' parameters are saved in the `trait.values` object as well, which may be +#' Because unused ED parameters in the `config.xml` are ignored, the PROSPECT +#' parameters are saved in the `trait.values` object as well, which may be #' useful for debugging. #' #' @param params Named parameter vector -#' @param sep Separator between PFT name and trait name. Must be a single +#' @param sep Separator between PFT name and trait name. Must be a single #' character (default = "."). -#' @param prospect Logical. If `TRUE` (default), scan for PROSPECT traits and +#' @param prospect Logical. If `TRUE` (default), scan for PROSPECT traits and #' pass them to PROSPECT. #' @param version PROSPECT version #' @return List containing `spectra_list` and `trait.values`, both objects needed by [EDR]. @@ -46,7 +46,6 @@ params2edr <- function(params, sep = ".", prospect = TRUE, version = 5) { names(result$trait.values) <- distinct_pfts if (prospect) { result$spectra_list <- result$trait.values - } for (pft in distinct_pfts) { pft_ind <- pfts == pft diff --git a/modules/rtm/R/plot.spectra.R b/modules/rtm/R/plot.spectra.R index 75a1fe96ceb..fc5a9e2960e 100644 --- a/modules/rtm/R/plot.spectra.R +++ b/modules/rtm/R/plot.spectra.R @@ -6,9 +6,11 @@ #' @export plot.spectra <- function(spectra, type = "l", ...) { if (ncol(spectra) > 1) { - warning("Multiple columns in spectra.", - "Only plotting first column.", - "You may want `matplot`.") + warning( + "Multiple columns in spectra.", + "Only plotting first column.", + "You may want `matplot`." + ) } wavelength <- wavelengths(spectra) value <- spectra[, 1] diff --git a/modules/rtm/R/print.spectra.R b/modules/rtm/R/print.spectra.R index 1505e3f2d95..fbba3a17b98 100644 --- a/modules/rtm/R/print.spectra.R +++ b/modules/rtm/R/print.spectra.R @@ -1,5 +1,5 @@ #' Print method for spectra S3 class -#' +#' #' @inheritParams wavelengths #' @param n Max number of rows to print (show first `n/2` and last `n/2` rows) #' @param ... 
Additional arguments to `print` diff --git a/modules/rtm/R/print_results_summary.R b/modules/rtm/R/print_results_summary.R index 0eca2714298..60bf30cadb4 100644 --- a/modules/rtm/R/print_results_summary.R +++ b/modules/rtm/R/print_results_summary.R @@ -1,5 +1,5 @@ #' Neatly print inversion results summary -#' +#' #' @author Alexey Shiklomanov #' @param output Output from \code{invert.auto} #' @export @@ -8,7 +8,7 @@ print_results_summary <- function(output) { npar <- sum(grepl(".q975$", names(results))) rawvec <- unlist(results) rawmat <- matrix(rawvec, ncol = npar, byrow = TRUE) - colnames(rawmat) <- gsub(".mu$", '', names(results)[1:npar]) + colnames(rawmat) <- gsub(".mu$", "", names(results)[1:npar]) rownames(rawmat) <- c("Mean", "SD", "2.5", "50", "97.5") return(rawmat) } diff --git a/modules/rtm/R/prior.R b/modules/rtm/R/prior.R index 4dd07a76310..2db2723433c 100644 --- a/modules/rtm/R/prior.R +++ b/modules/rtm/R/prior.R @@ -1,35 +1,35 @@ #' Functions for default priors #' Lognormal mean parameters -#' +#' #' @param mean Sample mean #' @param sd Sample standard deviation #' @export -lognorm.mu <- function(mean, sd) log(mean / sqrt(1 + (mean/sd)^2)) +lognorm.mu <- function(mean, sd) log(mean / sqrt(1 + (mean / sd)^2)) #' Lognormal sigma parameter -#' +#' #' @inheritParams lognorm.mu #' @export -lognorm.sigma <- function(mean, sd) sqrt(log(1 + (mean/sd)^2)) +lognorm.sigma <- function(mean, sd) sqrt(log(1 + (mean / sd)^2)) #' Default prior parameters for PROSPECT models -#' +#' #' @param sd.inflate Standard deviation multiplier (default = 3) #' @export prior.defaultvals.prospect <- function(sd.inflate = 3) { - pmean <- c(N = 0.7, Cab = 32.81, Car = 8.51, Cw = 0.0129, Cm = 0.0077) - psd <- c(N = 0.6, Cab = 17.87, Car = 3.2, Cw = 0.0073, Cm = 0.0035) - psd <- psd * sd.inflate - pmu <- lognorm.mu(pmean, psd) + pmean <- c(N = 0.7, Cab = 32.81, Car = 8.51, Cw = 0.0129, Cm = 0.0077) + psd <- c(N = 0.6, Cab = 17.87, Car = 3.2, Cw = 0.0073, Cm = 0.0035) + psd <- psd * sd.inflate + pmu <- lognorm.mu(pmean, psd) psigma <- lognorm.sigma(pmean, psd) return(list(mu = pmu, sigma = psigma)) } # prior.defaultvals.prospect #' Default PROSPECT 5 prior function -#' -#' @details Assumes lognormal distribution for all parameters. NOTE that prior +#' +#' @details Assumes lognormal distribution for all parameters. NOTE that prior #' on N is shifted by 1. 
#' @param pmu Lognormal mu parameter #' @param psigma Lognormal sigma parameter @@ -37,8 +37,10 @@ prior.defaultvals.prospect <- function(sd.inflate = 3) { priorfunc.prospect <- function(pmu, psigma) { prior <- function(params) { if (is.null(names(params))) { - warning("Parameters are not named.", "\n", - "Assuming N Cab (Car) (Cbrown) Cw Cm for priors") + warning( + "Parameters are not named.", "\n", + "Assuming N Cab (Car) (Cbrown) Cw Cm for priors" + ) params[1] <- params[1] - 1 } else { params["N"] <- params["N"] - 1 diff --git a/modules/rtm/R/pro2s.R b/modules/rtm/R/pro2s.R index c1adf275e03..251692acfd7 100644 --- a/modules/rtm/R/pro2s.R +++ b/modules/rtm/R/pro2s.R @@ -1,6 +1,6 @@ #' Coupled PROSPECT-Two-stream model -#' -#' @param param Model parameters, in the following order: N, Cab, (Car, +#' +#' @param param Model parameters, in the following order: N, Cab, (Car, #' Cbrown), Cw, Cm, solar zenith angle, LAI, soil_moisture #' @param prospect.version Version of PROSPECT to use (4, 5, or '5B'; default=5) #' @return Spectra matrix (see [spectra()]) for wavelengths 400 to 2500nm containing the following columns: @@ -32,16 +32,16 @@ pro2s <- function(param, prospect.version = 5) { } else { stop("prospect.version must be 4, 5, or 5B") } - nw <- 2101 # Length of PROSPECT output vector (400-2500nm, in 1 nm increments) - out.names <- c("alpha.c", "alpha.i", "Tc", "Ti", "Ac", "Ai") + nw <- 2101 # Length of PROSPECT output vector (400-2500nm, in 1 nm increments) + out.names <- c("alpha.c", "alpha.i", "Tc", "Ti", "Ac", "Ai") plist$alpha.c <- numeric(nw) plist$alpha.i <- numeric(nw) plist$Tc <- numeric(nw) plist$Ti <- numeric(nw) plist$Ac <- numeric(nw) plist$Ai <- numeric(nw) - inlist <- c(modname, plist, PACKAGE="PEcAnRTM") - outlist <- do.call(.Fortran, inlist) - out.mat <- do.call(cbind, outlist[out.names]) + inlist <- c(modname, plist, PACKAGE = "PEcAnRTM") + outlist <- do.call(.Fortran, inlist) + out.mat <- do.call(cbind, outlist[out.names]) spectra(out.mat, 400:2500) } # pro2s diff --git a/modules/rtm/R/process-output.R b/modules/rtm/R/process-output.R index c9d6e489011..838ce07476a 100644 --- a/modules/rtm/R/process-output.R +++ b/modules/rtm/R/process-output.R @@ -1,25 +1,25 @@ # Functions for processing output #' Burn-in and thinning of MCMC samples -#' +#' #' @param samples Matrix of MCMC samples -#' @param target Target number of samples (default = 5000). Only applicable if +#' @param target Target number of samples (default = 5000). Only applicable if #' auto=TRUE. -#' @param burnin.ratio Fraction of samples to burn-in; i.e. 2 means to remove -#' first 1/2 of samples, 3 means 1/3, etc. (default = 2). Only applicable if +#' @param burnin.ratio Fraction of samples to burn-in; i.e. 2 means to remove +#' first 1/2 of samples, 3 means 1/3, etc. (default = 2). Only applicable if #' auto=TRUE. -#' @param auto Whether or not to perform automatic burnin and thin based on +#' @param auto Whether or not to perform automatic burnin and thin based on #' target number of samples. 
#' @param burnin Number of samples to discard as burnin (auto must be FALSE) #' @param thin Thinning interval (auto must be FALSE) #' @export -burnin.thin <- function(samples, target = 5000, burnin.ratio = 2, - auto = TRUE, burnin = NULL, thin = NULL){ +burnin.thin <- function(samples, target = 5000, burnin.ratio = 2, + auto = TRUE, burnin = NULL, thin = NULL) { ngibbs <- nrow(samples) - if(auto) { + if (auto) { burnin <- floor(ngibbs / burnin.ratio) thin <- floor((ngibbs - burnin) / target) - if(thin < 1){ + if (thin < 1) { message("Fewer than target samples after burnin. No thinning applied.") thin <- 1 } @@ -30,7 +30,7 @@ burnin.thin <- function(samples, target = 5000, burnin.ratio = 2, } #' Load object from an RData file -#' +#' #' @param filename Full name (without path!) of RData file #' @param filepath Path of RData file (default='.') #' @export @@ -43,8 +43,8 @@ load.from.name <- function(filename, filepath = ".") { } #' Multivariate normal fit -#' -#' Fit multivariate normal to samples. Return means and covariance matrix as a +#' +#' Fit multivariate normal to samples. Return means and covariance matrix as a #' long list (for easy construction of data.tables) #' @param samples Matrix of MCMC samples. #' @export @@ -52,8 +52,10 @@ summary_mvnorm <- function(samples) { testForPackage("mclust") stopifnot(colnames(samples) != NULL) parnames <- colnames(samples) - sigmanames <- sprintf("%s.%s.sigma", rep(parnames, 1, each=6), - rep(parnames, 6)) + sigmanames <- sprintf( + "%s.%s.sigma", rep(parnames, 1, each = 6), + rep(parnames, 6) + ) fit <- mclust::mvn("XXX", samples) mu <- as.numeric(fit$parameters$mean) sigma <- c(fit$parameters$variance$Sigma) @@ -64,7 +66,7 @@ summary_mvnorm <- function(samples) { } #' Simple summary statistics on MCMC samples -#' +#' #' Calculate simple univariate summary statistics and return as named list #' @param samples Matrix of MCMC samples #' @export @@ -81,7 +83,9 @@ summary_simple <- function(samples) { names(med) <- sprintf("%s.med", parnames) q975 <- apply(samples, 2, quantile, 0.975, na.rm = TRUE) names(q975) <- sprintf("%s.q975", parnames) - out.list <- c(as.list(mu), as.list(sigma), as.list(q25), - as.list(med), as.list(q975)) + out.list <- c( + as.list(mu), as.list(sigma), as.list(q25), + as.list(med), as.list(q975) + ) return(out.list) } diff --git a/modules/rtm/R/process.specresponse.R b/modules/rtm/R/process.specresponse.R index 3cbf0819e6c..a1cc7ab581e 100644 --- a/modules/rtm/R/process.specresponse.R +++ b/modules/rtm/R/process.specresponse.R @@ -1,33 +1,33 @@ process.licor.rsr <- function(csv.path) { - licor.dat <- read.csv(csv.path) - blue <- licor.dat[1:2] - blue <- blue[complete.cases(blue), ] - blue.rsr <- interpolate.rsr(blue) - red <- licor.dat[3:4] - red <- red[complete.cases(red), ] - red.rsr <- interpolate.rsr(red) - farred <- licor.dat[5:6] - farred <- farred[complete.cases(farred), ] + licor.dat <- read.csv(csv.path) + blue <- licor.dat[1:2] + blue <- blue[complete.cases(blue), ] + blue.rsr <- interpolate.rsr(blue) + red <- licor.dat[3:4] + red <- red[complete.cases(red), ] + red.rsr <- interpolate.rsr(red) + farred <- licor.dat[5:6] + farred <- farred[complete.cases(farred), ] farred.rsr <- interpolate.rsr(farred) - licor.rsr <- cbind(400:2500 - 399, blue.rsr, red.rsr, farred.rsr) - licor.rsr <- licor.rsr[400:800 - 399, ] + licor.rsr <- cbind(400:2500 - 399, blue.rsr, red.rsr, farred.rsr) + licor.rsr <- licor.rsr[400:800 - 399, ] colnames(licor.rsr) <- c("index", "blue", "red", "farred") return(licor.rsr) } # process.licor.rsr 
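Illustrative sketch only, not applied by this patch: the process-output.R helpers restyled above, applied to a toy matrix of draws (hypothetical values).

## Illustrative sketch only -- not applied by this patch.
draws <- matrix(rnorm(20000), ncol = 2,
                dimnames = list(NULL, c("N", "Cab")))  # stand-in for MCMC samples
kept  <- burnin.thin(draws)    # defaults: discard the first half, thin toward ~5000 rows
stats <- summary_simple(kept)  # named list of per-parameter summaries (N.mu, ..., Cab.q975)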
-interpolate.rsr <- function(rsr, wl.start = 400, wl.end = 2500, +interpolate.rsr <- function(rsr, wl.start = 400, wl.end = 2500, zero.threshold = 0.001, normalize = TRUE) { - wl.int <- seq(floor(min(rsr[[1]])), ceiling(max(rsr[[1]]))) - wl.offset <- wl.start - 1 - wl.length <- wl.end - wl.offset - wl.index <- wl.int - wl.offset - rsr.full <- numeric(wl.length) + wl.int <- seq(floor(min(rsr[[1]])), ceiling(max(rsr[[1]]))) + wl.offset <- wl.start - 1 + wl.length <- wl.end - wl.offset + wl.index <- wl.int - wl.offset + rsr.full <- numeric(wl.length) rsr.spline <- splinefun(x = rsr[[1]], y = rsr[[2]]) rsr.interpolated <- rsr.spline(wl.int) rsr.interpolated[rsr.interpolated < zero.threshold] <- 0 if (normalize) { - rsr.interpolated <- rsr.interpolated/sum(rsr.interpolated) + rsr.interpolated <- rsr.interpolated / sum(rsr.interpolated) } rsr.full[wl.index] <- rsr.interpolated return(rsr.full) diff --git a/modules/rtm/R/prosail.R b/modules/rtm/R/prosail.R index 96800b34f6b..b8ac3cfdaac 100644 --- a/modules/rtm/R/prosail.R +++ b/modules/rtm/R/prosail.R @@ -18,7 +18,7 @@ #' * tto: Observer zenith angle #' * psi: Sun-sensor azimuth angle #' * psoil: Fraction of soil moisture -#' @return Spectra matrix (see [spectra()]) (2101 x 4) of reflectance factors +#' @return Spectra matrix (see [spectra()]) (2101 x 4) of reflectance factors #' for wavelengths 400 to 2500nm: #' * bi-hemispherical reflectance (rddt) #' * hemispherical directional (rsdt) @@ -27,18 +27,20 @@ #' @export pro4sail <- function(param) { plist <- as.list(param) - nw <- 2101 + nw <- 2101 plist$rddt <- numeric(nw) plist$rsdt <- numeric(nw) plist$rdot <- numeric(nw) plist$rsot <- numeric(nw) - inlist <- c("pro4sail", plist, PACKAGE="PEcAnRTM") + inlist <- c("pro4sail", plist, PACKAGE = "PEcAnRTM") outlist <- do.call(.Fortran, inlist) - lo <- length(outlist) - refl <- do.call(cbind, outlist[(lo - 3):lo]) + lo <- length(outlist) + refl <- do.call(cbind, outlist[(lo - 3):lo]) reflspec <- spectra(refl, 400:2500) - colnames(reflspec) <- c("bi-hemispherical", "hemispherical_directional", - "directional_hemispherical", "bi-directional") + colnames(reflspec) <- c( + "bi-hemispherical", "hemispherical_directional", + "directional_hemispherical", "bi-directional" + ) reflspec } # pro4sail @@ -64,7 +66,7 @@ pro4sail <- function(param) { #' * tto: Observer zenith angle #' * psi: Sun-sensor azimuth angle #' * psoil: Fraction of soil moisture -#' @return Spectra matrix (see [spectra()]) (2101 x 4) of reflectance factors +#' @return Spectra matrix (see [spectra()]) (2101 x 4) of reflectance factors #' for wavelengths 400 to 2500nm: #' * bi-hemispherical reflectance (rddt) #' * hemispherical directional (rsdt) @@ -73,17 +75,19 @@ pro4sail <- function(param) { #' @export pro4saild <- function(param) { plist <- as.list(param) - nw <- 2101 + nw <- 2101 plist$rddt <- numeric(nw) plist$rsdt <- numeric(nw) plist$rdot <- numeric(nw) plist$rsot <- numeric(nw) - inlist <- c("pro4saild", plist, PACKAGE="PEcAnRTM") + inlist <- c("pro4saild", plist, PACKAGE = "PEcAnRTM") outlist <- do.call(.Fortran, inlist) - lo <- length(outlist) - refl <- do.call(cbind, outlist[(lo - 3):lo]) + lo <- length(outlist) + refl <- do.call(cbind, outlist[(lo - 3):lo]) reflspec <- spectra(refl, 400:2500) - colnames(reflspec) <- c("bi-hemispherical", "hemispherical_directional", - "directional_hemispherical", "bi-directional") + colnames(reflspec) <- c( + "bi-hemispherical", "hemispherical_directional", + "directional_hemispherical", "bi-directional" + ) reflspec -} # pro4saild \ No 
newline at end of file +} # pro4saild diff --git a/modules/rtm/R/prospect.R b/modules/rtm/R/prospect.R index 5889f9de7ab..b6479e50a9e 100644 --- a/modules/rtm/R/prospect.R +++ b/modules/rtm/R/prospect.R @@ -1,5 +1,5 @@ #' PROSPECT (4, 5, or 5B) model -#' +#' #' R wrapper for PROSPECT models #' @author Alexey Shiklomanov #' @param param Vector of PROSPECT parameter values: @@ -11,14 +11,14 @@ #' * Cw: Leaf water content (cm) (>0) #' * Cm: Leaf dry matter content (ug/cm2) (>0) #' @param version PROSPECT version: 4, 5, or '5B' -#' @return Object of class `spectra` (see [spectra()]) with simulated +#' @return Object of class `spectra` (see [spectra()]) with simulated #' reflectance (column 1) and transmittance (column 2) from 400 to 2500 nm #' @export #' @useDynLib PEcAnRTM prospect <- function(param, version) { - version <- toupper(as.character(version)) - plist <- as.list(param) + version <- toupper(as.character(version)) + plist <- as.list(param) plist$RT <- matrix(0, 2101, 2) if (version == "4") { if (length(plist) != 5) { @@ -43,8 +43,8 @@ prospect <- function(param, version) { } else { stop("Version must be 4, 5, 5B, or D") } - - inlist <- c(inlist, PACKAGE="PEcAnRTM") + + inlist <- c(inlist, PACKAGE = "PEcAnRTM") outlist <- do.call(.Fortran, inlist) out <- spectra(outlist[[length(outlist)]], 400:2500) colnames(out) <- c("reflectance", "transmittance") @@ -78,11 +78,14 @@ params.prospectd <- c("N", "Cab", "Car", "Canth", "Cbrown", "Cw", "Cm") #' @title Defult inversion settings for PROSPECT 5 models #' @export default.settings.prospect <- list( - model = function(params, seed=NULL) prospect(params, 5)[,1], - inits.function = function() - with(prior.defaultvals.prospect(sd.inflate=3), - rlnorm(5, mu, sigma) + c("N"=1,"Cab"=0,"Car"=0,"Cw"=0,"Cm"=0)), - prior.function = with(prior.defaultvals.prospect(sd.inflate=3), priorfunc.prospect(mu,sigma)), + model = function(params, seed = NULL) prospect(params, 5)[, 1], + inits.function = function() { + with( + prior.defaultvals.prospect(sd.inflate = 3), + rlnorm(5, mu, sigma) + c("N" = 1, "Cab" = 0, "Car" = 0, "Cw" = 0, "Cm" = 0) + ) + }, + prior.function = with(prior.defaultvals.prospect(sd.inflate = 3), priorfunc.prospect(mu, sigma)), param.mins = c(1, 0, 0, 0, 0), param.maxs = c(Inf, Inf, Inf, Inf, Inf), ngibbs = 10000, @@ -97,4 +100,5 @@ default.settings.prospect <- list( save.samples = NULL, quiet = FALSE, adapt = 100, - adj_min = 0.1) + adj_min = 0.1 +) diff --git a/modules/rtm/R/resample.R b/modules/rtm/R/resample.R index 94de9d63846..eca6ac23f16 100644 --- a/modules/rtm/R/resample.R +++ b/modules/rtm/R/resample.R @@ -1,21 +1,21 @@ #' Resample vector, matrix, or spectra #' -#' Convenient wrapper around base R's `splinefun` and `approxfun`. See -#' [stats::splinefun()] and [stats::approxfun()] documentation for information +#' Convenient wrapper around base R's `splinefun` and `approxfun`. See +#' [stats::splinefun()] and [stats::approxfun()] documentation for information #' on different spline methods and additional arguments. #' -#' @param values Vector or matrix of values, or object of class [spectra()]. -#' Length of vector, or `nrow` of matrix must match length of `from`. For -#' `spectra`, `from` argument is omitted because it is taken from +#' @param values Vector or matrix of values, or object of class [spectra()]. +#' Length of vector, or `nrow` of matrix must match length of `from`. For +#' `spectra`, `from` argument is omitted because it is taken from #' `wavelengths`. 
-#' @param from X values for interpolation (for `spectra` objects, this is +#' @param from X values for interpolation (for `spectra` objects, this is #' assumed to be the `wavelengths` attribute.) -#' @param to Y values onto which to interpolate. For `spectra` objects, this +#' @param to Y values onto which to interpolate. For `spectra` objects, this #' should be new wavelengths. -#' @param method One of the methods for [stats::splinefun()] (for polynomial -#' and periodic splines) or [stats::approxfun()] (for constant or linear). +#' @param method One of the methods for [stats::splinefun()] (for polynomial +#' and periodic splines) or [stats::approxfun()] (for constant or linear). #' Default is `"fmm"` (same as splinefun). -#' @param ... Additional arguments to [stats::splinefun()] or +#' @param ... Additional arguments to [stats::splinefun()] or #' [stats::approxfun()] #' @return Object of the same class as `values`, resampled to the `to` values. #' @export diff --git a/modules/rtm/R/sail.R b/modules/rtm/R/sail.R index 8db7ecfd7f6..f4f2b68ac72 100644 --- a/modules/rtm/R/sail.R +++ b/modules/rtm/R/sail.R @@ -3,7 +3,7 @@ #' R wrapper for 4SAIL model #' @author Shawn Serbin #' @author Alexey Shiklomanov -#' +#' #' @param refl input leaf reflectance from 400-2500nm (can be measured or modeled) #' @param tran input leaf transmittance from 400-2500nm (can be measured or modeled) #' @param rsoil input soil reflectance spectra from 400-2500nm (can be measured or modeled) @@ -16,8 +16,8 @@ #' * tts: Solar zenith angle #' * tto: Observer zenith angle #' * psi: Sun-sensor azimuth angle -#' -#' @return Spectra matrix (see [spectra()]) (2101 x 4) of reflectance factors +#' +#' @return Spectra matrix (see [spectra()]) (2101 x 4) of reflectance factors #' for wavelengths 400 to 2500nm: #' * bi-hemispherical reflectance (rddt) #' * hemispherical directional (rsdt) @@ -29,19 +29,19 @@ foursail <- function(refl, tran, rsoil, param) { tau <- as.vector(tran) rsoil <- as.vector(rsoil) plist <- c(list(rho), list(tau), as.list(param), list(rsoil)) - nw <- 2101 + nw <- 2101 plist$rddt <- numeric(nw) plist$rsdt <- numeric(nw) plist$rdot <- numeric(nw) plist$rsot <- numeric(nw) - inputs <- c("foursail", plist, PACKAGE="PEcAnRTM") + inputs <- c("foursail", plist, PACKAGE = "PEcAnRTM") outlist <- do.call(.Fortran, inputs) - lo <- length(outlist) - canopy_refl <- do.call(cbind, outlist[(lo - 3):lo]) + lo <- length(outlist) + canopy_refl <- do.call(cbind, outlist[(lo - 3):lo]) reflspec <- spectra(canopy_refl, 400:2500) - colnames(reflspec) <- c("bi-hemispherical", "hemispherical_directional", - "directional_hemispherical", "bi-directional") + colnames(reflspec) <- c( + "bi-hemispherical", "hemispherical_directional", + "directional_hemispherical", "bi-directional" + ) reflspec } # sail - - diff --git a/modules/rtm/R/select.spectra.R b/modules/rtm/R/select.spectra.R index 6d955fa7bfd..8fb194f6ffc 100644 --- a/modules/rtm/R/select.spectra.R +++ b/modules/rtm/R/select.spectra.R @@ -20,7 +20,7 @@ } #' Select spectra by wavelength -#' +#' #' @inheritParams [.spectra #' @param wavelength Wavelength vector to select #' @param j index specifying elements to extract or replace. 
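# A hedged usage sketch for the spectra class that these selection methods
# operate on (spectra(), is_spectra(), and wavelengths() are defined later in
# this patch, in spectra.R); the reflectance values here are synthetic.
library(PEcAnRTM)
wl <- 400:2500
toy_refl <- 0.05 + 0.4 * exp(-((wl - 550) / 60)^2) # made-up green-peak reflectance
s <- spectra(toy_refl, wl)
is_spectra(s) # TRUE
head(wavelengths(s)) # 400 401 402 403 404 405
# Equivalent base-R subset of the visible range, for comparison with the
# wavelength-based selection implemented in select.spectra.R:
vis_values <- toy_refl[wl >= 400 & wl <= 700]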
@@ -43,4 +43,3 @@ spectra[i, j] <- value spectra } - diff --git a/modules/rtm/R/setup_edr.R b/modules/rtm/R/setup_edr.R index a98bf3177b5..43a4748e62d 100644 --- a/modules/rtm/R/setup_edr.R +++ b/modules/rtm/R/setup_edr.R @@ -1,11 +1,11 @@ #' Setup EDR run #' -#' Using an existing ED2IN file as a template, create a new ED2IN and history +#' Using an existing ED2IN file as a template, create a new ED2IN and history #' file configured for running EDR. #' #' @param ed2in ED2IN list object (see [PEcAn.ED2::read_ed2in]). #' @param output_dir Directory in which run files will be stored -#' @param datetime Date time object (or compliant string) at which to run EDR. +#' @param datetime Date time object (or compliant string) at which to run EDR. #' Defaults to 12 noon on start date in ED2IN. #' @param ... Additional arguments passed on to `PEcAn.ED2::modify_ed2in` #' @return Path to EDR-configured ED2IN file. @@ -13,11 +13,12 @@ #' @export setup_edr <- function(ed2in, output_dir, datetime = ISOdatetime(ed2in[["IYEARA"]], - ed2in[["IMONTHA"]], - ed2in[["IDATEA"]], - 12, 00, 00, tz = "UTC"), + ed2in[["IMONTHA"]], + ed2in[["IDATEA"]], + 12, 00, 00, + tz = "UTC" + ), ...) { - hour <- as.numeric(strftime(datetime, "%H", tz = "UTC")) if (hour < 8 | hour > 17) { PEcAn.logger::logger.warn( @@ -28,7 +29,7 @@ setup_edr <- function(ed2in, output_dir, } dir.create(output_dir, showWarnings = FALSE) - nextday <- as.POSIXct(datetime, tz = "UTC") + 86400 # Add one day + nextday <- as.POSIXct(datetime, tz = "UTC") + 86400 # Add one day history_prefix <- EDR.preprocess.history( history.path = dirname(ed2in$SFILOUT), diff --git a/modules/rtm/R/spectra.R b/modules/rtm/R/spectra.R index fe12e366ca7..3e9862a2b04 100644 --- a/modules/rtm/R/spectra.R +++ b/modules/rtm/R/spectra.R @@ -1,7 +1,7 @@ #' Spectra S3 class #' #' @param spectra Vector (`length = length(wavelengths)`) or matrix (`ncol = length(wavelengths)`) -#' @param wavelengths Wavelengths of spectra. +#' @param wavelengths Wavelengths of spectra. #' @export spectra <- function(spectra, wavelengths = 400:2500) { if (!is.matrix(spectra)) { @@ -26,4 +26,3 @@ is_spectra <- function(spectra) inherits(spectra, "spectra") #' @param spectra Object of class `spectra` #' @export wavelengths <- function(spectra) attr(spectra, "wavelengths") - diff --git a/modules/rtm/R/spectral-response.R b/modules/rtm/R/spectral-response.R index 1c7de09a76e..2160510daca 100644 --- a/modules/rtm/R/spectral-response.R +++ b/modules/rtm/R/spectral-response.R @@ -1,21 +1,25 @@ #' Sensor spectral response functions -#' +#' #' @export -sensor.list <- c("identity", "aviris.ng", "aviris.classic", - "hyperion", "chris.proba", "landsat5", "landsat7", - "landsat8", "modis", "viirs", "avhrr", "licor") +sensor.list <- c( + "identity", "aviris.ng", "aviris.classic", + "hyperion", "chris.proba", "landsat5", "landsat7", + "landsat8", "modis", "viirs", "avhrr", "licor" +) #' Sensor list with proper names -#' +#' #' @export -sensor.proper <- c("ASD Field Spec", "AVIRIS NG", "AVIRIS Classic", - "Hyperion", "CHRIS-Proba", "Landsat 5", "Landsat 7", - "Landsat 8", "MODIS", "VIIRS", "AVHRR", "LiCor 6400 chamber") +sensor.proper <- c( + "ASD Field Spec", "AVIRIS NG", "AVIRIS Classic", + "Hyperion", "CHRIS-Proba", "Landsat 5", "Landsat 7", + "Landsat 8", "MODIS", "VIIRS", "AVHRR", "LiCor 6400 chamber" +) names(sensor.proper) <- sensor.list #' Convolution of spectra to sensor RSR -#' +#' #' @param spec Full (1 nm) spectrum (vector) #' @param sensor Sensor name (string). 
See sensor.list #' @export diff --git a/modules/rtm/R/truncnorm.R b/modules/rtm/R/truncnorm.R index 498c0b6a170..3ba217da35e 100644 --- a/modules/rtm/R/truncnorm.R +++ b/modules/rtm/R/truncnorm.R @@ -1,8 +1,8 @@ ## Truncated normal distribution functions #' Random sampling from one-sided truncated normal distribution -#' -#' @details Draws a random number and, if it doesn't fall within the +#' +#' @details Draws a random number and, if it doesn't fall within the #' specified range, resample using an adjusted Normal CDF. This isn't #' performed immediately because CDF sampling calls three functions -- #' qnorm, runif, and pnorm--and therefore is much less efficient than a @@ -25,7 +25,7 @@ rtnorm <- function(mu, sd, MIN) { #' Truncated normal distribution density -#' +#' #' Calculates the log density of a univariate truncated normal variable #' @author Alexey Shiklomanov #' @param x A random variable @@ -36,8 +36,8 @@ rtnorm <- function(mu, sd, MIN) { #' @export dtnorm <- function(x, mu, sd, MIN) { if (x < MIN) { - return(-1e+15) + return(-1e+15) } else { return(dnorm(x, mu, sd, 1) - log(1 - pnorm(MIN, mu, sd, 1, 0))) - } + } } # dtnorm diff --git a/modules/rtm/inst/scripts/analyze_results.R b/modules/rtm/inst/scripts/analyze_results.R index df7880da70e..7b9f253cd98 100644 --- a/modules/rtm/inst/scripts/analyze_results.R +++ b/modules/rtm/inst/scripts/analyze_results.R @@ -5,4 +5,4 @@ sampfile <- "autosamples.rds" samps <- readRDS(sampfile) smcmc <- PEcAn.assim.batch::makeMCMCList(samps$samps.list) -plot(smcmc[,5]) +plot(smcmc[, 5]) diff --git a/modules/rtm/inst/scripts/benchmark_inversion.R b/modules/rtm/inst/scripts/benchmark_inversion.R index 992400e52a1..2a6c43ab6db 100644 --- a/modules/rtm/inst/scripts/benchmark_inversion.R +++ b/modules/rtm/inst/scripts/benchmark_inversion.R @@ -3,15 +3,17 @@ devtools::document(pkg_path) devtools::install(pkg_path) library(PEcAnRTM) -params <- c('N' = 1.4, - 'Cab' = 40, - 'Car' = 8, - 'Cw' = 0.01, - 'Cm' = 0.01) +params <- c( + "N" = 1.4, + "Cab" = 40, + "Car" = 8, + "Cw" = 0.01, + "Cm" = 0.01 +) sensor <- "identity" data(sensor.rsr) -generate_obs <- function(i){ - obs.raw <- prospect(params, 5)[,1] + generate.noise() +generate_obs <- function(i) { + obs.raw <- prospect(params, 5)[, 1] + generate.noise() obs <- spectral.response(obs.raw, sensor) return(obs) } @@ -22,7 +24,7 @@ obs <- do.call(cbind, lapply(seq_len(n_obs), generate_obs)) invert.options <- default.settings.prospect invert.options$inits <- invert.options$inits.function() invert.options$model <- function(params) { - spectral.response(prospect(params,5)[,1], sensor) + spectral.response(prospect(params, 5)[, 1], sensor) } invert.options$ngibbs.min <- 5000 invert.options$ngibbs.step <- 2000 @@ -30,7 +32,7 @@ invert.options$ngibbs.max <- 100000 invert.options$do.lsq <- TRUE invert.options$nchains <- 3 -options(warn=1) +options(warn = 1) test.parallel <- invert.auto(obs, invert.options, parallel = TRUE) print_results_summary(test.parallel) @@ -38,8 +40,8 @@ print_results_summary(test.parallel) plot(PEcAn.assim.batch::makeMCMCList(test.parallel$n_eff_list)) plot(PEcAn.assim.batch::makeMCMCList(test.parallel$deviance_list)) -#test.serial <- invert.custom(obs, invert.options) +# test.serial <- invert.custom(obs, invert.options) -#sp <- summaryRprof() -#head(sp$by.self, 15) -#head(sp$by.total, 20) +# sp <- summaryRprof() +# head(sp$by.self, 15) +# head(sp$by.total, 20) diff --git a/modules/rtm/inst/scripts/edr_test.R b/modules/rtm/inst/scripts/edr_test.R index 379b13f489f..a0372e15da3 100644 --- 
a/modules/rtm/inst/scripts/edr_test.R +++ b/modules/rtm/inst/scripts/edr_test.R @@ -2,44 +2,60 @@ library(PEcAnRTM) # For developing, comment out `library` and use this: library(devtools) -load_all('~/Projects/pecan/pecan/modules/rtm') +load_all("~/Projects/pecan/pecan/modules/rtm") -data_dir <- '~/Projects/nasa-rtm/edr-da/run-ed/1cohort/dbh20/early_hardwood' +data_dir <- "~/Projects/nasa-rtm/edr-da/run-ed/1cohort/dbh20/early_hardwood" -paths <- list(ed2in = file.path(data_dir, 'ED2IN'), - history = file.path(data_dir, 'outputs')) +paths <- list( + ed2in = file.path(data_dir, "ED2IN"), + history = file.path(data_dir, "outputs") +) output_dir <- tempdir() -file.copy(from = '~/Projects/ED2/EDR/build/ed_2.1-opt', - to = file.path(output_dir, 'ed_2.1-opt'), - overwrite = TRUE) +file.copy( + from = "~/Projects/ED2/EDR/build/ed_2.1-opt", + to = file.path(output_dir, "ed_2.1-opt"), + overwrite = TRUE +) # Case 1 -- Two PFTs: spectra and traits for both -spectra_list <- list(temperate.Early_Hardwood = prospect(c(1.4, 40, 0.01, 0.01), 4, TRUE), - temperate.Late_Hardwood = prospect(c(1.5, 25, 0.01, 0.012), 4, TRUE)) - -trait.values <- list(temperate.Early_Hardwood = list(orient_factor = 0.5), - temperate.Late_Hardwood = list(orient_factor = 0.5)) - -output <- EDR(paths = paths, - spectra_list = spectra_list, - par.wl = 400:800, - nir.wl = 801:2500, - datetime = ISOdate(2004, 07, 01, 16, 00, 00), - trait.values = trait.values, - output.path = output_dir) +spectra_list <- list( + temperate.Early_Hardwood = prospect(c(1.4, 40, 0.01, 0.01), 4, TRUE), + temperate.Late_Hardwood = prospect(c(1.5, 25, 0.01, 0.012), 4, TRUE) +) + +trait.values <- list( + temperate.Early_Hardwood = list(orient_factor = 0.5), + temperate.Late_Hardwood = list(orient_factor = 0.5) +) + +output <- EDR( + paths = paths, + spectra_list = spectra_list, + par.wl = 400:800, + nir.wl = 801:2500, + datetime = ISOdate(2004, 07, 01, 16, 00, 00), + trait.values = trait.values, + output.path = output_dir +) # Case 2 -- Two PFTs: only spectra, no traits -spectra_list <- list(temperate.Early_Hardwood = prospect(c(1.4, 40, 0.01, 0.01), 4, TRUE), - temperate.Late_Hardwood = prospect(c(1.5, 25, 0.01, 0.012), 4, TRUE)) - -trait.values <- list(temperate.Early_Hardwood = list(), - temperate.Late_Hardwood = list()) - -output <- EDR(paths = paths, - spectra_list = spectra_list, - par.wl = 400:800, - nir.wl = 801:2500, - datetime = ISOdate(2004, 07, 01, 16, 00, 00), - trait.values = trait.values, - output.path = output_dir) +spectra_list <- list( + temperate.Early_Hardwood = prospect(c(1.4, 40, 0.01, 0.01), 4, TRUE), + temperate.Late_Hardwood = prospect(c(1.5, 25, 0.01, 0.012), 4, TRUE) +) + +trait.values <- list( + temperate.Early_Hardwood = list(), + temperate.Late_Hardwood = list() +) + +output <- EDR( + paths = paths, + spectra_list = spectra_list, + par.wl = 400:800, + nir.wl = 801:2500, + datetime = ISOdate(2004, 07, 01, 16, 00, 00), + trait.values = trait.values, + output.path = output_dir +) diff --git a/modules/rtm/inst/scripts/effectivesize.R b/modules/rtm/inst/scripts/effectivesize.R index b19ac58fefc..3c99e776686 100644 --- a/modules/rtm/inst/scripts/effectivesize.R +++ b/modules/rtm/inst/scripts/effectivesize.R @@ -1,11 +1,11 @@ library(devtools) library(FitAR) -load_all('~/Projects/pecan/pecan/modules/rtm') +load_all("~/Projects/pecan/pecan/modules/rtm") data(testspec) -p <- prospect(defparam("prospect_5"), 5)[,1] -obs <- testspec_ACRU[,1] +p <- prospect(defparam("prospect_5"), 5)[, 1] +obs <- testspec_ACRU[, 1] 
png("~/Pictures/pacf.obs.png") pacf(obs) dev.off() @@ -16,12 +16,12 @@ p2 <- as.matrix(p[-1]) neff_prof <- function(n = 10000) replicate(n, neff(p)) -#Rprof() -#neff_orig <- replicate(n = 10000, neff(p)) -#Rprof(NULL) +# Rprof() +# neff_orig <- replicate(n = 10000, neff(p)) +# Rprof(NULL) -#sp <- summaryRprof() -#head(sp$by.self, 10) +# sp <- summaryRprof() +# head(sp$by.self, 10) # Steps to my neff base <- function() ar.yw(p, order.max = 1) @@ -32,12 +32,11 @@ ggplot2::autoplot(mb) arout <- ar.yw(p, order.max = 1) ar2 <- FitAR::FitAR(p, 1) -vp <- arout$var.pred # Prediction variance -arar <- arout$ar # Autoregression coefficients +vp <- arout$var.pred # Prediction variance +arar <- arout$ar # Autoregression coefficients spec <- vp / (1 - sum(arar))^2 varp <- var(p) out <- length(p) * var(p) / spec print(out) -#myneff <- function(x) { - +# myneff <- function(x) { diff --git a/modules/rtm/inst/scripts/invert_quadratic.R b/modules/rtm/inst/scripts/invert_quadratic.R index 39ac8664c9d..2b90e0c8862 100644 --- a/modules/rtm/inst/scripts/invert_quadratic.R +++ b/modules/rtm/inst/scripts/invert_quadratic.R @@ -1,7 +1,7 @@ library(devtools) -#install('~/Projects/pecan/pecan/modules/rtm') -load_all('~/Projects/pecan/pecan/modules/assim.batch/') -load_all('~/Projects/pecan/pecan/modules/rtm') +# install('~/Projects/pecan/pecan/modules/rtm') +load_all("~/Projects/pecan/pecan/modules/assim.batch/") +load_all("~/Projects/pecan/pecan/modules/rtm") true <- c(2, 3, 4) @@ -14,21 +14,23 @@ model <- function(params) { set.seed(666) y <- as.matrix(model(true) + generate.noise(n = nx, fw = 10, sigma = 1)) -#plot(x, y, type='l') +# plot(x, y, type='l') invert.options <- list() invert.options$model <- model -invert.options$prior.function <- function(p) sum(dnorm(p, 0, 30, TRUE)) +invert.options$prior.function <- function(p) sum(dnorm(p, 0, 30, TRUE)) invert.options$inits.function <- function() rnorm(3, 0, 30) invert.options$inits <- invert.options$inits.function() invert.options$nchains <- 3 -#invert.options$ngibbs <- 10000 -#invert.options$ngibbs <- 100 +# invert.options$ngibbs <- 10000 +# invert.options$ngibbs <- 100 -samp <- invert.auto(observed = y, - invert.options = invert.options) +samp <- invert.auto( + observed = y, + invert.options = invert.options +) -#samp <- invert.custom(observed = y, - #invert.options = invert.options) -#smcmc <- coda::as.mcmc(samp) -#plot(smcmc) +# samp <- invert.custom(observed = y, +# invert.options = invert.options) +# smcmc <- coda::as.mcmc(samp) +# plot(smcmc) diff --git a/modules/rtm/inst/scripts/like_scale_compare.R b/modules/rtm/inst/scripts/like_scale_compare.R index 3f6146605bb..52f43442d3d 100644 --- a/modules/rtm/inst/scripts/like_scale_compare.R +++ b/modules/rtm/inst/scripts/like_scale_compare.R @@ -4,15 +4,17 @@ devtools::install(pkg_path) library(PEcAnRTM) -params <- c('N' = 1.4, - 'Cab' = 40, - 'Car' = 8, - 'Cw' = 0.01, - 'Cm' = 0.01) +params <- c( + "N" = 1.4, + "Cab" = 40, + "Car" = 8, + "Cw" = 0.01, + "Cm" = 0.01 +) sensor <- "identity" data(sensor.rsr) -generate_obs <- function(i){ - obs.raw <- prospect(params, 5)[,1] + generate.noise() +generate_obs <- function(i) { + obs.raw <- prospect(params, 5)[, 1] + generate.noise() obs <- spectral.response(obs.raw, sensor) return(obs) } @@ -21,23 +23,24 @@ n_obs <- 3 obs <- do.call(cbind, lapply(seq_len(n_obs), generate_obs)) invert.options <- default.settings.prospect -invert.options$model <- function(params) spectral.response(prospect(params,5)[,1], sensor) +invert.options$model <- function(params) 
spectral.response(prospect(params, 5)[, 1], sensor) invert.options$ngibbs.min <- 5000 invert.options$ngibbs.step <- 2000 invert.options$ngibbs.max <- 100000 invert.options$do.lsq <- TRUE invert.options$nchains <- 3 -invert.options$likelihood_scale <- 6.83/2101 +invert.options$likelihood_scale <- 6.83 / 2101 samp_scaled <- invert.auto(obs, invert.options, - return.samples = TRUE, - parallel = TRUE) + return.samples = TRUE, + parallel = TRUE +) invert.options$likelihood_scale <- 1 samp_unscaled <- invert.auto(obs, invert.options, - return.samples = TRUE, - parallel = TRUE) + return.samples = TRUE, + parallel = TRUE +) results <- rbind(samp_scaled$results, samp_unscaled$results) results[, grep("Cw\\.q", colnames(results))] - diff --git a/modules/rtm/inst/scripts/read_dataspec.R b/modules/rtm/inst/scripts/read_dataspec.R index 72c0d099157..d877389276d 100644 --- a/modules/rtm/inst/scripts/read_dataspec.R +++ b/modules/rtm/inst/scripts/read_dataspec.R @@ -22,8 +22,10 @@ pars <- c( "k_cw", "k_cm" ) -dat_list <- c(list(wavelength = 400:2500), - lapply(pars, get_values, raw_file = raw_file)) +dat_list <- c( + list(wavelength = 400:2500), + lapply(pars, get_values, raw_file = raw_file) +) dataSpec_prospectd <- do.call(cbind, dat_list) colnames(dataSpec_prospectd) <- c("wavelength", pars) save(dataSpec_prospectd, file = "data/dataSpec_prospectd.RData") diff --git a/modules/rtm/tests/testthat.R b/modules/rtm/tests/testthat.R index 842ad3dcd77..f966bf1ca44 100755 --- a/modules/rtm/tests/testthat.R +++ b/modules/rtm/tests/testthat.R @@ -2,5 +2,5 @@ library(testthat) library(PEcAnRTM) -options(warn=1) +options(warn = 1) test_check("PEcAnRTM") diff --git a/modules/rtm/tests/testthat/test.2s.R b/modules/rtm/tests/testthat/test.2s.R index d6adfe58da0..76e3613ab94 100644 --- a/modules/rtm/tests/testthat/test.2s.R +++ b/modules/rtm/tests/testthat/test.2s.R @@ -7,13 +7,13 @@ ts.pars <- c(solar.zenith = 0, LAI = 5, soil.moisture = 0.3) nwl <- 2101 -p2s.test <- function(modname, input){ - test_that(paste(modname, "works and gives physically possible output"), { - expect_is(input, "matrix") - expect_equal(dim(input), c(nwl, 6)) - expect_true(all(input < 1)) - expect_true(all(input > 0)) - }) +p2s.test <- function(modname, input) { + test_that(paste(modname, "works and gives physically possible output"), { + expect_is(input, "matrix") + expect_equal(dim(input), c(nwl, 6)) + expect_true(all(input < 1)) + expect_true(all(input > 0)) + }) } @@ -23,6 +23,3 @@ p5 <- pro2s(c(p5.pars, ts.pars), 5) p2s.test("PROSPECT5-2S", p4) p5b <- pro2s(c(p5b.pars, ts.pars), "5B") p2s.test("PROSPECT5B-2S", p4) - - - diff --git a/modules/rtm/tests/testthat/test.foursail.R b/modules/rtm/tests/testthat/test.foursail.R index 62b51827a50..5a3ddf67ccd 100644 --- a/modules/rtm/tests/testthat/test.foursail.R +++ b/modules/rtm/tests/testthat/test.foursail.R @@ -1,34 +1,34 @@ context("Testing standalone SAIL RTM") # get soil reflectance spectra -rsoil <- system.file("extdata", "soil_reflect_par.dat", package="PEcAnRTM") -rsoil <- read.table(rsoil,header = F) -rsoil <- as.vector(unlist(rsoil[1,])) -rsoil <- c(rsoil,rsoil[2100]) # make soil reflectance the correct length +rsoil <- system.file("extdata", "soil_reflect_par.dat", package = "PEcAnRTM") +rsoil <- read.table(rsoil, header = F) +rsoil <- as.vector(unlist(rsoil[1, ])) +rsoil <- c(rsoil, rsoil[2100]) # make soil reflectance the correct length if (interactive()) { - plot(seq(400,2500,1), rsoil, type = "l") + plot(seq(400, 2500, 1), rsoil, type = "l") } # define sail parameters -LIDFa <- 
-0.35 +LIDFa <- -0.35 LIDFb <- -0.15 TypeLIDF <- 1 LAI <- 4 q <- 0.1 tts <- 48 -tto <- 0 +tto <- 0 psi <- 234 -params <- c(LIDFa,LIDFb,TypeLIDF,LAI,q,tts,tto,psi) +params <- c(LIDFa, LIDFb, TypeLIDF, LAI, q, tts, tto, psi) param <- params # get leaf reflectance and transmittance -LRT <- PEcAnRTM::prospect(c(2,55,10,3,0.1,0.007,0.007), 'D') +LRT <- PEcAnRTM::prospect(c(2, 55, 10, 3, 0.1, 0.007, 0.007), "D") if (interactive()) { - plot(LRT[,1]) + plot(LRT[, 1]) } -refl <- LRT[,1] +refl <- LRT[, 1] length(refl) -tran <- LRT[,2] +tran <- LRT[, 2] test_that("standalone SAIL RTM", { # generate 4SAIL canopy spectra diff --git a/modules/rtm/tests/testthat/test.gpm.R b/modules/rtm/tests/testthat/test.gpm.R index bee5f098bbf..0a83d746a3a 100644 --- a/modules/rtm/tests/testthat/test.gpm.R +++ b/modules/rtm/tests/testthat/test.gpm.R @@ -13,7 +13,8 @@ rtmat <- generalized_plate_model(k, refractive, N) prosp <- prospect(true_param, "D") test_that( - "Generalized plate model returns same values as PROSPECT", { + "Generalized plate model returns same values as PROSPECT", + { expect_lt(sum(rtmat - prosp), 1e-5) } ) diff --git a/modules/rtm/tests/testthat/test.invert_bayestools.R b/modules/rtm/tests/testthat/test.invert_bayestools.R index f49c5103866..0e9115bcc04 100644 --- a/modules/rtm/tests/testthat/test.invert_bayestools.R +++ b/modules/rtm/tests/testthat/test.invert_bayestools.R @@ -1,5 +1,5 @@ # devtools::load_all('.') -context('Inversion using BayesianTools') +context("Inversion using BayesianTools") skip_on_travis() @@ -9,57 +9,63 @@ skip_if_not( ) set.seed(12345678) -true_prospect <- defparam('prospect_5') +true_prospect <- defparam("prospect_5") true_params <- c(true_prospect, residual = 0.01) -model <- function(x) prospect(x, 5)[,1] +model <- function(x) prospect(x, 5)[, 1] true_model <- model(true_prospect) -noise <- rnorm(length(true_model), 0, true_params['residual']) +noise <- rnorm(length(true_model), 0, true_params["residual"]) observed <- true_model + noise if (interactive()) { - plot(400:2500, observed, type = 'l') - lines(400:2500, true_model, col = 'red') - legend("topright", c('observation', 'pseudo-data'), col = c('black', 'red'), lty = 'solid') + plot(400:2500, observed, type = "l") + lines(400:2500, true_model, col = "red") + legend("topright", c("observation", "pseudo-data"), col = c("black", "red"), lty = "solid") } # Alternate test, using observed spectra # data("testspec") # observed <- testspec_ACRU[,2] prior <- prospect_bt_prior(5) threshold <- 1.1 -custom_settings <- list(init = list(iterations = 2000), - loop = list(iterations = 1000), - other = list(threshold = threshold, - verbose_loglike = FALSE)) -samples <- invert_bt(observed = observed, model = model, prior = prior, - custom_settings = custom_settings) +custom_settings <- list( + init = list(iterations = 2000), + loop = list(iterations = 1000), + other = list( + threshold = threshold, + verbose_loglike = FALSE + ) +) +samples <- invert_bt( + observed = observed, model = model, prior = prior, + custom_settings = custom_settings +) samples_mcmc <- BayesianTools::getSample(samples, coda = TRUE) -samples_burned <- PEcAn.assim.batch::autoburnin(samples_mcmc, method = 'gelman.plot', threshold = threshold) +samples_burned <- PEcAn.assim.batch::autoburnin(samples_mcmc, method = "gelman.plot", threshold = threshold) -mean_estimates <- do.call(cbind, summary(samples_burned, quantiles = c(0.01, 0.5, 0.99))[c('statistics', 'quantiles')]) +mean_estimates <- do.call(cbind, summary(samples_burned, quantiles = c(0.01, 0.5, 
0.99))[c("statistics", "quantiles")]) test_that( - 'True values are within 95% confidence interval', + "True values are within 95% confidence interval", { - expect_true(all(true_params > mean_estimates[,'1%'])) - expect_true(all(true_params < mean_estimates[,'99%'])) + expect_true(all(true_params > mean_estimates[, "1%"])) + expect_true(all(true_params < mean_estimates[, "99%"])) } ) test_that( - 'Mean estimates are within 10% of true values', - expect_equal(true_params, mean_estimates[names(true_params), 'Mean'], tol = 0.1) + "Mean estimates are within 10% of true values", + expect_equal(true_params, mean_estimates[names(true_params), "Mean"], tol = 0.1) ) # Compare observation with predicted interval samp_mat <- as.matrix(samples_burned) nsamp <- 2500 prosp_mat <- matrix(0.0, nsamp, 2101) -message('Generating PROSPECT confidence interval') +message("Generating PROSPECT confidence interval") pb <- txtProgressBar(style = 3) for (i in seq_len(nsamp)) { - setTxtProgressBar(pb, i/nsamp) - samp_param <- samp_mat[sample.int(nrow(samp_mat), 1),] - prosp_mat[i,] <- rnorm(2101, model(samp_param[-6]), samp_param[6]) + setTxtProgressBar(pb, i / nsamp) + samp_param <- samp_mat[sample.int(nrow(samp_mat), 1), ] + prosp_mat[i, ] <- rnorm(2101, model(samp_param[-6]), samp_param[6]) } close(pb) mid <- colMeans(prosp_mat) @@ -70,19 +76,18 @@ pi_x <- c(seq_along(lo), rev(seq_along(hi))) outside <- which(observed < lo | observed > hi) test_that( - '95% predictive interval overlaps around 95% of data', + "95% predictive interval overlaps around 95% of data", expect_lt(100 * length(outside) / length(true_model), 7.5) - ) if (interactive()) { - par(mfrow = c(1,1)) - plot(observed, type = 'l') - lines(mid, col = 'red') - polygon(pi_x, pi_y, col = rgb(1, 0, 0, 0.2), border = 'red', lty = 'dashed') + par(mfrow = c(1, 1)) + plot(observed, type = "l") + lines(mid, col = "red") + polygon(pi_x, pi_y, col = rgb(1, 0, 0, 0.2), border = "red", lty = "dashed") legend( - 'topright', - c('observed', 'mean prediction', 'predictive interval'), - lty = c('solid', 'solid', 'dashed'), - col = c('black', 'red', 'red') + "topright", + c("observed", "mean prediction", "predictive interval"), + lty = c("solid", "solid", "dashed"), + col = c("black", "red", "red") ) } diff --git a/modules/rtm/tests/testthat/test.invert_simple.R b/modules/rtm/tests/testthat/test.invert_simple.R index 782af2ad05c..7bc758f2d2d 100644 --- a/modules/rtm/tests/testthat/test.invert_simple.R +++ b/modules/rtm/tests/testthat/test.invert_simple.R @@ -19,7 +19,7 @@ y <- as.matrix(model(true) + generate.noise(n = nx, fw = 10, sigma = 1)) nchains <- 3 invert.options <- list() invert.options$model <- model -invert.options$prior.function <- function(p) sum(dnorm(p, 0, 30, TRUE)) +invert.options$prior.function <- function(p) sum(dnorm(p, 0, 30, TRUE)) invert.options$inits.function <- function() rnorm(3, 0, 30) invert.options$inits <- invert.options$inits.function() invert.options$nchains <- nchains @@ -40,8 +40,10 @@ output_tests <- function(output) { }) test_that("Inversion output produces distinct chains", { - expect_false(identical(output$samples[[1]], - output$samples[[2]])) + expect_false(identical( + output$samples[[1]], + output$samples[[2]] + )) }) test_that("Saving samples is successful", { @@ -54,17 +56,21 @@ output_tests <- function(output) { } invert.options$threshold <- 1.2 -samp_parallel <- invert.auto(observed = y, - invert.options = invert.options, - save.samples = save.samples) +samp_parallel <- invert.auto( + observed = y, + invert.options = 
invert.options, + save.samples = save.samples +) output_tests(samp_parallel) invert.options$calculate.burnin <- FALSE invert.options$threshold <- NULL -samp_series <- invert.auto(observed = y, - invert.options = invert.options, - save.samples = save.samples, - parallel = FALSE) +samp_series <- invert.auto( + observed = y, + invert.options = invert.options, + save.samples = save.samples, + parallel = FALSE +) output_tests(samp_series) file.remove(fname_expect, save.samples) diff --git a/modules/rtm/tests/testthat/test.prospect.R b/modules/rtm/tests/testthat/test.prospect.R index cfc2295e96a..dbf5fd878cd 100644 --- a/modules/rtm/tests/testthat/test.prospect.R +++ b/modules/rtm/tests/testthat/test.prospect.R @@ -1,25 +1,25 @@ #' Tests of radiative transfer models context("PROSPECT models") -p4 <- c("N"=1.4, "Cab"=30, "Cw"=0.004, "Cm"=0.003) -p5 <- c(p4, "Car"=10)[c("N", "Cab", "Car", "Cw", "Cm")] -p5b <- c(p5, "Cbrown"=1)[c("N", "Cab", "Car", "Cbrown", "Cw", "Cm")] -pd <- c(p5b, "Canth"=8)[c('N', 'Cab', 'Car', 'Canth', 'Cbrown', 'Cw', 'Cm')] +p4 <- c("N" = 1.4, "Cab" = 30, "Cw" = 0.004, "Cm" = 0.003) +p5 <- c(p4, "Car" = 10)[c("N", "Cab", "Car", "Cw", "Cm")] +p5b <- c(p5, "Cbrown" = 1)[c("N", "Cab", "Car", "Cbrown", "Cw", "Cm")] +pd <- c(p5b, "Canth" = 8)[c("N", "Cab", "Car", "Canth", "Cbrown", "Cw", "Cm")] p4out <- prospect(p4, 4) p5out <- prospect(p5, 5) p5bout <- prospect(p5b, "5b") -pdout <- prospect(pd, 'D') +pdout <- prospect(pd, "D") -test.dim <- c(2101,2) +test.dim <- c(2101, 2) test_model <- function(x) { - test_that("Return matrix", expect_is(x, 'matrix')) - test_that("Return spectra", expect_is(x, 'spectra')) - test_that("Correct dimenions", expect_equal(dim(x), test.dim)) - test_that("Don't return 0", expect_true(sum(x) > 0)) + test_that("Return matrix", expect_is(x, "matrix")) + test_that("Return spectra", expect_is(x, "spectra")) + test_that("Correct dimenions", expect_equal(dim(x), test.dim)) + test_that("Don't return 0", expect_true(sum(x) > 0)) } for (m in list(p4out, p5out, p5bout, pdout)) { - test_model(m) + test_model(m) } diff --git a/modules/rtm/tests/testthat/test.resample.R b/modules/rtm/tests/testthat/test.resample.R index 231a0fa97e9..ed14c0f7a9b 100644 --- a/modules/rtm/tests/testthat/test.resample.R +++ b/modules/rtm/tests/testthat/test.resample.R @@ -5,8 +5,8 @@ test_that( { from <- seq(2, 20, 2) to <- seq(2, 20) - values <- from ^ 2 - true_values <- to ^ 2 + values <- from^2 + true_values <- to^2 resample_values <- resample(values, from, to) expect_equal(true_values, resample_values) } @@ -46,7 +46,7 @@ test_that( } ) -# This test doesn't work with the PEcAn.logger functions because they mess with +# This test doesn't work with the PEcAn.logger functions because they mess with # the warning output. 
if (!requireNamespace("PEcAn.logger")) { test_that( @@ -54,7 +54,7 @@ if (!requireNamespace("PEcAn.logger")) { { from <- seq(2, 20, 2) to <- seq(1, 30) - values <- from ^ 2 + values <- from^2 expect_warning(resample(values, from, to), "Resampled values .* unreliable") } ) diff --git a/modules/rtm/tests/testthat/test.sail.R b/modules/rtm/tests/testthat/test.sail.R index 1000c3522cd..ac9e95753b2 100644 --- a/modules/rtm/tests/testthat/test.sail.R +++ b/modules/rtm/tests/testthat/test.sail.R @@ -3,16 +3,16 @@ context("SAIL models") p <- defparam("pro4sail") pout <- pro4sail(p) -test.dim <- c(2101,4) +test.dim <- c(2101, 4) test_that("Returns matrix", { - expect_is(pout, "matrix") + expect_is(pout, "matrix") }) test_that("Correct dimensions", { - expect_equal(dim(pout), test.dim) + expect_equal(dim(pout), test.dim) }) test_that("Don't return 0", { - expect_true(all(colSums(pout) > 0)) + expect_true(all(colSums(pout) > 0)) }) diff --git a/modules/rtm/tests/testthat/test.spectra.R b/modules/rtm/tests/testthat/test.spectra.R index a3e52606970..b39e583f141 100644 --- a/modules/rtm/tests/testthat/test.spectra.R +++ b/modules/rtm/tests/testthat/test.spectra.R @@ -10,12 +10,12 @@ spec1 <- spectra(rawspec1, 400:2500) spec2 <- spectra(rawspec2, 400:2500) spec3 <- spectra(rawspec3, 401:500) -#test_that( +# test_that( # "Wavelength mismatch throws error", # { # expect_error(spectra(rawspec1, 400:1700)) # } -#) +# ) wl1 <- 400 i1 <- wl1 - 399 diff --git a/modules/rtm/vignettes/edr.sensitivity.R b/modules/rtm/vignettes/edr.sensitivity.R index aa7a110aa4c..1d72482a65e 100644 --- a/modules/rtm/vignettes/edr.sensitivity.R +++ b/modules/rtm/vignettes/edr.sensitivity.R @@ -3,37 +3,43 @@ library(PEcAnRTM) # Set paths and get files # pecan.workflow.id <- "1000001494" # ed.run.id <- "1000443342" -#pecan.workflow.id <- "1000001502" # Worked, but low sensitivity -#ed.run.id <- "1000443579" +# pecan.workflow.id <- "1000001502" # Worked, but low sensitivity +# ed.run.id <- "1000443579" pecan.workflow.id <- "1000001543" ed.run.id <- "1000443674" -analysis.path <- sprintf('analysis.output.%s.%s', pecan.workflow.id, ed.run.id) +analysis.path <- sprintf("analysis.output.%s.%s", pecan.workflow.id, ed.run.id) dir.create(analysis.path) exec.path <- file.path(analysis.path, "ed_2.1-opt") -if(!file.exists(exec.path)){ +if (!file.exists(exec.path)) { file.link("/projectnb/dietzelab/ashiklom/ED2/EDR/build/ed_2.1-opt", exec.path) system(paste("chmod +x", exec.path)) } -base.output.dir <- file.path("/projectnb", "dietzelab", "pecan.data", - "output", "ashiklom") -ed2in.path <- file.path(base.output.dir, pecan.workflow.id, - "run", ed.run.id, "ED2IN") -history.path <- file.path(base.output.dir, pecan.workflow.id, - "out", ed.run.id) +base.output.dir <- file.path( + "/projectnb", "dietzelab", "pecan.data", + "output", "ashiklom" +) +ed2in.path <- file.path( + base.output.dir, pecan.workflow.id, + "run", ed.run.id, "ED2IN" +) +history.path <- file.path( + base.output.dir, pecan.workflow.id, + "out", ed.run.id +) paths <- list(ed2in.path = ed2in.path, history.path = history.path) history.file <- tail(list.files(history.path, "history-S-.*"), 1) date.raw <- gsub("history-S-(.*)-g01.h5", "\\1", history.file) datetime <- strptime(date.raw, "%Y-%m-%d-%H%M%S", tz = "UTC") -if(lubridate::hour(datetime) < 8 | lubridate::hour(datetime) > 6) { +if (lubridate::hour(datetime) < 8 | lubridate::hour(datetime) > 6) { lubridate::hour(datetime) <- 12 } # Set sensitivity parameters -arg <- commandArgs(trailingOnly=TRUE) -if(length(arg) > 0){ - n.sens 
<- as.numeric(arg[1]) +arg <- commandArgs(trailingOnly = TRUE) +if (length(arg) > 0) { + n.sens <- as.numeric(arg[1]) } else { - n.sens <- 4 + n.sens <- 4 } sensitivity.means <- list( prospect.N = 1.4, @@ -60,24 +66,33 @@ sensitivity.means <- list( ed.wood_reflect_nir = 0.25, ed.wood_trans_vis = 0.001, ed.wood_trans_nir = 0.001, - ed.orient_factor = 0) + ed.orient_factor = 0 +) sensitivity.seqs <- list( prospect.N = seq(1, 4, length.out = n.sens), prospect.Cab = seq(10, 100, length.out = n.sens), prospect.Car = seq(2, 60, length.out = n.sens), - prospect.Cw = c(seq(0.004, 0.01, length.out = n.sens/2), - seq(0.015, 0.05, length.out = n.sens/2)), - prospect.Cm = c(seq(0.001, 0.005, length.out = n.sens/2), - seq(0.006, 0.015, length.out = n.sens/2)), + prospect.Cw = c( + seq(0.004, 0.01, length.out = n.sens / 2), + seq(0.015, 0.05, length.out = n.sens / 2) + ), + prospect.Cm = c( + seq(0.001, 0.005, length.out = n.sens / 2), + seq(0.006, 0.015, length.out = n.sens / 2) + ), ed.sla = c(seq(6, 50, length.out = n.sens)), - ed.b1bl = c(seq(0.01, 0.08, length.out = n.sens/2), - seq(0.1, 0.8, length.out = n.sens/2)), + ed.b1bl = c( + seq(0.01, 0.08, length.out = n.sens / 2), + seq(0.1, 0.8, length.out = n.sens / 2) + ), ed.b2bl = c(seq(0.9, 2, length.out = n.sens)), ed.b1bs = c(seq(0.00001, 0.3, length.out = n.sens)), ed.b2bs = c(seq(1, 3, length.out = n.sens)), - ed.b1ht = c(seq(0.03, 0.5, length.out = n.sens/2), - seq(1, 27, length.out = n.sens/2)), + ed.b1ht = c( + seq(0.03, 0.5, length.out = n.sens / 2), + seq(1, 27, length.out = n.sens / 2) + ), ed.b2ht = c(seq(-0.75, 0.75, length.out = n.sens)), ed.clumping_factor = seq(0.5, 1, length.out = n.sens), ed.leaf_width = seq(0.02, 0.15, length.out = n.sens), @@ -97,51 +112,54 @@ sensitivity.seqs <- list( par.wl <- 400:2499 nir.wl <- 2500 prospect.version <- 5 -pft.names <- c('Optics.Temperate_Early_Hardwood', - 'Optics.Temperate_Mid_Hardwood', - 'Optics.Temperate_Late_Hardwood', - 'Optics.Temperate_Late_Conifer') +pft.names <- c( + "Optics.Temperate_Early_Hardwood", + "Optics.Temperate_Mid_Hardwood", + "Optics.Temperate_Late_Hardwood", + "Optics.Temperate_Late_Conifer" +) # Loop over traits -for(i in seq_along(sensitivity.means)){ +for (i in seq_along(sensitivity.means)) { current.trait <- names(sensitivity.means)[i] albedo.mat <- matrix(NA, 2101, n.sens) - for(j in 1:n.sens){ + for (j in 1:n.sens) { # Set inputs print(sprintf("%s: %d of %d", current.trait, j, n.sens)) prospect.in <- unlist(sensitivity.means[1:5]) - if(i <= 5){ + if (i <= 5) { prospect.in[current.trait] <- sensitivity.seqs[[current.trait]][j] trait.in <- NULL } else { trait.in <- sensitivity.seqs[[current.trait]][j] - names(trait.in) <- substring(current.trait, 4) ## Remove "ed." from trait name + names(trait.in) <- substring(current.trait, 4) ## Remove "ed." 
from trait name } - trait.values <- lapply(pft.names, function(x) c(list(name=x), trait.in)) + trait.values <- lapply(pft.names, function(x) c(list(name = x), trait.in)) names(trait.values) <- pft.names - albedo <- EDR.prospect(prospect.param = prospect.in, - prospect.version = prospect.version, - paths=paths, - par.wl = par.wl, nir.wl = nir.wl, - datetime = datetime, - trait.values = trait.values, - history.prefix = 'history', - change.history.time = TRUE, - output.path = analysis.path) - albedo.diff <- c(0,diff(albedo)) + albedo <- EDR.prospect( + prospect.param = prospect.in, + prospect.version = prospect.version, + paths = paths, + par.wl = par.wl, nir.wl = nir.wl, + datetime = datetime, + trait.values = trait.values, + history.prefix = "history", + change.history.time = TRUE, + output.path = analysis.path + ) + albedo.diff <- c(0, diff(albedo)) threshold <- 0.01 albedo.na <- which(abs(albedo.diff) > threshold) albedo[albedo.na] <- NA - albedo.mat[,j] <- albedo + albedo.mat[, j] <- albedo } -# Save output + # Save output fname <- sprintf("%s/albedo.%s.rds", analysis.path, current.trait) - saveRDS(albedo.mat, file=fname) -# Plot output + saveRDS(albedo.mat, file = fname) + # Plot output png(sprintf("%s/%s.png", analysis.path, current.trait)) plot.title <- sprintf("%s", current.trait) wl <- 400:2500 - matplot(wl, albedo.mat, type='l', main=plot.title) + matplot(wl, albedo.mat, type = "l", main = plot.title) dev.off() } - diff --git a/modules/rtm/vignettes/invert.edr.R b/modules/rtm/vignettes/invert.edr.R index 6eebe913119..295ad42574b 100644 --- a/modules/rtm/vignettes/invert.edr.R +++ b/modules/rtm/vignettes/invert.edr.R @@ -1,84 +1,91 @@ library(PEcAnRTM) -# All inversion settings are defined in a list object, like in PEcAn. The -# `default.settings.prospect` is a template that we then modify (rather than +# All inversion settings are defined in a list object, like in PEcAn. The +# `default.settings.prospect` is a template that we then modify (rather than # building the entire list from scratch). invert.options <- default.settings.prospect -# Define the model. The dimensions of the output of this function must match +# Define the model. The dimensions of the output of this function must match # the observations. invert.options$model <- function(params) { + ## `params` is a 1D vector, which is sampled as a block by the inversion + ## algorithm. This means that values in `params` need to be distributed + ## into the `prospect.params` object (for the PROSPECT call) and + ## `trait.values`, which is a list of lists (outer list = PFT, inner list = + ## trait). - ## `params` is a 1D vector, which is sampled as a block by the inversion - ## algorithm. This means that values in `params` need to be distributed - ## into the `prospect.params` object (for the PROSPECT call) and - ## `trait.values`, which is a list of lists (outer list = PFT, inner list = - ## trait). + ## It helps for `params` to be a named vector. Those names are preserved by + ## the sampler. - ## It helps for `params` to be a named vector. Those names are preserved by - ## the sampler. + ## Any constants should be defined in here. - ## Any constants should be defined in here. 
+ # Set paths for ED executable and history files + paths <- list( + ed2in = "/path/to/ED2IN", # Points directly at FILE + history = "/path/to/history/" + ) # Points to DIRECTORY where history files are stored - # Set paths for ED executable and history files - paths <- list(ed2in = "/path/to/ED2IN", # Points directly at FILE - history = "/path/to/history/") # Points to DIRECTORY where history files are stored - - prospect.params <- params[1:5] - trait.values <- as.list(params[-(1:5)]) - out <- EDR.prospect(prospect.param = prospect.params, - prospect.version = 5, - trait.values = trait.values, # ED trait values list, written to the `config.xml` parameter file - paths = paths, - par.wl = 400:2499, # Wavelengths for PAR vector - nir.wl = 2500, # Wavelengths for NIR vector - datetime = ISOdate(2004, 07, 01, 12, 0, 0, tz="UTC"), # Datetime at which EDR will run - edr.exe.name = 'ed_2.1-opt', # Name of the EDR executable. - # It must be located in `output.path`. - # I recommend using a symlink. - output.path = "/path/to/output", # Where all outputs are stored - settings = list(model = list(revision = "git", - config.header = NULL)) # Required PEcAn settings - ) - return(out) + prospect.params <- params[1:5] + trait.values <- as.list(params[-(1:5)]) + out <- EDR.prospect( + prospect.param = prospect.params, + prospect.version = 5, + trait.values = trait.values, # ED trait values list, written to the `config.xml` parameter file + paths = paths, + par.wl = 400:2499, # Wavelengths for PAR vector + nir.wl = 2500, # Wavelengths for NIR vector + datetime = ISOdate(2004, 07, 01, 12, 0, 0, tz = "UTC"), # Datetime at which EDR will run + edr.exe.name = "ed_2.1-opt", # Name of the EDR executable. + # It must be located in `output.path`. + # I recommend using a symlink. + output.path = "/path/to/output", # Where all outputs are stored + settings = list(model = list( + revision = "git", + config.header = NULL + )) # Required PEcAn settings + ) + return(out) } -# This function should return a valid `params` vector for initializing the -# MCMC. The initial conditions can be fixed or, preferably, sampled randomly -# from a distribution (but the values must be valid; otherwise, the MCMC may +# This function should return a valid `params` vector for initializing the +# MCMC. The initial conditions can be fixed or, preferably, sampled randomly +# from a distribution (but the values must be valid; otherwise, the MCMC may # fail). -invert.options$inits.function <- function(){ - params <- c("N" = 1, "Cab" = 0, "Car" = 0, - "Cw" = 0, "Cm" = 0, - "clumping.factor" = 0.5) # Etc. Etc. - return(params) +invert.options$inits.function <- function() { + params <- c( + "N" = 1, "Cab" = 0, "Car" = 0, + "Cw" = 0, "Cm" = 0, + "clumping.factor" = 0.5 + ) # Etc. Etc. + return(params) } # This function returns the log density of the `params` vector for the priors. -invert.options$prior.function <- function(){ - prior.dens <- dlnorm(params, prior.logmus, prior.logsigma, log=TRUE) - return(sum(prior.dens)) +invert.options$prior.function <- function() { + prior.dens <- dlnorm(params, prior.logmus, prior.logsigma, log = TRUE) + return(sum(prior.dens)) } -# Vector of minimum values for parameters. If any parameters are sampled below +# Vector of minimum values for parameters. If any parameters are sampled below # this minimum, the likelihood is set to extremely low probability. 
invert.options$param.mins <- rep(0, length(invert.options$inits.function())) # Other inversion settings -invert.options$ngibbs <- 500000 # Number of iterations -invert.options$nchains <- 6 # Number of chains -invert.options$burnin <- 300000 # Number of iterations discarded as "burnun" -invert.options$n.tries <- 5 # Number of attempts for invert.auto +invert.options$ngibbs <- 500000 # Number of iterations +invert.options$nchains <- 6 # Number of chains +invert.options$burnin <- 300000 # Number of iterations discarded as "burnun" +invert.options$n.tries <- 5 # Number of attempts for invert.auto -# Generate some synthetic data. The `generate.noise` function generates some +# Generate some synthetic data. The `generate.noise` function generates some # random ,autocorrelated noise. obs <- EDR.prospect(...) + generate.noise() # Run the inversion. -inversion.output <- invert.auto(observed = obs, - invert.options = invert.options, - return.samples = TRUE, - save.samples = "file/name/for/saving/samples", # Or NULL if not saving samples - parallel = TRUE, # Whether or not to run in parallel - parallel.cores = 3 # How many parallel cores - ) +inversion.output <- invert.auto( + observed = obs, + invert.options = invert.options, + return.samples = TRUE, + save.samples = "file/name/for/saving/samples", # Or NULL if not saving samples + parallel = TRUE, # Whether or not to run in parallel + parallel.cores = 3 # How many parallel cores +) diff --git a/modules/rtm/vignettes/test.edr.R b/modules/rtm/vignettes/test.edr.R index 1ccba4d42aa..03c2f706152 100644 --- a/modules/rtm/vignettes/test.edr.R +++ b/modules/rtm/vignettes/test.edr.R @@ -1,20 +1,20 @@ -#install.packages("~/dietzelab/pecan/models/ed", repos=NULL) -#install.packages("~/dietzelab/pecan/modules/rtm", repos=NULL) -#library(PEcAn.ED2) +# install.packages("~/dietzelab/pecan/models/ed", repos=NULL) +# install.packages("~/dietzelab/pecan/modules/rtm", repos=NULL) +# library(PEcAn.ED2) library(PEcAnRTM) -#print(write.config.xml.ED2) +# print(write.config.xml.ED2) -analysis.path <- '~/dietzelab/ED2/EDR/run/' +analysis.path <- "~/dietzelab/ED2/EDR/run/" setwd(analysis.path) -#pecan.id <- "1000001494" -#run.id <- "1000443342" +# pecan.id <- "1000001494" +# run.id <- "1000443342" pecan.id <- "1000001502" run.id <- "1000443579" -base.path <- '/projectnb/dietzelab/pecan.data/output/ashiklom' -ed2in.path <- file.path(base.path, pecan.id, 'run', run.id, 'ED2IN') -history.path <- file.path(base.path, pecan.id, 'out', run.id) +base.path <- "/projectnb/dietzelab/pecan.data/output/ashiklom" +ed2in.path <- file.path(base.path, pecan.id, "run", run.id, "ED2IN") +history.path <- file.path(base.path, pecan.id, "out", run.id) print(list.files(history.path)) paths <- list(ed2in.path = ed2in.path, history.path = history.path) par.wl <- 400:2499 @@ -25,44 +25,48 @@ datetime <- ISOdatetime(2004, 07, 01, 12, 00, 00) lenout <- 10 trait.seq <- seq(0.1, 0.4, length.out = lenout) albedo.mat <- matrix(NA, 2101, lenout) -pft.names <- c('Optics.Temperate_Early_Hardwood', - 'Optics.Temperate_Mid_Hardwood', - 'Optics.Temperate_Late_Hardwood', - 'Optics.Temperate_Late_Conifer') +pft.names <- c( + "Optics.Temperate_Early_Hardwood", + "Optics.Temperate_Mid_Hardwood", + "Optics.Temperate_Late_Hardwood", + "Optics.Temperate_Late_Conifer" +) trait.values <- lapply(pft.names, function(x) list(name = x)) names(trait.values) <- pft.names -leaf.refl <- prospect(prospect.param, 5, FALSE)[,1] -for(i in 1:length(trait.seq)){ - val <- trait.seq[i] - print(val) - for(p in seq_along(pft.names)){ 
- trait.values[[p]]$clumping_factor <- val - } - trait.name <- names(trait.values[[p]])[2] - #xml <- write.config.xml.ED2(defaults = list(), - #settings = list(model = list(revision = "git")), - #trait.values = trait.values) - #print(xml) - albedo <- EDR.prospect(prospect.param = prospect.param, - prospect.version = prospect.version, - paths=paths, - par.wl = par.wl, nir.wl = nir.wl, - datetime = datetime, - trait.values = trait.values, - history.prefix = 'history', - change.history.time = TRUE, - output.path = getwd()) -# print(head(albedo)) - albedo.diff <- c(0,diff(albedo)) - threshold <- 0.01 - albedo.na <- which(abs(albedo.diff) > threshold) - albedo[albedo.na] <- NA - plot(albedo, type='l') - albedo.mat[,i] <- albedo +leaf.refl <- prospect(prospect.param, 5, FALSE)[, 1] +for (i in 1:length(trait.seq)) { + val <- trait.seq[i] + print(val) + for (p in seq_along(pft.names)) { + trait.values[[p]]$clumping_factor <- val + } + trait.name <- names(trait.values[[p]])[2] + # xml <- write.config.xml.ED2(defaults = list(), + # settings = list(model = list(revision = "git")), + # trait.values = trait.values) + # print(xml) + albedo <- EDR.prospect( + prospect.param = prospect.param, + prospect.version = prospect.version, + paths = paths, + par.wl = par.wl, nir.wl = nir.wl, + datetime = datetime, + trait.values = trait.values, + history.prefix = "history", + change.history.time = TRUE, + output.path = getwd() + ) + # print(head(albedo)) + albedo.diff <- c(0, diff(albedo)) + threshold <- 0.01 + albedo.na <- which(abs(albedo.diff) > threshold) + albedo[albedo.na] <- NA + plot(albedo, type = "l") + albedo.mat[, i] <- albedo } -#png(sprintf("%s.png", trait.name)) +# png(sprintf("%s.png", trait.name)) plot.title <- sprintf("%s: %.3f to %.3f", trait.name, min(trait.seq), max(trait.seq)) wl <- 400:2500 -matplot(wl, albedo.mat, type='l', main=plot.title) -#dev.off() +matplot(wl, albedo.mat, type = "l", main = plot.title) +# dev.off() diff --git a/modules/uncertainty/R/ensemble.R b/modules/uncertainty/R/ensemble.R index dc92e3985a9..2322c385e49 100644 --- a/modules/uncertainty/R/ensemble.R +++ b/modules/uncertainty/R/ensemble.R @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html @@ -9,10 +9,10 @@ ##' Reads output from model ensemble ##' -##' Reads output for an ensemble of length specified by \code{ensemble.size} and bounded by \code{start.year} +##' Reads output for an ensemble of length specified by \code{ensemble.size} and bounded by \code{start.year} ##' and \code{end.year} ##' @title Read ensemble output -##' @return a list of ensemble model output +##' @return a list of ensemble model output ##' @param ensemble.size the number of ensemble members run ##' @param pecandir specifies where pecan writes its configuration files ##' @param outdir directory with model output to use in ensemble analysis @@ -24,37 +24,36 @@ ##' @export ##' @author Ryan Kelly, David LeBauer, Rob Kooper #--------------------------------------------------------------------------------------------------# -read.ensemble.output <- function(ensemble.size, pecandir, outdir, start.year, end.year, +read.ensemble.output <- function(ensemble.size, pecandir, outdir, start.year, end.year, variable, ens.run.ids = NULL) { if (is.null(ens.run.ids)) { samples.file <- file.path(pecandir, "samples.Rdata") if (file.exists(samples.file)) { - samples = new.env() + samples <- new.env() load(samples.file, envir = samples) ens.run.ids <- samples$runs.samples$ensemble } else { stop(samples.file, "not found required by read.ensemble.output") } } - + expr <- variable$expression variables <- variable$variables - + ensemble.output <- list() for (row in rownames(ens.run.ids)) { run.id <- ens.run.ids[row, "id"] PEcAn.logger::logger.info("reading ensemble output from run id: ", format(run.id, scientific = FALSE)) - for(var in seq_along(variables)){ + for (var in seq_along(variables)) { out.tmp <- PEcAn.utils::read.output(run.id, file.path(outdir, run.id), start.year, end.year, variables[var]) assign(variables[var], out.tmp[[variables[var]]]) } - + # derivation out <- eval(parse(text = expr)) - - ensemble.output[[row]] <- mean(out, na.rm= TRUE) - + + ensemble.output[[row]] <- mean(out, na.rm = TRUE) } return(ensemble.output) } # read.ensemble.output @@ -62,31 +61,30 @@ read.ensemble.output <- function(ensemble.size, pecandir, outdir, start.year, en ##' Get parameter values used in ensemble ##' -##' Returns a matrix of randomly or quasi-randomly sampled trait values +##' Returns a matrix of randomly or quasi-randomly sampled trait values ##' to be assigned to traits over several model runs. ##' given the number of model runs and a list of sample distributions for traits ##' The model run is indexed first by model run, then by trait -##' +##' ##' @title Get Ensemble Samples ##' @name get.ensemble.samples ##' @param ensemble.size number of runs in model ensemble -##' @param pft.samples random samples from parameter distribution, e.g. from a MCMC chain +##' @param pft.samples random samples from parameter distribution, e.g. from a MCMC chain ##' @param env.samples env samples -##' @param method the method used to generate the ensemble samples. Random generators: uniform, uniform with latin hypercube permutation. Quasi-random generators: halton, sobol, torus. Random generation draws random variates whereas quasi-random generation is deterministic but well equidistributed. Default is uniform. 
For small ensemble size with relatively large parameter number (e.g ensemble size < 5 and # of traits > 5) use methods other than halton. +##' @param method the method used to generate the ensemble samples. Random generators: uniform, uniform with latin hypercube permutation. Quasi-random generators: halton, sobol, torus. Random generation draws random variates whereas quasi-random generation is deterministic but well equidistributed. Default is uniform. For small ensemble size with relatively large parameter number (e.g ensemble size < 5 and # of traits > 5) use methods other than halton. ##' @param param.names a list of parameter names that were fitted either by MA or PDA, important argument, if NULL parameters will be resampled independently ##' @param ... Other arguments passed on to the sampling method -##' +##' ##' @return matrix of (quasi-)random samples from trait distributions ##' @export ##' @author David LeBauer, Istem Fer -get.ensemble.samples <- function(ensemble.size, pft.samples, env.samples, +get.ensemble.samples <- function(ensemble.size, pft.samples, env.samples, method = "uniform", param.names = NULL, ...) { - if (is.null(method)) { PEcAn.logger::logger.info("No sampling method supplied, defaulting to uniform random sampling") method <- "uniform" } - + ## force as numeric for compatibility with Fortran code in halton() ensemble.size <- as.numeric(ensemble.size) if (ensemble.size <= 0) { @@ -100,11 +98,11 @@ get.ensemble.samples <- function(ensemble.size, pft.samples, env.samples, for (i in seq_along(pft.samples)) { pft2col <- c(pft2col, rep(i, length(pft.samples[[i]]))) } - + total.sample.num <- sum(sapply(pft.samples, length)) random.samples <- NULL - - + + if (method == "halton") { PEcAn.logger::logger.info("Using ", method, "method for sampling") random.samples <- randtoolbox::halton(n = ensemble.size, dim = total.sample.num, ...) 
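# A small sketch of the pseudo-random versus quasi-random designs that
# get.ensemble.samples() switches between; the ensemble size and trait count
# below are arbitrary assumptions chosen only for illustration.
n_ens <- 8
n_traits <- 3
u_uniform <- matrix(stats::runif(n_ens * n_traits), n_ens, n_traits) # method = "uniform"
u_halton <- randtoolbox::halton(n = n_ens, dim = n_traits) # method = "halton"
# Both are n_ens x n_traits matrices on (0, 1); each column is later fed to
# stats::quantile() on a trait's posterior samples to turn the design into
# parameter draws.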
@@ -126,27 +124,31 @@ get.ensemble.samples <- function(ensemble.size, pft.samples, env.samples, } else if (method == "uniform") { PEcAn.logger::logger.info("Using ", method, "random sampling") # uniform random - random.samples <- matrix(stats::runif(ensemble.size * total.sample.num), - ensemble.size, - total.sample.num) + random.samples <- matrix( + stats::runif(ensemble.size * total.sample.num), + ensemble.size, + total.sample.num + ) } else { PEcAn.logger::logger.info("Method ", method, " has not been implemented yet, using uniform random sampling") # uniform random - random.samples <- matrix(stats::runif(ensemble.size * total.sample.num), - ensemble.size, - total.sample.num) + random.samples <- matrix( + stats::runif(ensemble.size * total.sample.num), + ensemble.size, + total.sample.num + ) } - - + + ensemble.samples <- list() - - + + col.i <- 0 for (pft.i in seq(pft.samples)) { ensemble.samples[[pft.i]] <- matrix(nrow = ensemble.size, ncol = length(pft.samples[[pft.i]])) - + # meaning we want to keep MCMC samples together - if(length(pft.samples[[pft.i]])>0 & !is.null(param.names)){ + if (length(pft.samples[[pft.i]]) > 0 & !is.null(param.names)) { if (method == "halton") { same.i <- round(randtoolbox::halton(ensemble.size) * length(pft.samples[[pft.i]][[1]])) } else if (method == "sobol") { @@ -162,21 +164,22 @@ get.ensemble.samples <- function(ensemble.size, pft.samples, env.samples, # uniform random same.i <- sample.int(length(pft.samples[[pft.i]][[1]]), ensemble.size) } - } - + for (trait.i in seq(pft.samples[[pft.i]])) { col.i <- col.i + 1 - if(names(pft.samples[[pft.i]])[trait.i] %in% param.names[[pft.i]]){ # keeping samples + if (names(pft.samples[[pft.i]])[trait.i] %in% param.names[[pft.i]]) { # keeping samples ensemble.samples[[pft.i]][, trait.i] <- pft.samples[[pft.i]][[trait.i]][same.i] - }else{ - ensemble.samples[[pft.i]][, trait.i] <- stats::quantile(pft.samples[[pft.i]][[trait.i]], - random.samples[, col.i]) + } else { + ensemble.samples[[pft.i]][, trait.i] <- stats::quantile( + pft.samples[[pft.i]][[trait.i]], + random.samples[, col.i] + ) } - } # end trait + } # end trait ensemble.samples[[pft.i]] <- as.data.frame(ensemble.samples[[pft.i]]) colnames(ensemble.samples[[pft.i]]) <- names(pft.samples[[pft.i]]) - } #end pft + } # end pft names(ensemble.samples) <- names(pft.samples) ans <- ensemble.samples } @@ -187,7 +190,7 @@ get.ensemble.samples <- function(ensemble.size, pft.samples, env.samples, ##' Write ensemble config files ##' ##' Writes config files for use in meta-analysis and returns a list of run ids. -##' Given a pft.xml object, a list of lists as supplied by get.sa.samples, +##' Given a pft.xml object, a list of lists as supplied by get.sa.samples, ##' a name to distinguish the output files, and the directory to place the files. 
##' ##' @param defaults pft @@ -210,134 +213,141 @@ get.ensemble.samples <- function(ensemble.size, pft.samples, env.samples, ##' @importFrom rlang .data ##' @export ##' @author David LeBauer, Carl Davidson, Hamze Dokoohaki -write.ensemble.configs <- function(defaults, ensemble.samples, settings, model, +write.ensemble.configs <- function(defaults, ensemble.samples, settings, model, clean = FALSE, write.to.db = TRUE, restart = NULL, rename = FALSE) { - con <- NULL my.write.config <- paste("write.config.", model, sep = "") my.write_restart <- paste0("write_restart.", model) - + if (is.null(ensemble.samples)) { return(list(runs = NULL, ensemble.id = NULL)) } - + # See if we need to write to DB write.to.db <- as.logical(settings$database$bety$write) - + if (write.to.db) { # Open connection to database so we can store all run/ensemble information con <- try(PEcAn.DB::db.open(settings$database$bety)) on.exit(try(PEcAn.DB::db.close(con), silent = TRUE), add = TRUE) - + # If we fail to connect to DB then we set to NULL - if (inherits(con, "try-error")) { + if (inherits(con, "try-error")) { con <- NULL PEcAn.logger::logger.warn("We were not able to successfully establish a connection with Bety ") } } - + # Get the workflow id if (!is.null(settings$workflow$id)) { workflow.id <- settings$workflow$id } else { workflow.id <- -1 } - #------------------------------------------------- if this is a new fresh run------------------ - if (is.null(restart)){ + #------------------------------------------------- if this is a new fresh run------------------ + if (is.null(restart)) { # create an ensemble id if (!is.null(con) && write.to.db) { # write ensemble first ensemble.id <- PEcAn.DB::db.query(paste0( "INSERT INTO ensembles (runtype, workflow_id) ", "VALUES ('ensemble', ", format(workflow.id, scientific = FALSE), ")", - "RETURNING id"), con = con)[['id']] - + "RETURNING id" + ), con = con)[["id"]] + for (pft in defaults) { PEcAn.DB::db.query(paste0( "INSERT INTO posteriors_ensembles (posterior_id, ensemble_id) ", - "values (", pft$posteriorid, ", ", ensemble.id, ")"), con = con) + "values (", pft$posteriorid, ", ", ensemble.id, ")" + ), con = con) } } else { ensemble.id <- NA } #-------------------------generating met/param/soil/veg/... 
for all ensembles---- - if (!is.null(con)){ + if (!is.null(con)) { #-- lets first find out what tags are required for this model - required_tags <- dplyr::tbl(con, 'models') %>% + required_tags <- dplyr::tbl(con, "models") %>% dplyr::filter(.data$id == !!as.numeric(settings$model$id)) %>% - dplyr::inner_join(dplyr::tbl(con, "modeltypes_formats"), by = c('modeltype_id')) %>% + dplyr::inner_join(dplyr::tbl(con, "modeltypes_formats"), by = c("modeltype_id")) %>% dplyr::collect() %>% dplyr::filter(.data$required == TRUE) %>% dplyr::pull("tag") - - }else{ - required_tags<-c("met","parameters") - + } else { + required_tags <- c("met", "parameters") } - - #now looking into the xml + + # now looking into the xml samp <- settings$ensemble$samplingspace - #finding who has a parent - parents <- lapply(samp,'[[', 'parent') - #order parents based on the need of who has to be first - order <- names(samp)[lapply(parents, function(tr) which(names(samp) %in% tr)) %>% unlist()] - #new ordered sampling space + # finding who has a parent + parents <- lapply(samp, "[[", "parent") + # order parents based on the need of who has to be first + order <- names(samp)[lapply(parents, function(tr) which(names(samp) %in% tr)) %>% unlist()] + # new ordered sampling space samp.ordered <- samp[c(order, names(samp)[!(names(samp) %in% order)])] - #performing the sampling - samples<-list() + # performing the sampling + samples <- list() # For the tags specified in the xml I do the sampling - for(i in seq_along(samp.ordered)){ - myparent<-samp.ordered[[i]]$parent # do I have a parent ? - #call the function responsible for generating the ensemble - samples[[names(samp.ordered[i])]] <- input.ens.gen(settings=settings, - input=names(samp.ordered)[i], - method=samp.ordered[[i]]$method, - parent_ids=if( !is.null(myparent)) samples[[myparent]] # if I have parent then give me their ids - this is where the ordering matters making sure the parent is done before it's asked + for (i in seq_along(samp.ordered)) { + myparent <- samp.ordered[[i]]$parent # do I have a parent ? 
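# --- Illustrative sketch, not part of this patch or of the PEcAn sources ----------
# Ordering a hypothetical samplingspace list so that any tag named as another
# tag's 'parent' is sampled first, mirroring the ordering logic above. The tags
# and methods here are made up.
samp <- list(
  met = list(method = "sampling"),
  soil = list(method = "sampling", parent = "met")
)
parents <- lapply(samp, `[[`, "parent")
first <- names(samp)[unlist(lapply(parents, function(tr) which(names(samp) %in% tr)))]
samp.ordered <- samp[c(first, setdiff(names(samp), first))]
names(samp.ordered) # "met" then "soil": parent ids exist before 'soil' asks for them
# -----------------------------------------------------------------------------------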
+ # call the function responsible for generating the ensemble + samples[[names(samp.ordered[i])]] <- input.ens.gen( + settings = settings, + input = names(samp.ordered)[i], + method = samp.ordered[[i]]$method, + parent_ids = if (!is.null(myparent)) samples[[myparent]] # if I have parent then give me their ids - this is where the ordering matters making sure the parent is done before it's asked ) } - # if there is a tag required by the model but it is not specified in the xml then I replicate n times the first element - required_tags%>% - purrr::walk(function(r_tag){ - if (is.null(samples[[r_tag]]) & r_tag!="parameters") samples[[r_tag]]$samples <<- rep(settings$run$inputs[[tolower(r_tag)]]$path[1], settings$ensemble$size) + # if there is a tag required by the model but it is not specified in the xml then I replicate n times the first element + required_tags %>% + purrr::walk(function(r_tag) { + if (is.null(samples[[r_tag]]) & r_tag != "parameters") samples[[r_tag]]$samples <<- rep(settings$run$inputs[[tolower(r_tag)]]$path[1], settings$ensemble$size) }) - + # Reading the site.pft specific tags from xml - site.pfts.vec <- settings$run$site$site.pft %>% unlist %>% as.character - + site.pfts.vec <- settings$run$site$site.pft %>% + unlist() %>% + as.character() + if (!is.null(site.pfts.vec)) { # find the name of pfts defined in the body of pecan.xml defined.pfts <- - settings$pfts %>% purrr::map('name') %>% unlist %>% as.character + settings$pfts %>% + purrr::map("name") %>% + unlist() %>% + as.character() # subset ensemble samples based on the pfts that are specified in the site and they are also sampled from. - if (length(which(site.pfts.vec %in% defined.pfts)) > 0) + if (length(which(site.pfts.vec %in% defined.pfts)) > 0) { ensemble.samples <- - ensemble.samples [site.pfts.vec[which(site.pfts.vec %in% defined.pfts)]] + ensemble.samples[site.pfts.vec[which(site.pfts.vec %in% defined.pfts)]] + } # warn if there is a pft specified in the site but it's not defined in the pecan xml. 
- if (length(which(!(site.pfts.vec %in% defined.pfts))) > 0) + if (length(which(!(site.pfts.vec %in% defined.pfts))) > 0) { PEcAn.logger::logger.warn( paste0( "The following pfts are specified for the siteid ", - settings$run$site$id , + settings$run$site$id, " but they are not defined as a pft in pecan.xml:", site.pfts.vec[which(!(site.pfts.vec %in% defined.pfts))], collapse = "," ) ) + } } - + # if no ensemble piece was in the xml I replicate n times the first element in params - if ( is.null(samp$parameters) ) samples$parameters$samples <- ensemble.samples %>% purrr::map(~.x[rep(1, settings$ensemble$size) , ]) - # This where we handle the parameters - ensemble.samples is already generated in run.write.config and it's sent to this function as arg - - if ( is.null(samples$parameters$samples) ) samples$parameters$samples <- ensemble.samples + if (is.null(samp$parameters)) samples$parameters$samples <- ensemble.samples %>% purrr::map(~ .x[rep(1, settings$ensemble$size), ]) + # This where we handle the parameters - ensemble.samples is already generated in run.write.config and it's sent to this function as arg - + if (is.null(samples$parameters$samples)) samples$parameters$samples <- ensemble.samples #------------------------End of generating ensembles----------------------------------- # find all inputs that have an id inputs <- names(settings$run$inputs) inputs <- inputs[grepl(".id$", inputs)] - + # write configuration for each run of the ensemble runs <- data.frame() for (i in seq_len(settings$ensemble$size)) { @@ -346,31 +356,33 @@ write.ensemble.configs <- function(defaults, ensemble.samples, settings, model, # inserting this into the table and getting an id back run.id <- PEcAn.DB::db.query(paste0( "INSERT INTO runs (model_id, site_id, start_time, finish_time, outdir, ensemble_id, parameter_list) ", - "values ('", - settings$model$id, "', '", - settings$run$site$id, "', '", - settings$run$start.date, "', '", - settings$run$end.date, "', '", - settings$run$outdir, "', ", - ensemble.id, ", '", + "values ('", + settings$model$id, "', '", + settings$run$site$id, "', '", + settings$run$start.date, "', '", + settings$run$end.date, "', '", + settings$run$outdir, "', ", + ensemble.id, ", '", paramlist, "') ", - "RETURNING id"), con = con)[['id']] + "RETURNING id" + ), con = con)[["id"]] # associate inputs with runs if (!is.null(inputs)) { for (x in inputs) { - PEcAn.DB::db.query(paste0("INSERT INTO inputs_runs (input_id, run_id) ", - "values (", settings$run$inputs[[x]], ", ", run.id, ")"), - con = con) + PEcAn.DB::db.query( + paste0( + "INSERT INTO inputs_runs (input_id, run_id) ", + "values (", settings$run$inputs[[x]], ", ", run.id, ")" + ), + con = con + ) } } - } else { - - run.id <- PEcAn.utils::get.run.id("ENS", PEcAn.utils::left.pad.zeros(i, 5), site.id=settings$run$site$id) - + run.id <- PEcAn.utils::get.run.id("ENS", PEcAn.utils::left.pad.zeros(i, 5), site.id = settings$run$site$id) } runs[i, "id"] <- run.id - + # create folders (cleaning up old ones if needed) if (clean) { unlink(file.path(settings$rundir, run.id)) @@ -380,96 +392,104 @@ write.ensemble.configs <- function(defaults, ensemble.samples, settings, model, dir.create(file.path(settings$modeloutdir, run.id), recursive = TRUE) # write run information to disk cat("runtype : ensemble\n", - "workflow id : ", format(workflow.id, scientific = FALSE), "\n", - "ensemble id : ", format(ensemble.id, scientific = FALSE), "\n", - "run : ", i, "/", settings$ensemble$size, "\n", - "run id : ", format(run.id, scientific = FALSE), "\n", - 
"pft names : ", as.character(lapply(settings$pfts, function(x) x[["name"]])), "\n", - "model : ", model, "\n", - "model id : ", format(settings$model$id, scientific = FALSE), "\n", - "site : ", settings$run$site$name, "\n", - "site id : ", format(settings$run$site$id, scientific = FALSE), "\n", - "met data : ", samples$met$samples[[i]], "\n", - "start date : ", settings$run$start.date, "\n", - "end date : ", settings$run$end.date, "\n", - "hostname : ", settings$host$name, "\n", - "rundir : ", file.path(settings$host$rundir, run.id), "\n", - "outdir : ", file.path(settings$host$outdir, run.id), "\n", - file = file.path(settings$rundir, run.id, "README.txt")) - - #changing the structure of input tag to what the models are expecting - for(input_i in seq_along(settings$run$inputs)){ + "workflow id : ", format(workflow.id, scientific = FALSE), "\n", + "ensemble id : ", format(ensemble.id, scientific = FALSE), "\n", + "run : ", i, "/", settings$ensemble$size, "\n", + "run id : ", format(run.id, scientific = FALSE), "\n", + "pft names : ", as.character(lapply(settings$pfts, function(x) x[["name"]])), "\n", + "model : ", model, "\n", + "model id : ", format(settings$model$id, scientific = FALSE), "\n", + "site : ", settings$run$site$name, "\n", + "site id : ", format(settings$run$site$id, scientific = FALSE), "\n", + "met data : ", samples$met$samples[[i]], "\n", + "start date : ", settings$run$start.date, "\n", + "end date : ", settings$run$end.date, "\n", + "hostname : ", settings$host$name, "\n", + "rundir : ", file.path(settings$host$rundir, run.id), "\n", + "outdir : ", file.path(settings$host$outdir, run.id), "\n", + file = file.path(settings$rundir, run.id, "README.txt") + ) + + # changing the structure of input tag to what the models are expecting + for (input_i in seq_along(settings$run$inputs)) { input_tag <- names(settings$run$inputs)[[input_i]] - if (!is.null(samples[[input_tag]])) + if (!is.null(samples[[input_tag]])) { settings$run$inputs[[input_tag]][["path"]] <- samples[[input_tag]][["samples"]][[i]] + } } - - do.call(my.write.config, args = list( defaults = defaults, - trait.values = lapply(samples$parameters$samples, function(x, n) { x[n, , drop=FALSE] }, n=i), # this is the params - settings = settings, - run.id = run.id - ) - ) - cat(format(run.id, scientific = FALSE), file = file.path(settings$rundir, "runs.txt"), sep = "\n", append = TRUE) + do.call(my.write.config, args = list( + defaults = defaults, + trait.values = lapply(samples$parameters$samples, function(x, n) { + x[n, , drop = FALSE] + }, n = i), # this is the params + settings = settings, + run.id = run.id + )) + cat(format(run.id, scientific = FALSE), file = file.path(settings$rundir, "runs.txt"), sep = "\n", append = TRUE) } - return(invisible(list(runs = runs, ensemble.id = ensemble.id, samples=samples))) - #------------------------------------------------- if we already have everything ------------------ - }else{ - #reading retstart inputs - inputs<-restart$inputs - run.id<-restart$runid - new.params<-restart$new.params - new.state<-restart$new.state - ensemble.id<-restart$ensemble.id - + return(invisible(list(runs = runs, ensemble.id = ensemble.id, samples = samples))) + #------------------------------------------------- if we already have everything ------------------ + } else { + # reading retstart inputs + inputs <- restart$inputs + run.id <- restart$runid + new.params <- restart$new.params + new.state <- restart$new.state + ensemble.id <- restart$ensemble.id + # Reading the site.pft specific tags from xml - 
site.pfts.vec <- settings$run$site$site.pft %>% unlist %>% as.character - - if(!is.null(site.pfts.vec)){ + site.pfts.vec <- settings$run$site$site.pft %>% + unlist() %>% + as.character() + + if (!is.null(site.pfts.vec)) { # find the name of pfts defined in the body of pecan.xml - defined.pfts <- settings$pfts %>% purrr::map('name') %>% unlist %>% as.character + defined.pfts <- settings$pfts %>% + purrr::map("name") %>% + unlist() %>% + as.character() # subset ensemble samples based on the pfts that are specified in the site and they are also sampled from. - if (length(which(site.pfts.vec %in% defined.pfts)) > 0 ) - new.params <- new.params %>% purrr::map(~list(.x[[which(site.pfts.vec %in% defined.pfts)]],restart=.x$restart)) + if (length(which(site.pfts.vec %in% defined.pfts)) > 0) { + new.params <- new.params %>% purrr::map(~ list(.x[[which(site.pfts.vec %in% defined.pfts)]], restart = .x$restart)) + } # warn if there is a pft specified in the site but it's not defined in the pecan xml. - if (length(which(!(site.pfts.vec %in% defined.pfts)))>0) - PEcAn.logger::logger.warn(paste0("The following pfts are specified for the siteid ", settings$run$site$id ," but they are not defined as a pft in pecan.xml:", - site.pfts.vec[which(!(site.pfts.vec %in% defined.pfts))])) + if (length(which(!(site.pfts.vec %in% defined.pfts))) > 0) { + PEcAn.logger::logger.warn(paste0( + "The following pfts are specified for the siteid ", settings$run$site$id, " but they are not defined as a pft in pecan.xml:", + site.pfts.vec[which(!(site.pfts.vec %in% defined.pfts))] + )) + } } - - #if ensemble folders do not exist create them - for(j in 1:length(run.id)){ - if(!file.exists(file.path(settings$rundir, run.id[[j]]))){ + + # if ensemble folders do not exist create them + for (j in 1:length(run.id)) { + if (!file.exists(file.path(settings$rundir, run.id[[j]]))) { dir.create(file.path(settings$rundir, run.id[[j]])) } - } - + # stop and start time are required by bc we are wrtting them down into job.sh for (i in seq_len(settings$ensemble$size)) { - do.call(my.write_restart, - args = list(outdir = settings$host$outdir, - runid = run.id[[i]], - start.time = restart$start.time, - stop.time =restart$stop.time, - settings = settings, - new.state = new.state[i, ], - new.params = new.params[[i]], #new.params$`646`[[i]] for debugging - inputs =list(met=list(path=inputs$samples[[i]])), - RENAME = rename)#for restart from previous model runs, not sharing the same outdir + do.call(my.write_restart, + args = list( + outdir = settings$host$outdir, + runid = run.id[[i]], + start.time = restart$start.time, + stop.time = restart$stop.time, + settings = settings, + new.state = new.state[i, ], + new.params = new.params[[i]], # new.params$`646`[[i]] for debugging + inputs = list(met = list(path = inputs$samples[[i]])), + RENAME = rename + ) # for restart from previous model runs, not sharing the same outdir ) } - params<-new.params - return(invisible(list(runs = data.frame(id=run.id), ensemble.id = ensemble.id, samples=list(met=inputs) - ) - )) + params <- new.params + return(invisible(list(runs = data.frame(id = run.id), ensemble.id = ensemble.id, samples = list(met = inputs)))) } - - - } # write.ensemble.configs @@ -487,39 +507,47 @@ write.ensemble.configs <- function(defaults, ensemble.samples, settings, model, #' @export #' #' @examples -#' \dontrun{input.ens.gen(settings,"met","sampling")} +#' \dontrun{ +#' input.ens.gen(settings, "met", "sampling") +#' } #' input.ens.gen <- function(settings, input, method = "sampling", parent_ids 
= NULL) { - #-- reading the dots and exposing them to the inside of the function samples <- list() samples$ids <- c() # - if (is.null(method)) return(NULL) + if (is.null(method)) { + return(NULL) + } # parameter is exceptional, it needs to be handled separately - if (input == "parameters") return(NULL) + if (input == "parameters") { + return(NULL) + } #-- assign the sample ids based on different scenarios input_path <- settings$run$inputs[[tolower(input)]]$path if (!is.null(parent_ids)) { samples$ids <- parent_ids$ids out.of.sample.size <- length(samples$ids[samples$ids > length(input_path)]) - #sample for those that our outside the param size - forexample, parent id may send id number 200 but we have only100 sample for param + # sample for those that are outside the param size - for example, parent id may send id number 200 but we have only 100 samples for param samples$ids[samples$ids %in% out.of.sample.size] <- sample( seq_along(input_path), out.of.sample.size, - replace = TRUE) + replace = TRUE + ) } else if (tolower(method) == "sampling") { samples$ids <- sample( seq_along(input_path), settings$ensemble$size, - replace = TRUE) + replace = TRUE + ) } else if (tolower(method) == "looping") { samples$ids <- rep_len( seq_along(input_path), - length.out = settings$ensemble$size) + length.out = settings$ensemble$size + ) } - #using the sample ids + # using the sample ids samples$samples <- input_path[samples$ids] return(samples) diff --git a/modules/uncertainty/R/flux_uncertainty.R b/modules/uncertainty/R/flux_uncertainty.R index 6902cbe902a..198ce84c9ac 100644 --- a/modules/uncertainty/R/flux_uncertainty.R +++ b/modules/uncertainty/R/flux_uncertainty.R @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved.

This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html @@ -9,23 +9,25 @@ #--------------------------------------------------------------------------------------------------# #' Read Ameriflux L2 Data -#' +#' #' @param file.name path to file #' @param year currently ignored #' @return Ameriflux L2 data read from file #' @export #' @author Mike Dietze, Carl Davidson read.ameriflux.L2 <- function(file.name, year) { - data <- as.data.frame(utils::read.table(file.name, header = TRUE, sep = ",", - na.strings = c("-9999", "-6999"), - stringsAsFactors = FALSE)) + data <- as.data.frame(utils::read.table(file.name, + header = TRUE, sep = ",", + na.strings = c("-9999", "-6999"), + stringsAsFactors = FALSE + )) # data$time <- year + (data$DTIME / 366.0) return(data) } # read.ameriflux.L2 ##' Get delta between sequential flux datapoints -##' +##' ##' @param measurement numeric vector ##' @return Difference between consecutive measurements ##' @export @@ -34,20 +36,20 @@ get.change <- function(measurement) { gaps <- measurement %in% c(-6999, -9999) # | quality > 0 measurement[gaps] <- NA - even <- seq(measurement)%%2 == 0 - odd <- seq(measurement)%%2 == 1 + even <- seq(measurement) %% 2 == 0 + odd <- seq(measurement) %% 2 == 1 return(measurement[even] - measurement[odd]) } # get.change #--------------------------------------------------------------------------------------------------# ##' Calculate parameters for heteroskedastic flux uncertainty -##' +##' ##' @name flux.uncertainty ##' @title Calculate parameters for heteroskedastic flux uncertainty ##' @param measurement = flux time-series ##' @param QC = quality control flag time series (0 = best) -##' @param flags = additional flags on flux filtering of PAIRS (length = 1/2 that of the +##' @param flags = additional flags on flux filtering of PAIRS (length = 1/2 that of the ##' time series, TRUE = use). ##' @param bin.num = number of bins (default = 10) ##' @param transform = transformation of magnitude (default = identity) @@ -56,30 +58,32 @@ get.change <- function(measurement) { ##' @return return.list List of parameters from the fitted uncertainty model ##' @export ##' @author Mike Dietze, Carl Davidson -flux.uncertainty <- function(measurement, QC = 0, flags = TRUE, bin.num = 10, +flux.uncertainty <- function(measurement, QC = 0, flags = TRUE, bin.num = 10, transform = identity, minBin = 5, ...) 
{ ## calcuate paired differences between points change <- get.change(measurement) - + ## convert gaps to NA gaps <- measurement %in% c(-6999, -9999) # | quality > 0 measurement[gaps] <- NA - + ## combine all indicators even <- seq(measurement) %% 2 == 0 odd <- seq(measurement) %% 2 == 1 Q2 <- QC[even] == 0 & QC[odd] == 0 & flags & !is.na(measurement[even]) & !is.na(measurement[odd]) - + ## calulate error and flux magnitude for each pair of points indErr <- abs(change[Q2]) / sqrt(2) magnitude <- measurement[even][Q2] - + ## calculate bins - bins <- seq(from = min(magnitude, na.rm = TRUE), - to = max(magnitude, na.rm = TRUE), - length.out = bin.num) - + bins <- seq( + from = min(magnitude, na.rm = TRUE), + to = max(magnitude, na.rm = TRUE), + length.out = bin.num + ) + ## calculate binned mean, error, bias, and sample size magBin <- c() errBin <- c() @@ -89,13 +93,15 @@ flux.uncertainty <- function(measurement, QC = 0, flags = TRUE, bin.num = 10, use <- magnitude >= bins[k] & magnitude < bins[k + 1] nBin[k] <- sum(use, na.rm = TRUE) magBin[k] <- mean(transform(magnitude[use]), na.rm = TRUE) - + if (nBin[k] > minBin) { ## && sum(!is.na(change[use])) > 50) { errBin[k] <- stats::sd(indErr[use], na.rm = TRUE) biasBin[k] <- mean(indErr[use], na.rm = TRUE) - print(paste(length(magnitude[use]), sum(!is.na(change[use])), - magBin[k], errBin[k])) + print(paste( + length(magnitude[use]), sum(!is.na(change[use])), + magBin[k], errBin[k] + )) } else { if (nBin[k] == 0) { magBin[k] <- NA @@ -105,43 +111,45 @@ flux.uncertainty <- function(measurement, QC = 0, flags = TRUE, bin.num = 10, print(paste(length(magnitude[use]), sum(!is.na(change[use])))) } } - + ## separate fluxes into positive, negative, and zero bins zero <- diff(sign(bins)) > 0 pos <- magBin > 0 & !zero neg <- magBin < 0 & !zero - + ## subtract off zero bin, fit regression to positive and negative components ## would be better to fit a two line model with a common intercept, but this ## is quicker to implement for the time being E2 <- errBin - errBin[zero] E2 <- errBin - errBin[zero] intercept <- errBin[zero] - - return.list <- list(mag = magBin, - err = errBin, - bias = biasBin, - n = nBin, - intercept = intercept) - - if(!all(is.na(E2[pos]))){ + + return.list <- list( + mag = magBin, + err = errBin, + bias = biasBin, + n = nBin, + intercept = intercept + ) + + if (!all(is.na(E2[pos]))) { mp <- stats::lm(E2[pos] ~ magBin[pos] - 1) return.list$slopeP <- mp$coefficients[1] - } - if(!all(is.na(E2[neg]))){ + } + if (!all(is.na(E2[neg]))) { mn <- stats::lm(E2[neg] ~ magBin[neg] - 1) return.list$slopeN <- mn$coefficients[1] - }else{ + } else { return.list$slopeN <- mp$coefficients[1] } - + return(return.list) } # flux.uncertainty #--------------------------------------------------------------------------------------------------# ##' Plot fit for heteroskedastic flux uncertainty -##' +##' ##' @name plot_flux_uncertainty ##' @title Plot fit for heteroskedastic flux uncertainty ##' @param f output of flux.uncertainty functions @@ -153,9 +161,11 @@ plot_flux_uncertainty <- function(f, ...) 
{ big <- 10000 graphics::lines(c(0, big), c(f$intercept, f$slopeP * big)) graphics::lines(c(0, -big), c(f$intercept, -f$slopeN * big)) - graphics::legend("bottomleft", legend = c("intercept", f$intercept, - "slopeP", f$slopeP, - "slopeN", f$slopeN)) + graphics::legend("bottomleft", legend = c( + "intercept", f$intercept, + "slopeP", f$slopeP, + "slopeN", f$slopeN + )) } # plot_flux_uncertainty @@ -163,53 +173,67 @@ plot_oechel_flux <- function(observations, site) { graphics::par(mfrow = c(2, 2)) # only use data from March 1 through November 1 observations <- observations[observations$DOY > 60 & observations$DOY < 305, ] - + # dRg <- get.change(observations$Rg) dD <- get.change(observations$D) # abs(DRg)<200 & abs(DD)<5 & dTa <- get.change(observations$TA) flags <- abs(dTa) < 3 - + print("NEE+") - plot_flux_uncertainty((observations$FC[observations$FC >= 0]), flags = flags, - main = site, xlab = "NEE bin (+)", ylab = "NEE random error", - bin.num = 80) + plot_flux_uncertainty((observations$FC[observations$FC >= 0]), + flags = flags, + main = site, xlab = "NEE bin (+)", ylab = "NEE random error", + bin.num = 80 + ) print("NEE-") - plot_flux_uncertainty((observations$FC[observations$FC <= 0]), flags = flags, - main = site, xlab = "NEE bin (-)", ylab = "NEE random error", - bin.num = 80) + plot_flux_uncertainty((observations$FC[observations$FC <= 0]), + flags = flags, + main = site, xlab = "NEE bin (-)", ylab = "NEE random error", + bin.num = 80 + ) print("LE") - plot_flux_uncertainty((observations$LE[observations$LE >= 0]), flags = flags, - main = site, xlab = "LE bin (+)", ylab = "LE random error") - plot_flux_uncertainty((observations$LE[observations$LE <= 0]), flags = flags, - main = site, xlab = "LE bin (-)", ylab = "LE random error") + plot_flux_uncertainty((observations$LE[observations$LE >= 0]), + flags = flags, + main = site, xlab = "LE bin (+)", ylab = "LE random error" + ) + plot_flux_uncertainty((observations$LE[observations$LE <= 0]), + flags = flags, + main = site, xlab = "LE bin (-)", ylab = "LE random error" + ) print("Soil Temperature") - plot_flux_uncertainty(observations$TS1[observations$TS1 >= 0], flags = flags, - main = site, xlab = "Soil Temp bin (+)", ylab = "Soil Temp random error") - plot_flux_uncertainty(observations$TS1[observations$TS1 <= 0], flags = flags, - main = site, xlab = "Soil Temp bin (-)", ylab = "Soil Temp random error") + plot_flux_uncertainty(observations$TS1[observations$TS1 >= 0], + flags = flags, + main = site, xlab = "Soil Temp bin (+)", ylab = "Soil Temp random error" + ) + plot_flux_uncertainty(observations$TS1[observations$TS1 <= 0], + flags = flags, + main = site, xlab = "Soil Temp bin (-)", ylab = "Soil Temp random error" + ) } # plot_oechel_flux tundra.flux.uncertainty <- function() { ## dummy function created temporarly to encapsulate Carl's analysis - + itex.climate <- lapply(1998:2011, function(year) { folder <- "../SoilMoistureHollister" file <- paste0(year, "%20ITEX") file <- dir(folder, pattern = file, full.names = TRUE) print(file) data <- as.data.frame(utils::read.table(file, header = FALSE, sep = "\t", stringsAsFactors = FALSE)) - columns <- c("UTC", "ATZ", "location", "site", "treatment", "plot", - "year", "julian", "month", "hour") + columns <- c( + "UTC", "ATZ", "location", "site", "treatment", "plot", + "year", "julian", "month", "hour" + ) colnames(data)[1:length(columns)] <- columns colnames(data)[ncol(data) - 1] <- "wfv" data <- data[data$wfv > -999, ] - data <- data[, colnames(data) %in% c(columns, "wfv")] #standardize columns 
+ data <- data[, colnames(data) %in% c(columns, "wfv")] # standardize columns print(names(data)) return(data) }) itex.climate <- do.call(rbind, itex.climate) - + oechel.atqasuk <- lapply(1999:2006, function(year) { file <- paste("usatqasu_", year, "_L2.csv", sep = "") print(file) @@ -217,11 +241,12 @@ tundra.flux.uncertainty <- function() { }) oechel.atqasuk <- do.call(rbind, oechel.atqasuk) plot_oechel_flux(oechel.atqasuk, "Atqasuk") - plot_flux_uncertainty(itex.climate$wfv[itex.climate$site %in% c("AD")], - main = "Atqasuk", - xlab = "Soil Moisture bin (%vol)", - ylab = "Soil Moisture random error (%vol)") - + plot_flux_uncertainty(itex.climate$wfv[itex.climate$site %in% c("AD")], + main = "Atqasuk", + xlab = "Soil Moisture bin (%vol)", + ylab = "Soil Moisture random error (%vol)" + ) + oechel.barrow <- lapply(1998:2006, function(year) { file <- paste("usakbarr_", year, "_L2.csv", sep = "") print(file) @@ -230,7 +255,8 @@ tundra.flux.uncertainty <- function() { oechel.barrow <- do.call(rbind, oechel.barrow) plot_oechel_flux(oechel.barrow, "Barrow") return(plot_flux_uncertainty(itex.climate$wfv[itex.climate$site %in% c("BD")], - main = "Barrow", - xlab = "Soil Moisture bin (%vol)", - ylab = "Soil Moisture random error (%vol)")) + main = "Barrow", + xlab = "Soil Moisture bin (%vol)", + ylab = "Soil Moisture random error (%vol)" + )) } # tundra.flux.uncertainty diff --git a/modules/uncertainty/R/get.parameter.samples.R b/modules/uncertainty/R/get.parameter.samples.R index 03e69b29783..54d92bbd7ee 100644 --- a/modules/uncertainty/R/get.parameter.samples.R +++ b/modules/uncertainty/R/get.parameter.samples.R @@ -1,6 +1,5 @@ - -##' Convert priors / MCMC samples to chains that can be sampled for model parameters -##' +##' Convert priors / MCMC samples to chains that can be sampled for model parameters +##' ##' @param settings PEcAn settings object ##' @param posterior.files list of filenames to read from ##' @param ens.sample.method one of "halton", "sobol", "torus", "lhc", "uniform" @@ -8,52 +7,50 @@ ##' ##' @author David LeBauer, Shawn Serbin, Istem Fer ### Identify PFTs in the input settings.xml file -get.parameter.samples <- function(settings, - posterior.files = rep(NA, length(settings$pfts)), +get.parameter.samples <- function(settings, + posterior.files = rep(NA, length(settings$pfts)), ens.sample.method = "uniform") { - pfts <- settings$pfts - num.pfts <- length(settings$pfts) + pfts <- settings$pfts + num.pfts <- length(settings$pfts) pft.names <- list() - outdirs <- list() + outdirs <- list() ## Open database connection con <- try(PEcAn.DB::db.open(settings$database$bety)) on.exit(try(PEcAn.DB::db.close(con), silent = TRUE), add = TRUE) - + # If we fail to connect to DB then we set to NULL - if (inherits(con, "try-error")) { + if (inherits(con, "try-error")) { con <- NULL PEcAn.logger::logger.warn("We were not able to successfully establish a connection with Bety ") } - + for (i.pft in seq_along(pfts)) { pft.names[i.pft] <- settings$pfts[[i.pft]]$name - + ### If no PFT(s) are specified insert NULL to warn user if (length(pft.names) == 0) { pft.names[1] <- "NULL" } - + ### Get output directory info - if(!is.null(settings$pfts[[i.pft]]$outdir)){ + if (!is.null(settings$pfts[[i.pft]]$outdir)) { outdirs[i.pft] <- settings$pfts[[i.pft]]$outdir - } else { - outdirs[i.pft] <- unique(PEcAn.DB::dbfile.check(type = "Posterior",container.id = settings$pfts[[i.pft]]$posteriorid,con=con)$file_path) + } else { + outdirs[i.pft] <- unique(PEcAn.DB::dbfile.check(type = "Posterior", container.id = 
settings$pfts[[i.pft]]$posteriorid, con = con)$file_path) } - - } ### End of for loop to extract pft names - + } ### End of for loop to extract pft names + PEcAn.logger::logger.info("Selected PFT(s): ", pft.names) - + ## Generate empty list arrays for output. trait.samples <- sa.samples <- ensemble.samples <- env.samples <- runs.samples <- param.names <- list() - + # flag determining whether samples are independent (e.g. when params fitted individually) independent <- TRUE - + ## Load PFT priors and posteriors for (i in seq_along(pft.names)) { - - distns = new.env() + distns <- new.env() ## Load posteriors if (!is.na(posterior.files[i])) { @@ -72,13 +69,15 @@ get.parameter.samples <- function(settings, load(file.path(outdirs[i], "prior.distns.Rdata"), envir = distns) } } - + ### Load trait mcmc data (if exists, either from MA or PDA) - if (!is.null(settings$pfts[[i]]$posteriorid) && !inherits(con, "try-error")) {# first check if there are any files associated with posterior ids + if (!is.null(settings$pfts[[i]]$posteriorid) && !inherits(con, "try-error")) { # first check if there are any files associated with posterior ids files <- PEcAn.DB::dbfile.check("Posterior", - settings$pfts[[i]]$posteriorid, - con, settings$host$name, return.all = TRUE) - tid <- grep("trait.mcmc.*Rdata", files$file_name) + settings$pfts[[i]]$posteriorid, + con, settings$host$name, + return.all = TRUE + ) + tid <- grep("trait.mcmc.*Rdata", files$file_name) if (length(tid) > 0) { trait.mcmc.file <- file.path(files$file_path[tid], files$file_name[tid]) ma.results <- TRUE @@ -86,55 +85,59 @@ get.parameter.samples <- function(settings, # PDA samples are fitted together, to preserve correlations downstream let workflow know they should go together - if(grepl("mcmc.pda", trait.mcmc.file)) independent <- FALSE + if (grepl("mcmc.pda", trait.mcmc.file)) independent <- FALSE # NOTE: Global MA samples will also be together, right? 
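# --- Illustrative sketch, not part of this patch or of the PEcAn sources ----------
# Why keeping jointly fitted MCMC samples "together" matters downstream: when
# param.names is non-NULL, one shared row index is drawn per ensemble member and
# applied to every fitted trait, preserving posterior correlations; otherwise each
# trait is drawn independently through its marginal quantiles. The correlated
# 'post' data frame below is synthetic.
z <- rnorm(1000)
post <- data.frame(Vcmax = 60 + 10 * z, SLA = 20 + 3 * z + rnorm(1000))
ens.size <- 100
same.i <- sample.int(nrow(post), ens.size, replace = TRUE) # joint: one shared index
joint <- post[same.i, ]
indep <- data.frame(
  Vcmax = stats::quantile(post$Vcmax, stats::runif(ens.size)),
  SLA = stats::quantile(post$SLA, stats::runif(ens.size))
)
cor(joint$Vcmax, joint$SLA) # close to the synthetic posterior correlation (about 0.95)
cor(indep$Vcmax, indep$SLA) # close to zero: the correlation structure is lost
# -----------------------------------------------------------------------------------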
- - - }else{ + } else { PEcAn.logger::logger.info("No trait.mcmc file is associated with this posterior ID.") ma.results <- FALSE } - }else if ("trait.mcmc.Rdata" %in% dir(unlist(outdirs[i]))) { + } else if ("trait.mcmc.Rdata" %in% dir(unlist(outdirs[i]))) { PEcAn.logger::logger.info("Defaulting to trait.mcmc file in the pft directory.") ma.results <- TRUE load(file.path(outdirs[i], "trait.mcmc.Rdata"), envir = distns) } else { ma.results <- FALSE } - + pft.name <- unlist(pft.names[i]) - + ### When no ma for a trait, sample from prior ### Trim all chains to shortest mcmc chain, else 20000 samples - if(!is.null(distns$prior.distns)){ + if (!is.null(distns$prior.distns)) { priors <- rownames(distns$prior.distns) } else { priors <- NULL - } + } if (!is.null(distns$trait.mcmc)) { param.names[[i]] <- names(distns$trait.mcmc) names(param.names)[i] <- pft.name - + samples.num <- min(sapply(distns$trait.mcmc, function(x) nrow(as.matrix(x)))) - + ## report which traits use MA results, which use priors if (length(param.names[[i]]) > 0) { - PEcAn.logger::logger.info("PFT", pft.names[i], "has MCMC samples for:\n", - paste0(param.names[[i]], collapse = "\n ")) + PEcAn.logger::logger.info( + "PFT", pft.names[i], "has MCMC samples for:\n", + paste0(param.names[[i]], collapse = "\n ") + ) } if (!all(priors %in% param.names[[i]])) { - PEcAn.logger::logger.info("PFT", pft.names[i], "will use prior distributions for:\n", - paste0(priors[!priors %in% param.names[[i]]], collapse = "\n ")) + PEcAn.logger::logger.info( + "PFT", pft.names[i], "will use prior distributions for:\n", + paste0(priors[!priors %in% param.names[[i]]], collapse = "\n ") + ) } } else { param.names[[i]] <- list() samples.num <- 20000 PEcAn.logger::logger.info("No MCMC results for PFT", pft.names[i]) - PEcAn.logger::logger.info("PFT", pft.names[i], "will use prior distributions for", - priors) + PEcAn.logger::logger.info( + "PFT", pft.names[i], "will use prior distributions for", + priors + ) } - if(is.null(priors)) priors = param.names[[i]] - + if (is.null(priors)) priors <- param.names[[i]] + PEcAn.logger::logger.info("using ", samples.num, "samples per trait") if (ens.sample.method == "halton") { q_samples <- randtoolbox::halton(n = samples.num, dim = length(priors)) @@ -145,64 +148,75 @@ get.parameter.samples <- function(settings, } else if (ens.sample.method == "lhc") { q_samples <- PEcAn.emulator::lhc(t(matrix(0:1, ncol = length(priors), nrow = 2)), samples.num) } else if (ens.sample.method == "uniform") { - q_samples <- matrix(stats::runif(samples.num * length(priors)), - samples.num, - length(priors)) + q_samples <- matrix( + stats::runif(samples.num * length(priors)), + samples.num, + length(priors) + ) } else { PEcAn.logger::logger.info("Method ", ens.sample.method, " has not been implemented yet, using uniform random sampling") # uniform random - q_samples <- matrix(stats::runif(samples.num * length(priors)), - samples.num, - length(priors)) + q_samples <- matrix( + stats::runif(samples.num * length(priors)), + samples.num, + length(priors) + ) } for (prior in priors) { if (prior %in% param.names[[i]]) { samples <- distns$trait.mcmc[[prior]] %>% - purrr::map(~ .x[,'beta.o']) %>% + purrr::map(~ .x[, "beta.o"]) %>% unlist() %>% as.matrix() } else { - samples <- PEcAn.priors::get.sample(distns$prior.distns[prior, ], samples.num, q_samples[ , priors==prior]) + samples <- PEcAn.priors::get.sample(distns$prior.distns[prior, ], samples.num, q_samples[, priors == prior]) } trait.samples[[pft.name]][[prior]] <- samples } - } ### End for 
loop - + } ### End for loop + # if samples are independent, set param.names to NULL # this is important for downstream, when param.names is not NULL MCMC will be sampled accordingly - if(independent){ + if (independent) { param.names <- NULL } - + if ("sensitivity.analysis" %in% names(settings)) { - ### Get info on the quantiles to be run in the sensitivity analysis (if requested) quantiles <- PEcAn.utils::get.quantiles(settings$sensitivity.analysis$quantiles) ### Get info on the years to run the sensitivity analysis (if requested) - sa.years <- data.frame(sa.start = settings$sensitivity.analysis$start.year, - sa.end = settings$sensitivity.analysis$end.year) - + sa.years <- data.frame( + sa.start = settings$sensitivity.analysis$start.year, + sa.end = settings$sensitivity.analysis$end.year + ) + PEcAn.logger::logger.info("\n Selected Quantiles: ", PEcAn.utils::vecpaste(round(quantiles, 3))) - + ### Generate list of sample quantiles for SA run - sa.samples <- PEcAn.utils::get.sa.sample.list(pft = trait.samples, env = env.samples, - quantiles = quantiles) + sa.samples <- PEcAn.utils::get.sa.sample.list( + pft = trait.samples, env = env.samples, + quantiles = quantiles + ) } if ("ensemble" %in% names(settings)) { if (settings$ensemble$size == 1) { ## run at median if only one run in ensemble - ensemble.samples <- PEcAn.utils::get.sa.sample.list(pft = trait.samples, env = env.samples, - quantiles = 0.5) - #if it's not there it's one probably - if (is.null(settings$ensemble$size)) settings$ensemble$size<-1 + ensemble.samples <- PEcAn.utils::get.sa.sample.list( + pft = trait.samples, env = env.samples, + quantiles = 0.5 + ) + # if it's not there it's one probably + if (is.null(settings$ensemble$size)) settings$ensemble$size <- 1 } else if (settings$ensemble$size > 1) { - ## subset the trait.samples to ensemble size using Halton sequence - ensemble.samples <- get.ensemble.samples(settings$ensemble$size, trait.samples, - env.samples, ens.sample.method, param.names) + ensemble.samples <- get.ensemble.samples( + settings$ensemble$size, trait.samples, + env.samples, ens.sample.method, param.names + ) } } - - save(ensemble.samples, trait.samples, sa.samples, runs.samples, env.samples, - file = file.path(settings$outdir, "samples.Rdata")) + + save(ensemble.samples, trait.samples, sa.samples, runs.samples, env.samples, + file = file.path(settings$outdir, "samples.Rdata") + ) } # get.parameter.samples diff --git a/modules/uncertainty/R/get.results.R b/modules/uncertainty/R/get.results.R index 89024960336..98462770cf1 100644 --- a/modules/uncertainty/R/get.results.R +++ b/modules/uncertainty/R/get.results.R @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html @@ -20,40 +20,41 @@ ##' @param variable variables to retrieve, as vector of names or expressions ##' @param start.year,end.year first and last years to retrieve ##' @author David LeBauer, Shawn Serbin, Mike Dietze, Ryan Kelly -get.results <- function(settings, sa.ensemble.id = NULL, ens.ensemble.id = NULL, +get.results <- function(settings, sa.ensemble.id = NULL, ens.ensemble.id = NULL, variable = NULL, start.year = NULL, end.year = NULL) { - outdir <- settings$outdir - + sensitivity.output <- list() if ("sensitivity.analysis" %in% names(settings)) { ### Load PEcAn sa info - # Can specify ensemble ids manually. If not, look in settings. - # if no ensemble ids in settings look in samples.Rdata, + # Can specify ensemble ids manually. If not, look in settings. + # if no ensemble ids in settings look in samples.Rdata, # which for backwards compatibility still contains the sample info for (the most recent) # sensitivity and ensemble analysis combined. if (!is.null(sa.ensemble.id)) { - fname <- sensitivity.filename(settings, "sensitivity.samples", "Rdata", - ensemble.id = sa.ensemble.id, - all.var.yr = TRUE, - pft = NULL) + fname <- sensitivity.filename(settings, "sensitivity.samples", "Rdata", + ensemble.id = sa.ensemble.id, + all.var.yr = TRUE, + pft = NULL + ) } else if (!is.null(settings$sensitivity.analysis$ensemble.id)) { sa.ensemble.id <- settings$sensitivity.analysis$ensemble.id - fname <- sensitivity.filename(settings, "sensitivity.samples", "Rdata", - ensemble.id = sa.ensemble.id, - all.var.yr = TRUE, - pft = NULL) + fname <- sensitivity.filename(settings, "sensitivity.samples", "Rdata", + ensemble.id = sa.ensemble.id, + all.var.yr = TRUE, + pft = NULL + ) } else { fname <- file.path(outdir, "samples.Rdata") sa.ensemble.id <- NULL } - + if (!file.exists(fname)) { PEcAn.logger::logger.severe("No sensitivity analysis samples file found!") } samples <- new.env() load(fname, envir = samples) - + # For backwards compatibility, define some variables if not just loaded if (is.null(samples$pft.names)) { samples$pft.names <- names(samples$trait.samples) @@ -64,7 +65,7 @@ get.results <- function(settings, sa.ensemble.id = NULL, ens.ensemble.id = NULL, if (is.null(samples$sa.run.ids)) { samples$sa.run.ids <- samples$runs.samples$sa } - + # Set variable and years. 
Use args first, then settings, then defaults/error start.year.sa <- start.year if (is.null(start.year.sa)) { @@ -73,7 +74,7 @@ get.results <- function(settings, sa.ensemble.id = NULL, ens.ensemble.id = NULL, if (is.null(start.year.sa)) { start.year.sa <- NA } - + end.year.sa <- end.year if (is.null(end.year.sa)) { end.year.sa <- settings$sensitivity.analysis$end.year @@ -81,7 +82,7 @@ get.results <- function(settings, sa.ensemble.id = NULL, ens.ensemble.id = NULL, if (is.null(end.year.sa)) { end.year.sa <- NA } - + variables.sa <- variable if (is.null(variables.sa)) { if ("variable" %in% names(settings$sensitivity.analysis)) { @@ -90,26 +91,29 @@ get.results <- function(settings, sa.ensemble.id = NULL, ens.ensemble.id = NULL, PEcAn.logger::logger.severe("no variable defined for sensitivity analysis") } } - + # Only handling one variable at a time for now if (length(variables.sa) >= 1) { - for(variable.sa in variables.sa){ - PEcAn.logger::logger.warn(paste0("Currently performing sensitivity analysis on variable ", - variable.sa, ")")) - + for (variable.sa in variables.sa) { + PEcAn.logger::logger.warn(paste0( + "Currently performing sensitivity analysis on variable ", + variable.sa, ")" + )) + # if an expression is provided, convert.expr returns names of the variables accordingly # if a derivation is not requested it returns the variable name as is variables <- PEcAn.utils::convert.expr(unlist(variable.sa)) variable.sa <- variables$variable.eqn variable.fn <- variables$variable.drv - - for(pft.name in samples$pft.names){ + + for (pft.name in samples$pft.names) { quantiles <- rownames(samples$sa.samples[[pft.name]]) traits <- samples$trait.names[[pft.name]] - + # when there is variable-per pft in the outputs, check for the tag for deciding SA per pft - per.pft <- ifelse(!is.null(settings$sensitivity.analysis$perpft), - as.logical(settings$sensitivity.analysis$perpft), FALSE) + per.pft <- ifelse(!is.null(settings$sensitivity.analysis$perpft), + as.logical(settings$sensitivity.analysis$perpft), FALSE + ) sensitivity.output[[pft.name]] <- read.sa.output( traits = traits, quantiles = quantiles, @@ -120,23 +124,25 @@ get.results <- function(settings, sa.ensemble.id = NULL, ens.ensemble.id = NULL, end.year = end.year.sa, variable = variable.sa, sa.run.ids = samples$sa.run.ids, - per.pft = per.pft) + per.pft = per.pft + ) } - + # Save sensitivity output - - fname <- sensitivity.filename(settings, "sensitivity.output", "Rdata", - all.var.yr = FALSE, - pft = NULL, - ensemble.id = sa.ensemble.id, - variable = variable.fn, - start.year = start.year.sa, - end.year = end.year.sa) + + fname <- sensitivity.filename(settings, "sensitivity.output", "Rdata", + all.var.yr = FALSE, + pft = NULL, + ensemble.id = sa.ensemble.id, + variable = variable.fn, + start.year = start.year.sa, + end.year = end.year.sa + ) save(sensitivity.output, file = fname) } } } - + ensemble.output <- list() if ("ensemble" %in% names(settings)) { ### Load PEcAn ensemble info Can specify ensemble ids manually. If not, look in @@ -144,14 +150,16 @@ get.results <- function(settings, sa.ensemble.id = NULL, ens.ensemble.id = NULL, ### compatibility still contains the sample info for (the most recent) sensitivity ### and ensemble analysis combined. 
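# --- Illustrative sketch, not part of this patch or of the PEcAn sources ----------
# The lookup order described in the comment above, condensed into a hypothetical
# helper: an explicitly supplied ensemble id wins, then the id stored in settings,
# and only then the legacy all-in-one samples.Rdata file. 'pick.samples.file' is
# made up; ensemble.filename() is called with the same arguments as in the code below.
pick.samples.file <- function(settings, ens.ensemble.id = NULL) {
  if (!is.null(ens.ensemble.id)) {
    ensemble.filename(settings, "ensemble.samples", "Rdata",
      ensemble.id = ens.ensemble.id, all.var.yr = TRUE
    )
  } else if (!is.null(settings$ensemble$ensemble.id)) {
    ensemble.filename(settings, "ensemble.samples", "Rdata",
      ensemble.id = settings$ensemble$ensemble.id, all.var.yr = TRUE
    )
  } else {
    file.path(settings$outdir, "samples.Rdata")
  }
}
# -----------------------------------------------------------------------------------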
if (!is.null(ens.ensemble.id)) { - fname <- ensemble.filename(settings, "ensemble.samples", "Rdata", - ensemble.id = ens.ensemble.id, - all.var.yr = TRUE) + fname <- ensemble.filename(settings, "ensemble.samples", "Rdata", + ensemble.id = ens.ensemble.id, + all.var.yr = TRUE + ) } else if (!is.null(settings$ensemble$ensemble.id)) { ens.ensemble.id <- settings$ensemble$ensemble.id fname <- ensemble.filename(settings, "ensemble.samples", "Rdata", - ensemble.id = ens.ensemble.id, - all.var.yr = TRUE) + ensemble.id = ens.ensemble.id, + all.var.yr = TRUE + ) } else { fname <- file.path(outdir, "samples.Rdata") } @@ -160,7 +168,7 @@ get.results <- function(settings, sa.ensemble.id = NULL, ens.ensemble.id = NULL, } ens <- new.env() load(fname, envir = ens) - + # For backwards compatibility, define some variables if not just loaded if (is.null(ens$pft.names)) { ens$pft.names <- names(ens$trait.samples) @@ -171,7 +179,7 @@ get.results <- function(settings, sa.ensemble.id = NULL, ens.ensemble.id = NULL, if (is.null(ens$ens.run.ids)) { ens$ens.run.ids <- ens$runs.samples$ens } - + # Set variable and years. Use args first, then settings, then defaults/error start.year.ens <- start.year if (is.null(start.year.ens)) { @@ -180,7 +188,7 @@ get.results <- function(settings, sa.ensemble.id = NULL, ens.ensemble.id = NULL, if (is.null(start.year.ens)) { start.year.ens <- NA } - + end.year.ens <- end.year if (is.null(end.year.ens)) { end.year.ens <- settings$ensemble$end.year @@ -188,7 +196,7 @@ get.results <- function(settings, sa.ensemble.id = NULL, ens.ensemble.id = NULL, if (is.null(end.year.ens)) { end.year.ens <- NA } - + variables.ens <- variable if (is.null(variables.ens)) { if ("variable" %in% names(settings$ensemble)) { @@ -198,39 +206,43 @@ get.results <- function(settings, sa.ensemble.id = NULL, ens.ensemble.id = NULL, } } } - - if (is.null(variables.ens)) + + if (is.null(variables.ens)) { PEcAn.logger::logger.severe("No variables for ensemble analysis!") - + } + # Only handling one variable at a time for now if (length(variables.ens) >= 1) { - for(variable.ens in variables.ens){ - PEcAn.logger::logger.warn(paste0("Currently performing ensemble analysis on variable ", - variable.ens, ")")) - + for (variable.ens in variables.ens) { + PEcAn.logger::logger.warn(paste0( + "Currently performing ensemble analysis on variable ", + variable.ens, ")" + )) + # if an expression is provided, convert.expr returns names of the variables accordingly # if a derivation is not requested it returns the variable name as is variables <- PEcAn.utils::convert.expr(variable.ens) variable.ens <- variables$variable.eqn variable.fn <- variables$variable.drv - + ensemble.output <- PEcAn.uncertainty::read.ensemble.output( settings$ensemble$size, - pecandir = outdir, + pecandir = outdir, outdir = settings$modeloutdir, - start.year = start.year.ens, - end.year = end.year.ens, + start.year = start.year.ens, + end.year = end.year.ens, variable = variable.ens, ens.run.ids = ens$ens.run.ids ) - + # Save ensemble output - fname <- ensemble.filename(settings, "ensemble.output", "Rdata", - all.var.yr = FALSE, - ensemble.id = ens.ensemble.id, - variable = variable.fn, - start.year = start.year.ens, - end.year = end.year.ens) + fname <- ensemble.filename(settings, "ensemble.output", "Rdata", + all.var.yr = FALSE, + ensemble.id = ens.ensemble.id, + variable = variable.fn, + start.year = start.year.ens, + end.year = end.year.ens + ) save(ensemble.output, file = fname) } } diff --git a/modules/uncertainty/R/plots.R 
b/modules/uncertainty/R/plots.R index c39435efb5a..5bfd7233888 100644 --- a/modules/uncertainty/R/plots.R +++ b/modules/uncertainty/R/plots.R @@ -1,13 +1,13 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# ##' Variance Decomposition Plots ##' ##' Plots variance decomposition tryptich @@ -23,112 +23,130 @@ ##' elasticities = c(a=1,b=2,c=0.5), ##' variances = c(a = 20, b=30, c = 10)) ##' do.call(gridExtra::grid.arrange, c(plot_variance_decomposition(x), ncol = 4)) -plot_variance_decomposition <- function(plot.inputs, +plot_variance_decomposition <- function(plot.inputs, fontsize = list(title = 18, axis = 14)) { - ggplot2::theme_set(ggplot2::theme_classic() + ggplot2::theme(axis.text.x = ggplot2::element_text(size = fontsize$axis, vjust = -1), - axis.text.y = ggplot2::element_blank(), axis.ticks = ggplot2::element_blank(), - axis.line = ggplot2::element_blank(), axis.title.x = ggplot2::element_blank(), - axis.title.y = ggplot2::element_blank(), - panel.grid.minor = ggplot2::element_blank(), - panel.border = ggplot2::element_blank())) - + ggplot2::theme_set(ggplot2::theme_classic() + ggplot2::theme( + axis.text.x = ggplot2::element_text(size = fontsize$axis, vjust = -1), + axis.text.y = ggplot2::element_blank(), axis.ticks = ggplot2::element_blank(), + axis.line = ggplot2::element_blank(), axis.title.x = ggplot2::element_blank(), + axis.title.y = ggplot2::element_blank(), + panel.grid.minor = ggplot2::element_blank(), + panel.border = ggplot2::element_blank() + )) + traits <- names(plot.inputs$variances) units <- as.character(PEcAn.utils::trait.lookup(traits)$units) trait.labels <- as.character(PEcAn.utils::trait.lookup(traits)$figid) - plot.data <- data.frame(trait.labels = ifelse(!is.na(trait.labels), - trait.labels, - traits), - units = ifelse(!is.na(units), units, ""), - coef.vars = plot.inputs$coef.vars * 100, - elasticities = plot.inputs$elasticities, - variances = plot.inputs$variances, - points = seq_along(traits) - 0.5) - + plot.data <- data.frame( + trait.labels = ifelse(!is.na(trait.labels), + trait.labels, + traits + ), + units = ifelse(!is.na(units), units, ""), + coef.vars = plot.inputs$coef.vars * 100, + elasticities = plot.inputs$elasticities, + variances = plot.inputs$variances, + points = seq_along(traits) - 0.5 + ) + plot.data <- plot.data[order(plot.data$variances, decreasing = FALSE), ] - - base.plot <- ggplot2::ggplot(plot.data) + ggplot2::coord_flip() - - - trait.plot <- base.plot + ggplot2::ggtitle("Parameter") + - ggplot2::geom_text(ggplot2::aes(y = 1, x = .data$points, label = trait.labels, hjust = 1), size = fontsize$axis/3) + - ggplot2::scale_y_continuous(breaks = c(0, 0), limits = c(0, 1)) + + + base.plot <- ggplot2::ggplot(plot.data) + + ggplot2::coord_flip() + + + trait.plot <- base.plot + ggplot2::ggtitle("Parameter") + + ggplot2::geom_text(ggplot2::aes(y = 1, x = .data$points, label 
= trait.labels, hjust = 1), size = fontsize$axis / 3) + + ggplot2::scale_y_continuous(breaks = c(0, 0), limits = c(0, 1)) + ggplot2::theme(axis.text.x = ggplot2::element_blank()) - cv.plot <- base.plot + ggplot2::ggtitle("CV (%)") + + cv.plot <- base.plot + ggplot2::ggtitle("CV (%)") + ggplot2::geom_pointrange(ggplot2::aes(x = .data$points, y = .data$coef.vars, ymin = 0, ymax = .data$coef.vars), size = 1.25) + ggplot2::theme(plot.title = ggplot2::element_text(size = fontsize$title)) - - el.plot <- base.plot + ggplot2::ggtitle("Elasticity") + - ggplot2::theme(plot.title = ggplot2::element_text(size = fontsize$title)) + + + el.plot <- base.plot + ggplot2::ggtitle("Elasticity") + + ggplot2::theme(plot.title = ggplot2::element_text(size = fontsize$title)) + ggplot2::geom_pointrange(ggplot2::aes(x = .data$points, y = .data$elasticities, ymin = 0, ymax = .data$elasticities), size = 1.25) - - pv.plot <- base.plot + ggplot2::ggtitle("Variance") + - ggplot2::theme(plot.title = ggplot2::element_text(size = fontsize$title)) + + + pv.plot <- base.plot + ggplot2::ggtitle("Variance") + + ggplot2::theme(plot.title = ggplot2::element_text(size = fontsize$title)) + ggplot2::geom_pointrange(ggplot2::aes(x = .data$points, sqrt(.data$variances), ymin = 0, ymax = sqrt(.data$variances)), size = 1.25) - + return(list(trait.plot = trait.plot, cv.plot = cv.plot, el.plot = el.plot, pv.plot = pv.plot)) } # plot_variance_decomposition -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# ##' Plot univariate response of model output to a trait parameter. ##' ##' Plots for a single trait; called by \code{\link{plot_sensitivities}} ##' to plot sensitivity plots for multiple traits. ##' @name plot_sensitivity -##' @title Sensitivity plot -##' @param sa.sample trait quantiles used in sensitivity analysis +##' @title Sensitivity plot +##' @param sa.sample trait quantiles used in sensitivity analysis ##' @param sa.spline spline function estimated from sensitivity analysis ##' @param trait trait name for title ##' @param y.range limits for y axis of plot ##' @param median.i index of median value in sa.sample; \code{median.i == which(as.numeric(rownames(sa.sample)) == 50) } ##' @param prior.sa.sample similar to sa.sample, but for prior distribution. If given, plots sensitivity for prior run -##' @param prior.sa.spline similar to sa.spline, but for prior trait distribution. +##' @param prior.sa.spline similar to sa.spline, but for prior trait distribution. 
##' @param fontsize (optional) list with three arguments that can be set to vary the fontsize of the title, axis labels, and axis title in the sensitivity plots ##' @param linesize passed to ggplot to set line thickness ##' @param dotsize passed to ggplot to set point size ##' ##' @export ##' @return object of class ggplot -plot_sensitivity <- function(sa.sample, sa.spline, trait, y.range = c(0, 50), median.i = 4, - prior.sa.sample = NULL, prior.sa.spline = NULL, - fontsize = list(title = 12, axis = 8), +plot_sensitivity <- function(sa.sample, sa.spline, trait, y.range = c(0, 50), median.i = 4, + prior.sa.sample = NULL, prior.sa.spline = NULL, + fontsize = list(title = 12, axis = 8), linesize = 1, dotsize = 2) { LENGTH_OUT <- 1000 - + units <- PEcAn.utils::trait.lookup(trait)$units saplot <- ggplot2::ggplot() - + post.x <- seq(from = min(sa.sample), to = max(sa.sample), length.out = LENGTH_OUT) - + saplot <- saplot + ## plot spline function - ggplot2::geom_line(ggplot2::aes(x=.data$x, y=.data$y), data = data.frame(x = post.x, y = sa.spline(post.x)), size = linesize) + + ggplot2::geom_line(ggplot2::aes(x = .data$x, y = .data$y), data = data.frame(x = post.x, y = sa.spline(post.x)), size = linesize) + ## plot points used to evaluate spline - ggplot2::geom_point(ggplot2::aes(x=.data$x, y=.data$y), data = data.frame(x = sa.sample, y = sa.spline(sa.sample)), - size = dotsize) + # indicate median with larger point - ggplot2::geom_point(ggplot2::aes(x = .data$x, y=.data$y), data = data.frame(x = sa.sample[median.i], y = sa.spline(sa.sample[median.i])), - size = dotsize * 1.3) + + ggplot2::geom_point(ggplot2::aes(x = .data$x, y = .data$y), + data = data.frame(x = sa.sample, y = sa.spline(sa.sample)), + size = dotsize + ) + # indicate median with larger point + ggplot2::geom_point(ggplot2::aes(x = .data$x, y = .data$y), + data = data.frame(x = sa.sample[median.i], y = sa.spline(sa.sample[median.i])), + size = dotsize * 1.3 + ) + ggplot2::scale_y_continuous(limits = range(pretty(y.range)), breaks = pretty(y.range, n = 3)[1:3]) + - ggplot2::theme_bw() + + ggplot2::theme_bw() + ggplot2::ggtitle(trait) + - ggplot2::theme(axis.text.x = ggplot2::element_text(size = fontsize$axis), - axis.text.y = ggplot2::element_text(size = fontsize$axis), - axis.title.x = ggplot2::element_text(size = fontsize$axis), - axis.title.y = ggplot2::element_blank(), - plot.title = ggplot2::element_text(size = fontsize$title), - panel.border = ggplot2::element_blank()) - + ggplot2::theme( + axis.text.x = ggplot2::element_text(size = fontsize$axis), + axis.text.y = ggplot2::element_text(size = fontsize$axis), + axis.title.x = ggplot2::element_text(size = fontsize$axis), + axis.title.y = ggplot2::element_blank(), + plot.title = ggplot2::element_text(size = fontsize$title), + panel.border = ggplot2::element_blank() + ) + ## Following conditional can be removed to only plot posterior sa prior.x <- post.x if (!is.null(prior.sa.sample) & !is.null(prior.sa.spline)) { prior.x <- seq(from = min(prior.sa.sample), to = max(prior.sa.sample), length.out = LENGTH_OUT) saplot <- saplot + ## plot spline - ggplot2::geom_line(ggplot2::aes(x = .data$x, y= .data$y), data = data.frame(x = prior.x, y = prior.sa.spline(prior.x)), - size = linesize, color = "grey") + ## plot points used to evaluate spline - ggplot2::geom_point(ggplot2::aes(x= .data$x, y= .data$y), data = data.frame(x = prior.sa.sample, y = prior.sa.spline(prior.sa.sample)), - size = dotsize, color = "grey") + ## indicate location of medians - ggplot2::geom_point(ggplot2::aes(x = 
.data$x, y= .data$y), data = data.frame(x = prior.sa.sample[median.i], y = prior.sa.spline(prior.sa.sample[median.i])), - size = dotsize * 1.5, color = "grey") + ggplot2::geom_line(ggplot2::aes(x = .data$x, y = .data$y), + data = data.frame(x = prior.x, y = prior.sa.spline(prior.x)), + size = linesize, color = "grey" + ) + ## plot points used to evaluate spline + ggplot2::geom_point(ggplot2::aes(x = .data$x, y = .data$y), + data = data.frame(x = prior.sa.sample, y = prior.sa.spline(prior.sa.sample)), + size = dotsize, color = "grey" + ) + ## indicate location of medians + ggplot2::geom_point(ggplot2::aes(x = .data$x, y = .data$y), + data = data.frame(x = prior.sa.sample[median.i], y = prior.sa.spline(prior.sa.sample[median.i])), + size = dotsize * 1.5, color = "grey" + ) } max.x <- max(prior.x) min.x <- min(prior.x) @@ -139,7 +157,7 @@ plot_sensitivity <- function(sa.sample, sa.spline, trait, y.range = c(0, 50), me } # plot_sensitivity -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# ##' Plot functions and quantiles used in sensitivity analysis ##' ##' Generates a plot using \code{\link{plot_sensitivity}} for multiple traits. @@ -149,7 +167,7 @@ plot_sensitivity <- function(sa.sample, sa.spline, trait, y.range = c(0, 50), me ##' @param ... arguments passed to \code{\link{plot_sensitivity}} ##' @export ##' @return list of plots, one per trait -plot_sensitivities <- function(sensitivity.plot.inputs, +plot_sensitivities <- function(sensitivity.plot.inputs, prior.sensitivity.plot.inputs = NULL, ...) { sa.samples <- sensitivity.plot.inputs$sa.samples sa.splines <- sensitivity.plot.inputs$sa.splines @@ -158,11 +176,11 @@ plot_sensitivities <- function(sensitivity.plot.inputs, prior.sa.splines <- prior.sensitivity.plot.inputs$sa.splines } traits <- names(sa.samples) - + # y.range <- c(0, max(mapply(do.call, sa.splines, lapply(sa.samples, list)), # na.rm = TRUE)) y.range <- range(mapply(do.call, sa.splines, lapply(sa.samples, list)), na.rm = TRUE) - + sensitivity.plots <- list() for (trait in traits) { if (!is.null(prior.sensitivity.plot.inputs)) { @@ -172,14 +190,16 @@ plot_sensitivities <- function(sensitivity.plot.inputs, prior.sa.sample <- NULL prior.sa.spline <- NULL } - sensitivity.plots[[trait]] <- plot_sensitivity(sa.sample = sa.samples[, trait], - sa.spline = sa.splines[[trait]], - trait <- trait, - y.range = y.range, - median.i = which(as.numeric(rownames(sa.samples)) == 50), - prior.sa.sample = prior.sa.sample, - prior.sa.spline = prior.sa.spline, - ...) + sensitivity.plots[[trait]] <- plot_sensitivity( + sa.sample = sa.samples[, trait], + sa.spline = sa.splines[[trait]], + trait <- trait, + y.range = y.range, + median.i = which(as.numeric(rownames(sa.samples)) == 50), + prior.sa.sample = prior.sa.sample, + prior.sa.spline = prior.sa.spline, + ... + ) } return(sensitivity.plots) } # plot_sensitivities diff --git a/modules/uncertainty/R/run.ensemble.analysis.R b/modules/uncertainty/R/run.ensemble.analysis.R index 9447b59a21c..be97b944096 100644 --- a/modules/uncertainty/R/run.ensemble.analysis.R +++ b/modules/uncertainty/R/run.ensemble.analysis.R @@ -1,14 +1,14 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- #' run ensemble.analysis -#' +#' #' @param settings PEcAn settings object #' @param plot.timeseries if TRUE plots a modeled timeseries of target variable(s) with CIs #' @param ensemble.id database ID, taken from settings if not specified @@ -19,26 +19,28 @@ #' @return nothing, creates ensemble plots as ensemble.analysis.pdf #' @export #' @author David LeBauer, Shawn Serbin, Ryan Kelly -run.ensemble.analysis <- function(settings, plot.timeseries = NA, ensemble.id = NULL, +run.ensemble.analysis <- function(settings, plot.timeseries = NA, ensemble.id = NULL, variable = NULL, start.year = NULL, end.year = NULL, ...) { - # Set variable and years. Use args first, then settings, then defaults/error if (is.null(ensemble.id)) { ensemble.id <- settings$ensemble$ensemble.id } if (is.null(ensemble.id)) { # Try to just grab the most recent one - suppressWarnings(ens.ids <- as.numeric(sub("ensemble.samples.", "", - sub(".Rdata", "", dir(settings$outdir, "ensemble.samples"))))) - + suppressWarnings(ens.ids <- as.numeric(sub( + "ensemble.samples.", "", + sub(".Rdata", "", dir(settings$outdir, "ensemble.samples")) + ))) + if (length(ens.ids) > 0) { ensemble.id <- max(ens.ids) } else { - if (is.null(ensemble.id)) + if (is.null(ensemble.id)) { PEcAn.logger::logger.severe("Can't find a valid ensemble for ensemble analysis!") + } } } - + if (is.null(start.year)) { start.year <- settings$ensemble$start.year } @@ -48,7 +50,7 @@ run.ensemble.analysis <- function(settings, plot.timeseries = NA, ensemble.id = if (is.null(start.year) | is.null(end.year)) { PEcAn.logger::logger.severe("No years given for ensemble analysis!") } - + if (is.null(variable)) { if ("variable" %in% names(settings$ensemble)) { var <- which(names(settings$ensemble) == "variable") @@ -63,34 +65,38 @@ run.ensemble.analysis <- function(settings, plot.timeseries = NA, ensemble.id = variables <- variable if (length(variables) >= 1) { - for(variable in variables) { - PEcAn.logger::logger.warn(paste0("Currently performing ensemble analysis on variable ", - variable)) - - cflux <- c("GPP", "NPP", "NEE", "TotalResp", "AutoResp", "HeteroResp", "DOC_flux", "Fire_flux") #converted to gC/m2/s - wflux <- c("Evap", "TVeg", "Qs", "Qsb", "Rainf") #kgH20 m-2 s-1 - + for (variable in variables) { + PEcAn.logger::logger.warn(paste0( + "Currently performing ensemble analysis on variable ", + variable + )) + + cflux <- c("GPP", "NPP", "NEE", "TotalResp", "AutoResp", "HeteroResp", "DOC_flux", "Fire_flux") # converted to gC/m2/s + wflux <- c("Evap", "TVeg", "Qs", "Qsb", "Rainf") # kgH20 m-2 s-1 + variables <- PEcAn.utils::convert.expr(variable) variable.ens <- variables$variable.eqn variable.fn <- variables$variable.drv - + print(paste("----- Variable: ", variable.fn, sep = "")) - - #units <- lapply(variable, function(x) { paste0(x, " (", mstmipvar(x, silent=TRUE)$units, ")") }) - units <- paste0(variable.fn, " (", PEcAn.utils::mstmipvar(variable.fn, silent=TRUE)$units, ")") - + + # units <- lapply(variable, function(x) { paste0(x, " (", mstmipvar(x, silent=TRUE)$units, ")") }) + units <- paste0(variable.fn, " (", PEcAn.utils::mstmipvar(variable.fn, silent = TRUE)$units, ")") + ### Load parsed 
model results - fname <- ensemble.filename(settings, "ensemble.output", "Rdata", all.var.yr = FALSE, - ensemble.id = ensemble.id, variable = variable.fn, start.year = start.year, end.year = end.year) + fname <- ensemble.filename(settings, "ensemble.output", "Rdata", + all.var.yr = FALSE, + ensemble.id = ensemble.id, variable = variable.fn, start.year = start.year, end.year = end.year + ) samples <- new.env() load(fname, envir = samples) - - my.dat = unlist(samples$ensemble.output) - if(is.null(my.dat) | all(is.na(my.dat))){ + + my.dat <- unlist(samples$ensemble.output) + if (is.null(my.dat) | all(is.na(my.dat))) { PEcAn.logger::logger.warn("no data in ensemble.output") return() } - + ### ------------------- Start ensemble analysis ------------------- ensemble.results <- list() if (is.null(settings$run$site$name)) { @@ -98,54 +104,61 @@ run.ensemble.analysis <- function(settings, plot.timeseries = NA, ensemble.id = } else { print(paste("----- Running ensemble analysis for site: ", settings$run$site$name)) } - + ## Generate ensemble figure - fname <- ensemble.filename(settings, "ensemble.analysis", "pdf", - all.var.yr = FALSE, - ensemble.id = ensemble.id, - variable = variable.fn, - start.year = start.year, - end.year = end.year) - + fname <- ensemble.filename(settings, "ensemble.analysis", "pdf", + all.var.yr = FALSE, + ensemble.id = ensemble.id, + variable = variable.fn, + start.year = start.year, + end.year = end.year + ) + grDevices::pdf(file = fname, width = 13, height = 6) - graphics::par(mfrow = c(1, 2), mar = c(4, 4.8, 1, 2)) # B, L, T, R - - graphics::hist(my.dat,xlab=units, - main="",cex.axis=1.1,cex.lab=1.4,col="grey85") + graphics::par(mfrow = c(1, 2), mar = c(4, 4.8, 1, 2)) # B, L, T, R + + graphics::hist(my.dat, + xlab = units, + main = "", cex.axis = 1.1, cex.lab = 1.4, col = "grey85" + ) graphics::box(lwd = 2.2) - - graphics::boxplot(my.dat,ylab=units, - boxwex=0.6,col="grey85", cex.axis=1.1,range=2, - pch=21,cex=1.4, bg="black",cex.lab=1.5) + + graphics::boxplot(my.dat, + ylab = units, + boxwex = 0.6, col = "grey85", cex.axis = 1.1, range = 2, + pch = 21, cex = 1.4, bg = "black", cex.lab = 1.5 + ) graphics::box(lwd = 2.2) - + grDevices::dev.off() - + print("----- Done!") print(" ") print("-----------------------------------------------") print(" ") print(" ") - + ### Plot ensemble time-series if (!is.na(plot.timeseries)) { fname <- ensemble.filename(settings, "ensemble.ts", "pdf", - all.var.yr = FALSE, - ensemble.id = ensemble.id, - variable = variable.fn, - start.year = start.year, - end.year = end.year) - + all.var.yr = FALSE, + ensemble.id = ensemble.id, + variable = variable.fn, + start.year = start.year, + end.year = end.year + ) + grDevices::pdf(fname, width = 12, height = 9) ensemble.ts.analysis <- ensemble.ts(read.ensemble.ts(settings, variable = variable), ...) grDevices::dev.off() - - fname <- ensemble.filename(settings, "ensemble.ts.analysis", "Rdata", - all.var.yr = FALSE, - ensemble.id = ensemble.id, - variable = variable.fn, - start.year = start.year, - end.year = end.year) + + fname <- ensemble.filename(settings, "ensemble.ts.analysis", "Rdata", + all.var.yr = FALSE, + ensemble.id = ensemble.id, + variable = variable.fn, + start.year = start.year, + end.year = end.year + ) save(ensemble.ts.analysis, file = fname) } } @@ -178,9 +191,8 @@ runModule.run.ensemble.analysis <- function(settings, ...) 
{ #' @export #' #' @author Michael Dietze, Ryan Kelly -read.ensemble.ts <- function(settings, ensemble.id = NULL, variable = NULL, +read.ensemble.ts <- function(settings, ensemble.id = NULL, variable = NULL, start.year = NULL, end.year = NULL) { - model <- settings$model$type # Set variable and years. Use args first, then settings, then defaults/error if (is.null(start.year)) { @@ -196,31 +208,35 @@ read.ensemble.ts <- function(settings, ensemble.id = NULL, variable = NULL, if (is.null(variable)) { PEcAn.logger::logger.severe("No variables for ensemble time series analysis!") } - + # Only handling one variable at a time for now - PEcAn.logger::logger.warn(paste0("Currently performing ensemble time series analysis on variable ", - variable, ")")) - + PEcAn.logger::logger.warn(paste0( + "Currently performing ensemble time series analysis on variable ", + variable, ")" + )) + variables <- PEcAn.utils::convert.expr(variable) variable.ens <- variables$variable.eqn variable.fn <- variables$variable.drv - - print(paste("----- Variable: ", variable.fn, sep="")) + + print(paste("----- Variable: ", variable.fn, sep = "")) print("----- Reading ensemble output ------") - + ### Load ensemble run IDs Can specify ensemble ids manually. If not, look in ### settings. If none there, just look in samples.Rdata, which for backwards ### compatibility still contains the sample info for (the most recent) sensitivity ### and ensemble analysis combined. if (!is.null(ensemble.id)) { fname <- ensemble.filename(settings, "ensemble.samples", "Rdata", - ensemble.id = ensemble.id, - all.var.yr = TRUE) + ensemble.id = ensemble.id, + all.var.yr = TRUE + ) } else if (!is.null(settings$ensemble$ensemble.id)) { ensemble.id <- settings$ensemble$ensemble.id fname <- ensemble.filename(settings, "ensemble.samples", "Rdata", - ensemble.id = ensemble.id, - all.var.yr = TRUE) + ensemble.id = ensemble.id, + all.var.yr = TRUE + ) } else { fname <- file.path(settings$outdir, "samples.Rdata") } @@ -229,63 +245,63 @@ read.ensemble.ts <- function(settings, ensemble.id = NULL, variable = NULL, } samples <- new.env() load(fname, envir = samples) - + # For backwards compatibility, define ens.run.ids if not just loaded if (is.null(samples$ens.run.ids)) { samples$ens.run.ids <- samples$runs.samples$ens } - + ensemble.size <- nrow(samples$ens.run.ids) - + expr <- variable.ens$expression variables <- variable.ens$variables - + ## read ensemble output # Leaving list output even though only one variable allowed for now. Will improve backwards compatibility and maybe help in the future. 
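# Illustrative sketch (not part of this patch): a minimal example of how the enclosing
# read.ensemble.ts(), restyled in this hunk, is typically driven, assuming
# library(PEcAn.uncertainty) is attached, `settings` was produced by
# PEcAn.settings::read.settings(), and finished ensemble output already exists under
# settings$outdir. The variable name and years below are hypothetical placeholders.
ens.ts <- read.ensemble.ts(settings,
  ensemble.id = settings$ensemble$ensemble.id,
  variable = "NPP",
  start.year = 2004,
  end.year = 2008
)
str(ens.ts) # named list, one matrix per derived variable (rows = ensemble members)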
- ensemble.ts <- vector("list", length(variables)) - for(row in rownames(samples$ens.run.ids)) { - run.id <- samples$ens.run.ids[row, 'id'] + ensemble.ts <- vector("list", length(variables)) + for (row in rownames(samples$ens.run.ids)) { + run.id <- samples$ens.run.ids[row, "id"] print(run.id) - - for(var in seq_along(variables)){ + + for (var in seq_along(variables)) { out.tmp <- PEcAn.utils::read.output( run.id, file.path(settings$outdir, "out", run.id), start.year, end.year, - variables[var]) + variables[var] + ) assign(variables[var], out.tmp[[variables[var]]]) } - + # derivation newrun <- eval(parse(text = expr)) - - if(is.null(newrun)){ + + if (is.null(newrun)) { # run failed # skip to next next } - - for(j in seq_along(variable.fn)){ - - if(is.null(ensemble.ts[[1]])){ # dimensions of the sublist matrix hasn't been declared yet - ensemble.ts[[j]] <- matrix(NA,ensemble.size,length(newrun)) + + for (j in seq_along(variable.fn)) { + if (is.null(ensemble.ts[[1]])) { # dimensions of the sublist matrix hasn't been declared yet + ensemble.ts[[j]] <- matrix(NA, ensemble.size, length(newrun)) } - - ensemble.ts[[j]][as.numeric(row),] <- newrun - - } + + ensemble.ts[[j]][as.numeric(row), ] <- newrun + } } - + names(ensemble.ts) <- variable.fn # BMR 10/16/13 Save this variable now to operate later on fname <- ensemble.filename(settings, "ensemble.ts", "Rdata", - all.var.yr = FALSE, - ensemble.id = ensemble.id, - variable = variable, - start.year = start.year, - end.year = end.year) - + all.var.yr = FALSE, + ensemble.id = ensemble.id, + variable = variable, + start.year = start.year, + end.year = end.year + ) + save(ensemble.ts, file = fname) return(ensemble.ts) } # read.ensemble.ts @@ -317,77 +333,84 @@ filterNA <- function(x, w) { ensemble.ts <- function(ensemble.ts, observations = NULL, window = 1, ...) { print("------ Generating ensemble time-series plot ------") variable <- names(ensemble.ts) - + ## temporary check for plots that should be >0 nonzero <- c("GPP", "TotalResp", "AutoResp", "HeteroResp", "Evap", "TVeg") - + ## should probably add an extraction of the time axis from the first ensemble ## member - + ## should probably add extraction of meta-data from netCDF files - + ## plot for (j in seq_along(variable)) { - if (window > 1) { # myens <- apply(ensemble.ts[[j]],1,filterNA,window)#rep(1/window,window)) myens <- t(apply(ensemble.ts[[j]], 1, function(x) { - tapply(x, rep(1:(length(x)/window + 1), each = window)[1:length(x)], - mean, na.rm = TRUE) + tapply(x, rep(1:(length(x) / window + 1), each = window)[1:length(x)], + mean, + na.rm = TRUE + ) })) } else { myens <- ensemble.ts[[j]] } - + ens.mean <- apply(myens, 2, mean, na.rm = TRUE) CI <- apply(myens, 2, stats::quantile, c(0.025, 0.5, 0.975), na.rm = TRUE) ylim <- range(CI, na.rm = TRUE) - + ### temporary fix to values less than zero that are biologically unreasonable (e.g. 
### GPP) if (variable[j] %in% nonzero) { ylim <- c(0, ylim[2]) } - - graphics::plot(ens.mean, ylim = ylim, lwd = 2, - xlab = "time", ylab = variable[j], main = variable[j], - type = "l") - + + graphics::plot(ens.mean, + ylim = ylim, lwd = 2, + xlab = "time", ylab = variable[j], main = variable[j], + type = "l" + ) + ### Code to be updated with polygon (below) for(i in 1:nrow(CI)){ ### lines(CI[i,],col=2,lty=c(2,1,2),lwd=c(1.2,1.0,1.2)) } graphics::lines(CI[1, ], col = 2, lty = 2, lwd = 1.2) # lines(CI[2,],col='dark grey',lty=1,lwd=1.5) graphics::lines(CI[3, ], col = 2, lty = 2, lwd = 1.2) - + ## generate plot polygon using CIs dims <- dim(CI) poly <- 1:dims[2] ## polygon(c(poly ,rev(poly)),c(CI[3,], rev(CI[1,])),col='#99CC99',border=NA) - + ## plot mean over others again lines(ens.mean,col='black',lwd=1.5) ## lines(CI[2,],col='dark grey',lty=1,lwd=1.5) - + if (!is.null(observations)) { if (window == 1) { fobs <- observations } else { - fobs <- tapply(observations, rep(1:(length(observations) / window + 1), - each = window)[1:length(observations)], - mean, na.rm = TRUE) + fobs <- tapply(observations, rep(1:(length(observations) / window + 1), + each = window + )[1:length(observations)], + mean, + na.rm = TRUE + ) } # lines(filter(observations,rep(1/window,window)),col=2,lwd=1.5) # lines(filterNA(observations,window),col=2,lwd=1.5) graphics::points(fobs, col = 3, lwd = 1.5) } - + ## show legend graphics::legend("topleft", - legend = c("mean", "95% CI", "data"), - lwd = 3, col = c(1, 2, 3), lty = c(1, 2, 1)) + legend = c("mean", "95% CI", "data"), + lwd = 3, col = c(1, 2, 3), lty = c(1, 2, 1) + ) ## add surrounding box to plot graphics::box(lwd = 2.2) } ensemble.analysis.results <- list() ensemble.analysis.results$mean <- ens.mean ensemble.analysis.results$CI <- CI - + return(invisible(ensemble.analysis.results)) } # ensemble.ts diff --git a/modules/uncertainty/R/run.sensitivity.analysis.R b/modules/uncertainty/R/run.sensitivity.analysis.R index ec7b58a89a0..f69fb0ea91f 100644 --- a/modules/uncertainty/R/run.sensitivity.analysis.R +++ b/modules/uncertainty/R/run.sensitivity.analysis.R @@ -8,7 +8,7 @@ #------------------------------------------------------------------------------- #--------------------------------------------------------------------------------------------------# ##' run sensitivity.analysis -##' +##' ##' Runs the sensitivity analysis module on a finished run ##' ##' @return nothing, saves \code{sensitivity.results} as @@ -27,7 +27,7 @@ ##' @param pfts a vector of PFT names found in `settings` to run sensitivity ##' analysis on ##' @param ... currently unused -##' +##' ##' ##' @export ##' @author David LeBauer, Shawn Serbin, Ryan Kelly @@ -48,170 +48,181 @@ run.sensitivity.analysis <- end.year = NULL, pfts = NULL, ...) { - - if ('sensitivity.analysis' %in% names(settings)) { - # Set variable and years. Use args first, then settings, then defaults/error - if(is.null(start.year)) { - start.year <- settings$sensitivity.analysis$start.year - } - if(is.null(end.year)) { - end.year <- settings$sensitivity.analysis$end.year - } - if(is.null(start.year) | is.null(end.year)) { - PEcAn.logger::logger.severe("No years given for sensitivity analysis!") - } - if (is.null(variable)) { - if ("variable" %in% names(settings$sensitivity.analysis)) { - var <- which(names(settings$sensitivity.analysis) == "variable") - for (i in seq_along(var)) { - variable[i] <- settings$sensitivity.analysis[[var[i]]] + if ("sensitivity.analysis" %in% names(settings)) { + # Set variable and years. 
Use args first, then settings, then defaults/error + if (is.null(start.year)) { + start.year <- settings$sensitivity.analysis$start.year + } + if (is.null(end.year)) { + end.year <- settings$sensitivity.analysis$end.year + } + if (is.null(start.year) | is.null(end.year)) { + PEcAn.logger::logger.severe("No years given for sensitivity analysis!") + } + if (is.null(variable)) { + if ("variable" %in% names(settings$sensitivity.analysis)) { + var <- which(names(settings$sensitivity.analysis) == "variable") + for (i in seq_along(var)) { + variable[i] <- settings$sensitivity.analysis[[var[i]]] + } } } - } - if(is.null(variable)) { - PEcAn.logger::logger.severe("No variables for sensitivity analysis!") - } - if(is.null(pfts)) { - #extract just pft names - pfts <- purrr::map_chr(settings$pfts, "name") - } else { - # validate pfts argument - if(!is.character(pfts)) { - PEcAn.logger::logger.severe("Please supply a character vector for `pfts`") + if (is.null(variable)) { + PEcAn.logger::logger.severe("No variables for sensitivity analysis!") } - if(!pfts %in% purrr::map_chr(settings$pfts, "name")) { - PEcAn.logger::logger.severe("`pfts` must be a subset of the PFTs defined in `settings`") + if (is.null(pfts)) { + # extract just pft names + pfts <- purrr::map_chr(settings$pfts, "name") + } else { + # validate pfts argument + if (!is.character(pfts)) { + PEcAn.logger::logger.severe("Please supply a character vector for `pfts`") + } + if (!pfts %in% purrr::map_chr(settings$pfts, "name")) { + PEcAn.logger::logger.severe("`pfts` must be a subset of the PFTs defined in `settings`") + } } - } - variables <- variable - if(length(variables) >= 1) { - for(variable in variables){ - PEcAn.logger::logger.warn(paste0("Currently performing sensitivity analysis on variable ", variable)) - - ### Load samples - # Have to load samples.Rdata for the traits. But can overwrite the run ids if a sensitivity analysis ensemble id provided. samples.Rdata always has only the most recent ensembles for both ensemble and sensitivity runs. - fname <- file.path(settings$outdir, 'samples.Rdata') - if(!file.exists(fname)) PEcAn.logger::logger.severe("No samples.Rdata file found!") - samples <- new.env() - load(fname, envir = samples) - - # Can specify ensemble ids manually. If not, look in settings. If none there, will use the most recent, which was loaded with samples.Rdata - if(!is.null(ensemble.id)) { - fname <- sensitivity.filename(settings, "sensitivity.samples", "Rdata", - ensemble.id=ensemble.id, all.var.yr=TRUE) - } else if(!is.null(settings$sensitivity.analysis$ensemble.id)) { - ensemble.id <- settings$sensitivity.analysis$ensemble.id - fname <- sensitivity.filename(settings, "sensitivity.samples", "Rdata", - ensemble.id=ensemble.id, all.var.yr=TRUE) - } else { - ensemble.id <- NULL - } - if(file.exists(fname)) { + variables <- variable + if (length(variables) >= 1) { + for (variable in variables) { + PEcAn.logger::logger.warn(paste0("Currently performing sensitivity analysis on variable ", variable)) + + ### Load samples + # Have to load samples.Rdata for the traits. But can overwrite the run ids if a sensitivity analysis ensemble id provided. samples.Rdata always has only the most recent ensembles for both ensemble and sensitivity runs. 
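# Illustrative sketch (not part of this patch): a hedged example of calling
# run.sensitivity.analysis(), the function being restyled in this hunk, on a completed
# workflow. It assumes library(PEcAn.uncertainty) is attached and "pecan.CHECKED.xml"
# describes runs that have already finished; the variable, years, and PFT name are
# placeholders and must match what the settings actually define.
settings <- PEcAn.settings::read.settings("pecan.CHECKED.xml")
run.sensitivity.analysis(settings,
  variable = "NPP",
  start.year = 2004,
  end.year = 2008,
  pfts = "temperate.coniferous" # must be a subset of purrr::map_chr(settings$pfts, "name")
)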
+ fname <- file.path(settings$outdir, "samples.Rdata") + if (!file.exists(fname)) PEcAn.logger::logger.severe("No samples.Rdata file found!") + samples <- new.env() load(fname, envir = samples) - } - - # For backwards compatibility, define some variables if not just loaded - if(is.null(samples$pft.names)) samples$pft.names <- names(samples$trait.samples) - if(is.null(samples$trait.names)) samples$trait.names <- lapply(samples$trait.samples, names) - if(is.null(samples$sa.run.ids)) samples$sa.run.ids <- samples$runs.samples$sa - - ### Load parsed model results - variables <- PEcAn.utils::convert.expr(variable) - variable.fn <- variables$variable.drv - - fname <- sensitivity.filename( - settings, "sensitivity.output", "Rdata", all.var.yr = FALSE, - ensemble.id = ensemble.id, variable = variable.fn, - start.year = start.year, end.year = end.year) - sens_out <- new.env() - load(fname, envir = sens_out) - - ### Generate SA output and diagnostic plots - sensitivity.results <- list() - - for (pft in settings$pfts) { - if (pft$name %in% pfts) { - traits <- samples$trait.names[[pft$name]] - quantiles.str <- rownames(samples$sa.samples[[pft$name]]) - quantiles.str <- quantiles.str[which(quantiles.str != '50')] - quantiles <- as.numeric(quantiles.str)/100 - - C.units <- grepl('^Celsius$', PEcAn.utils::trait.lookup(traits)$units, ignore.case = TRUE) - if(any(C.units)){ - for(x in which(C.units)) { - samples$trait.samples[[pft$name]][[x]] <- PEcAn.utils::ud_convert( - samples$trait.samples[[pft$name]][[x]], "degC", "K") + + # Can specify ensemble ids manually. If not, look in settings. If none there, will use the most recent, which was loaded with samples.Rdata + if (!is.null(ensemble.id)) { + fname <- sensitivity.filename(settings, "sensitivity.samples", "Rdata", + ensemble.id = ensemble.id, all.var.yr = TRUE + ) + } else if (!is.null(settings$sensitivity.analysis$ensemble.id)) { + ensemble.id <- settings$sensitivity.analysis$ensemble.id + fname <- sensitivity.filename(settings, "sensitivity.samples", "Rdata", + ensemble.id = ensemble.id, all.var.yr = TRUE + ) + } else { + ensemble.id <- NULL + } + if (file.exists(fname)) { + load(fname, envir = samples) + } + + # For backwards compatibility, define some variables if not just loaded + if (is.null(samples$pft.names)) samples$pft.names <- names(samples$trait.samples) + if (is.null(samples$trait.names)) samples$trait.names <- lapply(samples$trait.samples, names) + if (is.null(samples$sa.run.ids)) samples$sa.run.ids <- samples$runs.samples$sa + + ### Load parsed model results + variables <- PEcAn.utils::convert.expr(variable) + variable.fn <- variables$variable.drv + + fname <- sensitivity.filename( + settings, "sensitivity.output", "Rdata", + all.var.yr = FALSE, + ensemble.id = ensemble.id, variable = variable.fn, + start.year = start.year, end.year = end.year + ) + sens_out <- new.env() + load(fname, envir = sens_out) + + ### Generate SA output and diagnostic plots + sensitivity.results <- list() + + for (pft in settings$pfts) { + if (pft$name %in% pfts) { + traits <- samples$trait.names[[pft$name]] + quantiles.str <- rownames(samples$sa.samples[[pft$name]]) + quantiles.str <- quantiles.str[which(quantiles.str != "50")] + quantiles <- as.numeric(quantiles.str) / 100 + + C.units <- grepl("^Celsius$", PEcAn.utils::trait.lookup(traits)$units, ignore.case = TRUE) + if (any(C.units)) { + for (x in which(C.units)) { + samples$trait.samples[[pft$name]][[x]] <- PEcAn.utils::ud_convert( + samples$trait.samples[[pft$name]][[x]], "degC", "K" + ) + } + } + + ## 
only perform sensitivity analysis on traits where no more than 2 results are missing + good.saruns <- sapply(sens_out$sensitivity.output[[pft$name]], function(x) sum(is.na(x)) <= 2) + if (!all(good.saruns)) { # if any bad saruns, reduce list of traits and print warning + bad.saruns <- !good.saruns + warning(paste( + "missing >2 runs for", PEcAn.utils::vecpaste(traits[bad.saruns]), + "\n sensitivity analysis or variance decomposition will be performed on these trait(s)", + "\n it is likely that the runs did not complete, this should be fixed !!!!!!" + )) + } + + ### Gather SA results + sensitivity.results[[pft$name]] <- sensitivity.analysis( + trait.samples = samples$trait.samples[[pft$name]][traits], + sa.samples = samples$sa.samples[[pft$name]][, traits, drop = FALSE], + sa.output = sens_out$sensitivity.output[[pft$name]][, traits, drop = FALSE], + outdir = pft$outdir + ) + + ### Send diagnostic output to the console + print(sensitivity.results[[pft$name]]$variance.decomposition.output) + print(sens_out$sensitivity.output[[pft$name]]) + + ### Plotting - Optional + if (plot) { + fname <- sensitivity.filename( + settings, "sensitivity.analysis", "pdf", + all.var.yr = FALSE, pft = pft$name, ensemble.id = ensemble.id, variable = variable.fn, + start.year = start.year, end.year = end.year + ) + + ### Generate SA diagnostic plots + sensitivity.plots <- plot_sensitivities( + sensitivity.results[[pft$name]]$sensitivity.output, + linesize = 1, dotsize = 3 + ) + + grDevices::pdf(fname, height = 12, width = 9) + ## arrange plots http://stackoverflow.com/q/10706753/199217 + ncol <- floor(sqrt(length(sensitivity.plots))) + print(do.call(gridExtra::grid.arrange, c(sensitivity.plots, ncol = ncol))) + print(sensitivity.plots) # old method. depreciated. + grDevices::dev.off() + + ### Generate VD diagnostic plots + vd.plots <- plot_variance_decomposition(sensitivity.results[[pft$name]]$variance.decomposition.output) + # variance.scale = log, variance.prefix='Log') + fname <- sensitivity.filename(settings, "variance.decomposition", "pdf", + all.var.yr = FALSE, pft = pft$name, ensemble.id = ensemble.id, variable = variable.fn, + start.year = start.year, end.year = end.year + ) + + grDevices::pdf(fname, width = 11, height = 8) + do.call(gridExtra::grid.arrange, c(vd.plots, ncol = 4)) + grDevices::dev.off() } - } - - ## only perform sensitivity analysis on traits where no more than 2 results are missing - good.saruns <- sapply(sens_out$sensitivity.output[[pft$name]], function(x) sum(is.na(x)) <=2) - if(!all(good.saruns)) { # if any bad saruns, reduce list of traits and print warning - bad.saruns <- !good.saruns - warning(paste('missing >2 runs for', PEcAn.utils::vecpaste(traits[bad.saruns]), - '\n sensitivity analysis or variance decomposition will be performed on these trait(s)', - '\n it is likely that the runs did not complete, this should be fixed !!!!!!')) - } - - ### Gather SA results - sensitivity.results[[pft$name]] <- sensitivity.analysis( - trait.samples = samples$trait.samples[[pft$name]][traits], - sa.samples = samples$sa.samples[[pft$name]][ ,traits, drop=FALSE], - sa.output = sens_out$sensitivity.output[[pft$name]][ ,traits, drop=FALSE], - outdir = pft$outdir) - - ### Send diagnostic output to the console - print(sensitivity.results[[pft$name]]$variance.decomposition.output) - print(sens_out$sensitivity.output[[pft$name]]) - - ### Plotting - Optional - if(plot){ - fname <- sensitivity.filename( - settings, "sensitivity.analysis", "pdf", - all.var.yr=FALSE, pft=pft$name, ensemble.id=ensemble.id, 
variable=variable.fn, - start.year=start.year, end.year=end.year) - - ### Generate SA diagnostic plots - sensitivity.plots <- plot_sensitivities( - sensitivity.results[[pft$name]]$sensitivity.output, linesize = 1, dotsize = 3) - - grDevices::pdf(fname, height = 12, width = 9) - ## arrange plots http://stackoverflow.com/q/10706753/199217 - ncol <- floor(sqrt(length(sensitivity.plots))) - print(do.call(gridExtra::grid.arrange, c(sensitivity.plots, ncol=ncol))) - print(sensitivity.plots) # old method. depreciated. - grDevices::dev.off() - - ### Generate VD diagnostic plots - vd.plots <- plot_variance_decomposition(sensitivity.results[[pft$name]]$variance.decomposition.output) - #variance.scale = log, variance.prefix='Log') - fname <- sensitivity.filename(settings, "variance.decomposition", "pdf", - all.var.yr=FALSE, pft=pft$name, ensemble.id=ensemble.id, variable=variable.fn, - start.year=start.year, end.year=end.year) - - grDevices::pdf(fname, width = 11, height = 8) - do.call(gridExtra::grid.arrange, c(vd.plots, ncol = 4)) - grDevices::dev.off() - } - - } ## end if sensitivity analysis - - fname <- sensitivity.filename(settings, "sensitivity.results", "Rdata", - all.var.yr=FALSE, pft=NULL, ensemble.id=ensemble.id, variable=variable.fn, - start.year=start.year, end.year=end.year) - - save(sensitivity.results, file = fname) + } ## end if sensitivity analysis + + fname <- sensitivity.filename(settings, "sensitivity.results", "Rdata", + all.var.yr = FALSE, pft = NULL, ensemble.id = ensemble.id, variable = variable.fn, + start.year = start.year, end.year = end.year + ) + + save(sensitivity.results, file = fname) + } } } } } -} -#==================================================================================================# +# ==================================================================================================# ##' @export runModule.run.sensitivity.analysis <- function(settings, ...) { - if(PEcAn.settings::is.MultiSettings(settings)) { + if (PEcAn.settings::is.MultiSettings(settings)) { return(PEcAn.settings::papply(settings, runModule.run.sensitivity.analysis, ...)) } else if (PEcAn.settings::is.Settings(settings)) { run.sensitivity.analysis(settings, ...) diff --git a/modules/uncertainty/R/sensitivity.R b/modules/uncertainty/R/sensitivity.R index 805da534db8..ed1adb22444 100644 --- a/modules/uncertainty/R/sensitivity.R +++ b/modules/uncertainty/R/sensitivity.R @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html @@ -9,8 +9,8 @@ ##' Reads output of sensitivity analysis runs ##' -##' -##' @title Read Sensitivity Analysis output +##' +##' @title Read Sensitivity Analysis output ##' @return dataframe with one col per quantile analysed and one row per trait, ##' each cell is a list of AGB over time ##' @param traits model parameters included in the sensitivity analysis @@ -18,7 +18,7 @@ ##' @param pecandir specifies where pecan writes its configuration files ##' @param outdir directory with model output to use in sensitivity analysis ##' @param pft.name name of PFT used in sensitivity analysis (Optional) -##' @param start.year first year to include in sensitivity analysis +##' @param start.year first year to include in sensitivity analysis ##' @param end.year last year to include in sensitivity analysis ##' @param variable variables to be read from model output ##' @param per.pft flag to determine whether we want SA on pft-specific variables @@ -28,10 +28,8 @@ ##' @export ##' @author Ryan Kelly, David LeBauer, Rob Kooper, Mike Dietze, Istem Fer #--------------------------------------------------------------------------------------------------# -read.sa.output <- function(traits, quantiles, pecandir, outdir, pft.name = "", +read.sa.output <- function(traits, quantiles, pecandir, outdir, pft.name = "", start.year, end.year, variable, sa.run.ids = NULL, per.pft = FALSE) { - - if (is.null(sa.run.ids)) { samples.file <- file.path(pecandir, "samples.Rdata") if (file.exists(samples.file)) { @@ -42,36 +40,41 @@ read.sa.output <- function(traits, quantiles, pecandir, outdir, pft.name = "", PEcAn.logger::logger.error(samples.file, "not found, this file is required by the read.sa.output function") } } - - sa.output <- matrix(nrow = length(quantiles), - ncol = length(traits), - dimnames = list(quantiles, traits)) - + + sa.output <- matrix( + nrow = length(quantiles), + ncol = length(traits), + dimnames = list(quantiles, traits) + ) + expr <- variable$expression variables <- variable$variables - - for(trait in traits){ - for(quantile in quantiles){ + + for (trait in traits) { + for (quantile in quantiles) { run.id <- sa.run.ids[[pft.name]][quantile, trait] - - for(var in seq_along(variables)){ + + for (var in seq_along(variables)) { # if SA is requested on a variable available per pft, pass pft.name to read.output # so that it only returns values for that pft - pass_pft <- switch(per.pft + 1, NULL, pft.name) + pass_pft <- switch(per.pft + 1, + NULL, + pft.name + ) out.tmp <- PEcAn.utils::read.output( runid = run.id, outdir = file.path(outdir, run.id), start.year = start.year, end.year = end.year, variables = variables[var], - pft.name = pass_pft) + pft.name = pass_pft + ) assign(variables[var], out.tmp[[variables[var]]]) } - + # derivation out <- eval(parse(text = expr)) - - sa.output[quantile, trait] <- mean(out, na.rm=TRUE) + sa.output[quantile, trait] <- mean(out, na.rm = TRUE) } ## end loop over quantiles PEcAn.logger::logger.info("reading sensitivity analysis output for model run at ", quantiles, "quantiles of trait", trait) } ## end loop over traits @@ -99,7 +102,7 @@ write.sa.configs <- function(defaults, quantile.samples, settings, model, scipen <- getOption("scipen") options(scipen = 12) my.write.config <- 
paste("write.config.", model, sep = "") - + if (write.to.db) { con <- try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) if (inherits(con, "try-error")) { @@ -110,41 +113,45 @@ write.sa.configs <- function(defaults, quantile.samples, settings, model, } else { con <- NULL } - + # Get the workflow id if ("workflow" %in% names(settings)) { workflow.id <- settings$workflow$id } else { workflow.id <- -1 } - + # find all inputs that have an id inputs <- names(settings$run$inputs) inputs <- inputs[grepl(".id$", inputs)] - + runs <- data.frame() - + # Reading the site.pft specific tags from xml site.pfts.vec <- as.character(unlist(settings$run$site$site.pft)) - - if(!is.null(site.pfts.vec)){ + + if (!is.null(site.pfts.vec)) { # find the name of pfts defined in the body of pecan.xml - defined.pfts <- as.character(unlist(purrr::map(settings$pfts, 'name'))) + defined.pfts <- as.character(unlist(purrr::map(settings$pfts, "name"))) # subset ensemble samples based on the pfts that are specified in the site and they are also sampled from. - if (length(which(site.pfts.vec %in% defined.pfts)) > 0 ) - quantile.samples <- quantile.samples [site.pfts.vec[ which(site.pfts.vec %in% defined.pfts) ]] + if (length(which(site.pfts.vec %in% defined.pfts)) > 0) { + quantile.samples <- quantile.samples[site.pfts.vec[which(site.pfts.vec %in% defined.pfts)]] + } # warn if there is a pft specified in the site but it's not defined in the pecan xml. - if (length(which(!(site.pfts.vec %in% defined.pfts)))>0) - PEcAn.logger::logger.warn(paste0("The following pfts are specified for the siteid ", settings$run$site$id ," but they are not defined as a pft in pecan.xml:", - site.pfts.vec[which(!(site.pfts.vec %in% defined.pfts))])) + if (length(which(!(site.pfts.vec %in% defined.pfts))) > 0) { + PEcAn.logger::logger.warn(paste0( + "The following pfts are specified for the siteid ", settings$run$site$id, " but they are not defined as a pft in pecan.xml:", + site.pfts.vec[which(!(site.pfts.vec %in% defined.pfts))] + )) + } } - - + + ## write median run MEDIAN <- "50" median.samples <- list() for (i in seq_along(quantile.samples)) { - median.samples[[i]] <- quantile.samples[[i]][MEDIAN, , drop=FALSE] + median.samples[[i]] <- quantile.samples[[i]][MEDIAN, , drop = FALSE] } names(median.samples) <- names(quantile.samples) @@ -152,35 +159,42 @@ write.sa.configs <- function(defaults, quantile.samples, settings, model, ensemble.id <- PEcAn.DB::db.query(paste0( "INSERT INTO ensembles (runtype, workflow_id) ", "VALUES ('sensitivity analysis', ", format(workflow.id, scientific = FALSE), ") ", - "RETURNING id"), con = con)[['id']] - - paramlist <- paste0("quantile=MEDIAN,trait=all,pft=", - paste(lapply(settings$pfts, function(x) x[["name"]]), sep = ",")) - run.id <- PEcAn.DB::db.query(paste0("INSERT INTO runs ", + "RETURNING id" + ), con = con)[["id"]] + + paramlist <- paste0( + "quantile=MEDIAN,trait=all,pft=", + paste(lapply(settings$pfts, function(x) x[["name"]]), sep = ",") + ) + run.id <- PEcAn.DB::db.query(paste0( + "INSERT INTO runs ", "(model_id, site_id, start_time, finish_time, outdir, ensemble_id, parameter_list) ", - "values ('", - settings$model$id, "', '", - settings$run$site$id, "', '", - settings$run$start.date, "', '", - settings$run$end.date, "', '", - settings$run$outdir, "', ", - ensemble.id, ", '", - paramlist, "') ", - "RETURNING id"), con = con)[['id']] - + "values ('", + settings$model$id, "', '", + settings$run$site$id, "', '", + settings$run$start.date, "', '", + settings$run$end.date, "', '", + 
settings$run$outdir, "', ", + ensemble.id, ", '", + paramlist, "') ", + "RETURNING id" + ), con = con)[["id"]] + # associate posteriors with ensembles for (pft in defaults) { PEcAn.DB::db.query(paste0( "INSERT INTO posteriors_ensembles (posterior_id, ensemble_id) ", - "values (", pft$posteriorid, ", ", ensemble.id, ")"), con = con) + "values (", pft$posteriorid, ", ", ensemble.id, ")" + ), con = con) } - + # associate inputs with runs if (!is.null(inputs)) { for (x in inputs) { PEcAn.DB::db.query(paste0( - "INSERT INTO inputs_runs (input_id, run_id) ", - "values (", settings$run$inputs[[x]], ", ", run.id, ")"), con = con) + "INSERT INTO inputs_runs (input_id, run_id) ", + "values (", settings$run$inputs[[x]], ", ", run.id, ")" + ), con = con) } } } else { @@ -188,7 +202,7 @@ write.sa.configs <- function(defaults, quantile.samples, settings, model, ensemble.id <- NA } medianrun <- run.id - + # create folders (cleaning up old ones if needed) if (clean) { unlink(file.path(settings$rundir, run.id)) @@ -196,47 +210,49 @@ write.sa.configs <- function(defaults, quantile.samples, settings, model, } dir.create(file.path(settings$rundir, run.id), recursive = TRUE) dir.create(file.path(settings$modeloutdir, run.id), recursive = TRUE) - + # write run information to disk TODO need to print list of pft names and trait # names - cat("runtype : sensitivity analysis\n", - "workflow id : ", workflow.id, "\n", - "ensemble id : ", ensemble.id, "\n", - "pft name : ALL PFT", "\n", - "quantile : MEDIAN\n", - "trait : ALL TRAIT", "\n", - "run id : ", run.id, "\n", - "model : ", model, "\n", - "model id : ", settings$model$id, "\n", - "site : ", settings$run$site$name, "\n", - "site id : ", settings$run$site$id, "\n", - "met data : ", settings$run$site$met, "\n", - "start date : ", settings$run$start.date, "\n", - "end date : ", settings$run$end.date, "\n", - "hostname : ", settings$host$name, "\n", - "rundir : ", file.path(settings$host$rundir, run.id), "\n", - "outdir : ", file.path(settings$host$outdir, run.id), "\n", - file = file.path(settings$rundir, run.id, "README.txt"), - sep = "") - - + cat("runtype : sensitivity analysis\n", + "workflow id : ", workflow.id, "\n", + "ensemble id : ", ensemble.id, "\n", + "pft name : ALL PFT", "\n", + "quantile : MEDIAN\n", + "trait : ALL TRAIT", "\n", + "run id : ", run.id, "\n", + "model : ", model, "\n", + "model id : ", settings$model$id, "\n", + "site : ", settings$run$site$name, "\n", + "site id : ", settings$run$site$id, "\n", + "met data : ", settings$run$site$met, "\n", + "start date : ", settings$run$start.date, "\n", + "end date : ", settings$run$end.date, "\n", + "hostname : ", settings$host$name, "\n", + "rundir : ", file.path(settings$host$rundir, run.id), "\n", + "outdir : ", file.path(settings$host$outdir, run.id), "\n", + file = file.path(settings$rundir, run.id, "README.txt"), + sep = "" + ) + + # I check to make sure the path under the met is a list. if it's specified what met needs to be used in 'met.id' under sensitivity analysis of pecan xml we used that otherwise, I use the first met. - if (is.list(settings$run$inputs$met$path)){ + if (is.list(settings$run$inputs$met$path)) { # This checks for met.id tag in the settings under sensitivity analysis - if it's not there it creates it. Then it's gonna use what it created. 
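# Illustrative sketch (not part of this patch): the met-path selection described in the
# comment above, and applied just below, reduces to indexing a list of candidate met
# paths by settings$sensitivity.analysis$met.id, defaulting to the first entry. A
# standalone toy version with hypothetical paths:
met_paths <- list(
  "/data/met/ERA5.site.2004.nc", # hypothetical path
  "/data/met/GEFS.site.2004.nc" # hypothetical path
)
met_id <- 1 # stands in for settings$sensitivity.analysis$met.id (created above when missing)
chosen_met <- met_paths[[met_id]]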
- if (is.null(settings$sensitivity.analysis$met.id)) settings$sensitivity.analysis$met.id <- 1 - + if (is.null(settings$sensitivity.analysis$met.id)) settings$sensitivity.analysis$met.id <- 1 + settings$run$inputs$met$path <- settings$run$inputs$met$path[[settings$sensitivity.analysis$met.id]] - } - - + + # write configuration - do.call(my.write.config, args = list(defaults = defaults, - trait.values = median.samples, - settings = settings, - run.id = run.id)) + do.call(my.write.config, args = list( + defaults = defaults, + trait.values = median.samples, + settings = settings, + run.id = run.id + )) cat(run.id, file = file.path(settings$rundir, "runs.txt"), sep = "\n", append = TRUE) - + ## loop over pfts runs <- list() for (i in seq_along(names(quantile.samples))) { @@ -244,45 +260,53 @@ write.sa.configs <- function(defaults, quantile.samples, settings, model, if (pftname == "env") { next } - + traits <- colnames(quantile.samples[[i]]) quantiles.str <- rownames(quantile.samples[[i]]) - + runs[[pftname]] <- data.frame() - + ## loop over variables for (trait in traits) { for (quantile.str in quantiles.str) { if (quantile.str != MEDIAN) { quantile <- as.numeric(quantile.str) / 100 trait.samples <- median.samples - trait.samples[[i]][trait] <- quantile.samples[[i]][quantile.str, trait, drop=FALSE] - + trait.samples[[i]][trait] <- quantile.samples[[i]][quantile.str, trait, drop = FALSE] + if (!is.null(con)) { paramlist <- paste0("quantile=", quantile.str, ",trait=", trait, ",pft=", pftname) - insert_result <- PEcAn.DB::db.query(paste0("INSERT INTO runs (model_id, site_id, start_time, finish_time, outdir, ensemble_id, parameter_list) values ('", - settings$model$id, "', '", - settings$run$site$id, "', '", - settings$run$start.date, "', '", - settings$run$end.date, "', '", - settings$run$outdir, "', ", - ensemble.id, ", '", - paramlist, "') RETURNING id"), con = con) + insert_result <- PEcAn.DB::db.query(paste0( + "INSERT INTO runs (model_id, site_id, start_time, finish_time, outdir, ensemble_id, parameter_list) values ('", + settings$model$id, "', '", + settings$run$site$id, "', '", + settings$run$start.date, "', '", + settings$run$end.date, "', '", + settings$run$outdir, "', ", + ensemble.id, ", '", + paramlist, "') RETURNING id" + ), con = con) run.id <- insert_result[["id"]] - + # associate posteriors with ensembles for (pft in defaults) { - PEcAn.DB::db.query(paste0("INSERT INTO posteriors_ensembles (posterior_id, ensemble_id) values (", - pft$posteriorid, ", ", - ensemble.id, ");"), con = con) + PEcAn.DB::db.query(paste0( + "INSERT INTO posteriors_ensembles (posterior_id, ensemble_id) values (", + pft$posteriorid, ", ", + ensemble.id, ");" + ), con = con) } - + # associate inputs with runs if (!is.null(inputs)) { for (x in inputs) { - PEcAn.DB::db.query(paste0("INSERT INTO inputs_runs (input_id, run_id) ", - "values (", settings$run$inputs[[x]], ", ", run.id, ");"), - con = con) + PEcAn.DB::db.query( + paste0( + "INSERT INTO inputs_runs (input_id, run_id) ", + "values (", settings$run$inputs[[x]], ", ", run.id, ");" + ), + con = con + ) } } } else { @@ -290,10 +314,11 @@ write.sa.configs <- function(defaults, quantile.samples, settings, model, run.type = "SA", index = round(quantile, 3), trait = trait, - pft.name = names(trait.samples)[i]) + pft.name = names(trait.samples)[i] + ) } runs[[pftname]][quantile.str, trait] <- run.id - + # create folders (cleaning up old ones if needed) if (clean) { unlink(file.path(settings$rundir, run.id)) @@ -301,36 +326,41 @@ write.sa.configs <- 
function(defaults, quantile.samples, settings, model, } dir.create(file.path(settings$rundir, run.id), recursive = TRUE) dir.create(file.path(settings$modeloutdir, run.id), recursive = TRUE) - + # write run information to disk - cat("runtype : sensitivity analysis\n", - "workflow id : ", workflow.id, "\n", - "ensemble id : ", ensemble.id, "\n", - "pft name : ", names(trait.samples)[i], "\n", - "quantile : ", quantile.str, "\n", - "trait : ", trait, "\n", - "run id : ", run.id, "\n", - "model : ", model, "\n", - "model id : ", settings$model$id, "\n", - "site : ", settings$run$site$name, "\n", - "site id : ", settings$run$site$id, "\n", - "met data : ", settings$run$site$met, "\n", - "start date : ", settings$run$start.date, "\n", - "end date : ", settings$run$end.date, "\n", - "hostname : ", settings$host$name, "\n", - "rundir : ", file.path(settings$host$rundir, run.id), "\n", - "outdir : ", file.path(settings$host$outdir, run.id), "\n", - file = file.path(settings$rundir, run.id, "README.txt"), - sep = "") - + cat("runtype : sensitivity analysis\n", + "workflow id : ", workflow.id, "\n", + "ensemble id : ", ensemble.id, "\n", + "pft name : ", names(trait.samples)[i], "\n", + "quantile : ", quantile.str, "\n", + "trait : ", trait, "\n", + "run id : ", run.id, "\n", + "model : ", model, "\n", + "model id : ", settings$model$id, "\n", + "site : ", settings$run$site$name, "\n", + "site id : ", settings$run$site$id, "\n", + "met data : ", settings$run$site$met, "\n", + "start date : ", settings$run$start.date, "\n", + "end date : ", settings$run$end.date, "\n", + "hostname : ", settings$host$name, "\n", + "rundir : ", file.path(settings$host$rundir, run.id), "\n", + "outdir : ", file.path(settings$host$outdir, run.id), "\n", + file = file.path(settings$rundir, run.id, "README.txt"), + sep = "" + ) + # write configuration - do.call(my.write.config, args = list(defaults = defaults, - trait.values = trait.samples, - settings = settings, - run.id)) - cat(run.id, file = file.path(settings$rundir, "runs.txt"), sep = "\n", - append = TRUE) + do.call(my.write.config, args = list( + defaults = defaults, + trait.values = trait.samples, + settings = settings, + run.id + )) + cat(run.id, + file = file.path(settings$rundir, "runs.txt"), sep = "\n", + append = TRUE + ) } else { runs[[pftname]][MEDIAN, trait] <- medianrun } diff --git a/modules/uncertainty/R/variance.R b/modules/uncertainty/R/variance.R index ee3d3120379..9dbafa89d6f 100644 --- a/modules/uncertainty/R/variance.R +++ b/modules/uncertainty/R/variance.R @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html @@ -13,7 +13,7 @@ ##' @param x numeric vector ##' @return list with variance and sd of variance ##' @author David LeBauer -variance.stats <- function(x){ +variance.stats <- function(x) { list(var = stats::var(x), sd = sd.var(x)) } @@ -26,33 +26,36 @@ variance.stats <- function(x){ ##' and \code{\link{spline.ensemble}} ##' ##' @title Get g_i(phi_i) -##' @param splinefuns univariate spline functions created for each trait, e.g. by the \code{\link{sensitivity.analysis}} function. 
-##' @param trait.samples n x m matrix (or list with m vectors of length n) of n parameter sets, each with a sample from m traits
+##' @param splinefuns univariate spline functions created for each trait, e.g. by the \code{\link{sensitivity.analysis}} function.
+##' @param trait.samples n x m matrix (or list with m vectors of length n) of n parameter sets, each with a sample from m traits
 ##' @param maxn maximum number of parameter sets to evaluate
-##' @return matrix of spline estimates of model output for each of n parameter sets
+##' @return matrix of spline estimates of model output for each of n parameter sets
 ##' @author David LeBauer
-get.gi.phii <- function(splinefuns, trait.samples, maxn = NULL){
+get.gi.phii <- function(splinefuns, trait.samples, maxn = NULL) {
   ## check inputs
-  if(is.list(trait.samples)){
-    trait.samples <- matrix(unlist(trait.samples),
-                            ncol = length(names(trait.samples)))
+  if (is.list(trait.samples)) {
+    trait.samples <- matrix(unlist(trait.samples),
+      ncol = length(names(trait.samples))
+    )
     colnames(trait.samples) <- names(splinefuns)
-    if(!is.null(maxn) & maxn < nrow(trait.samples)){
+    if (!is.null(maxn) & maxn < nrow(trait.samples)) {
       j <- sample(1:nrow(trait.samples), maxn)
       trait.samples <- trait.samples[j, ]
     }
   }
-  if(!is.matrix(trait.samples)){
-    stop(paste('variance.decomposition currently does not handle trait.samples of class', class(trait.samples), '\n please convert to list or matrix'))
+  if (!is.matrix(trait.samples)) {
+    stop(paste("variance.decomposition currently does not handle trait.samples of class", class(trait.samples), "\n please convert to list or matrix"))
   }
-  if(!all(names(splinefuns) %in% colnames(trait.samples))){
-    stop('mismatch between splinefuns and samples')
+  if (!all(names(splinefuns) %in% colnames(trait.samples))) {
+    stop("mismatch between splinefuns and samples")
   }
   traits <- names(splinefuns)
-  
+
   ## g_i(phi_i) the spline estimate of model output for value of trait i
-  gi.phii <- t(plyr::laply(traits,
-                           function(x) splinefuns[[x]](trait.samples[,x])))
+  gi.phii <- t(plyr::laply(
+    traits,
+    function(x) splinefuns[[x]](trait.samples[, x])
+  ))
   colnames(gi.phii) <- traits
   return(gi.phii)
 }
@@ -67,21 +70,25 @@ get.gi.phii <- function(splinefuns, trait.samples, maxn = NULL){
 ##' @param gi.phii matrix given as output from \code{\link{get.gi.phii}}
 ##' @param median median value around which variance will be calculated
 ##' @author David LeBauer
-spline.ensemble <- function(gi.phii, median){
+spline.ensemble <- function(gi.phii, median) {
   ## Calculate ensemble output for each parameter set (each row of trait.samples)
   ## Equation 3
   ## 1. calculate residuals (g_i(phi_i,j) - g_i(median))
   residuals <- gi.phii - median
   ## 2. sum residuals by row, then truncate at 0:
-  spline.estimate <- sapply(rowSums(residuals),
-                            function(x) max(0, x + median))
+  spline.estimate <- sapply(
+    rowSums(residuals),
+    function(x) max(0, x + median)
+  )
   return(spline.estimate)
 }
 
-vd.variance <- function(gi.phii){
+vd.variance <- function(gi.phii) {
   ## Calculate variance for each trait
-  var.phii <- apply(gi.phii, 2, stats::var)
+  var.phii <- apply(gi.phii, 2, stats::var)
   sd.var.phii <- apply(gi.phii, 2, sd.var)
-  return(list(var = sum(var.phii),
-              sd = sqrt(sum(sd.var.phii^2))))
+  return(list(
+    var = sum(var.phii),
+    sd = sqrt(sum(sd.var.phii^2))
+  ))
 }
diff --git a/modules/uncertainty/inst/abbreviated_workflow_SIPNET.R b/modules/uncertainty/inst/abbreviated_workflow_SIPNET.R
index 2c5458e8106..4a9554c0990 100755
--- a/modules/uncertainty/inst/abbreviated_workflow_SIPNET.R
+++ b/modules/uncertainty/inst/abbreviated_workflow_SIPNET.R
@@ -1,26 +1,20 @@
-
-#AbbreviatedWorkflow_Sipnet
-#Variables :
+# AbbreviatedWorkflow_Sipnet
+# Variables :
 library(PEcAn.all)
 library(PEcAn.utils)
-variable_S <- c("GPP ", "NPP", "TotalResp","AutoResp", "HeteroResp", "SoilResp", "NEE", "QLE", "leaf_carbon_content", "GWBI", "TotSoilCarb", "course_root_carbon_content", "fine_root_carbon_content", "litter_carbon_content", "Transp", "TotLivBiom", "LAI", "AGB", "SoilMoist", "SoilMoistFracSWE", "AbvGrndWood")
+variable_S <- c("GPP ", "NPP", "TotalResp", "AutoResp", "HeteroResp", "SoilResp", "NEE", "QLE", "leaf_carbon_content", "GWBI", "TotSoilCarb", "course_root_carbon_content", "fine_root_carbon_content", "litter_carbon_content", "Transp", "TotLivBiom", "LAI", "AGB", "SoilMoist", "SoilMoistFracSWE", "AbvGrndWood")
 settings <- PEcAn.settings::read.settings("pecan.CHECKED.xml")
 for (i in 1:seq_along(variable_S)) { - settings$sensitivity.analysis$variable_S <- variable_S print(settings$sensitivity.analysis$variable) - + # Get results of model runs - - runModule.get.results(settings) - - # Run sensitivity analysis and variance decomposition on model output - runModule.run.sensitivity.analysis(settings) + runModule.get.results(settings) + # Run sensitivity analysis and variance decomposition on model output - + runModule.run.sensitivity.analysis(settings) } -
diff --git a/modules/uncertainty/tests/testthat.R b/modules/uncertainty/tests/testthat.R
index 09d9953c3c2..c4e483a47aa 100644
--- a/modules/uncertainty/tests/testthat.R
+++ b/modules/uncertainty/tests/testthat.R
@@ -1,7 +1,7 @@
 #-------------------------------------------------------------------------------
 # Copyright (c) 2012 University of Illinois, NCSA.
 # All rights reserved. This program and the accompanying materials
-# are made available under the terms of the 
+# are made available under the terms of the
 # University of Illinois/NCSA Open Source License
 # which accompanies this distribution, and is available at
 # http://opensource.ncsa.illinois.edu/license.html
diff --git a/modules/uncertainty/tests/testthat/test.plot.sensitivity.R b/modules/uncertainty/tests/testthat/test.plot.sensitivity.R
index 66894d94a09..6911ec4241d 100644
--- a/modules/uncertainty/tests/testthat/test.plot.sensitivity.R
+++ b/modules/uncertainty/tests/testthat/test.plot.sensitivity.R
@@ -1,8 +1,7 @@
-test_that("plot_sensitivity.analysis works",{
+test_that("plot_sensitivity.analysis works", {
   sa.sample <- 1:7
-  sa.splinefun <- splinefun(1:10,10:1)
+  sa.splinefun <- splinefun(1:10, 10:1)
   trait <- "foo"
   sa.plot <- plot_sensitivity(sa.sample, sa.splinefun, trait)
   expect_true(inherits(sa.plot, "ggplot"))
-
 })
diff --git a/modules/uncertainty/tests/testthat/test.plot.variance.decomposition.R b/modules/uncertainty/tests/testthat/test.plot.variance.decomposition.R
index 3905aeaa246..0078c956213 100644
--- a/modules/uncertainty/tests/testthat/test.plot.variance.decomposition.R
+++ b/modules/uncertainty/tests/testthat/test.plot.variance.decomposition.R
@@ -1,11 +1,12 @@
 test_that("plot_variance_decomposition runs without returning errors", {
   traits <- c("mort2", "fineroot2leaf", "root_turnover_rate")
-  sa.results <- structure(list(coef.vars = structure(c(0.1, 0.97, 2), .Names = traits),
-                               elasticities = structure(c(-1, 0, 1), .Names = traits),
-                               variances = structure(c(1, 2, 3), .Names = traits),
-                               .Names = c("coef.vars", "elasticities", "variances")))
+  sa.results <- structure(list(
+    coef.vars = structure(c(0.1, 0.97, 2), .Names = traits),
+    elasticities = structure(c(-1, 0, 1), .Names = traits),
+    variances = structure(c(1, 2, 3), .Names = traits),
+    .Names = c("coef.vars", "elasticities", "variances")
+  ))
   plots <- plot_variance_decomposition(sa.results)
   expect_equal(names(plots), c("trait.plot", "cv.plot", "el.plot", "pv.plot"))
   expect_true(all(grepl("ggplot", lapply(plots, class))))
 })
-
diff --git a/modules/uncertainty/tests/testthat/test.sensitivity.analysis.R b/modules/uncertainty/tests/testthat/test.sensitivity.analysis.R
index 411d5da601b..c658f2e2128 100644
--- a/modules/uncertainty/tests/testthat/test.sensitivity.analysis.R
+++ b/modules/uncertainty/tests/testthat/test.sensitivity.analysis.R
@@ -1,20 +1,21 @@
-
-test_that("spline.truncate truncates a vector at zero iff only values < min.quantile are negative",{
+test_that("spline.truncate truncates a vector at zero iff only values < min.quantile are negative", {
   set.seed(0)
-  x <- c(rgamma(9980,1,1),
-         rnorm(20))
+  x <- c(
+    rgamma(9980, 1, 1),
+    rnorm(20)
+  )
   x.t <- spline.truncate(x, pnorm(-3))
   expect_true(min(x) < 0)
   expect_true(min(x.t) >= 0)
   expect_true(length(x) == length(spline.truncate(x)))
   ## test that it does not truncate a vector with < 0 at min.quantile
-  x.tt <- spline.truncate(x, min.quantile = sum(x<0)/length(x))
+  x.tt <- spline.truncate(x, min.quantile = sum(x < 0) / length(x))
   expect_true(min(x.tt) < 0)
 })
 
-test_that("sensitivity and elasticity calculations are done at the median",{
-  expect_equal(get.elasticity(1, c(1, 10, 100), c(1, 0.1, 0.01)), 10/0.1)
-  testfun <- sa.splinefun(c(1,2,3), c(1, 4, 6)) ## y = x2; first derivative = 2
-  expect_equal(get.sensitivity(c(1,10, 100), testfun), 2)
+test_that("sensitivity and elasticity calculations are done at the median", {
+  expect_equal(get.elasticity(1, c(1, 10,
100), c(1, 0.1, 0.01)), 10 / 0.1) + testfun <- sa.splinefun(c(1, 2, 3), c(1, 4, 6)) ## y = x2; first derivative = 2 + expect_equal(get.sensitivity(c(1, 10, 100), testfun), 2) expect_equal(signif(get.coef.var(c(1, 10, 100)), 4), 5.474) -}) \ No newline at end of file +}) diff --git a/scripts/EFI_metprocess.R b/scripts/EFI_metprocess.R index ef8bf68c0dd..5cc7ce2e7bd 100644 --- a/scripts/EFI_metprocess.R +++ b/scripts/EFI_metprocess.R @@ -1,19 +1,19 @@ ############################################## # -# EFI Forecasting Challenge +# EFI Forecasting Challenge # ############################################### -#set home directory as object (remember to change to your own directory before running this script) +# set home directory as object (remember to change to your own directory before running this script) homedir <- "/projectnb/dietzelab/ahelgeso" library(PEcAn.all) library(tidyverse) -source('/projectnb/dietzelab/ahelgeso/pecan/modules/data.atmosphere/R/download.NOAA_GEFS.R') -source('/projectnb/dietzelab/ahelgeso/pecan/modules/data.atmosphere/R/download.raw.met.module.R') +source("/projectnb/dietzelab/ahelgeso/pecan/modules/data.atmosphere/R/download.NOAA_GEFS.R") +source("/projectnb/dietzelab/ahelgeso/pecan/modules/data.atmosphere/R/download.raw.met.module.R") -#read in .csv with site info -setwd(file.path(homedir, "pecan/scripts/")) #remember to change to where you keep your dataprep .csv file with the site info -data_prep <- read.csv("dataprep_10_sites.csv") #this .csv file contains the sitename, BETY site id, location to store met files, model name, met source (from .xml), and the met output (from .xml) for each site you want to download met data +# read in .csv with site info +setwd(file.path(homedir, "pecan/scripts/")) # remember to change to where you keep your dataprep .csv file with the site info +data_prep <- read.csv("dataprep_10_sites.csv") # this .csv file contains the sitename, BETY site id, location to store met files, model name, met source (from .xml), and the met output (from .xml) for each site you want to download met data data_prep <- filter(data_prep, met_download == "metprocess") sitename <- data_prep$site_name site_id <- data_prep$siteid_BETY4 @@ -22,48 +22,44 @@ model_name <- data_prep$model_name4 met_source <- data_prep$input_met_source4 met_output <- data_prep$input_met_output4 -#run info -start_date = as.Date(format(Sys.Date()-1, "%Y-%m-%d")) -end_date = as.Date(format(Sys.Date(), "%Y-%m-%d")) -host = list() - host$name = "localhost" -dbparms = list() - dbparms$dbname = "bety" - dbparms$host = "psql-pecan.bu.edu" - dbparms$user = "bety" - dbparms$password = "bety" +# run info +start_date <- as.Date(format(Sys.Date() - 1, "%Y-%m-%d")) +end_date <- as.Date(format(Sys.Date(), "%Y-%m-%d")) +host <- list() +host$name <- "localhost" +dbparms <- list() +dbparms$dbname <- "bety" +dbparms$host <- "psql-pecan.bu.edu" +dbparms$user <- "bety" +dbparms$password <- "bety" -#met.process - for (i in 1:length(sitename)) { - outfolder = file.path(base_dir[i], "noaa_clim/", sitename[i], "/", start_date, "/") - if(!dir.exists(outfolder)){dir.create(outfolder, recursive = TRUE)} - - input_met = list() - input_met$source = met_source[i] - input_met$output = met_output[i] - - site = list() - site$id = site_id[i] - site$name = sitename[i] - - model = model_name[i] - - met.process(site = site, - input_met = input_met, - start_date = start_date, - end_date = end_date, - model = model, - host = host, - dbparms = dbparms, - dir = outfolder, - spin = NULL, - overwrite = FALSE) - - - +# 
met.process +for (i in 1:length(sitename)) { + outfolder <- file.path(base_dir[i], "noaa_clim/", sitename[i], "/", start_date, "/") + if (!dir.exists(outfolder)) { + dir.create(outfolder, recursive = TRUE) } + input_met <- list() + input_met$source <- met_source[i] + input_met$output <- met_output[i] + site <- list() + site$id <- site_id[i] + site$name <- sitename[i] + model <- model_name[i] - + met.process( + site = site, + input_met = input_met, + start_date = start_date, + end_date = end_date, + model = model, + host = host, + dbparms = dbparms, + dir = outfolder, + spin = NULL, + overwrite = FALSE + ) +} diff --git a/scripts/EFI_workflow.R b/scripts/EFI_workflow.R index b985bd2d45a..679ee33e36c 100644 --- a/scripts/EFI_workflow.R +++ b/scripts/EFI_workflow.R @@ -1,5 +1,5 @@ -#You must run this script in the terminal using the code: -#Rscript --vanilla EFI_workflow.R "/projectnb/dietzelab/ahelgeso/pecan/modules/assim.sequential/inst/Site_XMLS/harvard.xml" "/projectnb/dietzelab/ahelgeso/Site_Outputs/Harvard/" 2021-07-01 2021-08-01 +# You must run this script in the terminal using the code: +# Rscript --vanilla EFI_workflow.R "/projectnb/dietzelab/ahelgeso/pecan/modules/assim.sequential/inst/Site_XMLS/harvard.xml" "/projectnb/dietzelab/ahelgeso/Site_Outputs/Harvard/" 2021-07-01 2021-08-01 library("PEcAn.all") library("PEcAn.utils") @@ -10,51 +10,53 @@ library("R.utils") library("dynutils") ###### Preping Workflow for regular SIPNET Run ############## -#set home directory as object (remember to change to your own directory before running this script) +# set home directory as object (remember to change to your own directory before running this script) homedir <- "/projectnb/dietzelab/ahelgeso" -#Load site.xml, start & end date, (with commandArgs specify args in terminal) and outputPath (i.e. where the model outputs will be stored) into args -tmp = commandArgs(trailingOnly = TRUE) -if(length(tmp)<3){ +# Load site.xml, start & end date, (with commandArgs specify args in terminal) and outputPath (i.e. where the model outputs will be stored) into args +tmp <- commandArgs(trailingOnly = TRUE) +if (length(tmp) < 3) { logger.severe("Missing required arguments") } -args = list() -args$settings = tmp[1] -if(!file.exists(args$settings)){ +args <- list() +args$settings <- tmp[1] +if (!file.exists(args$settings)) { logger.severe("Not a valid xml path") } -args$outputPath = tmp[2] -if(!isAbsolutePath(args$outputPath)){ +args$outputPath <- tmp[2] +if (!isAbsolutePath(args$outputPath)) { logger.severe("Not a valid outputPath") } -args$start_date = as.Date(tmp[3]) -if(is.na(args$start_date)){ +args$start_date <- as.Date(tmp[3]) +if (is.na(args$start_date)) { logger.severe("No start date provided") } -if(length(args)>3){ - args$end_date = as.Date(tmp[4]) +if (length(args) > 3) { + args$end_date <- as.Date(tmp[4]) } else { - args$end_date = args$start_date + 35 + args$end_date <- args$start_date + 35 } -if(length(args)>4){ - args$continue = tmp[5] +if (length(args) > 4) { + args$continue <- tmp[5] } else { - args$continue = TRUE + args$continue <- TRUE } -if(!dir.exists(args$outputPath)){dir.create(args$outputPath, recursive = TRUE)} +if (!dir.exists(args$outputPath)) { + dir.create(args$outputPath, recursive = TRUE) +} setwd(args$outputPath) # Open and read in settings file for PEcAn run. 
settings <- PEcAn.settings::read.settings(args$settings) start_date <- args$start_date -end_date<- args$end_date +end_date <- args$end_date # Finding the right end and start date -met.start <- start_date +met.start <- start_date met.end <- met.start + lubridate::days(35) @@ -63,7 +65,7 @@ settings$run$start.date <- as.character(met.start) settings$run$end.date <- as.character(met.end) settings$run$site$met.start <- as.character(met.start) settings$run$site$met.end <- as.character(met.end) -#info +# info settings$info$date <- paste0(format(Sys.time(), "%Y/%m/%d %H:%M:%S"), " +0000") # Update/fix/check settings. @@ -74,50 +76,50 @@ settings <- # Write pecan.CHECKED.xml PEcAn.settings::write.settings(settings, outputfile = "pecan.CHECKED.xml") -#manually add in clim files -con <-try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) +# manually add in clim files +con <- try(PEcAn.DB::db.open(settings$database$bety), silent = TRUE) input_check <- PEcAn.DB::dbfile.input.check( - siteid=settings$run$site$id %>% as.character(), - startdate = settings$run$start.date %>% as.Date, + siteid = settings$run$site$id %>% as.character(), + startdate = settings$run$start.date %>% as.Date(), enddate = NULL, parentid = NA, - mimetype="text/csv", - formatname="Sipnet.climna", + mimetype = "text/csv", + formatname = "Sipnet.climna", con = con, hostname = PEcAn.remote::fqdn(), - pattern = NULL, + pattern = NULL, exact.dates = TRUE, - return.all=TRUE + return.all = TRUE ) -#If INPUTS already exists, add id and met path to settings file +# If INPUTS already exists, add id and met path to settings file -if(length(input_check$id) > 0){ - #met paths - clim_check = list() - for(i in 1:length(input_check$file_path)){ - +if (length(input_check$id) > 0) { + # met paths + clim_check <- list() + for (i in 1:length(input_check$file_path)) { clim_check[[i]] <- file.path(input_check$file_path[i], input_check$file_name[i]) - }#end i loop for creating file paths - #ids - index_id = list() - index_path = list() - for(i in 1:length(input_check$id)){ - index_id[[i]] = as.character(input_check$id[i])#get ids as list - - }#end i loop for making lists - names(index_id) = sprintf("id%s",seq(1:length(input_check$id))) #rename list - names(clim_check) = sprintf("path%s",seq(1:length(input_check$id))) - - settings$run$inputs$met$id = index_id - settings$run$inputs$met$path = clim_check -}else{PEcAn.logger::logger.error("No met file found")} -#settings <- PEcAn.workflow::do_conversions(settings, T, T, T) - -if(is_empty(settings$run$inputs$met$path) & length(clim_check)>0){ - settings$run$inputs$met$id = index_id - settings$run$inputs$met$path = clim_check + } # end i loop for creating file paths + # ids + index_id <- list() + index_path <- list() + for (i in 1:length(input_check$id)) { + index_id[[i]] <- as.character(input_check$id[i]) # get ids as list + } # end i loop for making lists + names(index_id) <- sprintf("id%s", seq(1:length(input_check$id))) # rename list + names(clim_check) <- sprintf("path%s", seq(1:length(input_check$id))) + + settings$run$inputs$met$id <- index_id + settings$run$inputs$met$path <- clim_check +} else { + PEcAn.logger::logger.error("No met file found") +} +# settings <- PEcAn.workflow::do_conversions(settings, T, T, T) + +if (is_empty(settings$run$inputs$met$path) & length(clim_check) > 0) { + settings$run$inputs$met$id <- index_id + settings$run$inputs$met$path <- clim_check } PEcAn.DB::db.close(con) @@ -131,22 +133,22 @@ if (args$continue && file.exists(status_file)) { } # Do conversions -#settings 
<- PEcAn.workflow::do_conversions(settings) +# settings <- PEcAn.workflow::do_conversions(settings) # Write model specific configs if (PEcAn.utils::status.check("CONFIG") == 0) { PEcAn.utils::status.start("CONFIG") settings <- PEcAn.workflow::runModule.run.write.configs(settings) - + PEcAn.settings::write.settings(settings, outputfile = "pecan.CONFIGS.xml") PEcAn.utils::status.end() } else if (file.exists(file.path(settings$outdir, "pecan.CONFIGS.xml"))) { settings <- PEcAn.settings::read.settings(file.path(settings$outdir, "pecan.CONFIGS.xml")) } -if ((length(which(commandArgs() == "--advanced")) != 0) - && (PEcAn.utils::status.check("ADVANCED") == 0)) { +if ((length(which(commandArgs() == "--advanced")) != 0) && + (PEcAn.utils::status.check("ADVANCED") == 0)) { PEcAn.utils::status.start("ADVANCED") q() } @@ -159,7 +161,7 @@ if (PEcAn.utils::status.check("MODEL") == 0) { # If we're doing an ensemble run, don't stop. If only a single run, we # should be stopping. if (is.null(settings[["ensemble"]]) || - as.numeric(settings[[c("ensemble", "size")]]) == 1) { + as.numeric(settings[[c("ensemble", "size")]]) == 1) { stop_on_error <- TRUE } else { stop_on_error <- FALSE @@ -205,11 +207,11 @@ if (PEcAn.utils::status.check("FINISHED") == 0) { ), params = settings$database$bety ) - + # Send email if configured - if (!is.null(settings$email) - && !is.null(settings$email$to) - && (settings$email$to != "")) { + if (!is.null(settings$email) && + !is.null(settings$email$to) && + (settings$email$to != "")) { sendmail( settings$email$from, settings$email$to, @@ -236,11 +238,11 @@ print("---------- PEcAn Workflow Complete ----------") # outdir <- args$outputPath # site.name <- settings$run$site$name # wid <- settings$workflow$id -# +# # output_args = c(as.character(wid), site.num, outdir) -# +# # data = efi.data.process(output_args) -# +# # #Run SIPNET Outputs # data.final = data %>% # mutate(date = as.Date(date)) %>% @@ -254,9 +256,9 @@ print("---------- PEcAn Workflow Complete ----------") # #re-order columns and delete unnecessary columns in data.final # datacols <- c("date", "time", "siteID", "ensemble", "nee", "le", "vswc", "forecast", "data_assimilation") # data.final = data.final[datacols] -# +# # ############ Plots to check out reliability of forecast ######################### -# +# # # ggplot(data.final, aes(x = time, y = nee, group = ensemble)) + # # geom_line(aes(x = time, y = nee, color = ensemble)) # # @@ -265,9 +267,9 @@ print("---------- PEcAn Workflow Complete ----------") # # # # ggplot(data.final, aes(x = time, y = vswc, group = ensemble)) + # # geom_line(aes(x = time, y = vswc, color = ensemble)) -# +# # ########### Export data.final ############### -# +# # write.csv(data.final, file = paste0(site.name, "-", start_date, "-", end_date, ".csv")) -# -# +# +# diff --git a/scripts/HARV_metdownload_efi.R b/scripts/HARV_metdownload_efi.R index abab73d3a39..7726fe356f1 100644 --- a/scripts/HARV_metdownload_efi.R +++ b/scripts/HARV_metdownload_efi.R @@ -1,44 +1,47 @@ -#load libraries +# load libraries library(dplyr) -#load fcns +# load fcns source("/projectnb/dietzelab/ahelgeso/Forecast_Scripts/download_noaa_gefs_efi.R") source("/projectnb/dietzelab/ahelgeso/Forecast_Scripts/noaa_gefs_efi_helper.R") -#set fcn inputs -startdate = format(Sys.Date(), "%Y-%m-%d") -nc_dir = "/projectnb/dietzelab/ahelgeso/NOAA_met_data/" -site.lat = 42.5 -site.lon = -72.15 +# set fcn inputs +startdate <- format(Sys.Date(), "%Y-%m-%d") +nc_dir <- "/projectnb/dietzelab/ahelgeso/NOAA_met_data/" +site.lat <- 42.5 +site.lon 
<- -72.15 sitename <- "HARV" siteid <- 646 clim_dir <- "/projectnb/dietzelab/ahelgeso/NOAA_met_data_CH1/" -#download met using EFI fcns -download_NOAA_GEFS_EFI(sitename = sitename, - outfolder = nc_dir, - start_date = startdate, - site.lat = site.lat, - site.lon = site.lon) -#set up path for met2model +# download met using EFI fcns +download_NOAA_GEFS_EFI( + sitename = sitename, + outfolder = nc_dir, + start_date = startdate, + site.lat = site.lat, + site.lon = site.lon +) +# set up path for met2model output_path <- file.path("/projectnb/dietzelab/ahelgeso/NOAA_met_data/noaa/NOAAGEFS_1hr", sitename, startdate, "00") ########## Met2Model For SIPNET ############## -outfolder = file.path(clim_dir, "noaa_clim", sitename, startdate) -if(!dir.exists(outfolder)){dir.create(outfolder, recursive = TRUE)} +outfolder <- file.path(clim_dir, "noaa_clim", sitename, startdate) +if (!dir.exists(outfolder)) { + dir.create(outfolder, recursive = TRUE) +} -in.path = output_path -in.prefix = list.files(output_path) +in.path <- output_path +in.prefix <- list.files(output_path) -end_date = as.Date(startdate) + lubridate::days(35) - -for(l in 1:length(in.prefix)){ - - PEcAn.SIPNET::met2model.SIPNET(in.path = in.path, - in.prefix = in.prefix[l], - outfolder = outfolder, - start_date = startdate, - end_date = end_date, - overwrite = FALSE, - verbose = FALSE, - year.fragment = TRUE) - -} +end_date <- as.Date(startdate) + lubridate::days(35) +for (l in 1:length(in.prefix)) { + PEcAn.SIPNET::met2model.SIPNET( + in.path = in.path, + in.prefix = in.prefix[l], + outfolder = outfolder, + start_date = startdate, + end_date = end_date, + overwrite = FALSE, + verbose = FALSE, + year.fragment = TRUE + ) +} diff --git a/scripts/Rfcn.R b/scripts/Rfcn.R index d6c52fcd313..480141c791b 100755 --- a/scripts/Rfcn.R +++ b/scripts/Rfcn.R @@ -21,7 +21,6 @@ if (args[1] != "NULL") { ## check that function exists if (exists(args[2])) { - ## put function arguments into a list fcn.args <- list() if (length(args) > 2) { @@ -29,7 +28,7 @@ if (exists(args[2])) { fcn.args[[i]] <- args[i + 2] } } - + ## call function do.call(args[2], fcn.args) } diff --git a/scripts/check_with_errors.R b/scripts/check_with_errors.R index 7ac8694f514..ff9227da63f 100755 --- a/scripts/check_with_errors.R +++ b/scripts/check_with_errors.R @@ -12,13 +12,13 @@ if (resave) die_level <- "never" old_file <- file.path(pkg, "tests", "Rcheck_reference.log") if (file.exists(old_file)) { - # Package has old unfixed warnings that we should ignore by default - # (but if log/die level are explicitly set, respect them) - if (is.na(log_level)) log_level <- "error" - if (is.na(die_level)) die_level <- "error" + # Package has old unfixed warnings that we should ignore by default + # (but if log/die level are explicitly set, respect them) + if (is.na(log_level)) log_level <- "error" + if (is.na(die_level)) die_level <- "error" } else { - if (is.na(log_level)) log_level <- "all" - if (is.na(die_level)) die_level <- "note" + if (is.na(log_level)) log_level <- "all" + if (is.na(die_level)) die_level <- "note" } log_level <- match.arg(log_level, c("error", "warning", "note", "all")) @@ -29,9 +29,9 @@ log_notes <- log_level %in% c("note", "all") # should test se run if (!runtests) { - args <- c("--no-tests", "--timings") + args <- c("--no-tests", "--timings") } else { - args <- c("--timings") + args <- c("--timings") } # devtools 2.4.0 changed values accepted by document argument: @@ -39,31 +39,33 @@ if (!runtests) { # >= 2.4.0: TRUE = yes, FALSE = no, # NULL = if installed Roxygen is 
same version as package's RoxygenNote if ((packageVersion("devtools") >= "2.4.0") && is.na(redocument)) { - redocument <- NULL + redocument <- NULL } -chk <- devtools::check(pkg, args = args, quiet = TRUE, - error_on = die_level, document = redocument) +chk <- devtools::check(pkg, + args = args, quiet = TRUE, + error_on = die_level, document = redocument +) errors <- chk[["errors"]] n_errors <- length(errors) if (n_errors > 0) { - cat(errors, "\n") - stop(n_errors, " errors found in ", pkg, ".") + cat(errors, "\n") + stop(n_errors, " errors found in ", pkg, ".") } warns <- chk[["warnings"]] n_warns <- length(warns) message(n_warns, " warnings found in ", pkg, ".") if ((log_warn) && n_warns > 0) { - cat(warns, "\n") + cat(warns, "\n") } notes <- chk[["notes"]] n_notes <- length(notes) message(n_notes, " notes found in ", pkg, ".") if (log_notes && n_notes > 0) { - cat(notes, "\n") + cat(notes, "\n") } @@ -89,18 +91,18 @@ if (log_notes && n_notes > 0) { # * Commit updated /tests/Rcheck_reference.log file new_file <- file.path(chk$checkdir, "00check.log") if (resave) { - cat("Saving current check results as the new standard\n") - if (file.exists(old_file)) { - cat("**Overwriting** existing saved check output\n") - } - file.copy(from = new_file, to = old_file, overwrite = TRUE) - quit("no") + cat("Saving current check results as the new standard\n") + if (file.exists(old_file)) { + cat("**Overwriting** existing saved check output\n") + } + file.copy(from = new_file, to = old_file, overwrite = TRUE) + quit("no") } ### # everything beyond this point is comparing to old version if (!file.exists(old_file) || sum(n_errors, n_warns, n_notes) == 0) { - quit("no") + quit("no") } old <- rcmdcheck::parse_check(old_file) @@ -117,105 +119,111 @@ chk <- rcmdcheck::parse_check(new_file) cmp <- rcmdcheck::compare_checks(old, chk) collapse_title <- function(x) { - - # Remove "[14s/16s]" timestamp from title line, if present - # modified from https://github.com/r-lib/rcmdcheck/issues/128 - x[[1]] <- gsub( - "\\[[0-9]+s(/[0-9]+s)?\\] ?", "", x[[1]], useBytes=TRUE) - - if (length(x) > 1) { - paste(x[[1]], x[-1], sep = ": ") - } else { - x - } + # Remove "[14s/16s]" timestamp from title line, if present + # modified from https://github.com/r-lib/rcmdcheck/issues/128 + x[[1]] <- gsub( + "\\[[0-9]+s(/[0-9]+s)?\\] ?", "", x[[1]], + useBytes = TRUE + ) + + if (length(x) > 1) { + paste(x[[1]], x[-1], sep = ": ") + } else { + x + } } msg_lines <- function(msg) { - # leading double-space indicates wrapped line -> rejoin - msg <- gsub("\n ", " ", msg, fixed = TRUE) + # leading double-space indicates wrapped line -> rejoin + msg <- gsub("\n ", " ", msg, fixed = TRUE) - #split lines, delete empty ones - msg <- strsplit(msg, split = "\n", fixed = TRUE) - msg <- lapply(msg, function(x)x[x != ""]) + # split lines, delete empty ones + msg <- strsplit(msg, split = "\n", fixed = TRUE) + msg <- lapply(msg, function(x) x[x != ""]) - # prepend message title (e.g. "checking Rd files ... NOTE") to each line - unlist(lapply(msg, collapse_title)) + # prepend message title (e.g. "checking Rd files ... 
NOTE") to each line + unlist(lapply(msg, collapse_title)) } if (cmp$status != "+") { - # rcmdcheck found new messages, so check has failed - print(cmp) - cat("R check of", pkg, "reports the following new problems.", - "Please fix these and resubmit:\n") - cat(cmp$cmp$output[cmp$cmp$change == 1], sep = "\n") - stop("Please fix these and resubmit.") + # rcmdcheck found new messages, so check has failed + print(cmp) + cat( + "R check of", pkg, "reports the following new problems.", + "Please fix these and resubmit:\n" + ) + cat(cmp$cmp$output[cmp$cmp$change == 1], sep = "\n") + stop("Please fix these and resubmit.") } else { - # No new messages, but need to check details of pre-existing ones - # We stopped earlier for errors, so all entries here are WARNING or NOTE - cur_msgs <- msg_lines(cmp$cmp$output[cmp$cmp$which == "new"]) - prev_msgs <- msg_lines(cmp$cmp$output[cmp$cmp$which == "old"]) - - # avoids false positives from tempdir changes - cur_msgs <- gsub(chk$checkdir, "...", cur_msgs) - prev_msgs <- gsub(old$checkdir, "...", prev_msgs) - - # R 3.6.0 switched style for lists of packages - # from space-separated fancy quotes to comma-separated straight quotes - # We'll meet halfway, with space-separated straight quotes - cur_msgs <- gsub("[‘’]", "'", cur_msgs) - cur_msgs <- gsub("', '", "' '", cur_msgs) - prev_msgs <- gsub("[‘’]", "'", prev_msgs) - prev_msgs <- gsub("', '", "' '", prev_msgs) - - # Compression warnings report slightly different sizes on different R - # versions. If the only difference is in the numbers, don't complain - cmprs_msg <- grepl("significantly better compression", cur_msgs) - if (any(cmprs_msg)) { - prev_cmprs_msg <- grepl("significantly better compression", prev_msgs) - cur_cmprs_nodigit <- gsub("[0-9]", "", cur_msgs[cmprs_msg]) - prev_cmprs_nodigit <- gsub("[0-9]", "", prev_msgs[prev_cmprs_msg]) - if (all(cur_cmprs_nodigit %in% prev_cmprs_nodigit)) { - cur_msgs <- cur_msgs[!cmprs_msg] - } + # No new messages, but need to check details of pre-existing ones + # We stopped earlier for errors, so all entries here are WARNING or NOTE + cur_msgs <- msg_lines(cmp$cmp$output[cmp$cmp$which == "new"]) + prev_msgs <- msg_lines(cmp$cmp$output[cmp$cmp$which == "old"]) + + # avoids false positives from tempdir changes + cur_msgs <- gsub(chk$checkdir, "...", cur_msgs) + prev_msgs <- gsub(old$checkdir, "...", prev_msgs) + + # R 3.6.0 switched style for lists of packages + # from space-separated fancy quotes to comma-separated straight quotes + # We'll meet halfway, with space-separated straight quotes + cur_msgs <- gsub("[‘’]", "'", cur_msgs) + cur_msgs <- gsub("', '", "' '", cur_msgs) + prev_msgs <- gsub("[‘’]", "'", prev_msgs) + prev_msgs <- gsub("', '", "' '", prev_msgs) + + # Compression warnings report slightly different sizes on different R + # versions. 
If the only difference is in the numbers, don't complain + cmprs_msg <- grepl("significantly better compression", cur_msgs) + if (any(cmprs_msg)) { + prev_cmprs_msg <- grepl("significantly better compression", prev_msgs) + cur_cmprs_nodigit <- gsub("[0-9]", "", cur_msgs[cmprs_msg]) + prev_cmprs_nodigit <- gsub("[0-9]", "", prev_msgs[prev_cmprs_msg]) + if (all(cur_cmprs_nodigit %in% prev_cmprs_nodigit)) { + cur_msgs <- cur_msgs[!cmprs_msg] } - - # These lines are redundant summaries of issues also reported individually - # and create false positives when an existing issue is fixed - cur_msgs <- cur_msgs[!grepl( - "NOTE: Undefined global functions or variables:", cur_msgs)] - cur_msgs <- cur_msgs[!grepl("NOTE: Consider adding importFrom", cur_msgs)] - - lines_changed <- setdiff(cur_msgs, prev_msgs) - - # Crude hack: - # Some messages are locale-dependent in complex ways, - # e.g. the note about undocumented datasets concatenates CSV names - # (ordered in the current locale) and objects in RData files (always - # ordered in C locale), and so on. - # As a last effort, we look for pre-existing lines that contain the same - # words in a different order - if (length(lines_changed) > 0) { - prev_words <- strsplit(prev_msgs, " ") - changed_words <- strsplit(lines_changed, " ") - is_reordered <- function(v1, v2) { - length(v1[v1 != ""]) == length(v2[v2 != ""]) && setequal(v1, v2) - } - is_in_prev <- function(line) { - any(vapply( - X = prev_words, - FUN = is_reordered, - FUN.VALUE = logical(1), - line)) - } - in_prev <- vapply( - X = changed_words, - FUN = is_in_prev, - FUN.VALUE = logical(1)) - lines_changed <- lines_changed[!in_prev] + } + + # These lines are redundant summaries of issues also reported individually + # and create false positives when an existing issue is fixed + cur_msgs <- cur_msgs[!grepl( + "NOTE: Undefined global functions or variables:", cur_msgs + )] + cur_msgs <- cur_msgs[!grepl("NOTE: Consider adding importFrom", cur_msgs)] + + lines_changed <- setdiff(cur_msgs, prev_msgs) + + # Crude hack: + # Some messages are locale-dependent in complex ways, + # e.g. the note about undocumented datasets concatenates CSV names + # (ordered in the current locale) and objects in RData files (always + # ordered in C locale), and so on. + # As a last effort, we look for pre-existing lines that contain the same + # words in a different order + if (length(lines_changed) > 0) { + prev_words <- strsplit(prev_msgs, " ") + changed_words <- strsplit(lines_changed, " ") + is_reordered <- function(v1, v2) { + length(v1[v1 != ""]) == length(v2[v2 != ""]) && setequal(v1, v2) } - if (length(lines_changed) > 0) { - cat("R check of", pkg, "returned new problems:\n") - cat(lines_changed, sep = "\n") - stop("Please fix these and resubmit.") + is_in_prev <- function(line) { + any(vapply( + X = prev_words, + FUN = is_reordered, + FUN.VALUE = logical(1), + line + )) } + in_prev <- vapply( + X = changed_words, + FUN = is_in_prev, + FUN.VALUE = logical(1) + ) + lines_changed <- lines_changed[!in_prev] + } + if (length(lines_changed) > 0) { + cat("R check of", pkg, "returned new problems:\n") + cat(lines_changed, sep = "\n") + stop("Please fix these and resubmit.") + } } diff --git a/scripts/confirm_deps.R b/scripts/confirm_deps.R index fd272c1c29b..14d3718cc76 100755 --- a/scripts/confirm_deps.R +++ b/scripts/confirm_deps.R @@ -37,7 +37,6 @@ confirm_deps <- function(pkg, install = TRUE, dependencies = NA, ...) { - # Q: "Why a separate variable instead of overwriting `dependencies`?" 
# A: As a quick workaround for https://github.com/r-lib/remotes/issues/809: # remotes::install_deps(pkgdir, `dependencies = TRUE`) correctly installs diff --git a/scripts/dependencies.R b/scripts/dependencies.R index d31121e4da7..1a971547bb9 100644 --- a/scripts/dependencies.R +++ b/scripts/dependencies.R @@ -1,16 +1,16 @@ #!/usr/bin/env Rscript -##--------------------------------------------------------------------------------------------------# +## --------------------------------------------------------------------------------------------------# ##' ##' Create a dependency graph for a list of packages. Each package is checked for their dependencies ##' both Depends as well Suggests. -##' +##' ##' @name dependencygraph ##' @title Create a graph of all package dependencies ##' @param packages TODO ##' @param filename TODO ##' @param suggests TODO ##' @param filter TODO -##' @export +##' @export ##' @examples ##' dependencygraph(c('igraph'), suggests=TRUE) ##' @author Rob Kooper @@ -18,10 +18,10 @@ dependencygraph <- function(packages, filename = "", suggests = FALSE, filter = NA) { library(graphics) library(igraph) - + graphData <- data.frame(from = numeric(0), to = numeric(0), relationship = numeric(0)) seen <- c() - + scanPackageField <- function(package, field) { packages <- packageDescription(package, fields = field) packages <- gsub("\n", "", packages) @@ -39,7 +39,7 @@ dependencygraph <- function(packages, filename = "", suggests = FALSE, filter = scanPackage(p) } } # scanPackageField - + scanPackage <- function(package) { if (!(package %in% seen)) { seen <<- c(seen, package) @@ -49,26 +49,26 @@ dependencygraph <- function(packages, filename = "", suggests = FALSE, filter = } } } # scanPackageField - + for (p in packages) { scanPackage(p) } - + graph <- graph.data.frame(graphData) - + V(graph)$size <- 10 V(graph)$color <- "white" V(graph)$label.color <- "black" - + E(graph)$arrow.size <- 0.5 E(graph)[relationship == "Depends"]$color <- "red" E(graph)[relationship == "Suggests"]$color <- "blue" # colors <- heat.colors(vcount(graph)) E(graph)$color <- colors[match(graphData$to, unique(graphData$to))] - + set.seed(3952) layout <- layout.fruchterman.reingold.grid(graph) # layout <- layout.circle(graph) - + if (length(grep("\\.pdf$", filename)) != 0) { pdf(file = filename) plot(graph, layout = layout, frame = true) @@ -94,10 +94,12 @@ dependencygraph <- function(packages, filename = "", suggests = FALSE, filter = } } # dependencygraph -packages <- c("PEcAn.BIOCRO", "PEcAn.DB", "PEcAn.ED2", "PEcAn.MA", "PEcAn.SIPNET", - "PEcAn.assim.batch", "PEcAnAssimSequential", - "PEcAn.data.atmosphere", "PEcAn.data.land", "PEcAn.priors", "PEcAn.settings", - "PEcAn.uncertainty", "PEcAn.utils", "PEcAn.visualization") +packages <- c( + "PEcAn.BIOCRO", "PEcAn.DB", "PEcAn.ED2", "PEcAn.MA", "PEcAn.SIPNET", + "PEcAn.assim.batch", "PEcAnAssimSequential", + "PEcAn.data.atmosphere", "PEcAn.data.land", "PEcAn.priors", "PEcAn.settings", + "PEcAn.uncertainty", "PEcAn.utils", "PEcAn.visualization" +) # dependencygraph(packages, suggests=FALSE, filename='graph.png') dependencygraph(packages, suggests = TRUE, filename = "graph.png", filter = "PEcAn") diff --git a/scripts/efi_data_process.R b/scripts/efi_data_process.R index 3e4705dfaf5..9596291a51e 100644 --- a/scripts/efi_data_process.R +++ b/scripts/efi_data_process.R @@ -1,15 +1,17 @@ -#### need to create a graph function here to call with the args of start time +#### need to create a graph function here to call with the args of start time #' EFI Data Process #' 
-#' @param args completed forecast run settings file +#' @param args completed forecast run settings file #' #' @return #' #' #' @examples -efi.data.process <- function(args){ - start_date <- tryCatch(as.POSIXct(args[1]), error = function(e) {NULL} ) +efi.data.process <- function(args) { + start_date <- tryCatch(as.POSIXct(args[1]), error = function(e) { + NULL + }) if (is.null(start_date)) { in_wid <- as.integer(args[1]) } @@ -17,197 +19,236 @@ efi.data.process <- function(args){ dbname = "bety", host = "128.197.168.114", user = "bety", - password = "bety") + password = "bety" + ) con <- PEcAn.DB::db.open(dbparms) on.exit(PEcAn.DB::db.close(con), add = TRUE) # Identify the workflow with the proper information if (!is.null(start_date)) { - workflows <- PEcAn.DB::db.query(paste0("SELECT * FROM workflows WHERE start_date='", format(start_date, "%Y-%m-%d %H:%M:%S"), - "' ORDER BY id"), con) + workflows <- PEcAn.DB::db.query(paste0( + "SELECT * FROM workflows WHERE start_date='", format(start_date, "%Y-%m-%d %H:%M:%S"), + "' ORDER BY id" + ), con) } else { workflows <- PEcAn.DB::db.query(paste0("SELECT * FROM workflows WHERE id='", in_wid, "'"), con) } print(workflows) - - workflows <- workflows[which(workflows$site_id == args[2]),] - - SDA_check = grep("StateData", workflows$folder) - if(length(SDA_check) > 0){ workflows = workflows[-SDA_check,]} - - index = grepl(basename(args[3]), workflows$folder) - - if(length(index) == 0){ workflow <- workflows[which(workflows$folder == "" ),]} - - if(length(index) > 0){workflow = workflows[index, ]} - - + + workflows <- workflows[which(workflows$site_id == args[2]), ] + + SDA_check <- grep("StateData", workflows$folder) + if (length(SDA_check) > 0) { + workflows <- workflows[-SDA_check, ] + } + + index <- grepl(basename(args[3]), workflows$folder) + + if (length(index) == 0) { + workflow <- workflows[which(workflows$folder == ""), ] + } + + if (length(index) > 0) { + workflow <- workflows[index, ] + } + + if (nrow(workflow) > 1) { - workflow <- workflow[1,] - } - - if(nrow(workflow) == 0){ + workflow <- workflow[1, ] + } + + if (nrow(workflow) == 0) { PEcAn.logger::logger.error(paste0("There are no workflows for ", start_date)) stop() } - + print(paste0("Using workflow ", workflow$id)) wid <- workflow$id outdir <- args[3] - pecan_out_dir <- paste0(outdir, "PEcAn_", wid, "/out"); + pecan_out_dir <- paste0(outdir, "PEcAn_", wid, "/out") pecan_out_dirs <- list.dirs(path = pecan_out_dir) if (is.na(pecan_out_dirs[1])) { print(paste0(pecan_out_dirs, " does not exist.")) } - - - #neemat <- matrix(1:64, nrow=1, ncol=64) # Proxy row, will be deleted later. - #qlemat <- matrix(1:64, nrow=1, ncol=64)# Proxy row, will be deleted later. - + + + # neemat <- matrix(1:64, nrow=1, ncol=64) # Proxy row, will be deleted later. + # qlemat <- matrix(1:64, nrow=1, ncol=64)# Proxy row, will be deleted later. 
+ neemat <- vector() qlemat <- vector() soilmoist <- vector() gppmat <- vector() time <- vector() - - num_results <- 0; + + num_results <- 0 for (i in 2:length(pecan_out_dirs)) { - #datafile <- file.path(pecan_out_dirs[i], format(workflow$start_date, "%Y.nc")) - datafiles <- list.files(pecan_out_dirs[i]) + # datafile <- file.path(pecan_out_dirs[i], format(workflow$start_date, "%Y.nc")) + datafiles <- list.files(pecan_out_dirs[i]) datafiles <- datafiles[grep("*.nc$", datafiles)] - + if (length(datafiles) == 0) { print(paste0("File ", pecan_out_dirs[i], " does not exist.")) next } - - if(length(datafiles) == 1){ - - file = paste0(pecan_out_dirs[i],'/', datafiles[1]) - + + if (length(datafiles) == 1) { + file <- paste0(pecan_out_dirs[i], "/", datafiles[1]) + num_results <- num_results + 1 - - #open netcdf file - ncptr <- ncdf4::nc_open(file); - + + # open netcdf file + ncptr <- ncdf4::nc_open(file) + # Attach data to matricies nee <- ncdf4::ncvar_get(ncptr, "NEE") - if(i == 2){ neemat <- nee} else{neemat <- cbind(neemat,nee)} - + if (i == 2) { + neemat <- nee + } else { + neemat <- cbind(neemat, nee) + } + qle <- ncdf4::ncvar_get(ncptr, "Qle") - if(i == 2){ qlemat <- qle} else{qlemat <- cbind(qlemat,qle)} - + if (i == 2) { + qlemat <- qle + } else { + qlemat <- cbind(qlemat, qle) + } + soil <- ncdf4::ncvar_get(ncptr, "SoilMoistFrac") - if(i == 2){ soilmoist <- soil} else{soilmoist <- cbind(soilmoist,soil)} - + if (i == 2) { + soilmoist <- soil + } else { + soilmoist <- cbind(soilmoist, soil) + } + gpp <- ncdf4::ncvar_get(ncptr, "GPP") - if(i == 2){ gppmat <- gpp} else{gppmat <- cbind(gppmat,nee)} - - + if (i == 2) { + gppmat <- gpp + } else { + gppmat <- cbind(gppmat, nee) + } + + sec <- ncptr$dim$time$vals origin <- strsplit(ncptr$dim$time$units, " ")[[1]][3] - + # Close netcdf file ncdf4::nc_close(ncptr) } - - if(length(datafiles) > 1){ - - - file = paste0(pecan_out_dirs[i],'/', datafiles[1]) - file2 = paste0(pecan_out_dirs[i],'/', datafiles[2]) - + + if (length(datafiles) > 1) { + file <- paste0(pecan_out_dirs[i], "/", datafiles[1]) + file2 <- paste0(pecan_out_dirs[i], "/", datafiles[2]) + num_results <- num_results + 1 - - #open netcdf file - ncptr1 <- ncdf4::nc_open(file); - ncptr2 <- ncdf4::nc_open(file2); + + # open netcdf file + ncptr1 <- ncdf4::nc_open(file) + ncptr2 <- ncdf4::nc_open(file2) # Attach data to matricies nee1 <- ncdf4::ncvar_get(ncptr1, "NEE") nee2 <- ncdf4::ncvar_get(ncptr2, "NEE") nee <- c(nee1, nee2) - if(i == 2){ neemat <- nee} else{neemat <- cbind(neemat,nee)} - + if (i == 2) { + neemat <- nee + } else { + neemat <- cbind(neemat, nee) + } + qle1 <- ncdf4::ncvar_get(ncptr1, "Qle") qle2 <- ncdf4::ncvar_get(ncptr2, "Qle") qle <- c(qle1, qle2) - - if(i == 2){ qlemat <- qle} else{qlemat <- cbind(qlemat,qle)} - + + if (i == 2) { + qlemat <- qle + } else { + qlemat <- cbind(qlemat, qle) + } + soil1 <- ncdf4::ncvar_get(ncptr1, "SoilMoistFrac") soil2 <- ncdf4::ncvar_get(ncptr2, "SoilMoistFrac") soil <- c(soil1, soil2) - if(i == 2){ soilmoist <- soil} else{soilmoist <- cbind(soilmoist,soil)} - - - sec <- c(ncptr1$dim$time$vals, ncptr2$dim$time$vals+ last(ncptr1$dim$time$vals)) + if (i == 2) { + soilmoist <- soil + } else { + soilmoist <- cbind(soilmoist, soil) + } + + + sec <- c(ncptr1$dim$time$vals, ncptr2$dim$time$vals + last(ncptr1$dim$time$vals)) origin <- strsplit(ncptr1$dim$time$units, " ")[[1]][3] - - + + # Close netcdf file ncdf4::nc_close(ncptr1) ncdf4::nc_close(ncptr2) - } - } - + if (num_results == 0) { print("No results found.") quit("no") } else { 
print(paste0(num_results, " results found.")) } - + # Time - time <- seq(1, length.out= length(sec)) - - + time <- seq(1, length.out = length(sec)) + + # Change to long format with ensemble numbers - #lets get rid of col names for easier pivoting + # lets get rid of col names for easier pivoting colnames(neemat) <- paste0(rep("ens_", 100), seq(1, 100)) - needf = neemat %>% - as_tibble() %>% - mutate(date= as.Date(sec, origin = origin), - Time = round(abs(sec - floor(sec)) * 24)) %>% - pivot_longer(!c(date, Time), - names_to = "ensemble", - names_prefix = "ens_", - values_to = "nee") %>% - mutate(nee = PEcAn.utils::misc.convert(nee, "kg C m-2 s-1", "umol C m-2 s-1")) - + needf <- neemat %>% + as_tibble() %>% + mutate( + date = as.Date(sec, origin = origin), + Time = round(abs(sec - floor(sec)) * 24) + ) %>% + pivot_longer(!c(date, Time), + names_to = "ensemble", + names_prefix = "ens_", + values_to = "nee" + ) %>% + mutate(nee = PEcAn.utils::misc.convert(nee, "kg C m-2 s-1", "umol C m-2 s-1")) + colnames(qlemat) <- paste0(rep("ens_", 100), seq(1, 100)) - qledf = qlemat %>% - as_tibble() %>% - mutate(date= as.Date(sec, origin = origin), - Time = round(abs(sec - floor(sec)) * 24)) %>% - pivot_longer(!c(date, Time), - names_to = "ens", - names_prefix = "ens_", - values_to = "le") - + qledf <- qlemat %>% + as_tibble() %>% + mutate( + date = as.Date(sec, origin = origin), + Time = round(abs(sec - floor(sec)) * 24) + ) %>% + pivot_longer(!c(date, Time), + names_to = "ens", + names_prefix = "ens_", + values_to = "le" + ) + colnames(soilmoist) <- paste0(rep("ens_", 100), seq(1, 100)) - soildf = soilmoist %>% - as_tibble() %>% - mutate(date= as.Date(sec, origin = origin), - Time = round(abs(sec - floor(sec)) * 24)) %>% - pivot_longer(!c(date, Time), - names_to = "ens", - names_prefix = "ens_", - values_to = "vswc") - - - data = needf %>% - mutate(le = qledf$le, - vswc = soildf$vswc) - - - - - -return(data) - -} + soildf <- soilmoist %>% + as_tibble() %>% + mutate( + date = as.Date(sec, origin = origin), + Time = round(abs(sec - floor(sec)) * 24) + ) %>% + pivot_longer(!c(date, Time), + names_to = "ens", + names_prefix = "ens_", + values_to = "vswc" + ) + + + data <- needf %>% + mutate( + le = qledf$le, + vswc = soildf$vswc + ) + + + return(data) +} diff --git a/scripts/format.sh b/scripts/format.sh new file mode 100755 index 00000000000..40fce440aec --- /dev/null +++ b/scripts/format.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +# Script Name: format_staged_r_files.sh +# Description: Formats only the staged .R files in the Git repository using the styler package in R. +# This script is optimized for use as a pre-commit hook to reduce execution time. + +# Exit immediately if a command exits with a non-zero status +set -e + +# Function to check if an R package is installed +is_r_package_installed() { + local pkg="$1" + Rscript -e "if (!requireNamespace('$pkg', quietly = TRUE)) { quit(status=1) }" +} + +# Check if styler is installed; install it if not +if is_r_package_installed "styler"; then + echo "The 'styler' package is already installed." +else + echo "The 'styler' package is not installed. Installing now..." + Rscript -e "install.packages('styler', repos='https://cloud.r-project.org')" + echo "'styler' package installed successfully." +fi + +# Retrieve list of staged .R files +echo "Retrieving list of staged .R files..." +STAGED_FILES=$(git diff --cached --name-only --diff-filter=ACM | grep '\.R$' || true) + +if [ -z "$STAGED_FILES" ]; then + echo "No staged .R files to format." 
+ exit 0 +fi + +echo "Found the following staged .R files to format:" +echo "$STAGED_FILES" + +# Iterate over each staged .R file and format it +for file in $STAGED_FILES; do + if [ -f "$file" ]; then + echo "Formatting: $file" + # Format the file using styler + Rscript -e "styler::style_file('$file')" + + # Check if the file was modified by styler + if git diff --quiet "$file"; then + echo "No changes made to: $file" + else + echo "Changes made to: $file. Re-adding to staging area." + git add "$file" + fi + else + echo "File not found: $file. Skipping." + fi +done + +echo "Staged .R files have been formatted successfully." diff --git a/scripts/generate_dependencies.R b/scripts/generate_dependencies.R index 63e64e5b5cc..5677a5fccad 100755 --- a/scripts/generate_dependencies.R +++ b/scripts/generate_dependencies.R @@ -22,11 +22,12 @@ extract_remotes <- function(pkg_desc) { remote_sources <- pkg_desc$get_remotes() non_gh <- !grepl("^github::", remote_sources) if (any(non_gh)) { - warning( - "Found `Remotes` address pointing to non-Github repo: ", - remote_sources[non_gh], - "pecan.depends.R only supports github remotes so far, ", - "so skipping.") + warning( + "Found `Remotes` address pointing to non-Github repo: ", + remote_sources[non_gh], + "pecan.depends.R only supports github remotes so far, ", + "so skipping." + ) } sub("^github::", "", remote_sources[!non_gh]) @@ -50,10 +51,12 @@ parse_desc <- function(path) { list( mapping = data.frame( package = d$get_field("Package"), - package_dir = dirname(path)), + package_dir = dirname(path) + ), remotes = remote_sources, roxygen_version = d$get_field("RoxygenNote"), - deps = deps) + deps = deps + ) } @@ -89,12 +92,14 @@ deps <- pkgs_parsed |> # Add Roxygen, used to build all packages but not declared as a dependency roxy <- pkgs_parsed |> purrr::map_dfr( - ~c(needed_by_dir = .$mapping$package_dir, version = .$roxygen_version)) |> + ~ c(needed_by_dir = .$mapping$package_dir, version = .$roxygen_version) + ) |> dplyr::mutate( package = "roxygen2", version = paste("==", version), type = "Roxygen", - is_pecan = FALSE) + is_pecan = FALSE + ) deps <- deps |> rbind(roxy) |> dplyr::arrange(package, version, needed_by_dir, .locale = "en_US") @@ -112,14 +117,16 @@ pecan_deps <- deps |> dplyr::summarize( dep_list = paste0(".install/", sort(unique(package_dir)), collapse = " "), call_txt = paste0("$(call depends,", unique(needed_by_dir), "): | ", dep_list), - .groups = "drop") |> + .groups = "drop" + ) |> dplyr::pull(call_txt) cat( c("# autogenerated", pecan_deps), file = "Makefile.depends", sep = "\n", - append = FALSE) + append = FALSE +) # list all the GitHub remotes mentioned by any package, @@ -128,4 +135,3 @@ remote_repos <- sapply(pkgs_parsed, \(x)x$remotes) |> unlist() |> sort() |> writeLines("docker/depends/pecan_deps_from_github.txt") - diff --git a/scripts/install_shiny_deps.R b/scripts/install_shiny_deps.R index 8ef5e042aca..28208fbf02a 100644 --- a/scripts/install_shiny_deps.R +++ b/scripts/install_shiny_deps.R @@ -1,4 +1,3 @@ - # Install dependencies declared by a Shiny app # Relies on the undocumented behavior that devtools::install_deps will work # on any directory containing a DESCRIPTION file, even if not an R package. @@ -11,11 +10,13 @@ path <- commandArgs(trailingOnly = TRUE)[[1]] devtools::install_deps(path) dep <- desc::desc_get_deps(file.path(path, "DESCRIPTION")) purrr::walk( - dep$package, - ~if (system.file(package = .) 
== "") { - # empty string = package not found - # = install_deps didn't install it after all - stop("Don't know how to install dependency ", ., - ", which is required by ", path) - } + dep$package, + ~ if (system.file(package = .) == "") { + # empty string = package not found + # = install_deps didn't install it after all + stop( + "Don't know how to install dependency ", ., + ", which is required by ", path + ) + } ) diff --git a/scripts/quickbuild.R b/scripts/quickbuild.R index 45039ecf29e..7342e13d5a7 100755 --- a/scripts/quickbuild.R +++ b/scripts/quickbuild.R @@ -3,7 +3,11 @@ library(devtools) dev_mode(on = TRUE) -lapply(list("utils", "db", "settings", "visualization", "modules/priors", "modules/meta.analysis", - "modules/uncertainty", "modules/data.land", "modules/data.atmosphere", "modules/assim.batch", - "modules/assim.sequential", "models/ed", "models/sipnet", "models/biocro", "all"), - function(x) install(x, quick = TRUE, local = TRUE, quiet = TRUE)) +lapply( + list( + "utils", "db", "settings", "visualization", "modules/priors", "modules/meta.analysis", + "modules/uncertainty", "modules/data.land", "modules/data.atmosphere", "modules/assim.batch", + "modules/assim.sequential", "models/ed", "models/sipnet", "models/biocro", "all" + ), + function(x) install(x, quick = TRUE, local = TRUE, quiet = TRUE) +) diff --git a/scripts/workflow.bm.R b/scripts/workflow.bm.R index 6f90dc6f1e0..bcfcb0af818 100644 --- a/scripts/workflow.bm.R +++ b/scripts/workflow.bm.R @@ -2,7 +2,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html @@ -28,13 +28,16 @@ status.end <- function(status = "DONE") { } status.skip <- function(name) { if (exists("settings")) { - cat(paste(name, format(Sys.time(), "%F %T"), "", format(Sys.time(), "%F %T"), "SKIPPED", "\n", sep = "\t"), file = file.path(settings$outdir, - "STATUS"), append = TRUE) + cat(paste(name, format(Sys.time(), "%F %T"), "", format(Sys.time(), "%F %T"), "SKIPPED", "\n", sep = "\t"), file = file.path( + settings$outdir, + "STATUS" + ), append = TRUE) } } status.check <- function(name) { - if (!exists("settings")) + if (!exists("settings")) { return(0) + } status.file <- file.path(settings$outdir, "STATUS") if (!file.exists(status.file)) { return(0) @@ -77,9 +80,9 @@ options(error = quote({ })) -# ---------------------------------------------------------------------- +# ---------------------------------------------------------------------- # PEcAn Workflow -# ---------------------------------------------------------------------- +# ---------------------------------------------------------------------- # Open and read in settings file for PEcAn run. 
args <- commandArgs(trailingOnly = TRUE) # if (is.na(args[1])){ settings <- read.settings('pecan.xml') } else { settings.file = args[1] settings <- @@ -99,9 +102,9 @@ for (i in seq_along(settings$run$inputs)) { if (is.null(input)) { next } - + input.tag <- names(settings$run$input)[i] - + # fia database if ((input["input"] == "fia") && (status.check("FIA2ED") == 0)) { status.start("FIA2ED") @@ -109,20 +112,22 @@ for (i in seq_along(settings$run$inputs)) { status.end() needsave <- TRUE } - + # met conversion if (input.tag == "met") { name <- "MET Process" if (is.null(input$path) && (status.check(name) == 0)) { status.start(name) - result <- PEcAn.data.atmosphere::met.process(site = settings$run$site, - input_met = settings$run$inputs$met, - start_date = settings$run$start.date, - end_date = settings$run$end.date, - model = settings$model$type, - host = settings$run$host, - dbparms = settings$database$bety, - dir = settings$run$dbfiles) + result <- PEcAn.data.atmosphere::met.process( + site = settings$run$site, + input_met = settings$run$inputs$met, + start_date = settings$run$start.date, + end_date = settings$run$end.date, + model = settings$model$type, + host = settings$run$host, + dbparms = settings$database$bety, + dir = settings$run$dbfiles + ) settings$run$inputs[[i]][["path"]] <- result status.end() needsave <- TRUE @@ -149,12 +154,14 @@ if (status.check("TRAIT") == 0) { if ("meta.analysis" %in% names(settings)) { if (status.check("META") == 0) { status.start("META") - run.meta.analysis(settings$pfts, - settings$meta.analysis$iter, - settings$meta.analysis$random.effects, - settings$meta.analysis$threshold, - settings$run$dbfiles, - settings$database$bety) + run.meta.analysis( + settings$pfts, + settings$meta.analysis$iter, + settings$meta.analysis$random.effects, + settings$meta.analysis$threshold, + settings$run$dbfiles, + settings$database$bety + ) status.end() } } @@ -162,9 +169,10 @@ if ("meta.analysis" %in% names(settings)) { # Write model specific configs if (status.check("CONFIG") == 0) { status.start("CONFIG") - settings <- run.write.configs(settings, - write = settings$database$bety$write, - ens.sample.method = settings$ensemble$method) + settings <- run.write.configs(settings, + write = settings$database$bety$write, + ens.sample.method = settings$ensemble$method + ) saveXML(PEcAn.settings::listToXml(settings, "pecan"), file = file.path(settings$outdir, "pecan.CONFIGS.xml")) status.end() } else if (file.exists(file.path(settings$outdir, "pecan.CONFIGS.xml"))) { @@ -218,11 +226,13 @@ if (status.check("FINISHED") == 0) { status.start("FINISHED") kill.tunnel() db.query(paste("UPDATE workflows SET finished_at=NOW() WHERE id=", settings$workflow$id, "AND finished_at IS NULL"), params = settings$database$bety) - + # Send email if configured if (!is.null(settings$email) && !is.null(settings$email$to) && (settings$email$to != "")) { - sendmail(settings$email$from, settings$email$to, paste0("Workflow has finished executing at ", date()), paste0("You can find the results on ", - settings$email$url)) + sendmail(settings$email$from, settings$email$to, paste0("Workflow has finished executing at ", date()), paste0( + "You can find the results on ", + settings$email$url + )) } status.end() } diff --git a/scripts/workflow.pda.R b/scripts/workflow.pda.R index e782d900301..934bb9f36ee 100755 --- a/scripts/workflow.pda.R +++ b/scripts/workflow.pda.R @@ -3,13 +3,13 @@ args <- commandArgs(trailingOnly = TRUE) #------------------------------------------------------------------------------- # 
Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- -# ---------------------------------------------------------------------- +# ---------------------------------------------------------------------- # Load required libraries # ---------------------------------------------------------------------- library(PEcAn.all) @@ -18,16 +18,19 @@ library(PEcAn.all) # Functions used to write STATUS used by history #--------------------------------------------------------------------------------# status.start <- function(name) { - cat(paste(name, format(Sys.time(), "%F %T"), sep = "\t"), - file = file.path(settings$outdir, "STATUS"), append = TRUE) + cat(paste(name, format(Sys.time(), "%F %T"), sep = "\t"), + file = file.path(settings$outdir, "STATUS"), append = TRUE + ) } status.end <- function(status = "DONE") { - cat(paste("", format(Sys.time(), "%F %T"), status, "\n", sep = "\t"), - file = file.path(settings$outdir, "STATUS"), append = TRUE) + cat(paste("", format(Sys.time(), "%F %T"), status, "\n", sep = "\t"), + file = file.path(settings$outdir, "STATUS"), append = TRUE + ) } status.skip <- function(name) { - cat(paste(name, format(Sys.time(), "%F %T"), "", format(Sys.time(), "%F %T"), "SKIPPED", "\n", sep = "\t"), - file = file.path(settings$outdir, "STATUS"), append = TRUE) + cat(paste(name, format(Sys.time(), "%F %T"), "", format(Sys.time(), "%F %T"), "SKIPPED", "\n", sep = "\t"), + file = file.path(settings$outdir, "STATUS"), append = TRUE + ) } options(warn = 1) @@ -41,9 +44,9 @@ options(error = quote({ # options(warning.expression=status.end('ERROR')) -# ---------------------------------------------------------------------- +# ---------------------------------------------------------------------- # PEcAn Workflow -# ---------------------------------------------------------------------- +# ---------------------------------------------------------------------- # Open and read in settings file for PEcAn run. 
if (is.na(args[1])) { settings <- read.settings("pecan.xml") @@ -55,42 +58,44 @@ if (is.na(args[1])) { if (length(which(commandArgs() == "--continue")) == 0) { # Remove existing STATUS file file.remove(file.path(settings$outdir, "STATUS")) - + # Do conversions for (i in seq_along(settings$run$inputs)) { input <- settings$run$inputs[[i]] if (is.null(input)) { next } - + input.tag <- names(settings$run$input)[i] - + # fia database if (input["input"] == "fia") { status.start("FIA2ED") fia.to.psscss(settings) status.end() } - + # met conversion if (input.tag == "met") { if (is.null(input$path)) { status.start("MET Process") - result <- PEcAn.data.atmosphere::met.process(site = settings$run$site, - input_met = settings$run$inputs$met, - start_date = settings$run$start.date, - end_date = settings$run$end.date, - model = settings$model$type, - host = settings$host, - dbparms = settings$database$bety, - dir = settings$database$dbfiles) + result <- PEcAn.data.atmosphere::met.process( + site = settings$run$site, + input_met = settings$run$inputs$met, + start_date = settings$run$start.date, + end_date = settings$run$end.date, + model = settings$model$type, + host = settings$host, + dbparms = settings$database$bety, + dir = settings$database$dbfiles + ) settings$run$inputs[[i]][["path"]] <- result status.end() } } } saveXML(PEcAn.settings::listToXml(settings, "pecan"), file = file.path(settings$outdir, "pecan.METProcess.xml")) - + # Check status to avoid repeating work check.status <- function(check.name) { status.file <- file.path(settings$outdir, "STATUS") @@ -111,32 +116,36 @@ if (length(which(commandArgs() == "--continue")) == 0) { } return(0) } - + # Query the trait database for data and priors if (check.status("TRAIT") == 0) { status.start("TRAIT") - settings$pfts <- get.trait.data(settings$pfts, - settings$model$type, - settings$database$dbfiles, - settings$database$bety, - settings$meta.analysis$update) + settings$pfts <- get.trait.data( + settings$pfts, + settings$model$type, + settings$database$dbfiles, + settings$database$bety, + settings$meta.analysis$update + ) saveXML(PEcAn.settings::listToXml(settings, "pecan"), file = file.path(settings$outdir, "pecan.TRAIT.xml")) status.end() } - + # Run the PEcAn meta.analysis if (check.status("META") == 0) { status.start("META") if ("meta.analysis" %in% names(settings)) { - run.meta.analysis(settings$pfts, settings$meta.analysis$iter, - settings$meta.analysis$random.effects, - settings$meta.analysis$threshold, - settings$database$dbfiles, - settings$database$bety) + run.meta.analysis( + settings$pfts, settings$meta.analysis$iter, + settings$meta.analysis$random.effects, + settings$meta.analysis$threshold, + settings$database$dbfiles, + settings$database$bety + ) } status.end() } - + # Write model specific configs if (check.status("CONFIG") == 0) { status.start("CONFIG") @@ -144,7 +153,7 @@ if (length(which(commandArgs() == "--continue")) == 0) { saveXML(PEcAn.settings::listToXml(settings, "pecan"), file = file.path(settings$outdir, "pecan.CONFIGS.xml")) status.end() } - + if (length(which(commandArgs() == "--advanced")) != 0) { status.start("ADVANCED") q() @@ -172,7 +181,7 @@ if (!is.null(settings$assim.batch$inputs)) { con <- try(db.open(settings$database$bety), silent = TRUE) inputs <- load.pda.data(settings$assim.batch$inputs, con) NEEo <- inputs[[1]]$NEEo - NEEo <- NEEo/(1000/12 * 1e+06/10000/86400/365) #convert umol/m2/s -> kgC/ha/yr + NEEo <- NEEo / (1000 / 12 * 1e+06 / 10000 / 86400 / 365) # convert umol/m2/s -> kgC/ha/yr db.close(con) } else 
{ NEEo <- NULL @@ -201,28 +210,28 @@ if ("assim.batch" %in% names(settings)) { if (!is.null(settings$assim.batch)) { # Repeat sensitivity and ensemble analysis with PDA-constrained params - + # Calls model specific write.configs e.g. write.config.ed.R status.start("PDA.CONFIG") settings <- run.write.configs(settings, write = settings$database$bety$write, ens.sample.method = "halton") saveXML(PEcAn.settings::listToXml(settings, "pecan"), file = file.path(settings$outdir, "pecan.PDA.CONFIGS.xml")) status.end() - + # Start ecosystem model runs status.start("PDA.MODEL") PEcAn.workflow::start_model_runs(settings, settings$database$bety$write) status.end() - + # Get results of model runs status.start("PDA.OUTPUT") get.results(settings) status.end() - + # Run ensemble analysis on model output. status.start("PDA.ENSEMBLE") run.ensemble.analysis(plot.timeseries = TRUE, observations = NEEo, window = window) status.end() - + # Run sensitivity analysis and variance decomposition on model output status.start("PDA.SENSITIVITY") run.sensitivity.analysis() @@ -232,22 +241,26 @@ if (!is.null(settings$assim.batch)) { # Pecan workflow complete if (check.status("FINISHED") == 0) { status.start("FINISHED") - db.query(paste("UPDATE workflows SET finished_at=NOW() WHERE id=", settings$workflow$id, "AND finished_at IS NULL"), - params = settings$database$bety) + db.query(paste("UPDATE workflows SET finished_at=NOW() WHERE id=", settings$workflow$id, "AND finished_at IS NULL"), + params = settings$database$bety + ) status.end() } # Send email if configured if (!is.null(settings$email) && !is.null(settings$email$to) && (settings$email$to != "")) { - sendmail(settings$email$from, settings$email$to, - paste0("Workflow has finished executing at ", date()), - paste0("You can find the results on ", settings$email$url)) + sendmail( + settings$email$from, settings$email$to, + paste0("Workflow has finished executing at ", date()), + paste0("You can find the results on ", settings$email$url) + ) } # Write end time in database if (settings$workflow$id != "NA") { - db.query(paste0("UPDATE workflows SET finished_at=NOW() WHERE id=", settings$workflow$id, " AND finished_at IS NULL"), - params = settings$database$bety) + db.query(paste0("UPDATE workflows SET finished_at=NOW() WHERE id=", settings$workflow$id, " AND finished_at IS NULL"), + params = settings$database$bety + ) } status.end() diff --git a/scripts/workflow.treering.R b/scripts/workflow.treering.R index 7d50b4fb86b..27ecab23fdd 100644 --- a/scripts/workflow.treering.R +++ b/scripts/workflow.treering.R @@ -38,8 +38,8 @@ trees <- read.csv("~/Camp2016/ForestPlots/2016/TenderfootBog_2016_Cleaned.csv") rings <- Read_Tucson("~/Camp2016/ForestPlots/2016/TucsonCombined/") ## Match observations & format for JAGS -combined <- matchInventoryRings(trees, rings, extractor = "Tag", nyears = 39, coredOnly = FALSE) #WARNINGS -data <- buildJAGSdata_InventoryRings(combined) #WARNINGS +combined <- matchInventoryRings(trees, rings, extractor = "Tag", nyears = 39, coredOnly = FALSE) # WARNINGS +data <- buildJAGSdata_InventoryRings(combined) # WARNINGS status.end() #---------------- Load plot and tree ring data. 
-------------------------------------------------------# @@ -62,11 +62,13 @@ pft.data <- list() for (ipft in seq_along(settings$pfts)) { ## loop over PFTs pft_name <- settings$pfts[[ipft]]$name - query <- paste0("SELECT s.spcd,", "s.\"Symbol\"", " as acronym from pfts as p join pfts_species on p.id = pfts_species.pft_id join species as s on pfts_species.specie_id = s.id where p.name like '%", - pft_name, "%'") + query <- paste0( + "SELECT s.spcd,", "s.\"Symbol\"", " as acronym from pfts as p join pfts_species on p.id = pfts_species.pft_id join species as s on pfts_species.specie_id = s.id where p.name like '%", + pft_name, "%'" + ) pft.data[[pft_name]] <- db.query(query, con) } -allom.stats <- AllomAve(pft.data, outdir = settings$outdir, ngibbs = n.iter/10) +allom.stats <- AllomAve(pft.data, outdir = settings$outdir, ngibbs = n.iter / 10) save(allom.stats, file = file.path(settings$outdir, "allom.stats.Rdata")) status.end() @@ -74,17 +76,17 @@ status.end() status.start("PLOT2AGB") out <- as.matrix(jags.out) sel <- grep("x[", colnames(out), fixed = TRUE) -unit.conv <- pi * 10^2/10000 +unit.conv <- pi * 10^2 / 10000 state <- plot2AGB(combined, out[, sel], settings$outdir, list(allom.stats[[2]]), unit.conv = unit.conv) biomass2carbon <- 0.48 -state$NPP <- PEcAn.utils::ud_convert(state$NPP,'Mg/ha/yr','kg/m^2/s') * biomass2carbon# kgC/m^2/s -state$AGB <- PEcAn.utils::ud_convert(state$AGB,'Mg/ha','kg/m^2') * biomass2carbon# kgC/m2 +state$NPP <- PEcAn.utils::ud_convert(state$NPP, "Mg/ha/yr", "kg/m^2/s") * biomass2carbon # kgC/m^2/s +state$AGB <- PEcAn.utils::ud_convert(state$AGB, "Mg/ha", "kg/m^2") * biomass2carbon # kgC/m2 NPP <- apply(state$NPP[1, , ], 2, mean, na.rm = TRUE) AGB <- apply(state$AGB[1, , ], 2, mean, na.rm = TRUE) - + obs.mean <- list() for (i in seq_along(NPP)) { obs.mean[[i]] <- c(NPP[i], AGB[i]) @@ -101,8 +103,8 @@ for (i in seq_along(NPP)) { obs.times <- seq(as.Date(settings$state.data.assimilation$start.date), as.Date(settings$state.data.assimilation$end.date), by = settings$state.data.assimilation$forecast.time.step) obs.times <- lubridate::year(obs.times) -names(obs.mean) <- paste0(obs.times,'/12/31') -names(obs.cov) <- paste0(obs.times,'/12/31') +names(obs.mean) <- paste0(obs.times, "/12/31") +names(obs.cov) <- paste0(obs.times, "/12/31") status.end() #---------------- Build Initial Conditions ----------------------------------------------------------------------# diff --git a/scripts/workflow.wcr.assim.R b/scripts/workflow.wcr.assim.R index 8d8850f487f..8632d211600 100644 --- a/scripts/workflow.wcr.assim.R +++ b/scripts/workflow.wcr.assim.R @@ -2,7 +2,7 @@ #------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html @@ -20,33 +20,33 @@ options(error = quote({ q() } })) -#options(warning.expression=status.end("ERROR")) +# options(warning.expression=status.end("ERROR")) # ---------------------------------------------------------------------- # PEcAn Workflow # ---------------------------------------------------------------------- # Open and read in settings file for PEcAn run. 
args <- commandArgs(trailingOnly = TRUE) -if (is.na(args[1])){ - settings <- PEcAn.settings::read.settings("pecan.xml") +if (is.na(args[1])) { + settings <- PEcAn.settings::read.settings("pecan.xml") } else { - settings.file = args[1] + settings.file <- args[1] settings <- PEcAn.settings::read.settings(settings.file) } # Check for additional modules that will require adding settings -if("benchmarking" %in% names(settings)){ +if ("benchmarking" %in% names(settings)) { library(PEcAn.benchmark) settings <- papply(settings, read_settings_BRR) } -if("sitegroup" %in% names(settings)){ - if(is.null(settings$sitegroup$nSite)){ +if ("sitegroup" %in% names(settings)) { + if (is.null(settings$sitegroup$nSite)) { settings <- PEcAn.settings::createSitegroupMultiSettings(settings, sitegroupId = settings$sitegroup$id) } else { - settings <- PEcAn.settings::createSitegroupMultiSettings(settings, sitegroupId = settings$sitegroup$id,nSite = settings$sitegroup$nSite) + settings <- PEcAn.settings::createSitegroupMultiSettings(settings, sitegroupId = settings$sitegroup$id, nSite = settings$sitegroup$nSite) } settings$sitegroup <- NULL ## zero out so don't expand a second time if re-reading } # Update/fix/check settings. Will only run the first time it's called, unless force=TRUE -settings <- PEcAn.settings::prepare.settings(settings, force=FALSE) +settings <- PEcAn.settings::prepare.settings(settings, force = FALSE) # Write pecan.CHECKED.xml PEcAn.settings::write.settings(settings, outputfile = "pecan.CHECKED.xml") # start from scratch if no continue is passed in @@ -57,39 +57,39 @@ if (length(which(commandArgs() == "--continue")) == 0 && file.exists(statusFile) # Do conversions settings <- PEcAn.workflow::do_conversions(settings) # Query the trait database for data and priors -if (PEcAn.utils::status.check("TRAIT") == 0){ +if (PEcAn.utils::status.check("TRAIT") == 0) { PEcAn.utils::status.start("TRAIT") settings <- PEcAn.workflow::runModule.get.trait.data(settings) - PEcAn.settings::write.settings(settings, outputfile='pecan.TRAIT.xml') + PEcAn.settings::write.settings(settings, outputfile = "pecan.TRAIT.xml") PEcAn.utils::status.end() -} else if (file.exists(file.path(settings$outdir, 'pecan.TRAIT.xml'))) { - settings <- PEcAn.settings::read.settings(file.path(settings$outdir, 'pecan.TRAIT.xml')) +} else if (file.exists(file.path(settings$outdir, "pecan.TRAIT.xml"))) { + settings <- PEcAn.settings::read.settings(file.path(settings$outdir, "pecan.TRAIT.xml")) } # Run the PEcAn meta.analysis -if(!is.null(settings$meta.analysis)) { - if (PEcAn.utils::status.check("META") == 0){ +if (!is.null(settings$meta.analysis)) { + if (PEcAn.utils::status.check("META") == 0) { PEcAn.utils::status.start("META") PEcAn.MA::runModule.run.meta.analysis(settings) PEcAn.utils::status.end() } } # Write model specific configs -if (PEcAn.utils::status.check("CONFIG") == 0){ +if (PEcAn.utils::status.check("CONFIG") == 0) { PEcAn.utils::status.start("CONFIG") settings <- PEcAn.workflow::runModule.run.write.configs(settings) - PEcAn.settings::write.settings(settings, outputfile='pecan.CONFIGS.xml') + PEcAn.settings::write.settings(settings, outputfile = "pecan.CONFIGS.xml") PEcAn.utils::status.end() -} else if (file.exists(file.path(settings$outdir, 'pecan.CONFIGS.xml'))) { - settings <- PEcAn.settings::read.settings(file.path(settings$outdir, 'pecan.CONFIGS.xml')) +} else if (file.exists(file.path(settings$outdir, "pecan.CONFIGS.xml"))) { + settings <- PEcAn.settings::read.settings(file.path(settings$outdir, "pecan.CONFIGS.xml")) } if 
((length(which(commandArgs() == "--advanced")) != 0) && (PEcAn.utils::status.check("ADVANCED") == 0)) { PEcAn.utils::status.start("ADVANCED") - q(); + q() } # Start ecosystem model runs if (PEcAn.utils::status.check("MODEL") == 0) { PEcAn.utils::status.start("MODEL") - PEcAn.workflow::runModule_start_model_runs(settings,stop.on.error=FALSE) + PEcAn.workflow::runModule_start_model_runs(settings, stop.on.error = FALSE) PEcAn.utils::status.end() } # Get results of model runs @@ -99,15 +99,15 @@ if (PEcAn.utils::status.check("OUTPUT") == 0) { PEcAn.utils::status.end() } print("Done reading stuff") -# Run ensemble analysis on model output. -if (FALSE && 'ensemble' %in% names(settings) & PEcAn.utils::status.check("ENSEMBLE") == 0) { +# Run ensemble analysis on model output. +if (FALSE && "ensemble" %in% names(settings) & PEcAn.utils::status.check("ENSEMBLE") == 0) { PEcAn.utils::status.start("ENSEMBLE") - runModule.run.ensemble.analysis(settings, TRUE) + runModule.run.ensemble.analysis(settings, TRUE) PEcAn.utils::status.end() } print("Past ensemble section") # Run sensitivity analysis and variance decomposition on model output -if (FALSE && 'sensitivity.analysis' %in% names(settings) & PEcAn.utils::status.check("SENSITIVITY") == 0) { +if (FALSE && "sensitivity.analysis" %in% names(settings) & PEcAn.utils::status.check("SENSITIVITY") == 0) { PEcAn.utils::status.start("SENSITIVITY") runModule.run.sensitivity.analysis(settings) PEcAn.utils::status.end() @@ -115,7 +115,7 @@ if (FALSE && 'sensitivity.analysis' %in% names(settings) & PEcAn.utils::status.c PEcAn.settings::write.settings(settings, outputfile = "sa.xml", outputdir = "~/") print("past SA section") # Run parameter data assimilation -if (FALSE && 'assim.batch' %in% names(settings)) { +if (FALSE && "assim.batch" %in% names(settings)) { if (PEcAn.utils::status.check("PDA") == 0) { PEcAn.utils::status.start("PDA") settings <- PEcAn.assim.batch::runModule.assim.batch(settings) @@ -124,17 +124,17 @@ if (FALSE && 'assim.batch' %in% names(settings)) { } print("past pda section") # Run state data assimilation -if ('state.data.assimilation' %in% names(settings)) { +if ("state.data.assimilation" %in% names(settings)) { print("Entering SDA section") if (PEcAn.utils::status.check("SDA") == 0) { PEcAn.utils::status.start("SDA") - #removed call to prep.data.assim fcn in the uncertainty package,because the function no longer exists, contact Alexis Helgeson with further questions + # removed call to prep.data.assim fcn in the uncertainty package,because the function no longer exists, contact Alexis Helgeson with further questions PEcAn.utils::status.end() } } print("past sda section") # Run benchmarking -if("benchmarking" %in% names(settings) & "benchmark" %in% names(settings$benchmarking)){ +if ("benchmarking" %in% names(settings) & "benchmark" %in% names(settings$benchmarking)) { PEcAn.utils::status.start("BENCHMARKING") results <- papply(settings, function(x) calc_benchmark(x, bety)) PEcAn.utils::status.end() @@ -144,13 +144,15 @@ print("past benchmarking") if (PEcAn.utils::status.check("FINISHED") == 0) { PEcAn.utils::status.start("FINISHED") PEcAn.remote::kill.tunnel(settings) - db.query(paste("UPDATE workflows SET finished_at=NOW() WHERE id=", settings$workflow$id, "AND finished_at IS NULL"), params=settings$database$bety) - + db.query(paste("UPDATE workflows SET finished_at=NOW() WHERE id=", settings$workflow$id, "AND finished_at IS NULL"), params = settings$database$bety) + # Send email if configured if (!is.null(settings$email) && 
!is.null(settings$email$to) && (settings$email$to != "")) { - sendmail(settings$email$from, settings$email$to, - paste0("Workflow has finished executing at ", base::date()), - paste0("You can find the results on ", settings$email$url)) + sendmail( + settings$email$from, settings$email$to, + paste0("Workflow has finished executing at ", base::date()), + paste0("You can find the results on ", settings$email$url) + ) } PEcAn.utils::status.end() } diff --git a/shiny/BenchmarkReport/server.R b/shiny/BenchmarkReport/server.R index 21ae79b2bef..80ad3ea070e 100644 --- a/shiny/BenchmarkReport/server.R +++ b/shiny/BenchmarkReport/server.R @@ -1,9 +1,9 @@ # -# This is the server logic of a Shiny web application. You can run the +# This is the server logic of a Shiny web application. You can run the # application by clicking 'Run App' above. # # Find out more about building applications with Shiny here: -# +# # http://shiny.rstudio.com/ # @@ -11,18 +11,16 @@ library(shiny) # Define server logic required to draw a histogram shinyServer(function(input, output) { - output$p <- renderPlot({ metric_dat <- results[[input$b]]$aligned.dat[[input$v]] - colnames(metric_dat)<- c("model","obvs","time") - fcn <- paste0("metric.",input$metric.plot) - do.call(fcn, args <- list(metric_dat,input$v,draw.plot=TRUE)) + colnames(metric_dat) <- c("model", "obvs", "time") + fcn <- paste0("metric.", input$metric.plot) + do.call(fcn, args <- list(metric_dat, input$v, draw.plot = TRUE)) }) - - output$scores <- renderDataTable(results[[input$b]]$bench.results[-grep("plot",metrics),]) - output$obvs <- renderDataTable(results[[input$b]]$obvs) + + output$scores <- renderDataTable(results[[input$b]]$bench.results[-grep("plot", metrics), ]) + output$obvs <- renderDataTable(results[[input$b]]$obvs) output$model <- renderDataTable(results[[input$b]]$model) output$dat <- renderDataTable(results[[input$b]]$aligned.dat[[input$v]]) output$format <- renderDataTable(results[[input$b]]$format) - }) diff --git a/shiny/BenchmarkReport/ui.R b/shiny/BenchmarkReport/ui.R index a01482fdb59..b67771ab44b 100644 --- a/shiny/BenchmarkReport/ui.R +++ b/shiny/BenchmarkReport/ui.R @@ -3,7 +3,7 @@ # run the application by clicking 'Run App' above. 
# # Find out more about building applications with Shiny here: -# +# # http://shiny.rstudio.com/ # @@ -13,26 +13,29 @@ library(shiny) shinyUI(pageWithSidebar( headerPanel = ("Benchmarking Visualization Prototype"), sidebarPanel( - conditionalPanel(condition="input.conditionedPanels==1", - selectInput(inputId = "b", label = "Benchmark Data Set", choices = names(results)), - selectInput(inputId = "v", label = "Variable", choices = dat.vars), - radioButtons(inputId = "metric.plot", label="What plot would you like to see?", - choices = metrics.plot) + conditionalPanel( + condition = "input.conditionedPanels==1", + selectInput(inputId = "b", label = "Benchmark Data Set", choices = names(results)), + selectInput(inputId = "v", label = "Variable", choices = dat.vars), + radioButtons( + inputId = "metric.plot", label = "What plot would you like to see?", + choices = metrics.plot + ) ), - conditionalPanel(condition="input.conditionedPanels==2", - selectInput(inputId = "b", label = "Benchmark Data Set", choices = names(results)) - ) + conditionalPanel( + condition = "input.conditionedPanels==2", + selectInput(inputId = "b", label = "Benchmark Data Set", choices = names(results)) + ) ), mainPanel( tabsetPanel( - tabPanel("Plot", plotOutput("p"), value = 1), - tabPanel("Scores", dataTableOutput('scores'), value=2), - tabPanel("Benchmarking Inputs", dataTableOutput('obvs'), value=2), - tabPanel("Model Outputs", dataTableOutput('model'), value=2), - tabPanel("Aligned data", dataTableOutput('dat'), value=2), - tabPanel("Formats", dataTableOutput('format'), value=2), + tabPanel("Plot", plotOutput("p"), value = 1), + tabPanel("Scores", dataTableOutput("scores"), value = 2), + tabPanel("Benchmarking Inputs", dataTableOutput("obvs"), value = 2), + tabPanel("Model Outputs", dataTableOutput("model"), value = 2), + tabPanel("Aligned data", dataTableOutput("dat"), value = 2), + tabPanel("Formats", dataTableOutput("format"), value = 2), id = "conditionedPanels" ) - ) -)) \ No newline at end of file +)) diff --git a/shiny/Data-Ingest/Data_Ingest_Functions.R b/shiny/Data-Ingest/Data_Ingest_Functions.R index e45a944a05b..744e9661cd2 100644 --- a/shiny/Data-Ingest/Data_Ingest_Functions.R +++ b/shiny/Data-Ingest/Data_Ingest_Functions.R @@ -1,5 +1,5 @@ # resolve.id <- function(attributeTable, attribute, input, output) dplyr::filter(attributeTable, attribute == input) %>% pull(output) -# +# # resolve.id(attributeTable = sites_sub, attribute = sitename, input = sitename_test, output = id) -# +# # dplyr::filter(sites_sub, sitename == sitename_test) %>% pull(id) diff --git a/shiny/Data-Ingest/app.R b/shiny/Data-Ingest/app.R index 2b5420660e5..fa633a71c65 100644 --- a/shiny/Data-Ingest/app.R +++ b/shiny/Data-Ingest/app.R @@ -1,55 +1,73 @@ -lapply(c( "shiny", - "shinydashboard", - "dataone", - "stringr", - "DT", - "shiny", - "shinytoastr", - "shinyWidgets", - "shinyjs", - "shinyTime", - "dplyr"),function(pkg){ - if (!(pkg %in% installed.packages()[,1])){ - install.packages(pkg) - } - library(pkg,character.only = TRUE,quietly = TRUE) - } - ) +lapply(c( + "shiny", + "shinydashboard", + "dataone", + "stringr", + "DT", + "shiny", + "shinytoastr", + "shinyWidgets", + "shinyjs", + "shinyTime", + "dplyr" +), function(pkg) { + if (!(pkg %in% installed.packages()[, 1])) { + install.packages(pkg) + } + library(pkg, character.only = TRUE, quietly = TRUE) +}) + +lapply(c( + "PEcAn.data.land", + "PEcAn.visualization", + "PEcAn.utils", + "PEcAn.DB" +), function(pkg) { + library(pkg, character.only = TRUE, quietly = TRUE) +}) + + + 
+source("ui_utils.R", local = TRUE) +source("helper.R", local = TRUE) + + +##### Bety Calls ###### +bety <- betyConnect() + +sites <- dplyr::tbl(bety, "sites") %>% + dplyr::select(sitename, id) %>% + dplyr::arrange(sitename) +sitenames <- sites %>% pull(sitename) + +inputs <- dplyr::tbl(bety, "inputs") %>% + dplyr::select(name, id) %>% + dplyr::arrange(name) +input_names <- inputs %>% pull(name) + +formats <- dplyr::tbl(bety, "formats") %>% + distinct(name) %>% + dplyr::arrange(name) %>% + pull(name) +formats_sub <- dplyr::tbl(bety, "formats") %>% + dplyr::select(name, id) %>% + dplyr::arrange(name) -lapply(c( "PEcAn.data.land", - "PEcAn.visualization", - "PEcAn.utils", - "PEcAn.DB"),function(pkg){ - library(pkg,character.only = TRUE,quietly = TRUE) - } - ) - - - - source("ui_utils.R", local = TRUE) - source("helper.R", local = TRUE) - - - ##### Bety Calls ###### - bety <- betyConnect() - - sites <- dplyr::tbl(bety, "sites") %>% dplyr::select(sitename, id) %>% dplyr::arrange(sitename) - sitenames <- sites %>% pull(sitename) - - inputs <- dplyr::tbl(bety, "inputs") %>% dplyr::select(name, id) %>% dplyr::arrange(name) - input_names <- inputs %>% pull(name) - - formats <- dplyr::tbl(bety, "formats") %>% distinct(name) %>% dplyr::arrange(name) %>% pull(name) - formats_sub <- dplyr::tbl(bety, "formats") %>% dplyr::select(name, id) %>% dplyr::arrange(name) - - variables_ids <- dplyr::tbl(bety, "variables") %>% dplyr::select(id, name) %>% dplyr::arrange(name) - variables <- variables_ids %>% pull(name) - - # machines <- dplyr::tbl(bety, "machines") %>% distinct(hostname) %>% dplyr::arrange(hostname)%>% pull(hostname) - # machines_sub <- dplyr::tbl(bety, "machines") %>% dplyr::select(hostname, id) %>% dplyr::arrange(hostname) - - mimetypes <- dplyr::tbl(bety, "mimetypes") %>% distinct(type_string) %>% dplyr::arrange(type_string) %>% pull(type_string) - mimetype_sub <- dplyr::tbl(bety, "mimetypes") %>% dplyr::select(type_string, id) %>% dplyr::arrange(type_string) +variables_ids <- dplyr::tbl(bety, "variables") %>% + dplyr::select(id, name) %>% + dplyr::arrange(name) +variables <- variables_ids %>% pull(name) + +# machines <- dplyr::tbl(bety, "machines") %>% distinct(hostname) %>% dplyr::arrange(hostname)%>% pull(hostname) +# machines_sub <- dplyr::tbl(bety, "machines") %>% dplyr::select(hostname, id) %>% dplyr::arrange(hostname) + +mimetypes <- dplyr::tbl(bety, "mimetypes") %>% + distinct(type_string) %>% + dplyr::arrange(type_string) %>% + pull(type_string) +mimetype_sub <- dplyr::tbl(bety, "mimetypes") %>% + dplyr::select(type_string, id) %>% + dplyr::arrange(type_string) ############################################################################# @@ -57,64 +75,67 @@ lapply(c( "PEcAn.data.land", ############################################################################# ui <- dashboardPage( - dashboardHeader(title = "Data Ingest Workflow"), - dashboardSidebar(collapsed = TRUE, + dashboardHeader(title = "Data Ingest Workflow"), + dashboardSidebar( + collapsed = TRUE, source_ui("sidebar_ui.R") ), dashboardBody( useToastr(), # Call error handling package - useShinyjs(), #Include shinyjs + useShinyjs(), # Include shinyjs tabItems( - ## Tab 1 -- Ingest Workflow - tabItem(tabName = "ingestWorkflow", - source_ui("ingest_workflow_ui.R") + ## Tab 1 -- Ingest Workflow + tabItem( + tabName = "ingestWorkflow", + source_ui("ingest_workflow_ui.R") ), - ## Tab 2 -- About - tabItem(tabName = "About", - source_ui("homepage_ui.R") - ) - - )), + ## Tab 2 -- About + tabItem( + tabName = "About", + 
source_ui("homepage_ui.R") + ) + ) + ), title = "PEcAn Data Ingest", - skin = "green" + skin = "green" ) #################################################################################### ################################ SERVER ############################################ #################################################################################### server <- function(input, output, session) { - options(shiny.maxRequestSize = 100 * 1024 ^ 2) #maximum file input size - + options(shiny.maxRequestSize = 100 * 1024^2) # maximum file input size + ## Setup ## - Shared.data <- reactiveValues(downloaded = NULL, selected_row = NULL, - local_files = NULL, selected_row_local = NULL, - new_format = NULL, input_record_df = NULL, - format_vars_df = NULL, input_method = NULL) - + Shared.data <- reactiveValues( + downloaded = NULL, selected_row = NULL, + local_files = NULL, selected_row_local = NULL, + new_format = NULL, input_record_df = NULL, + format_vars_df = NULL, input_method = NULL + ) + Shared.data$variables_rd <- variables - - temp <- tempdir() + + temp <- tempdir() PEcAn_path <- PEcAn.utils::read_web_config("../../web/config.php")$dbfiles_folder - + ## Create two sub-directories in the tempfile ## d1_tempdir <<- file.path(temp, "d1_tempdir") dir.create(d1_tempdir, showWarnings = F) local_tempdir <<- file.path(temp, "local_tempdir") dir.create(local_tempdir, showWarnings = F) - + ##################### DataONE Download ##################### source("server_files/d1_download_svr.R", local = TRUE) ######### FileInput ######################################## source("server_files/local_upload_svr.R", local = TRUE) - + ######### Ingest Workflow ############################## source("server_files/ingest_workflow_svr.R", local = TRUE) - - } -# Run the application... = +# Run the application... = shinyApp(ui = ui, server = server) # example data: doi:10.6073/pasta/63ad7159306bc031520f09b2faefcf87, doi:10.6073-pasta-f31b28b912e6051bf1d383ff1ef18987 diff --git a/shiny/Data-Ingest/helper.R b/shiny/Data-Ingest/helper.R index b19abbed8c3..14d88791b8a 100644 --- a/shiny/Data-Ingest/helper.R +++ b/shiny/Data-Ingest/helper.R @@ -4,23 +4,21 @@ #' @param format_name Format name in type: character string #' @param site_id Integer, scientific notation, or character string that will be converted to an integer and will be the trailing identifier for the firectory name #' -#' @return Self-generated directory name to store files uploaded via local upload. +#' @return Self-generated directory name to store files uploaded via local upload. 
#' @export #' @author Liam Burke (liam.burke24@gmail.com) #' #' @examples #' auto.name.directory(format_name = "LTER-hf-103", site_id = 1000004955) -auto.name.directory <- function(format_name, site_id){ - +auto.name.directory <- function(format_name, site_id) { # replace all non-alphanumeric characters with "_" basename <- base::gsub("[^a-zA-Z0-9 :]", "_", format_name) - + ## Convert site_id into shorter format long_id <- as.numeric(site_id) # convert scientific notation to numeric - new_id <- paste0(long_id %/% 1e+09, "-", long_id %% 1e+09) - + new_id <- paste0(long_id %/% 1e+09, "-", long_id %% 1e+09) + # Combine autoDirName <- paste(basename, "site", new_id, sep = "_") return(autoDirName) } - diff --git a/shiny/Data-Ingest/modules/d1_download_module.R b/shiny/Data-Ingest/modules/d1_download_module.R index f5366ba5e74..e43951ef36e 100644 --- a/shiny/Data-Ingest/modules/d1_download_module.R +++ b/shiny/Data-Ingest/modules/d1_download_module.R @@ -1,78 +1,87 @@ -d1DownloadUI <- function(id){ +d1DownloadUI <- function(id) { ns <- NS(id) - - box(width = 4, title = h2("Import From DataONE"), solidHeader = TRUE, status = "success", - textInput( - ns("id"), - label = "Import from dataONE", - placeholder = "Enter doi or id here" - ), - p("Copy and Paste the following example data sets:"), - p("doi:10.6073/pasta/63ad7159306bc031520f09b2faefcf87"), - p("doi:10.6073-pasta-f31b28b912e6051bf1d383ff1ef18987"), - actionButton(ns("D1Button"), label = "Download"), - # actionButton(inputId = "CancelD1Download", label = "Cancel Download"), This is WAY tricky. Not sure if I can add this functionality... - hr(), - conditionalPanel(condition="$('html').hasClass('shiny-busy')", - tags$div(id="loadmessage", - HTML(paste0("

Download in Progress.

This download may take a couple of minutes.

")) - )), - DT::DTOutput(ns("identifier")), - p("Selected Row (For Testing Purposes)"), - verbatimTextOutput(ns("rowSelection")), ## For testing only - actionButton(ns("D1FinishButton"), label = "Finish Download"), - hr(), - p("Location of Downloaded files:"), - verbatimTextOutput(ns("D1dbfilesPath")) + + box( + width = 4, title = h2("Import From DataONE"), solidHeader = TRUE, status = "success", + textInput( + ns("id"), + label = "Import from dataONE", + placeholder = "Enter doi or id here" + ), + p("Copy and Paste the following example data sets:"), + p("doi:10.6073/pasta/63ad7159306bc031520f09b2faefcf87"), + p("doi:10.6073-pasta-f31b28b912e6051bf1d383ff1ef18987"), + actionButton(ns("D1Button"), label = "Download"), + # actionButton(inputId = "CancelD1Download", label = "Cancel Download"), This is WAY tricky. Not sure if I can add this functionality... + hr(), + conditionalPanel( + condition = "$('html').hasClass('shiny-busy')", + tags$div( + id = "loadmessage", + HTML(paste0("

Download in Progress.

This download may take a couple of minutes.

")) + ) + ), + DT::DTOutput(ns("identifier")), + p("Selected Row (For Testing Purposes)"), + verbatimTextOutput(ns("rowSelection")), ## For testing only + actionButton(ns("D1FinishButton"), label = "Finish Download"), + hr(), + p("Location of Downloaded files:"), + verbatimTextOutput(ns("D1dbfilesPath")) ) - } -d1Download <- function(input, output, session){ - +d1Download <- function(input, output, session) { ## Create two sub-directories in the tempfile ## d1_tempdir <- file.path(temp, "d1_tempdir") dir.create(d1_tempdir, showWarnings = F) local_tempdir <- file.path(temp, "local_tempdir") dir.create(local_tempdir, showWarnings = F) - + observeEvent(input$D1Button, { # run dataone_download with input from id on click # PEcAn.data.land::dataone_download(trimws(input$id), filepath = d1_tempdir) # store files in tempfile - list_of_d1_files <<- c("f1", "f2", "f3", "f4", "f5")# list.files(newdir_D1) + list_of_d1_files <<- c("f1", "f2", "f3", "f4", "f5") # list.files(newdir_D1) D1_file_df <- as.data.frame(list_of_d1_files) - + names(D1_file_df) <- "Available Files" - + Shared.data$d1fileList <- list_of_d1_files - + # Grab the name of the D1_file from newdir_D1 # d1_dirname <<- base::sub("/tmp/Rtmp[[:alnum:]]{6}/d1_tempdir/", "", newdir_D1) # let users create their own filenames eventually - - Shared.data$downloaded <- D1_file_df # Reactive Variable + + Shared.data$downloaded <- D1_file_df # Reactive Variable }) - + # Display downloaded files in data.frame - output$identifier <- DT::renderDT({Shared.data$downloaded}, selection = 'single', options = list(ordering = F, dom = 'tp')) - + output$identifier <- DT::renderDT( + { + Shared.data$downloaded + }, + selection = "single", + options = list(ordering = F, dom = "tp") + ) + observe({ - Shared.data$selected_row <- as.character(Shared.data$downloaded[input$identifier_rows_selected,]) + Shared.data$selected_row <- as.character(Shared.data$downloaded[input$identifier_rows_selected, ]) + }) + + output$rowSelection <- renderPrint({ + Shared.data$selected_row }) - - output$rowSelection <- renderPrint({Shared.data$selected_row}) - + # Move files to correct dbfiles location (make a custom function for this?) observeEvent(input$D1FinishButton, { # create the new directory in /dbfiles dir.create(paste0(PEcAn_path, d1_dirname)) - - n <- length(list_of_d1_files) - for (i in 1:n){ + + n <- length(list_of_d1_files) + for (i in 1:n) { base::file.copy(file.path(newdir_D1, list_of_d1_files[i]), file.path(PEcAn_path, d1_dirname, list_of_d1_files[i])) } - output$D1dbfilesPath <- renderText({paste0(PEcAn_path, d1_dirname)}) # Print path to data + output$D1dbfilesPath <- renderText({ + paste0(PEcAn_path, d1_dirname) + }) # Print path to data }) - - - -} \ No newline at end of file +} diff --git a/shiny/Data-Ingest/modules/dbFiles_module.R b/shiny/Data-Ingest/modules/dbFiles_module.R index 359e18667a8..ae50aa8f175 100644 --- a/shiny/Data-Ingest/modules/dbFiles_module.R +++ b/shiny/Data-Ingest/modules/dbFiles_module.R @@ -1,25 +1,29 @@ ### UI ### dbfilesUI <- function(id) { ns <- NS(id) - + box( - title = h2("2. DbFiles Record"), width = 3, collapsible = TRUE, collapsed = TRUE, + title = h2("2. 
DbFiles Record"), width = 3, collapsible = TRUE, collapsed = TRUE, hr(), - selectizeInput(ns("InputMachineName"), label = "Machine *", choices = NULL, #remember to set default to local - options = list( - placeholder = 'Please search machine by name', - onInitialize = I('function() { this.setValue(""); }') - )), + selectizeInput(ns("InputMachineName"), + label = "Machine *", choices = NULL, # remember to set default to local + options = list( + placeholder = "Please search machine by name", + onInitialize = I('function() { this.setValue(""); }') + ) + ), hr(), textInput( ns("InputFilePath"), label = "File Path *", - placeholder = "This file path will be autogenerated by the download process. The user can edit the filepath here?"), + placeholder = "This file path will be autogenerated by the download process. The user can edit the filepath here?" + ), hr(), textInput( ns("InputFileName"), label = "File Name *", - placeholder = "This file name will be displayed from the download process. The user can edit the file name here"), + placeholder = "This file name will be displayed from the download process. The user can edit the file name here" + ), actionButton(ns("createDBFilesRecord"), label = "Create dbFiles Record"), p("* Denotes a Required Field"), hr(), @@ -28,27 +32,25 @@ dbfilesUI <- function(id) { } ### Server ### -dbfiles <- function(input, output, session){ - - ##Output list +dbfiles <- function(input, output, session) { + ## Output list dbFilesRecordList <- list() - + ## Machine Name ## updateSelectizeInput(session, "InputMachineName", choices = machines, server = TRUE) - + observeEvent(input$createDBFilesRecord, { - ## MachineID + ## MachineID dbFilesRecordList$machine <- input$InputMachineName - dbFilesRecordList$machineID <- machines_sub %>% dplyr::filter(hostname %in% dbFilesRecordList$machine) %>% pull(id) - + dbFilesRecordList$machineID <- machines_sub %>% + dplyr::filter(hostname %in% dbFilesRecordList$machine) %>% + pull(id) + dbFilesRecordList$filePath <- input$InputFilePath dbFilesRecordList$fileName <- input$InputFileName - - output$dbFilesRecordOut <- renderPrint({print(dbFilesRecordList)}) - + + output$dbFilesRecordOut <- renderPrint({ + print(dbFilesRecordList) + }) }) - - - - -} \ No newline at end of file +} diff --git a/shiny/Data-Ingest/modules/formats_module.R b/shiny/Data-Ingest/modules/formats_module.R index 3183f238f6b..dddbd1ac682 100644 --- a/shiny/Data-Ingest/modules/formats_module.R +++ b/shiny/Data-Ingest/modules/formats_module.R @@ -1,73 +1,79 @@ #### UI ##### -formatsRecordUI <- function(id){ +formatsRecordUI <- function(id) { ns <- NS(id) - + shinyjs::hidden( - div(id = "formatbox", - box(title = h2("3. 
Create New Format"), - width = 4, collapsible = TRUE, collapsed = FALSE, solidHeader = TRUE, status = "warning", - selectizeInput(ns("MimetypeName"), label = "Mimetype *", choices = NULL, - options = list( - placeholder = 'Please search inputs by name or site', - onInitialize = I('function() { this.setValue(""); }') - )), - p("or"), - # uiOutput(ns("tab")), ## Link to BETYdb - a(id = ns("betyURL"), "Create New Mimetype", href = "https://www.betydb.org/formats/new", target = "_blank"), - hr(), - textInput( - ns("NewFormatName"), - label = "New Format Name *", - placeholder = "Create a New Format Name"), - hr(), - radioButtons( - ns("HeaderBoolean"), - label = "Is There a Header ?", - choices = c("Yes", "No") - ), - hr(), - textInput( # I should Render UI only if Header = TRUE - ns("SkipLines"), - label = "Skip", - placeholder = "Enter number of header lines to skip."), - hr(), - textAreaInput( - ns("FormatNotes"), - label = "Notes", - height = '150px' - ), - actionButton(ns("createFormatRecord"), label = "Create Format Record"), - p("* Denotes a Required Field"), - hr(), - verbatimTextOutput(ns("FormatRecordOut")) + div( + id = "formatbox", + box( + title = h2("3. Create New Format"), + width = 4, collapsible = TRUE, collapsed = FALSE, solidHeader = TRUE, status = "warning", + selectizeInput(ns("MimetypeName"), + label = "Mimetype *", choices = NULL, + options = list( + placeholder = "Please search inputs by name or site", + onInitialize = I('function() { this.setValue(""); }') + ) + ), + p("or"), + # uiOutput(ns("tab")), ## Link to BETYdb + a(id = ns("betyURL"), "Create New Mimetype", href = "https://www.betydb.org/formats/new", target = "_blank"), + hr(), + textInput( + ns("NewFormatName"), + label = "New Format Name *", + placeholder = "Create a New Format Name" + ), + hr(), + radioButtons( + ns("HeaderBoolean"), + label = "Is There a Header ?", + choices = c("Yes", "No") + ), + hr(), + textInput( # I should Render UI only if Header = TRUE + ns("SkipLines"), + label = "Skip", + placeholder = "Enter number of header lines to skip." 
+ ), + hr(), + textAreaInput( + ns("FormatNotes"), + label = "Notes", + height = "150px" + ), + actionButton(ns("createFormatRecord"), label = "Create Format Record"), + p("* Denotes a Required Field"), + hr(), + verbatimTextOutput(ns("FormatRecordOut")) ) ) ) } #### Server ##### -formatsRecord <- function(input, output, session){ - +formatsRecord <- function(input, output, session) { ## Output List ## FormatRecordList <- list() - + ######### Mimetype Name ################## updateSelectizeInput(session, "MimetypeName", choices = mimetypes, server = TRUE) - + observeEvent(input$createFormatRecord, { ## MimetypeID FormatRecordList$mimetypeName <- input$MimetypeName - FormatRecordList$mimetypeID <- mimetype_sub %>% dplyr::filter(type_string %in% FormatRecordList$mimetypeName) %>% pull(id) - + FormatRecordList$mimetypeID <- mimetype_sub %>% + dplyr::filter(type_string %in% FormatRecordList$mimetypeName) %>% + pull(id) + ## Everything else FormatRecordList$NewFormatName <- input$NewFormatName FormatRecordList$HeaderBoolean <- input$HeaderBoolean - FormatRecordList$SkipLines <- input$SkipLines #This should appear only if header = TRUE + FormatRecordList$SkipLines <- input$SkipLines # This should appear only if header = TRUE FormatRecordList$FormatNotes <- input$FormatNotes - - output$FormatRecordOut <- renderPrint({print(FormatRecordList)}) - - }) - -} \ No newline at end of file + output$FormatRecordOut <- renderPrint({ + print(FormatRecordList) + }) + }) +} diff --git a/shiny/Data-Ingest/modules/inputs_module.R b/shiny/Data-Ingest/modules/inputs_module.R index ea4667318ae..74bd4bea8e4 100644 --- a/shiny/Data-Ingest/modules/inputs_module.R +++ b/shiny/Data-Ingest/modules/inputs_module.R @@ -1,36 +1,40 @@ ## UI ##### -inputsRecordUI <- function(id){ +inputsRecordUI <- function(id) { ns <- NS(id) - + box( title = h2("2. 
Input Record"), width = 4, collapsible = TRUE, solidHeader = TRUE, status = "success", - selectizeInput(ns("InputSiteName"), label = "Site *", choices = NULL, - options = list( - placeholder = 'Please search or select a site below', - onInitialize = I('function() { this.setValue(""); }') - ) + selectizeInput(ns("InputSiteName"), + label = "Site *", choices = NULL, + options = list( + placeholder = "Please search or select a site below", + onInitialize = I('function() { this.setValue(""); }') + ) ), hr(), - selectizeInput(ns("InputParentName"), label = "Parent *", choices = NULL, - options = list( - placeholder = 'Please search inputs by name or site', - onInitialize = I('function() { this.setValue(""); }') - ) + selectizeInput(ns("InputParentName"), + label = "Parent *", choices = NULL, + options = list( + placeholder = "Please search inputs by name or site", + onInitialize = I('function() { this.setValue(""); }') + ) ), hr(), textInput(ns("InputName"), - label = "Name *", - placeholder = ""), + label = "Name *", + placeholder = "" + ), verbatimTextOutput(ns("autoname")), hr(), - selectizeInput(ns("InputFormatName"), label = "Choose Format *", choices = NULL, - options = list( - placeholder = 'Please search Formats by name', - onInitialize = I('function() { this.setValue(""); }') - ) + selectizeInput(ns("InputFormatName"), + label = "Choose Format *", choices = NULL, + options = list( + placeholder = "Please search Formats by name", + onInitialize = I('function() { this.setValue(""); }') + ) ), p("or"), - # actionButton("NewFormat", label = "Create New Format"), + # actionButton("NewFormat", label = "Create New Format"), hr(), dateInput( ns("InputStartDate"), @@ -39,24 +43,28 @@ inputsRecordUI <- function(id){ startview = "decade" ), shinyTime::timeInput(ns("StartTimeInput"), - label = "Start Time (Hours - Minutes)", - seconds = FALSE), + label = "Start Time (Hours - Minutes)", + seconds = FALSE + ), dateInput( - ns('InputEndDate'), - label = 'End Date', - format = 'yyyy-mm-dd', - startview = 'decade' + ns("InputEndDate"), + label = "End Date", + format = "yyyy-mm-dd", + startview = "decade" ), shinyTime::timeInput(ns("EndTimeInput"), - label = "End Time (Hours-Minutes)", - seconds = FALSE), + label = "End Time (Hours-Minutes)", + seconds = FALSE + ), textInput(ns("Timezone"), - label = "Timezone (UTC)", - placeholder = "UTC +/-"), + label = "Timezone (UTC)", + placeholder = "UTC +/-" + ), hr(), textAreaInput(ns("InputNotes"), - label = "Notes", - height = '150px'), + label = "Notes", + height = "150px" + ), actionButton(ns("createInput"), label = "Create Input"), p("* Denotes a Required Field"), hr(), @@ -66,35 +74,41 @@ inputsRecordUI <- function(id){ ### Server ##### -inputsRecord <- function(input, output, session){ +inputsRecord <- function(input, output, session) { ## List of outputs inputsList <- list() - - + + ######### Select Site ############### - updateSelectizeInput(session, "InputSiteName", choices = sitenames, server = TRUE) - + updateSelectizeInput(session, "InputSiteName", choices = sitenames, server = TRUE) + ######### Select Parent ID ################# - updateSelectizeInput(session, "InputParentName", choices = input_names, server = TRUE) - + updateSelectizeInput(session, "InputParentName", choices = input_names, server = TRUE) + ####### Select Format ############## updateSelectizeInput(session, "InputFormatName", choices = formats, server = TRUE) - + ####### Print all selections for Testing ########## - + observeEvent(input$createInput, { ## siteID inputsList$siteName 
<- input$InputSiteName - inputsList$siteID <- sites %>% dplyr::filter(sitename %in% inputsList$siteName) %>% pull(id) - + inputsList$siteID <- sites %>% + dplyr::filter(sitename %in% inputsList$siteName) %>% + pull(id) + ## ParentID inputsList$parentName <- input$InputParentName - inputsList$parentID <- inputs %>% dplyr::filter(name %in% inputsList$parentName) %>% pull(id) - + inputsList$parentID <- inputs %>% + dplyr::filter(name %in% inputsList$parentName) %>% + pull(id) + ## FormatID inputsList$formatName <- input$InputFormatName - inputsList$formatID <- formats_sub %>% dplyr::filter(name %in% inputsList$formatName) %>% pull(id) - + inputsList$formatID <- formats_sub %>% + dplyr::filter(name %in% inputsList$formatName) %>% + pull(id) + ## Other Info inputsList$Name <- input$InputName inputsList$StartDate <- input$InputStartDate @@ -103,9 +117,10 @@ inputsRecord <- function(input, output, session){ inputsList$EndTime <- input$EndTimeInput inputsList$Timezone <- input$Timezone inputsList$Notes <- input$InputNotes - - output$summInputs <- renderPrint({print(inputsList)}) - }) -#output$autoname <- renderPrint({Shared.data$selected_row_local}) + output$summInputs <- renderPrint({ + print(inputsList) + }) + }) + # output$autoname <- renderPrint({Shared.data$selected_row_local}) } diff --git a/shiny/Data-Ingest/modules/local_upload_module.R b/shiny/Data-Ingest/modules/local_upload_module.R index ea06421aadd..ffca776bebd 100644 --- a/shiny/Data-Ingest/modules/local_upload_module.R +++ b/shiny/Data-Ingest/modules/local_upload_module.R @@ -1,47 +1,47 @@ -localUploadUI <- function(id){ +localUploadUI <- function(id) { ns <- NS(id) - - box(width = 4, title = h2("Upload Local Files"), solidHeader = T, status = "success", - # https://github.com/rstudio/shiny-examples/blob/master/009-upload/app.R - fileInput( - inputId = ns("file"), - label = "Upload Local Files", - accept = NULL, - multiple = TRUE, - placeholder = "Drag and drop files here" - ), - DT::DTOutput(ns("dtfiles")), - verbatimTextOutput(ns("test")), - hr(), - textInput( - ns("new_local_filename"), - label = "Set Destination Directory (for testing only)", - placeholder = "Enter New Directory Name Here" - ), - actionButton(ns("LocalFinishButton"), label = "Finish Download"), - hr(), - p("Location of Downloaded Files: (Testing Only)"), - verbatimTextOutput(ns("LocaldbfilesPath")) + + box( + width = 4, title = h2("Upload Local Files"), solidHeader = T, status = "success", + # https://github.com/rstudio/shiny-examples/blob/master/009-upload/app.R + fileInput( + inputId = ns("file"), + label = "Upload Local Files", + accept = NULL, + multiple = TRUE, + placeholder = "Drag and drop files here" + ), + DT::DTOutput(ns("dtfiles")), + verbatimTextOutput(ns("test")), + hr(), + textInput( + ns("new_local_filename"), + label = "Set Destination Directory (for testing only)", + placeholder = "Enter New Directory Name Here" + ), + actionButton(ns("LocalFinishButton"), label = "Finish Download"), + hr(), + p("Location of Downloaded Files: (Testing Only)"), + verbatimTextOutput(ns("LocaldbfilesPath")) ) - } -localUpload <- function(input, output, session){ - +localUpload <- function(input, output, session) { observe({ inFile <- input$file n <- length(inFile$name) names <- inFile$name - - if (is.null(inFile)) + + if (is.null(inFile)) { return(NULL) - + } + splits <- list() - + for (i in 1:n) { - splits <- base::sub("/tmp/Rtmp[[:alnum:]]{6}/", "", inFile[i, "datapath"]) # Consider making this more program agnostic? 
+ splits <- base::sub("/tmp/Rtmp[[:alnum:]]{6}/", "", inFile[i, "datapath"]) # Consider making this more program agnostic? print(splits) - + filenames <- list.files(temp) oldpath <- file.path(temp, splits[i]) print(oldpath[i]) @@ -52,34 +52,40 @@ localUpload <- function(input, output, session){ } uploaded_local <- as.data.frame(list.files(file.path(temp, "local_tempdir"))) names(uploaded_local) <- "Available Files" - Shared.data$local_files <- uploaded_local - + Shared.data$local_files <- uploaded_local }) - - output$dtfiles <- DT::renderDT({Shared.data$local_files}, selection = 'single', options = list(ordering = F, dom = 'tp')) - + + output$dtfiles <- DT::renderDT( + { + Shared.data$local_files + }, + selection = "single", + options = list(ordering = F, dom = "tp") + ) + observe({ - Shared.data$selected_row_local <- as.character(Shared.data$local_files[input$dtfiles_rows_selected,]) + Shared.data$selected_row_local <- as.character(Shared.data$local_files[input$dtfiles_rows_selected, ]) + }) + + output$test <- renderPrint({ + Shared.data$selected_row_local }) - - output$test <- renderPrint({Shared.data$selected_row_local}) - + # Move files to correct dbfiles location (make a custom function for this?) observeEvent(input$LocalFinishButton, { # create the new directory in /dbfiles - local_dirname <- gsub(" ", "_", input$new_local_filename) # Are there any other types of breaking characters that I should avoid with directory naming? + local_dirname <- gsub(" ", "_", input$new_local_filename) # Are there any other types of breaking characters that I should avoid with directory naming? dir.create(file.path(PEcAn_path, local_dirname)) - + path_to_local_tempdir <- file.path(local_tempdir) - list_of_local_files <- list.files(path_to_local_tempdir) - + list_of_local_files <- list.files(path_to_local_tempdir) + n <- length(list_of_d1_files) - for (i in 1:n){ + for (i in 1:n) { base::file.copy(file.path(path_to_local_tempdir, list_of_local_files[i]), file.path(PEcAn_path, local_dirname, list_of_local_files[i])) } - output$LocaldbfilesPath <- renderText({paste0(PEcAn_path, local_dirname)}) # Print path to dbfiles + output$LocaldbfilesPath <- renderText({ + paste0(PEcAn_path, local_dirname) + }) # Print path to dbfiles }) - - - -} \ No newline at end of file +} diff --git a/shiny/Data-Ingest/server_files/create_input_record_svr.R b/shiny/Data-Ingest/server_files/create_input_record_svr.R index 0b64bf1a5af..86cbd2c96c9 100644 --- a/shiny/Data-Ingest/server_files/create_input_record_svr.R +++ b/shiny/Data-Ingest/server_files/create_input_record_svr.R @@ -1,8 +1,8 @@ ######### Select Site ############### -updateSelectizeInput(session, "InputSiteName", choices = sitenames, server = TRUE) +updateSelectizeInput(session, "InputSiteName", choices = sitenames, server = TRUE) ######### Select Parent ID ################# -updateSelectizeInput(session, "InputParentName", choices = input_names, server = TRUE) +updateSelectizeInput(session, "InputParentName", choices = input_names, server = TRUE) ####### Select Format ############## updateSelectizeInput(session, "InputFormatName", choices = formats, server = TRUE) @@ -12,16 +12,22 @@ updateSelectizeInput(session, "InputFormatName", choices = formats, server = TRU observeEvent(input$createInput, { ## siteID inputsList$siteName <- input$InputSiteName - inputsList$siteID <- sites %>% dplyr::filter(sitename %in% inputsList$siteName) %>% pull(id) - + inputsList$siteID <- sites %>% + dplyr::filter(sitename %in% inputsList$siteName) %>% + pull(id) + ## ParentID 
inputsList$parentName <- input$InputParentName - inputsList$parentID <- inputs %>% dplyr::filter(name %in% inputsList$parentName) %>% pull(id) - + inputsList$parentID <- inputs %>% + dplyr::filter(name %in% inputsList$parentName) %>% + pull(id) + ## FormatID inputsList$formatName <- input$InputFormatName - inputsList$formatID <- formats_sub %>% dplyr::filter(name %in% inputsList$formatName) %>% pull(id) - + inputsList$formatID <- formats_sub %>% + dplyr::filter(name %in% inputsList$formatName) %>% + pull(id) + ## Other Info inputsList$Name <- input$InputName inputsList$StartDate <- input$InputStartDate @@ -30,6 +36,8 @@ observeEvent(input$createInput, { inputsList$EndTime <- input$EndTimeInput inputsList$Timezone <- input$Timezone inputsList$Notes <- input$InputNotes - - output$summInputs <- renderPrint({print(inputsList)}) + + output$summInputs <- renderPrint({ + print(inputsList) + }) }) diff --git a/shiny/Data-Ingest/server_files/d1_download_svr.R b/shiny/Data-Ingest/server_files/d1_download_svr.R index 9f2441a54ae..8378abfb64e 100644 --- a/shiny/Data-Ingest/server_files/d1_download_svr.R +++ b/shiny/Data-Ingest/server_files/d1_download_svr.R @@ -8,40 +8,48 @@ observe({ }) observeEvent(input$D1Button, { - tryCatch({ - # run dataone_download with input from id on click - PEcAn.data.land::dataone_download(trimws(input$id), filepath = d1_tempdir) #store files in tempfile - toastr_success("Files Successfully Downloaded") - }, - error = function(e){ - toastr_error(title = "Error in Select DataONE Files", conditionMessage(e)) - }, - warning = function(e){ - toastr_warning(title = "Warning in Select DataONE Files", conditionMessage(e)) - } + tryCatch( + { + # run dataone_download with input from id on click + PEcAn.data.land::dataone_download(trimws(input$id), filepath = d1_tempdir) # store files in tempfile + toastr_success("Files Successfully Downloaded") + }, + error = function(e) { + toastr_error(title = "Error in Select DataONE Files", conditionMessage(e)) + }, + warning = function(e) { + toastr_warning(title = "Warning in Select DataONE Files", conditionMessage(e)) + } ) - list_of_d1_files <<- list.files(newdir_D1) - - # ## df to store href buttons for file preview ## Uncomment when preview is functional. 
# d1_paths_df <- data.frame(href = NA) # for(i in 1:length(list_of_d1_files)){ # d1_paths_df[i,1] <- as.character(a("Preview", href = file.path(newdir_D1, list_of_d1_files[i]), target = "_blank")) # } - - D1_file_df <- as.data.frame(list_of_d1_files) #cbind(list_of_d1_files, d1_paths_df) - + + D1_file_df <- as.data.frame(list_of_d1_files) # cbind(list_of_d1_files, d1_paths_df) + names(D1_file_df) <- c("Available Files") # c("Available Files", "") - + Shared.data$d1fileList <- list_of_d1_files - + # Grab the name of the D1_file from newdir_D1 - d1_dirname <<- base::sub("/tmp/Rtmp[[:alnum:]]{6}/d1_tempdir/", "", newdir_D1) - - Shared.data$downloaded <- D1_file_df # Reactive Variable + d1_dirname <<- base::sub("/tmp/Rtmp[[:alnum:]]{6}/d1_tempdir/", "", newdir_D1) + + Shared.data$downloaded <- D1_file_df # Reactive Variable }) # Display downloaded files in data.frame -output$identifier <- DT::renderDT(datatable({Shared.data$downloaded}, escape = FALSE, selection = 'single', options = list(ordering = F, dom = 'tp'))) +output$identifier <- DT::renderDT(datatable( + { + Shared.data$downloaded + }, + escape = FALSE, + selection = "single", + options = list(ordering = F, dom = "tp") +)) observe({ Shared.data$selected_row <- as.character(Shared.data$downloaded[input$identifier_rows_selected, 1]) @@ -49,23 +57,26 @@ observe({ # Move files to correct dbfiles location (make a custom function for this?) observeEvent(input$complete_ingest_d1, { - tryCatch({ - # create the new directory in /dbfiles - dir.create(paste0(PEcAn_path, d1_dirname)) - - n <- length(list_of_d1_files) - for (i in 1:n){ - base::file.copy(file.path(newdir_D1, list_of_d1_files[i]), file.path(PEcAn_path, d1_dirname, list_of_d1_files[i])) - } - show("d1_new_dir_output") # print message when rendering path to dbfiles - output$D1dbfilesPath <- renderText({paste0(PEcAn_path, d1_dirname)}) # Print path to data - }, - error = function(e){ - toastr_error(title = "Error in Select DataONE Files", conditionMessage(e)) - }, - warning = function(e){ - toastr_warning(title = "Warning in Select DataONE Files", conditionMessage(e)) - } + tryCatch( + { + # create the new directory in /dbfiles + dir.create(paste0(PEcAn_path, d1_dirname)) + + n <- length(list_of_d1_files) + for (i in 1:n) { + base::file.copy(file.path(newdir_D1, list_of_d1_files[i]), file.path(PEcAn_path, d1_dirname, list_of_d1_files[i])) + } + show("d1_new_dir_output") # print message when rendering path to dbfiles + output$D1dbfilesPath <- renderText({ + paste0(PEcAn_path, d1_dirname) + }) # Print path to data + }, + error = function(e) { + toastr_error(title = "Error in Select DataONE Files", conditionMessage(e)) + }, + warning = function(e) { + toastr_warning(title = "Warning in Select DataONE Files", conditionMessage(e)) + } ) }) @@ -73,4 +84,3 @@ observeEvent(input$nextFromD1, { show("input_record_box") hide("nextFromD1_div") }) - diff --git a/shiny/Data-Ingest/server_files/dbfiles_record_svr.R b/shiny/Data-Ingest/server_files/dbfiles_record_svr.R index 8f632156c3b..d3989b04214 100644 --- a/shiny/Data-Ingest/server_files/dbfiles_record_svr.R +++ b/shiny/Data-Ingest/server_files/dbfiles_record_svr.R @@ -6,13 +6,16 @@ updateSelectizeInput(session, "InputMachineName", choices = machines, server = TRUE) observeEvent(input$createDBFilesRecord, { - ## MachineID + ## MachineID dbFilesRecordList$machine <- input$InputMachineName - dbFilesRecordList$machineID <- machines_sub %>% dplyr::filter(hostname %in% dbFilesRecordList$machine) %>% pull(id) - + dbFilesRecordList$machineID <- 
machines_sub %>% + dplyr::filter(hostname %in% dbFilesRecordList$machine) %>% + pull(id) + dbFilesRecordList$filePath <- input$InputFilePath dbFilesRecordList$fileName <- input$InputFileName - - output$dbFilesRecordOut <- renderPrint({print(dbFilesRecordList)}) - -}) \ No newline at end of file + + output$dbFilesRecordOut <- renderPrint({ + print(dbFilesRecordList) + }) +}) diff --git a/shiny/Data-Ingest/server_files/formats_record_svr.R b/shiny/Data-Ingest/server_files/formats_record_svr.R index 73dda05f984..9841fa92bde 100644 --- a/shiny/Data-Ingest/server_files/formats_record_svr.R +++ b/shiny/Data-Ingest/server_files/formats_record_svr.R @@ -8,15 +8,18 @@ updateSelectizeInput(session, "MimetypeName", choices = mimetypes, server = TRUE observeEvent(input$createFormatRecord, { ## MimetypeID FormatRecordList$mimetypeName <- input$MimetypeName - FormatRecordList$mimetypeID <- mimetype_sub %>% dplyr::filter(type_string %in% FormatRecordList$mimetypeName) %>% pull(id) - + FormatRecordList$mimetypeID <- mimetype_sub %>% + dplyr::filter(type_string %in% FormatRecordList$mimetypeName) %>% + pull(id) + ## Everything else FormatRecordList$NewMimeType <- input$NewMimeType FormatRecordList$NewFormatName <- input$NewFormatName FormatRecordList$HeaderBoolean <- input$HeaderBoolean - FormatRecordList$SkipLines <- input$SkipLines #This should appear only if header = TRUE + FormatRecordList$SkipLines <- input$SkipLines # This should appear only if header = TRUE FormatRecordList$FormatNotes <- input$FormatNotes - - output$FormatRecordOut <- renderPrint({print(FormatRecordList)}) - -}) \ No newline at end of file + + output$FormatRecordOut <- renderPrint({ + print(FormatRecordList) + }) +}) diff --git a/shiny/Data-Ingest/server_files/ingest_workflow_svr.R b/shiny/Data-Ingest/server_files/ingest_workflow_svr.R index 03da8f33cb6..57e5d7e97c0 100644 --- a/shiny/Data-Ingest/server_files/ingest_workflow_svr.R +++ b/shiny/Data-Ingest/server_files/ingest_workflow_svr.R @@ -1,9 +1,9 @@ #### Conditional Pannel to Switch between d1 and local upload #### -observeEvent(input$inputMethod,{ - if(input$inputMethod == "DataONE"){ +observeEvent(input$inputMethod, { + if (input$inputMethod == "DataONE") { show("d1_ui") hide("lcl_ui") - }else{ + } else { show("lcl_ui") hide("d1_ui") } @@ -14,24 +14,23 @@ observeEvent(input$lclUpload, { hide("d1_ui") }) -########### Inputs svr ############# +########### Inputs svr ############# ## List of outputs## inputsList <- list() ######### Select Site ############### -updateSelectizeInput(session, "InputSiteName", choices = sitenames, server = TRUE) +updateSelectizeInput(session, "InputSiteName", choices = sitenames, server = TRUE) ######### Select Parent ID ################# -updateSelectizeInput(session, "InputParentName", choices = input_names, server = TRUE) +updateSelectizeInput(session, "InputParentName", choices = input_names, server = TRUE) ####### Select Format ############## updateSelectizeInput(session, "InputFormatName", choices = formats, server = TRUE) observeEvent(input$createFormatRecord, { new_format <- input$NewFormatName - updateSelectizeInput(session, "InputFormatName", choices = new_format, selected = tail(new_format,1), server = TRUE) - + updateSelectizeInput(session, "InputFormatName", choices = new_format, selected = tail(new_format, 1), server = TRUE) }) ###### Select Mimetype ######### @@ -39,16 +38,15 @@ updateSelectizeInput(session, "MimetypeNameCurrent", choices = mimetypes, server observeEvent(input$createFormatRecord, { updateSelectizeInput(session, 
"MimetypeNameCurrent", choices = input$MimetypeName, selected = input$MimetypeName, server = TRUE) - }) ####### Update Text Input for fileName ###### observe({ -updateTextInput(session, "InputName", value = Shared.data$selected_row) + updateTextInput(session, "InputName", value = Shared.data$selected_row) }) ####### Update Selections of Format and Corresponding Mimetype ############ -observeEvent(input$FormatRecordDone,{ +observeEvent(input$FormatRecordDone, { updateSelectizeInput(session, "InputFormatName", choices = c(input$NewFormatName), selected = c(input$NewFormatName), server = TRUE) updateSelectizeInput(session, "MimetypeNameCurrent", choices = c(input$MimetypeName), selected = c(input$MimetypeName), server = TRUE) }) @@ -61,40 +59,44 @@ observeEvent(input$FormatRecordDone,{ observeEvent(input$nextFromInput, { ## siteID - if(input$InputSiteName == ""){ + if (input$InputSiteName == "") { inputsList$siteName <<- "" inputsList$siteID <<- "" - }else{ + } else { inputsList$siteName <<- input$InputSiteName - inputsList$siteID <<- sites %>% dplyr::filter(sitename %in% input$InputSiteName) %>% pull(id) + inputsList$siteID <<- sites %>% + dplyr::filter(sitename %in% input$InputSiteName) %>% + pull(id) } - + ## ParentID - if(input$InputParentName == ""){ + if (input$InputParentName == "") { inputsList$parentName <<- "" inputsList$parentID <<- NA - }else{ + } else { inputsList$parentName <<- input$InputParentName - inputsList$parentID <<- inputs %>% dplyr::filter(name %in% input$InputParentName) %>% pull(id) + inputsList$parentID <<- inputs %>% + dplyr::filter(name %in% input$InputParentName) %>% + pull(id) } ## FormatID - if(input$InputFormatName == ""){ + if (input$InputFormatName == "") { inputsList$formatName <<- "" - }else{ + } else { inputsList$formatName <<- input$InputFormatName } ## Mimetype (should I find the ID as well?)## inputsList$Mimetype <<- input$MimetypeNameCurrent - + inputsList$StartTime_sub <<- trimws(base::sub("[0-9]{4}[.-][0-9]{2}[.-][0-9]{2}[ \t]", "", input$StartTimeInput)) inputsList$EndTime_sub <<- trimws(base::sub("[0-9]{4}[.-][0-9]{2}[.-][0-9]{2}[ \t]", "", input$EndTimeInput)) - + inputsList$StartDateTime <<- trimws(paste(input$InputStartDate, inputsList$StartTime_sub, " ")) inputsList$EndDateTime <<- trimws(paste(input$InputEndDate, inputsList$EndTime_sub, " ")) - - + + ## Other Info inputsList$Method <<- input$inputMethod @@ -106,15 +108,19 @@ observeEvent(input$nextFromInput, { inputsList$EndTime <<- input$EndTimeInput inputsList$Timezone <<- input$Timezone inputsList$Notes <<- input$InputNotes - + ## Print List - #output$summInputs <- renderPrint({inputsList}) + # output$summInputs <- renderPrint({inputsList}) }) -output$input_record_df <- renderPrint({Shared.data$input_record_df}) +output$input_record_df <- renderPrint({ + Shared.data$input_record_df +}) ######### Formats Svr ############# -output$autoname <- renderPrint({Shared.data$selected_row}) #_local +output$autoname <- renderPrint({ + Shared.data$selected_row +}) # _local ######### Mimetype Name ################## updateSelectizeInput(session, "MimetypeName", choices = mimetypes, server = TRUE) @@ -122,18 +128,18 @@ updateSelectizeInput(session, "MimetypeName", choices = mimetypes, server = TRUE observeEvent(input$FormatRecordDone, { ## Output List ## FormatRecordList <<- list() - + ## MimetypeID FormatRecordList$MimetypeName <- input$MimetypeName FormatRecordList$NewmimetypeID <- ifelse((input$MimetypeName == ""), "", mimetype_sub %>% dplyr::filter(type_string %in% input$MimetypeName) %>% pull(id)) - 
+ ## Everything else FormatRecordList$NewFormatName <- input$NewFormatName FormatRecordList$HeaderBoolean <- ifelse((input$HeaderBoolean == "Yes"), "TRUE", "FALSE") - FormatRecordList$SkipLines <- input$SkipLines #This should appear only if header = TRUE + FormatRecordList$SkipLines <- input$SkipLines # This should appear only if header = TRUE FormatRecordList$FormatNotes <- input$FormatNotes - - ## Make 'data.frame' for format record query + + ## Make 'data.frame' for format record query FormatsRecord_df <- data.frame( header = FormatRecordList$HeaderBoolean, skip = FormatRecordList$SkipLines, @@ -142,28 +148,27 @@ observeEvent(input$FormatRecordDone, { name = FormatRecordList$NewFormatName ) ## Print format record for testing -# output$FormatRecordOut <- renderPrint({print(FormatRecordList)}) - + # output$FormatRecordOut <- renderPrint({print(FormatRecordList)}) }) # ## Insert Format Record # PEcAn.DB::insert.format.vars(con = con, formats_df = FormatsRecord_df, formats_variables_df = NULL) ###### Formats Vars Server ############## -##Output list +## Output list FormatVars <- list() ## Variable Name ## updateSelectizeInput(session = getDefaultReactiveDomain(), "pecan_var", choices = variables, server = TRUE) #### Show inputs on click only #### -observeEvent(input$nextFromInput,{ - show("formats.vars_box") - if(input$inputMethod == "DataONE"){ - show("finishButton_d1") - }else{ - show("finishButton_lcl") - } +observeEvent(input$nextFromInput, { + show("formats.vars_box") + if (input$inputMethod == "DataONE") { + show("finishButton_d1") + } else { + show("finishButton_lcl") + } }) ### Create empty matrix with headers to store infinite entries ### @@ -171,112 +176,126 @@ Shared.data$format_vars_df <- matrix(data = NA, nrow = 0, ncol = 6, dimnames = l ### Store inputs in a data.frame ### observeEvent(input$register_variable, { -format_vars_entry <- tibble::tibble( - var_name = input$pecan_var, - variable_id = variables_ids %>% dplyr::filter(name %in% input$pecan_var) %>% pull(id), - name = input$var_name, - unit = input$var_unit, - storage_type = input$storage_type, - column_number = as.numeric(input$col_num) - ) - -Shared.data$format_vars_df <- rbind(format_vars_entry, Shared.data$format_vars_df) -output$format_vars_df <- DT::renderDT(datatable({Shared.data$format_vars_df}, escape = FALSE, selection = 'single', options = list(ordering = F, dom = 'tp'))) + format_vars_entry <- tibble::tibble( + var_name = input$pecan_var, + variable_id = variables_ids %>% dplyr::filter(name %in% input$pecan_var) %>% pull(id), + name = input$var_name, + unit = input$var_unit, + storage_type = input$storage_type, + column_number = as.numeric(input$col_num) + ) + + Shared.data$format_vars_df <- rbind(format_vars_entry, Shared.data$format_vars_df) + output$format_vars_df <- DT::renderDT(datatable( + { + Shared.data$format_vars_df + }, + escape = FALSE, + selection = "single", + options = list(ordering = F, dom = "tp") + )) }) observeEvent(input$complete_ingest_d1, { # Drop var_name column from format_vars_df Shared.data$format_vars_df <- Shared.data$format_vars_df %>% select(-one_of("var_name")) - + # 1. 
Create Format and the format variable records - tryCatch({ - PEcAn.DB::insert.format.vars(con = con, - format_name = input$NewFormatName, - mimetype_id = ifelse((input$MimetypeName == ""), "", mimetype_sub %>% dplyr::filter(type_string %in% input$MimetypeName) %>% pull(id)), - header = ifelse((input$HeaderBoolean == "Yes"), TRUE, FALSE), - skip = input$SkipLines, - notes = input$FormatNotes, - formats_variables = Shared.data$format_vars_df - ) - toastr_success("Successfully Created Format Record") - }, - error = function(e){ - toastr_error(title = "Error in Creating Format & Format Variable Record", conditionMessage(e)) - }, - warning = function(e){ - toastr_warning(title = "Format & Format-Variable Warning", conditionMessage(e)) - } + tryCatch( + { + PEcAn.DB::insert.format.vars( + con = con, + format_name = input$NewFormatName, + mimetype_id = ifelse((input$MimetypeName == ""), "", mimetype_sub %>% dplyr::filter(type_string %in% input$MimetypeName) %>% pull(id)), + header = ifelse((input$HeaderBoolean == "Yes"), TRUE, FALSE), + skip = input$SkipLines, + notes = input$FormatNotes, + formats_variables = Shared.data$format_vars_df + ) + toastr_success("Successfully Created Format Record") + }, + error = function(e) { + toastr_error(title = "Error in Creating Format & Format Variable Record", conditionMessage(e)) + }, + warning = function(e) { + toastr_warning(title = "Format & Format-Variable Warning", conditionMessage(e)) + } + ) + tryCatch( + { + # 2. Create the Inputs Record and dbfiles record + Shared.data$input_record_df <- PEcAn.DB::dbfile.input.insert( + in.path = inputsList$Path, + in.prefix = inputsList$Name, + siteid = inputsList$siteID, + startdate = inputsList$StartDateTime, + enddate = inputsList$EndDateTime, + mimetype = inputsList$Mimetype, + formatname = inputsList$formatName, + parentid = inputsList$parentID, + con = con + # hostname = localhost #?, #default to localhost for now + # allow.conflicting.dates#? #default to FALSE for now + ) + toastr_success("Successfully Created Input Record") + }, + error = function(e) { + toastr_error(title = "Error in Completing Input Record", conditionMessage(e)) + }, + warning = function(e) { + toastr_warning(title = "Input Record Warning", conditionMessage(e)) + } ) - tryCatch({ - #2. Create the Inputs Record and dbfiles record - Shared.data$input_record_df <- PEcAn.DB::dbfile.input.insert(in.path = inputsList$Path, - in.prefix = inputsList$Name, - siteid = inputsList$siteID, - startdate = inputsList$StartDateTime, - enddate = inputsList$EndDateTime, - mimetype = inputsList$Mimetype, - formatname = inputsList$formatName, - parentid = inputsList$parentID, - con = con - #hostname = localhost #?, #default to localhost for now - #allow.conflicting.dates#? #default to FALSE for now - ) - toastr_success("Successfully Created Input Record") - }, - error = function(e){ - toastr_error(title = "Error in Completing Input Record", conditionMessage(e)) - }, - warning = function(e){ - toastr_warning(title = "Input Record Warning", conditionMessage(e)) - } -) - }) observeEvent(input$complete_ingest_lcl, { # Drop var_name column from format_vars_df Shared.data$format_vars_df <- Shared.data$format_vars_df %>% select(-one_of("var_name")) # 1. 
Create Format and the format variable records - tryCatch({ - PEcAn.DB::insert.format.vars(con = con, - format_name = input$NewFormatName, - mimetype_id = ifelse((input$MimetypeName == ""), "", mimetype_sub %>% dplyr::filter(type_string %in% input$MimetypeName) %>% pull(id)), - header = ifelse((input$HeaderBoolean == "Yes"), TRUE, FALSE), - skip = input$SkipLines, - notes = input$FormatNotes, - formats_variables = Shared.data$format_vars_df - ) - toastr_success("Successfully Created Format Record") - }, - error = function(e){ - toastr_error(title = "Error in Creating Format & Format-Variable Record", conditionMessage(e)) - }, - warning = function(e){ - toastr_warning(title = "Format & Format-Variable Record Warning", conditionMessage(e)) - } + tryCatch( + { + PEcAn.DB::insert.format.vars( + con = con, + format_name = input$NewFormatName, + mimetype_id = ifelse((input$MimetypeName == ""), "", mimetype_sub %>% dplyr::filter(type_string %in% input$MimetypeName) %>% pull(id)), + header = ifelse((input$HeaderBoolean == "Yes"), TRUE, FALSE), + skip = input$SkipLines, + notes = input$FormatNotes, + formats_variables = Shared.data$format_vars_df + ) + toastr_success("Successfully Created Format Record") + }, + error = function(e) { + toastr_error(title = "Error in Creating Format & Format-Variable Record", conditionMessage(e)) + }, + warning = function(e) { + toastr_warning(title = "Format & Format-Variable Record Warning", conditionMessage(e)) + } ) - tryCatch({ - #2. Create the Inputs Record and dbfiles record - Shared.data$input_record_df <- PEcAn.DB::dbfile.input.insert(in.path = inputsList$Path, - in.prefix = inputsList$Name, - siteid = inputsList$siteID, - startdate = inputsList$StartDateTime, - enddate = inputsList$EndDateTime, - mimetype = inputsList$Mimetype, - formatname = inputsList$formatName, - parentid = inputsList$parentID, - con = con - #hostname = localhost #?, #default to localhost for now - #allow.conflicting.dates#? #default to FALSE for now - ) - toastr_success("Successfully Created Input Record") - }, - error = function(e){ - toastr_error(title = "Error in Creating Input Record", conditionMessage(e)) - }, - warning = function(e){ - toastr_warning(title = "Input Record Warning", conditionMessage(e)) - } + tryCatch( + { + # 2. Create the Inputs Record and dbfiles record + Shared.data$input_record_df <- PEcAn.DB::dbfile.input.insert( + in.path = inputsList$Path, + in.prefix = inputsList$Name, + siteid = inputsList$siteID, + startdate = inputsList$StartDateTime, + enddate = inputsList$EndDateTime, + mimetype = inputsList$Mimetype, + formatname = inputsList$formatName, + parentid = inputsList$parentID, + con = con + # hostname = localhost #?, #default to localhost for now + # allow.conflicting.dates#? 
#default to FALSE for now + ) + toastr_success("Successfully Created Input Record") + }, + error = function(e) { + toastr_error(title = "Error in Creating Input Record", conditionMessage(e)) + }, + warning = function(e) { + toastr_warning(title = "Input Record Warning", conditionMessage(e)) + } ) }) diff --git a/shiny/Data-Ingest/server_files/input_record_svr.R b/shiny/Data-Ingest/server_files/input_record_svr.R index 072c98dc368..fb5d4676507 100644 --- a/shiny/Data-Ingest/server_files/input_record_svr.R +++ b/shiny/Data-Ingest/server_files/input_record_svr.R @@ -2,10 +2,10 @@ inputsList <- list() dbFilesRecordList <- list() FormatRecordList <- list() ######### Select Site ############### -updateSelectizeInput(session, "InputSiteName", choices = sitenames, server = TRUE) +updateSelectizeInput(session, "InputSiteName", choices = sitenames, server = TRUE) ######### Select Parent ID ################# -updateSelectizeInput(session, "InputParentName", choices = input_names, server = TRUE) +updateSelectizeInput(session, "InputParentName", choices = input_names, server = TRUE) ####### Select Format ############## updateSelectizeInput(session, "InputFormatName", choices = formats, server = TRUE) @@ -15,15 +15,21 @@ updateSelectizeInput(session, "InputFormatName", choices = formats, server = TRU observeEvent(input$createInput, { ## siteID inputsList$siteName <- input$InputSiteName - inputsList$siteID <- sites %>% dplyr::filter(sitename %in% inputsList$siteName) %>% pull(id) - + inputsList$siteID <- sites %>% + dplyr::filter(sitename %in% inputsList$siteName) %>% + pull(id) + ## ParentID inputsList$parentName <- input$InputParentName - inputsList$parentID <- inputs %>% dplyr::filter(name %in% inputsList$parentName) %>% pull(id) - + inputsList$parentID <- inputs %>% + dplyr::filter(name %in% inputsList$parentName) %>% + pull(id) + ## FormatID inputsList$formatName <- input$InputFormatName - inputsList$formatID <- formats_sub %>% dplyr::filter(name %in% inputsList$formatName) %>% pull(id) + inputsList$formatID <- formats_sub %>% + dplyr::filter(name %in% inputsList$formatName) %>% + pull(id) ## Other Info inputsList$Name <- input$InputName @@ -34,7 +40,9 @@ observeEvent(input$createInput, { inputsList$Timezone <- input$Timezone inputsList$Notes <- input$InputNotes - output$summInputs <- renderPrint({print(inputsList)}) + output$summInputs <- renderPrint({ + print(inputsList) + }) }) ################################################### @@ -45,15 +53,18 @@ observeEvent(input$createInput, { updateSelectizeInput(session, "InputMachineName", choices = machines, server = TRUE) observeEvent(input$createDBFilesRecord, { - ## MachineID + ## MachineID dbFilesRecordList$machine <- input$InputMachineName - dbFilesRecordList$machineID <- machines_sub %>% dplyr::filter(hostname %in% dbFilesRecordList$machine) %>% pull(id) - + dbFilesRecordList$machineID <- machines_sub %>% + dplyr::filter(hostname %in% dbFilesRecordList$machine) %>% + pull(id) + dbFilesRecordList$filePath <- input$InputFilePath dbFilesRecordList$fileName <- input$InputFileName - - output$dbFilesRecordOut <- renderPrint({print(dbFilesRecordList)}) - + + output$dbFilesRecordOut <- renderPrint({ + print(dbFilesRecordList) + }) }) ################################################### @@ -66,16 +77,18 @@ updateSelectizeInput(session, "MimetypeName", choices = mimetypes, server = TRUE observeEvent(input$createFormatRecord, { ## MimetypeID FormatRecordList$mimetypeName <- input$MimetypeName - FormatRecordList$mimetypeID <- mimetype_sub %>% 
dplyr::filter(type_string %in% FormatRecordList$mimetypeName) %>% pull(id) - + FormatRecordList$mimetypeID <- mimetype_sub %>% + dplyr::filter(type_string %in% FormatRecordList$mimetypeName) %>% + pull(id) + ## Everything else FormatRecordList$NewMimeType <- input$NewMimeType FormatRecordList$NewFormatName <- input$NewFormatName FormatRecordList$HeaderBoolean <- input$HeaderBoolean - FormatRecordList$SkipLines <- input$SkipLines #This should appear only if header = TRUE + FormatRecordList$SkipLines <- input$SkipLines # This should appear only if header = TRUE FormatRecordList$FormatNotes <- input$FormatNotes - - output$FormatRecordOut <- renderPrint({print(FormatRecordList)}) - -}) + output$FormatRecordOut <- renderPrint({ + print(FormatRecordList) + }) +}) diff --git a/shiny/Data-Ingest/server_files/local_upload_svr.R b/shiny/Data-Ingest/server_files/local_upload_svr.R index a55ebd1945c..86f2b11a947 100644 --- a/shiny/Data-Ingest/server_files/local_upload_svr.R +++ b/shiny/Data-Ingest/server_files/local_upload_svr.R @@ -2,14 +2,15 @@ observe({ inFile <- input$file n <- length(inFile$name) names <- inFile$name - - if (is.null(inFile)) + + if (is.null(inFile)) { return(NULL) - + } + splits <- list() - + for (i in 1:n) { - splits <- base::sub("/tmp/Rtmp[[:alnum:]]{6}/", "", inFile[i, "datapath"]) # Consider making this more program agnostic? + splits <- base::sub("/tmp/Rtmp[[:alnum:]]{6}/", "", inFile[i, "datapath"]) # Consider making this more program agnostic? filenames <- list.files(temp) oldpath <- file.path(temp, splits[i]) base::file.rename(oldpath[i], file.path(temp, "local_tempdir", inFile[i, "name"])) # rename the file to include the original filename @@ -17,38 +18,46 @@ observe({ } uploaded_local <- as.data.frame(list.files(file.path(temp, "local_tempdir"))) names(uploaded_local) <- "Available Files" - Shared.data$local_files <- uploaded_local - + Shared.data$local_files <- uploaded_local }) -output$dtfiles <- DT::renderDT({Shared.data$local_files}, selection = 'single', options = list(ordering = F, dom = 'tp')) +output$dtfiles <- DT::renderDT( + { + Shared.data$local_files + }, + selection = "single", + options = list(ordering = F, dom = "tp") +) observe({ - Shared.data$selected_row <- as.character(Shared.data$local_files[input$dtfiles_rows_selected,]) #_local + Shared.data$selected_row <- as.character(Shared.data$local_files[input$dtfiles_rows_selected, ]) # _local }) observeEvent(input$complete_ingest_lcl, { - tryCatch({ - # create the new directory in /dbfiles - local_dirname <- auto.name.directory(format_name = input$InputFormatName, site_id = inputsList$siteID) # Create name from format and site_id - dir.create(file.path(PEcAn_path, local_dirname)) - - path_to_local_tempdir <- file.path(local_tempdir) - list_of_local_files <- list.files(path_to_local_tempdir) - - n <- length(list_of_local_files) - for (i in 1:n){ - base::file.copy(file.path(path_to_local_tempdir, list_of_local_files[i]), file.path(PEcAn_path, local_dirname, list_of_local_files[i])) - } - show("local_path_out") # Message to render path to dbfiles - output$LocaldbfilesPath <- renderText({paste0(PEcAn_path, local_dirname)}) # Print path to dbfiles - }, - error = function(e){ - toastr_error(title = "Error in Select Local Files", conditionMessage(e)) - }, - warning = function(e){ - toastr_warning(title = "Warning in Select Local Files", conditionMessage(e)) - } + tryCatch( + { + # create the new directory in /dbfiles + local_dirname <- auto.name.directory(format_name = input$InputFormatName, site_id = 
inputsList$siteID) # Create name from format and site_id + dir.create(file.path(PEcAn_path, local_dirname)) + + path_to_local_tempdir <- file.path(local_tempdir) + list_of_local_files <- list.files(path_to_local_tempdir) + + n <- length(list_of_local_files) + for (i in 1:n) { + base::file.copy(file.path(path_to_local_tempdir, list_of_local_files[i]), file.path(PEcAn_path, local_dirname, list_of_local_files[i])) + } + show("local_path_out") # Message to render path to dbfiles + output$LocaldbfilesPath <- renderText({ + paste0(PEcAn_path, local_dirname) + }) # Print path to dbfiles + }, + error = function(e) { + toastr_error(title = "Error in Select Local Files", conditionMessage(e)) + }, + warning = function(e) { + toastr_warning(title = "Warning in Select Local Files", conditionMessage(e)) + } ) }) diff --git a/shiny/Data-Ingest/ui_files/create_input_record_ui.R b/shiny/Data-Ingest/ui_files/create_input_record_ui.R index 966d146f4f4..86f6d0dd112 100644 --- a/shiny/Data-Ingest/ui_files/create_input_record_ui.R +++ b/shiny/Data-Ingest/ui_files/create_input_record_ui.R @@ -1,27 +1,31 @@ box( - title = h2("1. Input Record"), width = 3, collapsible = TRUE, + title = h2("1. Input Record"), width = 3, collapsible = TRUE, hr(), - selectizeInput("InputSiteName", label = "Site *", choices = NULL, + selectizeInput("InputSiteName", + label = "Site *", choices = NULL, options = list( - placeholder = 'Please search or select a site below', + placeholder = "Please search or select a site below", onInitialize = I('function() { this.setValue(""); }') ) ), hr(), - selectizeInput("InputParentName", label = "Parent", choices = NULL, + selectizeInput("InputParentName", + label = "Parent", choices = NULL, options = list( - placeholder = 'Please search inputs by name or site', + placeholder = "Please search inputs by name or site", onInitialize = I('function() { this.setValue(""); }') ) ), hr(), textInput("InputName", - label = "Name *", - placeholder = ""), + label = "Name *", + placeholder = "" + ), hr(), - selectizeInput("InputFormatName", label = "Format *", choices = NULL, + selectizeInput("InputFormatName", + label = "Format *", choices = NULL, options = list( - placeholder = 'Please search Formats by name', + placeholder = "Please search Formats by name", onInitialize = I('function() { this.setValue(""); }') ) ), @@ -33,27 +37,31 @@ box( startview = "decade" ), shinyTime::timeInput("StartTimeInput", - label = "Start Time (Hours - Minutes) *", - seconds = FALSE), + label = "Start Time (Hours - Minutes) *", + seconds = FALSE + ), dateInput( - 'InputEndDate', - label = 'End Date', - format = 'yyyy-mm-dd', - startview = 'decade' + "InputEndDate", + label = "End Date", + format = "yyyy-mm-dd", + startview = "decade" ), shinyTime::timeInput("EndTimeInput", - label = "End Time (Hours-Minutes) *", - seconds = FALSE), + label = "End Time (Hours-Minutes) *", + seconds = FALSE + ), textInput("Timezone", - label = "Timezone (UTC) *", - placeholder = "UTC +/-"), + label = "Timezone (UTC) *", + placeholder = "UTC +/-" + ), hr(), textAreaInput("InputNotes", - label = "Notes", - height = '150px'), + label = "Notes", + height = "150px" + ), actionButton("createInput", label = "Create Input"), # Not sure if I want this here or only available when all forms are filled in. 
p("* Denotes a Required Field"), hr(), verbatimTextOutput("summInputs") - ) \ No newline at end of file +) diff --git a/shiny/Data-Ingest/ui_files/d1_download_ui.R b/shiny/Data-Ingest/ui_files/d1_download_ui.R index db5fe041930..980da265389 100644 --- a/shiny/Data-Ingest/ui_files/d1_download_ui.R +++ b/shiny/Data-Ingest/ui_files/d1_download_ui.R @@ -1,5 +1,6 @@ -div(id = "d1_ui", -tagList( +div( + id = "d1_ui", + tagList( textInput( "id", label = "Import from dataONE", @@ -7,25 +8,31 @@ tagList( ), actionBttn(inputId = "D1Button", label = "Download", size = "sm", color = "success"), hr(), - conditionalPanel(condition="$('html').hasClass('shiny-busy')", - tags$div(id="loadmessage", - HTML(paste0("

Download in Progress.
This download may take a couple of minutes.
")) - )), + conditionalPanel( + condition = "$('html').hasClass('shiny-busy')", + tags$div( + id = "loadmessage", + HTML(paste0("

Download in Progress.
This download may take a couple of minutes.
")) + ) + ), DT::DTOutput("identifier"), - div(id = "nextFromD1_div", - fluidRow( - column(8), - column(4, - actionBttn(inputId = "nextFromD1", label = "Next Step", size = "sm", color = "success") + div( + id = "nextFromD1_div", + fluidRow( + column(8), + column( + 4, + actionBttn(inputId = "nextFromD1", label = "Next Step", size = "sm", color = "success") ) ) ), shinyjs::hidden( - div(id = "d1_new_dir_output", - hr(), - p("Location of Downloaded files:"), - verbatimTextOutput("D1dbfilesPath") + div( + id = "d1_new_dir_output", + hr(), + p("Location of Downloaded files:"), + verbatimTextOutput("D1dbfilesPath") ) ) + ) ) -) \ No newline at end of file diff --git a/shiny/Data-Ingest/ui_files/dbfiles_record_ui.R b/shiny/Data-Ingest/ui_files/dbfiles_record_ui.R index 31ecd927cdb..9e209539271 100644 --- a/shiny/Data-Ingest/ui_files/dbfiles_record_ui.R +++ b/shiny/Data-Ingest/ui_files/dbfiles_record_ui.R @@ -1,23 +1,27 @@ box( - title = h2("2. DbFiles Record"), width = 3, collapsible = TRUE, collapsed = TRUE, - hr(), - selectizeInput("InputMachineName", label = "Machine *", choices = NULL, #remember to set default to local - options = list( - placeholder = 'Please search machine by name', - onInitialize = I('function() { this.setValue(""); }') - )), - hr(), - textInput( - "InputFilePath", - label = "File Path *", - placeholder = "This file path will be autogenerated by the download process. The user can edit the filepath here?"), - hr(), - textInput( - "InputFileName", - label = "File Name *", - placeholder = "This file name will be displayed from the download process. The user can edit the file name here"), - actionButton("createDBFilesRecord", label = "Create dbFiles Record"), - p("* Denotes a Required Field"), - hr(), - verbatimTextOutput("dbFilesRecordOut") + title = h2("2. DbFiles Record"), width = 3, collapsible = TRUE, collapsed = TRUE, + hr(), + selectizeInput("InputMachineName", + label = "Machine *", choices = NULL, # remember to set default to local + options = list( + placeholder = "Please search machine by name", + onInitialize = I('function() { this.setValue(""); }') + ) + ), + hr(), + textInput( + "InputFilePath", + label = "File Path *", + placeholder = "This file path will be autogenerated by the download process. The user can edit the filepath here?" + ), + hr(), + textInput( + "InputFileName", + label = "File Name *", + placeholder = "This file name will be displayed from the download process. The user can edit the file name here" + ), + actionButton("createDBFilesRecord", label = "Create dbFiles Record"), + p("* Denotes a Required Field"), + hr(), + verbatimTextOutput("dbFilesRecordOut") ) diff --git a/shiny/Data-Ingest/ui_files/formats_record_ui.R b/shiny/Data-Ingest/ui_files/formats_record_ui.R index dedbb121a5c..d3a2e44bd79 100644 --- a/shiny/Data-Ingest/ui_files/formats_record_ui.R +++ b/shiny/Data-Ingest/ui_files/formats_record_ui.R @@ -1,41 +1,45 @@ -box(title = h2("3. 
Format Record"), width = 3, collapsible = TRUE, collapsed = TRUE, - hr(), - selectizeInput("MimetypeName", label = "Mimetype *", choices = NULL, - options = list( - placeholder = 'Please search inputs by name or site', - onInitialize = I('function() { this.setValue(""); }') - )), - hr(), - textInput( - "NewMimeType", - label = "New Mime Type *", - placeholder = "Create a New Mimetype"), - hr(), - textInput( - "NewFormatName", - label = "New Format Name *", - placeholder = "Create a New Format Name"), - hr(), - radioButtons( - "HeaderBoolean", - label = "Is There a Header ?", - choices = c("Yes", "No") - ), - hr(), - textInput( # I should Render UI only if Header = TRUE - "SkipLines", - label = "Skip", - placeholder = "Enter number of header lines to skip."), - hr(), - textAreaInput( - "FormatNotes", - label = "Notes", - height = '150px' - ), - actionButton("createFormatRecord", label = "Create Format Record"), - p("* Denotes a Required Field"), - hr(), - verbatimTextOutput("FormatRecordOut") +box( + title = h2("3. Format Record"), width = 3, collapsible = TRUE, collapsed = TRUE, + hr(), + selectizeInput("MimetypeName", + label = "Mimetype *", choices = NULL, + options = list( + placeholder = "Please search inputs by name or site", + onInitialize = I('function() { this.setValue(""); }') + ) + ), + hr(), + textInput( + "NewMimeType", + label = "New Mime Type *", + placeholder = "Create a New Mimetype" + ), + hr(), + textInput( + "NewFormatName", + label = "New Format Name *", + placeholder = "Create a New Format Name" + ), + hr(), + radioButtons( + "HeaderBoolean", + label = "Is There a Header ?", + choices = c("Yes", "No") + ), + hr(), + textInput( # I should Render UI only if Header = TRUE + "SkipLines", + label = "Skip", + placeholder = "Enter number of header lines to skip." + ), + hr(), + textAreaInput( + "FormatNotes", + label = "Notes", + height = "150px" + ), + actionButton("createFormatRecord", label = "Create Format Record"), + p("* Denotes a Required Field"), + hr(), + verbatimTextOutput("FormatRecordOut") ) - - diff --git a/shiny/Data-Ingest/ui_files/homepage_ui.R b/shiny/Data-Ingest/ui_files/homepage_ui.R index 98e921b30f3..2ab47c8853f 100644 --- a/shiny/Data-Ingest/ui_files/homepage_ui.R +++ b/shiny/Data-Ingest/ui_files/homepage_ui.R @@ -1,7 +1,8 @@ -box(width = 12, +box( + width = 12, title = h2("Welcome to the PEcAn Data Ingest Workflow."), hr(), h3("This application is designed to make it easier for researchers to import data from local and remote sources into PEcAn. To do so, this application combines two methods of ingesting data with an improved ingest workflow that registers new data with the BETY database. To begin, please select one method of importing data from in the sidebar on the left.") -) \ No newline at end of file +) diff --git a/shiny/Data-Ingest/ui_files/ingest_workflow_ui.R b/shiny/Data-Ingest/ui_files/ingest_workflow_ui.R index aa0e52ebbbd..1fd7e9857f4 100644 --- a/shiny/Data-Ingest/ui_files/ingest_workflow_ui.R +++ b/shiny/Data-Ingest/ui_files/ingest_workflow_ui.R @@ -1,195 +1,244 @@ fluidPage( -fluidRow( -## 1. D1 Download or Local Upload -box( - title = h2("1. Select Files"), width = 4, solidHeader = TRUE, status = "success", collapsible = TRUE, - shinyWidgets::radioGroupButtons("inputMethod", label = "Select Input Method", - choices = c("DataONE", "Local Files"), status = "success", selected = NULL), - shinyjs::hidden(source_ui("d1_download_ui.R")), - shinyjs::hidden(source_ui("local_file_upload_ui.R")) -), - -### 2. 
Inputs -shinyjs::hidden( - div(id = "input_record_box", - box( - title = h2("2. Input Record"), width = 8, collapsible = TRUE, solidHeader = TRUE, status = "success", - fluidRow(column(6, - selectizeInput("InputSiteName", label = "Site *", choices = NULL, - options = list( - placeholder = 'Please search or select a site below', - onInitialize = I('function() { this.setValue(""); }') - ) - ) - ), - column(6, - selectizeInput("InputParentName", label = "Parent *", choices = NULL, - options = list( - placeholder = 'Please search inputs by name or site', - onInitialize = I('function() { this.setValue(""); }') - ) - ) - )), - textInput("InputName", - label = "Name *", - placeholder = ""), - hr(), - fluidRow(column(6, - selectizeInput("InputFormatName", label = "Format *", choices = NULL, - options = list( - placeholder = 'Please search Formats by name', - onInitialize = I('function() { this.setValue(""); }') - ) - ) - ), - column(6, - selectizeInput("MimetypeNameCurrent", label = "Corresponding Mimetype *", choices = NULL, - options = list( - placeholder = 'Please search mimetypes by name', - onInitialize = I('function() { this.setValue(""); }') - ) - ) - ) - ), - p("or"), - shinyWidgets::dropdownButton(circle = FALSE, label = "Create New Format", width = '350px', - box(width = '350px', solidHeader = TRUE, status = "warning", - selectizeInput("MimetypeName", label = "Mimetype *", choices = NULL, - options = list( - placeholder = 'Please search formats by name or site', - onInitialize = I('function() { this.setValue(""); }') - )), - p("or"), - a(id = "betyURL", "Create New Mimetype", href = "https://www.betydb.org/formats/new", target = "_blank"), - hr(), - textInput( - "NewFormatName", - label = "New Format Name *", - placeholder = "Create a New Format Name"), - radioButtons( - "HeaderBoolean", - label = "Is There a Header ?", - choices = c("Yes", "No") - ), - textInput( # I should Render UI only if Header = TRUE - "SkipLines", - label = "Skip", - placeholder = "Enter number of header lines to skip."), - textAreaInput( - "FormatNotes", - label = "Notes", - height = '75px' + fluidRow( + ## 1. D1 Download or Local Upload + box( + title = h2("1. Select Files"), width = 4, solidHeader = TRUE, status = "success", collapsible = TRUE, + shinyWidgets::radioGroupButtons("inputMethod", + label = "Select Input Method", + choices = c("DataONE", "Local Files"), status = "success", selected = NULL ), - actionBttn("FormatRecordDone", label = "Done", color = "warning", size = "sm"), - p("* Denotes a Required Field") - ) + shinyjs::hidden(source_ui("d1_download_ui.R")), + shinyjs::hidden(source_ui("local_file_upload_ui.R")) ), - hr(), -fluidRow(column(6, - dateInput( - "InputStartDate", - label = "Start Date", - format = "yyyy-mm-dd", - startview = "decade" - ) - ), - column(3, - shinyTime::timeInput("StartTimeInput", - label = "Start Time (HH-MM)", - seconds = FALSE) - ), - column(3 ## Now I'm not sure if we need timezone since sites are known - # selectizeInput("Timezone", label = "Timezone *", choices = NULL, - # options = list( - # placeholder = 'Please search inputs by name or site', - # onInitialize = I('function() { this.setValue(""); }') - # )) - ) - ), -fluidRow(column(6, - dateInput( - 'InputEndDate', - label = 'End Date', - format = 'yyyy-mm-dd', - startview = 'decade' - ) - ), - column(3, - shinyTime::timeInput("EndTimeInput", - label = "End Time (HH-MM)", - seconds = FALSE) - ), -column(3)), # Empty Space - hr(), - textAreaInput("InputNotes", + + ### 2. 
Inputs + shinyjs::hidden( + div( + id = "input_record_box", + box( + title = h2("2. Input Record"), width = 8, collapsible = TRUE, solidHeader = TRUE, status = "success", + fluidRow( + column( + 6, + selectizeInput("InputSiteName", + label = "Site *", choices = NULL, + options = list( + placeholder = "Please search or select a site below", + onInitialize = I('function() { this.setValue(""); }') + ) + ) + ), + column( + 6, + selectizeInput("InputParentName", + label = "Parent *", choices = NULL, + options = list( + placeholder = "Please search inputs by name or site", + onInitialize = I('function() { this.setValue(""); }') + ) + ) + ) + ), + textInput("InputName", + label = "Name *", + placeholder = "" + ), + hr(), + fluidRow( + column( + 6, + selectizeInput("InputFormatName", + label = "Format *", choices = NULL, + options = list( + placeholder = "Please search Formats by name", + onInitialize = I('function() { this.setValue(""); }') + ) + ) + ), + column( + 6, + selectizeInput("MimetypeNameCurrent", + label = "Corresponding Mimetype *", choices = NULL, + options = list( + placeholder = "Please search mimetypes by name", + onInitialize = I('function() { this.setValue(""); }') + ) + ) + ) + ), + p("or"), + shinyWidgets::dropdownButton( + circle = FALSE, label = "Create New Format", width = "350px", + box( + width = "350px", solidHeader = TRUE, status = "warning", + selectizeInput("MimetypeName", + label = "Mimetype *", choices = NULL, + options = list( + placeholder = "Please search formats by name or site", + onInitialize = I('function() { this.setValue(""); }') + ) + ), + p("or"), + a(id = "betyURL", "Create New Mimetype", href = "https://www.betydb.org/formats/new", target = "_blank"), + hr(), + textInput( + "NewFormatName", + label = "New Format Name *", + placeholder = "Create a New Format Name" + ), + radioButtons( + "HeaderBoolean", + label = "Is There a Header ?", + choices = c("Yes", "No") + ), + textInput( # I should Render UI only if Header = TRUE + "SkipLines", + label = "Skip", + placeholder = "Enter number of header lines to skip." + ), + textAreaInput( + "FormatNotes", label = "Notes", - height = '50px'), -fluidRow( - column(10), - column(2, - actionBttn("nextFromInput", - label = "Next Step", - color = "success", - size = "sm") - ) -), - p("* Denotes a Required Field") - ) -) -) -),#End Fluid Row -fluidRow( - ## 4. Formats-Variables -shinyjs::hidden( -div(id = "formats.vars_box", - box(title = h2("3. 
Formats-Variables"), width = 12, solidHeader = TRUE, status = "success", collapsible = TRUE, collapsed = FALSE, - fluidRow(column(3, - selectizeInput("pecan_var", choices = NULL, label = "Variable", - options = list( - placeholder = 'Please search for a variable in PEcAn', - onInitialize = I('function() { this.setValue(""); }') - ) - ) - ), - column(3, - textInput("var_name", label = "Name") - ), - column(2, - textInput("var_unit", label = "Unit") + height = "75px" + ), + actionBttn("FormatRecordDone", label = "Done", color = "warning", size = "sm"), + p("* Denotes a Required Field") + ) + ), + hr(), + fluidRow( + column( + 6, + dateInput( + "InputStartDate", + label = "Start Date", + format = "yyyy-mm-dd", + startview = "decade" + ) + ), + column( + 3, + shinyTime::timeInput("StartTimeInput", + label = "Start Time (HH-MM)", + seconds = FALSE + ) + ), + column( + 3 ## Now I'm not sure if we need timezone since sites are known + # selectizeInput("Timezone", label = "Timezone *", choices = NULL, + # options = list( + # placeholder = 'Please search inputs by name or site', + # onInitialize = I('function() { this.setValue(""); }') + # )) + ) + ), + fluidRow( + column( + 6, + dateInput( + "InputEndDate", + label = "End Date", + format = "yyyy-mm-dd", + startview = "decade" + ) + ), + column( + 3, + shinyTime::timeInput("EndTimeInput", + label = "End Time (HH-MM)", + seconds = FALSE + ) + ), + column(3) + ), # Empty Space + hr(), + textAreaInput("InputNotes", + label = "Notes", + height = "50px" + ), + fluidRow( + column(10), + column( + 2, + actionBttn("nextFromInput", + label = "Next Step", + color = "success", + size = "sm" + ) + ) + ), + p("* Denotes a Required Field") + ) + ) + ) + ), # End Fluid Row + fluidRow( + ## 4. Formats-Variables + shinyjs::hidden( + div( + id = "formats.vars_box", + box( + title = h2("3. Formats-Variables"), width = 12, solidHeader = TRUE, status = "success", collapsible = TRUE, collapsed = FALSE, + fluidRow( + column( + 3, + selectizeInput("pecan_var", + choices = NULL, label = "Variable", + options = list( + placeholder = "Please search for a variable in PEcAn", + onInitialize = I('function() { this.setValue(""); }') + ) + ) + ), + column( + 3, + textInput("var_name", label = "Name") + ), + column( + 2, + textInput("var_unit", label = "Unit") + ), + column( + 2, + textInput("storage_type", label = "Storage Type", placeholder = "e.g. POSIX code") + ), + column( + 2, + textInput("col_num", label = "Column Number") + ) + ), + actionButton("register_variable", label = "Add Variable"), + DT::DTOutput("format_vars_df") + ) ), - column(2, - textInput("storage_type", label = "Storage Type", placeholder = "e.g. 
POSIX code") + div( + id = "finishButton_d1", + fluidRow( + column(10), + column( + 2, + actionBttn("complete_ingest_d1", + label = "Complete Ingest of DataONE Files", + color = "success", + size = "md" + ) + ) + ) ), - column(2, - textInput("col_num", label = "Column Number") + div( + id = "finishButton_lcl", + fluidRow( + column(10), + column( + 2, + actionBttn("complete_ingest_lcl", + label = "Complete Ingest of Local Files", + color = "success", + size = "md" + ) + ) + ) ) - ), - actionButton("register_variable", label = "Add Variable"), - DT::DTOutput("format_vars_df") - ) -), -div(id = "finishButton_d1", - fluidRow( - column(10), - column(2, - actionBttn("complete_ingest_d1", - label = "Complete Ingest of DataONE Files", - color = "success", - size = "md") ) ) -), -div(id = "finishButton_lcl", - fluidRow( - column(10), - column(2, - actionBttn("complete_ingest_lcl", - label = "Complete Ingest of Local Files", - color = "success", - size = "md") - ) - ) -) -) -) -)# End Fluid Page \ No newline at end of file +) # End Fluid Page diff --git a/shiny/Data-Ingest/ui_files/input_record_ui.R b/shiny/Data-Ingest/ui_files/input_record_ui.R index d7d05cf9f36..d8f603acae2 100644 --- a/shiny/Data-Ingest/ui_files/input_record_ui.R +++ b/shiny/Data-Ingest/ui_files/input_record_ui.R @@ -1,129 +1,146 @@ -fluidRow(title = "New Input", - box(title = h2("New Input"), width = 4, collapsible = TRUE, - hr(), - selectizeInput("InputSiteName", label = "Site *", choices = NULL, - options = list( - placeholder = 'Please search or select a site below', - onInitialize = I('function() { this.setValue(""); }') - )), - hr(), - selectizeInput("InputParentName", label = "Parent", choices = NULL, - options = list( - placeholder = 'Please search inputs by name or site', - onInitialize = I('function() { this.setValue(""); }') - )), - hr(), - textInput( - "InputName", - label = "Name *", - placeholder = "" - ), - hr(), - selectizeInput("InputFormatName", label = "Format*", choices = NULL, - options = list( - placeholder = 'Please search Formats by name', - onInitialize = I('function() { this.setValue(""); }') - )), - hr(), - dateInput( - "InputStartDate", - label = "Start Date *", - format = "yyyy-mm-dd", - startview = "decade" - ), - shinyTime::timeInput( - "StartTimeInput", - label = "Start Time (Hours - Minutes) *", - seconds = FALSE - ), - dateInput( - 'InputEndDate', - label = 'End Date *', - format = 'yyyy-mm-dd', - startview = 'decade' - ), - shinyTime::timeInput( - "EndTimeInput", - label = "End Time (Hours-Minutes) *", - seconds = FALSE - ), - textInput( - "Timezone", - label = "Timezone (UTC) *", - placeholder = "UTC +/-" - ), - hr(), - textAreaInput( - "InputNotes", - label = "Notes", - height = '150px' - ), - actionButton("createInput", label = "Create Input"), # Not sure if I want this here or only available when all forms are filled in. - p("* Denotes a Required Field"), - hr(), - verbatimTextOutput("summInputs") - ), - box(title = h2("DbFiles Record"), width = 4, collapsible = TRUE, collapsed = TRUE, - hr(), - selectizeInput("InputMachineName", label = "Machine *", choices = NULL, #remember to set default to local - options = list( - placeholder = 'Please search machine by name', - onInitialize = I('function() { this.setValue(""); }') - )), - hr(), - textInput( - "InputFilePath", - label = "File Path *", - placeholder = "This file path will be autogenerated by the download process. 
The user can edit the filepath here?"), - hr(), - textInput( - "InputFileName", - label = "File Name *", - placeholder = "This file name will be displayed from the download process. The user can edit the file name here"), - actionButton("createDBFilesRecord", label = "Create dbFiles Record"), - p("* Denotes a Required Field"), - hr(), - verbatimTextOutput("dbFilesRecordOut") - ), - box(title = h2("Format ID"), width = 4, collapsible = TRUE, collapsed = TRUE, - hr(), - selectizeInput("MimetypeName", label = "Mimetype *", choices = NULL, - options = list( - placeholder = 'Please search inputs by name or site', - onInitialize = I('function() { this.setValue(""); }') - )), - hr(), - textInput( - "NewMimeType", - label = "New Mime Type *", - placeholder = "Create a New Mimetype"), - hr(), - textInput( - "NewFormatName", - label = "New Format Name *", - placeholder = "Create a New Format Name"), - hr(), - radioButtons( - "HeaderBoolean", - label = "Is There a Header ?", - choices = c("Yes", "No") - ), - hr(), - textInput( # I should Render UI only if Header = TRUE - "SkipLines", - label = "Skip", - placeholder = "Enter number of header lines to skip."), - hr(), - textAreaInput( - "FormatNotes", - label = "Notes", - height = '150px' - ), - actionButton("createFormatRecord", label = "Create Format Record"), - p("* Denotes a Required Field"), - hr(), - verbatimTextOutput("FormatRecordOut") - ) - - ) - +fluidRow( + title = "New Input", + box( + title = h2("New Input"), width = 4, collapsible = TRUE, + hr(), + selectizeInput("InputSiteName", + label = "Site *", choices = NULL, + options = list( + placeholder = "Please search or select a site below", + onInitialize = I('function() { this.setValue(""); }') + ) + ), + hr(), + selectizeInput("InputParentName", + label = "Parent", choices = NULL, + options = list( + placeholder = "Please search inputs by name or site", + onInitialize = I('function() { this.setValue(""); }') + ) + ), + hr(), + textInput( + "InputName", + label = "Name *", + placeholder = "" + ), + hr(), + selectizeInput("InputFormatName", + label = "Format*", choices = NULL, + options = list( + placeholder = "Please search Formats by name", + onInitialize = I('function() { this.setValue(""); }') + ) + ), + hr(), + dateInput( + "InputStartDate", + label = "Start Date *", + format = "yyyy-mm-dd", + startview = "decade" + ), + shinyTime::timeInput( + "StartTimeInput", + label = "Start Time (Hours - Minutes) *", + seconds = FALSE + ), + dateInput( + "InputEndDate", + label = "End Date *", + format = "yyyy-mm-dd", + startview = "decade" + ), + shinyTime::timeInput( + "EndTimeInput", + label = "End Time (Hours-Minutes) *", + seconds = FALSE + ), + textInput( + "Timezone", + label = "Timezone (UTC) *", + placeholder = "UTC +/-" + ), + hr(), + textAreaInput( + "InputNotes", + label = "Notes", + height = "150px" + ), + actionButton("createInput", label = "Create Input"), # Not sure if I want this here or only available when all forms are filled in. 
+ p("* Denotes a Required Field"), + hr(), + verbatimTextOutput("summInputs") + ), + box( + title = h2("DbFiles Record"), width = 4, collapsible = TRUE, collapsed = TRUE, + hr(), + selectizeInput("InputMachineName", + label = "Machine *", choices = NULL, # remember to set default to local + options = list( + placeholder = "Please search machine by name", + onInitialize = I('function() { this.setValue(""); }') + ) + ), + hr(), + textInput( + "InputFilePath", + label = "File Path *", + placeholder = "This file path will be autogenerated by the download process. The user can edit the filepath here?" + ), + hr(), + textInput( + "InputFileName", + label = "File Name *", + placeholder = "This file name will be displayed from the download process. The user can edit the file name here" + ), + actionButton("createDBFilesRecord", label = "Create dbFiles Record"), + p("* Denotes a Required Field"), + hr(), + verbatimTextOutput("dbFilesRecordOut") + ), + box( + title = h2("Format ID"), width = 4, collapsible = TRUE, collapsed = TRUE, + hr(), + selectizeInput("MimetypeName", + label = "Mimetype *", choices = NULL, + options = list( + placeholder = "Please search inputs by name or site", + onInitialize = I('function() { this.setValue(""); }') + ) + ), + hr(), + textInput( + "NewMimeType", + label = "New Mime Type *", + placeholder = "Create a New Mimetype" + ), + hr(), + textInput( + "NewFormatName", + label = "New Format Name *", + placeholder = "Create a New Format Name" + ), + hr(), + radioButtons( + "HeaderBoolean", + label = "Is There a Header ?", + choices = c("Yes", "No") + ), + hr(), + textInput( # I should Render UI only if Header = TRUE + "SkipLines", + label = "Skip", + placeholder = "Enter number of header lines to skip." + ), + hr(), + textAreaInput( + "FormatNotes", + label = "Notes", + height = "150px" + ), + actionButton("createFormatRecord", label = "Create Format Record"), + p("* Denotes a Required Field"), + hr(), + verbatimTextOutput("FormatRecordOut") + ) +) diff --git a/shiny/Data-Ingest/ui_files/local_file_upload_ui.R b/shiny/Data-Ingest/ui_files/local_file_upload_ui.R index bff8c39d54c..d8052cf225e 100644 --- a/shiny/Data-Ingest/ui_files/local_file_upload_ui.R +++ b/shiny/Data-Ingest/ui_files/local_file_upload_ui.R @@ -1,30 +1,36 @@ -div(id = "lcl_ui", +div( + id = "lcl_ui", tagList( - fileInput( - inputId = "file", - label = "Upload Local Files", - accept = NULL, - multiple = TRUE, - placeholder = "Drag and drop files here" - ), - DT::DTOutput("dtfiles"), - fluidRow( - column(8), - column(4, - div(id = "nextFromLocal_div", - actionBttn(inputId = "nextFromLocal", - label = "Next Step", - size = "sm", - color = "success") - ) - ) - ), - shinyjs::hidden( - div(id = "local_path_out", - hr(), - p("Location of Downloaded Files:"), - verbatimTextOutput("LocaldbfilesPath") - ) - ) + fileInput( + inputId = "file", + label = "Upload Local Files", + accept = NULL, + multiple = TRUE, + placeholder = "Drag and drop files here" + ), + DT::DTOutput("dtfiles"), + fluidRow( + column(8), + column( + 4, + div( + id = "nextFromLocal_div", + actionBttn( + inputId = "nextFromLocal", + label = "Next Step", + size = "sm", + color = "success" + ) + ) + ) + ), + shinyjs::hidden( + div( + id = "local_path_out", + hr(), + p("Location of Downloaded Files:"), + verbatimTextOutput("LocaldbfilesPath") + ) ) - ) \ No newline at end of file + ) +) diff --git a/shiny/Data-Ingest/ui_files/sidebar_ui.R b/shiny/Data-Ingest/ui_files/sidebar_ui.R index 88b5bec519c..9d350efd3bf 100644 --- 
a/shiny/Data-Ingest/ui_files/sidebar_ui.R +++ b/shiny/Data-Ingest/ui_files/sidebar_ui.R @@ -1,5 +1,4 @@ sidebarMenu( - menuItem( "Ingest Workflow", tabName = "ingestWorkflow", @@ -11,13 +10,18 @@ sidebarMenu( icon = icon("info-circle", lib = "font-awesome") ), shinyjs::hidden( - div(id = "select_in", - actionBttn("d1Input", label = "Import from DataONE", - icon = icon("download", lib = "font-awesome"), - size = "xs", color = "success"), - actionBttn("lclUpload", label = "Upload Local Files", - icon = icon("upload", lib = "font-awesome"), - size = "xs", color = "success") + div( + id = "select_in", + actionBttn("d1Input", + label = "Import from DataONE", + icon = icon("download", lib = "font-awesome"), + size = "xs", color = "success" + ), + actionBttn("lclUpload", + label = "Upload Local Files", + icon = icon("upload", lib = "font-awesome"), + size = "xs", color = "success" + ) ) ) ) diff --git a/shiny/Data-Ingest/ui_utils.R b/shiny/Data-Ingest/ui_utils.R index daea5e9797c..1ad946a5e00 100644 --- a/shiny/Data-Ingest/ui_utils.R +++ b/shiny/Data-Ingest/ui_utils.R @@ -4,4 +4,3 @@ source_ui <- function(...) { local = TRUE )$value } - diff --git a/shiny/Elicitation/server.R b/shiny/Elicitation/server.R index cfcc78805fd..200f06b46b1 100644 --- a/shiny/Elicitation/server.R +++ b/shiny/Elicitation/server.R @@ -1,13 +1,13 @@ -lapply(c( "shiny", - "ggplot" - ),function(pkg){ - if (!(pkg %in% installed.packages()[,1])){ - install.packages(pkg) - } - library(pkg,character.only = TRUE,quietly = TRUE) - } - ) -if(!("SHELF" %in% install.packages()[,1])) devtools::install_github('OakleyJ/SHELF') +lapply(c( + "shiny", + "ggplot" +), function(pkg) { + if (!(pkg %in% installed.packages()[, 1])) { + install.packages(pkg) + } + library(pkg, character.only = TRUE, quietly = TRUE) +}) +if (!("SHELF" %in% install.packages()[, 1])) devtools::install_github("OakleyJ/SHELF") library(SHELF) library(PEcAn.DB) @@ -15,130 +15,130 @@ library(PEcAn.DB) # Define server logic server <- shinyServer(function(input, output, session) { - ## assumes that variable allPriors is defined and saved in prior.RData - if(!file.exists("prior.RData")){ + if (!file.exists("prior.RData")) { allPriors <- list() - save(allPriors,file="prior.RData") + save(allPriors, file = "prior.RData") } load("prior.RData") - + ## eventually get users, variables, PFTs from database - bety <- NULL #PEcAn.visualization::betyConnect() - + bety <- NULL # PEcAn.visualization::betyConnect() + ## quantiles to be elicited, in order of elicitation - std.prior <- data.frame(quantiles=c(0,1,0.975,0.025,0.25,0.75,0.5),parameters = rep(NA,7)) + std.prior <- data.frame(quantiles = c(0, 1, 0.975, 0.025, 0.25, 0.75, 0.5), parameters = rep(NA, 7)) prior <- std.prior - ii <- 1 ## within CDF counter - jj <- length(allPriors) + 1 ## OVERALL prior counter + ii <- 1 ## within CDF counter + jj <- length(allPriors) + 1 ## OVERALL prior counter myfit <- NA - + updatePrior <- eventReactive(input$Next, { ## determine which parameter needs updating i <- ii - msg = "" - cdf = "" - + msg <- "" + cdf <- "" + ## get value from UI newVal <- as.numeric(input$paramVal) - + ## check that values in increasing order, if so insert myPrior <- prior - if(!is.na(newVal) && i <= nrow(prior)){ - myPrior$parameters[i] <- newVal - sortPrior <- myPrior[order(myPrior$quantiles),] + if (!is.na(newVal) && i <= nrow(prior)) { + myPrior$parameters[i] <- newVal + sortPrior <- myPrior[order(myPrior$quantiles), ] print(sortPrior$parameters) - if(is.unsorted(sortPrior$parameters,na.rm=TRUE)){ + if 
(is.unsorted(sortPrior$parameters, na.rm = TRUE)) { print("UNSORTED") - msg <- paste(msg,"ERROR: CDF must be ascending\n") + msg <- paste(msg, "ERROR: CDF must be ascending\n") } else { i <- i + 1 ## increment counter for message prior <- myPrior } - - } else if(i < 3){ + } else if (i < 3) { ## only upper and lower values can be NA prior$parameters[i] <- NA i <- i + 1 } else { ## Save and Reset for next - allPriors[[jj]] <<- list(user = input$user, - variable = input$var, - prior=prior, - cdf = input$paramVal, - fit = myfit - ) + allPriors[[jj]] <<- list( + user = input$user, + variable = input$var, + prior = prior, + cdf = input$paramVal, + fit = myfit + ) print("SAVED") - i = 1 ## reset counter + i <- 1 ## reset counter prior <<- std.prior ## reset prior myfit <<- NA ## reset prior fit } - + ## prep instructions - if(i <= nrow(prior)){ - msg = paste0(msg,"Enter parameter value for ",prior$quantiles[i]*100,"% quantile") + if (i <= nrow(prior)) { + msg <- paste0(msg, "Enter parameter value for ", prior$quantiles[i] * 100, "% quantile") } else { - msg = "Choose CDF" + msg <- "Choose CDF" } - + ii <<- i ## update global counter - + ## debug messages print(getwd()) - print(c(jj,ii)) - + print(c(jj, ii)) + ## save and return current state - save(prior,allPriors,file="prior.RData") - list(prior=prior,msg=msg) + save(prior, allPriors, file = "prior.RData") + list(prior = prior, msg = msg) }) output$instructions <- renderText({ updatePrior()$msg - }) - + }) + output$table <- renderTable({ - prior <<- updatePrior()$prior ## update the global scope - sortPrior <- prior[order(prior$quantiles),] - sortPrior}) - - - output$outputPlot <- renderPlot({ - myPrior <- updatePrior()$prior - sortPrior <- myPrior[order(prior$quantiles),] - ## Empirical CDF Plot - with(data = sortPrior, plot(parameters,quantiles,main=paste(input$user),type='b',lwd=2)) - - ## when all values elicited, add fit CDFs to plot - if(ii > nrow(prior)){ - ## Fit alternative functions - len = nrow(prior) - lower = ifelse(is.na(sortPrior$parameters[1]),-Inf,sortPrior$parameters[1]) - upper = upper = ifelse(is.na(sortPrior$parameters[len]),Inf,sortPrior$parameters[len]) - myfit <<- with(data = sortPrior, - fitdist(vals=parameters[2:(len-1)],probs=quantiles[2:(len-1)],lower,upper) - ) - lwd = 5*min(myfit$ssq,na.rm = TRUE)/myfit$ssq - - ## Would like to extend fitdist to additional BETY supported distributions: - ## chisq, exp, f, unif, weibull - xval = seq(min(prior$parameters,na.rm = TRUE),max(prior$parameters,na.rm=TRUE),length=1000) - if(!is.na(myfit$Normal[1])){ - lines(xval,pnorm(xval,myfit$Normal$mean,myfit$Normal$sd),col=2,lwd=lwd[1]) - } - if(!is.na(myfit$Gamma[1])){ - lines(xval,pgamma(xval,myfit$Gamma$shape,myfit$Gamma$rate),col=3,lwd=lwd[3]) - } - if(!is.na(myfit$Log.normal[1])){ - lines(xval,plnorm(xval,myfit$Log.normal$mean.log.X,myfit$Log.normal$sd.log.X),col=4,lwd=lwd[4]) - } - if(!is.na(myfit$Beta[1])){ - lines(xval,pbeta(xval,myfit$Beta$shape1,myfit$Beta$shape2),col=5,lwd=lwd[5]) - } - legend("bottomright",legend=c("Normal","Gamma","Log.normal","Beta"),col=2:5,lwd=2) - - } - PEcAn.visualization::add_icon() - }) + prior <<- updatePrior()$prior ## update the global scope + sortPrior <- prior[order(prior$quantiles), ] + sortPrior + }) + + + output$outputPlot <- renderPlot({ + myPrior <- updatePrior()$prior + sortPrior <- myPrior[order(prior$quantiles), ] + ## Empirical CDF Plot + with(data = sortPrior, plot(parameters, quantiles, main = paste(input$user), type = "b", lwd = 2)) + + ## when all values elicited, add fit CDFs to 
plot + if (ii > nrow(prior)) { + ## Fit alternative functions + len <- nrow(prior) + lower <- ifelse(is.na(sortPrior$parameters[1]), -Inf, sortPrior$parameters[1]) + upper <- upper <- ifelse(is.na(sortPrior$parameters[len]), Inf, sortPrior$parameters[len]) + myfit <<- with( + data = sortPrior, + fitdist(vals = parameters[2:(len - 1)], probs = quantiles[2:(len - 1)], lower, upper) + ) + lwd <- 5 * min(myfit$ssq, na.rm = TRUE) / myfit$ssq + + ## Would like to extend fitdist to additional BETY supported distributions: + ## chisq, exp, f, unif, weibull + xval <- seq(min(prior$parameters, na.rm = TRUE), max(prior$parameters, na.rm = TRUE), length = 1000) + if (!is.na(myfit$Normal[1])) { + lines(xval, pnorm(xval, myfit$Normal$mean, myfit$Normal$sd), col = 2, lwd = lwd[1]) + } + if (!is.na(myfit$Gamma[1])) { + lines(xval, pgamma(xval, myfit$Gamma$shape, myfit$Gamma$rate), col = 3, lwd = lwd[3]) + } + if (!is.na(myfit$Log.normal[1])) { + lines(xval, plnorm(xval, myfit$Log.normal$mean.log.X, myfit$Log.normal$sd.log.X), col = 4, lwd = lwd[4]) + } + if (!is.na(myfit$Beta[1])) { + lines(xval, pbeta(xval, myfit$Beta$shape1, myfit$Beta$shape2), col = 5, lwd = lwd[5]) + } + legend("bottomright", legend = c("Normal", "Gamma", "Log.normal", "Beta"), col = 2:5, lwd = 2) + } + PEcAn.visualization::add_icon() + }) }) # runApp(port=5658, launch.browser=FALSE) diff --git a/shiny/Elicitation/ui.R b/shiny/Elicitation/ui.R index 75f94650daa..9c9649ddd7c 100644 --- a/shiny/Elicitation/ui.R +++ b/shiny/Elicitation/ui.R @@ -4,21 +4,23 @@ library(shiny) ui <- shinyUI(fluidPage( # Application title titlePanel("Prior Elicitation"), - sidebarLayout( sidebarPanel( - textInput("user", "User", c()) , - textInput("var","Variable",c()), + textInput("user", "User", c()), + textInput("var", "Variable", c()), textOutput("instructions"), textInput("paramVal", "Enter parameter value", "No Limit"), actionButton("Next", label = "Next"), - tableOutput('table') + tableOutput("table") ), mainPanel( plotOutput("outputPlot", - brush = brushOpts(id = "plot_brush", - resetOnNew = TRUE), - dblclick = "plot_dblclick") + brush = brushOpts( + id = "plot_brush", + resetOnNew = TRUE + ), + dblclick = "plot_dblclick" + ) ) ) )) diff --git a/shiny/ForecastingDashboard/animated_WCr_graphs.R b/shiny/ForecastingDashboard/animated_WCr_graphs.R index 962017e86b6..ff2828dc5f1 100755 --- a/shiny/ForecastingDashboard/animated_WCr_graphs.R +++ b/shiny/ForecastingDashboard/animated_WCr_graphs.R @@ -1,5 +1,4 @@ - -setwd('/fs/data3/kzarada/NEFI/Willow_Creek') +setwd("/fs/data3/kzarada/NEFI/Willow_Creek") library("ggplot2") library("plotly") library("gganimate") @@ -9,42 +8,36 @@ source("/fs/data3/kzarada/pecan/modules/assim.sequential/inst/WillowCreek/downlo ### Site numbers ### # WCr = 676 # Syv = 622 -#Wlef = 678 +# Wlef = 678 # Los = 679 -frame_end = Sys.Date() + lubridate::days(16) -frame_start = Sys.Date() - lubridate::days(10) - -ftime = seq(as.Date(frame_start), as.Date(frame_end), by="days") -ctime = seq(as.Date(frame_start), Sys.Date(), by = "days") -vars = c("NEE", "LE") -site = 676 -for(j in 1:length(vars)){ - - -for(i in 1:length(ctime)){ - - args = c(as.character(ctime[i]), vars[j], site) - - assign(paste0(ctime[i], "_", vars[j]), wcr.graphs(args)) - -} +frame_end <- Sys.Date() + lubridate::days(16) +frame_start <- Sys.Date() - lubridate::days(10) + +ftime <- seq(as.Date(frame_start), as.Date(frame_end), by = "days") +ctime <- seq(as.Date(frame_start), Sys.Date(), by = "days") +vars <- c("NEE", "LE") +site <- 676 +for (j in 1:length(vars)) { + 
for (i in 1:length(ctime)) { + args <- c(as.character(ctime[i]), vars[j], site) + + assign(paste0(ctime[i], "_", vars[j]), wcr.graphs(args)) + } } -NEE.index <- ls(pattern = paste0("_NEE"), envir=.GlobalEnv) -LE.index <- ls(pattern = paste0("_LE"), envir=.GlobalEnv) +NEE.index <- ls(pattern = paste0("_NEE"), envir = .GlobalEnv) +LE.index <- ls(pattern = paste0("_LE"), envir = .GlobalEnv) -nee.data = get(NEE.index[1]) -for(i in 2:length(NEE.index)){ - - nee.data = rbind(nee.data, get(NEE.index[i])) +nee.data <- get(NEE.index[1]) +for (i in 2:length(NEE.index)) { + nee.data <- rbind(nee.data, get(NEE.index[i])) } -le.data = get(LE.index[1]) -for(i in 2:length(LE.index)){ - - le.data = rbind(le.data, get(LE.index[i])) +le.data <- get(LE.index[1]) +for (i in 2:length(LE.index)) { + le.data <- rbind(le.data, get(LE.index[i])) } @@ -54,14 +47,14 @@ nee.data$start_date <- as.factor(nee.data$start_date) le.data$Time <- as.factor(paste(le.data$date, le.data$Time, sep = "_")) le.data$start_date <- as.factor(le.data$start_date) -#Download observed data +# Download observed data real_data <- PEcAn.data.atmosphere::download.US_WCr(frame_start, frame_end, timestep = 6) real_data <- do.call(cbind.data.frame, real_data) colnames(real_data) <- c("NEE", "LE") -#combine observed with predicted data -real_data_nee <- as_tibble(real_data %>% dplyr::select(NEE) %>% mutate(Time= unique(nee.data$Time))) -real_data_le <- as_tibble(real_data %>% dplyr::select(LE) %>% mutate(Time= unique(le.data$Time))) +# combine observed with predicted data +real_data_nee <- as_tibble(real_data %>% dplyr::select(NEE) %>% mutate(Time = unique(nee.data$Time))) +real_data_le <- as_tibble(real_data %>% dplyr::select(LE) %>% mutate(Time = unique(le.data$Time))) nee.data <- left_join(as_tibble(nee.data), real_data_nee, by = c("Time"), suffix = c("nee", "real")) le.data <- left_join(as_tibble(le.data), real_data_le, by = c("Time"), suffix = c("le", "real")) @@ -71,67 +64,67 @@ x.breaks <- unique(nee.data$Time[seq(1, length(nee.data$Time), by = 4)]) # These variables control the start and end dates of the y axis -nee_upper = 1e-07 -nee_lower = min(nee.data$Lower) -qle_upper = max(le.data$Upper) -qle_lower = -50 - -p <-ggplot(nee.data, aes(group = start_date, ids = start_date, frame = start_date)) + #, label = NEE - Predicted - geom_ribbon(aes(x = Time, ymin=Lower, ymax=Upper, fill="95% Confidence Interval"), alpha = 0.4) + - geom_line(aes(x = Time, y = NEE, color = "Observed Data"), size = 1) + - geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + - ggtitle(paste0("Net Ecosystem Exchange for ", frame_start, " to ", frame_end, ", Willow Creek, Wisconsin")) + - scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values=c("Predicted Mean" = "skyblue1", "Observed Data" = "firebrick4")) + - scale_fill_manual(labels = c("95% Confidence Interval"), values=c("95% Confidence Interval" = "blue1")) + - scale_y_continuous(name="NEE (kg C m-2 s-1)", limits = c(nee_lower, nee_upper)) + - scale_x_discrete(name = "", breaks = x.breaks, labels = format(ftime[-length(ftime)], "%m-%d")) + - theme_minimal() + - theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) - -q <- ggplot(le.data, aes(group = start_date, ids = start_date, frame = start_date)) + #, label= LE - Predicted - geom_ribbon(aes(x = Time, 
ymin=Lower, ymax=Upper, fill="95% Confidence Interval"), alpha = 0.4) + - geom_line(aes(x = Time, y = LE, color = "Observed Data"), size = 1) + - geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + +nee_upper <- 1e-07 +nee_lower <- min(nee.data$Lower) +qle_upper <- max(le.data$Upper) +qle_lower <- -50 + +p <- ggplot(nee.data, aes(group = start_date, ids = start_date, frame = start_date)) + # , label = NEE - Predicted + geom_ribbon(aes(x = Time, ymin = Lower, ymax = Upper, fill = "95% Confidence Interval"), alpha = 0.4) + + geom_line(aes(x = Time, y = NEE, color = "Observed Data"), size = 1) + + geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + + ggtitle(paste0("Net Ecosystem Exchange for ", frame_start, " to ", frame_end, ", Willow Creek, Wisconsin")) + + scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values = c("Predicted Mean" = "skyblue1", "Observed Data" = "firebrick4")) + + scale_fill_manual(labels = c("95% Confidence Interval"), values = c("95% Confidence Interval" = "blue1")) + + scale_y_continuous(name = "NEE (kg C m-2 s-1)", limits = c(nee_lower, nee_upper)) + + scale_x_discrete(name = "", breaks = x.breaks, labels = format(ftime[-length(ftime)], "%m-%d")) + + theme_minimal() + + theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) + +q <- ggplot(le.data, aes(group = start_date, ids = start_date, frame = start_date)) + # , label= LE - Predicted + geom_ribbon(aes(x = Time, ymin = Lower, ymax = Upper, fill = "95% Confidence Interval"), alpha = 0.4) + + geom_line(aes(x = Time, y = LE, color = "Observed Data"), size = 1) + + geom_line(aes(x = Time, y = Predicted, color = "Predicted Mean")) + ggtitle(paste0("Latent Energy for ", frame_start, " to ", frame_end, ", Willow Creek, Wisconsin")) + - scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values=c("Predicted Mean" = "skyblue1", "Observed Data" = "firebrick4")) + - scale_fill_manual(labels = c("95% Confidence Interval"), values=c("95% Confidence Interval" = "blue1")) + - scale_y_continuous(name="LE (W m-2 s-1)", limits = c(qle_lower, qle_upper)) + - scale_x_discrete(name = "", breaks = x.breaks, labels = format(ftime[-length(ftime)], "%m-%d")) + - theme_minimal() + - theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) - - -ggplot.nee<-ggplotly(p, tooltip = c("Time", "y", "Lower", "Upper")) %>% - animation_opts(frame = 1200, easing = 'linear-in', transition = 0, redraw = F, mode = "next") %>% - animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = 'black'))) %>% - animation_button(x = 0, xanchor = "left", y = 1.5, yanchor= "top") %>% - layout(legend = list(orientation = "h", x = 0.25, y = 1.1)) %>% - layout(showlegend = T, margin = c(30,50,30,50)) - - ggplot.nee$x$data[[1]]$name <-"95% Confidence Interval" - ggplot.nee$x$data[[2]]$name <- "Observed Data" - ggplot.nee$x$data[[3]]$name <- "Predicted Mean" - - -ggplot.le<-ggplotly(q, tooltip = c("Time", "y", "Lower", "Upper"), layerData = 2) %>% - animation_opts(frame = 1200, easing = 'linear-in', transition = 0, redraw = 
F, mode = "next") %>% - animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = 'black'))) %>% - animation_button(x = 0, xanchor = "left", y = 1.5, yanchor= "top") %>% - layout(legend = list(orientation = "h", x = 0.25, y = 1.1)) %>% - layout(showlegend = T, margin = c(30,50,30,50)) - - ggplot.le$x$data[[1]]$name <-"95% Confidence Interval" - ggplot.le$x$data[[2]]$name <- "Observed Data" - ggplot.le$x$data[[3]]$name <- "Predicted Mean" - - -#for shiny app - met = download_US_WCr_met(frame_start, Sys.Date()) - met$Time = paste0(format(met$date, "%Y-%m-%d"), "_" ,as.numeric(met$Hour)) - met <- as_tibble(met) %>% dplyr::select(Time, Tair, rH, Tsoil, Rg) %>% mutate(Time = as.factor(Time)) - nee.met <- nee.data %>% inner_join(met,nee.data, by = c("Time")) - + scale_color_manual(name = "Legend", labels = c("Predicted Mean", "Observed Data"), values = c("Predicted Mean" = "skyblue1", "Observed Data" = "firebrick4")) + + scale_fill_manual(labels = c("95% Confidence Interval"), values = c("95% Confidence Interval" = "blue1")) + + scale_y_continuous(name = "LE (W m-2 s-1)", limits = c(qle_lower, qle_upper)) + + scale_x_discrete(name = "", breaks = x.breaks, labels = format(ftime[-length(ftime)], "%m-%d")) + + theme_minimal() + + theme(plot.title = element_text(hjust = 0.5, size = 16), legend.title = element_blank(), legend.text = element_text(size = 12), axis.text.x = element_text(size = 12, angle = 45), axis.text.y = element_text(size = 13), axis.title.y = element_text(size = 16)) + + +ggplot.nee <- ggplotly(p, tooltip = c("Time", "y", "Lower", "Upper")) %>% + animation_opts(frame = 1200, easing = "linear-in", transition = 0, redraw = F, mode = "next") %>% + animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = "black"))) %>% + animation_button(x = 0, xanchor = "left", y = 1.5, yanchor = "top") %>% + layout(legend = list(orientation = "h", x = 0.25, y = 1.1)) %>% + layout(showlegend = T, margin = c(30, 50, 30, 50)) + +ggplot.nee$x$data[[1]]$name <- "95% Confidence Interval" +ggplot.nee$x$data[[2]]$name <- "Observed Data" +ggplot.nee$x$data[[3]]$name <- "Predicted Mean" -save.image("/fs/data3/kzarada/NEFI/Willow_Creek/wcr.RData") +ggplot.le <- ggplotly(q, tooltip = c("Time", "y", "Lower", "Upper"), layerData = 2) %>% + animation_opts(frame = 1200, easing = "linear-in", transition = 0, redraw = F, mode = "next") %>% + animation_slider(x = 0, y = -0.1, visible = T, currentvalue = list(prefix = "Forecast Date:", font = list(color = "black"))) %>% + animation_button(x = 0, xanchor = "left", y = 1.5, yanchor = "top") %>% + layout(legend = list(orientation = "h", x = 0.25, y = 1.1)) %>% + layout(showlegend = T, margin = c(30, 50, 30, 50)) +ggplot.le$x$data[[1]]$name <- "95% Confidence Interval" +ggplot.le$x$data[[2]]$name <- "Observed Data" +ggplot.le$x$data[[3]]$name <- "Predicted Mean" + + +# for shiny app +met <- download_US_WCr_met(frame_start, Sys.Date()) +met$Time <- paste0(format(met$date, "%Y-%m-%d"), "_", as.numeric(met$Hour)) +met <- as_tibble(met) %>% + dplyr::select(Time, Tair, rH, Tsoil, Rg) %>% + mutate(Time = as.factor(Time)) +nee.met <- nee.data %>% inner_join(met, nee.data, by = c("Time")) + + +save.image("/fs/data3/kzarada/NEFI/Willow_Creek/wcr.RData") diff --git a/shiny/ForecastingDashboard/wcr.graphs.R b/shiny/ForecastingDashboard/wcr.graphs.R index 88e74048e8d..84a513daf8c 100644 --- a/shiny/ForecastingDashboard/wcr.graphs.R +++ 
b/shiny/ForecastingDashboard/wcr.graphs.R @@ -1,7 +1,9 @@ #### need to create a graph funciton here to call with the args of start time -wcr.graphs <- function(args){ - start_date <- tryCatch(as.POSIXct(args[1]), error = function(e) {NULL} ) +wcr.graphs <- function(args) { + start_date <- tryCatch(as.POSIXct(args[1]), error = function(e) { + NULL + }) if (is.null(start_date)) { in_wid <- as.integer(args[1]) } @@ -11,22 +13,25 @@ wcr.graphs <- function(args){ host = "128.197.168.114", user = "bety", password = "bety", - driver = "Postgres") + driver = "Postgres" + ) con <- PEcAn.DB::db.open(dbparms) # Identify the workflow with the proper information if (!is.null(start_date)) { - workflows <- PEcAn.DB::db.query(paste0("SELECT * FROM workflows WHERE start_date='", format(start_date, "%Y-%m-%d %H:%M:%S"), - "' ORDER BY id"), con) + workflows <- PEcAn.DB::db.query(paste0( + "SELECT * FROM workflows WHERE start_date='", format(start_date, "%Y-%m-%d %H:%M:%S"), + "' ORDER BY id" + ), con) } else { workflows <- PEcAn.DB::db.query(paste0("SELECT * FROM workflows WHERE id='", in_wid, "'"), con) } print(workflows) - workflows <- workflows[which(workflows$site_id == args[3]),] + workflows <- workflows[which(workflows$site_id == args[3]), ] if (nrow(workflows) > 1) { - workflow <- workflows[nrow(workflows),] + workflow <- workflows[nrow(workflows), ] } else { workflow <- workflows } @@ -34,14 +39,14 @@ wcr.graphs <- function(args){ print(paste0("Using workflow ", workflow$id)) wid <- workflow$id - pecan_out_dir <- paste0("/fs/data3/kzarada/output/PEcAn_", wid, "/out"); + pecan_out_dir <- paste0("/fs/data3/kzarada/output/PEcAn_", wid, "/out") pecan_out_dirs <- list.dirs(path = pecan_out_dir) if (is.na(pecan_out_dirs[1])) { print(paste0(pecan_out_dirs, " does not exist.")) } - neemat <- matrix(1:64, nrow=1, ncol=64) # Proxy row, will be deleted later. - qlemat <- matrix(1:64, nrow=1, ncol=64) # Proxy row, will be deleted later. - num_results <- 0; + neemat <- matrix(1:64, nrow = 1, ncol = 64) # Proxy row, will be deleted later. + qlemat <- matrix(1:64, nrow = 1, ncol = 64) # Proxy row, will be deleted later. 
+ num_results <- 0 for (i in 2:length(pecan_out_dirs)) { datafile <- file.path(pecan_out_dirs[i], format(workflow$start_date, "%Y.nc")) if (!file.exists(datafile)) { @@ -51,8 +56,8 @@ wcr.graphs <- function(args){ num_results <- num_results + 1 - #open netcdf file - ncptr <- ncdf4::nc_open(datafile); + # open netcdf file + ncptr <- ncdf4::nc_open(datafile) # Attach data to matricies nee <- ncdf4::ncvar_get(ncptr, "NEE") @@ -71,32 +76,34 @@ wcr.graphs <- function(args){ print(paste0(num_results, " results found.")) } # Strip away proxy rows - neemat <- neemat[-1,] - qlemat <- qlemat[-1,] + neemat <- neemat[-1, ] + qlemat <- qlemat[-1, ] # Time - time <- seq(6, 6 * ncol(neemat), by=6) + time <- seq(6, 6 * ncol(neemat), by = 6) # Caluclate means neemins <- NULL neemaxes <- NULL - quantiles <- apply(neemat,2,quantile,c(0.025,0.5,0.975), na.rm=TRUE) - neelower95 <- quantiles[1,] - neemeans <- quantiles[2,] - neeupper95 <- quantiles[3,] + quantiles <- apply(neemat, 2, quantile, c(0.025, 0.5, 0.975), na.rm = TRUE) + neelower95 <- quantiles[1, ] + neemeans <- quantiles[2, ] + neeupper95 <- quantiles[3, ] needf <- data.frame(time = time, Lower = neelower95, Predicted = neemeans, Upper = neeupper95) - needf$date <- c(rep(as.Date(start_date), 3), rep(seq(as.Date(start_date) + lubridate::days(1), as.Date(workflow$end_date)-lubridate::days(1), by="days"), each = 4), as.Date(workflow$end_date)) - needf$Time <- c(6,12,18, rep(c(0,6,12,18),length.out = (length(needf$date) - 3))) + needf$date <- c(rep(as.Date(start_date), 3), rep(seq(as.Date(start_date) + lubridate::days(1), as.Date(workflow$end_date) - lubridate::days(1), by = "days"), each = 4), as.Date(workflow$end_date)) + needf$Time <- c(6, 12, 18, rep(c(0, 6, 12, 18), length.out = (length(needf$date) - 3))) needf$start_date <- rep(start_date, each = 64) - quantiles <- apply(qlemat,2,quantile,c(0.025,0.5,0.975), na.rm=TRUE) - qlelower95 <- quantiles[1,] - qlemeans <- quantiles[2,] - qleupper95 <- quantiles[3,] + quantiles <- apply(qlemat, 2, quantile, c(0.025, 0.5, 0.975), na.rm = TRUE) + qlelower95 <- quantiles[1, ] + qlemeans <- quantiles[2, ] + qleupper95 <- quantiles[3, ] qledf <- data.frame(time = time, Lower = qlelower95, Predicted = qlemeans, Upper = qleupper95) - qledf$date <- c(rep(as.Date(start_date), 3), rep(seq(as.Date(start_date) + lubridate::days(1), as.Date(workflow$end_date)-lubridate::days(1), by="days"), each = 4), as.Date(workflow$end_date)) - qledf$Time <- c(6,12,18, rep(c(0,6,12,18),length.out = (length(qledf$date) - 3))) + qledf$date <- c(rep(as.Date(start_date), 3), rep(seq(as.Date(start_date) + lubridate::days(1), as.Date(workflow$end_date) - lubridate::days(1), by = "days"), each = 4), as.Date(workflow$end_date)) + qledf$Time <- c(6, 12, 18, rep(c(0, 6, 12, 18), length.out = (length(qledf$date) - 3))) qledf$start_date <- rep(start_date, each = 64) - if(args[2] == "NEE"){ - return(needf)} - else(return(qledf)) + if (args[2] == "NEE") { + return(needf) + } else { + return(qledf) + } } diff --git a/shiny/Pecan.depend/SERVER.R b/shiny/Pecan.depend/SERVER.R index 82d66f50976..ffa1ff0b903 100644 --- a/shiny/Pecan.depend/SERVER.R +++ b/shiny/Pecan.depend/SERVER.R @@ -2,313 +2,298 @@ optionsDT_fixe <- list(paging = FALSE, searching = FALSE, bInfo = FALSE, search.
shinyServer(function(input, output, session) { - observe({ - input$GOPackage - isolate({ - # print(input$Pack) - if (length(input$packages) > 0) { - data <- Pck.load.to.vis(input$packages) - - func <- c(input$packages) - # print(func) - - - nb.func.slave = NULL - nb.func.master = NULL - for (i in 1:length(func)) { - - id.call <- as.numeric(as.character(data$Nomfun$id[which(func[i] == data$Nomfun$label)])) - - id.call.slave <- as.numeric(as.character(data$fromto$from[which(id.call == data$fromto$to)])) - id.call.master <- as.numeric(as.character(data$fromto$from[which(id.call == data$fromto$from)])) - - nb.call <- length(as.character(data$Nomfun$label[id.call.slave])) - nb.func.slave[i] = nb.call - - nb.call <- length(as.character(data$Nomfun$label[id.call.master])) - nb.func.master[i] = nb.call - - } - - optionsDT_fixe$drawCallback <- I("function( settings ) {document.getElementById('tabledep').style.width = '400px';}") - ## Output first graph - df <- data.frame(Package = func, Import = nb.func.master, `Imported by` = nb.func.slave) - - - - output$tabledep <- renderDataTable({ - df - }, options = optionsDT_fixe) - - output$main_plot <- renderVisNetwork({ - data$fromto%>%filter(title%>%gsub('

','',.)%>%gsub('

','',.)%in%input$variablesp)->data$fromto - if (nrow(data$fromto)){ - net <- plot(data, block = TRUE) - - # add legend - data_legend <- unique(data$fromto[, c("title", "color")]) - data_legend$label <- gsub("

", "", data_legend$title, fixed = TRUE) - data_legend$label <- gsub("

", "", data_legend$label, fixed = TRUE) - data_legend$title <- NULL - data_legend$arrows <- "to" - - net %>% - visLegend(addEdges = data_legend, useGroups = FALSE, width = 0.1) - } - - - }) - curentd1 <<- data - output$titledatatabel <- renderText({ - "Dependencies between package(s)" - }) - - } - }) - }) - - - observe({ - current.package <- input$main_plot_selected - current.package <- as.character(curentd1$Nomfun[as.numeric(current.package), "label"]) - updateSelectizeInput(session, "package", NULL, choices = installed.packages()[, 1]%>%grep("^PEcAn",.,value = T,ignore.case = T), selected = current.package) - }) - - observe({ - input$GOFunc2 - isolate({ - if (input$package != "" && input$GOFunc2 > 0) { - - func <- input$package - # print(func) - func - - if (!func %in% installed.packages()[, 1]) { - install.packages(func) - } - library(func, character.only = TRUE) - dep1 <- envirDependencies(paste0("package:", func)) - nb.fun <- length(dep1$Nomfun$label) - - - updateTabsetPanel(session, "Tabsetpan", selected = "Functions") - optionsDT_fixe$drawCallback <- I("function( settings ) {document.getElementById('datatable2').style.width = '100px';}") - output$datatable2 <- renderDataTable(data.frame(Number.of.functions = nb.fun), options = optionsDT_fixe) - - output$zoomin <- renderText(paste("Zoom on package ", func)) - output$info <- renderText(paste("Information on package ", func)) - curentd3 <<- func - - output$main_plot1 <- renderVisNetwork({ - plot(dep1, block = TRUE) - }) - curentd2 <<- dep1 - } + observe({ + input$GOPackage + isolate({ + # print(input$Pack) + if (length(input$packages) > 0) { + data <- Pck.load.to.vis(input$packages) + + func <- c(input$packages) + # print(func) + + + nb.func.slave <- NULL + nb.func.master <- NULL + for (i in 1:length(func)) { + id.call <- as.numeric(as.character(data$Nomfun$id[which(func[i] == data$Nomfun$label)])) + + id.call.slave <- as.numeric(as.character(data$fromto$from[which(id.call == data$fromto$to)])) + id.call.master <- as.numeric(as.character(data$fromto$from[which(id.call == data$fromto$from)])) + + nb.call <- length(as.character(data$Nomfun$label[id.call.slave])) + nb.func.slave[i] <- nb.call + + nb.call <- length(as.character(data$Nomfun$label[id.call.master])) + nb.func.master[i] <- nb.call + } + + optionsDT_fixe$drawCallback <- I("function( settings ) {document.getElementById('tabledep').style.width = '400px';}") + ## Output first graph + df <- data.frame(Package = func, Import = nb.func.master, `Imported by` = nb.func.slave) + + + + output$tabledep <- renderDataTable( + { + df + }, + options = optionsDT_fixe + ) + + output$main_plot <- renderVisNetwork({ + data$fromto %>% filter(title %>% gsub("

", "", .) %>% gsub("

", "", .) %in% input$variablesp) -> data$fromto + if (nrow(data$fromto)) { + net <- plot(data, block = TRUE) + + # add legend + data_legend <- unique(data$fromto[, c("title", "color")]) + data_legend$label <- gsub("

", "", data_legend$title, fixed = TRUE) + data_legend$label <- gsub("

", "", data_legend$label, fixed = TRUE) + data_legend$title <- NULL + data_legend$arrows <- "to" + + net %>% + visLegend(addEdges = data_legend, useGroups = FALSE, width = 0.1) + } }) - }) - - observe({ - input$GOFunc1 - isolate({ - if (!is.null(input$main_plot_selected) && input$main_plot_selected != "" && input$GOFunc1 > 0) { - - func <- as.character(curentd1$Nomfun$label[input$main_plot_selected == curentd1$Nomfun$id]) - # print(func) - func - - if (!func %in% installed.packages()[, 1]) { - install.packages(func) - } - library(func, character.only = TRUE) - dep1 <- envirDependencies(paste0("package:", func)) - nb.fun <- length(dep1$Nomfun$label) - - - updateTabsetPanel(session, "Tabsetpan", selected = "Functions") - optionsDT_fixe$drawCallback <- I("function( settings ) {document.getElementById('datatable2').style.width = '100px';}") - output$datatable2 <- renderDataTable(data.frame(Number.of.functions = nb.fun), options = optionsDT_fixe) - - output$zoomin <- renderText(paste("Zoom on package : ", func)) - output$info <- renderText(paste("Information on : ", func)) - curentd3 <<- func - - output$main_plot1 <- renderVisNetwork({ - plot(dep1, block = TRUE) - }) - curentd2 <<- dep1 - } + curentd1 <<- data + output$titledatatabel <- renderText({ + "Dependencies between package(s)" }) + } }) - - ### chossefunction - - observe({ - input$chargedf - isolate({ - input$packageslist - sapply(input$packageslist, function(x) { - library(x, character.only = TRUE) - }) - allFun <- unique(unlist(sapply(input$packageslist, function(x) { - allFunctionEnv(paste0("package:", x)) - }))) - - updateSelectizeInput(session, inputId = "functionlist", choices = allFun) + }) + + + observe({ + current.package <- input$main_plot_selected + current.package <- as.character(curentd1$Nomfun[as.numeric(current.package), "label"]) + updateSelectizeInput(session, "package", NULL, choices = installed.packages()[, 1] %>% grep("^PEcAn", ., value = T, ignore.case = T), selected = current.package) + }) + + observe({ + input$GOFunc2 + isolate({ + if (input$package != "" && input$GOFunc2 > 0) { + func <- input$package + # print(func) + func + + if (!func %in% installed.packages()[, 1]) { + install.packages(func) + } + library(func, character.only = TRUE) + dep1 <- envirDependencies(paste0("package:", func)) + nb.fun <- length(dep1$Nomfun$label) + + + updateTabsetPanel(session, "Tabsetpan", selected = "Functions") + optionsDT_fixe$drawCallback <- I("function( settings ) {document.getElementById('datatable2').style.width = '100px';}") + output$datatable2 <- renderDataTable(data.frame(Number.of.functions = nb.fun), options = optionsDT_fixe) + + output$zoomin <- renderText(paste("Zoom on package ", func)) + output$info <- renderText(paste("Information on package ", func)) + curentd3 <<- func + + output$main_plot1 <- renderVisNetwork({ + plot(dep1, block = TRUE) }) + curentd2 <<- dep1 + } }) - - output$chossefunctionplot <- renderVisNetwork({ - input$makegraph - - isolate({ - if (input$makegraph >= 1) { - - dep<-my.allDepFunction(input$packageslist, unlist(strsplit(input$functionlist, split = ";"))) - #- lets exlude the ones in the base or more widely used ones - # which(dep[["Nomfun"]]$label%in%c('papply','stop','warning','logger.warn','logger.error','logger.debug','logger.severe','logger.info'))->excludes - # dep[["fromto"]]<-(dep[["fromto"]])%>%dplyr::filter(!(from%in%excludes))%>%dplyr::filter(!(to%in%excludes)) - #- nolink - # which(!(dep[["Nomfun"]]$id%in%as.numeric(unlist(dep[["fromto"]]))))->nolinks - # 
dep[["Nomfun"]]<-dep[["Nomfun"]]%>%dplyr::filter(!(id%in%c(excludes,nolinks))) - #-- plotting - visNetwork(dep[[1]], dep[[2]])%>% - visGroups()%>% - visOptions(selectedBy = "group", - collapse = TRUE, - highlightNearest = TRUE, - nodesIdSelection = list(enabled = TRUE))%>% - visExport() %>% - visPhysics(stabilization=list(iterations=100))%>% - visEdges(arrows =list(from = list(enabled = TRUE)), - color = list(color = "lightblue", highlight = "red")) %>% - visLegend(zoom = T,width = 0.1)->netw - if (input$igraphcheck) - netw%>%visIgraphLayout()->netw - return(netw) - - } + }) + + observe({ + input$GOFunc1 + isolate({ + if (!is.null(input$main_plot_selected) && input$main_plot_selected != "" && input$GOFunc1 > 0) { + func <- as.character(curentd1$Nomfun$label[input$main_plot_selected == curentd1$Nomfun$id]) + # print(func) + func + + if (!func %in% installed.packages()[, 1]) { + install.packages(func) + } + library(func, character.only = TRUE) + dep1 <- envirDependencies(paste0("package:", func)) + nb.fun <- length(dep1$Nomfun$label) + + + updateTabsetPanel(session, "Tabsetpan", selected = "Functions") + optionsDT_fixe$drawCallback <- I("function( settings ) {document.getElementById('datatable2').style.width = '100px';}") + output$datatable2 <- renderDataTable(data.frame(Number.of.functions = nb.fun), options = optionsDT_fixe) + + output$zoomin <- renderText(paste("Zoom on package : ", func)) + output$info <- renderText(paste("Information on : ", func)) + curentd3 <<- func + + output$main_plot1 <- renderVisNetwork({ + plot(dep1, block = TRUE) }) - - }) - - observeEvent(input$showsourcebtn,{ - if (length(isolate(input$packageslist))>0){ - dep<-my.allDepFunction(isolate(input$packageslist), unlist(strsplit(input$functionlist, split = ";"))) - showModal( - modalDialog( - # Output: Tabset w/ plot, summary, and table ---- - tabsetPanel(type = "tabs", - tabPanel("Source code", fluidRow( column(12,verbatimTextOutput("console")))), - tabPanel("Variable definitions timelines", plotOutput('VDT',width = "100%",height = "500px")) - - ), - size='l', - easyClose = T - ) - ) - + curentd2 <<- dep1 } - - #----------- finding and printing the source code - srcode<-getAnywhere( dep[["Nomfun"]][input$chossefunctionplot_selected,2]%>%as.character()) - #readScript(txt= paste(deparse(body()%>%as.character()))) - - srcode$objs[[1]]%>% - body(.)%>% - as.list()%>% - #deparse(.)%>% - paste(.,collapse = " \n ")%>% - readScript(txt=.)->sc - - output$console<-renderPrint({ - srcode - }) - output$consoleH<-renderPrint({ - help( dep[["Nomfun"]][input$chossefunctionplot_selected,2]%>%as.character())->shelp - utils:::print.help_files_with_topic(shelp) - }) - #Variable definitions timelines - output$VDT<-renderPlot({ - - dtm = getDetailedTimelines(sc, getInputs(sc)) - #-plotting - dtm$start<-ifelse(dtm$defined,dtm$step,NA) - dtm$end<-ifelse(dtm$used,dtm$step,NA) - dtm%>%filter(!is.na(start) | !is.na(end))->dtm - - - dtm%>% - ggplot()+ - geom_point(aes(x=start,y=var,color="Start"),shape=16,size=5)+ - geom_point(aes(x=end,y=var,color="End"),size=5,shape=2)+ - scale_color_manual(values=c("red","blue"),name="")+ - theme_minimal(base_size = 16)+ - labs(x="Steps",y="variable")+ - theme(legend.position = "top") - + }) + }) + + ### chossefunction + + observe({ + input$chargedf + isolate({ + input$packageslist + sapply(input$packageslist, function(x) { + library(x, character.only = TRUE) }) + allFun <- unique(unlist(sapply(input$packageslist, function(x) { + allFunctionEnv(paste0("package:", x)) + }))) + + 
updateSelectizeInput(session, inputId = "functionlist", choices = allFun) }) - - observe({ - - if (!is.null(input$main_plot1_selected) && input$main_plot1_selected != "") { - isolate({ - pck <- curentd3 - - # print(pck) - - func <- as.character(curentd2$Nomfun$label[input$main_plot1_selected == curentd2$Nomfun$id]) - # print(func) - try(add.html.help(pck, func), TRUE) - - if (length(htmlTreeParse(paste0(getwd(), "/temp.html"))$children$html) > 0) { - output$help <- renderUI(includeHTML(paste0(getwd(), "/temp.html"))) - - } else { - output$help <- renderUI("Not available help for this function") - } - }) - } else { - - output$help <- renderUI("Select a function") + }) + + output$chossefunctionplot <- renderVisNetwork({ + input$makegraph + + isolate({ + if (input$makegraph >= 1) { + dep <- my.allDepFunction(input$packageslist, unlist(strsplit(input$functionlist, split = ";"))) + #- lets exlude the ones in the base or more widely used ones + # which(dep[["Nomfun"]]$label%in%c('papply','stop','warning','logger.warn','logger.error','logger.debug','logger.severe','logger.info'))->excludes + # dep[["fromto"]]<-(dep[["fromto"]])%>%dplyr::filter(!(from%in%excludes))%>%dplyr::filter(!(to%in%excludes)) + #- nolink + # which(!(dep[["Nomfun"]]$id%in%as.numeric(unlist(dep[["fromto"]]))))->nolinks + # dep[["Nomfun"]]<-dep[["Nomfun"]]%>%dplyr::filter(!(id%in%c(excludes,nolinks))) + #-- plotting + visNetwork(dep[[1]], dep[[2]]) %>% + visGroups() %>% + visOptions( + selectedBy = "group", + collapse = TRUE, + highlightNearest = TRUE, + nodesIdSelection = list(enabled = TRUE) + ) %>% + visExport() %>% + visPhysics(stabilization = list(iterations = 100)) %>% + visEdges( + arrows = list(from = list(enabled = TRUE)), + color = list(color = "lightblue", highlight = "red") + ) %>% + visLegend(zoom = T, width = 0.1) -> netw + if (input$igraphcheck) { + netw %>% visIgraphLayout() -> netw } - + return(netw) + } }) - - observe({ - - if (!is.null(input$main_plot_selected) && input$main_plot_selected != "") { - - func <- as.character(curentd1$Nomfun$label[input$main_plot_selected == curentd1$Nomfun$id]) - - output$Groupebutton <- renderUI({ - - div(hr(), actionButton("GOFunc1", paste0("Launch zoom on : ", func), icon = icon("line-chart")), align = "center") - - }) - } else { - output$Groupebutton <- renderUI({ - NULL - }) - } - + }) + + observeEvent(input$showsourcebtn, { + if (length(isolate(input$packageslist)) > 0) { + dep <- my.allDepFunction(isolate(input$packageslist), unlist(strsplit(input$functionlist, split = ";"))) + showModal( + modalDialog( + # Output: Tabset w/ plot, summary, and table ---- + tabsetPanel( + type = "tabs", + tabPanel("Source code", fluidRow(column(12, verbatimTextOutput("console")))), + tabPanel("Variable definitions timelines", plotOutput("VDT", width = "100%", height = "500px")) + ), + size = "l", + easyClose = T + ) + ) + } + + #----------- finding and printing the source code + srcode <- getAnywhere(dep[["Nomfun"]][input$chossefunctionplot_selected, 2] %>% as.character()) + # readScript(txt= paste(deparse(body()%>%as.character()))) + + srcode$objs[[1]] %>% + body(.) %>% + as.list() %>% + # deparse(.)%>% + paste(., collapse = " \n ") %>% + readScript(txt = .) -> sc + + output$console <- renderPrint({ + srcode }) - - - - observe({ - - input$GObott - # input$file1 will be NULL initially. After the user selects and uploads a file, it will be a data frame with 'name', 'size', - # 'type', and 'datapath' columns. 
The 'datapath' column will contain the local filenames where the data can be found. - - inFile <- input$file1 - - if (!is.null(inFile)) { - dep <- data.graph.script(inFile$datapath) - output$plotscript <- renderVisNetwork({ - plot(dep, block = TRUE) - }) - } + output$consoleH <- renderPrint({ + help(dep[["Nomfun"]][input$chossefunctionplot_selected, 2] %>% as.character()) -> shelp + utils:::print.help_files_with_topic(shelp) + }) + # Variable definitions timelines + output$VDT <- renderPlot({ + dtm <- getDetailedTimelines(sc, getInputs(sc)) + #-plotting + dtm$start <- ifelse(dtm$defined, dtm$step, NA) + dtm$end <- ifelse(dtm$used, dtm$step, NA) + dtm %>% filter(!is.na(start) | !is.na(end)) -> dtm + + + dtm %>% + ggplot() + + geom_point(aes(x = start, y = var, color = "Start"), shape = 16, size = 5) + + geom_point(aes(x = end, y = var, color = "End"), size = 5, shape = 2) + + scale_color_manual(values = c("red", "blue"), name = "") + + theme_minimal(base_size = 16) + + labs(x = "Steps", y = "variable") + + theme(legend.position = "top") }) + }) + + observe({ + if (!is.null(input$main_plot1_selected) && input$main_plot1_selected != "") { + isolate({ + pck <- curentd3 + + # print(pck) + + func <- as.character(curentd2$Nomfun$label[input$main_plot1_selected == curentd2$Nomfun$id]) + # print(func) + try(add.html.help(pck, func), TRUE) + + if (length(htmlTreeParse(paste0(getwd(), "/temp.html"))$children$html) > 0) { + output$help <- renderUI(includeHTML(paste0(getwd(), "/temp.html"))) + } else { + output$help <- renderUI("Not available help for this function") + } + }) + } else { + output$help <- renderUI("Select a function") + } + }) + + observe({ + if (!is.null(input$main_plot_selected) && input$main_plot_selected != "") { + func <- as.character(curentd1$Nomfun$label[input$main_plot_selected == curentd1$Nomfun$id]) + + output$Groupebutton <- renderUI({ + div(hr(), actionButton("GOFunc1", paste0("Launch zoom on : ", func), icon = icon("line-chart")), align = "center") + }) + } else { + output$Groupebutton <- renderUI({ + NULL + }) + } + }) + + + + observe({ + input$GObott + # input$file1 will be NULL initially. After the user selects and uploads a file, it will be a data frame with 'name', 'size', + # 'type', and 'datapath' columns. The 'datapath' column will contain the local filenames where the data can be found. 
+ + inFile <- input$file1 + + if (!is.null(inFile)) { + dep <- data.graph.script(inFile$datapath) + output$plotscript <- renderVisNetwork({ + plot(dep, block = TRUE) + }) + } + }) }) diff --git a/shiny/Pecan.depend/UI.R b/shiny/Pecan.depend/UI.R index 082a58d8942..dc445cd5cb0 100644 --- a/shiny/Pecan.depend/UI.R +++ b/shiny/Pecan.depend/UI.R @@ -1,86 +1,95 @@ dashboardPage( dashboardHeader(title = "PEcAn dependencies graphs"), - dashboardSidebar(sidebarMenu(id = "Tabsetpan", - menuItem("Packages", tabName = "Packages", icon = icon("archive")), - conditionalPanel(condition = "input.Tabsetpan === 'Packages'", - selectInput('packages', "Package(s) :", choices = installed.packages()[,1]%>%grep("^PEcAn",.,value = T,ignore.case = T), multiple = T, width = "100%"), - div(actionButton("GOPackage", "Go !",icon = icon("line-chart")), align = "center") - ), - menuItem("Functions", tabName = "Functions", icon = icon("code")), - conditionalPanel(condition = "input.Tabsetpan === 'Functions'", - selectInput('package', "Package : ", choices = installed.packages()[,1]%>%grep("^PEcAn",.,value = T,ignore.case = T), multiple = FALSE, width = "100%"), - div(actionButton("GOFunc2", "Go !",icon = icon("line-chart")), align = "center") - ), - menuItem("Custom", tabName = "Custom", icon = icon("th")) - + dashboardSidebar(sidebarMenu( + id = "Tabsetpan", + menuItem("Packages", tabName = "Packages", icon = icon("archive")), + conditionalPanel( + condition = "input.Tabsetpan === 'Packages'", + selectInput("packages", "Package(s) :", choices = installed.packages()[, 1] %>% grep("^PEcAn", ., value = T, ignore.case = T), multiple = T, width = "100%"), + div(actionButton("GOPackage", "Go !", icon = icon("line-chart")), align = "center") + ), + menuItem("Functions", tabName = "Functions", icon = icon("code")), + conditionalPanel( + condition = "input.Tabsetpan === 'Functions'", + selectInput("package", "Package : ", choices = installed.packages()[, 1] %>% grep("^PEcAn", ., value = T, ignore.case = T), multiple = FALSE, width = "100%"), + div(actionButton("GOFunc2", "Go !", icon = icon("line-chart")), align = "center") + ), + menuItem("Custom", tabName = "Custom", icon = icon("th")) )), dashboardBody( # Boxes need to be put in a row (or column) - tags$head(tags$link(rel='stylesheet', type='text/css', href='style.css')), + tags$head(tags$link(rel = "stylesheet", type = "text/css", href = "style.css")), tabItems( # First tab content - tabItem(tabName = "Packages", - # fluidRow( - # column(3, div(h3('Package(s) selection :'), align = "center")), - # column(6, br(), selectInput('packages', NULL, choices = installed.packages()[,1], multiple = T, width = "100%")), - # column(3, br(), div(actionButton("GOPackage", "Launch",icon = icon("line-chart")), align = "center")) - # ), - # hr(), - - fluidRow( - box( - solidHeader = TRUE, collapsible = TRUE, title = "Dependencies between package(s)", - status = "primary", - checkboxGroupInput("variablesp", "Dependencies to show:", - c("Imports" = "Imports", - "Suggests" = "Suggests", - "Depends" = "Depends"),selected = "Depends"), - visNetworkOutput("main_plot", width = "100%",height = "750px"), - br() - ,width = 12 - ), - box( - solidHeader = TRUE, collapsible = TRUE, title = "Informations", - status = "primary", - div( - dataTableOutput("tabledep"), - uiOutput("Groupebutton"), - align="center" - ), - width=12) - ) - - ), - tabItem(tabName = "Functions", - fluidRow( - box( - solidHeader = TRUE, collapsible = TRUE, title = "Dependencies between functions", - status = "primary", - 
div(h4(textOutput("zoomin")), align = "center"), - visNetworkOutput("main_plot1", width = "100%",height = "750px"), - br() - ,width = 12 - ), - box( - solidHeader = TRUE, collapsible = TRUE, title = "Informations", - status = "primary", - div( - # h4(textOutput("info")), - dataTableOutput("datatable2") - ,align="center" - ), - width=12) + tabItem( + tabName = "Packages", + # fluidRow( + # column(3, div(h3('Package(s) selection :'), align = "center")), + # column(6, br(), selectInput('packages', NULL, choices = installed.packages()[,1], multiple = T, width = "100%")), + # column(3, br(), div(actionButton("GOPackage", "Launch",icon = icon("line-chart")), align = "center")) + # ), + # hr(), + + fluidRow( + box( + solidHeader = TRUE, collapsible = TRUE, title = "Dependencies between package(s)", + status = "primary", + checkboxGroupInput("variablesp", "Dependencies to show:", + c( + "Imports" = "Imports", + "Suggests" = "Suggests", + "Depends" = "Depends" ), - - fluidRow( - box( - uiOutput("help"),width = 12 - ) - ) + selected = "Depends" + ), + visNetworkOutput("main_plot", width = "100%", height = "750px"), + br(), + width = 12 + ), + box( + solidHeader = TRUE, collapsible = TRUE, title = "Informations", + status = "primary", + div( + dataTableOutput("tabledep"), + uiOutput("Groupebutton"), + align = "center" + ), + width = 12 + ) + ) + ), + tabItem( + tabName = "Functions", + fluidRow( + box( + solidHeader = TRUE, collapsible = TRUE, title = "Dependencies between functions", + status = "primary", + div(h4(textOutput("zoomin")), align = "center"), + visNetworkOutput("main_plot1", width = "100%", height = "750px"), + br(), + width = 12 + ), + box( + solidHeader = TRUE, collapsible = TRUE, title = "Informations", + status = "primary", + div( + # h4(textOutput("info")), + dataTableOutput("datatable2"), + align = "center" + ), + width = 12 + ) + ), + fluidRow( + box( + uiOutput("help"), + width = 12 + ) + ) ), # tabPanel("Script", - # - # + # + # # fluidRow( # box( # fileInput('file1', 'Choose R File', @@ -89,57 +98,57 @@ dashboardPage( # ,width = 12) # ) # ), - - tabItem(tabName = "Custom", - - - fluidRow( - box( - fluidRow( - column(width=4, - pickerInput(inputId = "packageslist" , "Package(s) :", choices = installed.packages()[,1]%>%grep("^PEcAn",.,value = T,ignore.case = T), multiple = TRUE, - options = list( - `actions-box` = TRUE, - `live-search` = TRUE) - ) - ), - column(width=2, - br(), div(actionButton("chargedf", "Find functions", style = "padding: 8px 20px 8px 20px;"),align="center") - ), - column(width=4, - #selectizeInput(inputId = "functionlist" , "Function(s) :", choices = NULL, multiple = TRUE), - pickerInput( - inputId = "functionlist", - label = "Function(s) :", - choices = NULL, - options = list( - `actions-box` = TRUE, - `live-search` = TRUE), - multiple = TRUE - ) - ), - column(width=2, - br(), div(actionButton("makegraph", "Make graph", style = "padding: 8px 20px 8px 20px;"),align = "center") - ) - ), - fluidRow( - column(4,checkboxInput('igraphcheck','Igraph Layout (more stable layout)',value = F)), - column(4, actionButton("showsourcebtn", "Show the source", style = "padding: 8px 20px 8px 20px;")), - column(4) - + + tabItem( + tabName = "Custom", + fluidRow( + box( + fluidRow( + column( + width = 4, + pickerInput( + inputId = "packageslist", "Package(s) :", choices = installed.packages()[, 1] %>% grep("^PEcAn", ., value = T, ignore.case = T), multiple = TRUE, + options = list( + `actions-box` = TRUE, + `live-search` = TRUE + ) + ) + ), + column( + width = 2, + br(), 
div(actionButton("chargedf", "Find functions", style = "padding: 8px 20px 8px 20px;"), align = "center") + ), + column( + width = 4, + # selectizeInput(inputId = "functionlist" , "Function(s) :", choices = NULL, multiple = TRUE), + pickerInput( + inputId = "functionlist", + label = "Function(s) :", + choices = NULL, + options = list( + `actions-box` = TRUE, + `live-search` = TRUE ), - - hr(), - visNetworkOutput("chossefunctionplot", width = "100%",height = "750px"), - br(), - width = 12) + multiple = TRUE + ) + ), + column( + width = 2, + br(), div(actionButton("makegraph", "Make graph", style = "padding: 8px 20px 8px 20px;"), align = "center") ) + ), + fluidRow( + column(4, checkboxInput("igraphcheck", "Igraph Layout (more stable layout)", value = F)), + column(4, actionButton("showsourcebtn", "Show the source", style = "padding: 8px 20px 8px 20px;")), + column(4) + ), + hr(), + visNetworkOutput("chossefunctionplot", width = "100%", height = "750px"), + br(), + width = 12 + ) + ) ) - - ) ) ) - - - diff --git a/shiny/Pecan.depend/global.R b/shiny/Pecan.depend/global.R index 62e498c7d04..620637daf12 100644 --- a/shiny/Pecan.depend/global.R +++ b/shiny/Pecan.depend/global.R @@ -1,27 +1,27 @@ -#install what we need -lapply(c('dplyr', - 'shinyWidgets', - 'CodeDepends', - 'visNetwork', - 'ggplot2', - 'XML', - 'shinydashboard'),function(pkg){ - if (!(pkg %in% installed.packages()[,1])){ - install.packages(pkg) - } - library(pkg,character.only = TRUE,quietly = TRUE) - } -) +# install what we need +lapply(c( + "dplyr", + "shinyWidgets", + "CodeDepends", + "visNetwork", + "ggplot2", + "XML", + "shinydashboard" +), function(pkg) { + if (!(pkg %in% installed.packages()[, 1])) { + install.packages(pkg) + } + library(pkg, character.only = TRUE, quietly = TRUE) +}) -if (!('DependenciesGraphs' %in% installed.packages()[,1])) devtools::install_github("datastorm-open/DependenciesGraphs") +if (!("DependenciesGraphs" %in% installed.packages()[, 1])) devtools::install_github("datastorm-open/DependenciesGraphs") curentd2 <<- NULL curentd1 <<- NULL curentd3 <<- NULL #------------- My dependen - I added group to this -my.allDepFunction<-function (envir, name.functions) -{ +my.allDepFunction <- function(envir, name.functions) { envir2 <- paste0("package:", envir) toutfonc <- linksForOne(envir2, name.functions) link <- toutfonc @@ -29,35 +29,32 @@ my.allDepFunction<-function (envir, name.functions) Visdata <- list() Nomfun <- functions.list #--- source env - envs.found<-lapply(functions.list,find) + envs.found <- lapply(functions.list, find) #-taking out the my own all functions packge - lapply(envs.found, function(ll){ - ll[which(!(ll%in%c("package:Pecan.functions")))][1]->tmp - if (length(tmp)!=0){ - return((strsplit(tmp,":")[[1]])[2]) - }else{ + lapply(envs.found, function(ll) { + ll[which(!(ll %in% c("package:Pecan.functions")))][1] -> tmp + if (length(tmp) != 0) { + return((strsplit(tmp, ":")[[1]])[2]) + } else { return("Pecan.functions") } - - })%>%unlist()->envs.fi - Nomfun <- data.frame(cbind(id = 1:length(Nomfun), label = Nomfun,group=envs.fi)) - + }) %>% unlist() -> envs.fi + Nomfun <- data.frame(cbind(id = 1:length(Nomfun), label = Nomfun, group = envs.fi)) + if (!is.null(link)) { fromto <- matrix(0, ncol = dim(link)[2], nrow = dim(link)[1]) if (length(fromto) > 0) { for (i in 1:dim(link)[1]) { - fromto[i, 1] <- which(as.character(link[i, 2]) == - Nomfun[, 2]) - fromto[i, 2] <- which(as.character(link[i, 1]) == - Nomfun[, 2]) + fromto[i, 1] <- which(as.character(link[i, 2]) == + Nomfun[, 2]) + fromto[i, 
2] <- which(as.character(link[i, 1]) == + Nomfun[, 2]) if (dim(link)[2] > 2) { - fromto[i, 3:length(link[i, ])] <- link[i, 3:length(link[i, - ])] + fromto[i, 3:length(link[i, ])] <- link[i, 3:length(link[i, ])] } } } - } - else { + } else { fromto <- cbind(0, 0) } fromto <- data.frame(fromto) @@ -67,4 +64,3 @@ my.allDepFunction<-function (envir, name.functions) class(Visdata) <- "dependenciesGraphs" return(Visdata) } - diff --git a/shiny/SDAdashboard/Utilities/Math_Utility.R b/shiny/SDAdashboard/Utilities/Math_Utility.R index 7b24fd1c62a..ebeeaaa188f 100644 --- a/shiny/SDAdashboard/Utilities/Math_Utility.R +++ b/shiny/SDAdashboard/Utilities/Math_Utility.R @@ -1,48 +1,48 @@ -ts.producer.FA <-function(All.my.data, site_id, step=1){ +ts.producer.FA <- function(All.my.data, site_id, step = 1) { + site.cols <- which(attr(All.my.data[["FORECAST"]][[1]], "Site") == site_id) -site.cols<-which(attr(All.my.data[["FORECAST"]][[1]],'Site')==site_id) - -output <- c('FORECAST','ANALYSIS')%>% - purrr::map_df(function(listFA){ - curr_obj <-All.my.data[[listFA]] - curr_obj<- curr_obj[1:step] - curr_obj%>% - purrr::map2_df(seq_along(curr_obj),function(state.vars,t){ - as.data.frame(state.vars) [,site.cols] %>% - tidyr::gather(Variable, Value) %>% - group_by(Variable) %>% - summarise( - Means=mean(Value, na.rm=T), - Lower=quantile(Value,0.025, na.rm=T), - Upper=quantile(Value,0.975, na.rm=T) + output <- c("FORECAST", "ANALYSIS") %>% + purrr::map_df(function(listFA) { + curr_obj <- All.my.data[[listFA]] + curr_obj <- curr_obj[1:step] + curr_obj %>% + purrr::map2_df(seq_along(curr_obj), function(state.vars, t) { + as.data.frame(state.vars)[, site.cols] %>% + tidyr::gather(Variable, Value) %>% + group_by(Variable) %>% + summarise( + Means = mean(Value, na.rm = T), + Lower = quantile(Value, 0.025, na.rm = T), + Upper = quantile(Value, 0.975, na.rm = T) ) %>% - mutate(Time=t, - Type=listFA) - - }) - }) - -return(output) + mutate( + Time = t, + Type = listFA + ) + }) + }) + + return(output) } expand.matrix <- function(X) { rows.c <- colnames(X)[(which(!(colnames(X) %in% row.names(X))))] rbind(X, matrix(0, length(rows.c), length(colnames(X))) %>% - `rownames<-`(rows.c)) -> X2 + `rownames<-`(rows.c)) -> X2 X2 } -generate_colors_sda <-function(){ - pink <<- col2rgb("deeppink") - alphapink <<- rgb(pink[1], pink[2], pink[3], 180, max = 255) - green <<- col2rgb("green") +generate_colors_sda <- function() { + pink <<- col2rgb("deeppink") + alphapink <<- rgb(pink[1], pink[2], pink[3], 180, max = 255) + green <<- col2rgb("green") alphagreen <<- rgb(green[1], green[2], green[3], 75, max = 255) - blue <<- col2rgb("blue") - alphablue <<- rgb(blue[1], blue[2], blue[3], 75, max = 255) - purple <<- col2rgb("purple") + blue <<- col2rgb("blue") + alphablue <<- rgb(blue[1], blue[2], blue[3], 75, max = 255) + purple <<- col2rgb("purple") alphapurple <<- rgb(purple[1], purple[2], purple[3], 75, max = 255) - brown <<- col2rgb("brown") + brown <<- col2rgb("brown") alphabrown <<- rgb(brown[1], brown[2], brown[3], 30, max = 255) } diff --git a/shiny/SDAdashboard/Utilities/uihelp.R b/shiny/SDAdashboard/Utilities/uihelp.R index e85da95e581..074230fff05 100644 --- a/shiny/SDAdashboard/Utilities/uihelp.R +++ b/shiny/SDAdashboard/Utilities/uihelp.R @@ -1,27 +1,28 @@ -jumbotron<-function(...){ - HTML(paste0("
",...,"
")) +jumbotron <- function(...) { + HTML(paste0("
", ..., "
")) } -card.light<-function(header="Header",title="Primary card title",border="light",txc=NULL,Tcol=NULL,...){ - classs<-paste0("card border-",border) - bg<-ifelse(!is.null(Tcol),paste0('bg-',Tcol),"") - classs<-paste(classs,bg) - txcc<-ifelse(!is.null(txc),paste0('text-',txc),"") - classs<-paste(classs,txcc) - - HTML(paste0('
',ifelse(!is.null(header),paste0('

',header,'

'),''),' +card.light <- function(header = "Header", title = "Primary card title", border = "light", txc = NULL, Tcol = NULL, ...) { + classs <- paste0("card border-", border) + bg <- ifelse(!is.null(Tcol), paste0("bg-", Tcol), "") + classs <- paste(classs, bg) + txcc <- ifelse(!is.null(txc), paste0("text-", txc), "") + classs <- paste(classs, txcc) + + HTML(paste0('
', ifelse(!is.null(header), paste0('

', header, "

"), ""), '
-

',...,'

+

', ..., "

-
')) +
")) } -empty.card<-function(...){ - +empty.card <- function(...) { withTags( - div(class = "card", - div(class="card-body", - p(class="card-text",...) - ) + div( + class = "card", + div( + class = "card-body", + p(class = "card-text", ...) + ) ) ) } diff --git a/shiny/SDAdashboard/server.R b/shiny/SDAdashboard/server.R index dbb7ef1913b..daa914b6de0 100644 --- a/shiny/SDAdashboard/server.R +++ b/shiny/SDAdashboard/server.R @@ -1,4 +1,3 @@ - # This is the server logic for a Shiny web application. # You can find out more about building applications with Shiny here: # @@ -9,9 +8,9 @@ library(shiny) library(shinytoastr) library(highcharter) library(leaflet) -source(file.path('Utilities','uihelp.R'),local = T)$value -source(file.path('Utilities','Math_Utility.R'),local = T)$value -suppressPackageStartupMessages(require(leaflet.extras,quietly = T)) +source(file.path("Utilities", "uihelp.R"), local = T)$value +source(file.path("Utilities", "Math_Utility.R"), local = T)$value +suppressPackageStartupMessages(require(leaflet.extras, quietly = T)) library(visNetwork) library(dplyr) library(purrr) @@ -22,7 +21,7 @@ library(geosphere) library(sf) library(RColorBrewer) library(devtools) -options(shiny.maxRequestSize=30*1024^2) +options(shiny.maxRequestSize = 30 * 1024^2) # Setting up the maps @@ -31,602 +30,628 @@ pretty_map <- "https://api.mapbox.com/styles/v1/para2x/cja2s4zhq10si2ul5btb1q6w9 mb_attribution <- "
PEcAn" shinyServer(function(input, output, session) { - #Data holders - values <- reactiveValues(MultiSite=NULL, dead_data=NULL, Multisite.info=NULL, Selected.Site=NULL) - + # Data holders + values <- reactiveValues(MultiSite = NULL, dead_data = NULL, Multisite.info = NULL, Selected.Site = NULL) + #----- First shiny modal ---- showModal( modalDialog( - title = "Help me find the SDA data", - tagList( - tabsetPanel( - tabPanel('Submit a SDA job', + title = "Help me find the SDA data", + tagList( + tabsetPanel( + tabPanel( + "Submit a SDA job", br(), - HTML( - '
+ HTML( + '
Settings
' - ),br(), - fluidRow( - column(12, - fileInput('xmlfileinput2',h4('Pecan xml settings file:'),width = "100%")) - ), + ), br(), + fluidRow( + column( + 12, + fileInput("xmlfileinput2", h4("Pecan xml settings file:"), width = "100%") + ) + ), HTML( '
PecanAssimSequential package
' - ),br(), + ), br(), fluidRow( - column(4,textInput('gitrepo',h4('Github repo:'),width = "100%", value="para2x/pecan")), - column(4,textInput('gitb',h4('Github ref/branch:'),width = "100%", value="MultiSite_SDA")), - column(4,textInput('gitsdir',h4('Github subdir:'),width = "100%", value="modules/assim.sequential")) - - ),HTML( + column(4, textInput("gitrepo", h4("Github repo:"), width = "100%", value = "para2x/pecan")), + column(4, textInput("gitb", h4("Github ref/branch:"), width = "100%", value = "MultiSite_SDA")), + column(4, textInput("gitsdir", h4("Github subdir:"), width = "100%", value = "modules/assim.sequential")) + ), HTML( '
Observe data
' - ),br(), + ), br(), fluidRow( - column(8, - fileInput('obsfileinput2',h4('Observed Rdata:'),width = "100%")), - column(4,textInput('pathsda2',h4('Input ID:'),width = "100%", value="")) - ),HTML( - '
'), + column( + 8, + fileInput("obsfileinput2", h4("Observed Rdata:"), width = "100%") + ), + column(4, textInput("pathsda2", h4("Input ID:"), width = "100%", value = "")) + ), HTML( + '
' + ), fluidRow( column(3), - column(6,br(),actionButton('submitjob',h4('Submit'), width = "100%", class="btn-primary")), + column(6, br(), actionButton("submitjob", h4("Submit"), width = "100%", class = "btn-primary")), column(3) ) - ), - tabPanel('Load an SDA output', - fluidRow( - column(12, - fileInput('sdainputf',h4('SDA output file:'),width = "100%")) - ), - HTML( - '
+ ), + tabPanel( + "Load an SDA output", + fluidRow( + column( + 12, + fileInput("sdainputf", h4("SDA output file:"), width = "100%") + ) + ), + HTML( + '
OR
' - ),br(), - fluidRow( - column(12, - fileInput('xmlfileinput',h4('Pecan xml settings file:'),width = "100%")) - ), - HTML( - '
+ ), br(), + fluidRow( + column( + 12, + fileInput("xmlfileinput", h4("Pecan xml settings file:"), width = "100%") + ) + ), + HTML( + '
OR
' - ),br(), - fluidRow( - column(12,textInput('pathsda',h4('Path to the SDA folder:'),width = "100%")) - ), - HTML( - '
+ ), br(), + fluidRow( + column(12, textInput("pathsda", h4("Path to the SDA folder:"), width = "100%")) + ), + HTML( + '
OR
' - ),br(), - fluidRow( - column(12,textInput('workflowid.inp',h4('Workflow ID:'),width = "100%")) - ), - HTML( - '
+ ), br(), + fluidRow( + column(12, textInput("workflowid.inp", h4("Workflow ID:"), width = "100%")) + ), + HTML( + '
Options
' - ),br(), - fluidRow( - column(6,selectInput('machinid',h4('Machine'),c(), width = "100%")), - column(6,awesomeCheckboxGroup( - inputId = "options_input", - label = h4("Options"), - choices = c("Active run ?","Multi-Site ?"), - selected = c("Multi-Site ?"), - inline = TRUE, - status = "primary" - )) - ),HTML( - '
'), - fluidRow( - column(3), - column(6,br(),actionButton('loadinput',h4('Load'), width = "100%", class="btn-primary")), - column(3) - )) - ) - ), - footer = NULL, - size = 'm' - ) + ), br(), + fluidRow( + column(6, selectInput("machinid", h4("Machine"), c(), width = "100%")), + column(6, awesomeCheckboxGroup( + inputId = "options_input", + label = h4("Options"), + choices = c("Active run ?", "Multi-Site ?"), + selected = c("Multi-Site ?"), + inline = TRUE, + status = "primary" + )) + ), HTML( + '
' + ), + fluidRow( + column(3), + column(6, br(), actionButton("loadinput", h4("Load"), width = "100%", class = "btn-primary")), + column(3) + ) + ) + ) + ), + footer = NULL, + size = "m" + ) ) - - - - #-- - observeEvent(input$submitjob,{ - - - withProgress(message = 'Calculation in progress', - detail = 'This may take a while...', value = 0, { - browser() - # lets first try to minimaly install the version of pecan assim that user is interested in - tryCatch( - { - tmpd<-tempdir() - withr::with_libpaths( - new=tmpd, - devtools::install_github( - repo = input$gitrepo, - ref = input$gitb, - subdir = input$gitsdir, - dependencies = FALSE, - quick = TRUE, - reload=TRUE, - force = TRUE, - upgrade_dependencies = FALSE - ) - ) - incProgress(1/15, message="Finished installing packages") - library(PEcAnAssimSequential, lib.loc =tmpd) - # Use a promise here to send the job and then close the dialog - toastr_success("PEcAnAssimSequential package was installed successfully.") - }, - error = function(e) { - toastr_error(title = "There is an error in installing assimilation package.", conditionMessage(e)) - } - ) - - - - }) + + + #-- + observeEvent(input$submitjob, { + withProgress( + message = "Calculation in progress", + detail = "This may take a while...", + value = 0, + { + browser() + # lets first try to minimaly install the version of pecan assim that user is interested in + tryCatch( + { + tmpd <- tempdir() + withr::with_libpaths( + new = tmpd, + devtools::install_github( + repo = input$gitrepo, + ref = input$gitb, + subdir = input$gitsdir, + dependencies = FALSE, + quick = TRUE, + reload = TRUE, + force = TRUE, + upgrade_dependencies = FALSE + ) + ) + incProgress(1 / 15, message = "Finished installing packages") + library(PEcAnAssimSequential, lib.loc = tmpd) + # Use a promise here to send the job and then close the dialog + toastr_success("PEcAnAssimSequential package was installed successfully.") + }, + error = function(e) { + toastr_error(title = "There is an error in installing assimilation package.", conditionMessage(e)) + } + ) + } + ) }) - + #-- load function---- - observeEvent(input$loadinput,{ - + observeEvent(input$loadinput, { tryCatch( { # I create a new env and then load the data into that and then convert it to list load_data.env <- new.env(parent = baseenv()) load(input$sdainputf$datapath, load_data.env) load_data.list <- as.list(load_data.env) - values$dead_data <-load_data.list - values$MultiSite <- ifelse(any(grepl("Multi-Site",input$options_input)),TRUE,NULL) + values$dead_data <- load_data.list + values$MultiSite <- ifelse(any(grepl("Multi-Site", input$options_input)), TRUE, NULL) - #if it's a multi site lets read in the basics - if (values$MultiSite) values$Multisite.info <-list(Sites=values$dead_data$FORECAST[[1]] %>% attr('Site') %>% unique(), - Variables=values$dead_data$FORECAST[[1]] %>% colnames() %>% unique() - ) + # if it's a multi site lets read in the basics + if (values$MultiSite) { + values$Multisite.info <- list( + Sites = values$dead_data$FORECAST[[1]] %>% attr("Site") %>% unique(), + Variables = values$dead_data$FORECAST[[1]] %>% colnames() %>% unique() + ) + } removeModal() - toastr_success(paste0("

",input$sdainputf$name," was successfully imported.

"), newestOnTop=T,position = "top-right") + toastr_success(paste0("

", input$sdainputf$name, " was successfully imported.

"), newestOnTop = T, position = "top-right") }, error = function(e) { toastr_error(title = "Loading error", conditionMessage(e)) } ) }) - - + + # Load the UI ----- - output$mainui <-renderUI({ + output$mainui <- renderUI({ req(values$MultiSite) # if we wanted have Multisite UI - ML.site<-tagList( - + ML.site <- tagList( bs_accordion(id = "meet_the_beatles") %>% - bs_append(title = "Setup", - content = tagList(fluidRow( - column(12,leafletOutput("mymap",height = "800px",width = "100%"), - # Shiny versions prior to 0.11 should use class = "modal" instead. - absolutePanel(id = "controls", class = "panel panel-default", fixed = FALSE, - draggable = TRUE, top = 1, left = "auto", right = 15, bottom = "auto", - #style="z-index:15000;", - width = 250, height = "auto", - - - column(12, - fluidRow( - column(12, HTML( - '
+ bs_append(
+ title = "Setup",
+ content = tagList(fluidRow(
+ column(
+ 12, leafletOutput("mymap", height = "800px", width = "100%"),
+ # Shiny versions prior to 0.11 should use class = "modal" instead.
+ absolutePanel(
+ id = "controls", class = "panel panel-default", fixed = FALSE,
+ draggable = TRUE, top = 1, left = "auto", right = 15, bottom = "auto",
+ # style="z-index:15000;",
+ width = 250, height = "auto",
+ column(
+ 12,
+ fluidRow(
+ column(
+ 12, HTML(
+ '
Description
' - ),br(), - HTML(paste0(' + ), br(), + HTML(paste0('
- + - + - + -

# of sites:

',values$Multisite.info$Sites %>% length,'

', values$Multisite.info$Sites %>% length(), '

# of state variables:

',values$Multisite.info$Variables %>% length,'

', values$Multisite.info$Variables %>% length(), '

# of observations:

',values$dead_data$t,'

', values$dead_data$t, "

')), - - HTML( - '
+ ")), + HTML( + '
Options
' - )) - ), - br(), - - selectizeInput('boverlay','Background overlay', - choices= list('None', - 'Eco-region L1 overlay'='e1', - 'Eco-region L2 overlay'='e2')), - sliderInput('tileop','Background Opacity',value = 0.25,min=0.1,max=1,step = 0.05), - prettyCheckboxGroup( - inputId = "AllmapOpt", - label = "Analysis overlay :", - choices = list("Forecast Spatial Correlation"='spcor', - "Raster Map"='rastMap'), - inline = TRUE, - status = "success", - fill = TRUE - ), - conditionalPanel( - condition = "input.AllmapOpt !=''", - sliderInput('TFAll','Time Frame',value = 1,min=1,max=16,step = 1, animate = animationOptions(interval = 2000, loop = F)) - ) - - - ) - )) - )) + ) + ) ), + br(), + selectizeInput("boverlay", "Background overlay", + choices = list("None", + "Eco-region L1 overlay" = "e1", + "Eco-region L2 overlay" = "e2" + ) + ), + sliderInput("tileop", "Background Opacity", value = 0.25, min = 0.1, max = 1, step = 0.05), + prettyCheckboxGroup( + inputId = "AllmapOpt", + label = "Analysis overlay :", + choices = list( + "Forecast Spatial Correlation" = "spcor", + "Raster Map" = "rastMap" + ), + inline = TRUE, + status = "success", + fill = TRUE + ), + conditionalPanel( + condition = "input.AllmapOpt !=''", + sliderInput("TFAll", "Time Frame", value = 1, min = 1, max = 16, step = 1, animate = animationOptions(interval = 2000, loop = F)) + ) + ) + ) + ) + )) + ), br() ) - - if (values$MultiSite==T) return(ML.site) + + if (values$MultiSite == T) { + return(ML.site) + } }) - + # Map ------------------- output$mymap <- renderLeaflet({ - - - #aoi_boundary_HARV <- sf::st_read(file.path("Utilities", "eco-region.json")) + # aoi_boundary_HARV <- sf::st_read(file.path("Utilities", "eco-region.json")) providers <- c("Stamen.TonerLite", "Stamen.Watercolor", "CartoDB.Positron", "Acetate.terrain") - #browser() + # browser() # maping - leaflet() %>% + leaflet() %>% enableTileCaching() %>% - addProviderTiles("CartoDB.DarkMatter", group = "Dark theme")%>% - addTiles(urlTemplate = pretty_map, attribution = mb_attribution, group = "Defualt",layerId='tile') %>% - addLayersControl(position = c("bottomleft"), - baseGroups = c("Defualt","Dark theme"))%>% - addScaleBar(position="bottomleft")%>% + addProviderTiles("CartoDB.DarkMatter", group = "Dark theme") %>% + addTiles(urlTemplate = pretty_map, attribution = mb_attribution, group = "Defualt", layerId = "tile") %>% + addLayersControl( + position = c("bottomleft"), + baseGroups = c("Defualt", "Dark theme") + ) %>% + addScaleBar(position = "bottomleft") %>% addFullscreenControl() %>% - #addMeasure(position="bottomleft") %>% - addMarkers(values$dead_data$site.locs[,1] %>% as.numeric(), - values$dead_data$site.locs[,2] %>% as.numeric(), - label=row.names(values$dead_data$site.locs), group ='cities' + # addMeasure(position="bottomleft") %>% + addMarkers(values$dead_data$site.locs[, 1] %>% as.numeric(), + values$dead_data$site.locs[, 2] %>% as.numeric(), + label = row.names(values$dead_data$site.locs), group = "cities" ) %>% - #addResetMapButton() %>% + # addResetMapButton() %>% # addSearchFeatures( # targetGroups = 'cities', # options = searchFeaturesOptions( # zoom=12, openPopup = TRUE, firstTipSubmit = TRUE,textPlaceholder="Search a site id", # autoCollapse = TRUE, hideMarkerOnCollapse = TRUE )) %>% - addControl("

Help:
Please select a site.

", - position='bottomright',layerId='help') + addControl("

Help:
Please select a site.

", + position = "bottomright", layerId = "help" + ) }) # Map event ---------- - observeEvent(input$mymap_marker_click,{ + observeEvent(input$mymap_marker_click, { # cdata <- session$clientData # Finding the site ID based on selected lat long - site_id <- row.names(values$dead_data$site.locs)[which(values$dead_data$site.locs[,2]==input$mymap_marker_click$lat & values$dead_data$site.locs[,1]==input$mymap_marker_click$lng)] - - All.my.data <-list( - FORECAST=values$dead_data$FORECAST, - ANALYSIS=values$dead_data$ANALYSIS + site_id <- row.names(values$dead_data$site.locs)[which(values$dead_data$site.locs[, 2] == input$mymap_marker_click$lat & values$dead_data$site.locs[, 1] == input$mymap_marker_click$lng)] + + All.my.data <- list( + FORECAST = values$dead_data$FORECAST, + ANALYSIS = values$dead_data$ANALYSIS ) showModal( modalDialog( - size='l', + size = "l", easyClose = T, fluidRow( - column(12,h3(paste0("SiteID:",site_id," - You can either choose a time or click on Play !")), - shiny::sliderInput("timelaps", NULL, - min = 1, max = values$dead_data$t, - value = 1, step = 1, width = "100%", - animate = animationOptions(interval = 1500, loop = F)) + column( + 12, h3(paste0("SiteID:", site_id, " - You can either choose a time or click on Play !")), + shiny::sliderInput("timelaps", NULL, + min = 1, max = values$dead_data$t, + value = 1, step = 1, width = "100%", + animate = animationOptions(interval = 1500, loop = F) + ) ) ), fluidRow( - column(12, - - tabsetPanel( - tabPanel(h4("Time Series"),br(),plotlyOutput('tsplotly',height = "auto")), - tabPanel(h4("Forecast Var-Cov"),br(), - fluidRow( - column(12,visNetworkOutput("network",height = "600px")) - ) - ) - - ) + column( + 12, + tabsetPanel( + tabPanel(h4("Time Series"), br(), plotlyOutput("tsplotly", height = "auto")), + tabPanel( + h4("Forecast Var-Cov"), br(), + fluidRow( + column(12, visNetworkOutput("network", height = "600px")) + ) + ) + ) ) ) ) ) - - values$Selected.Site <- site_id + + values$Selected.Site <- site_id # time series - output$tsplotly <-renderPlotly({ - generate_colors_sda() - out.ts <- ts.producer.FA(All.my.data, site_id, 1) - #out.ts %>% filter(Variable=="AbvGrndWood") ->out.ts - - p<-out.ts %>% - ggplot(aes(x=Time,y=Means,ymax=Upper,ymin=Lower))+ - geom_ribbon(aes(fill=Type),color="white")+ - geom_line(aes(y=Means, color=Type),lwd=1.02,linetype=2)+ - #geom_point(aes(y=Means, color=Type),size=3,alpha=0.75)+ - scale_fill_manual(values = c(alphapink,alphagreen,alphablue),name="")+ - scale_color_manual(values = c(alphapink,alphagreen,alphablue),name="")+ - theme_bw(base_size = 17)+ - labs(y="")+ - facet_wrap(~Variable,scales = "free",ncol=2)+ - theme(legend.position = "top", - strip.background = element_blank(), - panel.background = element_rect(fill="#242F39"), - panel.grid.major = element_line(color="#2E3740"), - panel.grid.minor = element_line(color="#2E3740")) - - ggplotly(p,height=850) %>% - layout( - showlegend = FALSE, - margin = list(r = 10, l=0) - ) - - - }) - - #Network and matrix in one---------------------------------------- - output$network <- renderVisNetwork({ + output$tsplotly <- renderPlotly({ + generate_colors_sda() + out.ts <- ts.producer.FA(All.my.data, site_id, 1) + # out.ts %>% filter(Variable=="AbvGrndWood") ->out.ts + p <- out.ts %>% + ggplot(aes(x = Time, y = Means, ymax = Upper, ymin = Lower)) + + geom_ribbon(aes(fill = Type), color = "white") + + geom_line(aes(y = Means, color = Type), lwd = 1.02, linetype = 2) + + # geom_point(aes(y=Means, color=Type),size=3,alpha=0.75)+ + scale_fill_manual(values 
= c(alphapink, alphagreen, alphablue), name = "") + + scale_color_manual(values = c(alphapink, alphagreen, alphablue), name = "") + + theme_bw(base_size = 17) + + labs(y = "") + + facet_wrap(~Variable, scales = "free", ncol = 2) + + theme( + legend.position = "top", + strip.background = element_blank(), + panel.background = element_rect(fill = "#242F39"), + panel.grid.major = element_line(color = "#2E3740"), + panel.grid.minor = element_line(color = "#2E3740") + ) - # sites_orders <- map_chr(colnames(values$dead_data[["enkf.params"]][[1]][["Pf"]]), function(j) gsub("[\\(\\)]", + ggplotly(p, height = 850) %>% + layout( + showlegend = FALSE, + margin = list(r = 10, l = 0) + ) + }) + + # Network and matrix in one---------------------------------------- + output$network <- renderVisNetwork({ + # sites_orders <- map_chr(colnames(values$dead_data[["enkf.params"]][[1]][["Pf"]]), function(j) gsub("[\\(\\)]", # "", regmatches(j, gregexpr("\\(.*?\\)", j))[[1]])) # cols <- which(sites_orders == site_id) - # + # # rows <-which(sites_orders == site_id) - - - values$dead_data$enkf.params %>% map(~.x$Pf)->p - p<-p[[input$timelaps]] - + + values$dead_data$enkf.params %>% map(~ .x$Pf) -> p + p <- p[[input$timelaps]] + + # if nothing found - + g <- igraph::graph.adjacency(p, - mode="min", - diag = F) # For directed networks - g<-igraph::simplify(g) - igraph::V(g)$label.cex <-0.59 + mode = "min", + diag = F + ) # For directed networks + g <- igraph::simplify(g) + igraph::V(g)$label.cex <- 0.59 igraph::V(g)$label.color <- rgb(0, 0, .2, .8) - - + + data <- toVisNetworkData(g) # Adding group - data$nodes$group <- map_chr(data$nodes$id, function(j)gsub("[\\(\\)]", "", regmatches(j, gregexpr("\\(.*?\\)", j))[[1]])) - data$edges$groupf <- map_chr(data$edges$from, function(j)gsub("[\\(\\)]", "", regmatches(j, gregexpr("\\(.*?\\)", j))[[1]])) - data$edges$groupt <- map_chr(data$edges$to, function(j)gsub("[\\(\\)]", "", regmatches(j, gregexpr("\\(.*?\\)", j))[[1]])) - + data$nodes$group <- map_chr(data$nodes$id, function(j) gsub("[\\(\\)]", "", regmatches(j, gregexpr("\\(.*?\\)", j))[[1]])) + data$edges$groupf <- map_chr(data$edges$from, function(j) gsub("[\\(\\)]", "", regmatches(j, gregexpr("\\(.*?\\)", j))[[1]])) + data$edges$groupt <- map_chr(data$edges$to, function(j) gsub("[\\(\\)]", "", regmatches(j, gregexpr("\\(.*?\\)", j))[[1]])) + # # filter network - # - data$edges <- data$edges %>% filter(groupt ==site_id | groupf ==site_id) - data$nodes <- data$nodes %>% filter(group %in% c(data$edges$groupf,data$edges$groupt)) - - if (nrow(data$nodes)==0){ + # + data$edges <- data$edges %>% filter(groupt == site_id | groupf == site_id) + data$nodes <- data$nodes %>% filter(group %in% c(data$edges$groupf, data$edges$groupt)) + + if (nrow(data$nodes) == 0) { toastr_warning("No corrolation exists !") return(NULL) } # - + visNetwork(nodes = data$nodes, edges = data$edges) %>% - # visNetworkEditor()%>% + # visNetworkEditor()%>% visGroups() %>% - visOptions(selectedBy = list(variable="group", selected=site_id),# - collapse = TRUE, - highlightNearest = TRUE)%>% - + visOptions( + selectedBy = list(variable = "group", selected = site_id), # + collapse = TRUE, + highlightNearest = TRUE + ) %>% visExport() %>% - visPhysics(solver = "forceAtlas2Based", - forceAtlas2Based = list(gravitationalConstant = -150)) + visPhysics( + solver = "forceAtlas2Based", + forceAtlas2Based = list(gravitationalConstant = -150) + ) }) - toastr_success(paste0(site_id ," was selected.")) + toastr_success(paste0(site_id, " was selected.")) }) - + # 
timelaps slider ----- - observeEvent(input$timelaps,{ + observeEvent(input$timelaps, { generate_colors_sda() - site_id<- isolate(values$Selected.Site ) - - All.my.data <-list( - FORECAST=values$dead_data$FORECAST, - ANALYSIS=values$dead_data$ANALYSIS + site_id <- isolate(values$Selected.Site) + + All.my.data <- list( + FORECAST = values$dead_data$FORECAST, + ANALYSIS = values$dead_data$ANALYSIS ) - + # Time series plotly proxy out.ts <- ts.producer.FA(All.my.data, site_id, input$timelaps) - - p<-out.ts %>% - ggplot(aes(x=Time,y=Means,ymax=Upper,ymin=Lower))+ - geom_ribbon(aes(fill=Type),color="white")+ - geom_line(aes(y=Means, color=Type),lwd=1.02,linetype=2)+ - geom_point(aes(y=Means, color=Type),size=3,alpha=0.75)+ - scale_fill_manual(values = c(alphapink,alphagreen,alphablue),name="")+ - scale_color_manual(values = c(alphapink,alphagreen,alphablue),name="")+ - theme_bw(base_size = 17)+ - labs(y="")+ - facet_wrap(~Variable,scales = "free",ncol=2)+ - theme(legend.position = "top", - strip.background = element_blank(), - panel.background = element_rect(fill="#242F39"), - panel.grid.major = element_line(color="#2E3740"), - panel.grid.minor = element_line(color="#2E3740")) - - plotlygg <-ggplotly(p,height=850) %>% + + p <- out.ts %>% + ggplot(aes(x = Time, y = Means, ymax = Upper, ymin = Lower)) + + geom_ribbon(aes(fill = Type), color = "white") + + geom_line(aes(y = Means, color = Type), lwd = 1.02, linetype = 2) + + geom_point(aes(y = Means, color = Type), size = 3, alpha = 0.75) + + scale_fill_manual(values = c(alphapink, alphagreen, alphablue), name = "") + + scale_color_manual(values = c(alphapink, alphagreen, alphablue), name = "") + + theme_bw(base_size = 17) + + labs(y = "") + + facet_wrap(~Variable, scales = "free", ncol = 2) + + theme( + legend.position = "top", + strip.background = element_blank(), + panel.background = element_rect(fill = "#242F39"), + panel.grid.major = element_line(color = "#2E3740"), + panel.grid.minor = element_line(color = "#2E3740") + ) + + plotlygg <- ggplotly(p, height = 850) %>% layout( showlegend = FALSE, - margin = list(r = 10, l=0) + margin = list(r = 10, l = 0) ) - - plotlygg$x$layout$legend <-NULL - plotlygg$x$layout$showlegend <-FALSE - - - plotlyProxy("tsplotly", session) %>% - #plotlyProxyInvoke("deleteTraces", list(as.integer(0))) %>% - plotlyProxyInvoke("addTraces", plotlygg$x$data) %>% - plotlyProxyInvoke("relayout", plotlygg$x$layout) - + plotlygg$x$layout$legend <- NULL + plotlygg$x$layout$showlegend <- FALSE + + + plotlyProxy("tsplotly", session) %>% + # plotlyProxyInvoke("deleteTraces", list(as.integer(0))) %>% + plotlyProxyInvoke("addTraces", plotlygg$x$data) %>% + plotlyProxyInvoke("relayout", plotlygg$x$layout) }) - - + + # Map proxy spatial correlation ---------------- observe({ - - # if the spatial corrlation was not checked + # if the spatial corrlation was not checked if ("spcor" %in% input$AllmapOpt) { + site_id <- isolate(values$Selected.Site) + + values$dead_data$enkf.params %>% map(~ .x$Pf) -> p + p <- p[[input$TFAll]] + - site_id<- isolate(values$Selected.Site) - - values$dead_data$enkf.params %>% map(~.x$Pf)->p - p<-p[[input$TFAll]] - - # if nothing found g <- igraph::graph.adjacency(p, - mode="min", - diag = F) # For directed networks - g<-igraph::simplify(g) - igraph::V(g)$label.cex <-0.59 + mode = "min", + diag = F + ) # For directed networks + g <- igraph::simplify(g) + igraph::V(g)$label.cex <- 0.59 igraph::V(g)$label.color <- rgb(0, 0, .2, .8) - - + + data <- toVisNetworkData(g) # Adding group - data$nodes$group <- 
map_chr(data$nodes$id, function(j)gsub("[\\(\\)]", "", regmatches(j, gregexpr("\\(.*?\\)", j))[[1]])) - data$edges$groupf <- map_chr(data$edges$from, function(j)gsub("[\\(\\)]", "", regmatches(j, gregexpr("\\(.*?\\)", j))[[1]])) - data$edges$groupt <- map_chr(data$edges$to, function(j)gsub("[\\(\\)]", "", regmatches(j, gregexpr("\\(.*?\\)", j))[[1]])) + data$nodes$group <- map_chr(data$nodes$id, function(j) gsub("[\\(\\)]", "", regmatches(j, gregexpr("\\(.*?\\)", j))[[1]])) + data$edges$groupf <- map_chr(data$edges$from, function(j) gsub("[\\(\\)]", "", regmatches(j, gregexpr("\\(.*?\\)", j))[[1]])) + data$edges$groupt <- map_chr(data$edges$to, function(j) gsub("[\\(\\)]", "", regmatches(j, gregexpr("\\(.*?\\)", j))[[1]])) alllines <- data$edges %>% filter(groupf != groupt) - - if (nrow(alllines)==0) return(NULL) - - - p1<-map_df(alllines$groupt, function(x){ - values$dead_data$site.locs[which(row.names(values$dead_data$site.locs)==x),c(1,2)] %>% t %>%as.data.frame() + + if (nrow(alllines) == 0) { + return(NULL) + } + + + p1 <- map_df(alllines$groupt, function(x) { + values$dead_data$site.locs[which(row.names(values$dead_data$site.locs) == x), c(1, 2)] %>% + t() %>% + as.data.frame() }) - - p2<-map_df(alllines$groupf, function(x){ - values$dead_data$site.locs[which(row.names(values$dead_data$site.locs)==x),c(1,2)]%>% t %>%as.data.frame() + + p2 <- map_df(alllines$groupf, function(x) { + values$dead_data$site.locs[which(row.names(values$dead_data$site.locs) == x), c(1, 2)] %>% + t() %>% + as.data.frame() }) - - + + # Estimating the tickness based on value of cov - tmp.covs<-alllines %>% + tmp.covs <- alllines %>% left_join(reshape2::melt(p) %>% - mutate(from=as.character(Var1), - to=as.character(Var2))) %>% + mutate( + from = as.character(Var1), + to = as.character(Var2) + )) %>% dplyr::select(value) %>% - unlist()%>% + unlist() %>% as.numeric() - + lwds <- tmp.covs %>% - scales::rescale(to=c(2,30)) %>% + scales::rescale(to = c(2, 30)) %>% round(3) - - - leafletProxy("mymap", data=gcIntermediate(p1, - p2, - n=100, - addStartEnd=TRUE, - sp=TRUE)) %>% - removeShape(layerId = paste0("line",1:1000))%>% - #clearShapes()%>% - addPolylines(popup=paste0("

",alllines$from," & ",alllines$to,"
",tmp.covs %>% round(2),"

"), - weight = lwds, color="#cc0033", layerId = paste0("line",seq_along(alllines$to)))%>% - removeControl('help') %>% + + + leafletProxy("mymap", data = gcIntermediate(p1, + p2, + n = 100, + addStartEnd = TRUE, + sp = TRUE + )) %>% + removeShape(layerId = paste0("line", 1:1000)) %>% + # clearShapes()%>% + addPolylines( + popup = paste0("

", alllines$from, " & ", alllines$to, "
", tmp.covs %>% round(2), "

"), + weight = lwds, color = "#cc0033", layerId = paste0("line", seq_along(alllines$to)) + ) %>% + removeControl("help") %>% addControl("

Help:
The thickness of the lines shows the strength of the correlation.

", - position='bottomright',layerId='help') - + position = "bottomright", layerId = "help" + ) } - - if (!("spcor" %in% input$AllmapOpt)){ + + if (!("spcor" %in% input$AllmapOpt)) { # clear the map leafletProxy("mymap") %>% - removeShape(layerId = paste0("line",1:1000))%>% - removeControl('help') + removeShape(layerId = paste0("line", 1:1000)) %>% + removeControl("help") } - - }) - #background overlay -------------------------- - observeEvent(c(input$boverlay,input$tileop),{ - - #making colors - qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',] - col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors/2, rownames(qual_col_pals))) - - #cleaning - if (input$boverlay=='None'){ + # background overlay -------------------------- + observeEvent(c(input$boverlay, input$tileop), { + # making colors + qual_col_pals <- brewer.pal.info[brewer.pal.info$category == "qual", ] + col_vector <- unlist(mapply(brewer.pal, qual_col_pals$maxcolors / 2, rownames(qual_col_pals))) + + # cleaning + if (input$boverlay == "None") { # clear the map leafletProxy("mymap") %>% - clearShapes()%>% - removeControl('help') - }else if(input$boverlay=="e1"){ - #Reading eco layer - aoi_boundary_HARV <- sf::read_sf("Utilities/l1.json") + clearShapes() %>% + removeControl("help") + } else if (input$boverlay == "e1") { + # Reading eco layer + aoi_boundary_HARV <- sf::read_sf("Utilities/l1.json") aoi_boundary_HARV %>% st_set_crs("+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +a=6370997 +b=6370997 +units=m +no_defs") %>% st_transform("+proj=longlat +datum=WGS84") %>% - leafletProxy("mymap", data=.) %>% - clearShapes()%>% - addPolygons(stroke = FALSE, fillOpacity = input$tileop, smoothFactor = 0.5, - fillColor = col_vector, label=~NA_L1KEY %>%map(~HTML(sprintf("
%s
",.x))) + leafletProxy("mymap", data = .) %>% + clearShapes() %>% + addPolygons( + stroke = FALSE, fillOpacity = input$tileop, smoothFactor = 0.5, + fillColor = col_vector, label = ~ NA_L1KEY %>% map(~ HTML(sprintf("
%s
", .x))) ) - - }else if(input$boverlay=="e2"){ - #Reading eco layer - aoi_boundary_HARV <- sf::read_sf("Utilities/l2.json") - + } else if (input$boverlay == "e2") { + # Reading eco layer + aoi_boundary_HARV <- sf::read_sf("Utilities/l2.json") + aoi_boundary_HARV %>% st_set_crs("+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +a=6370997 +b=6370997 +units=m +no_defs") %>% st_transform("+proj=longlat +datum=WGS84") %>% - leafletProxy("mymap", data=.) %>% - clearShapes()%>% - addPolygons(stroke = FALSE, fillOpacity = input$tileop, smoothFactor = 0.5, - fillColor = col_vector, label=~NA_L2KEY %>%map(~HTML(sprintf("
%s
",.x))) + leafletProxy("mymap", data = .) %>% + clearShapes() %>% + addPolygons( + stroke = FALSE, fillOpacity = input$tileop, smoothFactor = 0.5, + fillColor = col_vector, label = ~ NA_L2KEY %>% map(~ HTML(sprintf("
%s
", .x))) ) - } - - - }) - }) diff --git a/shiny/SDAdashboard/ui.R b/shiny/SDAdashboard/ui.R index 0079b43fba5..ed1096278cf 100644 --- a/shiny/SDAdashboard/ui.R +++ b/shiny/SDAdashboard/ui.R @@ -5,38 +5,42 @@ library(shinyWidgets) library(shinycssloaders) suppressPackageStartupMessages(require(shinyjs)) suppressPackageStartupMessages(require(V8)) -source(file.path('Utilities','uihelp.R'),local = T)$value +source(file.path("Utilities", "uihelp.R"), local = T)$value -#cosmo,flatly,sandstone, yeti -fluidPage( tags$head(HTML('SDA Dashboard')), - navbarPage(h4("SDA Dashboard"),theme =shinytheme("cosmo"),id = "inTabset", - - tabPanel(h4("Home"), - uiOutput('mainui'), - tags$head(tags$script(src="scripts.js")), - - tags$head( - tags$link(rel = "stylesheet", type = "text/css", href = "style.css") - ), - tags$head( - tags$link(rel = "stylesheet", type = "text/css", href = "https://fonts.googleapis.com/css?family=Lato:400,700,900") - ), - useShinyjs(), - use_bs_tooltip(), - use_bs_popover(), - shinytoastr::useToastr() - )#, - # tabPanel(h4("Signals"), - # fluidRow( - # column(12) - # - # ) - ), +# cosmo,flatly,sandstone, yeti +fluidPage( + tags$head(HTML("SDA Dashboard")), + navbarPage(h4("SDA Dashboard"), + theme = shinytheme("cosmo"), id = "inTabset", + tabPanel( + h4("Home"), + uiOutput("mainui"), + tags$head(tags$script(src = "scripts.js")), + tags$head( + tags$link(rel = "stylesheet", type = "text/css", href = "style.css") + ), + tags$head( + tags$link(rel = "stylesheet", type = "text/css", href = "https://fonts.googleapis.com/css?family=Lato:400,700,900") + ), + useShinyjs(), + use_bs_tooltip(), + use_bs_popover(), + shinytoastr::useToastr() + ) # , + # tabPanel(h4("Signals"), + # fluidRow( + # column(12) + # + # ) + ), -# - conditionalPanel(condition="$('html').hasClass('shiny-busy')", - tags$div(id="loadmessage", - HTML(paste0("
+ # + conditionalPanel( + condition = "$('html').hasClass('shiny-busy')", + tags$div( + id = "loadmessage", + HTML(paste0("
")) - )) - ) \ No newline at end of file + ) + ) +) diff --git a/shiny/ViewMet/server.R b/shiny/ViewMet/server.R index 3cb45c022e0..df5e8e7b0c4 100644 --- a/shiny/ViewMet/server.R +++ b/shiny/ViewMet/server.R @@ -1,203 +1,240 @@ -# ViewMet Server -lapply(c( "shiny", - "ggplot2", - "stringr", - "ncdf4", - "DT", - "plyr", - "dplyr"),function(pkg){ - if (!(pkg %in% installed.packages()[,1])){ - install.packages(pkg) - } - library(pkg,character.only = TRUE,quietly = TRUE) - } -) +# ViewMet Server +lapply(c( + "shiny", + "ggplot2", + "stringr", + "ncdf4", + "DT", + "plyr", + "dplyr" +), function(pkg) { + if (!(pkg %in% installed.packages()[, 1])) { + install.packages(pkg) + } + library(pkg, character.only = TRUE, quietly = TRUE) +}) -lapply(c( "PEcAn.benchmark", - "PEcAn.visualization", - "PEcAn.logger", - "PEcAn.remote"),function(pkg){ - library(pkg,character.only = TRUE,quietly = TRUE) - } -) +lapply(c( + "PEcAn.benchmark", + "PEcAn.visualization", + "PEcAn.logger", + "PEcAn.remote" +), function(pkg) { + library(pkg, character.only = TRUE, quietly = TRUE) +}) -options(shiny.maxRequestSize=30*1024^2) #maximum file input size +options(shiny.maxRequestSize = 30 * 1024^2) # maximum file input size server <- function(input, output, session) { - bety <- betyConnect() rv <- reactiveValues() - + observe({ - # Look in the database for all inputs that are in CF format + # Look in the database for all inputs that are in CF format # and grab their site ids - inputs <- tbl(bety, "inputs") %>% filter(format_id == 33) %>% collect + inputs <- tbl(bety, "inputs") %>% + filter(format_id == 33) %>% + collect() updateSelectizeInput(session, "site.id", choices = sort(unique(inputs$site_id))) - }) - - - observeEvent({input$site.id}, ignoreInit = TRUE, { - - site <- input$site.id - PEcAn.logger::logger.debug("Site", site, "selected") - - if(is.na(as.numeric(site))){ - full.paths <- "" - }else{ - - # Given site id, get info about the input - inputs <- tbl(bety, "inputs") %>% filter(format_id == 33) %>% - filter(site_id == site) %>% collect - - # Check the machine - # If the machine is one of the three pecan servers then files that have - # dbfile entries for any of the three servers can be loaded - host <- PEcAn.remote::fqdn() - if(host %in% c("test-pecan.bu.edu", "pecan1.bu.edu", "pecan2.bu.edu")){ - host <- c("test-pecan.bu.edu", "pecan1.bu.edu", "pecan2.bu.edu") - } - machine <- tbl(bety, "machines") %>% filter(hostname %in% host) %>% collect - - dbfiles <- tbl(bety, "dbfiles") %>% - filter(container_type == "Input") %>% - filter(container_id %in% inputs$id) %>% - filter(machine_id %in% machine$id) %>% - collect - - if(all(dim(dbfiles) == 0)){ + + + observeEvent( + { + input$site.id + }, + ignoreInit = TRUE, + { + site <- input$site.id + PEcAn.logger::logger.debug("Site", site, "selected") + + if (is.na(as.numeric(site))) { full.paths <- "" - }else{ - dbfiles <- dbfiles %>% - mutate(file = gsub("//", "/",file.path(file_path, file_name))) - - types <- unique(dbfiles$file_path) %>% basename() %>% - gsub(pattern = "\\_site_.*",replacement = "", x = .) 
%>% - unique() %>% sort() - - updateCheckboxGroupInput(session, "met", choices = types) - - paths <- unique(dbfiles$file) - full.paths <- c() - yrs <- c() - for(i in seq_along(paths)){ - new.files <- dir(dirname(paths[i]), pattern = basename(paths[i]), - full.names = TRUE) - yrs <- c(yrs, stringr::str_extract(new.files, pattern="[0-9]{4}")) - full.paths <- c(full.paths,new.files) + } else { + # Given site id, get info about the input + inputs <- tbl(bety, "inputs") %>% + filter(format_id == 33) %>% + filter(site_id == site) %>% + collect() + + # Check the machine + # If the machine is one of the three pecan servers then files that have + # dbfile entries for any of the three servers can be loaded + host <- PEcAn.remote::fqdn() + if (host %in% c("test-pecan.bu.edu", "pecan1.bu.edu", "pecan2.bu.edu")) { + host <- c("test-pecan.bu.edu", "pecan1.bu.edu", "pecan2.bu.edu") + } + machine <- tbl(bety, "machines") %>% + filter(hostname %in% host) %>% + collect() + + dbfiles <- tbl(bety, "dbfiles") %>% + filter(container_type == "Input") %>% + filter(container_id %in% inputs$id) %>% + filter(machine_id %in% machine$id) %>% + collect() + + if (all(dim(dbfiles) == 0)) { + full.paths <- "" + } else { + dbfiles <- dbfiles %>% + mutate(file = gsub("//", "/", file.path(file_path, file_name))) + + types <- unique(dbfiles$file_path) %>% + basename() %>% + gsub(pattern = "\\_site_.*", replacement = "", x = .) %>% + unique() %>% + sort() + + updateCheckboxGroupInput(session, "met", choices = types) + + paths <- unique(dbfiles$file) + full.paths <- c() + yrs <- c() + for (i in seq_along(paths)) { + new.files <- dir(dirname(paths[i]), + pattern = basename(paths[i]), + full.names = TRUE + ) + yrs <- c(yrs, stringr::str_extract(new.files, pattern = "[0-9]{4}")) + full.paths <- c(full.paths, new.files) + } + updateCheckboxGroupInput(session, "years", choices = sort(unique(yrs))) } - updateCheckboxGroupInput(session, "years", choices = sort(unique(yrs))) } + rv$full.paths <- full.paths } - rv$full.paths <- full.paths - - }) - - # Once met and years are selected, the paths to available files on the server + ) + + # Once met and years are selected, the paths to available files on the server # will show in a tabel - observeEvent({ - input$met - input$years},{ - - met <- input$met - years <- input$years - - full.paths <- rv$full.paths - - load.paths <- c() - for(i in seq_along(met)){ - new.paths <- full.paths[grep(paste0(met[i],"_site_"), full.paths)] - year_sub <- stringr::str_extract(new.paths, pattern="[0-9]{4}") %in% years - new.paths <- new.paths[year_sub] - load.paths <- c(load.paths, new.paths) + observeEvent( + { + input$met + input$years + }, + { + met <- input$met + years <- input$years + + full.paths <- rv$full.paths + + load.paths <- c() + for (i in seq_along(met)) { + new.paths <- full.paths[grep(paste0(met[i], "_site_"), full.paths)] + year_sub <- stringr::str_extract(new.paths, pattern = "[0-9]{4}") %in% years + new.paths <- new.paths[year_sub] + load.paths <- c(load.paths, new.paths) + } + rv$load.paths <- load.paths } - rv$load.paths <- load.paths - }) - - - observeEvent({rv$load.paths},{ - output$results_table <- DT::renderDataTable(DT::datatable(as.matrix(rv$load.paths))) - }) - + ) + + + observeEvent( + { + rv$load.paths + }, + { + output$results_table <- DT::renderDataTable(DT::datatable(as.matrix(rv$load.paths))) + } + ) + # Click the load data to read in the met data load.model.data <- eventReactive(input$load_data, { req(input$met) req(input$years) - + PEcAn.logger::logger.debug("Loading", 
input$met) PEcAn.logger::logger.debug("Loading", input$years) - + data <- list() - for(i in seq_along(rv$load.paths)){ - + for (i in seq_along(rv$load.paths)) { fpath <- dirname(rv$load.paths[i]) - inputid <- tbl(bety, "dbfiles") %>% filter(file_path == fpath) %>% - pull(container_id) %>% unique() %>% .[1] - formatid <- tbl(bety, "inputs") %>% filter(id == inputid) %>% pull(format_id) - siteid <- tbl(bety, "inputs") %>% filter(id == inputid) %>% pull(site_id) - - site = query.site(con = bety, siteid) - + inputid <- tbl(bety, "dbfiles") %>% + filter(file_path == fpath) %>% + pull(container_id) %>% + unique() %>% + .[1] + formatid <- tbl(bety, "inputs") %>% + filter(id == inputid) %>% + pull(format_id) + siteid <- tbl(bety, "inputs") %>% + filter(id == inputid) %>% + pull(site_id) + + site <- query.site(con = bety, siteid) + current_nc <- ncdf4::nc_open(rv$load.paths[i]) vars_in_file <- names(current_nc[["var"]]) ncdf4::nc_close(current_nc) - format = query.format.vars(bety, inputid, formatid) + format <- query.format.vars(bety, inputid, formatid) format$vars <- format$vars %>% filter(input_name %in% vars_in_file) - - - dat <- try(load_data(data.path = rv$load.paths[i], - format = format, site = site, )) - - if(inherits(dat, "data.frame")) { - dat$met <- rv$load.paths[i] %>% dirname() %>% basename() %>% - gsub(pattern = "\\_site_.*",replacement = "", x = .) + + + dat <- try(load_data( + data.path = rv$load.paths[i], + format = format, site = site, + )) + + if (inherits(dat, "data.frame")) { + dat$met <- rv$load.paths[i] %>% + dirname() %>% + basename() %>% + gsub(pattern = "\\_site_.*", replacement = "", x = .) data[[i]] <- dat } } data.final <- do.call(plyr::rbind.fill, data) return(data.final) }) - + observeEvent(input$load_data, { data.final <- load.model.data() rv$data.final <- data.final updateSelectizeInput(session, "var", choices = colnames(data.final)) }) - - observeEvent({input$var},{ - PEcAn.logger::logger.debug(input$var) - var <- input$var - if(input$var != ""){ - plot.data <- rv$data.final %>% dplyr::select(one_of(c("posix", var, "met"))) - # print(head(plot.data)) - colnames(plot.data) <- c("date", "var", "met") - rv$plot.data <- plot.data + + observeEvent( + { + input$var + }, + { + PEcAn.logger::logger.debug(input$var) + var <- input$var + if (input$var != "") { + plot.data <- rv$data.final %>% dplyr::select(one_of(c("posix", var, "met"))) + # print(head(plot.data)) + colnames(plot.data) <- c("date", "var", "met") + rv$plot.data <- plot.data + } } - }) - - observeEvent(input$plot_data,{ + ) + + observeEvent(input$plot_data, { req(rv$plot.data) req(input$var) - rv$plot.data$met <- factor(rv$plot.data$met, - levels = sort(unique(rv$plot.data$met), decreasing = TRUE)) - - p_overlay <- ggplot(rv$plot.data) + geom_line(aes(x=date, y=var, color=met)) + - ylab(input$var) + ggtitle(input$var) - - p_facet <- ggplot(rv$plot.data) + geom_line(aes(x=date, y=var, color=met), size=1) + - ylab(input$var) + ggtitle(input$var) + + rv$plot.data$met <- factor(rv$plot.data$met, + levels = sort(unique(rv$plot.data$met), decreasing = TRUE) + ) + + p_overlay <- ggplot(rv$plot.data) + + geom_line(aes(x = date, y = var, color = met)) + + ylab(input$var) + + ggtitle(input$var) + + p_facet <- ggplot(rv$plot.data) + + geom_line(aes(x = date, y = var, color = met), size = 1) + + ylab(input$var) + + ggtitle(input$var) + facet_grid(met ~ .) 
- + output$plot_overlay <- renderPlot(p_overlay) PEcAn.logger::logger.debug("Overlay plot finished") output$plot_facet <- renderPlot(p_facet) PEcAn.logger::logger.debug("Facet plot finished") - }) - - - } diff --git a/shiny/ViewMet/ui.R b/shiny/ViewMet/ui.R index f818ee52250..89d391dcd33 100644 --- a/shiny/ViewMet/ui.R +++ b/shiny/ViewMet/ui.R @@ -1,29 +1,33 @@ -# ViewMet UI +# ViewMet UI -ui <- fluidPage(sidebarPanel( - h3("Select Files"), - wellPanel( - selectizeInput(inputId = "site.id", "Site ID", c()), - checkboxGroupInput(inputId = "met", label = "Met product", choices = c()), - checkboxGroupInput(inputId = "years", label = "Years", choices = c()), - actionButton(inputId ="load_data", label = "Load Met Data") - ), - h3("Select Plots"), - wellPanel( - selectizeInput(inputId = "var", "Variable", c()), - actionButton(inputId ="plot_data", label = "Plot Met Data") - ) - ), - mainPanel(navbarPage(title = NULL, - tabPanel("Files to be loaded", - DT::dataTableOutput("results_table") - ), - tabPanel("Combined Plot", - plotOutput("plot_overlay") - ), - tabPanel("Facet Plot", - plotOutput("plot_facet") - ) - ) - ) -) \ No newline at end of file +ui <- fluidPage( + sidebarPanel( + h3("Select Files"), + wellPanel( + selectizeInput(inputId = "site.id", "Site ID", c()), + checkboxGroupInput(inputId = "met", label = "Met product", choices = c()), + checkboxGroupInput(inputId = "years", label = "Years", choices = c()), + actionButton(inputId = "load_data", label = "Load Met Data") + ), + h3("Select Plots"), + wellPanel( + selectizeInput(inputId = "var", "Variable", c()), + actionButton(inputId = "plot_data", label = "Plot Met Data") + ) + ), + mainPanel(navbarPage( + title = NULL, + tabPanel( + "Files to be loaded", + DT::dataTableOutput("results_table") + ), + tabPanel( + "Combined Plot", + plotOutput("plot_overlay") + ), + tabPanel( + "Facet Plot", + plotOutput("plot_facet") + ) + )) +) diff --git a/shiny/dbsync/app.R b/shiny/dbsync/app.R index da4f3065247..6b39303ff69 100644 --- a/shiny/dbsync/app.R +++ b/shiny/dbsync/app.R @@ -22,10 +22,10 @@ allow_no_url <- FALSE # mapping to fix hostnames host_mapping <- list( - "wisconsin"="tree.aos.wisc.edu", - "terra-mepp.igb.illinois.edu"="terra-mepp.illinois.edu", - "ecn.purdue.edu"="engineering.purdue.edu", - "paleon-pecan.virtual.crc.nd.edu"="crc.nd.edu" + "wisconsin" = "tree.aos.wisc.edu", + "terra-mepp.igb.illinois.edu" = "terra-mepp.illinois.edu", + "ecn.purdue.edu" = "engineering.purdue.edu", + "paleon-pecan.virtual.crc.nd.edu" = "crc.nd.edu" ) # ignored servers, is reset on refresh @@ -34,190 +34,198 @@ ignored_servers <- c() # given a IP address lookup geo spatital info # uses a cache to prevent to many requests (1000 per day) get_geoip <- function(ip) { - if (length(geoip) == 0 && file.exists("geoip.json")) { - geoip <<- jsonlite::read_json(geocache, simplifyVector = TRUE) + if (length(geoip) == 0 && file.exists("geoip.json")) { + geoip <<- jsonlite::read_json(geocache, simplifyVector = TRUE) + } + if (!ip %in% geoip$ip) { + print(paste("CACHE MISS", ip)) + res <- curl::curl_fetch_memory(paste0("http://free.ipwhois.io/json/", ip)) + if (res$status - -200) { + geoloc <- jsonlite::parse_json(rawToChar(res$content)) + geoloc[lengths(geoloc) == 0] <- NA + geoloc <- type.convert(geoloc, as.is = TRUE) + } else { + geoloc <- list(ip = ip, lat = 0, lon = 0, city = "?", countr = "?") } - if (! 
ip %in% geoip$ip) { - print(paste("CACHE MISS", ip)) - res <- curl::curl_fetch_memory(paste0("http://free.ipwhois.io/json/", ip)) - if (res$status -- 200) { - geoloc <- jsonlite::parse_json(rawToChar(res$content)) - geoloc[lengths(geoloc) == 0] <- NA - geoloc <- type.convert(geoloc, as.is = TRUE) - } else { - geoloc <- list(ip=ip, lat=0, lon=0, city="?", countr="?") - } - if (length(geoip) == 0) { - geoip <<- as.data.frame(geoloc) - } else { - geoip <<- rbind(geoip, as.data.frame(geoloc)) - } - jsonlite::write_json(geoip, geocache) + if (length(geoip) == 0) { + geoip <<- as.data.frame(geoloc) + } else { + geoip <<- rbind(geoip, as.data.frame(geoloc)) } + jsonlite::write_json(geoip, geocache) + } } # get a list of all servers in BETY and their geospatial location get_servers <- function() { - ignored_servers <<- c() - - # connect to BETYdb - bety <- DBI::dbConnect( - DBI::dbDriver("PostgreSQL"), - dbname = Sys.getenv("PGDATABASE", "bety"), - host = Sys.getenv("PGHOST", "localhost"), - user = Sys.getenv("PGUSER", "bety"), - password = Sys.getenv("PGPASSWORD", "bety") - ) - - servers <- dplyr::tbl(bety, "machines") %>% - dplyr::filter(!is.na(sync_host_id)) %>% - dplyr::filter(sync_url != "" || allow_no_url) %>% - dplyr::arrange(sync_host_id) %>% - dplyr::select(hostname, sync_host_id, sync_url, sync_start, sync_end) %>% - dplyr::collect() %>% - dplyr::mutate(ip = unlist(lapply(hostname, function(x) { - if (x %in% names(host_mapping)) { - ip <- nsl(host_mapping[[x]]) - } else { - ip <- nsl(x) - } - ifelse(is.null(ip), NA, ip) - }))) %>% - dplyr::mutate(version = NA, lastdump = NA, migrations = NA) %>% - dplyr::filter(!is.na(ip)) %>% - dplyr::arrange(ip) - - # close connection - DBI::dbDisconnect(bety) - - # convert ip address to geo location - lapply(servers$ip, get_geoip) - locations <- geoip %>% - dplyr::filter(ip %in% servers$ip) %>% - dplyr::arrange(ip) %>% - dplyr::select("city", "country", "latitude", "longitude") - - # combine tables - servers <- cbind(servers, locations) - - # add columns for all sync_ids - servers[, paste0("server_", servers$sync_host_id)] <- NA - - # return servers - servers %>% dplyr::arrange(sync_host_id) + ignored_servers <<- c() + + # connect to BETYdb + bety <- DBI::dbConnect( + DBI::dbDriver("PostgreSQL"), + dbname = Sys.getenv("PGDATABASE", "bety"), + host = Sys.getenv("PGHOST", "localhost"), + user = Sys.getenv("PGUSER", "bety"), + password = Sys.getenv("PGPASSWORD", "bety") + ) + + servers <- dplyr::tbl(bety, "machines") %>% + dplyr::filter(!is.na(sync_host_id)) %>% + dplyr::filter(sync_url != "" || allow_no_url) %>% + dplyr::arrange(sync_host_id) %>% + dplyr::select(hostname, sync_host_id, sync_url, sync_start, sync_end) %>% + dplyr::collect() %>% + dplyr::mutate(ip = unlist(lapply(hostname, function(x) { + if (x %in% names(host_mapping)) { + ip <- nsl(host_mapping[[x]]) + } else { + ip <- nsl(x) + } + ifelse(is.null(ip), NA, ip) + }))) %>% + dplyr::mutate(version = NA, lastdump = NA, migrations = NA) %>% + dplyr::filter(!is.na(ip)) %>% + dplyr::arrange(ip) + + # close connection + DBI::dbDisconnect(bety) + + # convert ip address to geo location + lapply(servers$ip, get_geoip) + locations <- geoip %>% + dplyr::filter(ip %in% servers$ip) %>% + dplyr::arrange(ip) %>% + dplyr::select("city", "country", "latitude", "longitude") + + # combine tables + servers <- cbind(servers, locations) + + # add columns for all sync_ids + servers[, paste0("server_", servers$sync_host_id)] <- NA + + # return servers + servers %>% dplyr::arrange(sync_host_id) } # fetch 
information from the actual servers check_servers <- function(servers, progress) { - check_servers <- servers$sync_url[! servers$sync_host_id %in% ignored_servers] + check_servers <- servers$sync_url[!servers$sync_host_id %in% ignored_servers] - # generic failure message to increment progress - failure <- function(res) { - print(res) - progress$inc(amount = 1) - } - - # version information - server_version <- function(res) { - url <- sub("version.txt", "bety.tar.gz", res$url) - progress$inc(amount = 0, message = paste("Processing", progress$getValue(), "of", progress$getMax())) - print(paste(res$status, url)) - if (res$status == 200 || res$status == 226) { - check_servers <<- check_servers[check_servers != url] - version <- strsplit(rawToChar(res$content), '\t', fixed = TRUE)[[1]] - if (!is.na(as.numeric(version[1]))) { - servers[servers$sync_url == url,'version'] <<- version[2] - servers[servers$sync_url == url,'lastdump'] <<- version[4] - servers[servers$sync_url == url,'migrations'] <<- version[1] - } else { - servers[servers$sync_url == url,'version'] <<- NA - servers[servers$sync_url == url,'lastdump'] <<- NA - servers[servers$sync_url == url,'migrations'] <<- NA - } - } - progress$inc(amount = 1) + # generic failure message to increment progress + failure <- function(res) { + print(res) + progress$inc(amount = 1) + } + + # version information + server_version <- function(res) { + url <- sub("version.txt", "bety.tar.gz", res$url) + progress$inc(amount = 0, message = paste("Processing", progress$getValue(), "of", progress$getMax())) + print(paste(res$status, url)) + if (res$status == 200 || res$status == 226) { + check_servers <<- check_servers[check_servers != url] + version <- strsplit(rawToChar(res$content), "\t", fixed = TRUE)[[1]] + if (!is.na(as.numeric(version[1]))) { + servers[servers$sync_url == url, "version"] <<- version[2] + servers[servers$sync_url == url, "lastdump"] <<- version[4] + servers[servers$sync_url == url, "migrations"] <<- version[1] + } else { + servers[servers$sync_url == url, "version"] <<- NA + servers[servers$sync_url == url, "lastdump"] <<- NA + servers[servers$sync_url == url, "migrations"] <<- NA + } } - urls <- sapply(check_servers, function(x) { sub("bety.tar.gz", "version.txt", x) }) - lapply(urls, function(x) { curl::curl_fetch_multi(x, done = server_version, fail = failure) } ) - - # log information - server_log <- function(res) { - url <- sub("sync.log", "bety.tar.gz", res$url) - progress$inc(amount = 0, message = paste("Processing", progress$getValue(), "of", progress$getMax())) - print(paste(res$status, url)) - if (res$status == 200 || res$status == 226) { - lines <- strsplit(rawToChar(res$content), '\n', fixed = TRUE)[[1]] - now <- as.POSIXlt(Sys.time(), tz="UTC") - for (line in tail(lines, maxlines)) { - pieces <- strsplit(trimws(line), ' ', fixed=TRUE)[[1]] - if (length(pieces) == 8) { - if (pieces[8] == 0) { - when <- strptime(paste(pieces[1:6], collapse = " "), format="%a %b %d %T UTC %Y", tz="UTC") - tdiff <- min(maxtime, difftime(now, when, units = "hours")) - servers[servers$sync_url == url, paste0('server_', pieces[7])] <<- tdiff - } - } else { - print(line) - } - } + progress$inc(amount = 1) + } + urls <- sapply(check_servers, function(x) { + sub("bety.tar.gz", "version.txt", x) + }) + lapply(urls, function(x) { + curl::curl_fetch_multi(x, done = server_version, fail = failure) + }) + + # log information + server_log <- function(res) { + url <- sub("sync.log", "bety.tar.gz", res$url) + progress$inc(amount = 0, message = 
paste("Processing", progress$getValue(), "of", progress$getMax())) + print(paste(res$status, url)) + if (res$status == 200 || res$status == 226) { + lines <- strsplit(rawToChar(res$content), "\n", fixed = TRUE)[[1]] + now <- as.POSIXlt(Sys.time(), tz = "UTC") + for (line in tail(lines, maxlines)) { + pieces <- strsplit(trimws(line), " ", fixed = TRUE)[[1]] + if (length(pieces) == 8) { + if (pieces[8] == 0) { + when <- strptime(paste(pieces[1:6], collapse = " "), format = "%a %b %d %T UTC %Y", tz = "UTC") + tdiff <- min(maxtime, difftime(now, when, units = "hours")) + servers[servers$sync_url == url, paste0("server_", pieces[7])] <<- tdiff + } + } else { + print(line) } - progress$inc(amount = 1) + } } - urls <- sapply(check_servers, function(x) { sub("bety.tar.gz", "sync.log", x) }) - lapply(urls, function(x) { curl::curl_fetch_multi(x, done = server_log, fail = failure) } ) - - # run queries in parallel - curl::multi_run() - ignored_servers <<- c(ignored_servers, servers[servers$sync_url %in% check_servers, "sync_host_id"]) - - return(servers) + progress$inc(amount = 1) + } + urls <- sapply(check_servers, function(x) { + sub("bety.tar.gz", "sync.log", x) + }) + lapply(urls, function(x) { + curl::curl_fetch_multi(x, done = server_log, fail = failure) + }) + + # run queries in parallel + curl::multi_run() + ignored_servers <<- c(ignored_servers, servers[servers$sync_url %in% check_servers, "sync_host_id"]) + + return(servers) } # return vector to use in polylines check_sync <- function(servers) { - ids <- servers$sync_host_id - - # helper function to see if two servers are connected - connected <- function(src, dst) { - v <- servers[servers$sync_host_id==src, paste0("server_", dst)] - !is.null(v) && !is.na(v) - } - - # build up list of all connections - lat <- c() - lon <- c() - tdiff <- c() - - for (src in ids) { - src_x <- servers[servers$sync_host_id==src, 'longitude'] - src_y <- servers[servers$sync_host_id==src, 'latitude'] - for (dst in ids) { - if (connected(src, dst)) { - tdiff <- c(tdiff, servers[servers$sync_host_id==src, paste0("server_", dst)]) - - dst_x <- servers[servers$sync_host_id==dst, 'longitude'] - lon <- c(lon, c(src_x, (src_x + (dst_x - src_x) / 2), NA)) - - dst_y <- servers[servers$sync_host_id==dst, 'latitude'] - lat <- c(lat, c(src_y, (src_y + (dst_y - src_y) / 2), NA)) - } - } - } - - # need to have at least one polyline, will draw a line from server 1 to server 1 - if (length(tdiff) == 0) { - src_x <- servers[1, 'longitude'] - src_y <- servers[1, 'latitude'] - return(list(latitude=c(src_y, src_y, NA), longitude=c(src_x, src_x, NA), value=c(0))) - } else { - return(list(latitude=lat, longitude=lon, value=tdiff)) + ids <- servers$sync_host_id + + # helper function to see if two servers are connected + connected <- function(src, dst) { + v <- servers[servers$sync_host_id == src, paste0("server_", dst)] + !is.null(v) && !is.na(v) + } + + # build up list of all connections + lat <- c() + lon <- c() + tdiff <- c() + + for (src in ids) { + src_x <- servers[servers$sync_host_id == src, "longitude"] + src_y <- servers[servers$sync_host_id == src, "latitude"] + for (dst in ids) { + if (connected(src, dst)) { + tdiff <- c(tdiff, servers[servers$sync_host_id == src, paste0("server_", dst)]) + + dst_x <- servers[servers$sync_host_id == dst, "longitude"] + lon <- c(lon, c(src_x, (src_x + (dst_x - src_x) / 2), NA)) + + dst_y <- servers[servers$sync_host_id == dst, "latitude"] + lat <- c(lat, c(src_y, (src_y + (dst_y - src_y) / 2), NA)) + } } + } + + # need to have at least one 
polyline, will draw a line from server 1 to server 1 + if (length(tdiff) == 0) { + src_x <- servers[1, "longitude"] + src_y <- servers[1, "latitude"] + return(list(latitude = c(src_y, src_y, NA), longitude = c(src_x, src_x, NA), value = c(0))) + } else { + return(list(latitude = lat, longitude = lon, value = tdiff)) + } } # Define UI for application that draws a histogram ui <- fluidPage( - singleton(tags$head(HTML( - ' + singleton(tags$head(HTML( + ' ' - ))), - - # Application title - titlePanel("PEcAn DB Sync"), + ))), + + # Application title + titlePanel("PEcAn DB Sync"), - # Map with sites - leaflet::leafletOutput("map"), + # Map with sites + leaflet::leafletOutput("map"), - # data table - DT::dataTableOutput("table"), + # data table + DT::dataTableOutput("table"), - # Refresh button - actionButton("refresh_servers", "Update Servers"), - actionButton("refresh_sync", "Update Sync") + # Refresh button + actionButton("refresh_servers", "Update Servers"), + actionButton("refresh_sync", "Update Sync") ) # Define server logic required to draw map server <- function(input, output, session) { - # red -> green color spectrum - colors <- leaflet::colorBin("RdYlGn", domain = c(0, maxtime), bins = maxbins, na.color = "purple", reverse = TRUE) - - # servers is what is changed, start with just data from database - servers <- get_servers() - values <- reactiveValues(servers=servers, - sync=check_sync(servers)) - - # update server list (quick) - observeEvent(input$refresh_servers, { - session$sendCustomMessage("disableUI", "") - values$servers <- get_servers() - session$sendCustomMessage("enableUI", "") - }) - - # update sync list (slow) - observeEvent(input$refresh_sync, { - servers <- values$servers - session$sendCustomMessage("disableUI", "") - progress <- Progress$new(session, min=0, max=2*(nrow(servers)-length(ignored_servers))) - servers <- check_servers(servers, progress) - sync <- check_sync(servers) - progress$close() - session$sendCustomMessage("enableUI", "") - values$servers <- servers - values$sync <- sync - }) - - # create a map of all servers that have a sync_host_id and sync_url - output$map <- renderLeaflet({ - leaflet(values$servers) %>% - addProviderTiles(providers$Stamen.TonerLite, - options = providerTileOptions(noWrap = TRUE) - ) %>% - addMarkers(~longitude, ~latitude, - label = ~htmltools::htmlEscape(hostname), - clusterOptions = markerClusterOptions(maxClusterRadius = 1)) %>% - addPolylines(~longitude, ~latitude, - color = colors(values$sync$value), data=values$sync) %>% - addLegend("bottomright", colors, values$sync$value, - title = "since last sync", labFormat = labelFormat(suffix =" hours")) - }) + # red -> green color spectrum + colors <- leaflet::colorBin("RdYlGn", domain = c(0, maxtime), bins = maxbins, na.color = "purple", reverse = TRUE) - # create a table of all servers that have a sync_host_id and sync_url - output$table <- DT::renderDataTable({ - ignored <- rep("gray", length(ignored_servers) + 1) - DT::datatable(values$servers %>% - dplyr::select("sync_host_id", "hostname", "city", "country", "lastdump", "migrations"), - rownames = FALSE) %>% - DT::formatStyle('sync_host_id', target = "row", color = DT::styleEqual(c(ignored_servers, "-1"), ignored)) - }) + # servers is what is changed, start with just data from database + servers <- get_servers() + values <- reactiveValues( + servers = servers, + sync = check_sync(servers) + ) + + # update server list (quick) + observeEvent(input$refresh_servers, { + session$sendCustomMessage("disableUI", "") + values$servers <- 
get_servers() + session$sendCustomMessage("enableUI", "") + }) + + # update sync list (slow) + observeEvent(input$refresh_sync, { + servers <- values$servers + session$sendCustomMessage("disableUI", "") + progress <- Progress$new(session, min = 0, max = 2 * (nrow(servers) - length(ignored_servers))) + servers <- check_servers(servers, progress) + sync <- check_sync(servers) + progress$close() + session$sendCustomMessage("enableUI", "") + values$servers <- servers + values$sync <- sync + }) + + # create a map of all servers that have a sync_host_id and sync_url + output$map <- renderLeaflet({ + leaflet(values$servers) %>% + addProviderTiles(providers$Stamen.TonerLite, + options = providerTileOptions(noWrap = TRUE) + ) %>% + addMarkers(~longitude, ~latitude, + label = ~ htmltools::htmlEscape(hostname), + clusterOptions = markerClusterOptions(maxClusterRadius = 1) + ) %>% + addPolylines(~longitude, ~latitude, + color = colors(values$sync$value), data = values$sync + ) %>% + addLegend("bottomright", colors, values$sync$value, + title = "since last sync", labFormat = labelFormat(suffix = " hours") + ) + }) + + # create a table of all servers that have a sync_host_id and sync_url + output$table <- DT::renderDataTable({ + ignored <- rep("gray", length(ignored_servers) + 1) + DT::datatable( + values$servers %>% + dplyr::select("sync_host_id", "hostname", "city", "country", "lastdump", "migrations"), + rownames = FALSE + ) %>% + DT::formatStyle("sync_host_id", target = "row", color = DT::styleEqual(c(ignored_servers, "-1"), ignored)) + }) } # Run the application diff --git a/shiny/global-sensitivity/load_ensemble.R b/shiny/global-sensitivity/load_ensemble.R index 6501ed02d3b..3bf00c9117e 100644 --- a/shiny/global-sensitivity/load_ensemble.R +++ b/shiny/global-sensitivity/load_ensemble.R @@ -2,67 +2,68 @@ #' @param settings PEcAn settings list #' @param variable Variable names to read, as a character vector #' @param quiet If TRUE, don't show status messages from `read.ensemble.output` -load_ensemble <- function(workflow_dir, settings, variable){ - library(PEcAn.all) +load_ensemble <- function(workflow_dir, settings, variable) { + library(PEcAn.all) - # Load the model output - ## ANS -- NOTE: There may be a faster/better way to do this using built-in PEcAn functions - ## ANS -- or...should these be automatically stored somewhere? - - message("Workflow dir: ", workflow_dir) - message("Model outdir: ", settings$modeloutdir) - message("Start year: ", settings$ensemble$start.year) - message("End year: ", settings$ensemble$end.year) - message("Read ensemble output...") - - # Read samples file - samples.file <- file.path(workflow_dir, "samples.Rdata") - if (file.exists(samples.file)) { - load(samples.file) - ens.run.ids <- runs.samples$ensemble - } else { - stop(samples.file, "not found required by read.ensemble.output") - } + # Load the model output + ## ANS -- NOTE: There may be a faster/better way to do this using built-in PEcAn functions + ## ANS -- or...should these be automatically stored somewhere? 
- ensemble.output.raw <- list() - for (row in rownames(ens.run.ids)) { - run.id <- ens.run.ids[row, "id"] - PEcAn.logger::logger.info("reading ensemble output from run id: ", run.id) - ensemble.output.raw[[row]] <- sapply(read.output(run.id, - file.path(settings$modeloutdir, run.id), - as.numeric(settings$ensemble$start.year), - as.numeric(settings$ensemble$end.year), - variable), mean, na.rm = TRUE) - } + message("Workflow dir: ", workflow_dir) + message("Model outdir: ", settings$modeloutdir) + message("Start year: ", settings$ensemble$start.year) + message("End year: ", settings$ensemble$end.year) + message("Read ensemble output...") - message("Rbind ensemble output...") - ensemble.output <- data.frame(do.call(rbind, ensemble.output.raw)) - - ## NOTE: read.ensemble.output only returns the mean value at each timestep. - ## If we want other statistics, they need to be either hard-coded (loop with read.output), - ## or read.ensemble.output needs to be modified. - - # Load parameter values - message("Load parameter values...") - load(file.path(workflow_dir, "samples.Rdata")) + # Read samples file + samples.file <- file.path(workflow_dir, "samples.Rdata") + if (file.exists(samples.file)) { + load(samples.file) + ens.run.ids <- runs.samples$ensemble + } else { + stop(samples.file, "not found required by read.ensemble.output") + } - ## "samples.RData" contains the following: - ## ensemble.samples -- For each PFT, data.frame of sampled parameter values. Not linked to run IDs, but presumably in same order - ## pft.names -- Names of each PFT - ## runs.samples -- Run IDs, not paired with anything, but probably in same order as samples - ## sa.samples -- Sensitivity analysis samples? Here it's blank - ## trait.names -- Names of parameters (traits) sampled; list by PFT. - ## trait.samples -- Samples from meta-analysis? 5004 samples per trait. - - message("Get run samples...") - ensemble.output$runid <- runs.samples$ensemble$id + ensemble.output.raw <- list() + for (row in rownames(ens.run.ids)) { + run.id <- ens.run.ids[row, "id"] + PEcAn.logger::logger.info("reading ensemble output from run id: ", run.id) + ensemble.output.raw[[row]] <- sapply(read.output( + run.id, + file.path(settings$modeloutdir, run.id), + as.numeric(settings$ensemble$start.year), + as.numeric(settings$ensemble$end.year), + variable + ), mean, na.rm = TRUE) + } - message('Cbind ensemble samples...') - ensemble.samples.cbind <- do.call(cbind, ensemble.samples[pft.names]) + message("Rbind ensemble output...") + ensemble.output <- data.frame(do.call(rbind, ensemble.output.raw)) - message('Cbind ensemble output and samples...') - ensemble.output.full <- cbind(ensemble.output, ensemble.samples.cbind) - - return(ensemble.output.full) -} + ## NOTE: read.ensemble.output only returns the mean value at each timestep. + ## If we want other statistics, they need to be either hard-coded (loop with read.output), + ## or read.ensemble.output needs to be modified. + + # Load parameter values + message("Load parameter values...") + load(file.path(workflow_dir, "samples.Rdata")) + + ## "samples.RData" contains the following: + ## ensemble.samples -- For each PFT, data.frame of sampled parameter values. Not linked to run IDs, but presumably in same order + ## pft.names -- Names of each PFT + ## runs.samples -- Run IDs, not paired with anything, but probably in same order as samples + ## sa.samples -- Sensitivity analysis samples? Here it's blank + ## trait.names -- Names of parameters (traits) sampled; list by PFT. 
+ ## trait.samples -- Samples from meta-analysis? 5004 samples per trait. + + message("Get run samples...") + ensemble.output$runid <- runs.samples$ensemble$id + message("Cbind ensemble samples...") + ensemble.samples.cbind <- do.call(cbind, ensemble.samples[pft.names]) + + message("Cbind ensemble output and samples...") + ensemble.output.full <- cbind(ensemble.output, ensemble.samples.cbind) + + return(ensemble.output.full) +} diff --git a/shiny/global-sensitivity/plotEnsemble.R b/shiny/global-sensitivity/plotEnsemble.R index 9986e7ab136..475a593df90 100644 --- a/shiny/global-sensitivity/plotEnsemble.R +++ b/shiny/global-sensitivity/plotEnsemble.R @@ -3,79 +3,82 @@ #' @param y Name of variable to plot on Y axis #' @param pdfs Display probability density functions outside plots. Default=TRUE. #' @param fit.method Method for regression fit. Either "lm" for linear OLS regression (default) or "spline" for `smooth.spline` fit. -plotEnsemble <- function(ensemble.out, x, y, pdfs=TRUE, fit.method="lm", ...){ - message('plotEnsemble x: ', x) - message('plotEnsemble y: ', y) - error_plot <- function(err){ +plotEnsemble <- function(ensemble.out, x, y, pdfs = TRUE, fit.method = "lm", ...) { + message("plotEnsemble x: ", x) + message("plotEnsemble y: ", y) + error_plot <- function(err) { plot.new() text(0.5, 0.5, err[1]) } - if(pdfs){ - zones <- matrix(c(1, 4, 3, 2), ncol=2, byrow=TRUE) - layout(zones, widths=c(4/5, 1/5), heights=c(1/5, 4/5)) - pdfx <- try(density(ensemble.out[,x])) - pdfy <- try(density(ensemble.out[,y])) + if (pdfs) { + zones <- matrix(c(1, 4, 3, 2), ncol = 2, byrow = TRUE) + layout(zones, widths = c(4 / 5, 1 / 5), heights = c(1 / 5, 4 / 5)) + pdfx <- try(density(ensemble.out[, x])) + pdfy <- try(density(ensemble.out[, y])) pdf_succ <- c(class(pdfx), c(class(pdfy))) != "try-error" - if(pdf_succ[1]){ - par(mar=c(0,3,1,1)) - plot(pdfx$x, pdfx$y, type="l", lwd=3, axes = FALSE, bty = "n", xlab = "", ylab = "") + if (pdf_succ[1]) { + par(mar = c(0, 3, 1, 1)) + plot(pdfx$x, pdfx$y, type = "l", lwd = 3, axes = FALSE, bty = "n", xlab = "", ylab = "") } else { error_plot(pdfx) } - if(pdf_succ[2]){ - par(mar=c(3,0,1,1)) - plot(pdfy$y, pdfy$x, type="l", lwd=3, axes = FALSE, bty = "n", xlab = "", ylab = "") + if (pdf_succ[2]) { + par(mar = c(3, 0, 1, 1)) + plot(pdfy$y, pdfy$x, type = "l", lwd = 3, axes = FALSE, bty = "n", xlab = "", ylab = "") } else { error_plot(pdfy) } - par(mar=c(4,4,1,1)) + par(mar = c(4, 4, 1, 1)) } form <- formula(sprintf("%s ~ %s", y, x)) - plot(form, ensemble.out, col="grey50", ...) - if(!is.na(fit.method)){ - if(fit.method == "lm"){ - fitline <- lm(form, data=ensemble.out) - abline(fitline, lwd=3) - } else if(fit.method == "spline"){ - fitline <- smooth.spline(ensemble.out[,x], ensemble.out[,y]) - lines(fitline, lwd=3) - } - else{ + plot(form, ensemble.out, col = "grey50", ...) 
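# Self-contained toy illustration (made-up data, not part of this patch) of the
# layout() arrangement plotEnsemble() builds when pdfs = TRUE: region 1 holds
# the top density of x, region 2 the right-hand density of y, region 3 the main
# scatter, and region 4 is the unused corner.
set.seed(42)
x_toy <- rnorm(200)
y_toy <- 0.5 * x_toy + rnorm(200)
zones <- matrix(c(1, 4, 3, 2), ncol = 2, byrow = TRUE)
layout(zones, widths = c(4 / 5, 1 / 5), heights = c(1 / 5, 4 / 5))
dx <- density(x_toy)
dy <- density(y_toy)
par(mar = c(0, 3, 1, 1))
plot(dx$x, dx$y, type = "l", lwd = 3, axes = FALSE, xlab = "", ylab = "")
par(mar = c(3, 0, 1, 1))
plot(dy$y, dy$x, type = "l", lwd = 3, axes = FALSE, xlab = "", ylab = "")
par(mar = c(4, 4, 1, 1))
plot(x_toy, y_toy, col = "grey50")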
+ if (!is.na(fit.method)) { + if (fit.method == "lm") { + fitline <- lm(form, data = ensemble.out) + abline(fitline, lwd = 3) + } else if (fit.method == "spline") { + fitline <- smooth.spline(ensemble.out[, x], ensemble.out[, y]) + lines(fitline, lwd = 3) + } else { stop("Unrecognized fit function") } } } -plotAllParams <- function(ensemble.out, variable, param_names, plot_cols = 3){ +plotAllParams <- function(ensemble.out, variable, param_names, plot_cols = 3) { plot_rows <- ceiling(length(param_names) / plot_cols) - par(mfrow = c(plot_rows, plot_cols), mar=c(4, 2, 3, 1), - mgp = c(2, 1, 0), oma = c(0, 2, 0, 0)) - for(param in param_names){ + par( + mfrow = c(plot_rows, plot_cols), mar = c(4, 2, 3, 1), + mgp = c(2, 1, 0), oma = c(0, 2, 0, 0) + ) + for (param in param_names) { fit_summary <- fitSummary(ensemble.out, param, variable) - main <- with(fit_summary, sprintf("R2 = %.3g, m = %.3g, p = %.3g", r2, coefs[2,1], coefs[2,4])) - plotEnsemble(ensemble.out, param, variable, pdfs=FALSE, ylab=NA, main=main) + main <- with(fit_summary, sprintf("R2 = %.3g, m = %.3g, p = %.3g", r2, coefs[2, 1], coefs[2, 4])) + plotEnsemble(ensemble.out, param, variable, pdfs = FALSE, ylab = NA, main = main) } - mtext(variable, 2, outer = TRUE) + mtext(variable, 2, outer = TRUE) } -plotAllVars <- function(ensemble.out, param, var_names, plot_cols = 3){ +plotAllVars <- function(ensemble.out, param, var_names, plot_cols = 3) { plot_rows <- ceiling(length(var_names) / plot_cols) - par(mfrow = c(plot_rows, plot_cols), mar=c(2, 3, 2, 1), - mgp = c(2, 1, 0), oma = c(2, 0, 1, 0)) - for(variable in var_names){ + par( + mfrow = c(plot_rows, plot_cols), mar = c(2, 3, 2, 1), + mgp = c(2, 1, 0), oma = c(2, 0, 1, 0) + ) + for (variable in var_names) { fit_summary <- fitSummary(ensemble.out, param, variable) - main <- with(fit_summary, sprintf("R2 = %.3g, m = %.3g, p = %.3g", r2, coefs[2,1], coefs[2,4])) - plotEnsemble(ensemble.out, param, variable, pdfs=FALSE, xlab=NA, main=main) + main <- with(fit_summary, sprintf("R2 = %.3g, m = %.3g, p = %.3g", r2, coefs[2, 1], coefs[2, 4])) + plotEnsemble(ensemble.out, param, variable, pdfs = FALSE, xlab = NA, main = main) } - mtext(param, 1, outer = TRUE) + mtext(param, 1, outer = TRUE) } fitSummary <- function(ensemble.out, x, y) { - message('Ensemble out names: ', paste(names(ensemble.out), collapse = ', ')) - message('fitSummary x: ', x) - message('fitSummary y: ', y) + message("Ensemble out names: ", paste(names(ensemble.out), collapse = ", ")) + message("fitSummary x: ", x) + message("fitSummary y: ", y) form <- formula(sprintf("%s ~ %s", y, x)) - fitline <- lm(form, data=ensemble.out) + fitline <- lm(form, data = ensemble.out) fit_summary <- summary(fitline) coefs <- fit_summary$coefficients r2 <- fit_summary$r.squared diff --git a/shiny/global-sensitivity/server.R b/shiny/global-sensitivity/server.R index 1eb6ad2e2f4..12995bf9f81 100644 --- a/shiny/global-sensitivity/server.R +++ b/shiny/global-sensitivity/server.R @@ -9,86 +9,86 @@ message("Debugging!") message("Starting shiny server...") # Define server logic server <- shinyServer(function(input, output, session) { - message("Trying to connect to BETY...") - bety <- betyConnect() - # set the workflow id(s) - message("Getting workflow IDs...") - ids <- get_workflow_ids(bety, session) - updateSelectInput(session, "workflow_id", choices=ids) - workflow_id <- reactive({ - req(input$workflow_id) - workflow_id <- input$workflow_id - }) - - message("Getting run IDs...") - run_ids <- reactive(get_run_ids(bety, workflow_id())) - - 
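# Minimal standalone illustration (not part of this patch) of the reactive
# chain used in this server: each reactive() re-evaluates when an upstream
# value changes. shiny::reactiveConsole() lets the chain run outside a live
# app for experimentation; the ids below are made up.
library(shiny)
reactiveConsole(TRUE)
workflow_id <- reactiveVal("99000000001")
run_ids <- reactive(paste0(workflow_id(), "-run-", 1:2))
run_ids()                  # derived from the current workflow id
workflow_id("99000000002")
run_ids()                  # recomputed because workflow_id() changed
reactiveConsole(FALSE)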
var_names <- reactive({ - run_ids <- get_run_ids(bety, workflow_id()) - var_names <- get_var_names(bety, workflow_id(), run_ids[1]) - return(var_names) - }) - - param_names <- reactive({ - workflow_dir <- current_workflow()$folder - load(file.path(workflow_dir, "samples.Rdata")) - pft.names <- names(ensemble.samples) %>% .[. != "env"] - param_names <- paste0(pft.names, ".", colnames(ensemble.samples[[1]])) - return(param_names) - }) - - current_workflow <- reactive({ - id <- workflow_id() - current_workflow <- collect(workflow(bety, id)) - return(current_workflow) - }) - - # Update variable and parameter names based on current workflow ID - observe({ - updateSelectInput(session, "parameter", choices=param_names()) - updateSelectInput(session, "variable", choices=var_names()) - }) - - message("Loading ensemble output...") - ensemble.out <- reactive({ - req(current_workflow()) - workflow <- current_workflow() - if(nrow(workflow) > 0) { - workflow_dir <- workflow$folder - output_dir <- file.path(workflow_dir, "out") - settings <- XML::xmlToList(XML::xmlParse(file.path(workflow_dir, "pecan.CHECKED.xml"))) - # Load ensemble samples - ensemble.out <- load_ensemble(workflow_dir = workflow_dir, settings = settings, variable = var_names()) - return(ensemble.out) - } else { - return(NA) - } - }) - - output$ensemble_plot <- renderPlot({ - req(ensemble.out()) - plot_cols <- 3 - if(input$output_type == "Pairwise"){ - plotEnsemble(ensemble.out(), input$parameter, input$variable) - } else if (input$output_type == "All parameters"){ - plotAllParams(ensemble.out(), input$variable, param_names(), plot_cols=plot_cols) - } else if (input$output_type == "All variables"){ - plotAllVars(ensemble.out(), input$parameter, var_names(), plot_cols=plot_cols) - } - add_icon() - }) - - lm_fit <- reactive({ - req(ensemble.out()) - fitSummary(ensemble.out(), input$parameter, input$variable) - }) - - output$coef_table <- renderTable(lm_fit()$coefs, digits = 4, display = c("s", rep("g", 4))) - - output$r2 <- renderText({ - with(lm_fit(), sprintf("R2 = %.3g, Adj. R2 = %.3g", r2, adjr2)) - }) -}) # End shinyServer + message("Trying to connect to BETY...") + bety <- betyConnect() + # set the workflow id(s) + message("Getting workflow IDs...") + ids <- get_workflow_ids(bety, session) + updateSelectInput(session, "workflow_id", choices = ids) + workflow_id <- reactive({ + req(input$workflow_id) + workflow_id <- input$workflow_id + }) + + message("Getting run IDs...") + run_ids <- reactive(get_run_ids(bety, workflow_id())) + + var_names <- reactive({ + run_ids <- get_run_ids(bety, workflow_id()) + var_names <- get_var_names(bety, workflow_id(), run_ids[1]) + return(var_names) + }) + + param_names <- reactive({ + workflow_dir <- current_workflow()$folder + load(file.path(workflow_dir, "samples.Rdata")) + pft.names <- names(ensemble.samples) %>% .[. 
!= "env"] + param_names <- paste0(pft.names, ".", colnames(ensemble.samples[[1]])) + return(param_names) + }) + + current_workflow <- reactive({ + id <- workflow_id() + current_workflow <- collect(workflow(bety, id)) + return(current_workflow) + }) + + # Update variable and parameter names based on current workflow ID + observe({ + updateSelectInput(session, "parameter", choices = param_names()) + updateSelectInput(session, "variable", choices = var_names()) + }) + + message("Loading ensemble output...") + ensemble.out <- reactive({ + req(current_workflow()) + workflow <- current_workflow() + if (nrow(workflow) > 0) { + workflow_dir <- workflow$folder + output_dir <- file.path(workflow_dir, "out") + settings <- XML::xmlToList(XML::xmlParse(file.path(workflow_dir, "pecan.CHECKED.xml"))) + # Load ensemble samples + ensemble.out <- load_ensemble(workflow_dir = workflow_dir, settings = settings, variable = var_names()) + return(ensemble.out) + } else { + return(NA) + } + }) + + output$ensemble_plot <- renderPlot({ + req(ensemble.out()) + plot_cols <- 3 + if (input$output_type == "Pairwise") { + plotEnsemble(ensemble.out(), input$parameter, input$variable) + } else if (input$output_type == "All parameters") { + plotAllParams(ensemble.out(), input$variable, param_names(), plot_cols = plot_cols) + } else if (input$output_type == "All variables") { + plotAllVars(ensemble.out(), input$parameter, var_names(), plot_cols = plot_cols) + } + add_icon() + }) + + lm_fit <- reactive({ + req(ensemble.out()) + fitSummary(ensemble.out(), input$parameter, input$variable) + }) + + output$coef_table <- renderTable(lm_fit()$coefs, digits = 4, display = c("s", rep("g", 4))) + + output$r2 <- renderText({ + with(lm_fit(), sprintf("R2 = %.3g, Adj. R2 = %.3g", r2, adjr2)) + }) +}) # End shinyServer # runApp(port=5658, launch.browser=FALSE) diff --git a/shiny/global-sensitivity/ui.R b/shiny/global-sensitivity/ui.R index e3c1eabde22..f745582bb4f 100644 --- a/shiny/global-sensitivity/ui.R +++ b/shiny/global-sensitivity/ui.R @@ -2,30 +2,30 @@ library(shiny) # Define UI ui <- shinyUI( - fluidPage( - titlePanel("Global sensitivity analysis"), - sidebarLayout( - sidebarPanel( - selectInput("workflow_id", "Workflow ID", c()), - selectInput("output_type", "Output type", c("Pairwise", "All parameters", "All variables")), - conditionalPanel( - condition = "input.output_type == 'Pairwise' || input.output_type == 'All variables'", - selectInput("parameter", "Parameter (X)", c()) - ), - conditionalPanel( - condition = "input.output_type == 'Pairwise' || input.output_type == 'All parameters'", - selectInput("variable", "Variable (Y)", c()) - ) - ), - mainPanel( - p("If no plot or error message appears, please be patient. 
Loading ensemble output can take a few minutes."), - plotOutput("ensemble_plot", height="800"), - conditionalPanel( - condition = "input.output_type == 'Pairwise'", - tableOutput("coef_table"), - textOutput("r2") - ) - ) # End mainPanel - ) # End sidebarLayout - ) # End fluidPage + fluidPage( + titlePanel("Global sensitivity analysis"), + sidebarLayout( + sidebarPanel( + selectInput("workflow_id", "Workflow ID", c()), + selectInput("output_type", "Output type", c("Pairwise", "All parameters", "All variables")), + conditionalPanel( + condition = "input.output_type == 'Pairwise' || input.output_type == 'All variables'", + selectInput("parameter", "Parameter (X)", c()) + ), + conditionalPanel( + condition = "input.output_type == 'Pairwise' || input.output_type == 'All parameters'", + selectInput("variable", "Variable (Y)", c()) + ) + ), + mainPanel( + p("If no plot or error message appears, please be patient. Loading ensemble output can take a few minutes."), + plotOutput("ensemble_plot", height = "800"), + conditionalPanel( + condition = "input.output_type == 'Pairwise'", + tableOutput("coef_table"), + textOutput("r2") + ) + ) # End mainPanel + ) # End sidebarLayout + ) # End fluidPage ) # End shinyUI diff --git a/shiny/workflowPlot/helper.R b/shiny/workflowPlot/helper.R index dc23e4656ac..d79d9066f2f 100644 --- a/shiny/workflowPlot/helper.R +++ b/shiny/workflowPlot/helper.R @@ -1,25 +1,25 @@ # Helper function which checks and downloads required packages -checkAndDownload<-function(packageNames) { - for(packageName in packageNames) { - if(!isInstalled(packageName)) { +checkAndDownload <- function(packageNames) { + for (packageName in packageNames) { + if (!isInstalled(packageName)) { install.packages(packageName) - } - library(packageName,character.only=TRUE,quietly=TRUE,verbose=FALSE) + } + library(packageName, character.only = TRUE, quietly = TRUE, verbose = FALSE) } } -isInstalled <- function(mypkg){ - is.element(mypkg, installed.packages()[,1]) +isInstalled <- function(mypkg) { + is.element(mypkg, installed.packages()[, 1]) } # checkAndDownload(c('plotly','scales','dplyr')) -# Stashing Code for file upload to shiny app +# Stashing Code for file upload to shiny app # Based on https://shiny.rstudio.com/gallery/file-upload.html -# ui.R +# ui.R # tags$hr(), # fileInput('file1', 'Choose CSV File to upload data', -# accept=c('text/csv', -# 'text/comma-separated-values,text/plain', +# accept=c('text/csv', +# 'text/comma-separated-values,text/plain', # '.csv')), # checkboxInput('header', 'Header', TRUE), # radioButtons('sep', 'Separator', @@ -36,7 +36,7 @@ isInstalled <- function(mypkg){ # textInput("formatID", "Format ID for file (Default CSV)", "5000000002"), # actionButton("load_data", "Load External Data") -# server.R +# server.R # loadExternalData <-eventReactive(input$load_data,{ # inFile <- input$file1 # if (is.null(inFile)) @@ -45,7 +45,7 @@ isInstalled <- function(mypkg){ # # # paste0(nrow(externalData)) # # paste0(inFile$datapath) # # }) -# externalData <- read.csv(inFile$datapath, header=input$header, sep=input$sep, +# externalData <- read.csv(inFile$datapath, header=input$header, sep=input$sep, # quote=input$quote) # return(externalData) -# }) +# }) diff --git a/shiny/workflowPlot/server.R b/shiny/workflowPlot/server.R index 7c0d1e23e91..0ac54179d3f 100644 --- a/shiny/workflowPlot/server.R +++ b/shiny/workflowPlot/server.R @@ -1,43 +1,44 @@ # Load PEcAn specific packages, does this need to be so specific? 
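# Illustrative alternative (not from the PEcAn codebase) to the
# install-if-missing pattern used in helper.R above and in the lapply() call
# below: requireNamespace() checks availability without scanning the full
# installed.packages() matrix. `ensure_packages` is a hypothetical helper name.
ensure_packages <- function(pkgs) {
  for (pkg in pkgs) {
    if (!requireNamespace(pkg, quietly = TRUE)) {
      install.packages(pkg)
    }
    library(pkg, character.only = TRUE, quietly = TRUE)
  }
}
# Usage: ensure_packages(c("shiny", "plotly", "dplyr"))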
-lapply(c("PEcAn.visualization", - "PEcAn.DB", - "PEcAn.settings", - "PEcAn.benchmark", - "PEcAn.utils"),function(pkg){ - library(pkg,character.only = TRUE,quietly = TRUE) - } - ) +lapply(c( + "PEcAn.visualization", + "PEcAn.DB", + "PEcAn.settings", + "PEcAn.benchmark", + "PEcAn.utils" +), function(pkg) { + library(pkg, character.only = TRUE, quietly = TRUE) +}) # Shiny and plotting packages -lapply(c( "shiny", - "plotly", - "highcharter", - "shinyjs", - "dplyr", - "plyr", - "stringr", - "XML", - "xts", - "purrr", - "lubridate", - "listviewer", - "shinythemes", - "shinytoastr", - "shinyFiles", - "data.table", - "shinyWidgets" - ),function(pkg){ - if (!(pkg %in% installed.packages()[,1])){ - install.packages(pkg) - } - library(pkg,character.only = TRUE,quietly = TRUE) - } - ) +lapply(c( + "shiny", + "plotly", + "highcharter", + "shinyjs", + "dplyr", + "plyr", + "stringr", + "XML", + "xts", + "purrr", + "lubridate", + "listviewer", + "shinythemes", + "shinytoastr", + "shinyFiles", + "data.table", + "shinyWidgets" +), function(pkg) { + if (!(pkg %in% installed.packages()[, 1])) { + install.packages(pkg) + } + library(pkg, character.only = TRUE, quietly = TRUE) +}) # Maximum size of file allowed to be uploaded: 100MB -options(shiny.maxRequestSize=100*1024^2) +options(shiny.maxRequestSize = 100 * 1024^2) # Port forwarding # options(shiny.port = 6438) @@ -45,62 +46,62 @@ options(shiny.maxRequestSize=100*1024^2) # Define server logic server <- shinyServer(function(input, output, session) { - dbConnect <- reactiveValues(bety = NULL) # Try `betyConnect` function. # If it breaks, ask user to enter user, password and host information # then use the `db.open` function to connect to the database - tryCatch({ - #dbConnect$bety <- betyConnect() - #For betyConnect to break to test shiny modal - dbConnect$bety <- betyConnect(".") - }, - error = function(e){ - - #---- shiny modal---- - showModal( - modalDialog( - title = "Connect to Database", - fluidRow(column(12,textInput('user', h4('User:'), width = "100%", value = "bety"))), - fluidRow(column(12,textInput('password', h4('Password:'), width = "100%", value = "bety"))), - fluidRow(column(12,textInput('host', h4('Host:'), width = "100%", value = "psql-pecan.bu.edu"))), - fluidRow( - column(3), - column(6,br(),actionButton('submitInfo', 'Submit', width = "100%", class="btn-primary")), - column(3) - ), - footer = NULL, - size = 's' + tryCatch( + { + # dbConnect$bety <- betyConnect() + # For betyConnect to break to test shiny modal + dbConnect$bety <- betyConnect(".") + }, + error = function(e) { + #---- shiny modal---- + showModal( + modalDialog( + title = "Connect to Database", + fluidRow(column(12, textInput("user", h4("User:"), width = "100%", value = "bety"))), + fluidRow(column(12, textInput("password", h4("Password:"), width = "100%", value = "bety"))), + fluidRow(column(12, textInput("host", h4("Host:"), width = "100%", value = "psql-pecan.bu.edu"))), + fluidRow( + column(3), + column(6, br(), actionButton("submitInfo", "Submit", width = "100%", class = "btn-primary")), + column(3) + ), + footer = NULL, + size = "s" + ) ) - ) - - # --- connect to database --- - observeEvent(input$submitInfo,{ - tryCatch({ - - dbConnect$bety <- PEcAnDB::db.open( - params = list( - driver = "Postgres", - dbname ='bety' , - host =input$host, - user = input$user, - password = input$password - ) - ) - # For testing reactivity of bety connection - #dbConnect$bety <- betyConnect() + # --- connect to database --- + observeEvent(input$submitInfo, { + tryCatch( + { + 
dbConnect$bety <- PEcAnDB::db.open( + params = list( + driver = "Postgres", + dbname = "bety", + host = input$host, + user = input$user, + password = input$password + ) + ) - removeModal() - toastr_success("Connect to Database") - }, - error = function(e) { - toastr_error(title = "Error", conditionMessage(e)) - } - ) - }) - }) + # For testing reactivity of bety connection + # dbConnect$bety <- betyConnect() + + removeModal() + toastr_success("Connect to Database") + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) + } + ) + }) + } + ) # Hiding the animation and showing the application content @@ -125,18 +126,17 @@ server <- shinyServer(function(input, output, session) { source("server_files/pdf_viewer_server.R", local = TRUE) # Page 4: Benchmarking - observeEvent(input$load_model,{ + observeEvent(input$load_model, { req(input$all_run_id) ids_DF <- parse_ids_from_input_runID(input$all_run_id) button <- FALSE print(nrow(ids_DF)) - if(nrow(ids_DF) == 1){ + if (nrow(ids_DF) == 1) { source("server_files/benchmarking_server.R", local = TRUE) - }else if(nrow(ids_DF) > 1){ + } else if (nrow(ids_DF) > 1) { brr_message <- "Benchmarking currently only works when one run is selected." - }else{ + } else { brr_message <- "Cannot do benchmarking" } }) - - }) # Shiny server closes here +}) # Shiny server closes here diff --git a/shiny/workflowPlot/server_files/benchmarking_server.R b/shiny/workflowPlot/server_files/benchmarking_server.R index e9d1453f6fc..3384f172b22 100644 --- a/shiny/workflowPlot/server_files/benchmarking_server.R +++ b/shiny/workflowPlot/server_files/benchmarking_server.R @@ -3,150 +3,178 @@ # Create reactive value bm <- reactiveValues() -##----------------------------------------------------------------------------## -## Observe when the model run is loaded and check to see if it is registered +## ----------------------------------------------------------------------------## +## Observe when the model run is loaded and check to see if it is registered ## as a reference run. 
If not, create the record upon button click -observeEvent(input$load_model,{ - tryCatch({ - req(input$all_run_id) - ids_DF <- parse_ids_from_input_runID(input$all_run_id) - button <- FALSE - if(nrow(ids_DF) == 1){ - - # Check to see if the run has been saved as a reference run - ens_id <- dplyr::tbl(dbConnect$bety, 'runs') %>% dplyr::filter(id == ids_DF$runID) %>% dplyr::pull(ensemble_id) - ens_wf <- dplyr::tbl(dbConnect$bety, 'ensembles') %>% dplyr::filter(id == ens_id) %>% - dplyr::rename(ensemble_id = id) %>% - dplyr::left_join(.,tbl(dbConnect$bety, "workflows") %>% dplyr::rename(workflow_id = id), by="workflow_id") %>% dplyr::collect() - bm$model_vars <- var_names_all(dbConnect$bety,ids_DF$wID,ids_DF$runID) - - clean <- PEcAn.benchmark::clean_settings_BRR(inputfile = file.path(ens_wf$folder,"pecan.CHECKED.xml")) - settings_xml <- toString(PEcAn.settings::listToXml(clean, "pecan")) - ref_run <- PEcAn.benchmark::check_BRR(settings_xml, dbConnect$bety) - - if(length(ref_run) == 0){ - # If not registered, button appears with option to run create.BRR - brr_message <- sprintf("Would you like to save this run (run id = %.0f, ensemble id = %0.f) as a reference run?", ids_DF$runID, ens_id) - button <- TRUE - }else if(dim(ref_run)[1] == 1){ - bm$BRR <- ref_run %>% dplyr::rename(.,reference_run_id = id) - bm$BRR - brr_message <- sprintf("This run has been registered as a reference run (id = %.0f)", bm$BRR$reference_run_id) - }else if(dim(ref_run)[1] > 1){ # There shouldn't be more than one reference run per run - brr_message <- ("There is more than one reference run in the database for this run. Review for duplicates.") +observeEvent(input$load_model, { + tryCatch( + { + req(input$all_run_id) + ids_DF <- parse_ids_from_input_runID(input$all_run_id) + button <- FALSE + if (nrow(ids_DF) == 1) { + # Check to see if the run has been saved as a reference run + ens_id <- dplyr::tbl(dbConnect$bety, "runs") %>% + dplyr::filter(id == ids_DF$runID) %>% + dplyr::pull(ensemble_id) + ens_wf <- dplyr::tbl(dbConnect$bety, "ensembles") %>% + dplyr::filter(id == ens_id) %>% + dplyr::rename(ensemble_id = id) %>% + dplyr::left_join(., tbl(dbConnect$bety, "workflows") %>% dplyr::rename(workflow_id = id), by = "workflow_id") %>% + dplyr::collect() + bm$model_vars <- var_names_all(dbConnect$bety, ids_DF$wID, ids_DF$runID) + + clean <- PEcAn.benchmark::clean_settings_BRR(inputfile = file.path(ens_wf$folder, "pecan.CHECKED.xml")) + settings_xml <- toString(PEcAn.settings::listToXml(clean, "pecan")) + ref_run <- PEcAn.benchmark::check_BRR(settings_xml, dbConnect$bety) + + if (length(ref_run) == 0) { + # If not registered, button appears with option to run create.BRR + brr_message <- sprintf("Would you like to save this run (run id = %.0f, ensemble id = %0.f) as a reference run?", ids_DF$runID, ens_id) + button <- TRUE + } else if (dim(ref_run)[1] == 1) { + bm$BRR <- ref_run %>% dplyr::rename(., reference_run_id = id) + bm$BRR + brr_message <- sprintf("This run has been registered as a reference run (id = %.0f)", bm$BRR$reference_run_id) + } else if (dim(ref_run)[1] > 1) { # There shouldn't be more than one reference run per run + brr_message <- ("There is more than one reference run in the database for this run. Review for duplicates.") + } + } else if (nrow(ids_DF) > 1) { + brr_message <- "Benchmarking currently only works when one run is selected." + } else { + brr_message <- "Cannot do benchmarking" } - }else if(nrow(ids_DF) > 1){ - brr_message <- "Benchmarking currently only works when one run is selected." 
- }else{ - brr_message <- "Cannot do benchmarking" + + # This is redundant but better for debugging + bm$brr_message <- brr_message + bm$button_BRR <- button + bm$ens_wf <- ens_wf + bm$ready <- 0 + # Signaling the success of the operation + toastr_success("Check for reference run") + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) } - - # This is redundant but better for debugging - bm$brr_message <- brr_message - bm$button_BRR <- button - bm$ens_wf <- ens_wf - bm$ready <- 0 - #Signaling the success of the operation - toastr_success("Check for reference run") - }, - error = function(e) { - toastr_error(title = "Error", conditionMessage(e)) - }) + ) }) # When button to register run is clicked, create.BRR is run and the button is removed. -observeEvent(input$create_bm,{ - tryCatch({ - withProgress(message = 'Calculation in progress', - detail = 'This may take a while...', - value = 0,{ - bm$BRR <- PEcAn.benchmark::create_BRR(bm$ens_wf, con = dbConnect$bety) - incProgress( 10/ 15) - bm$brr_message <- sprintf("This run has been successfully registered as a reference run (id = %.0f)", bm$BRR$reference_run_id) - bm$button_BRR <- FALSE - bm$ready <- bm$ready + 1 - incProgress(5/15) - }) - #Signaling the success of the operation - toastr_success("Registered reference run") - }, - error = function(e) { - toastr_error(title = "Error", conditionMessage(e)) - }) +observeEvent(input$create_bm, { + tryCatch( + { + withProgress( + message = "Calculation in progress", + detail = "This may take a while...", + value = 0, + { + bm$BRR <- PEcAn.benchmark::create_BRR(bm$ens_wf, con = dbConnect$bety) + incProgress(10 / 15) + bm$brr_message <- sprintf("This run has been successfully registered as a reference run (id = %.0f)", bm$BRR$reference_run_id) + bm$button_BRR <- FALSE + bm$ready <- bm$ready + 1 + incProgress(5 / 15) + } + ) + # Signaling the success of the operation + toastr_success("Registered reference run") + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) + } + ) }) -observeEvent({ - bm$brr_message - bm$button_BRR -},{ - output$brr_message <- renderText({bm$brr_message}) - output$button_BRR <- renderUI({ - if(bm$button_BRR){actionButton("create_bm", "Create Benchmarking Reference Run")} - }) -}) +observeEvent( + { + bm$brr_message + bm$button_BRR + }, + { + output$brr_message <- renderText({ + bm$brr_message + }) + output$button_BRR <- renderUI({ + if (bm$button_BRR) { + actionButton("create_bm", "Create Benchmarking Reference Run") + } + }) + } +) -##----------------------------------------------------------------------------## -## Observe when the external data is loaded and check to see if any benchmarks +## ----------------------------------------------------------------------------## +## Observe when the external data is loaded and check to see if any benchmarks ## have already been run. In addition, setup and run new benchmarks. 
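# Toy illustration (made-up values, not part of this patch) of the variable
# matching done further below: model variable names are inner-joined to the
# input format's variable table on the shared pecan_name column, so only
# variables present in both the model output and the external data are kept.
model_vars <- data.frame(
  read_name  = c("NPP", "GPP"),
  pecan_name = c("NPP", "GPP"),
  stringsAsFactors = FALSE
)
format_vars <- data.frame(
  variable_id = c(1000000001, 1000000002),
  pecan_name  = c("GPP", "LAI"),
  stringsAsFactors = FALSE
)
dplyr::inner_join(model_vars, format_vars, by = "pecan_name")
# -> a single GPP row carrying its variable_id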
-observeEvent(input$load_data,{ - tryCatch({ - req(input$all_input_id) - req(input$all_site_id) - - bm$metrics <- dplyr::tbl(dbConnect$bety,'metrics') %>% dplyr::select(one_of("id","name","description")) %>% collect() - - # Need to write warning message that can only use one input id - bm$input <- getInputs(dbConnect$bety,c(input$all_site_id)) %>% - dplyr::filter(input_selection_list == input$all_input_id) - format <- PEcAn.DB::query.format.vars(bety = dbConnect$bety, input.id = bm$input$input_id) - - # If format has Null time.row and NAs for var$column_number, skip loading - if(is.null(format$time.row) & is.na(format$var$column_number)){ - print("File_format has Null time.row and NAs for var$column_number, skip loading") - toastr_warning("This file format cannot do benchmarking") - }else{ - # Are there more human readable names? - bm$vars <- dplyr::inner_join( - data.frame(read_name = names(bm$model_vars), - pecan_name = bm$model_vars, stringsAsFactors = FALSE), - format$vars[-grep("%",format$vars$storage_type), - c("variable_id", "pecan_name")], - #format$vars[c("variable_id", "pecan_name")], #for AmeriFlux.level2.h.nc, format$vars$storage_type is NA - by = "pecan_name") - - #This will be a longer set of conditions - bm$ready <- bm$ready + 1 - #Signaling the success of the operation - toastr_success("Check for benchmarks") +observeEvent(input$load_data, { + tryCatch( + { + req(input$all_input_id) + req(input$all_site_id) + + bm$metrics <- dplyr::tbl(dbConnect$bety, "metrics") %>% + dplyr::select(one_of("id", "name", "description")) %>% + collect() + + # Need to write warning message that can only use one input id + bm$input <- getInputs(dbConnect$bety, c(input$all_site_id)) %>% + dplyr::filter(input_selection_list == input$all_input_id) + format <- PEcAn.DB::query.format.vars(bety = dbConnect$bety, input.id = bm$input$input_id) + + # If format has Null time.row and NAs for var$column_number, skip loading + if (is.null(format$time.row) & is.na(format$var$column_number)) { + print("File_format has Null time.row and NAs for var$column_number, skip loading") + toastr_warning("This file format cannot do benchmarking") + } else { + # Are there more human readable names? 
+ bm$vars <- dplyr::inner_join( + data.frame( + read_name = names(bm$model_vars), + pecan_name = bm$model_vars, stringsAsFactors = FALSE + ), + format$vars[ + -grep("%", format$vars$storage_type), + c("variable_id", "pecan_name") + ], + # format$vars[c("variable_id", "pecan_name")], #for AmeriFlux.level2.h.nc, format$vars$storage_type is NA + by = "pecan_name" + ) + + # This will be a longer set of conditions + bm$ready <- bm$ready + 1 + # Signaling the success of the operation + toastr_success("Check for benchmarks") + } + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) } - }, - error = function(e) { - toastr_error(title = "Error", conditionMessage(e)) - }) + ) }) -observeEvent(bm$ready,{ - if(bm$ready > 0){ +observeEvent(bm$ready, { + if (bm$ready > 0) { bm$load_results <- 0 bm$results_message <- "No benchmarks have been calculated yet" - bm$blarg_message <- "No benchmarks have been calculated yet" - - - if(exists("output$results_table")) output$results_table<- NULL - if(exists("output$bm_plots")) output$bm_plots <- NULL - if(exists("output$bmPlot")) output$bmPlot <- NULL - + bm$blarg_message <- "No benchmarks have been calculated yet" + + + if (exists("output$results_table")) output$results_table <- NULL + if (exists("output$bm_plots")) output$bm_plots <- NULL + if (exists("output$bmPlot")) output$bmPlot <- NULL + bm.path <- file.path(bm$ens_wf$folder, "benchmarking", as.integer(bm$input$input_id)) - bench.out <- grep("benchmarking.output.Rdata", - dir(bm.path, full.names = TRUE) , value = TRUE) # Look for benchmarking directory - if(length(bench.out) == 1){ + bench.out <- grep("benchmarking.output.Rdata", + dir(bm.path, full.names = TRUE), + value = TRUE + ) # Look for benchmarking directory + if (length(bench.out) == 1) { bm$load_results <- bm$load_results + 1 bm$results_message <- "Benchmarks have already been calculated for this combination of model output and external data.
To see the results, look at the Benchmarking Scores and Benchmarking Plots tabs.
To calculate more benchmarks, select variables and metrics below.
" - }else{ + } else { bm$load_results <- 0 bm$results_message <- "No benchmarks have been calculated yet" } @@ -154,329 +182,374 @@ observeEvent(bm$ready,{ }) -observeEvent({ - bm$ready - bm$metrics - bm$vars -},{ - - plot_ind <- grep("_plot",bm$metrics$name) - - variable_choices <- bm$vars$variable_id - names(variable_choices) <- bm$vars$read_name - metrics_choices <- bm$metrics$id[-plot_ind] - names(metrics_choices) <- bm$metrics$description[-plot_ind] - plot_choices <- bm$metrics$id[plot_ind] - names(plot_choices) <- bm$metrics$description[plot_ind] - - output$bm_inputs <- renderUI({ - if(bm$ready > 0){ - wellPanel( +observeEvent( + { + bm$ready + bm$metrics + bm$vars + }, + { + plot_ind <- grep("_plot", bm$metrics$name) + + variable_choices <- bm$vars$variable_id + names(variable_choices) <- bm$vars$read_name + metrics_choices <- bm$metrics$id[-plot_ind] + names(metrics_choices) <- bm$metrics$description[-plot_ind] + plot_choices <- bm$metrics$id[plot_ind] + names(plot_choices) <- bm$metrics$description[plot_ind] + + output$bm_inputs <- renderUI({ + if (bm$ready > 0) { + wellPanel( + fluidRow( + column( + 4, + pickerInput("vars", "Variables", + choices = variable_choices, + multiple = TRUE, + options = list(`actions-box` = TRUE, `dropup-auto` = FALSE) + ) + ), + column( + 4, + pickerInput("metrics", "Numerical Metrics", + choices = metrics_choices, + multiple = TRUE, + options = list(`actions-box` = TRUE, `dropup-auto` = FALSE) + ) + ), + column( + 4, + pickerInput("plots", "Plot Metrics", + choices = plot_choices, + multiple = TRUE, + options = list(`actions-box` = TRUE, `dropup-auto` = FALSE) + ) + ) + ) + ) + } + }) + + output$calc_bm <- renderUI({ + if (bm$ready > 0) { fluidRow( - column(4, - pickerInput("vars", "Variables", - choices = variable_choices, - multiple = TRUE, - options = list(`actions-box` = TRUE, `dropup-auto` = FALSE)) - ), - column(4, - pickerInput("metrics", "Numerical Metrics", - choices = metrics_choices, - multiple = TRUE, - options = list(`actions-box` = TRUE, `dropup-auto` = FALSE)) + column(5), + column( + 2, + shinyjs::disabled( + actionButton("calc_bm_button", "Calculate", + icon = icon("calculator"), + width = "100%", class = "btn-primary" + ) + ) ), - column(4, - pickerInput("plots", "Plot Metrics", - choices = plot_choices, - multiple = TRUE, - options = list(`actions-box` = TRUE, `dropup-auto` = FALSE)) - ) + column(5) ) - ) + } + }) + } +) + +observeEvent( + { + input$vars + input$metrics + input$plots + }, + { + v <- ifelse(is.null(input$vars), 0, length(input$vars)) + n <- ifelse(is.null(input$metrics), 0, length(input$metrics)) + p <- ifelse(is.null(input$plots), 0, length(input$plots)) + m <- n + p + # output$report <- renderText(sprintf("Number of vars: %0.f, Number of metrics: %0.f", v,m)) + if (v > 0 & m > 0) { + shinyjs::enable("calc_bm_button") + bm$bm_vars <- input$vars + bm$bm_metrics <- c() + if (n > 0) bm$bm_metrics <- c(bm$bm_metrics, input$metrics) + if (p > 0) bm$bm_metrics <- c(bm$bm_metrics, input$plots) } - }) - - output$calc_bm <- renderUI({ - if(bm$ready > 0){ - fluidRow( - column(5), - column(2, - shinyjs::disabled( - actionButton('calc_bm_button', "Calculate", icon = icon("calculator"), - width = "100%", class="btn-primary") - ) - ), - column(5) + }, + ignoreNULL = FALSE +) + +observeEvent(input$calc_bm_button, { + tryCatch( + { + req(input$all_input_id) + req(input$all_site_id) + req(input$host) + req(input$user) + req(input$password) + + withProgress( + message = "Calculation in progress", + detail = "This may take a 
while...", + value = 0, + { + inputs_df <- getInputs(dbConnect$bety, c(input$all_site_id)) %>% + dplyr::filter(input_selection_list == input$all_input_id) + output$inputs_df_title <- renderText("Benchmark Input Data") + output$inputs_df_table <- DT::renderDataTable( + DT::datatable(inputs_df, + rownames = FALSE, + options = list( + dom = "t", + scrollX = TRUE, + initComplete = DT::JS( + "function(settings, json) {", + "$(this.api().table().header()).css({'background-color': '#404040', 'color': '#fff'});", + "}" + ) + ) + ) + ) + + + # config.list <- PEcAn.utils::read_web_config("../../web/config.php") + # output$config_list_table <- renderTable(as.data.frame.list(config.list)) + + + bm$bm_settings$info <- list(userid = 1000000003) # This is my user id. I have no idea how to get people to log in to their accounts through the web interface and right now the benchmarking code has sections dependent on user id - I will fix this. + # bm$bm_settings$database <- list( + # bety = list( + # user = config.list$db_bety_username, + # password = config.list$db_bety_password, + # host = config.list$db_bety_hostname, + # dbname = config.list$db_bety_database, + # driver = config.list$db_bety_type, + # write = TRUE + # ), + # dbfiles = config.list$dbfiles_folder + # ) + + bm$bm_settings$database <- list( + bety = list( + user = input$user, + password = input$password, + host = input$host, + dbname = "bety", + driver = "pgsql", + write = TRUE + ), + dbfiles = "/home/carya/output/dbfiles" + ) + bm$bm_settings$benchmarking <- list( + ensemble_id = bm$ens_wf$ensemble_id, + new_run = FALSE + ) + + + for (i in seq_along(bm$bm_vars)) { + benchmark <- list( + input_id = inputs_df$input_id, + variable_id = bm$bm_vars[i], + site_id = inputs_df$site_id, + metrics = list() + ) + for (j in seq_along(bm$bm_metrics)) { + benchmark$metrics <- append(benchmark$metrics, list(metric_id = bm$bm_metrics[j])) + } + bm$bm_settings$benchmarking <- append(bm$bm_settings$benchmarking, list(benchmark = benchmark)) + } + + + disable("calc_bm_button") + output$print_bm_settings <- renderPrint(print(bm$bm_settings)) + + + basePath <- dplyr::tbl(dbConnect$bety, "workflows") %>% + dplyr::filter(id %in% bm$ens_wf$workflow_id) %>% + dplyr::pull(folder) + + settings_path <- file.path(basePath, "pecan.BENCH.xml") + saveXML(PEcAn.settings::listToXml(bm$bm_settings, "pecan"), file = settings_path) + bm$settings_path <- settings_path + + output$settings_path <- renderText({ + sprintf("Benchmarking settings have been saved here: %s", bm$settings_path) + }) + incProgress(1 / 2) + + ############################################################################## + # Run the benchmarking functions + # The following seven functions are essentially + # "the benchmarking workflow" in its entirety + + settings <- PEcAn.settings::read.settings(bm$settings_path) + bm.settings <- PEcAn.benchmark::define_benchmark(settings, dbConnect$bety) + settings <- PEcAn.benchmark::add_workflow_info(settings, dbConnect$bety) + + settings$benchmarking <- PEcAn.benchmark::bm_settings2pecan_settings(bm.settings) + settings <- PEcAn.benchmark::read_settings_BRR(settings) + + # This is a hack to get old runs that don't have the right pecan.CHECKED.xml data working + if (is.null(settings$settings.info)) { + settings$settings.info <- list( + deprecated.settings.fixed = TRUE, + settings.updated = TRUE, + checked = TRUE + ) + } + + settings <- PEcAn.settings::prepare.settings(settings) + settings$host$name <- "localhost" # This may not be the best place to set this, but it 
isn't set by any of the other functions. Another option is to have it set by the default_hostname function (if input is NULL, set to localhost) + # browser() + # results <-calc_benchmark(settings, bety = dbConnect$bety) + # results <- PEcAn.settings::papply(settings, function(x) calc_benchmark(x, bety, start_year = input$start_year, end_year = input$end_year)) + results <- PEcAn.settings::papply(settings, function(x) { + calc_benchmark(settings = x, bety = dbConnect$bety) + }) + bm$load_results <- bm$load_results + 1 + + incProgress(1 / 2) + } ) + # Signaling the success of the operation + toastr_success("Calculate benchmarks") + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) } - }) + ) }) -observeEvent({ - input$vars - input$metrics - input$plots -},{ - v <- ifelse(is.null(input$vars),0,length(input$vars)) - n <- ifelse(is.null(input$metrics),0,length(input$metrics)) - p <- ifelse(is.null(input$plots),0,length(input$plots)) - m <- n + p - #output$report <- renderText(sprintf("Number of vars: %0.f, Number of metrics: %0.f", v,m)) - if(v > 0 & m > 0){ - shinyjs::enable("calc_bm_button") - bm$bm_vars <- input$vars - bm$bm_metrics <- c() - if(n > 0) bm$bm_metrics <- c(bm$bm_metrics, input$metrics) - if(p > 0) bm$bm_metrics <- c(bm$bm_metrics, input$plots) - } - -}, ignoreNULL = FALSE) - -observeEvent(input$calc_bm_button,{ - tryCatch({ - req(input$all_input_id) - req(input$all_site_id) - req(input$host) - req(input$user) - req(input$password) - - withProgress(message = 'Calculation in progress', - detail = 'This may take a while...', - value = 0,{ - - inputs_df <- getInputs(dbConnect$bety,c(input$all_site_id)) %>% - dplyr::filter(input_selection_list == input$all_input_id) - output$inputs_df_title <- renderText("Benchmark Input Data") - output$inputs_df_table <- DT::renderDataTable( - DT::datatable(inputs_df, - rownames = FALSE, - options = list( - dom = 't', - scrollX = TRUE, - initComplete = DT::JS( - "function(settings, json) {", - "$(this.api().table().header()).css({'background-color': '#404040', 'color': '#fff'});", - "}"))) - ) - - - # config.list <- PEcAn.utils::read_web_config("../../web/config.php") - # output$config_list_table <- renderTable(as.data.frame.list(config.list)) - - - bm$bm_settings$info <- list(userid = 1000000003) # This is my user id. I have no idea how to get people to log in to their accounts through the web interface and right now the benchmarking code has sections dependent on user id - I will fix this. 
- # bm$bm_settings$database <- list( - # bety = list( - # user = config.list$db_bety_username, - # password = config.list$db_bety_password, - # host = config.list$db_bety_hostname, - # dbname = config.list$db_bety_database, - # driver = config.list$db_bety_type, - # write = TRUE - # ), - # dbfiles = config.list$dbfiles_folder - # ) - - bm$bm_settings$database <- list( - bety = list( - user = input$user, - password = input$password, - host = input$host, - dbname = "bety", - driver = "pgsql", - write = TRUE - ), - dbfiles = "/home/carya/output/dbfiles" - ) - bm$bm_settings$benchmarking <- list( - ensemble_id = bm$ens_wf$ensemble_id, - new_run = FALSE - ) - - - for(i in seq_along(bm$bm_vars)){ - benchmark <- list( - input_id = inputs_df$input_id, - variable_id = bm$bm_vars[i], - site_id = inputs_df$site_id, - metrics = list() - ) - for(j in seq_along(bm$bm_metrics)){ - benchmark$metrics = append(benchmark$metrics, list(metric_id = bm$bm_metrics[j])) - } - bm$bm_settings$benchmarking <- append(bm$bm_settings$benchmarking,list(benchmark = benchmark)) - } - - - disable("calc_bm_button") - output$print_bm_settings <- renderPrint(print(bm$bm_settings)) - - - basePath <- dplyr::tbl(dbConnect$bety, 'workflows') %>% dplyr::filter(id %in% bm$ens_wf$workflow_id) %>% dplyr::pull(folder) - - settings_path <- file.path(basePath, "pecan.BENCH.xml") - saveXML(PEcAn.settings::listToXml(bm$bm_settings,"pecan"), file = settings_path) - bm$settings_path <- settings_path - - output$settings_path <- renderText({ - sprintf("Benchmarking settings have been saved here: %s", bm$settings_path) - }) - incProgress(1/2) - - ############################################################################## - # Run the benchmarking functions - # The following seven functions are essentially - # "the benchmarking workflow" in its entirety - - settings <- PEcAn.settings::read.settings(bm$settings_path) - bm.settings <- PEcAn.benchmark::define_benchmark(settings,dbConnect$bety) - settings <- PEcAn.benchmark::add_workflow_info(settings,dbConnect$bety) - - settings$benchmarking <- PEcAn.benchmark::bm_settings2pecan_settings(bm.settings) - settings <- PEcAn.benchmark::read_settings_BRR(settings) - - # This is a hack to get old runs that don't have the right pecan.CHECKED.xml data working - if(is.null(settings$settings.info)){ - settings$settings.info <- list( - deprecated.settings.fixed = TRUE, - settings.updated = TRUE, - checked = TRUE - ) - } - - settings <- PEcAn.settings::prepare.settings(settings) - settings$host$name <- "localhost" # This may not be the best place to set this, but it isn't set by any of the other functions. 
Another option is to have it set by the default_hostname function (if input is NULL, set to localhost) - # browser() - #results <-calc_benchmark(settings, bety = dbConnect$bety) - # results <- PEcAn.settings::papply(settings, function(x) calc_benchmark(x, bety, start_year = input$start_year, end_year = input$end_year)) - results <- PEcAn.settings::papply(settings, function(x) - calc_benchmark(settings = x, bety = dbConnect$bety)) - bm$load_results <- bm$load_results + 1 - - incProgress(1/2) - }) - #Signaling the success of the operation - toastr_success("Calculate benchmarks") - }, - error = function(e) { - toastr_error(title = "Error", conditionMessage(e)) +observeEvent(bm$results_message, { + output$results_message <- renderText({ + bm$results_message }) - }) -observeEvent(bm$results_message,{ - output$results_message <- renderText({bm$results_message}) -}) +observeEvent(bm$load_results, { + tryCatch( + { + withProgress( + message = "Calculation in progress", + detail = "This may take a while...", + value = 0, + { + if (bm$load_results > 0) { + load(file.path(bm$ens_wf$folder, "benchmarking", bm$input$input_id, "benchmarking.output.Rdata")) + incProgress(1 / 3) -observeEvent(bm$load_results,{ - tryCatch({ - withProgress(message = 'Calculation in progress', - detail = 'This may take a while...', - value = 0,{ - if(bm$load_results > 0){ - load(file.path(bm$ens_wf$folder,"benchmarking",bm$input$input_id,"benchmarking.output.Rdata")) - incProgress(1/3) - - bm$bench.results <- result.out$bench.results - bm$aligned.dat <- result.out$aligned.dat - plots_used <- grep("plot", result.out$bench.results$metric) - output$results_df_title <- renderText("Benchmark Scores") - output$results_table <- DT::renderDataTable( - DT::datatable(bm$bench.results[-plots_used,], - rownames = FALSE, - options = list(dom = 'ft', - initComplete = JS( - "function(settings, json) {", - "$(this.api().table().header()).css({'background-color': '#404040', 'color': '#fff'});", - "}") - ))) - incProgress(1/3) - - if(length(plots_used) > 0){ - plot_list <- apply( - result.out$bench.results[plots_used,c("variable", "metric")], - 1, paste, collapse = " ") - selection <- as.list(as.numeric(names(plot_list))) - names(selection) <- as.vector(plot_list) - output$plots_tilte <- renderText("Benchmark Plots") - output$bm_plots <- renderUI({ - selectInput("bench_plot", label = NULL, multiple = FALSE, - choices = selection) - }) - output$plotlybars <- renderUI({ - div( - id = "plot-container", - div( - class = "plotlybars-wrapper", - div( - class = "plotlybars", - div(class = "plotlybars-bar b1"), - div(class = "plotlybars-bar b2"), - div(class = "plotlybars-bar b3"), - div(class = "plotlybars-bar b4"), - div(class = "plotlybars-bar b5"), - div(class = "plotlybars-bar b6"), - div(class = "plotlybars-bar b7") - ), - div(class = "plotlybars-text", - p("Updating the plot. 
Hold tight!")) - ) - ) - }) - } - incProgress(1/3) - } - incProgress(1) - }) - #Signaling the success of the operation - toastr_success("Calculate Scores") - }, - error = function(e) { - toastr_error(title = "Error", conditionMessage(e)) - }) + bm$bench.results <- result.out$bench.results + bm$aligned.dat <- result.out$aligned.dat + plots_used <- grep("plot", result.out$bench.results$metric) + output$results_df_title <- renderText("Benchmark Scores") + output$results_table <- DT::renderDataTable( + DT::datatable(bm$bench.results[-plots_used, ], + rownames = FALSE, + options = list( + dom = "ft", + initComplete = JS( + "function(settings, json) {", + "$(this.api().table().header()).css({'background-color': '#404040', 'color': '#fff'});", + "}" + ) + ) + ) + ) + incProgress(1 / 3) + + if (length(plots_used) > 0) { + plot_list <- apply( + result.out$bench.results[plots_used, c("variable", "metric")], + 1, paste, + collapse = " " + ) + selection <- as.list(as.numeric(names(plot_list))) + names(selection) <- as.vector(plot_list) + output$plots_tilte <- renderText("Benchmark Plots") + output$bm_plots <- renderUI({ + selectInput("bench_plot", + label = NULL, multiple = FALSE, + choices = selection + ) + }) + output$plotlybars <- renderUI({ + div( + id = "plot-container", + div( + class = "plotlybars-wrapper", + div( + class = "plotlybars", + div(class = "plotlybars-bar b1"), + div(class = "plotlybars-bar b2"), + div(class = "plotlybars-bar b3"), + div(class = "plotlybars-bar b4"), + div(class = "plotlybars-bar b5"), + div(class = "plotlybars-bar b6"), + div(class = "plotlybars-bar b7") + ), + div( + class = "plotlybars-text", + p("Updating the plot. Hold tight!") + ) + ) + ) + }) + } + incProgress(1 / 3) + } + incProgress(1) + } + ) + # Signaling the success of the operation + toastr_success("Calculate Scores") + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) + } + ) }) -observeEvent(input$bench_plot,{ - tryCatch({ - withProgress(message = 'Calculation in progress', - detail = 'This may take a while...', - value = 0,{ - var <- bm$bench.results[input$bench_plot,"variable"] - metric_dat = bm$aligned.dat[[var]] - names(metric_dat)[grep("[.]m", names(metric_dat))] <- "model" - names(metric_dat)[grep("[.]o", names(metric_dat))] <- "obvs" - names(metric_dat)[grep("posix", names(metric_dat))] <- "time" - incProgress(2 / 15) - - fcn <- get(paste0("metric_",bm$bench.results[input$bench_plot,"metric"]), asNamespace("PEcAn.benchmark")) - # fcn <- paste0("metric_",bm$bench.results[input$bench_plot,"metric"]) - args <- list( - metric_dat = metric_dat, - var = var, - filename = NA, - draw.plot = TRUE - ) - p <- do.call(fcn, args) - incProgress(9 / 15) - - output$bmPlot <- renderPlotly({ - plotly::ggplotly(p)%>% - layout(height = "100%", width = "100%") - }) - incProgress(4 / 15) - }) - #Signaling the success of the operation - toastr_success("Generate Plots") - }, - error = function(e) { - toastr_error(title = "Error", conditionMessage(e)) - }) +observeEvent(input$bench_plot, { + tryCatch( + { + withProgress( + message = "Calculation in progress", + detail = "This may take a while...", + value = 0, + { + var <- bm$bench.results[input$bench_plot, "variable"] + metric_dat <- bm$aligned.dat[[var]] + names(metric_dat)[grep("[.]m", names(metric_dat))] <- "model" + names(metric_dat)[grep("[.]o", names(metric_dat))] <- "obvs" + names(metric_dat)[grep("posix", names(metric_dat))] <- "time" + incProgress(2 / 15) + + fcn <- get(paste0("metric_", bm$bench.results[input$bench_plot, 
"metric"]), asNamespace("PEcAn.benchmark")) + # fcn <- paste0("metric_",bm$bench.results[input$bench_plot,"metric"]) + args <- list( + metric_dat = metric_dat, + var = var, + filename = NA, + draw.plot = TRUE + ) + p <- do.call(fcn, args) + incProgress(9 / 15) + + output$bmPlot <- renderPlotly({ + plotly::ggplotly(p) %>% + layout(height = "100%", width = "100%") + }) + incProgress(4 / 15) + } + ) + # Signaling the success of the operation + toastr_success("Generate Plots") + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) + } + ) }) ################################################ # Action buttons to select all variables/metrics but that currently aren't working -# -# +# +# # observeEvent(input$selectall.var,{ # clicks <- as.numeric(input$selectall.var) # output$actionclickCount <- renderText({ @@ -493,7 +566,7 @@ observeEvent(input$bench_plot,{ # shinyjs::html("labelText", "Label") # } # }) -# +# # # Numerical metrics # if(input$selectall.num == 0){ # return(NULL) @@ -507,7 +580,7 @@ observeEvent(input$bench_plot,{ # selected = bm$metrics$description[-grep("_plot",bm$metrics$name)]) # shinyjs::html("labelText", "Label") # } -# +# # # Plot metrics # if(input$selectall.plot == 0){ # return(NULL) diff --git a/shiny/workflowPlot/server_files/history_server.R b/shiny/workflowPlot/server_files/history_server.R index 45887a82804..436e3bde25b 100644 --- a/shiny/workflowPlot/server_files/history_server.R +++ b/shiny/workflowPlot/server_files/history_server.R @@ -1,113 +1,123 @@ # db.query query statement -cmd <- paste0("SELECT workflows.id, workflows.folder, workflows.start_date, workflows.end_date, workflows.started_at, workflows.finished_at, attributes.value," , - "CONCAT(coalesce(sites.id, -99), ' / ', coalesce(sites.sitename, ''), ', ', ', ') AS sitename, " , - "CONCAT(coalesce(models.model_name, ''), ' ', coalesce(models.revision, '')) AS modelname, modeltypes.name " , - "FROM workflows " , - "LEFT OUTER JOIN sites on workflows.site_id=sites.id " , - "LEFT OUTER JOIN models on workflows.model_id=models.id " , - "LEFT OUTER JOIN modeltypes on models.modeltype_id=modeltypes.id " , - "LEFT OUTER JOIN attributes ON workflows.id=attributes.container_id AND attributes.container_type='workflows' ") +cmd <- paste0( + "SELECT workflows.id, workflows.folder, workflows.start_date, workflows.end_date, workflows.started_at, workflows.finished_at, attributes.value,", + "CONCAT(coalesce(sites.id, -99), ' / ', coalesce(sites.sitename, ''), ', ', ', ') AS sitename, ", + "CONCAT(coalesce(models.model_name, ''), ' ', coalesce(models.revision, '')) AS modelname, modeltypes.name ", + "FROM workflows ", + "LEFT OUTER JOIN sites on workflows.site_id=sites.id ", + "LEFT OUTER JOIN models on workflows.model_id=models.id ", + "LEFT OUTER JOIN modeltypes on models.modeltype_id=modeltypes.id ", + "LEFT OUTER JOIN attributes ON workflows.id=attributes.container_id AND attributes.container_type='workflows' " +) observeEvent(input$workflowclassrand, { - tryCatch({ - history <- PEcAn.DB::db.query(cmd, dbConnect$bety) - workflow_id <- strsplit(input$workflowselected, "_")[[1]] - workflow_id <- trimws(workflow_id[2]) - val.jason <- history$value[history$id == workflow_id] - fld <- history$folder[history$id == workflow_id] - - if (!is.na(val.jason)) { - # server and ui for the listviewer - output$jsed <- renderJsonedit({ - jsonedit(jsonlite::fromJSON(val.jason)) - - }) - - showModal(modalDialog( - title = "Details", - tabsetPanel( - tabPanel("Info", br(), - jsoneditOutput("jsed", height = 
"500px") - )), - easyClose = TRUE, - footer = NULL, - size = 'l' - )) + tryCatch( + { + history <- PEcAn.DB::db.query(cmd, dbConnect$bety) + workflow_id <- strsplit(input$workflowselected, "_")[[1]] + workflow_id <- trimws(workflow_id[2]) + val.jason <- history$value[history$id == workflow_id] + fld <- history$folder[history$id == workflow_id] + + if (!is.na(val.jason)) { + # server and ui for the listviewer + output$jsed <- renderJsonedit({ + jsonedit(jsonlite::fromJSON(val.jason)) + }) + + showModal(modalDialog( + title = "Details", + tabsetPanel( + tabPanel( + "Info", br(), + jsoneditOutput("jsed", height = "500px") + ) + ), + easyClose = TRUE, + footer = NULL, + size = "l" + )) + } + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) } - }, - error = function(e){ - toastr_error(title = "Error", conditionMessage(e)) - }) + ) }) observeEvent(input$workflow_explor_classrand, { - tryCatch({ - #history <- PEcAn.DB::db.query(cmd, dbConnect$bety) - workflow_id <- strsplit(input$workflows_explor_selected, "_")[[1]] - - workflow_id <- trimws(workflow_id[1]) - - updateSelectizeInput(session, - "all_workflow_id", - choices = c(input$all_workflow_id, workflow_id), - selected = c(input$all_workflow_id, workflow_id)) + tryCatch( + { + # history <- PEcAn.DB::db.query(cmd, dbConnect$bety) + workflow_id <- strsplit(input$workflows_explor_selected, "_")[[1]] + + workflow_id <- trimws(workflow_id[1]) - }, - error = function(e){ - toastr_error(title = "Error", conditionMessage(e)) - }) + updateSelectizeInput(session, + "all_workflow_id", + choices = c(input$all_workflow_id, workflow_id), + selected = c(input$all_workflow_id, workflow_id) + ) + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) + } + ) }) observeEvent(input$submitInfo, { - tryCatch({ - history <- PEcAn.DB::db.query(cmd, dbConnect$bety) - output$historyfiles <- DT::renderDT( - DT::datatable(history %>% - dplyr::select(-value, -modelname) %>% - mutate(id = id %>% as.character()) %>% - mutate(id=paste0(""), - Action= paste0('
+ tryCatch( + { + history <- PEcAn.DB::db.query(cmd, dbConnect$bety) + output$historyfiles <- DT::renderDT( + DT::datatable( + history %>% + dplyr::select(-value, -modelname) %>% + mutate(id = id %>% as.character()) %>% + mutate( + id = paste0(""), + Action = paste0('
') - - )%>% - dplyr::rename(model=name), - escape = F, - filter = 'top', - selection="none", - style='bootstrap', - rownames = FALSE, - options = list( - autowidth = TRUE, - columnDefs = list(list(width = '90px', targets = -1)), #set column width for action button - dom = 'ftp', - pageLength = 10, - scrollX = FALSE, - scrollCollapse = FALSE, - initComplete = DT::JS( - "function(settings, json) {", - "$(this.api().table().header()).css({'background-color': '#000', 'color': '#fff'});", - "}") - ) - ) - - ) - toastr_success("Generate history runs") - }, - error = function(e) { - toastr_error(title = "Error in History Runs Page", message = "" - #, conditionMessage(e) - ) - }) - +
  • 0, 'Select Load Data'), - need(length(input$all_site_id) == 1, 'Select only ONE Site ID'), - need(length(input$all_input_id) == 1, 'Select only ONE Input ID'), - need(input$load_data > 0, 'Select Load External Data') + need(input$load_model > 0, "Select Load Data"), + need(length(input$all_site_id) == 1, "Select only ONE Site ID"), + need(length(input$all_input_id) == 1, "Select only ONE Input ID"), + need(input$load_data > 0, "Select Load External Data") ) - highchart() %>% - hc_add_series(data = c(), showInLegend = F) %>% - hc_xAxis(title = list(text = "Time")) %>% - hc_yAxis(title = list(text = "y")) %>% - hc_title(text = "You are ready to plot!") %>% + highchart() %>% + hc_add_series(data = c(), showInLegend = F) %>% + hc_xAxis(title = list(text = "Time")) %>% + hc_yAxis(title = list(text = "y")) %>% + hc_title(text = "You are ready to plot!") %>% hc_add_theme(hc_theme_flat()) }) @@ -21,38 +21,50 @@ output$modelDataPlotscatter <- renderHighchart({ validate( need(length(input$all_workflow_id) == 1, "Select only ONE workflow ID"), need(length(input$all_run_id) == 1, "Select only ONE run ID"), - need(input$load_model > 0, 'Select Load Data'), - need(length(input$all_site_id) == 1, 'Select only ONE Site ID'), - need(length(input$all_input_id) == 1, 'Select only ONE Input ID'), - need(input$load_data > 0, 'Select Load External Data') + need(input$load_model > 0, "Select Load Data"), + need(length(input$all_site_id) == 1, "Select only ONE Site ID"), + need(length(input$all_input_id) == 1, "Select only ONE Input ID"), + need(input$load_data > 0, "Select Load External Data") ) - highchart() %>% - hc_add_series(data = c(), showInLegend = F) %>% - hc_xAxis(title = list(text = "Time")) %>% - hc_yAxis(title = list(text = "y")) %>% - hc_title(text = "You are ready to plot!") %>% + highchart() %>% + hc_add_series(data = c(), showInLegend = F) %>% + hc_xAxis(title = list(text = "Time")) %>% + hc_yAxis(title = list(text = "y")) %>% + hc_title(text = "You are ready to plot!") %>% hc_add_theme(hc_theme_flat()) }) # Update units every time a variable is selected observeEvent(input$var_name_modeldata, { model.df <- load.model() - default.unit <- model.df %>% filter(var_name == input$var_name_modeldata) %>% pull(ylab) %>% unique() + default.unit <- model.df %>% + filter(var_name == input$var_name_modeldata) %>% + pull(ylab) %>% + unique() updateTextInput(session, "units_modeldata", value = default.unit) }) # Check that new units are parsible and can be used for conversion -observeEvent(input$units_modeldata,{ - if(PEcAn.utils::unit_is_parseable(input$units_modeldata)){ +observeEvent(input$units_modeldata, { + if (PEcAn.utils::unit_is_parseable(input$units_modeldata)) { model.df <- load.model() - default.unit <- model.df %>% filter(var_name == input$var_name_modeldata) %>% pull(ylab) %>% unique() - if(units::ud_are_convertible(default.unit, input$units_modeldata)){ - output$unit_text2 <- renderText({"Conversion possible"}) - }else{ - output$unit_text2 <- renderText({"Units are parsible but conversion is not possible"}) + default.unit <- model.df %>% + filter(var_name == input$var_name_modeldata) %>% + pull(ylab) %>% + unique() + if (units::ud_are_convertible(default.unit, input$units_modeldata)) { + output$unit_text2 <- renderText({ + "Conversion possible" + }) + } else { + output$unit_text2 <- renderText({ + "Units are parsible but conversion is not possible" + }) } - }else{ - output$unit_text2 <- renderText({"Units are not parsible, please type units in udunits2 compatible format"}) + } else { + 
output$unit_text2 <- renderText({ + "Units are not parsible, please type units in udunits2 compatible format" + }) } }) @@ -60,149 +72,166 @@ observeEvent(input$units_modeldata,{ observe({ df <- load.model() updateDateRangeInput(session, "date_range2", - start = as.Date(min(df$dates)), - end = as.Date(max(df$dates)), - min = as.Date(min(df$dates)), - max = as.Date(max(df$dates)) + start = as.Date(min(df$dates)), + end = as.Date(max(df$dates)), + min = as.Date(min(df$dates)), + max = as.Date(max(df$dates)) ) }) # update "function" select box choice according to "agrregation" select box observe({ - if(input$agg2 == "NONE"){ + if (input$agg2 == "NONE") { updateSelectInput(session, "func2", choices = "NONE") - }else{ + } else { updateSelectInput(session, "func2", choices = c("mean", "sum")) } }) -observeEvent(input$ex_plot_modeldata,{ +observeEvent(input$ex_plot_modeldata, { output$modelDataPlot <- renderHighchart({ input$ex_plot_modeldata isolate({ - tryCatch({ - withProgress(message = 'Calculation in progress', - detail = 'This may take a while...',{ - - var = input$var_name_modeldata - - model_data <- dplyr::filter(load.model(), var_name == var) - - #updateSliderInput(session,"smooth_n_modeldata", min = 0, max = nrow(model_data)) - title <- unique(model_data$title) - xlab <- unique(model_data$xlab) - ylab <- unique(model_data$ylab) - - model_data <- model_data %>% dplyr::select(posix = dates, !!var := vals) - external_data <- load.model.data() - aligned_data = PEcAn.benchmark::align_data( - model.calc = model_data, obvs.calc = external_data, - var = var, align_method = "mean_over_larger_timestep") %>% - dplyr::select(everything(), - model = matches("[.]m"), - observations = matches("[.]o"), - Date = posix) - - print(head(aligned_data)) - # Melt dataframe to plot two types of columns together - aligned_data <- tidyr::gather(aligned_data, variable, value, -Date) - - - - - model <- filter(aligned_data, variable == "model") - observasions <- filter(aligned_data, variable == "observations") - - #convert dataframe to xts object - model.xts <- xts(model$value, order.by = model$Date) - observasions.xts <- xts(observasions$value, order.by = observasions$Date) - - # subsetting of a date range - date_range2 <- paste0(input$date_range2, collapse = "/") - model.xts <- model.xts[date_range2] - observasions.xts <- observasions.xts[date_range2] - - # Aggregation function - aggr <- function(xts.df){ - - if(input$agg2=="NONE") return(xts.df) - - if(input$agg2 == "daily"){ - xts.df <- apply.daily(xts.df, input$func2) - }else if(input$agg2 == "weekly"){ - xts.df <- apply.weekly(xts.df, input$func2) - }else if(input$agg2 == "monthly"){ - xts.df <- apply.monthly(xts.df, input$func2) - }else if(input$agg2 == "quarterly"){ - xts.df <- apply.quarterly(xts.df, input$func2) - }else{ - xts.df <- apply.yearly(xts.df, input$func2) - } - } - - model.xts <- aggr(model.xts) - observasions.xts <- aggr(observasions.xts) - - #Scatter plot - output$modelDataPlotscatter <- renderHighchart({ - scatter.df <- data.frame ( - 'y' = zoo::coredata(model.xts), - 'x' = zoo::coredata(observasions.xts) - ) - hlim <- max(max(scatter.df$y, scatter.df$x)) - llim <- min(min(scatter.df$y, scatter.df$x)) - - - highchart() %>% - hc_chart(type = 'scatter') %>% - hc_add_series(scatter.df, name = "Model data comparison", showInLegend = FALSE) %>% - hc_legend(enabled = FALSE) %>% - hc_yAxis(title = list(text = "Simulated",fontSize=19), min=llim, max=hlim)%>% - hc_exporting(enabled = TRUE, filename=paste0("Model_data_comparison")) %>% - 
hc_add_theme(hc_theme_elementary(yAxis = list(title = list(style = list(color = "#373b42",fontSize=15)), - labels = list(style = list(color = "#373b42",fontSize=15))), - xAxis = list(title = list(style = list(color = "#373b42",fontSize=15)), - labels = list(style = list(color = "#373b42",fontSize=15))) - ))%>% - hc_xAxis(title = list(text ="Observed" ,fontSize=19), min=llim, max=hlim) - - }) - - unit <- ylab - if(input$units_modeldata != unit & units::ud_are_convertible(unit, input$units_modeldata)){ - aligned_data$value <- PEcAn.utils::ud_convert(aligned_data$value,unit,input$units_modeldata) - ylab <- input$units_modeldata - } - - - plot_type <- switch(input$plotType_model, point = "scatter", line = "line") - - #smooth_param <- input$smooth_n_model / nrow(df) *100 - smooth_param <- input$smooth_n_model * 100 - - ply <- highchart() %>% - hc_add_series(model.xts, name = "model", type = plot_type, - regression = TRUE, - regressionSettings = list(type = "loess", loessSmooth = smooth_param)) %>% - hc_add_series(observasions.xts, name = "observations", type = plot_type, - regression = TRUE, - regressionSettings = list(type = "loess", loessSmooth = smooth_param)) %>% - hc_add_dependency("plugins/highcharts-regression.js") %>% - hc_title(text = title) %>% - hc_xAxis(title = list(text = xlab), type = 'datetime') %>% - hc_yAxis(title = list(text = ylab)) %>% - hc_tooltip(pointFormat = " Date: {point.x:%Y-%m-%d %H:%M}
    y: {point.y}") %>% - hc_exporting(enabled = TRUE) %>% - hc_chart(zoomType = "x") - - }) - - #Signaling the success of the operation - toastr_success("Generate plot") - }, - error = function(e) { - toastr_error(title = "Error", conditionMessage(e)) - }) + tryCatch( + { + withProgress( + message = "Calculation in progress", + detail = "This may take a while...", + { + var <- input$var_name_modeldata + + model_data <- dplyr::filter(load.model(), var_name == var) + + # updateSliderInput(session,"smooth_n_modeldata", min = 0, max = nrow(model_data)) + title <- unique(model_data$title) + xlab <- unique(model_data$xlab) + ylab <- unique(model_data$ylab) + + model_data <- model_data %>% dplyr::select(posix = dates, !!var := vals) + external_data <- load.model.data() + aligned_data <- PEcAn.benchmark::align_data( + model.calc = model_data, obvs.calc = external_data, + var = var, align_method = "mean_over_larger_timestep" + ) %>% + dplyr::select(everything(), + model = matches("[.]m"), + observations = matches("[.]o"), + Date = posix + ) + + print(head(aligned_data)) + # Melt dataframe to plot two types of columns together + aligned_data <- tidyr::gather(aligned_data, variable, value, -Date) + + + + + model <- filter(aligned_data, variable == "model") + observasions <- filter(aligned_data, variable == "observations") + + # convert dataframe to xts object + model.xts <- xts(model$value, order.by = model$Date) + observasions.xts <- xts(observasions$value, order.by = observasions$Date) + + # subsetting of a date range + date_range2 <- paste0(input$date_range2, collapse = "/") + model.xts <- model.xts[date_range2] + observasions.xts <- observasions.xts[date_range2] + + # Aggregation function + aggr <- function(xts.df) { + if (input$agg2 == "NONE") { + return(xts.df) + } + + if (input$agg2 == "daily") { + xts.df <- apply.daily(xts.df, input$func2) + } else if (input$agg2 == "weekly") { + xts.df <- apply.weekly(xts.df, input$func2) + } else if (input$agg2 == "monthly") { + xts.df <- apply.monthly(xts.df, input$func2) + } else if (input$agg2 == "quarterly") { + xts.df <- apply.quarterly(xts.df, input$func2) + } else { + xts.df <- apply.yearly(xts.df, input$func2) + } + } + + model.xts <- aggr(model.xts) + observasions.xts <- aggr(observasions.xts) + + # Scatter plot + output$modelDataPlotscatter <- renderHighchart({ + scatter.df <- data.frame( + "y" = zoo::coredata(model.xts), + "x" = zoo::coredata(observasions.xts) + ) + hlim <- max(max(scatter.df$y, scatter.df$x)) + llim <- min(min(scatter.df$y, scatter.df$x)) + + + highchart() %>% + hc_chart(type = "scatter") %>% + hc_add_series(scatter.df, name = "Model data comparison", showInLegend = FALSE) %>% + hc_legend(enabled = FALSE) %>% + hc_yAxis(title = list(text = "Simulated", fontSize = 19), min = llim, max = hlim) %>% + hc_exporting(enabled = TRUE, filename = paste0("Model_data_comparison")) %>% + hc_add_theme(hc_theme_elementary( + yAxis = list( + title = list(style = list(color = "#373b42", fontSize = 15)), + labels = list(style = list(color = "#373b42", fontSize = 15)) + ), + xAxis = list( + title = list(style = list(color = "#373b42", fontSize = 15)), + labels = list(style = list(color = "#373b42", fontSize = 15)) + ) + )) %>% + hc_xAxis(title = list(text = "Observed", fontSize = 19), min = llim, max = hlim) + }) + + unit <- ylab + if (input$units_modeldata != unit & units::ud_are_convertible(unit, input$units_modeldata)) { + aligned_data$value <- PEcAn.utils::ud_convert(aligned_data$value, unit, input$units_modeldata) + ylab <- 
input$units_modeldata + } + + + plot_type <- switch(input$plotType_model, + point = "scatter", + line = "line" + ) + + # smooth_param <- input$smooth_n_model / nrow(df) *100 + smooth_param <- input$smooth_n_model * 100 + + ply <- highchart() %>% + hc_add_series(model.xts, + name = "model", type = plot_type, + regression = TRUE, + regressionSettings = list(type = "loess", loessSmooth = smooth_param) + ) %>% + hc_add_series(observasions.xts, + name = "observations", type = plot_type, + regression = TRUE, + regressionSettings = list(type = "loess", loessSmooth = smooth_param) + ) %>% + hc_add_dependency("plugins/highcharts-regression.js") %>% + hc_title(text = title) %>% + hc_xAxis(title = list(text = xlab), type = "datetime") %>% + hc_yAxis(title = list(text = ylab)) %>% + hc_tooltip(pointFormat = " Date: {point.x:%Y-%m-%d %H:%M}
    y: {point.y}") %>% + hc_exporting(enabled = TRUE) %>% + hc_chart(zoomType = "x") + } + ) + + # Signaling the success of the operation + toastr_success("Generate plot") + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) + } + ) }) ply }) @@ -314,6 +343,3 @@ observeEvent(input$ex_plot_modeldata,{ # }) # # }) - - - diff --git a/shiny/workflowPlot/server_files/model_plots_server.R b/shiny/workflowPlot/server_files/model_plots_server.R index 5e42f2c8bdd..44f3e243fae 100644 --- a/shiny/workflowPlot/server_files/model_plots_server.R +++ b/shiny/workflowPlot/server_files/model_plots_server.R @@ -2,48 +2,62 @@ output$modelPlot <- renderHighchart({ validate( - need(input$all_workflow_id, 'Select workflow id'), - need(input$all_run_id, 'Select Run id'), - need(input$load_model > 0, 'Select Load Model Outputs') + need(input$all_workflow_id, "Select workflow id"), + need(input$all_run_id, "Select Run id"), + need(input$load_model > 0, "Select Load Model Outputs") ) - highchart() %>% - hc_add_series(data = c(), showInLegend = F) %>% - hc_xAxis(title = list(text = "Time")) %>% - hc_yAxis(title = list(text = "y")) %>% - hc_title(text = "You are ready to plot!") %>% + highchart() %>% + hc_add_series(data = c(), showInLegend = F) %>% + hc_xAxis(title = list(text = "Time")) %>% + hc_yAxis(title = list(text = "y")) %>% + hc_title(text = "You are ready to plot!") %>% hc_add_theme(hc_theme_flat()) }) # Update units every time a variable is selected observeEvent(input$var_name_model, { req(input$var_name_model) - tryCatch({ - model.df <- load.model() - default.unit <- - model.df %>% filter(var_name == input$var_name_model) %>% pull(ylab) %>% unique() - updateTextInput(session, "units_model", value = default.unit) - - #Signaling the success of the operation - toastr_success("Variables were updated.") - }, - error = function(e) { - toastr_error(title = "Error in reading the run files.", conditionMessage(e)) - }) + tryCatch( + { + model.df <- load.model() + default.unit <- + model.df %>% + filter(var_name == input$var_name_model) %>% + pull(ylab) %>% + unique() + updateTextInput(session, "units_model", value = default.unit) + + # Signaling the success of the operation + toastr_success("Variables were updated.") + }, + error = function(e) { + toastr_error(title = "Error in reading the run files.", conditionMessage(e)) + } + ) }) # Check that new units are parsible and can be used for conversion -observeEvent(input$units_model,{ - if(PEcAn.utils::unit_is_parseable(input$units_model)){ +observeEvent(input$units_model, { + if (PEcAn.utils::unit_is_parseable(input$units_model)) { model.df <- load.model() - default.unit <- model.df %>% filter(var_name == input$var_name_model) %>% pull(ylab) %>% unique() - if(units::ud_are_convertible(default.unit, input$units_model)){ - output$unit_text <- renderText({"Conversion possible"}) - }else{ - output$unit_text <- renderText({"Units are parsible but conversion is not possible"}) + default.unit <- model.df %>% + filter(var_name == input$var_name_model) %>% + pull(ylab) %>% + unique() + if (units::ud_are_convertible(default.unit, input$units_model)) { + output$unit_text <- renderText({ + "Conversion possible" + }) + } else { + output$unit_text <- renderText({ + "Units are parsible but conversion is not possible" + }) } - }else{ - output$unit_text <- renderText({"Units are not parsible, please type units in udunits2 compatible format"}) + } else { + output$unit_text <- renderText({ + "Units are not parsible, please type units in udunits2 compatible 
format" + }) } }) @@ -51,102 +65,111 @@ observeEvent(input$units_model,{ observe({ df <- load.model() updateDateRangeInput(session, "date_range", - start = as.Date(min(df$dates)), - end = as.Date(max(df$dates)), - min = as.Date(min(df$dates)), - max = as.Date(max(df$dates)) + start = as.Date(min(df$dates)), + end = as.Date(max(df$dates)), + min = as.Date(min(df$dates)), + max = as.Date(max(df$dates)) ) }) # update "function" select box choice according to "agrregation" select box observe({ - if(input$agg == "NONE"){ + if (input$agg == "NONE") { updateSelectInput(session, "func", choices = "NONE") - }else{ + } else { updateSelectInput(session, "func", choices = c("mean", "sum")) } }) -observeEvent(input$ex_plot_model,{ +observeEvent(input$ex_plot_model, { req(input$units_model) - + output$modelPlot <- renderHighchart({ - input$ex_plot_model isolate({ - tryCatch({ - withProgress(message = 'Calculation in progress', - detail = 'This may take a while...',{ - - df <- dplyr::filter(load.model(), var_name == input$var_name_model) - - #updateSliderInput(session,"smooth_n_model", min = 0, max = nrow(df)) - - title <- unique(df$title) - xlab <- unique(df$xlab) - ylab <- unique(df$ylab) - - unit <- ylab - if(input$units_model != unit & units::ud_are_convertible(unit, input$units_model)){ - df$vals <- PEcAn.utils::ud_convert(df$vals,unit,input$units_model) - ylab <- input$units_model - } - - date_range <- paste0(input$date_range, collapse = "/") - - plot_type <- switch(input$plotType_model, point = "scatter", line = "line") - - smooth_param <- input$smooth_n_model * 100 - - # function that converts dataframe to xts object, - # selects subset of a date range and does data aggregtion - func <- function(df){ - xts.df <- xts(df$vals, order.by = df$dates) - xts.df <- xts.df[date_range] - - if(input$agg=="NONE") return(xts.df) - - if(input$agg == "daily"){ - xts.df <- apply.daily(xts.df, input$func) - }else if(input$agg == "weekly"){ - xts.df <- apply.weekly(xts.df, input$func) - }else if(input$agg == "monthly"){ - xts.df <- apply.monthly(xts.df, input$func) - }else if(input$agg == "quarterly"){ - xts.df <- apply.quarterly(xts.df, input$func) - }else{ - xts.df <- apply.yearly(xts.df, input$func) - } - } - - list <- split(df, df$run_id) - xts.list <- lapply(list, func) - - ply <- highchart() - - for(i in 1:length(xts.list)){ - ply <- ply %>% - hc_add_series(xts.list[[i]], type = plot_type, name = names(xts.list[i]), - regression = TRUE, - regressionSettings = list(type = "loess", loessSmooth = smooth_param)) - } - - ply <- ply %>% - hc_add_dependency("plugins/highcharts-regression.js") %>% - hc_title(text = title) %>% - hc_xAxis(title = list(text = xlab), type = 'datetime') %>% - hc_yAxis(title = list(text = ylab)) %>% - hc_tooltip(pointFormat = " Date: {point.x:%Y-%m-%d %H:%M}
    y: {point.y}") %>% - hc_exporting(enabled = TRUE) %>% - hc_chart(zoomType = "x") - - }) - #Signaling the success of the operation - toastr_success("Generate plot") - }, - error = function(e) { - toastr_error(title = "Error", conditionMessage(e)) - }) + tryCatch( + { + withProgress( + message = "Calculation in progress", + detail = "This may take a while...", + { + df <- dplyr::filter(load.model(), var_name == input$var_name_model) + + # updateSliderInput(session,"smooth_n_model", min = 0, max = nrow(df)) + + title <- unique(df$title) + xlab <- unique(df$xlab) + ylab <- unique(df$ylab) + + unit <- ylab + if (input$units_model != unit & units::ud_are_convertible(unit, input$units_model)) { + df$vals <- PEcAn.utils::ud_convert(df$vals, unit, input$units_model) + ylab <- input$units_model + } + + date_range <- paste0(input$date_range, collapse = "/") + + plot_type <- switch(input$plotType_model, + point = "scatter", + line = "line" + ) + + smooth_param <- input$smooth_n_model * 100 + + # function that converts dataframe to xts object, + # selects subset of a date range and does data aggregtion + func <- function(df) { + xts.df <- xts(df$vals, order.by = df$dates) + xts.df <- xts.df[date_range] + + if (input$agg == "NONE") { + return(xts.df) + } + + if (input$agg == "daily") { + xts.df <- apply.daily(xts.df, input$func) + } else if (input$agg == "weekly") { + xts.df <- apply.weekly(xts.df, input$func) + } else if (input$agg == "monthly") { + xts.df <- apply.monthly(xts.df, input$func) + } else if (input$agg == "quarterly") { + xts.df <- apply.quarterly(xts.df, input$func) + } else { + xts.df <- apply.yearly(xts.df, input$func) + } + } + + list <- split(df, df$run_id) + xts.list <- lapply(list, func) + + ply <- highchart() + + for (i in 1:length(xts.list)) { + ply <- ply %>% + hc_add_series(xts.list[[i]], + type = plot_type, name = names(xts.list[i]), + regression = TRUE, + regressionSettings = list(type = "loess", loessSmooth = smooth_param) + ) + } + + ply <- ply %>% + hc_add_dependency("plugins/highcharts-regression.js") %>% + hc_title(text = title) %>% + hc_xAxis(title = list(text = xlab), type = "datetime") %>% + hc_yAxis(title = list(text = ylab)) %>% + hc_tooltip(pointFormat = " Date: {point.x:%Y-%m-%d %H:%M}
    y: {point.y}") %>% + hc_exporting(enabled = TRUE) %>% + hc_chart(zoomType = "x") + } + ) + # Signaling the success of the operation + toastr_success("Generate plot") + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) + } + ) }) ply }) diff --git a/shiny/workflowPlot/server_files/pdf_viewer_server.R b/shiny/workflowPlot/server_files/pdf_viewer_server.R index 66022d2e9f0..5c34a25d537 100644 --- a/shiny/workflowPlot/server_files/pdf_viewer_server.R +++ b/shiny/workflowPlot/server_files/pdf_viewer_server.R @@ -3,59 +3,62 @@ observe({ req(input$all_workflow_id) w_ids <- input$all_workflow_id folder.path <- c() - for(w_id in w_ids){ + for (w_id in w_ids) { folder.path <- c(folder.path, workflow(dbConnect$bety, w_id) %>% collect() %>% pull(folder)) } output$files <- DT::renderDT( - DT::datatable(list.files(folder.path,"*.pdf") %>% - as.data.frame()%>% - `colnames<-`(c("File name")), - escape = F, - selection="single", - style='bootstrap', - rownames = FALSE, - options = list( - dom = 'ft', - pageLength = 10, - scrollX = TRUE, - scrollCollapse = TRUE, - initComplete = DT::JS( - "function(settings, json) {", - "$(this.api().table().header()).css({'background-color': '#000', 'color': '#fff'});", - "}") - ) + DT::datatable( + list.files(folder.path, "*.pdf") %>% + as.data.frame() %>% + `colnames<-`(c("File name")), + escape = F, + selection = "single", + style = "bootstrap", + rownames = FALSE, + options = list( + dom = "ft", + pageLength = 10, + scrollX = TRUE, + scrollCollapse = TRUE, + initComplete = DT::JS( + "function(settings, json) {", + "$(this.api().table().header()).css({'background-color': '#000', 'color': '#fff'});", + "}" + ) + ) ) - ) }) # displays pdf views -observeEvent(input$files_cell_clicked, { +observeEvent(input$files_cell_clicked, { req(input$all_workflow_id) w_ids <- input$all_workflow_id folder.path <- c() - for(w_id in w_ids){ + for (w_id in w_ids) { folder.path <- c(folder.path, workflow(dbConnect$bety, w_id) %>% collect() %>% pull(folder)) } - + if (length(input$files_cell_clicked) > 0) { # File needs to be copied to the www folder - if(file.access("www", 2) == 0){ #check write permission - for(i in length(folder.path)){ + if (file.access("www", 2) == 0) { # check write permission + for (i in length(folder.path)) { file.copy(file.path(folder.path[i], input$files_cell_clicked$value), - "www", - overwrite = T) + "www", + overwrite = T + ) } - }else{ + } else { print("Pdf files cannot not be copied to www folfer. 
Do not have write permission.") } output$pdfview <- renderUI({ - tags$iframe(style = "height:800px; width:100%; border: 1px grey solid;", - src = input$files_cell_clicked$value) + tags$iframe( + style = "height:800px; width:100%; border: 1px grey solid;", + src = input$files_cell_clicked$value + ) }) } - }) diff --git a/shiny/workflowPlot/server_files/select_data_server.R b/shiny/workflowPlot/server_files/select_data_server.R index 3d828642e81..550eb3f8254 100644 --- a/shiny/workflowPlot/server_files/select_data_server.R +++ b/shiny/workflowPlot/server_files/select_data_server.R @@ -1,133 +1,146 @@ -observeEvent(input$load_model,{ - tryCatch({ - withProgress(message = 'Calculation in progress', - detail = 'This may take a while...', - value = 0,{ - - req(input$all_run_id) - incProgress(1 / 15) - - df <- load.model() - if (nrow(df)==0) return(NULL) - # output$results_table <- DT::renderDataTable(DT::datatable(head(masterDF))) - incProgress(10 / 15) - - ids_DF <- parse_ids_from_input_runID(input$all_run_id) - - select.df <- data.frame() - - for(i in seq(nrow(ids_DF))){ - - dfsub <- df %>% filter(run_id == ids_DF$runID[i]) - - diff.m <- diff(dfsub$dates) - mode.m <- diff.m[which.max(tabulate(match(unique(diff.m), diff.m)))] - diff_units.m = units(mode.m) - - diff_message <- sprintf("timestep: %.2f %s", mode.m, diff_units.m) - wf.folder <- workflow(dbConnect$bety, ids_DF$wID[i]) %>% collect() %>% pull(folder) - - README.text <- tryCatch({ - c(readLines(file.path(wf.folder, 'run', ids_DF$runID[i], "README.txt")), - diff_message) - }, - error = function(e){ - return(NULL) - }) - - README.df <- data.frame() - - if(!is.null(README.text)){ - README.df <- read.delim(textConnection(README.text), - header=FALSE,sep=":",strip.white=TRUE) - - if("pft names" %in% levels(README.df$V1)){ - levels(README.df$V1)[levels(README.df$V1)=="pft names"] <- "pft name" - } - if(!"trait" %in% levels(README.df$V1)){ - README.df <- rbind(README.df, data.frame(V1 = "trait", V2 = "-")) - } - if(!"quantile" %in% levels(README.df$V1)){ - README.df <- rbind(README.df, data.frame(V1 = "quantile", V2 = "-")) - } - } - - select.df <- rbind(select.df, README.df) - } - - #hide the into msg - shinyjs::hide("intromsg") - - select.data <- select.df %>% - dlply(.(V1), function(x) x[[2]]) %>% - as.data.frame() %>% - dplyr::rename(site.id = site..id) %>% - dplyr::select(runtype, workflow.id, ensemble.id, pft.name, quantile, trait, run.id, - model, site.id, start.date, end.date, hostname, timestep, rundir, outdir) - - output$runsui<-renderUI({ - seq_len(nrow(select.data)) %>% - map( - function(rown){ - - HTML(paste0(' -
    -

    ',select.data$workflow.id[rown],'

    +observeEvent(input$load_model, { + tryCatch( + { + withProgress( + message = "Calculation in progress", + detail = "This may take a while...", + value = 0, + { + req(input$all_run_id) + incProgress(1 / 15) + + df <- load.model() + if (nrow(df) == 0) { + return(NULL) + } + # output$results_table <- DT::renderDataTable(DT::datatable(head(masterDF))) + incProgress(10 / 15) + + ids_DF <- parse_ids_from_input_runID(input$all_run_id) + + select.df <- data.frame() + + for (i in seq(nrow(ids_DF))) { + dfsub <- df %>% filter(run_id == ids_DF$runID[i]) + + diff.m <- diff(dfsub$dates) + mode.m <- diff.m[which.max(tabulate(match(unique(diff.m), diff.m)))] + diff_units.m <- units(mode.m) + + diff_message <- sprintf("timestep: %.2f %s", mode.m, diff_units.m) + wf.folder <- workflow(dbConnect$bety, ids_DF$wID[i]) %>% + collect() %>% + pull(folder) + + README.text <- tryCatch( + { + c( + readLines(file.path(wf.folder, "run", ids_DF$runID[i], "README.txt")), + diff_message + ) + }, + error = function(e) { + return(NULL) + } + ) + + README.df <- data.frame() + + if (!is.null(README.text)) { + README.df <- read.delim(textConnection(README.text), + header = FALSE, sep = ":", strip.white = TRUE + ) + + if ("pft names" %in% levels(README.df$V1)) { + levels(README.df$V1)[levels(README.df$V1) == "pft names"] <- "pft name" + } + if (!"trait" %in% levels(README.df$V1)) { + README.df <- rbind(README.df, data.frame(V1 = "trait", V2 = "-")) + } + if (!"quantile" %in% levels(README.df$V1)) { + README.df <- rbind(README.df, data.frame(V1 = "quantile", V2 = "-")) + } + } + + select.df <- rbind(select.df, README.df) + } + + # hide the into msg + shinyjs::hide("intromsg") + + select.data <- select.df %>% + dlply(.(V1), function(x) x[[2]]) %>% + as.data.frame() %>% + dplyr::rename(site.id = site..id) %>% + dplyr::select( + runtype, workflow.id, ensemble.id, pft.name, quantile, trait, run.id, + model, site.id, start.date, end.date, hostname, timestep, rundir, outdir + ) + + output$runsui <- renderUI({ + seq_len(nrow(select.data)) %>% + map( + function(rown) { + HTML(paste0(' +
    +

    ', select.data$workflow.id[rown], '

    - + - + - + - + - + - + - + - + - + - + -
    Runtype:
    ',select.data$runtype[rown],'', select.data$runtype[rown], '
    Ensemble.id:
    ',select.data$ensemble.id[rown],'', select.data$ensemble.id[rown], '
    Pft.name
    ',select.data$pft.name[rown],'', select.data$pft.name[rown], '
    Run.id
    ',select.data$run.id[rown],'', select.data$run.id[rown], '
    Model
    ',select.data$model[rown],'', select.data$model[rown], '
    Site.id
    ',select.data$site.id[rown],'', select.data$site.id[rown], '
    Start.date
    ',select.data$start.date[rown],'', select.data$start.date[rown], '
    End.date
    ',select.data$end.date[rown],'', select.data$end.date[rown], '
    Hostname
    ',select.data$hostname[rown],'', select.data$hostname[rown], '
    Outdir
    ',select.data$outdir[rown],'', select.data$outdir[rown], "
    +
    - - ')) - } - ) - }) - - #output$README <- renderUI({HTML(paste(README.text, collapse = '
    '))}) - - output$dim_message <- renderText({sprintf("This data has %.0f rows,\nthink about skipping exploratory plots if this is a large number...", dim(df)[1])}) - incProgress(4 / 15) - }) - - #Signaling the success of the operation - toastr_success("Load model outputs") - }, - error = function(e) { - toastr_error(title = "Error", conditionMessage(e)) - }) -}) + ")) + } + ) + }) + + # output$README <- renderUI({HTML(paste(README.text, collapse = '
    '))}) + output$dim_message <- renderText({ + sprintf("This data has %.0f rows,\nthink about skipping exploratory plots if this is a large number...", dim(df)[1]) + }) + incProgress(4 / 15) + } + ) + + # Signaling the success of the operation + toastr_success("Load model outputs") + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) + } + ) +}) diff --git a/shiny/workflowPlot/server_files/sidebar_server.R b/shiny/workflowPlot/server_files/sidebar_server.R index 94827458df8..b116e21a8ea 100644 --- a/shiny/workflowPlot/server_files/sidebar_server.R +++ b/shiny/workflowPlot/server_files/sidebar_server.R @@ -5,28 +5,31 @@ # Update workflow ids observe({ - tryCatch({ - # get_workflow_ids function (line 137) in db/R/query.dplyr.R takes a flag to check - # if we want to load all workflow ids. - # get_workflow_id function from query.dplyr.R - all_ids <- get_workflow_ids(dbConnect$bety, query, all.ids=TRUE) - selectList <- as.data.table(all_ids) - - updateSelectizeInput(session, - "all_workflow_id", - choices = all_ids, - server = TRUE) - # Get URL prameters - query <- parseQueryString(session$clientData$url_search) - - # Pre-select workflow_id from URL prams - if(length(query)>0) updateSelectizeInput(session, "all_workflow_id", selected = query[["workflow_id"]]) - #Signaling the success of the operation - toastr_success("Update workflow IDs") - }, - error = function(e) { - toastr_error(title = "Error", conditionMessage(e)) - }) + tryCatch( + { + # get_workflow_ids function (line 137) in db/R/query.dplyr.R takes a flag to check + # if we want to load all workflow ids. + # get_workflow_id function from query.dplyr.R + all_ids <- get_workflow_ids(dbConnect$bety, query, all.ids = TRUE) + selectList <- as.data.table(all_ids) + + updateSelectizeInput(session, + "all_workflow_id", + choices = all_ids, + server = TRUE + ) + # Get URL prameters + query <- parseQueryString(session$clientData$url_search) + + # Pre-select workflow_id from URL prams + if (length(query) > 0) updateSelectizeInput(session, "all_workflow_id", selected = query[["workflow_id"]]) + # Signaling the success of the operation + toastr_success("Update workflow IDs") + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) + } + ) }) # Update run ids @@ -37,34 +40,36 @@ all_run_ids <- reactive({ w_ids <- input$all_workflow_id # Will return a list run_id_list <- c() - for(w_id in w_ids){ + for (w_id in w_ids) { # For all the workflow ids r_ids <- get_run_ids(dbConnect$bety, w_id) - for(r_id in r_ids){ + for (r_id in r_ids) { # Each workflow id can have more than one run ids # ',' as a separator between workflow id and run id - list_item <- paste0('workflow ', w_id,', run ', r_id) + list_item <- paste0("workflow ", w_id, ", run ", r_id) run_id_list <- c(run_id_list, list_item) } } return(run_id_list) }) # Update all run_ids ('workflow ',w_id,', run ',r_id) -observeEvent(input$all_workflow_id,{ - tryCatch({ - updateSelectizeInput(session, "all_run_id", choices = all_run_ids()) - # Get URL parameters - query <- parseQueryString(session$clientData$url_search) - # Make the run_id string with workflow_id - url_run_id <- paste0('workflow ', query[["workflow_id"]],', run ', query[["run_id"]]) - # Pre-select run_id from URL params - updateSelectizeInput(session, "all_run_id", selected = url_run_id) - #Signaling the success of the operation - toastr_success("Update run IDs") - }, - error = function(e) { - toastr_error(title = "Error", conditionMessage(e)) - }) 
+observeEvent(input$all_workflow_id, { + tryCatch( + { + updateSelectizeInput(session, "all_run_id", choices = all_run_ids()) + # Get URL parameters + query <- parseQueryString(session$clientData$url_search) + # Make the run_id string with workflow_id + url_run_id <- paste0("workflow ", query[["workflow_id"]], ", run ", query[["run_id"]]) + # Pre-select run_id from URL params + updateSelectizeInput(session, "all_run_id", selected = url_run_id) + # Signaling the success of the operation + toastr_success("Update run IDs") + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) + } + ) }) @@ -72,19 +77,25 @@ observeEvent(input$all_workflow_id,{ # All information about a model is contained in 'all_run_id' string # Wrapper over 'load_data_single_run' in PEcAn.db::query.dplyr # Model data different from observations data -load.model <- eventReactive(input$load_model,{ +load.model <- eventReactive(input$load_model, { req(input$all_run_id) # Get IDs DF from 'all_run_id' string ids_DF <- parse_ids_from_input_runID(input$all_run_id) - globalDF <- map2_df(ids_DF$wID, ids_DF$runID, - ~tryCatch({ - load_data_single_run(dbConnect$bety, .x, .y) - }, - error = function(e){ - toastr_error(title = paste("Error in WorkflowID", .x), - conditionMessage(e)) - return() - })) + globalDF <- map2_df( + ids_DF$wID, ids_DF$runID, + ~ tryCatch( + { + load_data_single_run(dbConnect$bety, .x, .y) + }, + error = function(e) { + toastr_error( + title = paste("Error in WorkflowID", .x), + conditionMessage(e) + ) + return() + } + ) + ) print("Yay the model data is loaded!") print(head(globalDF)) globalDF$var_name <- as.character(globalDF$var_name) @@ -98,41 +109,47 @@ observeEvent(input$load_model, { # All information about a model is contained in 'all_run_id' string ids_DF <- parse_ids_from_input_runID(input$all_run_id) var_name_list <- c() - for(row_num in 1:nrow(ids_DF)){ - var_name_list <- c(var_name_list, - tryCatch({ - var_names_all(dbConnect$bety, ids_DF$wID[row_num], ids_DF$runID[row_num]) - }, - error = function(e){ - return(NULL) - })) + for (row_num in 1:nrow(ids_DF)) { + var_name_list <- c( + var_name_list, + tryCatch( + { + var_names_all(dbConnect$bety, ids_DF$wID[row_num], ids_DF$runID[row_num]) + }, + error = function(e) { + return(NULL) + } + ) + ) } updateSelectizeInput(session, "var_name_model", choices = var_name_list) }) -observeEvent(input$load_model,{ +observeEvent(input$load_model, { # Retrieves all site ids from multiple seleted run ids when load button is pressed req(input$all_run_id) ids_DF <- parse_ids_from_input_runID(input$all_run_id) site_id_list <- c() - for(row_num in 1:nrow(ids_DF)){ - settings <- - tryCatch({ - getSettingsFromWorkflowId(dbConnect$bety,ids_DF$wID[row_num]) - }, - error = function(e){ - return(NULL) - }) + for (row_num in 1:nrow(ids_DF)) { + settings <- + tryCatch( + { + getSettingsFromWorkflowId(dbConnect$bety, ids_DF$wID[row_num]) + }, + error = function(e) { + return(NULL) + } + ) site.id <- c(settings$run$site$id) - site_id_list <- c(site_id_list,site.id) + site_id_list <- c(site_id_list, site.id) } - updateSelectizeInput(session, "all_site_id", choices=site_id_list) + updateSelectizeInput(session, "all_site_id", choices = site_id_list) }) # Update input id list as (input id, name) observe({ req(input$all_site_id) inputs_df <- getInputs(dbConnect$bety, c(input$all_site_id)) - formats_1 <- dplyr::tbl(dbConnect$bety, 'formats_variables') %>% + formats_1 <- dplyr::tbl(dbConnect$bety, "formats_variables") %>% dplyr::filter(format_id %in% 
inputs_df$format_id) if (dplyr.count(formats_1) == 0) { logger.warn("No inputs found. Returning NULL.") @@ -142,17 +159,16 @@ observe({ dplyr::pull(format_id) %>% unique() inputs_df <- inputs_df %>% dplyr::filter(format_id %in% formats_sub) # Only data sets with formats with associated variables will show up - updateSelectizeInput(session, "all_input_id", choices=inputs_df$input_selection_list) + updateSelectizeInput(session, "all_input_id", choices = inputs_df$input_selection_list) } }) load.model.data <- eventReactive(input$load_data, { - req(input$all_input_id) - - inputs_df <- getInputs(dbConnect$bety,c(input$all_site_id)) + + inputs_df <- getInputs(dbConnect$bety, c(input$all_site_id)) inputs_df <- inputs_df %>% dplyr::filter(input_selection_list == input$all_input_id) - + input_id <- inputs_df$input_id # File_format <- getFileFormat(bety,input_id) File_format <- PEcAn.DB::query.format.vars(bety = dbConnect$bety, input.id = input_id) @@ -162,13 +178,14 @@ load.model.data <- eventReactive(input$load_data, { # TODO There is an issue with the db where file names are not saved properly. # To make it work with the VM, uncomment the line below - #File_path <- paste0(inputs_df$filePath,'.csv') + # File_path <- paste0(inputs_df$filePath,'.csv') site.id <- inputs_df$site_id - site <- PEcAn.DB::query.site(site.id,dbConnect$bety) - + site <- PEcAn.DB::query.site(site.id, dbConnect$bety) + observations <- PEcAn.benchmark::load_data( data.path = File_path, format = File_format, time.row = File_format$time.row, - site = site, start_year = start.year, end_year = end.year) + site = site, start_year = start.year, end_year = end.year + ) # Manually select variables to deal with the error # observations <- PEcAn.benchmark::load_data( # data.path = File_path, format = File_format, @@ -182,24 +199,30 @@ load.model.data <- eventReactive(input$load_data, { # Update all variable names observeEvent(input$load_data, { - tryCatch({ - withProgress(message = 'Calculation in progress', - detail = 'This may take a while...', - value = 0,{ - model.df <- load.model() - incProgress(7 / 15) - obvs.df <- load.model.data() - incProgress(7 / 15) - updateSelectizeInput(session, "var_name_modeldata", - choices = intersect(model.df$var_name, names(obvs.df))) - incProgress(1 / 15) - }) - #Signaling the success of the operation - toastr_success("Update variable names") - }, - error = function(e) { - toastr_error(title = "Error", conditionMessage(e)) - }) + tryCatch( + { + withProgress( + message = "Calculation in progress", + detail = "This may take a while...", + value = 0, + { + model.df <- load.model() + incProgress(7 / 15) + obvs.df <- load.model.data() + incProgress(7 / 15) + updateSelectizeInput(session, "var_name_modeldata", + choices = intersect(model.df$var_name, names(obvs.df)) + ) + incProgress(1 / 15) + } + ) + # Signaling the success of the operation + toastr_success("Update variable names") + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) + } + ) }) # These are required for shinyFiles which allows to select target folder on server machine @@ -209,169 +232,182 @@ shinyDirChoose(input, "regdirectory", roots = volumes, session = session, restri output$formatPreview <- DT::renderDT({ req(input$format_sel_pre) - tryCatch({ - Fids <- - PEcAn.DB::get.id("formats", - "name", - input$format_sel_pre, - dbConnect$bety) %>% - as.character() - - if (length(Fids) > 1) - toastr_warning(title = "Format Preview", - message = "More than one id was found for this format. 
The first one will be used.") - - mimt<-tbl(dbConnect$bety, "formats") %>% - left_join(tbl(dbConnect$bety, "mimetypes"), by=c('mimetype_id'='id'))%>% - dplyr::filter(id==Fids[1]) %>% - dplyr::pull(type_string) - - output$mimt_pre<-renderText({ - mimt - }) - - DT::datatable( - tbl(dbConnect$bety, "formats_variables") %>% - dplyr::filter(format_id == Fids[1]) %>% - dplyr::select(-id, -format_id,-variable_id,-created_at,-updated_at) %>% - dplyr::filter(name != "") %>% - collect(), - escape = F, - filter = 'none', - selection = "none", - style = 'bootstrap', - rownames = FALSE, - options = list( - autowidth = TRUE, - columnDefs = list(list( - width = '90px', targets = -1 - )), - #set column width for action button - dom = 'tp', - pageLength = 10, - scrollX = TRUE, - scrollCollapse = FALSE, - initComplete = DT::JS( - "function(settings, json) {", - "$(this.api().table().header()).css({'background-color': '#000', 'color': '#fff'});", - "}" + tryCatch( + { + Fids <- + PEcAn.DB::get.id( + "formats", + "name", + input$format_sel_pre, + dbConnect$bety + ) %>% + as.character() + + if (length(Fids) > 1) { + toastr_warning( + title = "Format Preview", + message = "More than one id was found for this format. The first one will be used." + ) + } + + mimt <- tbl(dbConnect$bety, "formats") %>% + left_join(tbl(dbConnect$bety, "mimetypes"), by = c("mimetype_id" = "id")) %>% + dplyr::filter(id == Fids[1]) %>% + dplyr::pull(type_string) + + output$mimt_pre <- renderText({ + mimt + }) + + DT::datatable( + tbl(dbConnect$bety, "formats_variables") %>% + dplyr::filter(format_id == Fids[1]) %>% + dplyr::select(-id, -format_id, -variable_id, -created_at, -updated_at) %>% + dplyr::filter(name != "") %>% + collect(), + escape = F, + filter = "none", + selection = "none", + style = "bootstrap", + rownames = FALSE, + options = list( + autowidth = TRUE, + columnDefs = list(list( + width = "90px", targets = -1 + )), + # set column width for action button + dom = "tp", + pageLength = 10, + scrollX = TRUE, + scrollCollapse = FALSE, + initComplete = DT::JS( + "function(settings, json) {", + "$(this.api().table().header()).css({'background-color': '#000', 'color': '#fff'});", + "}" + ) ) ) - ) - - - - }, - error = function(e) { - toastr_error(title = "Error in format preview", message = conditionMessage(e)) - }) -}) + }, + error = function(e) { + toastr_error(title = "Error in format preview", message = conditionMessage(e)) + } + ) +}) # Register external data -observeEvent(input$register_data,{ - #browser() +observeEvent(input$register_data, { + # browser() req(input$all_site_id) - + showModal( modalDialog( title = "Register External Data", tabsetPanel( - tabPanel("Register", - br(), - fluidRow( - column(6, - fileInput("Datafile", "Choose CSV/NC File", - width = "100%", - accept = c( - "text/csv", - "text/comma-separated-values,text/plain", - ".csv", - ".nc") - )), - column(6,br(), - shinyFiles::shinyDirButton("regdirectory", "Choose your target dir", "Please select a folder") - ), - tags$hr() - ), - fluidRow( - column(6, dateInput("date3", "Start Date:", value = Sys.Date()-10)), - column(6, dateInput("date4", "End Date:", value = Sys.Date()-10) ) - ),tags$hr(), - fluidRow( - column(6, shinyTime::timeInput("time2", "Start Time:", value = Sys.time())), - column(6, shinyTime::timeInput("time2", "End Time:", value = Sys.time())) - ),tags$hr(), - fluidRow( - column(6, selectizeInput("format_sel", "Format Name", tbl(dbConnect$bety,"formats") %>% - pull(name) %>% - unique() - ) ), - column(6) - ) - ), - tabPanel("Fromat Preview", 
br(), - fluidRow( - column(6,selectizeInput("format_sel_pre", "Format Name", tbl(dbConnect$bety,"formats") %>% - pull(name) %>% unique())), - column(6, h5(shiny::tags$b("Mimetypes")), textOutput("mimt_pre")) - ), - fluidRow( - column(12, - DT::dataTableOutput("formatPreview") - ) - - ) - ) + tabPanel( + "Register", + br(), + fluidRow( + column( + 6, + fileInput("Datafile", "Choose CSV/NC File", + width = "100%", + accept = c( + "text/csv", + "text/comma-separated-values,text/plain", + ".csv", + ".nc" + ) + ) + ), + column( + 6, br(), + shinyFiles::shinyDirButton("regdirectory", "Choose your target dir", "Please select a folder") + ), + tags$hr() + ), + fluidRow( + column(6, dateInput("date3", "Start Date:", value = Sys.Date() - 10)), + column(6, dateInput("date4", "End Date:", value = Sys.Date() - 10)) + ), tags$hr(), + fluidRow( + column(6, shinyTime::timeInput("time2", "Start Time:", value = Sys.time())), + column(6, shinyTime::timeInput("time2", "End Time:", value = Sys.time())) + ), tags$hr(), + fluidRow( + column(6, selectizeInput("format_sel", "Format Name", tbl(dbConnect$bety, "formats") %>% + pull(name) %>% + unique())), + column(6) + ) + ), + tabPanel( + "Fromat Preview", br(), + fluidRow( + column(6, selectizeInput("format_sel_pre", "Format Name", tbl(dbConnect$bety, "formats") %>% + pull(name) %>% unique())), + column(6, h5(shiny::tags$b("Mimetypes")), textOutput("mimt_pre")) + ), + fluidRow( + column( + 12, + DT::dataTableOutput("formatPreview") + ) + ) + ) ), footer = tagList( - actionButton("register_button", "Register", class="btn-primary"), + actionButton("register_button", "Register", class = "btn-primary"), modalButton("Cancel") ), - size = 'l' + size = "l" ) ) }) # register input file in database -observeEvent(input$register_button,{ - tryCatch({ - inFile <- input$Datafile - dir.name <- gsub(".[a-z]+", "", inFile$name) - dir.create(file.path(parseDirPath(volumes, input$regdirectory), dir.name)) - file.copy(inFile$datapath, - file.path(parseDirPath(volumes, input$regdirectory), dir.name, inFile$name), - overwrite = T) +observeEvent(input$register_button, { + tryCatch( + { + inFile <- input$Datafile + dir.name <- gsub(".[a-z]+", "", inFile$name) + dir.create(file.path(parseDirPath(volumes, input$regdirectory), dir.name)) + file.copy(inFile$datapath, + file.path(parseDirPath(volumes, input$regdirectory), dir.name, inFile$name), + overwrite = T + ) - mt <- tbl(dbConnect$bety,"formats") %>% - left_join(tbl(dbConnect$bety,"mimetypes"), by = c("mimetype_id" = "id")) %>% - filter(name == input$format_sel) %>% - pull(type_string) + mt <- tbl(dbConnect$bety, "formats") %>% + left_join(tbl(dbConnect$bety, "mimetypes"), by = c("mimetype_id" = "id")) %>% + filter(name == input$format_sel) %>% + pull(type_string) - PEcAn.DB::dbfile.input.insert(in.path = file.path(parseDirPath(volumes, input$regdirectory), dir.name), - in.prefix = inFile$name, - siteid = input$all_site_id, # select box - startdate = input$date3, - enddate = input$date4, - mimetype = mt, - formatname = input$format_sel, - #parentid = input$parentID, - con = dbConnect$bety - #hostname = localhost #?, #default to localhost for now - #allow.conflicting.dates#? 
#default to FALSE for now - ) - removeModal() - toastr_success("Register External Data") - }, - error = function(e){ - toastr_error(title = "Error", conditionMessage(e)) - }) + PEcAn.DB::dbfile.input.insert( + in.path = file.path(parseDirPath(volumes, input$regdirectory), dir.name), + in.prefix = inFile$name, + siteid = input$all_site_id, # select box + startdate = input$date3, + enddate = input$date4, + mimetype = mt, + formatname = input$format_sel, + # parentid = input$parentID, + con = dbConnect$bety + # hostname = localhost #?, #default to localhost for now + # allow.conflicting.dates#? #default to FALSE for now + ) + removeModal() + toastr_success("Register External Data") + }, + error = function(e) { + toastr_error(title = "Error", conditionMessage(e)) + } + ) }) # update input id list when register button is clicked -observeEvent(input$register_button,{ +observeEvent(input$register_button, { req(input$all_site_id) inputs_df <- getInputs(dbConnect$bety, c(input$all_site_id)) - formats_1 <- dplyr::tbl(dbConnect$bety, 'formats_variables') %>% + formats_1 <- dplyr::tbl(dbConnect$bety, "formats_variables") %>% dplyr::filter(format_id %in% inputs_df$format_id) if (dplyr.count(formats_1) == 0) { logger.warn("No inputs found. Returning NULL.") @@ -381,6 +417,6 @@ observeEvent(input$register_button,{ dplyr::pull(format_id) %>% unique() inputs_df <- inputs_df %>% dplyr::filter(format_id %in% formats_sub) # Only data sets with formats with associated variables will show up - updateSelectizeInput(session, "all_input_id", choices=inputs_df$input_selection_list) + updateSelectizeInput(session, "all_input_id", choices = inputs_df$input_selection_list) } }) diff --git a/shiny/workflowPlot/ui.R b/shiny/workflowPlot/ui.R index 9dd8c3d0d76..9dd987460cc 100644 --- a/shiny/workflowPlot/ui.R +++ b/shiny/workflowPlot/ui.R @@ -11,60 +11,67 @@ library(bsplus) source("ui_utils.R", local = TRUE) # Define UI -ui <- fluidPage(theme = shinytheme("paper"), - tags$head(HTML("PEcAn WorkFlow App")), - # Initializing shinyJs - useShinyjs(), - # Initializing shinytoastr - useToastr(), - shinyWidgets::useShinydashboard(), - # Adding CSS to head - tags$head( - tags$link(rel = "stylesheet", type = "text/css", href = "style.css") - ), - tags$head( - tags$script(src="scripts.js") - ), - tags$head( - tags$style(HTML(" +ui <- fluidPage( + theme = shinytheme("paper"), + tags$head(HTML("PEcAn WorkFlow App")), + # Initializing shinyJs + useShinyjs(), + # Initializing shinytoastr + useToastr(), + shinyWidgets::useShinydashboard(), + # Adding CSS to head + tags$head( + tags$link(rel = "stylesheet", type = "text/css", href = "style.css") + ), + tags$head( + tags$script(src = "scripts.js") + ), + tags$head( + tags$style(HTML(" .modal-lg {width: 85%;} .navbar-default .navbar-nav{font-size: 16px; padding-top: 10px; padding-bottom: 10px; } - ") - ) - ), - # Showing the animation - div( id = "loading-content", - div(class = "plotlybars-wrapper", - div( class="plotlybars", - div(class="plotlybars-bar b1"), - div(class="plotlybars-bar b2"), - div(class="plotlybars-bar b3"), - div(class="plotlybars-bar b4"), - div(class="plotlybars-bar b5"), - div(class="plotlybars-bar b6"), - div(class="plotlybars-bar b7") - ), - div(class="plotlybars-text", - p("Shiny is on its way!") - ) - ) - ), - # Hiding the application content till the page is ready - hidden( - div( - id = "app", - navbarPage(title = NULL, - tabPanel("Select Data", - icon = icon("hand-pointer"), - tagList( - column(3, - source_ui("sidebar_UI.R") - ), - column(9, - HTML(' + ")) + 
), + # Showing the animation + div( + id = "loading-content", + div( + class = "plotlybars-wrapper", + div( + class = "plotlybars", + div(class = "plotlybars-bar b1"), + div(class = "plotlybars-bar b2"), + div(class = "plotlybars-bar b3"), + div(class = "plotlybars-bar b4"), + div(class = "plotlybars-bar b5"), + div(class = "plotlybars-bar b6"), + div(class = "plotlybars-bar b7") + ), + div( + class = "plotlybars-text", + p("Shiny is on its way!") + ) + ) + ), + # Hiding the application content till the page is ready + hidden( + div( + id = "app", + navbarPage( + title = NULL, + tabPanel("Select Data", + icon = icon("hand-pointer"), + tagList( + column( + 3, + source_ui("sidebar_UI.R") + ), + column( + 9, + HTML('
    Hello PEcAn user,

    - This app is designed to help you better explore your runs.

@@ -78,37 +85,36 @@ ui <- fluidPage(theme = shinytheme("paper"),
    '), - source_ui("select_data_UI.R") - ) - ) - ), - tabPanel("History Runs", - icon = icon("history"), - DT::DTOutput("historyfiles") - ), - tabPanel("Exploratory Plots", - icon = icon("chart-bar"), - tabsetPanel( - source_ui("model_plots_UI.R"), - source_ui("model_data_plots_UI.R"), - source_ui("pdf_viewer_UI.R") - ) - ), - tabPanel("Benchmarking", - icon = icon("pencil-ruler"), - tabsetPanel( - source_ui("benchmarking_ScoresPlots_UI.R"), - source_ui("benchmarking_settings_UI.R") - ) - ), - tabPanel("Documentation", - icon = icon("book"), - #withMathJax(includeMarkdown("markdown/workflowPlot_doc.Rmd")) - source_ui("documentation_UI.R"), - use_bs_accordion_sidebar() - - ) - ) - ) - ) - ) + source_ui("select_data_UI.R") + ) + ) + ), + tabPanel("History Runs", + icon = icon("history"), + DT::DTOutput("historyfiles") + ), + tabPanel("Exploratory Plots", + icon = icon("chart-bar"), + tabsetPanel( + source_ui("model_plots_UI.R"), + source_ui("model_data_plots_UI.R"), + source_ui("pdf_viewer_UI.R") + ) + ), + tabPanel("Benchmarking", + icon = icon("pencil-ruler"), + tabsetPanel( + source_ui("benchmarking_ScoresPlots_UI.R"), + source_ui("benchmarking_settings_UI.R") + ) + ), + tabPanel("Documentation", + icon = icon("book"), + # withMathJax(includeMarkdown("markdown/workflowPlot_doc.Rmd")) + source_ui("documentation_UI.R"), + use_bs_accordion_sidebar() + ) + ) + ) + ) +) diff --git a/shiny/workflowPlot/ui_files/benchmarking_ScoresPlots_UI.R b/shiny/workflowPlot/ui_files/benchmarking_ScoresPlots_UI.R index d2ccc1b61b0..1f28cf2535f 100644 --- a/shiny/workflowPlot/ui_files/benchmarking_ScoresPlots_UI.R +++ b/shiny/workflowPlot/ui_files/benchmarking_ScoresPlots_UI.R @@ -1,40 +1,48 @@ -tabPanel("Scores/Plots", - column(12, h3("Setup Reference Run")), - column(12, - verbatimTextOutput("brr_message"), - uiOutput("button_BRR") - ), - column(12, - h3("Setup Benchmarks")), - column(12, - uiOutput("results_message"), - br(), - uiOutput("bm_inputs"), - uiOutput("calc_bm"), - tags$hr(), - br() - ), - column(12, - # verbatimTextOutput("report"), - textOutput("inputs_df_title"), - br(), - DT::dataTableOutput("inputs_df_table"), - br() - ), - fluidRow( - column(8, - fluidRow( - column(3,offset = 1, textOutput("plots_tilte")), - column(8, uiOutput("bm_plots")) - ), - uiOutput("plotlybars"), - plotlyOutput("bmPlot"), - br() - ), - column(4, - textOutput("results_df_title"), - br(), - DT::dataTableOutput("results_table") - ) - ) +tabPanel( + "Scores/Plots", + column(12, h3("Setup Reference Run")), + column( + 12, + verbatimTextOutput("brr_message"), + uiOutput("button_BRR") + ), + column( + 12, + h3("Setup Benchmarks") + ), + column( + 12, + uiOutput("results_message"), + br(), + uiOutput("bm_inputs"), + uiOutput("calc_bm"), + tags$hr(), + br() + ), + column( + 12, + # verbatimTextOutput("report"), + textOutput("inputs_df_title"), + br(), + DT::dataTableOutput("inputs_df_table"), + br() + ), + fluidRow( + column( + 8, + fluidRow( + column(3, offset = 1, textOutput("plots_tilte")), + column(8, uiOutput("bm_plots")) + ), + uiOutput("plotlybars"), + plotlyOutput("bmPlot"), + br() + ), + column( + 4, + textOutput("results_df_title"), + br(), + DT::dataTableOutput("results_table") + ) + ) ) diff --git a/shiny/workflowPlot/ui_files/benchmarking_settings_UI.R b/shiny/workflowPlot/ui_files/benchmarking_settings_UI.R index 8f1db5ab5c2..5506825eeb6 100644 --- a/shiny/workflowPlot/ui_files/benchmarking_settings_UI.R +++ b/shiny/workflowPlot/ui_files/benchmarking_settings_UI.R @@ -1,5 +1,6 @@ -tabPanel("Benchmark Settings", 
- br(), - verbatimTextOutput("settings_path"), - verbatimTextOutput("print_bm_settings") - ) +tabPanel( + "Benchmark Settings", + br(), + verbatimTextOutput("settings_path"), + verbatimTextOutput("print_bm_settings") +) diff --git a/shiny/workflowPlot/ui_files/documentation_UI.R b/shiny/workflowPlot/ui_files/documentation_UI.R index 96d5d7eb295..2e9ab1e3765 100644 --- a/shiny/workflowPlot/ui_files/documentation_UI.R +++ b/shiny/workflowPlot/ui_files/documentation_UI.R @@ -1,18 +1,20 @@ -bs_accordion_sidebar(id = "documentation", - spec_side = c(width = 3, offset = 0), - spec_main = c(width = 9, offset = 0)) %>% +bs_accordion_sidebar( + id = "documentation", + spec_side = c(width = 3, offset = 0), + spec_main = c(width = 9, offset = 0) +) %>% bs_append( - title_side = "App Documentation", + title_side = "App Documentation", content_side = NULL, content_main = withMathJax(includeMarkdown("markdown/app_documentation.Rmd")) ) %>% bs_append( - title_side = "Setup Page", + title_side = "Setup Page", content_side = NULL, content_main = withMathJax(includeMarkdown("markdown/setup_page.Rmd")) ) %>% bs_append( - title_side = "History Runs", + title_side = "History Runs", content_side = NULL, content_main = HTML("
    This page is for searching history runs.
    @@ -21,28 +23,28 @@ bs_accordion_sidebar(id = "documentation", ") ) %>% bs_append( - title_side = "Exploratory Plots", + title_side = "Exploratory Plots", content_side = NULL, content_main = withMathJax(includeMarkdown("markdown/exploratory_plot.Rmd")) ) %>% bs_append( - title_side = "Benchmarking", + title_side = "Benchmarking", content_side = NULL, - content_main = + content_main = bs_accordion_sidebar(id = "benchmarking") %>% - bs_append( - title_side = "Settings", - content_side = NULL, - content_main = withMathJax(includeMarkdown("markdown/benchmarking_setting.Rmd")) - ) %>% - bs_append( - title_side = "Scores", - content_side = NULL, - content_main = withMathJax(includeMarkdown("markdown/benchmarking_scores.Rmd")) - ) %>% - bs_append( - title_side = "Plots", - content_side = NULL, - content_main = withMathJax(includeMarkdown("markdown/benchmarking_plots.Rmd")) - ) + bs_append( + title_side = "Settings", + content_side = NULL, + content_main = withMathJax(includeMarkdown("markdown/benchmarking_setting.Rmd")) + ) %>% + bs_append( + title_side = "Scores", + content_side = NULL, + content_main = withMathJax(includeMarkdown("markdown/benchmarking_scores.Rmd")) + ) %>% + bs_append( + title_side = "Plots", + content_side = NULL, + content_main = withMathJax(includeMarkdown("markdown/benchmarking_plots.Rmd")) + ) ) diff --git a/shiny/workflowPlot/ui_files/model_data_plots_UI.R b/shiny/workflowPlot/ui_files/model_data_plots_UI.R index 473776fd091..c406d63d573 100644 --- a/shiny/workflowPlot/ui_files/model_data_plots_UI.R +++ b/shiny/workflowPlot/ui_files/model_data_plots_UI.R @@ -6,18 +6,24 @@ tabPanel( wellPanel( selectInput("var_name_modeldata", "Variable Name", ""), textInput("units_modeldata", "Units", - placeholder = "Type units in udunits2 compatible format"), + placeholder = "Type units in udunits2 compatible format" + ), verbatimTextOutput("unit_text2"), dateRangeInput("date_range2", "Date Range", separator = " - "), fluidRow( - column(6, - selectInput("agg2", "Aggregation", - choices = c("NONE", "daily", "weekly", "monthly", "quarterly", "annually"), - selected = "daily")), - column(6, - selectInput("func2", "function", - choices = c("mean", "sum"), - selected = "mean") + column( + 6, + selectInput("agg2", "Aggregation", + choices = c("NONE", "daily", "weekly", "monthly", "quarterly", "annually"), + selected = "daily" + ) + ), + column( + 6, + selectInput("func2", "function", + choices = c("mean", "sum"), + selected = "mean" + ) ) ), radioButtons( @@ -34,8 +40,10 @@ tabPanel( value = 0.8 ), tags$hr(), - actionButton("ex_plot_modeldata", "Generate Plot", icon = icon("pencil-alt"), - width = "100%", class="btn-primary") + actionButton("ex_plot_modeldata", "Generate Plot", + icon = icon("pencil-alt"), + width = "100%", class = "btn-primary" + ) ) ), column( diff --git a/shiny/workflowPlot/ui_files/model_plots_UI.R b/shiny/workflowPlot/ui_files/model_plots_UI.R index 975f76eba85..f667b83bbd4 100644 --- a/shiny/workflowPlot/ui_files/model_plots_UI.R +++ b/shiny/workflowPlot/ui_files/model_plots_UI.R @@ -6,19 +6,25 @@ tabPanel( wellPanel( selectInput("var_name_model", "Variable Name", ""), textInput("units_model", "Units", - placeholder = "Type units in udunits2 compatible format"), + placeholder = "Type units in udunits2 compatible format" + ), verbatimTextOutput("unit_text"), dateRangeInput("date_range", "Date Range", separator = " - "), fluidRow( - column(6, - selectInput("agg", "Aggregation", - choices = c("NONE", "daily", "weekly", "monthly", "quarterly", "annually"), - selected = 
"daily")), - column(6, - selectInput("func", "function", - choices = c("mean", "sum"), - selected = "mean") - ) + column( + 6, + selectInput("agg", "Aggregation", + choices = c("NONE", "daily", "weekly", "monthly", "quarterly", "annually"), + selected = "daily" + ) + ), + column( + 6, + selectInput("func", "function", + choices = c("mean", "sum"), + selected = "mean" + ) + ) ), radioButtons( "plotType_model", @@ -34,8 +40,10 @@ tabPanel( value = 0.8 ), tags$hr(), - actionButton("ex_plot_model", "Generate Plot", icon = icon("pencil-alt"), - width = "100%", class="btn-primary") + actionButton("ex_plot_model", "Generate Plot", + icon = icon("pencil-alt"), + width = "100%", class = "btn-primary" + ) ) ), column( diff --git a/shiny/workflowPlot/ui_files/select_data_UI.R b/shiny/workflowPlot/ui_files/select_data_UI.R index 537fe3aa753..a3e225a63d5 100644 --- a/shiny/workflowPlot/ui_files/select_data_UI.R +++ b/shiny/workflowPlot/ui_files/select_data_UI.R @@ -1,8 +1,8 @@ # Select_Data tagList( - #column(6, htmlOutput("README")), -# DT::dataTableOutput("datatable"), + # column(6, htmlOutput("README")), + # DT::dataTableOutput("datatable"), br(), uiOutput("runsui"), verbatimTextOutput("dim_message") diff --git a/shiny/workflowPlot/ui_files/sidebar_UI.R b/shiny/workflowPlot/ui_files/sidebar_UI.R index ccb91b0d275..682b888f75f 100644 --- a/shiny/workflowPlot/ui_files/sidebar_UI.R +++ b/shiny/workflowPlot/ui_files/sidebar_UI.R @@ -2,18 +2,21 @@ tagList( h4("Load Model Output"), wellPanel( p("Please select the workflow IDs to continue. You can select multiple IDs"), - selectizeInput("all_workflow_id", "Mutliple Workflow IDs", c(), multiple=TRUE), + selectizeInput("all_workflow_id", "Mutliple Workflow IDs", c(), multiple = TRUE), p("Please select the run IDs. You can select multiple IDs"), - selectizeInput("all_run_id", "Mutliple Run IDs", c(), multiple=TRUE), + selectizeInput("all_run_id", "Mutliple Run IDs", c(), multiple = TRUE), fluidRow( - column(6, - actionButton("NewRun", "New Run", icon = icon("plus"), - width = "120%", class="btn-primary") - - ), - column(6, - actionButton("load_model", "Load", icon = icon("download"), width = "100%") - ) + column( + 6, + actionButton("NewRun", "New Run", + icon = icon("plus"), + width = "120%", class = "btn-primary" + ) + ), + column( + 6, + actionButton("load_model", "Load", icon = icon("download"), width = "100%") + ) ) ), h4("Load External Data"), @@ -23,13 +26,17 @@ tagList( # selectizeInput("all_site_id", "Select Site ID", c(), multiple=TRUE), selectizeInput("all_input_id", "Select Input ID", c()), fluidRow( - column(6, - actionButton("register_data", "Register", icon = icon("upload"), - width = "120%", class="btn-primary") - ), - column(6, - actionButton("load_data", "Load", icon = icon("download"), width = "100%") - ) + column( + 6, + actionButton("register_data", "Register", + icon = icon("upload"), + width = "120%", class = "btn-primary" + ) + ), + column( + 6, + actionButton("load_data", "Load", icon = icon("download"), width = "100%") + ) ) ) ) diff --git a/shiny/workflowPlot/ui_utils.R b/shiny/workflowPlot/ui_utils.R index 1c2630c2f75..bd9b0011bbd 100644 --- a/shiny/workflowPlot/ui_utils.R +++ b/shiny/workflowPlot/ui_utils.R @@ -8,4 +8,4 @@ source_ui <- function(...) 
{ load_anim_div <- function(plot_div) { plot_var <- plot_div source_ui("load_animation_div.R") -} \ No newline at end of file +} diff --git a/shiny/workflowPlot/workflowPlot_fcns.R b/shiny/workflowPlot/workflowPlot_fcns.R index c9cd7819f2c..229a40b6f53 100644 --- a/shiny/workflowPlot/workflowPlot_fcns.R +++ b/shiny/workflowPlot/workflowPlot_fcns.R @@ -1,83 +1,92 @@ -##----------------------------------------------------------------------------## -return_DF_from_run_ID <- function(diff_ids){ +## ----------------------------------------------------------------------------## +return_DF_from_run_ID <- function(diff_ids) { # Called by function parse_ids_from_input_runID # which is a wrapper of this function # Returns a DF for a particular run_id - split_string <- strsplit(diff_ids,',')[[1]] + split_string <- strsplit(diff_ids, ",")[[1]] # Workflow id is the first element. Trim leading and ending white spaces. Split by space now - wID <- as.numeric(strsplit(trimws(split_string[1],which = c("both")),' ')[[1]][2]) + wID <- as.numeric(strsplit(trimws(split_string[1], which = c("both")), " ")[[1]][2]) # Run id is the second element - runID <- as.numeric(strsplit(trimws(split_string[2],which = c("both")),' ')[[1]][2]) - return(data.frame(wID,runID)) + runID <- as.numeric(strsplit(trimws(split_string[2], which = c("both")), " ")[[1]][2]) + return(data.frame(wID, runID)) } -##----------------------------------------------------------------------------## +## ----------------------------------------------------------------------------## # Wrapper over return_DF_from_run_ID # @param list of multiple run ids # run_id_string: ('workflow' workflow_ID, 'run' run_id) # @return Data Frame of workflow and run ids -parse_ids_from_input_runID <- function(run_id_list){ +parse_ids_from_input_runID <- function(run_id_list) { globalDF <- data.frame() for (w_run_id in run_id_list) { - globalDF <- rbind(globalDF,return_DF_from_run_ID(w_run_id)) + globalDF <- rbind(globalDF, return_DF_from_run_ID(w_run_id)) } return(globalDF) } -##----------------------------------------------------------------------------## +## ----------------------------------------------------------------------------## # Allows to load actual data (different from model output) following the tutorial # https://github.com/PecanProject/pecan/blob/develop/documentation/tutorials/AnalyzeOutput/modelVSdata.Rmd # @params: bety,settings,File_path,File_format # loadObservationData <- function(bety,settings,File_path,File_format){ -loadObservationData <- function(bety,inputs_df){ +loadObservationData <- function(bety, inputs_df) { input_id <- inputs_df$input_id # File_format <- getFileFormat(bety,input_id) File_format <- PEcAn.DB::query.format.vars(bety = bety, input.id = input_id) start.year <- as.numeric(lubridate::year(inputs_df$start_date)) end.year <- as.numeric(lubridate::year(inputs_df$end_date)) File_path <- inputs_df$filePath - # TODO There is an issue with the db where file names are not saved properly. + # TODO There is an issue with the db where file names are not saved properly. 
# To make it work with the VM, uncomment the line below # File_path <- paste0(inputs_df$filePath,'.csv') site.id <- inputs_df$site_id site <- PEcAn.DB::query.site(site.id, bety) observations <- PEcAn.benchmark::load_data( - data.path = File_path, format = File_format, time.row = File_format$time.row, - site = site, start_year = start.year, end_year = end.year) + data.path = File_path, format = File_format, time.row = File_format$time.row, + site = site, start_year = start.year, end_year = end.year + ) return(observations) } -getSettingsFromWorkflowId <- function(bety,workflowID){ - basePath <- dplyr::tbl(bety, 'workflows') %>% - dplyr::filter(id %in% workflowID) %>% dplyr::pull(folder) - configPath <- file.path(basePath, 'pecan.CONFIGS.xml') +getSettingsFromWorkflowId <- function(bety, workflowID) { + basePath <- dplyr::tbl(bety, "workflows") %>% + dplyr::filter(id %in% workflowID) %>% + dplyr::pull(folder) + configPath <- file.path(basePath, "pecan.CONFIGS.xml") settings <- PEcAn.settings::read.settings(configPath) return(settings) } -##----------------------------------------------------------------------------## +## ----------------------------------------------------------------------------## # Get input id from selected site id. Returns inputs_df which is used to load observation data -getInputs <- function(bety,site_Id){ +getInputs <- function(bety, site_Id) { # Subsetting the input id list based on the current (VM) machine my_hostname <- PEcAn.remote::fqdn() - my_machine_id <- dplyr::tbl(bety, 'machines') %>% - dplyr::filter(hostname == my_hostname) %>% dplyr::pull(id) + my_machine_id <- dplyr::tbl(bety, "machines") %>% + dplyr::filter(hostname == my_hostname) %>% + dplyr::pull(id) # Inner join 'inputs' table with 'dbfiles' table # inputs_df would contain all the information about the site and input id required for # the tutorial mentioned above to compare model run with actual observations - inputs_df <- dplyr::tbl(bety, 'dbfiles') %>% - dplyr::filter(container_type == 'Input', machine_id == my_machine_id) %>% - dplyr::inner_join(tbl(bety, 'inputs') %>% dplyr::filter(site_id %in% site_Id), - by = c('container_id' = 'id')) %>% dplyr::collect() + inputs_df <- dplyr::tbl(bety, "dbfiles") %>% + dplyr::filter(container_type == "Input", machine_id == my_machine_id) %>% + dplyr::inner_join(tbl(bety, "inputs") %>% dplyr::filter(site_id %in% site_Id), + by = c("container_id" = "id") + ) %>% + dplyr::collect() # Order by container id (==input id) - inputs_df <- inputs_df[order(inputs_df$container_id),] + inputs_df <- inputs_df[order(inputs_df$container_id), ] # Mutate column as (input id, name) to be shown to the user - inputs_df <- inputs_df %>% - dplyr::mutate(input_selection_list = paste(inputs_df$container_id, inputs_df$name), - filePath = paste0(inputs_df$file_path,'/', inputs_df$file_name)) %>% - dplyr::select(input_id = container_id, filePath, input_selection_list, - start_date, end_date, site_id,name, machine_id, file_name, file_path, format_id) + inputs_df <- inputs_df %>% + dplyr::mutate( + input_selection_list = paste(inputs_df$container_id, inputs_df$name), + filePath = paste0(inputs_df$file_path, "/", inputs_df$file_name) + ) %>% + dplyr::select( + input_id = container_id, filePath, input_selection_list, + start_date, end_date, site_id, name, machine_id, file_name, file_path, format_id + ) return(inputs_df) } -##----------------------------------------------------------------------------## +## ----------------------------------------------------------------------------## diff 
--git a/tests/interactive-workflow.R b/tests/interactive-workflow.R index 90e4a9e661e..859c05d6c45 100644 --- a/tests/interactive-workflow.R +++ b/tests/interactive-workflow.R @@ -2,8 +2,8 @@ # install_github("blernermhc/RDataTracker") library(RDataTracker) -#args <- commandArgs(trailingOnly = TRUE) -#settings.file = args[1] +# args <- commandArgs(trailingOnly = TRUE) +# settings.file = args[1] settings.file <- "tests/ebi-forecast.igb.illinois.edu.biocro.xml" ## See README in tests/ folder for details library(PEcAn.all) @@ -13,36 +13,38 @@ library(PEcAn.all) #--------------------------------------------------------------------------------# # remove previous runs -unlink("pecan", recursive=TRUE) +unlink("pecan", recursive = TRUE) # show all queries to the database -#db.showQueries(TRUE) +# db.showQueries(TRUE) # check settings settings <- read.settings(settings.file) # get traits of pfts settings$pfts <- get.trait.data(settings$pfts, settings$model$type, settings$database$dbfiles, settings$database$bety, settings$meta.analysis$update) -saveXML(PEcAn.settings::listToXml(settings, "pecan"), file=file.path(settings$outdir, 'pecan.xml')) +saveXML(PEcAn.settings::listToXml(settings, "pecan"), file = file.path(settings$outdir, "pecan.xml")) # run meta-analysis -run.meta.analysis(settings$pfts, settings$meta.analysis$iter, settings$meta.analysis$random.effects, - settings$meta.analysis$threshold, settings$database$dbfiles, settings$database$bety) +run.meta.analysis( + settings$pfts, settings$meta.analysis$iter, settings$meta.analysis$random.effects, + settings$meta.analysis$threshold, settings$database$dbfiles, settings$database$bety +) # do conversions -for(i in 1:length(settings$run$inputs)) { +for (i in 1:length(settings$run$inputs)) { input <- settings$run$inputs[[i]] if (is.null(input)) next if (length(input) == 1) next # fia database - if (input['input'] == 'fia') { + if (input["input"] == "fia") { fia.to.psscss(settings) } # met download - if (input['input'] == 'Ameriflux') { + if (input["input"] == "Ameriflux") { # start/end date for weather start_date <- settings$run$start.date end_date <- settings$run$end.date @@ -51,32 +53,32 @@ for(i in 1:length(settings$run$inputs)) { site <- sub(".* \\((.*)\\)", "\\1", settings$run$site$name) # download data - fcn <- paste("download", input['input'], sep=".") - do.call(fcn, list(site, file.path(settings$database$dbfiles, input['input']), start_date=start_date, end_date=end_date)) + fcn <- paste("download", input["input"], sep = ".") + do.call(fcn, list(site, file.path(settings$database$dbfiles, input["input"]), start_date = start_date, end_date = end_date)) # convert to CF - met2CF.Ameriflux(file.path(settings$database$dbfiles, input['input']), site, file.path(settings$database$dbfiles, "cf"), start_date=start_date, end_date=end_date) + met2CF.Ameriflux(file.path(settings$database$dbfiles, input["input"]), site, file.path(settings$database$dbfiles, "cf"), start_date = start_date, end_date = end_date) # gap filing - metgapfill(file.path(settings$database$dbfiles, "cf"), site, file.path(settings$database$dbfiles, "gapfill"), start_date=start_date, end_date=end_date) + metgapfill(file.path(settings$database$dbfiles, "cf"), site, file.path(settings$database$dbfiles, "gapfill"), start_date = start_date, end_date = end_date) # model specific - load.modelpkg(input['output']) - fcn <- paste("met2model", input['output'], sep=".") - r <- do.call(fcn, list(file.path(settings$database$dbfiles, "gapfill"), site, file.path(settings$database$dbfiles, 
input['output']), start_date=start_date, end_date=end_date)) - settings$run$inputs[[i]] <- r[['file']] + load.modelpkg(input["output"]) + fcn <- paste("met2model", input["output"], sep = ".") + r <- do.call(fcn, list(file.path(settings$database$dbfiles, "gapfill"), site, file.path(settings$database$dbfiles, input["output"]), start_date = start_date, end_date = end_date)) + settings$run$inputs[[i]] <- r[["file"]] } # narr download } -saveXML(PEcAn.settings::listToXml(settings, "pecan"), file=file.path(settings$outdir, 'pecan.xml')) +saveXML(PEcAn.settings::listToXml(settings, "pecan"), file = file.path(settings$outdir, "pecan.xml")) # write configurations if (!file.exists(file.path(settings$rundir, "runs.txt")) | settings$meta.analysis$update == "TRUE") { run.write.configs(settings, settings$database$bety$write) } else { - PEcAn.logger::logger.info("Already wrote configuraiton files") + PEcAn.logger::logger.info("Already wrote configuraiton files") } @@ -91,8 +93,8 @@ if (!file.exists(file.path(settings$rundir, "runs.txt"))) { get.results(settings) # ensemble analysis -if (!file.exists(file.path(settings$outdir,"ensemble.ts.pdf"))) { - run.ensemble.analysis(settings,TRUE) +if (!file.exists(file.path(settings$outdir, "ensemble.ts.pdf"))) { + run.ensemble.analysis(settings, TRUE) } else { PEcAn.logger::logger.info("Already executed run.ensemble.analysis()") } @@ -101,7 +103,7 @@ if (!file.exists(file.path(settings$outdir,"ensemble.ts.pdf"))) { if (!file.exists(file.path(settings$outdir, "sensitivity.results.Rdata"))) { run.sensitivity.analysis(settings) } else { - PEcAn.logger::logger.info("Already executed run.sensitivity.analysis()") + PEcAn.logger::logger.info("Already executed run.sensitivity.analysis()") } # all done @@ -109,16 +111,17 @@ status.start("FINISHED") # send email if configured if (!is.null(settings$email) && !is.null(settings$email$to) && (settings$email$to != "")) { - sendmail(settings$email$from, settings$email$to, - paste0("Workflow has finished executing at ", date()), - paste0("You can find the results on ", PEcAn.remote::fqdn(), " in ", normalizePath(settings$outdir))) + sendmail( + settings$email$from, settings$email$to, + paste0("Workflow has finished executing at ", date()), + paste0("You can find the results on ", PEcAn.remote::fqdn(), " in ", normalizePath(settings$outdir)) + ) } # write end time in database -if (settings$workflow$id != 'NA') { - db.query(paste0("UPDATE workflows SET finished_at=NOW() WHERE id=", settings$workflow$id, " AND finished_at IS NULL"), params=settings$database$bety) +if (settings$workflow$id != "NA") { + db.query(paste0("UPDATE workflows SET finished_at=NOW() WHERE id=", settings$workflow$id, " AND finished_at IS NULL"), params = settings$database$bety) } status.end() db.print.connections() - diff --git a/tests/testpfts.R b/tests/testpfts.R index 91a0adef6c8..ebe840abecf 100644 --- a/tests/testpfts.R +++ b/tests/testpfts.R @@ -5,28 +5,31 @@ library(RPostgreSQL) runmeta <- function(pftid, pftname, model, dbparam) { folder <- file.path(dbparam$dbfiles, "pfts", model, pftname) - unlink(folder, recursive=TRUE, force=TRUE) - dir.create(folder, recursive=TRUE) - pft <- list(name=pftname, outdir=folder) - cat(paste0("TESTING [id=", pftid, " pft='", pftname, "' model='", model, "'] : get.traits\n"), file=stderr()) + unlink(folder, recursive = TRUE, force = TRUE) + dir.create(folder, recursive = TRUE) + pft <- list(name = pftname, outdir = folder) + cat(paste0("TESTING [id=", pftid, " pft='", pftname, "' model='", model, "'] : get.traits\n"), 
file = stderr()) pfts <- get.trait.data(list(pft), model, dbparam$dbfiles, dbparam, TRUE) - cat(paste0("TESTING [id=", pftid, " pft='", pftname, "' model='", model, "'] : meta.analysis\n"), file=stderr()) + cat(paste0("TESTING [id=", pftid, " pft='", pftname, "' model='", model, "'] : meta.analysis\n"), file = stderr()) run.meta.analysis(pfts, 3000, FALSE, 1.2, dbparam$dbfiles, dbparam) - cat(paste0("TESTING [id=", pftid, " pft='", pftname, "' model='", model, "'] : OK\n"), file=stderr()) + cat(paste0("TESTING [id=", pftid, " pft='", pftname, "' model='", model, "'] : OK\n"), file = stderr()) } testpft <- function(pftid, pftname, model, dbparam) { tryCatch(runmeta(pftid, pftname, model, dbparam), - error=function(e) { - cat(paste0("TESTING [id=", pftid, " pft='", pftname, "' model='", model, "'] : BROKEN - ", e$message, "\n"), file=stderr()) - for(con in dbListConnections(dbDriver("PostgreSQL"))) { - db.close(con) - } - }) + error = function(e) { + cat(paste0("TESTING [id=", pftid, " pft='", pftname, "' model='", model, "'] : BROKEN - ", e$message, "\n"), file = stderr()) + for (con in dbListConnections(dbDriver("PostgreSQL"))) { + db.close(con) + } + } + ) } -dbparam <- list(dbname="bety", user="bety", password="bety", host="localhost", dbfiles=PEcAn.utils::full.path("testpfts"), write=FALSE, driver="PostgreSQL") -pfts <- db.query("SELECT pfts.id AS id, pfts.name AS pft, modeltypes.name AS model FROM pfts, modeltypes WHERE pfts.modeltype_id=modeltypes.id ORDER BY id;", param=dbparam) +dbparam <- list(dbname = "bety", user = "bety", password = "bety", host = "localhost", dbfiles = PEcAn.utils::full.path("testpfts"), write = FALSE, driver = "PostgreSQL") +pfts <- db.query("SELECT pfts.id AS id, pfts.name AS pft, modeltypes.name AS model FROM pfts, modeltypes WHERE pfts.modeltype_id=modeltypes.id ORDER BY id;", param = dbparam) -options(scipen=12) -apply(pfts, 1, function(x) { testpft(x[[1]], x[[2]], x[[3]], dbparam) }) +options(scipen = 12) +apply(pfts, 1, function(x) { + testpft(x[[1]], x[[2]], x[[3]], dbparam) +}) diff --git a/web/plot.netcdf.R b/web/plot.netcdf.R index 6b0386d3eb4..4634c1d33b6 100644 --- a/web/plot.netcdf.R +++ b/web/plot.netcdf.R @@ -1,25 +1,25 @@ # -------------------------------------------------------------------------------- # Copyright (c) 2012 University of Illinois, NCSA. # All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the +# are made available under the terms of the # University of Illinois/NCSA Open Source License # which accompanies this distribution, and is available at # http://opensource.ncsa.illinois.edu/license.html # -------------------------------------------------------------------------------- -#library(PEcAn.visualization) +# library(PEcAn.visualization) # ---------------------------------------------------------------------- # COMMAND LINE ARGUMENTS # ---------------------------------------------------------------------- # arguments are-args year variable -args <- commandArgs(trailingOnly = TRUE) +args <- commandArgs(trailingOnly = TRUE) datafile <- args[1] -year <- args[2] -xvar <- args[3] -yvar <- args[4] -width <- as.numeric(args[5]) -height <- as.numeric(args[6]) +year <- args[2] +xvar <- args[3] +yvar <- args[4] +width <- as.numeric(args[5]) +height <- as.numeric(args[6]) filename <- args[7] # datafile="../../output/PEcAn_14/out/23/2006.nc" @@ -29,7 +29,6 @@ filename <- args[7] # width=800 # height=600 # filename="plot.png" - -#error_reporting(E_ALL | E_STRICT); -PEcAn.visualization::plot_netcdf(datafile, yvar, xvar, width, height, filename, year); - + +# error_reporting(E_ALL | E_STRICT); +PEcAn.visualization::plot_netcdf(datafile, yvar, xvar, width, height, filename, year) diff --git a/web/workflow.R b/web/workflow.R index ea5ae7d9510..d9c19dff5ec 100755 --- a/web/workflow.R +++ b/web/workflow.R @@ -110,8 +110,8 @@ if (PEcAn.utils::status.check("CONFIG") == 0) { settings <- PEcAn.settings::read.settings(file.path(settings$outdir, "pecan.CONFIGS.xml")) } -if ((length(which(commandArgs() == "--advanced")) != 0) -&& (PEcAn.utils::status.check("ADVANCED") == 0)) { +if ((length(which(commandArgs() == "--advanced")) != 0) && + (PEcAn.utils::status.check("ADVANCED") == 0)) { PEcAn.utils::status.start("ADVANCED") q() } @@ -124,7 +124,7 @@ if (PEcAn.utils::status.check("MODEL") == 0) { # If we're doing an ensemble run, don't stop. If only a single run, we # should be stopping. if (is.null(settings[["ensemble"]]) || - as.numeric(settings[[c("ensemble", "size")]]) == 1) { + as.numeric(settings[[c("ensemble", "size")]]) == 1) { stop_on_error <- TRUE } else { stop_on_error <- FALSE @@ -142,16 +142,16 @@ if (PEcAn.utils::status.check("OUTPUT") == 0) { } # Run ensemble analysis on model output. 
-if ("ensemble" %in% names(settings) -&& PEcAn.utils::status.check("ENSEMBLE") == 0) { +if ("ensemble" %in% names(settings) && + PEcAn.utils::status.check("ENSEMBLE") == 0) { PEcAn.utils::status.start("ENSEMBLE") runModule.run.ensemble.analysis(settings, TRUE) PEcAn.utils::status.end() } # Run sensitivity analysis and variance decomposition on model output -if ("sensitivity.analysis" %in% names(settings) -&& PEcAn.utils::status.check("SENSITIVITY") == 0) { +if ("sensitivity.analysis" %in% names(settings) && + PEcAn.utils::status.check("SENSITIVITY") == 0) { PEcAn.utils::status.start("SENSITIVITY") runModule.run.sensitivity.analysis(settings) PEcAn.utils::status.end() @@ -177,8 +177,8 @@ if ("state.data.assimilation" %in% names(settings)) { } # Run benchmarking -if ("benchmarking" %in% names(settings) -&& "benchmark" %in% names(settings$benchmarking)) { +if ("benchmarking" %in% names(settings) && + "benchmark" %in% names(settings$benchmarking)) { PEcAn.utils::status.start("BENCHMARKING") results <- papply(settings, function(x) { @@ -201,9 +201,9 @@ if (PEcAn.utils::status.check("FINISHED") == 0) { ) # Send email if configured - if (!is.null(settings$email) - && !is.null(settings$email$to) - && (settings$email$to != "")) { + if (!is.null(settings$email) && + !is.null(settings$email$to) && + (settings$email$to != "")) { sendmail( settings$email$from, settings$email$to, From d57bf71f8f17c172fa115f4d65a58c06439e9141 Mon Sep 17 00:00:00 2001 From: Abhinav Pandey Date: Mon, 6 Jan 2025 16:43:52 +0530 Subject: [PATCH 2/5] Testing the script --- pre-commit-config.yml | 8 ++++++++ scripts/format.sh | 4 ++-- scripts/workflow.bm.R | 2 ++ 3 files changed, 12 insertions(+), 2 deletions(-) create mode 100644 pre-commit-config.yml diff --git a/pre-commit-config.yml b/pre-commit-config.yml new file mode 100644 index 00000000000..7a5fc457278 --- /dev/null +++ b/pre-commit-config.yml @@ -0,0 +1,8 @@ +repos: +- repo: local + hooks: + - id: r-format + name: R Formatter + entry: ./scripts/format.sh + language: script + files: \.R$ \ No newline at end of file diff --git a/scripts/format.sh b/scripts/format.sh index 40fce440aec..6e143be9326 100755 --- a/scripts/format.sh +++ b/scripts/format.sh @@ -8,13 +8,13 @@ set -e # Function to check if an R package is installed -is_r_package_installed() { +styler_in() { local pkg="$1" Rscript -e "if (!requireNamespace('$pkg', quietly = TRUE)) { quit(status=1) }" } # Check if styler is installed; install it if not -if is_r_package_installed "styler"; then +if styler_in "styler"; then echo "The 'styler' package is already installed." else echo "The 'styler' package is not installed. Installing now..." 
diff --git a/scripts/workflow.bm.R b/scripts/workflow.bm.R index bcfcb0af818..c3a195348ec 100644 --- a/scripts/workflow.bm.R +++ b/scripts/workflow.bm.R @@ -21,6 +21,8 @@ status.start <- function(name) { cat(paste(name, format(Sys.time(), "%F %T"), sep = "\t"), file = file.path(settings$outdir, "STATUS"), append = TRUE) } } + + status.end <- function(status = "DONE") { if (exists("settings")) { cat(paste("", format(Sys.time(), "%F %T"), status, "\n", sep = "\t"), file = file.path(settings$outdir, "STATUS"), append = TRUE) From 047eec3de81c641d3be571f28d3b075933bdda85 Mon Sep 17 00:00:00 2001 From: Abhinav Pandey Date: Mon, 6 Jan 2025 17:17:10 +0530 Subject: [PATCH 3/5] Roxygenise R files --- base/db/man/convert_input.Rd | 6 +- base/db/man/db_merge_into.Rd | 6 +- base/db/man/get_postgres_envvars.Rd | 8 +-- base/db/man/insert.format.vars.Rd | 6 +- base/db/man/insert_table.Rd | 12 ++-- base/db/man/match_dbcols.Rd | 2 +- base/db/man/query_priors.Rd | 70 +++++++++++-------- base/db/man/symmetric_setdiff.Rd | 16 +++-- base/db/man/try2sqlite.Rd | 4 +- base/logger/man/severeifnot.Rd | 6 +- base/remote/man/qsub_parallel.Rd | 2 +- base/remote/man/remote.copy.from.Rd | 4 +- base/remote/man/remote.copy.to.Rd | 4 +- base/remote/man/remote.execute.R.Rd | 2 +- base/remote/man/remote.execute.cmd.Rd | 4 +- base/settings/man/clean.settings.Rd | 2 +- base/settings/man/get_args.Rd | 4 +- base/settings/man/papply.Rd | 19 +++-- base/settings/man/site.pft.linkage.Rd | 18 ++--- base/utils/man/datetime2doy.Rd | 2 +- base/utils/man/days_in_year.Rd | 6 +- base/utils/man/distn.stats.Rd | 2 +- base/utils/man/download.url.Rd | 2 +- base/utils/man/download_file.Rd | 8 ++- base/utils/man/full.path.Rd | 2 +- base/utils/man/get.parameter.stat.Rd | 4 +- base/utils/man/get.run.id.Rd | 4 +- base/utils/man/load.modelpkg.Rd | 4 +- base/utils/man/need_packages.Rd | 2 +- base/utils/man/r2bugs.distributions.Rd | 8 ++- base/utils/man/retry.func.Rd | 13 ++-- base/utils/man/robustly.Rd | 12 ++-- base/utils/man/seconds_in_year.Rd | 6 +- base/utils/man/sendmail.Rd | 2 +- base/utils/man/timezone_hour.Rd | 2 +- base/utils/man/trait.lookup.Rd | 6 +- base/utils/man/transformstats.Rd | 10 +-- base/utils/man/tryl.Rd | 4 +- base/utils/man/unit_is_parseable.Rd | 4 +- base/visualization/man/vwReg.Rd | 2 +- base/workflow/man/start_model_runs.Rd | 2 +- models/ed/man/read_restart.ED2.Rd | 10 +-- models/sipnet/man/mergeNC.Rd | 11 +-- modules/allometry/man/AllomAve.Rd | 15 ++-- modules/allometry/man/allom.predict.Rd | 8 +-- modules/allometry/man/load.allom.Rd | 6 +- modules/allometry/man/read.allom.data.Rd | 2 +- modules/assim.batch/man/autoburnin.Rd | 8 +-- modules/assim.batch/man/getBurnin.Rd | 8 +-- .../assim.batch/man/pda.generate.externals.Rd | 4 +- modules/assim.sequential/man/Analysis.sda.Rd | 2 +- .../man/Create_Site_PFT_CSV.Rd | 25 ++++--- .../man/construct_nimble_H.Rd | 2 +- .../man/sda.enkf.multisite.Rd | 6 +- .../man/align_by_first_observation.Rd | 17 ++--- .../benchmark/man/align_data_to_data_pft.Rd | 29 ++++---- modules/benchmark/man/align_pft.Rd | 32 +++++---- .../benchmark/man/check_if_species_list.Rd | 2 +- modules/data.atmosphere/man/align.met.Rd | 36 +++++----- .../man/cos_solar_zenith_angle.Rd | 2 +- .../data.atmosphere/man/download.Ameriflux.Rd | 2 +- .../man/download.Fluxnet2015.Rd | 2 +- .../man/download.Geostreams.Rd | 10 +-- modules/data.atmosphere/man/download.ICOS.Rd | 4 +- .../data.atmosphere/man/download.NARR_site.Rd | 2 - .../data.atmosphere/man/download.NEONmet.Rd | 2 +- .../data.atmosphere/man/download.NOAA_GEFS.Rd | 
12 ++-- .../man/extract.local.CMIP5.Rd | 4 +- .../man/extract.local.NLDAS.Rd | 6 +- .../data.atmosphere/man/extract.nc.ERA5.Rd | 6 +- .../man/gen.subdaily.models.Rd | 4 +- modules/data.atmosphere/man/get.rh.Rd | 2 +- .../data.atmosphere/man/get_NARR_thredds.Rd | 1 - .../data.atmosphere/man/merge_met_variable.Rd | 20 +++--- modules/data.atmosphere/man/met.process.Rd | 2 +- .../man/met2CF.AmerifluxLBL.Rd | 2 +- .../data.atmosphere/man/met2CF.Geostreams.Rd | 2 +- modules/data.atmosphere/man/met2CF.csv.Rd | 22 +++--- ...et_temporal_downscale.Gaussian_ensemble.Rd | 2 +- modules/data.atmosphere/man/nc.merge.Rd | 2 +- modules/data.atmosphere/man/noaa_stage2.Rd | 2 +- .../man/predict_subdaily_met.Rd | 6 +- modules/data.atmosphere/man/spin.met.Rd | 25 +++---- modules/data.atmosphere/man/split_wind.Rd | 12 ++-- .../man/temporal.downscale.functions.Rd | 10 +-- modules/data.land/man/BADM.Rd | 4 +- modules/data.land/man/EPA_ecoregion_finder.Rd | 2 +- modules/data.land/man/Read.IC.info.BADM.Rd | 4 +- modules/data.land/man/Read_Tucson.Rd | 4 +- modules/data.land/man/dataone_download.Rd | 2 +- .../data.land/man/download_NEON_soilmoist.Rd | 4 +- modules/data.land/man/extract_NEON_veg.Rd | 4 +- modules/data.land/man/extract_soil_gssurgo.Rd | 8 +-- modules/data.land/man/extract_soil_nc.Rd | 8 +-- modules/data.land/man/gSSURGO.Query.Rd | 20 +++--- modules/data.land/man/match_species_id.Rd | 20 +++--- modules/data.land/man/sclass.Rd | 2 +- modules/data.land/man/shp2kml.Rd | 2 +- modules/data.land/man/soil2netcdf.Rd | 12 ++-- modules/data.land/man/soil_class.Rd | 18 ++--- modules/data.land/man/soil_params.Rd | 2 +- .../data.land/man/soilgrids_soilC_extract.Rd | 18 ++--- modules/data.remote/man/GEDI_AGB_prep.Rd | 16 ++--- modules/data.remote/man/MODIS_LC_prep.Rd | 2 +- modules/data.remote/man/NASA_CMR_finder.Rd | 6 +- modules/data.remote/man/NASA_DAAC_URL.Rd | 12 ++-- modules/data.remote/man/NASA_DAAC_download.Rd | 22 +++--- modules/data.remote/man/call_MODIS.Rd | 16 ++--- .../man/download.LandTrendr.AGB.Rd | 10 +-- .../data.remote/man/download.thredds.AGB.Rd | 4 +- .../data.remote/man/extract.LandTrendr.AGB.Rd | 12 ++-- .../man/extract_phenology_MODIS.Rd | 8 +-- modules/data.remote/man/l4_download.Rd | 43 ++++++------ modules/meta.analysis/man/pecan.ma.Rd | 4 +- .../priors/man/get.quantiles.from.density.Rd | 2 +- modules/priors/man/get.sample.Rd | 4 +- modules/priors/man/prior.fn.Rd | 4 +- modules/uncertainty/man/flux.uncertainty.Rd | 2 +- .../uncertainty/man/get.ensemble.samples.Rd | 2 +- modules/uncertainty/man/input.ens.gen.Rd | 4 +- .../uncertainty/man/read.ensemble.output.Rd | 2 +- .../uncertainty/man/write.ensemble.configs.Rd | 2 +- 122 files changed, 528 insertions(+), 474 deletions(-) diff --git a/base/db/man/convert_input.Rd b/base/db/man/convert_input.Rd index 466e46b73a3..9ab1161b640 100644 --- a/base/db/man/convert_input.Rd +++ b/base/db/man/convert_input.Rd @@ -97,14 +97,14 @@ along to fcn. The command to execute fcn is built as a string. \section{Database files}{ There are two kinds of database records (in different tables) that represent a given data file in the file system. An input file -contains information about the contents of the data file. A dbfile contains machine spacific information for a given input file, -such as the file path. Because duplicates of data files for a given input can be on multiple different machines, there can be more +contains information about the contents of the data file. A dbfile contains machine spacific information for a given input file, +such as the file path. 
Because duplicates of data files for a given input can be on multiple different machines, there can be more than one dbfile for a given input file. } \section{Time-span appending}{ -By default, convert_input tries to optimize the download of most data products by only downloading the years of data not present on +By default, convert_input tries to optimize the download of most data products by only downloading the years of data not present on the current machine. (For example, if files for 2004-2008 exist for a given data product exist on this machine and the user requests 2006-2010, the function will only download data for 2009 and 2010). In year-long data files, each year exists as a separate file. The database input file contains records of the bounds of the range stored by those years. The data optimization can be turned off diff --git a/base/db/man/db_merge_into.Rd b/base/db/man/db_merge_into.Rd index b4004667d8f..9ab9264c7db 100644 --- a/base/db/man/db_merge_into.Rd +++ b/base/db/man/db_merge_into.Rd @@ -20,7 +20,7 @@ db_merge_into(values, table, con, by = NULL, drop = FALSE, ...) \item{...}{ Arguments passed on to \code{\link[=insert_table]{insert_table}} \describe{ - \item{\code{coerce_col_class}}{logical, whether or not to coerce local data columns + \item{\code{coerce_col_class}}{logical, whether or not to coerce local data columns to SQL classes. Default = `TRUE.`} }} } @@ -32,7 +32,7 @@ Merge local data frame into SQL table } \examples{ irisdb <- DBI::dbConnect(RSQLite::SQLite(), ":memory:") -dplyr::copy_to(irisdb, iris[1:10,], name = "iris", overwrite = TRUE) -db_merge_into(iris[1:12,], "iris", irisdb) +dplyr::copy_to(irisdb, iris[1:10, ], name = "iris", overwrite = TRUE) +db_merge_into(iris[1:12, ], "iris", irisdb) dplyr::tbl(irisdb, "iris") \%>\% dplyr::count() } diff --git a/base/db/man/get_postgres_envvars.Rd b/base/db/man/get_postgres_envvars.Rd index c802444d6b4..17c6478a4ec 100644 --- a/base/db/man/get_postgres_envvars.Rd +++ b/base/db/man/get_postgres_envvars.Rd @@ -30,11 +30,11 @@ The list of environment variables we check is taken from the per-session behavior (e.g. PGTZ, PGSYSCONFDIR). 
} \examples{ - host <- Sys.getenv("PGHOST") # to restore environment after demo +host <- Sys.getenv("PGHOST") # to restore environment after demo - Sys.unsetenv("PGHOST") - get_postgres_envvars()$host # NULL - get_postgres_envvars(host = "default", port = 5432)$host # "default" +Sys.unsetenv("PGHOST") +get_postgres_envvars()$host # NULL +get_postgres_envvars(host = "default", port = 5432)$host # "default" # defaults are ignored for a variable that exists Sys.setenv(PGHOST = "localhost") get_postgres_envvars()$host # "localhost" diff --git a/base/db/man/insert.format.vars.Rd b/base/db/man/insert.format.vars.Rd index 6d425109c8a..d0f6c7a9af0 100644 --- a/base/db/man/insert.format.vars.Rd +++ b/base/db/man/insert.format.vars.Rd @@ -56,7 +56,8 @@ formats_variables_tibble <- tibble::tibble( name = c("NPP", NA, "YEAR"), unit = c("g C m-2 yr-1", NA, NA), storage_type = c(NA, NA, "\%Y"), - column_number = c(2, NA, 4)) + column_number = c(2, NA, 4) +) insert.format.vars( con = con, @@ -65,7 +66,8 @@ insert.format.vars( notes = "NPP from Harvard Forest.", header = FALSE, skip = 0, - formats_variables = formats_variables_tibble) + formats_variables = formats_variables_tibble +) } } \author{ diff --git a/base/db/man/insert_table.Rd b/base/db/man/insert_table.Rd index f0b15fc0cfb..81952c890f6 100644 --- a/base/db/man/insert_table.Rd +++ b/base/db/man/insert_table.Rd @@ -13,7 +13,7 @@ insert_table(values, table, con, coerce_col_class = TRUE, drop = TRUE) \item{con}{Database connection object} -\item{coerce_col_class}{logical, whether or not to coerce local data columns +\item{coerce_col_class}{logical, whether or not to coerce local data columns to SQL classes. Default = `TRUE.`} \item{drop}{logical. If `TRUE` (default), drop columns not found in SQL table.} @@ -22,14 +22,14 @@ to SQL classes. Default = `TRUE.`} data frame with query results } \description{ -First, subset to matching columns. Then, make sure the local and SQL column -classes match, coercing local to SQL as necessary (or throwing an error). -Then, build an SQL string for the insert statement. Finally, insert into the +First, subset to matching columns. Then, make sure the local and SQL column +classes match, coercing local to SQL as necessary (or throwing an error). +Then, build an SQL string for the insert statement. Finally, insert into the database. } \examples{ irisdb <- DBI::dbConnect(RSQLite::SQLite(), ":memory:") -dplyr::copy_to(irisdb, iris[1,], name = "iris", overwrite = TRUE) -insert_table(iris[-1,], "iris", irisdb) +dplyr::copy_to(irisdb, iris[1, ], name = "iris", overwrite = TRUE) +insert_table(iris[-1, ], "iris", irisdb) dplyr::tbl(irisdb, "iris") } diff --git a/base/db/man/match_dbcols.Rd b/base/db/man/match_dbcols.Rd index 9c42badadc0..1ce154af69e 100644 --- a/base/db/man/match_dbcols.Rd +++ b/base/db/man/match_dbcols.Rd @@ -13,7 +13,7 @@ match_dbcols(values, table, con, coerce_col_class = TRUE, drop = TRUE) \item{con}{Database connection object} -\item{coerce_col_class}{logical, whether or not to coerce local data columns +\item{coerce_col_class}{logical, whether or not to coerce local data columns to SQL classes. Default = `TRUE.`} \item{drop}{logical. If `TRUE` (default), drop columns not found in SQL table.} diff --git a/base/db/man/query_priors.Rd b/base/db/man/query_priors.Rd index 10677150f7d..2a94de36902 100644 --- a/base/db/man/query_priors.Rd +++ b/base/db/man/query_priors.Rd @@ -42,39 +42,47 @@ Query priors using prepared statements } \examples{ \dontrun{ - con <- db.open(...) +con <- db.open(...) 
- # No trait provided, so return all available traits - pdat <- query_priors( - c("temperate.Early_Hardwood", "temperate.North_Mid_Hardwood", - "temperate.Late_Hardwood"), - con = con - ) +# No trait provided, so return all available traits +pdat <- query_priors( + c( + "temperate.Early_Hardwood", "temperate.North_Mid_Hardwood", + "temperate.Late_Hardwood" + ), + con = con +) - # Traits provided, so restrict to only those traits. Note that - # because `expand = TRUE`, this will search for these traits for - # every PFT. - pdat2 <- query_priors( - c("Optics.Temperate_Early_Hardwood", - "Optics.Temperate_Mid_Hardwood", - "Optics.Temperate_Late_Hardwood"), - c("leaf_reflect_vis", "leaf_reflect_nir"), - con = con - ) +# Traits provided, so restrict to only those traits. Note that +# because `expand = TRUE`, this will search for these traits for +# every PFT. +pdat2 <- query_priors( + c( + "Optics.Temperate_Early_Hardwood", + "Optics.Temperate_Mid_Hardwood", + "Optics.Temperate_Late_Hardwood" + ), + c("leaf_reflect_vis", "leaf_reflect_nir"), + con = con +) - # With `expand = FALSE`, search the first trait for the first PFT, - # the second trait for the second PFT, etc. Note that this means - # PFT and trait input vectors must be the same length. - pdat2 <- query_priors( - c("Optics.Temperate_Early_Hardwood", - "Optics.Temperate_Early_Hardwood", - "Optics.Temperate_Mid_Hardwood", - "Optics.Temperate_Late_Hardwood"), - c("leaf_reflect_vis", - "leaf_reflect_nir", - "leaf_reflect_vis", - "leaf_reflect_nir"), - con = con - ) +# With `expand = FALSE`, search the first trait for the first PFT, +# the second trait for the second PFT, etc. Note that this means +# PFT and trait input vectors must be the same length. +pdat2 <- query_priors( + c( + "Optics.Temperate_Early_Hardwood", + "Optics.Temperate_Early_Hardwood", + "Optics.Temperate_Mid_Hardwood", + "Optics.Temperate_Late_Hardwood" + ), + c( + "leaf_reflect_vis", + "leaf_reflect_nir", + "leaf_reflect_vis", + "leaf_reflect_nir" + ), + con = con +) } } diff --git a/base/db/man/symmetric_setdiff.Rd b/base/db/man/symmetric_setdiff.Rd index 572a7f20522..aa83f1164c7 100644 --- a/base/db/man/symmetric_setdiff.Rd +++ b/base/db/man/symmetric_setdiff.Rd @@ -34,11 +34,15 @@ isn't numeric to character, to facilitate comparison.} Symmetric set difference of two data frames } \examples{ -xdf <- data.frame(a = c("a", "b", "c"), - b = c(1, 2, 3), - stringsAsFactors = FALSE) -ydf <- data.frame(a = c("a", "b", "d"), - b = c(1, 2.5, 3), - stringsAsFactors = FALSE) +xdf <- data.frame( + a = c("a", "b", "c"), + b = c(1, 2, 3), + stringsAsFactors = FALSE +) +ydf <- data.frame( + a = c("a", "b", "d"), + b = c(1, 2.5, 3), + stringsAsFactors = FALSE +) symmetric_setdiff(xdf, ydf) } diff --git a/base/db/man/try2sqlite.Rd b/base/db/man/try2sqlite.Rd index e4b6407712b..083ccd0dc31 100644 --- a/base/db/man/try2sqlite.Rd +++ b/base/db/man/try2sqlite.Rd @@ -13,8 +13,8 @@ Multiple files are combined with `data.table::rbindlist`.} \item{sqlite_file}{Target SQLite database file name, as character.} } \description{ -The TRY file is huge and unnecessarily long, which makes it difficult to -work with. The resulting SQLite database is much smaller on disk, and can be +The TRY file is huge and unnecessarily long, which makes it difficult to +work with. The resulting SQLite database is much smaller on disk, and can be read much faster thanks to lazy evaluation. 
} \details{ diff --git a/base/logger/man/severeifnot.Rd b/base/logger/man/severeifnot.Rd index 0bc51df1826..86394f04f98 100644 --- a/base/logger/man/severeifnot.Rd +++ b/base/logger/man/severeifnot.Rd @@ -43,8 +43,10 @@ infoifnot("Something is not a list.", is.list(a), is.list(b)) warnifnot("I would prefer it if you used lists.", is.list(a), is.list(b)) errorifnot("You should definitely use lists.", is.list(a), is.list(b)) try({ - severeifnot("I cannot deal with the fact that something is not a list.", + severeifnot( + "I cannot deal with the fact that something is not a list.", is.list(a), - is.list(b)) + is.list(b) + ) }) } diff --git a/base/remote/man/qsub_parallel.Rd b/base/remote/man/qsub_parallel.Rd index 274104b8139..06e2409ccf9 100644 --- a/base/remote/man/qsub_parallel.Rd +++ b/base/remote/man/qsub_parallel.Rd @@ -28,7 +28,7 @@ qsub_parallel } \examples{ \dontrun{ - qsub_parallel(settings) +qsub_parallel(settings) } } \author{ diff --git a/base/remote/man/remote.copy.from.Rd b/base/remote/man/remote.copy.from.Rd index 794aaa06c21..d751108d742 100644 --- a/base/remote/man/remote.copy.from.Rd +++ b/base/remote/man/remote.copy.from.Rd @@ -38,8 +38,8 @@ is a folder it will copy the file into that folder. } \examples{ \dontrun{ - host <- list(name='geo.bu.edu', user='kooper', tunnel='/tmp/geo.tunnel') - remote.copy.from(host, '/tmp/kooper', '/tmp/geo.tmp', delete=TRUE) +host <- list(name = "geo.bu.edu", user = "kooper", tunnel = "/tmp/geo.tunnel") +remote.copy.from(host, "/tmp/kooper", "/tmp/geo.tmp", delete = TRUE) } } \author{ diff --git a/base/remote/man/remote.copy.to.Rd b/base/remote/man/remote.copy.to.Rd index 1130c3b3501..6ac147e23f5 100644 --- a/base/remote/man/remote.copy.to.Rd +++ b/base/remote/man/remote.copy.to.Rd @@ -28,8 +28,8 @@ is a folder it will copy the file into that folder. } \examples{ \dontrun{ - host <- list(name='geo.bu.edu', user='kooper', tunnel='/tmp/geo.tunnel') - remote.copy.to(host, '/tmp/kooper', '/tmp/kooper', delete=TRUE) +host <- list(name = "geo.bu.edu", user = "kooper", tunnel = "/tmp/geo.tunnel") +remote.copy.to(host, "/tmp/kooper", "/tmp/kooper", delete = TRUE) } } \author{ diff --git a/base/remote/man/remote.execute.R.Rd b/base/remote/man/remote.execute.R.Rd index 5c47303f527..35f080305ee 100644 --- a/base/remote/man/remote.execute.R.Rd +++ b/base/remote/man/remote.execute.R.Rd @@ -39,7 +39,7 @@ machine it will execute the command locally without ssh. } \examples{ \dontrun{ - remote.execute.R('list.files()', host='localhost', verbose=FALSE) +remote.execute.R("list.files()", host = "localhost", verbose = FALSE) } } \author{ diff --git a/base/remote/man/remote.execute.cmd.Rd b/base/remote/man/remote.execute.cmd.Rd index d9a51e2c863..8fbf7b5e719 100644 --- a/base/remote/man/remote.execute.cmd.Rd +++ b/base/remote/man/remote.execute.cmd.Rd @@ -28,8 +28,8 @@ machine it will execute the command locally without ssh. } \examples{ \dontrun{ - host <- list(name='geo.bu.edu', user='kooper', tunnel='/tmp/geo.tunnel') - print(remote.execute.cmd(host, 'ls', c('-l', '/'), stderr=TRUE)) +host <- list(name = "geo.bu.edu", user = "kooper", tunnel = "/tmp/geo.tunnel") +print(remote.execute.cmd(host, "ls", c("-l", "/"), stderr = TRUE)) } } \author{ diff --git a/base/settings/man/clean.settings.Rd b/base/settings/man/clean.settings.Rd index 74d1a2a150e..b38b5a1e4f2 100644 --- a/base/settings/man/clean.settings.Rd +++ b/base/settings/man/clean.settings.Rd @@ -24,7 +24,7 @@ set the outdir to be 'pecan' for the next run. 
} \examples{ \dontrun{ -clean.settings('output/PEcAn_1/pecan.xml', 'pecan.xml') +clean.settings("output/PEcAn_1/pecan.xml", "pecan.xml") } } \author{ diff --git a/base/settings/man/get_args.Rd b/base/settings/man/get_args.Rd index 9dd874cbe6e..b3be24eb0c8 100644 --- a/base/settings/man/get_args.Rd +++ b/base/settings/man/get_args.Rd @@ -14,5 +14,7 @@ Used in web/workflow.R to parse command line arguments. See also https://github.com/PecanProject/pecan/pull/2626. } \examples{ -\dontrun{./web/workflow.R -h} +\dontrun{ +. / web / workflow.R - h +} } diff --git a/base/settings/man/papply.Rd b/base/settings/man/papply.Rd index ac8cccccab7..ee779826d75 100644 --- a/base/settings/man/papply.Rd +++ b/base/settings/man/papply.Rd @@ -50,18 +50,18 @@ result in an error. } } \examples{ -f = function(settings, ...) { +f <- function(settings, ...) { # Here's how I envisioned a typical use case within a standard PEcAn function - if(is.MultiSettings(settings)) { + if (is.MultiSettings(settings)) { return(papply(settings, f, ...)) } - + # Don't worry about the beolow, it's just some guts to make the function do something we can see l <- list(...) - for(i in seq_along(l)) { + for (i in seq_along(l)) { ind <- length(settings) + 1 settings[[ind]] <- l[[i]] - if(!is.null(names(l))) { + if (!is.null(names(l))) { names(settings)[ind] <- names(l)[i] } } @@ -69,14 +69,13 @@ f = function(settings, ...) { } # Example -settings1 <- Settings(list(a="aa", b=1:3, c="NA")) -settings2 <- Settings(list(a="A", b=4:5, c=paste)) +settings1 <- Settings(list(a = "aa", b = 1:3, c = "NA")) +settings2 <- Settings(list(a = "A", b = 4:5, c = paste)) multiSettings <- MultiSettings(settings1, settings2) # The fucntion should add element $d = D to either a Settings, or each entry in a MultiSettings -f(settings1, d="D") -print(f(multiSettings, d="D"), TRUE) - +f(settings1, d = "D") +print(f(multiSettings, d = "D"), TRUE) } \author{ Ryan Kelly diff --git a/base/settings/man/site.pft.linkage.Rd b/base/settings/man/site.pft.linkage.Rd index 56bae28cf0b..d7bb7d4042d 100644 --- a/base/settings/man/site.pft.linkage.Rd +++ b/base/settings/man/site.pft.linkage.Rd @@ -27,17 +27,17 @@ resulting multiple rows for a site. } \examples{ \dontrun{ -#setting up the Look up tables -site.pft.links <-tribble( - ~site, ~pft, - "1000025731", "temperate.broadleaf.deciduous1", - "1000025731", "temperate.needleleaf.evergreen", - "1000000048", "temperate.broadleaf.deciduous2", - "772", "temperate.broadleaf.deciduous3", - "763", "temperate.broadleaf.deciduous4" +# setting up the Look up tables +site.pft.links <- tribble( + ~site, ~pft, + "1000025731", "temperate.broadleaf.deciduous1", + "1000025731", "temperate.needleleaf.evergreen", + "1000000048", "temperate.broadleaf.deciduous2", + "772", "temperate.broadleaf.deciduous3", + "763", "temperate.broadleaf.deciduous4" ) # sending a multi- setting xml file to the function -site.pft.linkage(settings,site.pft.links) +site.pft.linkage(settings, site.pft.links) } } diff --git a/base/utils/man/datetime2doy.Rd b/base/utils/man/datetime2doy.Rd index afb22dc51f3..99fb6cf25cb 100644 --- a/base/utils/man/datetime2doy.Rd +++ b/base/utils/man/datetime2doy.Rd @@ -29,7 +29,7 @@ Julian Day do not support non-integer days. 
\examples{ datetime2doy("2010-01-01") # 1 datetime2doy("2010-01-01 12:00:00") # 1.5 -cf2doy(0, "days since 2007-01-01") +cf2doy(0, "days since 2007-01-01") cf2doy(5, "days since 2010-01-01") # 6 cf2doy(5, "days since 2010-01-01") # 6 } diff --git a/base/utils/man/days_in_year.Rd b/base/utils/man/days_in_year.Rd index 9ddcaae27f4..9e7a31d23e7 100644 --- a/base/utils/man/days_in_year.Rd +++ b/base/utils/man/days_in_year.Rd @@ -18,9 +18,9 @@ integer vector, all either 365 or 366 Calculate number of days in a year based on whether it is a leap year or not. } \examples{ -days_in_year(2010) # Not a leap year -- returns 365 -days_in_year(2012) # Leap year -- returns 366 -days_in_year(2000:2008) # Function is vectorized over years +days_in_year(2010) # Not a leap year -- returns 365 +days_in_year(2012) # Leap year -- returns 366 +days_in_year(2000:2008) # Function is vectorized over years } \author{ Alexey Shiklomanov diff --git a/base/utils/man/distn.stats.Rd b/base/utils/man/distn.stats.Rd index ac0a64079fa..e681100cbde 100644 --- a/base/utils/man/distn.stats.Rd +++ b/base/utils/man/distn.stats.Rd @@ -21,7 +21,7 @@ Implementation of standard equations used to calculate mean and sd for a variety named distributions different } \examples{ -distn.stats('norm', 0, 1) +distn.stats("norm", 0, 1) } \author{ David LeBauer diff --git a/base/utils/man/download.url.Rd b/base/utils/man/download.url.Rd index 9187d9f72c5..50f5948ebe1 100644 --- a/base/utils/man/download.url.Rd +++ b/base/utils/man/download.url.Rd @@ -29,6 +29,6 @@ it will return the name of the file } \examples{ \dontrun{ -download.url('http://localhost/', index.html) +download.url("http://localhost/", index.html) } } diff --git a/base/utils/man/download_file.Rd b/base/utils/man/download_file.Rd index 97c660c8a81..d735e0737c6 100644 --- a/base/utils/man/download_file.Rd +++ b/base/utils/man/download_file.Rd @@ -20,11 +20,13 @@ home directory } \examples{ \dontrun{ -download_file("http://lib.stat.cmu.edu/datasets/csb/ch11b.txt","~/test.download.txt") +download_file("http://lib.stat.cmu.edu/datasets/csb/ch11b.txt", "~/test.download.txt") -download_file(" +download_file( + " ftp://ftp.cdc.noaa.gov/Datasets/NARR/monolevel/pres.sfc.2000.nc", - "~/pres.sfc.2000.nc") + "~/pres.sfc.2000.nc" +) } } diff --git a/base/utils/man/full.path.Rd b/base/utils/man/full.path.Rd index 5fe7d1bf162..2750997c402 100644 --- a/base/utils/man/full.path.Rd +++ b/base/utils/man/full.path.Rd @@ -18,7 +18,7 @@ will normalize the path and prepend it with the current working folder if needed to get an absolute path name. 
} \examples{ -full.path('pecan') +full.path("pecan") } \author{ Rob Kooper diff --git a/base/utils/man/get.parameter.stat.Rd b/base/utils/man/get.parameter.stat.Rd index 9a6f87d1386..10f5071104a 100644 --- a/base/utils/man/get.parameter.stat.Rd +++ b/base/utils/man/get.parameter.stat.Rd @@ -18,7 +18,9 @@ table with parameter statistics Gets statistics for LaTeX - formatted table } \examples{ -\dontrun{get.parameter.stat(mcmc.summaries[[1]], 'beta.o')} +\dontrun{ +get.parameter.stat(mcmc.summaries[[1]], "beta.o") +} } \author{ David LeBauer diff --git a/base/utils/man/get.run.id.Rd b/base/utils/man/get.run.id.Rd index 675d762416a..a892b9cdb42 100644 --- a/base/utils/man/get.run.id.Rd +++ b/base/utils/man/get.run.id.Rd @@ -25,8 +25,8 @@ id representing a model run Provides a consistent method of naming runs; for use in model input files and indices } \examples{ -get.run.id('ENS', left.pad.zeros(1, 5)) -get.run.id('SA', round(qnorm(-3),3), trait = 'Vcmax') +get.run.id("ENS", left.pad.zeros(1, 5)) +get.run.id("SA", round(qnorm(-3), 3), trait = "Vcmax") } \author{ Carl Davidson, David LeBauer diff --git a/base/utils/man/load.modelpkg.Rd b/base/utils/man/load.modelpkg.Rd index 06d4aa6cbf7..1fd4e073496 100644 --- a/base/utils/man/load.modelpkg.Rd +++ b/base/utils/man/load.modelpkg.Rd @@ -16,7 +16,9 @@ FALSE if function returns error; else TRUE Load model package } \examples{ -\dontrun{require.modelpkg(BioCro)} +\dontrun{ +require.modelpkg(BioCro) +} } \author{ David LeBauer diff --git a/base/utils/man/need_packages.Rd b/base/utils/man/need_packages.Rd index 6ed4ff1341e..0e7fd2d5b77 100644 --- a/base/utils/man/need_packages.Rd +++ b/base/utils/man/need_packages.Rd @@ -21,7 +21,7 @@ error if not. \examples{ # Only need ::: because package isn't exported. # Inside a package, just call `need_packages` -PEcAn.utils:::need_packages("stats", "methods") # Always works +PEcAn.utils:::need_packages("stats", "methods") # Always works try(PEcAn.utils:::need_packages("notapackage")) } \author{ diff --git a/base/utils/man/r2bugs.distributions.Rd b/base/utils/man/r2bugs.distributions.Rd index f900f747faa..3fa3e007c5b 100644 --- a/base/utils/man/r2bugs.distributions.Rd +++ b/base/utils/man/r2bugs.distributions.Rd @@ -18,9 +18,11 @@ priors dataframe using JAGS default parameterizations R and BUGS have different parameterizations for some distributions. This function transforms the distributions from R defaults to BUGS defaults. BUGS is an implementation of the BUGS language, and these transformations are expected to work for bugs. 
} \examples{ -priors <- data.frame(distn = c('weibull', 'lnorm', 'norm', 'gamma'), - parama = c(1, 1, 1, 1), - paramb = c(2, 2, 2, 2)) +priors <- data.frame( + distn = c("weibull", "lnorm", "norm", "gamma"), + parama = c(1, 1, 1, 1), + paramb = c(2, 2, 2, 2) +) r2bugs.distributions(priors) } \author{ diff --git a/base/utils/man/retry.func.Rd b/base/utils/man/retry.func.Rd index 7a3ff9216ef..9a8b648e13d 100644 --- a/base/utils/man/retry.func.Rd +++ b/base/utils/man/retry.func.Rd @@ -30,13 +30,16 @@ Retry function X times before stopping in error } \examples{ \dontrun{ - file_url <- paste0("https://thredds.daac.ornl.gov/", - "thredds/dodsC/ornldaac/1220", - "/mstmip_driver_global_hd_climate_lwdown_1999_v1.nc4") +file_url <- paste0( + "https://thredds.daac.ornl.gov/", + "thredds/dodsC/ornldaac/1220", + "/mstmip_driver_global_hd_climate_lwdown_1999_v1.nc4" +) dap <- retry.func( ncdf4::nc_open(file_url), - maxErrors=10, - sleep=2) + maxErrors = 10, + sleep = 2 +) } } diff --git a/base/utils/man/robustly.Rd b/base/utils/man/robustly.Rd index 43b2e07be25..0b102d84e72 100644 --- a/base/utils/man/robustly.Rd +++ b/base/utils/man/robustly.Rd @@ -25,11 +25,11 @@ Adverb to try calling a function \code{n} times before giving up rlog <- robustly(log, timeout = 0.3) try(rlog("fail")) \dontrun{ - nc_openr <- robustly(ncdf4::nc_open, n = 10, timeout = 0.5) - nc <- nc_openr(url) - # ...or just call the function directly - nc <- robustly(ncdf4::nc_open, n = 20)(url) - # Useful in `purrr` maps - many_vars <- purrr::map(varnames, robustly(ncdf4::ncvar_get), nc = nc) +nc_openr <- robustly(ncdf4::nc_open, n = 10, timeout = 0.5) +nc <- nc_openr(url) +# ...or just call the function directly +nc <- robustly(ncdf4::nc_open, n = 20)(url) +# Useful in `purrr` maps +many_vars <- purrr::map(varnames, robustly(ncdf4::ncvar_get), nc = nc) } } diff --git a/base/utils/man/seconds_in_year.Rd b/base/utils/man/seconds_in_year.Rd index 720134b31bd..bbac6e10bc4 100644 --- a/base/utils/man/seconds_in_year.Rd +++ b/base/utils/man/seconds_in_year.Rd @@ -17,9 +17,9 @@ seconds_in_year(year, leap_year = TRUE, ...) Number of seconds in a given year } \examples{ -seconds_in_year(2000) # Leap year -- 366 x 24 x 60 x 60 = 31622400 -seconds_in_year(2001) # Regular year -- 365 x 24 x 60 x 60 = 31536000 -seconds_in_year(2000:2005) # Vectorized over year +seconds_in_year(2000) # Leap year -- 366 x 24 x 60 x 60 = 31622400 +seconds_in_year(2001) # Regular year -- 365 x 24 x 60 x 60 = 31536000 +seconds_in_year(2000:2005) # Vectorized over year } \author{ Alexey Shiklomanov diff --git a/base/utils/man/sendmail.Rd b/base/utils/man/sendmail.Rd index a6c453f129a..3209a06c802 100644 --- a/base/utils/man/sendmail.Rd +++ b/base/utils/man/sendmail.Rd @@ -23,7 +23,7 @@ Sends email. This assumes the program sendmail is installed. } \examples{ \dontrun{ -sendmail('bob@example.com', 'joe@example.com', 'Hi', 'This is R.') +sendmail("bob@example.com", "joe@example.com", "Hi", "This is R.") } } \author{ diff --git a/base/utils/man/timezone_hour.Rd b/base/utils/man/timezone_hour.Rd index f63c6485c7b..7653939712b 100644 --- a/base/utils/man/timezone_hour.Rd +++ b/base/utils/man/timezone_hour.Rd @@ -17,7 +17,7 @@ Returns the number of hours offset to UTC for a timezone. 
} \examples{ \dontrun{ -timezone_hour('America/New_York') +timezone_hour("America/New_York") } } \author{ diff --git a/base/utils/man/trait.lookup.Rd b/base/utils/man/trait.lookup.Rd index 5d676fc6b59..686eabada28 100644 --- a/base/utils/man/trait.lookup.Rd +++ b/base/utils/man/trait.lookup.Rd @@ -20,10 +20,10 @@ Dictionary of terms used to identify traits in ed, filenames, and figures \examples{ # convert parameter name to a string appropriate for end-use plotting \dontrun{ -trait.lookup('growth_resp_factor') -trait.lookup('growth_resp_factor')$figid +trait.lookup("growth_resp_factor") +trait.lookup("growth_resp_factor")$figid # get a list of all traits and units in dictionary -trait.lookup()[,c('figid', 'units')] +trait.lookup()[, c("figid", "units")] } } diff --git a/base/utils/man/transformstats.Rd b/base/utils/man/transformstats.Rd index de87f63654e..7312009213b 100644 --- a/base/utils/man/transformstats.Rd +++ b/base/utils/man/transformstats.Rd @@ -21,10 +21,12 @@ LeBauer 2020 Transforming ANOVA and Regression statistics for Meta-analysis. Authorea. DOI: https://doi.org/10.22541/au.158359749.96662550 } \examples{ -statdf <- data.frame(Y=rep(1,5), - stat=rep(1,5), - n=rep(4,5), - statname=c('SD', 'MSE', 'LSD', 'HSD', 'MSD')) +statdf <- data.frame( + Y = rep(1, 5), + stat = rep(1, 5), + n = rep(4, 5), + statname = c("SD", "MSE", "LSD", "HSD", "MSD") +) transformstats(statdf) } \author{ diff --git a/base/utils/man/tryl.Rd b/base/utils/man/tryl.Rd index 9d011f2e15a..41d2816ec3d 100644 --- a/base/utils/man/tryl.Rd +++ b/base/utils/man/tryl.Rd @@ -16,9 +16,9 @@ FALSE if function returns error; else TRUE adaptation of try that returns a logical value (FALSE if error) } \examples{ -tryl(1+1) +tryl(1 + 1) # TRUE -tryl(sum('a')) +tryl(sum("a")) # FALSE } \author{ diff --git a/base/utils/man/unit_is_parseable.Rd b/base/utils/man/unit_is_parseable.Rd index ae1490e526a..ff89978959f 100644 --- a/base/utils/man/unit_is_parseable.Rd +++ b/base/utils/man/unit_is_parseable.Rd @@ -16,8 +16,8 @@ TRUE if the units is parseable, FALSE otherwise. 
Function will replace the now-unmaintained \code{udunits2::ud.is.parseable} } \examples{ - unit_is_parseable("g/sec^2") - unit_is_parseable("kiglometters") +unit_is_parseable("g/sec^2") +unit_is_parseable("kiglometters") } \author{ diff --git a/base/visualization/man/vwReg.Rd b/base/visualization/man/vwReg.Rd index 0f2301818e4..846195efae6 100644 --- a/base/visualization/man/vwReg.Rd +++ b/base/visualization/man/vwReg.Rd @@ -88,7 +88,7 @@ Details: \url{http://www.nicebread.de/visually-weighted-regression-in-r-a-la-sol \examples{ # build a demo data set set.seed(1) -x <- rnorm(200, 0.8, 1.2) +x <- rnorm(200, 0.8, 1.2) e <- rnorm(200, 0, 3)*(abs(x)^1.5 + .5) + rnorm(200, 0, 4) e2 <- rnorm(200, 0, 5)*(abs(x)^1.5 + .8) + rnorm(200, 0, 5) y <- 8*x - x^3 + e diff --git a/base/workflow/man/start_model_runs.Rd b/base/workflow/man/start_model_runs.Rd index a1c171073bb..f6e34703150 100644 --- a/base/workflow/man/start_model_runs.Rd +++ b/base/workflow/man/start_model_runs.Rd @@ -27,7 +27,7 @@ Start selected ecosystem model runs within PEcAn workflow }} \examples{ \dontrun{ - start_model_runs(settings) +start_model_runs(settings) } } \author{ diff --git a/models/ed/man/read_restart.ED2.Rd b/models/ed/man/read_restart.ED2.Rd index eff0cdcebd8..95d6c728202 100644 --- a/models/ed/man/read_restart.ED2.Rd +++ b/models/ed/man/read_restart.ED2.Rd @@ -24,11 +24,11 @@ State data assimilation read-restart for ED2 } \examples{ \dontrun{ - outdir <- "~/sda-hackathon/outputs" - runid <- "99000000020" - settings_file <- "outputs/pecan.CONFIGS.xml" - settings <- PEcAn.settings::read.settings(settings_file) - forecast <- read_restart.ED2(...) +outdir <- "~/sda-hackathon/outputs" +runid <- "99000000020" +settings_file <- "outputs/pecan.CONFIGS.xml" +settings <- PEcAn.settings::read.settings(settings_file) +forecast <- read_restart.ED2(...) } } diff --git a/models/sipnet/man/mergeNC.Rd b/models/sipnet/man/mergeNC.Rd index 011a8c8e46f..c3bf6846cce 100644 --- a/models/sipnet/man/mergeNC.Rd +++ b/models/sipnet/man/mergeNC.Rd @@ -22,10 +22,11 @@ Merge multiple NetCDF files into one } \examples{ \dontrun{ -files <- list.files(paste0(system.file(package="processNC"), "/extdata"), - pattern="tas.*\\\\.nc", full.names=TRUE) -temp <- tempfile(fileext=".nc") -mergeNC(files=files, outfile=temp) -terra::rast(temp) +files <- list.files(paste0(system.file(package = "processNC"), "/extdata"), + pattern = "tas.*\\\\.nc", full.names = TRUE +) +temp <- tempfile(fileext = ".nc") +mergeNC(files = files, outfile = temp) +terra::rast(temp) } } diff --git a/modules/allometry/man/AllomAve.Rd b/modules/allometry/man/AllomAve.Rd index 7d9fc911bab..2796f407344 100644 --- a/modules/allometry/man/AllomAve.Rd +++ b/modules/allometry/man/AllomAve.Rd @@ -50,10 +50,11 @@ nested list of parameter summary statistics \description{ Allometry wrapper function that handles loading and subsetting the data, fitting the Bayesian models, and generating diagnostic figures. Set up to loop over - multiple PFTs and components. + multiple PFTs and components. Writes raw MCMC and PDF of diagnositcs to file and returns table of summary stats. - -There are two usages of this function. +} +\details{ +There are two usages of this function. When running 'online' (connected to the PEcAn database), pass the database connection, con, and the pfts subsection of the PEcAn settings. 
When running 'stand alone' pass the pft list mapping species to species codes @@ -61,13 +62,13 @@ When running 'stand alone' pass the pft list mapping species to species codes } \examples{ -if(FALSE){ - pfts = list(FAGR = data.frame(spcd=531,acronym='FAGR')) - allom.stats = AllomAve(pfts,ngibbs=500) +if (FALSE) { + pfts <- list(FAGR = data.frame(spcd = 531, acronym = "FAGR")) + allom.stats <- AllomAve(pfts, ngibbs = 500) ## example of a PFT with multiple species (late hardwood) ## note that if you're just using Jenkins the acronym column is optional - pfts = list(LH = data.frame(spcd = c(531,318),acronym=c('FAGR','ACSA3'))) + pfts <- list(LH = data.frame(spcd = c(531, 318), acronym = c("FAGR", "ACSA3"))) } } diff --git a/modules/allometry/man/allom.predict.Rd b/modules/allometry/man/allom.predict.Rd index 031510f2504..d998f65ad58 100644 --- a/modules/allometry/man/allom.predict.Rd +++ b/modules/allometry/man/allom.predict.Rd @@ -47,12 +47,10 @@ Function for making tree-level Monte Carlo predictions from allometric equations estimated from the PEcAn allometry module } \examples{ - \dontrun{ - object = '~/Dropbox//HF C Synthesis/Allometry Papers & Analysis/' - dbh = seq(10,50,by=5) - mass = allom.predict(object,dbh,n=100) - +object <- "~/Dropbox//HF C Synthesis/Allometry Papers & Analysis/" +dbh <- seq(10, 50, by = 5) +mass <- allom.predict(object, dbh, n = 100) } } diff --git a/modules/allometry/man/load.allom.Rd b/modules/allometry/man/load.allom.Rd index 4b9f0415485..42ce9053e86 100644 --- a/modules/allometry/man/load.allom.Rd +++ b/modules/allometry/man/load.allom.Rd @@ -20,11 +20,9 @@ mcmc outputs in a list by PFT then component loads allom files } \examples{ - \dontrun{ - object = '~/Dropbox//HF C Synthesis/Allometry Papers & Analysis/' - allom.mcmc = load.allom(object) - +object <- "~/Dropbox//HF C Synthesis/Allometry Papers & Analysis/" +allom.mcmc <- load.allom(object) } } diff --git a/modules/allometry/man/read.allom.data.Rd b/modules/allometry/man/read.allom.data.Rd index cffd3a490ff..a609b569267 100644 --- a/modules/allometry/man/read.allom.data.Rd +++ b/modules/allometry/man/read.allom.data.Rd @@ -29,6 +29,6 @@ read.allom.data(pft.data, component, field, parm, nsim = 10000) Extracts PFT- and component-specific data and allometeric equations from the specified files. } \details{ -This code also estimates the standard error from R-squared, +This code also estimates the standard error from R-squared, which is required to simulate pseudodata from the allometric eqns. } diff --git a/modules/assim.batch/man/autoburnin.Rd b/modules/assim.batch/man/autoburnin.Rd index 8d4eae8114f..eb7908d03cb 100644 --- a/modules/assim.batch/man/autoburnin.Rd +++ b/modules/assim.batch/man/autoburnin.Rd @@ -19,10 +19,10 @@ and \code{gelman.diag}.} Automatically calculate and apply burnin value } \examples{ - z1 <- coda::mcmc(c(rnorm(2500, 5), rnorm(2500, 0))) - z2 <- coda::mcmc(c(rnorm(2500, -5), rnorm(2500, 0))) - z <- coda::mcmc.list(z1, z2) - z_burned <- autoburnin(z) +z1 <- coda::mcmc(c(rnorm(2500, 5), rnorm(2500, 0))) +z2 <- coda::mcmc(c(rnorm(2500, -5), rnorm(2500, 0))) +z <- coda::mcmc.list(z1, z2) +z_burned <- autoburnin(z) } \author{ Michael Dietze, Alexey Shiklomanov diff --git a/modules/assim.batch/man/getBurnin.Rd b/modules/assim.batch/man/getBurnin.Rd index 6d18ec20294..48595b8bffa 100644 --- a/modules/assim.batch/man/getBurnin.Rd +++ b/modules/assim.batch/man/getBurnin.Rd @@ -35,10 +35,10 @@ Automatically detect burnin based on one of several methods. 
See "gelman_diag_mw" and "gelman_diag_gelmanPlot" } \examples{ - z1 <- coda::mcmc(c(rnorm(2500, 5), rnorm(2500, 0))) - z2 <- coda::mcmc(c(rnorm(2500, -5), rnorm(2500, 0))) - z <- coda::mcmc.list(z1, z2) - burnin <- getBurnin(z, threshold = 1.05) +z1 <- coda::mcmc(c(rnorm(2500, 5), rnorm(2500, 0))) +z2 <- coda::mcmc(c(rnorm(2500, -5), rnorm(2500, 0))) +z <- coda::mcmc.list(z1, z2) +burnin <- getBurnin(z, threshold = 1.05) } \author{ Alexey Shiklomanov, Michael Dietze diff --git a/modules/assim.batch/man/pda.generate.externals.Rd b/modules/assim.batch/man/pda.generate.externals.Rd index fc5b3ce409c..2f298a9a8ba 100644 --- a/modules/assim.batch/man/pda.generate.externals.Rd +++ b/modules/assim.batch/man/pda.generate.externals.Rd @@ -103,9 +103,9 @@ You can use this function just to generate either one of the external.* PDA obje } \examples{ \dontrun{ -pda.externals <- pda.generate.externals(external.data = TRUE, obs = obs, +pda.externals <- pda.generate.externals(external.data = TRUE, obs = obs, varn = "NEE", varid = 297, n_eff = 106.9386, -external.formats = TRUE, model_data_diag = TRUE, +external.formats = TRUE, model_data_diag = TRUE, model.out = "/tmp/out/outdir", start_date = "2017-01-01", end_date = "2018-12-31") } diff --git a/modules/assim.sequential/man/Analysis.sda.Rd b/modules/assim.sequential/man/Analysis.sda.Rd index b2d02230274..3c57a27879b 100644 --- a/modules/assim.sequential/man/Analysis.sda.Rd +++ b/modules/assim.sequential/man/Analysis.sda.Rd @@ -33,7 +33,7 @@ Analysis.sda( Returns whatever the FUN is returning. In case of EnKF and GEF, this function returns a list with estimated mean and cov matrix of forecast state variables as well as mean and cov estimated as a result of assimilation/analysis . } \description{ -This functions uses the FUN to perform the analysis. EnKF function is developed inside the PEcAnAssimSequential package which can be sent to this function to perform the Ensemble Kalman Filter. +This functions uses the FUN to perform the analysis. EnKF function is developed inside the PEcAnAssimSequential package which can be sent to this function to perform the Ensemble Kalman Filter. The other option is GEF function inside the same package allowing to perform Generalized Ensemble kalman Filter. If you're using an arbitrary function you can use the ... to send any other variables to your desired analysis function. 
diff --git a/modules/assim.sequential/man/Create_Site_PFT_CSV.Rd b/modules/assim.sequential/man/Create_Site_PFT_CSV.Rd index 732dcf63a84..13831fee448 100644 --- a/modules/assim.sequential/man/Create_Site_PFT_CSV.Rd +++ b/modules/assim.sequential/man/Create_Site_PFT_CSV.Rd @@ -23,17 +23,20 @@ Title Identify pft for each site of a multi-site settings using NLCD and Eco-reg } \examples{ \dontrun{ - NLCD <- file.path( - "/fs", "data1", "pecan.data", "input", - "nlcd_2001_landcover_2011_edition_2014_10_10", - "nlcd_2001_landcover_2011_edition_2014_10_10.img") - Ecoregion <- file.path( - "/projectnb", "dietzelab", "dongchen", - "All_NEON_SDA", "NEON42", "eco-region", "us_eco_l3_state_boundaries.shp") - settings <- PEcAn.settings::read.settings( - "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/pecan.xml") - con <- PEcAn.DB::db.open(settings$database$bety) - site_pft_info <- Create_Site_PFT_CSV(settings, Ecoregion, NLCD, con) +NLCD <- file.path( + "/fs", "data1", "pecan.data", "input", + "nlcd_2001_landcover_2011_edition_2014_10_10", + "nlcd_2001_landcover_2011_edition_2014_10_10.img" +) +Ecoregion <- file.path( + "/projectnb", "dietzelab", "dongchen", + "All_NEON_SDA", "NEON42", "eco-region", "us_eco_l3_state_boundaries.shp" +) +settings <- PEcAn.settings::read.settings( + "/projectnb/dietzelab/dongchen/All_NEON_SDA/NEON42/pecan.xml" +) +con <- PEcAn.DB::db.open(settings$database$bety) +site_pft_info <- Create_Site_PFT_CSV(settings, Ecoregion, NLCD, con) } } diff --git a/modules/assim.sequential/man/construct_nimble_H.Rd b/modules/assim.sequential/man/construct_nimble_H.Rd index f7705188a1b..61d64b1f808 100644 --- a/modules/assim.sequential/man/construct_nimble_H.Rd +++ b/modules/assim.sequential/man/construct_nimble_H.Rd @@ -18,7 +18,7 @@ construct_nimble_H(site.ids, var.names, obs.t, pft.path = NULL, by = "single") \item{by}{criteria, it supports by variable, site, pft, all, and single Q.} } \value{ -Returns one vector containing index for which Q to be estimated for which variable, +Returns one vector containing index for which Q to be estimated for which variable, and the other vector gives which state variable has which observation (= element.W.Data). } \description{ diff --git a/modules/assim.sequential/man/sda.enkf.multisite.Rd b/modules/assim.sequential/man/sda.enkf.multisite.Rd index 81b79f1c1a1..af4098f49e6 100644 --- a/modules/assim.sequential/man/sda.enkf.multisite.Rd +++ b/modules/assim.sequential/man/sda.enkf.multisite.Rd @@ -33,9 +33,9 @@ sda.enkf.multisite( \item{ensemble.samples}{Pass ensemble.samples from outside to avoid GitHub check issues.} -\item{control}{List of flags controlling the behavior of the SDA. -`trace` for reporting back the SDA outcomes; -`TimeseriesPlot` for post analysis examination; +\item{control}{List of flags controlling the behavior of the SDA. 
+`trace` for reporting back the SDA outcomes; +`TimeseriesPlot` for post analysis examination; `debug` decide if we want to pause the code and examining the variables inside the function; `pause` decide if we want to pause the SDA workflow at current time point t; `Profiling` decide if we want to export the temporal SDA outputs in CSV file; diff --git a/modules/benchmark/man/align_by_first_observation.Rd b/modules/benchmark/man/align_by_first_observation.Rd index 87b6ee65b6b..2eb8de32425 100644 --- a/modules/benchmark/man/align_by_first_observation.Rd +++ b/modules/benchmark/man/align_by_first_observation.Rd @@ -11,7 +11,7 @@ align_by_first_observation(observation_one, observation_two, custom_table) \item{observation_two}{another vector of plant functional types, or species. Provides the order.} -\item{custom_table}{a table that either maps two pft's to one another or maps custom species codes to bety id codes. +\item{custom_table}{a table that either maps two pft's to one another or maps custom species codes to bety id codes. In the second case, must be passable to match_species_id.} } \value{ @@ -22,18 +22,19 @@ align_first_observation } \examples{ -observation_one<-c("AMCA3","AMCA3","AMCA3","AMCA3") -observation_two<-c("a", "b", "a", "a") +observation_one <- c("AMCA3", "AMCA3", "AMCA3", "AMCA3") +observation_two <- c("a", "b", "a", "a") -table<-list() -table$plant_functional_type_one<- c("AMCA3","AMCA3","ARHY", "ARHY") -table$plant_functional_type_two<- c('a','a','b', 'b') # PFT groupings -table<-as.data.frame(table) +table <- list() +table$plant_functional_type_one <- c("AMCA3", "AMCA3", "ARHY", "ARHY") +table$plant_functional_type_two <- c("a", "a", "b", "b") # PFT groupings +table <- as.data.frame(table) aligned <- align_by_first_observation( observation_one = observation_one, observation_two = observation_two, - custom_table = table) + custom_table = table +) # aligned should be a vector '[1] "AMCA3" "ARHY" "AMCA3" "AMCA3"' } diff --git a/modules/benchmark/man/align_data_to_data_pft.Rd b/modules/benchmark/man/align_data_to_data_pft.Rd index 99d50c029bc..e2ccb470d40 100644 --- a/modules/benchmark/man/align_data_to_data_pft.Rd +++ b/modules/benchmark/man/align_data_to_data_pft.Rd @@ -46,34 +46,35 @@ align_data_to_data_pft } \details{ Aligns vectors of Plant Fucntional Typed and species. -Can align: +Can align: - two vectors of plant functional types (pft's) if a custom map is provided - a list of species (usda, fia, or latin_name format) to a plant functional type - a list of species in a custom format, with a table mapping it to bety_species_id's - Will return a list of what was originally provided, bety_species_codes if possible, + Will return a list of what was originally provided, bety_species_codes if possible, and an aligned output. Because some alignement is order-sensitive, alignment based on observation_one and observation_two are both provided. 
} \examples{ \dontrun{ -observation_one<-c("AMCA3","AMCA3","AMCA3","AMCA3") -observation_two<-c("a", "b", "a", "a") +observation_one <- c("AMCA3", "AMCA3", "AMCA3", "AMCA3") +observation_two <- c("a", "b", "a", "a") -table<-list() -table$plant_functional_type_one<- c("AMCA3","AMCA3","ARHY", "ARHY") -table$plant_functional_type_two<- c('a','a','b', 'b') # PFT groupings -table<-as.data.frame(table) +table <- list() +table$plant_functional_type_one <- c("AMCA3", "AMCA3", "ARHY", "ARHY") +table$plant_functional_type_two <- c("a", "a", "b", "b") # PFT groupings +table <- as.data.frame(table) -format_one<-"species_USDA_symbol" -format_two<-"plant_functional_type" +format_one <- "species_USDA_symbol" +format_two <- "plant_functional_type" aligned <- align_data_to_data_pft( - con = con, - observation_one = observation_one, observation_two = observation_two, - format_one = format_one, format_two = format_two, - custom_table = table) + con = con, + observation_one = observation_one, observation_two = observation_two, + format_one = format_one, format_two = format_two, + custom_table = table +) } } \author{ diff --git a/modules/benchmark/man/align_pft.Rd b/modules/benchmark/man/align_pft.Rd index 71a24566e2f..aa5c1458edf 100644 --- a/modules/benchmark/man/align_pft.Rd +++ b/modules/benchmark/man/align_pft.Rd @@ -23,14 +23,14 @@ align_pft( \item{observation_two}{anouther vector of plant fucntional types, or species} -\item{custom_table}{a table that either maps two pft's to one anouther or maps custom species codes to bety id codes. +\item{custom_table}{a table that either maps two pft's to one anouther or maps custom species codes to bety id codes. In the second case, must be passable to match_species_id.} \item{format_one}{The output of query.format.vars() of observation one of the form output$vars$bety_names} \item{format_two}{The output of query.format.vars() of observation two of the form output$vars$bety_names} -\item{subset_is_ok}{When aligning two species lists, this allows for alignement when species lists aren't identical. +\item{subset_is_ok}{When aligning two species lists, this allows for alignement when species lists aren't identical. set to FALSE by default.} \item{comparison_type}{one of "data_to_model", "data_to_data", or "model_to_model"} @@ -50,14 +50,14 @@ set to FALSE by default.} Align vectors of Plant Functional Type and species. } \details{ -Can align: +Can align: - two vectors of plant fucntional types (pft's) if a custom map is provided - a list of species (usda, fia, or latin_name format) to a plant fucntional type - a list of species in a custom format, with a table mapping it to bety_species_id's - Will return a list of what was originally provided, bety_speceis_codes if possible, + Will return a list of what was originally provided, bety_speceis_codes if possible, and an aligned output. Becuase some alignement is order-sensitive, alignment based on observation_one - and observation_two are both provided. + and observation_two are both provided. 
\code{comparison_type} can be one of the following: \describe{ @@ -71,20 +71,22 @@ Can align: #------------ A species to PFT alignment ----------- -observation_one<-c("AMCA3","AMCA3","AMCA3","AMCA3") -observation_two<-c("a", "b", "a", "a") # +observation_one <- c("AMCA3", "AMCA3", "AMCA3", "AMCA3") +observation_two <- c("a", "b", "a", "a") # -format_one<-"species_USDA_symbol" -format_two<-"plant_funtional_type" +format_one <- "species_USDA_symbol" +format_two <- "plant_funtional_type" -table<-list() -table$plant_functional_type_one<- c("AMCA3","AMCA3","ARHY", "ARHY") -table$plant_functional_type_two<- c('a','a','b', 'b') # PFT groupings -table<-as.data.frame(table) +table <- list() +table$plant_functional_type_one <- c("AMCA3", "AMCA3", "ARHY", "ARHY") +table$plant_functional_type_two <- c("a", "a", "b", "b") # PFT groupings +table <- as.data.frame(table) -aligned<-align_pft(con = con, observation_one = observation_one, observation_two = observation_two, -format_one = format_one, format_two = format_two, custom_table = table) +aligned <- align_pft( + con = con, observation_one = observation_one, observation_two = observation_two, + format_one = format_one, format_two = format_two, custom_table = table +) } } diff --git a/modules/benchmark/man/check_if_species_list.Rd b/modules/benchmark/man/check_if_species_list.Rd index 60357127e67..c10dab46a53 100644 --- a/modules/benchmark/man/check_if_species_list.Rd +++ b/modules/benchmark/man/check_if_species_list.Rd @@ -9,7 +9,7 @@ check_if_species_list(vars, custom_table = NULL) \arguments{ \item{vars}{format} -\item{custom_table}{a table that either maps two pft's to one anouther or maps custom species codes to bety id codes. +\item{custom_table}{a table that either maps two pft's to one anouther or maps custom species codes to bety id codes. In the second case, must be passable to match_species_id.} } \value{ diff --git a/modules/data.atmosphere/man/align.met.Rd b/modules/data.atmosphere/man/align.met.Rd index 7620095fccc..618e6087ef5 100644 --- a/modules/data.atmosphere/man/align.met.Rd +++ b/modules/data.atmosphere/man/align.met.Rd @@ -21,25 +21,25 @@ align.met( \item{source.path}{- data to be bias-corrected aligned with training data (from align.met)} -\item{yrs.train}{- (optional) specify a specific years to be loaded for the training data; -prevents needing to load the entire dataset. If NULL, all available years +\item{yrs.train}{- (optional) specify a specific years to be loaded for the training data; +prevents needing to load the entire dataset. If NULL, all available years will be loaded. If not null, should be a vector of numbers (so you can skip problematic years)} \item{yrs.source}{- (optional) specify a specific years to be loaded for the source data; -prevents needing to load the entire dataset. If NULL, all available years +prevents needing to load the entire dataset. If NULL, all available years will be loaded. 
If not null, should be a vector of numbers (so you can skip problematic years)} \item{n.ens}{- number of ensemble members to generate and save} -\item{pair.mems}{- logical stating whether ensemble members should be paired in +\item{pair.mems}{- logical stating whether ensemble members should be paired in the case where ensembles are being read in in both the training and source data} -\item{mems.train}{- (optional) string of ensemble identifiers that ensure the training data is read +\item{mems.train}{- (optional) string of ensemble identifiers that ensure the training data is read in a specific order to ensure consistent time series & proper error propagation. -If null, members of the training data ensemble will be randomly selected and -ordered. Specifying the ensemble members IDs (e.g. CCSM_001, CCSM_002) will +If null, members of the training data ensemble will be randomly selected and +ordered. Specifying the ensemble members IDs (e.g. CCSM_001, CCSM_002) will ensure ensemble members are properly identified and combined.} \item{seed}{- specify seed so that random draws can be reproduced} @@ -50,28 +50,28 @@ ensure ensemble members are properly identified and combined.} 2-layered list (stored in memory) containing the training and source data that are now matched in temporal resolution have the specified number of ensemble members - dat.train (training dataset) and dat.source (source data to be downscaled or bias-corrected) - are both lists that contain separate data frames for time indices and all available met + are both lists that contain separate data frames for time indices and all available met variables with ensemble members in columns } \description{ -This script aligns meteorology datasets in at temporal resolution for debiasing & - temporal downscaling. - Note: The output here is stored in memory! - Note: can probably at borrow from or adapt align_data.R in Benchmarking module, but +This script aligns meteorology datasets in at temporal resolution for debiasing & + temporal downscaling. + Note: The output here is stored in memory! + Note: can probably at borrow from or adapt align_data.R in Benchmarking module, but it's too much of a black box at the moment. } \details{ Align meteorology datasets for debiasing -1. Assumes that both the training and source data are in *at least* daily resolution - and each dataset is in a consistent temporal resolution being read from a single file - (CF/Pecan format). For example, CMIP5 historical/p1000 runs where radiation drivers +1. Assumes that both the training and source data are in *at least* daily resolution + and each dataset is in a consistent temporal resolution being read from a single file + (CF/Pecan format). For example, CMIP5 historical/p1000 runs where radiation drivers are in monthly resolution and temperature is in daily will need to be reconciled using one of the "met2CF" or "download" or "extract" functions - 2. Default file structure: Ensembles members for a given site or set of simes are housed - in a common folder with the site ID. Right now everything is based off of Christy's + 2. Default file structure: Ensembles members for a given site or set of simes are housed + in a common folder with the site ID. Right now everything is based off of Christy's PalEON ensemble ID scheme where the site ID is a character string (e.g. HARVARD) followed - the SOURCE data family (i.e. GCM) as a string and then the ensemble member ID as a number + the SOURCE data family (i.e. 
GCM) as a string and then the ensemble member ID as a number (e.g. 001). For example, the file path for a single daily ensemble member for PalEON is: "~/Desktop/Research/met_ensembles/data/met_ensembles/HARVARD/day/ensembles/bcc-csm1-1_004" with each year in a separate netcdf file inside of it. "bcc-csm1-1_004" is an example of diff --git a/modules/data.atmosphere/man/cos_solar_zenith_angle.Rd b/modules/data.atmosphere/man/cos_solar_zenith_angle.Rd index 2a5565c42bf..6a446470e84 100644 --- a/modules/data.atmosphere/man/cos_solar_zenith_angle.Rd +++ b/modules/data.atmosphere/man/cos_solar_zenith_angle.Rd @@ -21,7 +21,7 @@ cos_solar_zenith_angle(doy, lat, lon, dt, hr) Numeric value representing the cosine of the solar zenith angle. } \description{ -Calculates the cosine of the solar zenith angle based on the given parameters. +Calculates the cosine of the solar zenith angle based on the given parameters. This angle is crucial in determining the amount of solar radiation reaching a point on Earth. } \details{ diff --git a/modules/data.atmosphere/man/download.Ameriflux.Rd b/modules/data.atmosphere/man/download.Ameriflux.Rd index 15091897fc2..d47fca5f3d0 100644 --- a/modules/data.atmosphere/man/download.Ameriflux.Rd +++ b/modules/data.atmosphere/man/download.Ameriflux.Rd @@ -15,7 +15,7 @@ download.Ameriflux( ) } \arguments{ -\item{sitename}{the FLUXNET ID of the site to be downloaded, used as file name prefix. +\item{sitename}{the FLUXNET ID of the site to be downloaded, used as file name prefix. The 'SITE_ID' field in \href{http://ameriflux.lbl.gov/sites/site-list-and-pages/}{list of Ameriflux sites}} \item{outfolder}{location on disk where outputs will be stored} diff --git a/modules/data.atmosphere/man/download.Fluxnet2015.Rd b/modules/data.atmosphere/man/download.Fluxnet2015.Rd index 72c7f309ee4..b8b51959d0b 100644 --- a/modules/data.atmosphere/man/download.Fluxnet2015.Rd +++ b/modules/data.atmosphere/man/download.Fluxnet2015.Rd @@ -16,7 +16,7 @@ download.Fluxnet2015( ) } \arguments{ -\item{sitename}{the FLUXNET ID of the site to be downloaded, used as file name prefix. +\item{sitename}{the FLUXNET ID of the site to be downloaded, used as file name prefix. 
The 'SITE_ID' field in \href{https://fluxnet.org/sites/site-list-and-pages/}{list of Ameriflux sites}} \item{outfolder}{location on disk where outputs will be stored} diff --git a/modules/data.atmosphere/man/download.Geostreams.Rd b/modules/data.atmosphere/man/download.Geostreams.Rd index 26c839e2528..f98498ff2ce 100644 --- a/modules/data.atmosphere/man/download.Geostreams.Rd +++ b/modules/data.atmosphere/man/download.Geostreams.Rd @@ -57,10 +57,12 @@ If using `~/.pecan.clowder.xml`, it must be a valid PEcAn-formatted XML settings } \examples{ \dontrun{ - download.Geostreams(outfolder = "~/output/dbfiles/Clowder_EF", - sitename = "UIUC Energy Farm - CEN", - start_date = "2016-01-01", end_date="2016-12-31", - key="verysecret") +download.Geostreams( + outfolder = "~/output/dbfiles/Clowder_EF", + sitename = "UIUC Energy Farm - CEN", + start_date = "2016-01-01", end_date = "2016-12-31", + key = "verysecret" +) } } \author{ diff --git a/modules/data.atmosphere/man/download.ICOS.Rd b/modules/data.atmosphere/man/download.ICOS.Rd index 09ea66b81a4..df98f2386c5 100644 --- a/modules/data.atmosphere/man/download.ICOS.Rd +++ b/modules/data.atmosphere/man/download.ICOS.Rd @@ -33,13 +33,13 @@ download.ICOS( information about the output file } \description{ -Currently available products: +Currently available products: Drought-2018 ecosystem eddy covariance flux product https://www.icos-cp.eu/data-products/YVR0-4898 ICOS Final Fully Quality Controlled Observational Data (Level 2) https://www.icos-cp.eu/data-products/ecosystem-release } \examples{ \dontrun{ -download.ICOS("FI-Sii", "/home/carya/pecan", "2016-01-01", "2018-01-01", product="Drought2018") +download.ICOS("FI-Sii", "/home/carya/pecan", "2016-01-01", "2018-01-01", product = "Drought2018") } } \author{ diff --git a/modules/data.atmosphere/man/download.NARR_site.Rd b/modules/data.atmosphere/man/download.NARR_site.Rd index 0cbdf407772..20deedc01f5 100644 --- a/modules/data.atmosphere/man/download.NARR_site.Rd +++ b/modules/data.atmosphere/man/download.NARR_site.Rd @@ -47,12 +47,10 @@ Requires the `progress` package to be installed.} Download NARR time series for a single site } \examples{ - \dontrun{ download.NARR_site(tempdir(), "2001-01-01", "2001-01-12", 43.372, -89.907) } - } \author{ Alexey Shiklomanov diff --git a/modules/data.atmosphere/man/download.NEONmet.Rd b/modules/data.atmosphere/man/download.NEONmet.Rd index c8ffa061ae6..7abc1bfd46a 100644 --- a/modules/data.atmosphere/man/download.NEONmet.Rd +++ b/modules/data.atmosphere/man/download.NEONmet.Rd @@ -15,7 +15,7 @@ download.NEONmet( ) } \arguments{ -\item{sitename}{the NEON ID of the site to be downloaded, used as file name prefix. +\item{sitename}{the NEON ID of the site to be downloaded, used as file name prefix. The 4-letter SITE code in \href{https://www.neonscience.org/science-design/field-sites/list}{list of NEON sites}} \item{outfolder}{location on disk where outputs will be stored} diff --git a/modules/data.atmosphere/man/download.NOAA_GEFS.Rd b/modules/data.atmosphere/man/download.NOAA_GEFS.Rd index 05aa332be43..aa01cffd137 100644 --- a/modules/data.atmosphere/man/download.NOAA_GEFS.Rd +++ b/modules/data.atmosphere/man/download.NOAA_GEFS.Rd @@ -50,14 +50,14 @@ Download NOAA GEFS Weather Data } \section{Information on Units}{ -Information on NOAA weather units can be found below. Note that the temperature is measured in degrees C, +Information on NOAA weather units can be found below. 
Note that the temperature is measured in degrees C, but is converted at the station and downloaded in Kelvin. } \section{NOAA_GEFS General Information}{ -This function downloads NOAA GEFS weather data. GEFS is an ensemble of 21 different weather forecast models. -A 16 day forecast is avaliable every 6 hours. Each forecast includes information on a total of 8 variables. +This function downloads NOAA GEFS weather data. GEFS is an ensemble of 21 different weather forecast models. +A 16 day forecast is avaliable every 6 hours. Each forecast includes information on a total of 8 variables. These are transformed from the NOAA standard to the internal PEcAn standard. } @@ -79,9 +79,9 @@ June 6th, 2018 at 6:00 a.m. to June 24th, 2018 at 6:00 a.m. \examples{ \dontrun{ - download.NOAA_GEFS(outfolder="~/Working/results", - lat.in= 45.805925, - lon.in = -90.07961, + download.NOAA_GEFS(outfolder="~/Working/results", + lat.in= 45.805925, + lon.in = -90.07961, site_id = 676) } diff --git a/modules/data.atmosphere/man/extract.local.CMIP5.Rd b/modules/data.atmosphere/man/extract.local.CMIP5.Rd index 14eb0142c7e..1cf25d0495a 100644 --- a/modules/data.atmosphere/man/extract.local.CMIP5.Rd +++ b/modules/data.atmosphere/man/extract.local.CMIP5.Rd @@ -42,7 +42,7 @@ extract.local.CMIP5( \item{ensemble_member}{which CMIP5 experiment ensemble member} \item{date.origin}{(optional) specify the date of origin for timestamps in the files being read. -If NULL defaults to 1850 for historical simulations (except MPI-ESM-P) and +If NULL defaults to 1850 for historical simulations (except MPI-ESM-P) and 850 for p1000 simulations (plus MPI-ESM-P historical). Format: YYYY-MM-DD} \item{adjust.pr}{- adjustment factor fore precipitation when the extracted values seem off} @@ -57,7 +57,7 @@ If NULL defaults to 1850 for historical simulations (except MPI-ESM-P) and This function extracts CMIP5 data from grids that have been downloaded and stored locally. Files are saved as a netCDF file in CF conventions at *DAILY* resolution. Note: At this point in time, variables that are only available at a native monthly resolution will be repeated to - give a pseudo-daily record (and can get dealt with in the downscaling workflow). These files + give a pseudo-daily record (and can get dealt with in the downscaling workflow). These files are ready to be used in the general PEcAn workflow or fed into the downscaling workflow. } \author{ diff --git a/modules/data.atmosphere/man/extract.local.NLDAS.Rd b/modules/data.atmosphere/man/extract.local.NLDAS.Rd index b9a9f2e88b8..f1aa1c2ced1 100644 --- a/modules/data.atmosphere/man/extract.local.NLDAS.Rd +++ b/modules/data.atmosphere/man/extract.local.NLDAS.Rd @@ -39,9 +39,9 @@ to control printing of debug info} } \description{ This function extracts NLDAS data from grids that have been downloaded and stored locally. - Once upon a time, you could query these files directly from the internet, but now they're - behind a tricky authentication wall. Files are saved as a netCDF file in CF conventions. - These files are ready to be used in the general PEcAn workflow or fed into the downscaling + Once upon a time, you could query these files directly from the internet, but now they're + behind a tricky authentication wall. Files are saved as a netCDF file in CF conventions. + These files are ready to be used in the general PEcAn workflow or fed into the downscaling workflow. 
} \author{ diff --git a/modules/data.atmosphere/man/extract.nc.ERA5.Rd b/modules/data.atmosphere/man/extract.nc.ERA5.Rd index d377f131bd4..9b5d55217f3 100644 --- a/modules/data.atmosphere/man/extract.nc.ERA5.Rd +++ b/modules/data.atmosphere/man/extract.nc.ERA5.Rd @@ -58,9 +58,9 @@ For the list of variables check out the documentation at \url{ } \examples{ \dontrun{ -point.data <- ERA5_extract(sslat=40, slon=-120, years=c(1990:1995), vars=NULL) - - purrr::map(~xts::apply.daily(.x, mean)) +point.data <- ERA5_extract(sslat = 40, slon = -120, years = c(1990:1995), vars = NULL) +# point.data \%>\% +purrr::map(~ xts::apply.daily(.x, mean)) } } diff --git a/modules/data.atmosphere/man/gen.subdaily.models.Rd b/modules/data.atmosphere/man/gen.subdaily.models.Rd index 3c77bc657c9..13b4e59739a 100644 --- a/modules/data.atmosphere/man/gen.subdaily.models.Rd +++ b/modules/data.atmosphere/man/gen.subdaily.models.Rd @@ -26,7 +26,7 @@ gen.subdaily.models( \item{path.train}{- path to CF/PEcAn style training data where each year is in a separate file.} -\item{yrs.train}{- which years of the training data should be used for to generate the model for +\item{yrs.train}{- which years of the training data should be used for to generate the model for the subdaily cycle. If NULL, will default to all years} \item{direction.filter}{- Whether the model will be filtered backward or forward in time. options = c("backward", "forward") @@ -36,7 +36,7 @@ the subdaily cycle. If NULL, will default to all years} \item{n.beta}{- number of betas to save from linear regression model} -\item{day.window}{- integer specifying number of days around the day being modeled you want to use data from for that +\item{day.window}{- integer specifying number of days around the day being modeled you want to use data from for that specific hours coefficients. Must be integer because we want statistics from the same time of day for each day surrounding the model day} diff --git a/modules/data.atmosphere/man/get.rh.Rd b/modules/data.atmosphere/man/get.rh.Rd index 34056a27891..1b575315550 100644 --- a/modules/data.atmosphere/man/get.rh.Rd +++ b/modules/data.atmosphere/man/get.rh.Rd @@ -23,7 +23,7 @@ Relative Humidity and the Dewpoint Temperature in Moist Air A Simple Conversion and Applications. BAMS https://doi.org/10.1175/BAMS-86-2-225 R = 461.5 K-1 kg-1 gas constant H2O -L enthalpy of vaporization +L enthalpy of vaporization linear dependence on T (p 226, following eq 9) } \author{ diff --git a/modules/data.atmosphere/man/get_NARR_thredds.Rd b/modules/data.atmosphere/man/get_NARR_thredds.Rd index 58896d47f1a..5d9e9dd691e 100644 --- a/modules/data.atmosphere/man/get_NARR_thredds.Rd +++ b/modules/data.atmosphere/man/get_NARR_thredds.Rd @@ -42,7 +42,6 @@ Requires the `progress` package to be installed.} Retrieve NARR data using thredds } \examples{ - \dontrun{ dat <- get_NARR_thredds("2008-01-01", "2008-01-15", 43.3724, -89.9071) } diff --git a/modules/data.atmosphere/man/merge_met_variable.Rd b/modules/data.atmosphere/man/merge_met_variable.Rd index 66821bf6bb7..3271885774e 100644 --- a/modules/data.atmosphere/man/merge_met_variable.Rd +++ b/modules/data.atmosphere/man/merge_met_variable.Rd @@ -35,23 +35,23 @@ print debugging information as they run?} Currently nothing. TODO: Return a data frame summarizing the merged files. } \description{ -Currently modifies the files IN PLACE rather than creating a new copy of the files an a new DB record. -Currently unit and name checking only implemented for CO2. 
+Currently modifies the files IN PLACE rather than creating a new copy of the files an a new DB record. +Currently unit and name checking only implemented for CO2. Currently does not yet support merge data that has lat/lon New variable only has time dimension and thus MIGHT break downstream code.... } \examples{ \dontrun{ -in.path <- "~/paleon/PalEONregional_CF_site_1-24047/" -in.prefix <- "" -outfolder <- "~/paleon/metTest/" +in.path <- "~/paleon/PalEONregional_CF_site_1-24047/" +in.prefix <- "" +outfolder <- "~/paleon/metTest/" merge.file <- "~/paleon/paleon_monthly_co2.nc" start_date <- "0850-01-01" -end_date <- "2010-12-31" -overwrite <- FALSE -verbose <- TRUE +end_date <- "2010-12-31" +overwrite <- FALSE +verbose <- TRUE -merge_met_variable(in.path,in.prefix,start_date,end_date,merge.file,overwrite,verbose) -PEcAn.DALEC::met2model.DALEC(in.path,in.prefix,outfolder,start_date,end_date) +merge_met_variable(in.path, in.prefix, start_date, end_date, merge.file, overwrite, verbose) +PEcAn.DALEC::met2model.DALEC(in.path, in.prefix, outfolder, start_date, end_date) } } diff --git a/modules/data.atmosphere/man/met.process.Rd b/modules/data.atmosphere/man/met.process.Rd index fce6b829b41..429e33d2c69 100644 --- a/modules/data.atmosphere/man/met.process.Rd +++ b/modules/data.atmosphere/man/met.process.Rd @@ -39,7 +39,7 @@ met.process( \item{overwrite}{Whether to force met.process to proceed. - `overwrite` may be a list with individual components corresponding to + `overwrite` may be a list with individual components corresponding to `download`, `met2cf`, `standardize`, and `met2model`. If it is instead a simple boolean, the default behavior for `overwrite=FALSE` is to overwrite nothing, as you might expect. Note however that the default behavior for `overwrite=TRUE` is to overwrite everything diff --git a/modules/data.atmosphere/man/met2CF.AmerifluxLBL.Rd b/modules/data.atmosphere/man/met2CF.AmerifluxLBL.Rd index 9ea8eeeca6b..94f723cbba1 100644 --- a/modules/data.atmosphere/man/met2CF.AmerifluxLBL.Rd +++ b/modules/data.atmosphere/man/met2CF.AmerifluxLBL.Rd @@ -43,7 +43,7 @@ format is output from db/R/query.format.vars, and should have: format$na.strings = list of missing values to convert to NA, such as -9999 format$skip = lines to skip excluding header format$vars$column_number = Column number in CSV file (optional, will use header name first) -Columns with NA for bety variable name are dropped. +Columns with NA for bety variable name are dropped. Units for datetime field are the lubridate function that will be used to parse the date (e.g. 
\code{ymd_hms} or \code{mdy_hm}).} \item{overwrite}{should existing files be overwritten} diff --git a/modules/data.atmosphere/man/met2CF.Geostreams.Rd b/modules/data.atmosphere/man/met2CF.Geostreams.Rd index c5a4f3f4496..9cb0bf9024a 100644 --- a/modules/data.atmosphere/man/met2CF.Geostreams.Rd +++ b/modules/data.atmosphere/man/met2CF.Geostreams.Rd @@ -26,7 +26,7 @@ met2CF.Geostreams( \item{overwrite}{logical: Regenerate existing files of the same name?} -\item{verbose}{logical, passed on to \code{\link[ncdf4]{nc_create}} +\item{verbose}{logical, passed on to \code{\link[ncdf4]{nc_create}} to control how chatty it should be during netCDF creation} \item{...}{other arguments, currently ignored} diff --git a/modules/data.atmosphere/man/met2CF.csv.Rd b/modules/data.atmosphere/man/met2CF.csv.Rd index 13add76a040..3ef8d204a49 100644 --- a/modules/data.atmosphere/man/met2CF.csv.Rd +++ b/modules/data.atmosphere/man/met2CF.csv.Rd @@ -78,23 +78,27 @@ Units for datetime field are the lubridate function that will be used to \examples{ \dontrun{ con <- PEcAn.DB::db.open( - list(user='bety', password='bety', host='localhost', - dbname='bety', driver='PostgreSQL',write=TRUE)) -start_date <- lubridate::ymd_hm('200401010000') -end_date <- lubridate::ymd_hm('200412312330') -file<-PEcAn.data.atmosphere::download.Fluxnet2015('US-WCr','~/',start_date,end_date) -in.path <- '~/' + list( + user = "bety", password = "bety", host = "localhost", + dbname = "bety", driver = "PostgreSQL", write = TRUE + ) +) +start_date <- lubridate::ymd_hm("200401010000") +end_date <- lubridate::ymd_hm("200412312330") +file <- PEcAn.data.atmosphere::download.Fluxnet2015("US-WCr", "~/", start_date, end_date) +in.path <- "~/" in.prefix <- file$dbfile.name -outfolder <- '~/' +outfolder <- "~/" format.id <- 5000000001 -format <- PEcAn.DB::query.format.vars(format.id=format.id,bety = bety) +format <- PEcAn.DB::query.format.vars(format.id = format.id, bety = bety) format$lon <- -92.0 format$lat <- 45.0 format$time_zone <- "America/Chicago" results <- PEcAn.data.atmosphere::met2CF.csv( in.path, in.prefix, outfolder, start_date, end_date, format, - overwrite=TRUE) + overwrite = TRUE +) } } diff --git a/modules/data.atmosphere/man/met_temporal_downscale.Gaussian_ensemble.Rd b/modules/data.atmosphere/man/met_temporal_downscale.Gaussian_ensemble.Rd index 253cc6dc550..0d25e36e903 100644 --- a/modules/data.atmosphere/man/met_temporal_downscale.Gaussian_ensemble.Rd +++ b/modules/data.atmosphere/man/met_temporal_downscale.Gaussian_ensemble.Rd @@ -28,7 +28,7 @@ met_temporal_downscale.Gaussian_ensemble( \item{input_met}{- the source dataset that will temporally downscaled by the train_met dataset} -\item{train_met}{- the observed dataset that will be used to train the modeled dataset in NC format. i.e. Flux Tower dataset +\item{train_met}{- the observed dataset that will be used to train the modeled dataset in NC format. i.e. Flux Tower dataset (see download.Fluxnet2015 or download.Ameriflux)} \item{overwrite}{logical: replace output file if it already exists?} diff --git a/modules/data.atmosphere/man/nc.merge.Rd b/modules/data.atmosphere/man/nc.merge.Rd index dbed3d19330..4f928804a70 100644 --- a/modules/data.atmosphere/man/nc.merge.Rd +++ b/modules/data.atmosphere/man/nc.merge.Rd @@ -39,7 +39,7 @@ functions print debugging information as they run?} \description{ This is the 1st function for the tdm (Temporally Downscale Meteorology) workflow. 
The nc2dat.train function parses multiple netCDF files into one central training data file called 'dat.train_file'. This netCDF - file will be used to generate the subdaily models in the next step of the workflow, generate.subdaily.models(). + file will be used to generate the subdaily models in the next step of the workflow, generate.subdaily.models(). It is also called in tdm_predict_subdaily_met which is the final step of the tdm workflow. } \details{ diff --git a/modules/data.atmosphere/man/noaa_stage2.Rd b/modules/data.atmosphere/man/noaa_stage2.Rd index 917ebccb6be..1efc2e823b7 100644 --- a/modules/data.atmosphere/man/noaa_stage2.Rd +++ b/modules/data.atmosphere/man/noaa_stage2.Rd @@ -13,7 +13,7 @@ noaa_stage2( ) } \arguments{ -\item{cycle}{Hour at which forecast was made, as character string +\item{cycle}{Hour at which forecast was made, as character string (`"00"`, `"06"`, `"12"` or `"18"`). Only `"00"` (default) has 30 days horizon.} \item{version}{GEFS forecast version. Prior versions correspond to forecasts diff --git a/modules/data.atmosphere/man/predict_subdaily_met.Rd b/modules/data.atmosphere/man/predict_subdaily_met.Rd index 18453131757..007ef20587b 100644 --- a/modules/data.atmosphere/man/predict_subdaily_met.Rd +++ b/modules/data.atmosphere/man/predict_subdaily_met.Rd @@ -27,8 +27,8 @@ predict_subdaily_met( \arguments{ \item{outfolder}{- directory where output file will be stored} -\item{in.path}{- base path to dataset you wish to temporally downscale; Note: in order for parallelization -to work, the in.prefix will need to be appended as the final level of the file structure. +\item{in.path}{- base path to dataset you wish to temporally downscale; Note: in order for parallelization +to work, the in.prefix will need to be appended as the final level of the file structure. For example, if prefix is GFDL.CM3.rcp45.r1i1p1, there should be a directory with that title in in.path.} \item{in.prefix}{- prefix of model dataset, i.e. if file is GFDL.CM3.rcp45.r1i1p1.2006 the prefix is 'GFDL.CM3.rcp45.r1i1p1'} @@ -42,7 +42,7 @@ For example, if prefix is GFDL.CM3.rcp45.r1i1p1, there should be a directory wit \item{yrs.predict}{- years for which you want to generate met. if NULL, all years in in.path will be done} -\item{ens.labs}{- vector containing the labels (suffixes) for each ensemble member; this allows you to add to your +\item{ens.labs}{- vector containing the labels (suffixes) for each ensemble member; this allows you to add to your ensemble rather than overwriting with a default naming scheme} \item{resids}{- logical stating whether to pass on residual data or not} diff --git a/modules/data.atmosphere/man/spin.met.Rd b/modules/data.atmosphere/man/spin.met.Rd index 4e2afa9a7e5..318ac5ed5e3 100644 --- a/modules/data.atmosphere/man/spin.met.Rd +++ b/modules/data.atmosphere/man/spin.met.Rd @@ -42,8 +42,8 @@ updated start date Spin-up meteorology } \details{ -spin.met works by creating symbolic links to the sampled met file, -rather than copying the whole file. Be aware that the internal dates in +spin.met works by creating symbolic links to the sampled met file, +rather than copying the whole file. Be aware that the internal dates in those files are not modified. 
Right now this is designed to be called within met2model.[MODEL] before the met is processed (it's designed to work with annual CF files not model-specific files) for example with models that process met @@ -51,18 +51,19 @@ into one large file } \examples{ start_date <- "0850-01-01 00:00:00" -end_date <- "2010-12-31 23:59:59" -nyear <- 10 -nsample <- 50 -resample <- TRUE +end_date <- "2010-12-31 23:59:59" +nyear <- 10 +nsample <- 50 +resample <- TRUE \dontrun{ -if(!is.null(spin)){ - ## if spinning up, extend processed met by resampling or cycling met - start_date <- PEcAn.data.atmosphere::spin.met( - in.path, in.prefix, - start_date, end_date, - nyear, nsample, resample) +if (!is.null(spin)) { + ## if spinning up, extend processed met by resampling or cycling met + start_date <- PEcAn.data.atmosphere::spin.met( + in.path, in.prefix, + start_date, end_date, + nyear, nsample, resample + ) } } } diff --git a/modules/data.atmosphere/man/split_wind.Rd b/modules/data.atmosphere/man/split_wind.Rd index 02747a03110..13ddb679d4b 100644 --- a/modules/data.atmosphere/man/split_wind.Rd +++ b/modules/data.atmosphere/man/split_wind.Rd @@ -35,13 +35,13 @@ Currently modifies the files IN PLACE rather than creating a new copy of the fil } \examples{ \dontrun{ -in.path <- "~/paleon/PalEONregional_CF_site_1-24047/" -in.prefix <- "" -outfolder <- "~/paleon/metTest/" +in.path <- "~/paleon/PalEONregional_CF_site_1-24047/" +in.prefix <- "" +outfolder <- "~/paleon/metTest/" start_date <- "0850-01-01" -end_date <- "2010-12-31" -overwrite <- FALSE -verbose <- TRUE +end_date <- "2010-12-31" +overwrite <- FALSE +verbose <- TRUE split_wind(in.path, in.prefix, start_date, end_date, merge.file, overwrite, verbose) } diff --git a/modules/data.atmosphere/man/temporal.downscale.functions.Rd b/modules/data.atmosphere/man/temporal.downscale.functions.Rd index 654fc66d6d4..31b27a6578e 100644 --- a/modules/data.atmosphere/man/temporal.downscale.functions.Rd +++ b/modules/data.atmosphere/man/temporal.downscale.functions.Rd @@ -42,12 +42,12 @@ still being worked on, set to FALSE} } \description{ This function contains the functions that do the heavy lifting in gen.subdaily.models() - and predict.subdaily.workflow(). Individual variable functions actually generate the models - and betas from the dat.train_file and save them in the output file. save.model() and - save.betas() are helper functions that save the linear regression model output to a - specific location. In the future, we should only save the data that we actually use from the + and predict.subdaily.workflow(). Individual variable functions actually generate the models + and betas from the dat.train_file and save them in the output file. save.model() and + save.betas() are helper functions that save the linear regression model output to a + specific location. In the future, we should only save the data that we actually use from the linear regression model because this is a large file. predict.met() is called from - predict.subdaily.workflow() and references the linear regression model output to + predict.subdaily.workflow() and references the linear regression model output to predict the ensemble data. 
} \details{ diff --git a/modules/data.land/man/BADM.Rd b/modules/data.land/man/BADM.Rd index 9e67507130e..9be324b847e 100644 --- a/modules/data.land/man/BADM.Rd +++ b/modules/data.land/man/BADM.Rd @@ -15,8 +15,8 @@ A data frame with 12,300 rows and 13 columns: \item{VARIABLE_GROUP}{category, eg abovground biomass or soil chemistry} \item{VARIABLE, DATAVALUE}{key and value for each measured variable} \item{NA_L1CODE, NA_L1NAME, NA_L2CODE, NA_L2NAME}{ - numeric IDs and names for the Level 1 and level 2 ecoregions where - this site is located} + numeric IDs and names for the Level 1 and level 2 ecoregions where + this site is located} } } \source{ diff --git a/modules/data.land/man/EPA_ecoregion_finder.Rd b/modules/data.land/man/EPA_ecoregion_finder.Rd index 1b995fd8b1b..6bf22e7ef7b 100644 --- a/modules/data.land/man/EPA_ecoregion_finder.Rd +++ b/modules/data.land/man/EPA_ecoregion_finder.Rd @@ -17,6 +17,6 @@ EPA_ecoregion_finder(Lat, Lon, folder.path = NULL) a dataframe with codes corresponding to level1 and level2 codes as two columns } \description{ -This function is designed to find the level1 and level2 code ecoregions for a given lat and long. +This function is designed to find the level1 and level2 code ecoregions for a given lat and long. You can learn more about ecoregions here: \url{https://www.epa.gov/eco-research/ecoregions}. } diff --git a/modules/data.land/man/Read.IC.info.BADM.Rd b/modules/data.land/man/Read.IC.info.BADM.Rd index d81d2631c11..d1926367252 100644 --- a/modules/data.land/man/Read.IC.info.BADM.Rd +++ b/modules/data.land/man/Read.IC.info.BADM.Rd @@ -17,12 +17,12 @@ a dataframe with 7 columns of Site, Variable, Date, Organ, AGB, soil_organic_car } \description{ This function returns a dataframe of plant biomass, root and soil carbon for a set of lat and long coordinates. -This function first finds the level1 and level2 ecoregions for the given coordinates, and then tries to filter BADM database for those eco-regions. +This function first finds the level1 and level2 ecoregions for the given coordinates, and then tries to filter BADM database for those eco-regions. If no data found in the BADM database for the given lat/longs eco-regions, then all the data in the database will be used to return the initial condition. All the variables are also converted to kg/m^2. 
} \examples{ \dontrun{ - badm_test <- Read.IC.info.BADM(45.805925,-90.07961) +badm_test <- Read.IC.info.BADM(45.805925, -90.07961) } } diff --git a/modules/data.land/man/Read_Tucson.Rd b/modules/data.land/man/Read_Tucson.Rd index d008e9bc65c..278c9f2345e 100644 --- a/modules/data.land/man/Read_Tucson.Rd +++ b/modules/data.land/man/Read_Tucson.Rd @@ -11,7 +11,7 @@ Read_Tucson(folder) Will read all files at this path matching "TXT", "rwl", or "rw"} } \description{ -wrapper around read.tucson that loads a whole directory of tree ring files -and calls a 'clean' function that removes redundant records +wrapper around read.tucson that loads a whole directory of tree ring files +and calls a 'clean' function that removes redundant records (WinDendro can sometimes create duplicate records when editing) } diff --git a/modules/data.land/man/dataone_download.Rd b/modules/data.land/man/dataone_download.Rd index ec9ce4d716b..09e4453565a 100644 --- a/modules/data.land/man/dataone_download.Rd +++ b/modules/data.land/man/dataone_download.Rd @@ -28,7 +28,7 @@ Adapts the dataone::getDataPackage workflow to allow users to download data from } \examples{ \dontrun{ -dataone_download(id = "doi:10.6073/pasta/63ad7159306bc031520f09b2faefcf87", +dataone_download(id = "doi:10.6073/pasta/63ad7159306bc031520f09b2faefcf87", filepath = "/fs/data1/pecan.data/dbfiles") } } diff --git a/modules/data.land/man/download_NEON_soilmoist.Rd b/modules/data.land/man/download_NEON_soilmoist.Rd index 2ce60df1737..fbdae810e33 100644 --- a/modules/data.land/man/download_NEON_soilmoist.Rd +++ b/modules/data.land/man/download_NEON_soilmoist.Rd @@ -26,8 +26,8 @@ Both variables will be saved in outdir automatically (chr)} \item{enddate}{start date as YYYY-mm. If left empty, all data available will be downloaded (chr)} \item{outdir}{out directory to store the following data: -.rds list files of SWC and SIC data for each site and sensor position, -sensor positions .csv for each site, +.rds list files of SWC and SIC data for each site and sensor position, +sensor positions .csv for each site, variable description .csv file, readme .csv file} } diff --git a/modules/data.land/man/extract_NEON_veg.Rd b/modules/data.land/man/extract_NEON_veg.Rd index f3c033e5f46..043e93bfc7c 100644 --- a/modules/data.land/man/extract_NEON_veg.Rd +++ b/modules/data.land/man/extract_NEON_veg.Rd @@ -36,8 +36,8 @@ veg_info object to be passed to extract_veg within ic_process extract_NEON_veg } \examples{ -start_date = as.Date("2020-01-01") -end_date = as.Date("2021-09-01") +start_date <- as.Date("2020-01-01") +end_date <- as.Date("2021-09-01") } \author{ Alexis Helgeson and Michael Dietze diff --git a/modules/data.land/man/extract_soil_gssurgo.Rd b/modules/data.land/man/extract_soil_gssurgo.Rd index d8231132824..9b258cae7d5 100644 --- a/modules/data.land/man/extract_soil_gssurgo.Rd +++ b/modules/data.land/man/extract_soil_gssurgo.Rd @@ -34,10 +34,10 @@ Extract soil data from gssurgo } \examples{ \dontrun{ - outdir <- "~/paleon/envTest" - lat <- 40 - lon <- -80 - PEcAn.data.land::extract_soil_gssurgo(outdir, lat, lon) +outdir <- "~/paleon/envTest" +lat <- 40 +lon <- -80 +PEcAn.data.land::extract_soil_gssurgo(outdir, lat, lon) } } \author{ diff --git a/modules/data.land/man/extract_soil_nc.Rd b/modules/data.land/man/extract_soil_nc.Rd index 1df60d75bcf..f551b4c8245 100644 --- a/modules/data.land/man/extract_soil_nc.Rd +++ b/modules/data.land/man/extract_soil_nc.Rd @@ -24,9 +24,9 @@ Extract soil data from the gridpoint closest to a location \examples{ \dontrun{ in.file 
<- "~/paleon/env_paleon/soil/paleon_soil.nc" -outdir <- "~/paleon/envTest" -lat <- 40 -lon <- -80 -PEcAn.data.land::extract_soil_nc(in.file,outdir,lat,lon) +outdir <- "~/paleon/envTest" +lat <- 40 +lon <- -80 +PEcAn.data.land::extract_soil_nc(in.file, outdir, lat, lon) } } diff --git a/modules/data.land/man/gSSURGO.Query.Rd b/modules/data.land/man/gSSURGO.Query.Rd index 27a7a4d2cb2..e3a6d1eee72 100644 --- a/modules/data.land/man/gSSURGO.Query.Rd +++ b/modules/data.land/man/gSSURGO.Query.Rd @@ -24,18 +24,20 @@ This function queries the gSSURGO database for a series of map unit keys Full documention of available tables and their relationships can be found here \url{www.sdmdataaccess.nrcs.usda.gov/QueryHelp.aspx} There have been occasions where NRCS made some minor changes to the structure of the API which this code is where those changes need to be implemneted here. -Fields need to be defined with their associate tables. For example, sandtotal is a field in chorizon table which needs to be defined as chorizon.sandotal_(r/l/h), where +Fields need to be defined with their associate tables. For example, sandtotal is a field in chorizon table which needs to be defined as chorizon.sandotal_(r/l/h), where r stands for the representative value, l stands for low and h stands for high. At the moment fields from mapunit, component, muaggatt, and chorizon tables can be extracted. } \examples{ \dontrun{ - PEcAn.data.land::gSSURGO.Query( - mukeys = 2747727, - fields = c( - "chorizon.cec7_r", "chorizon.sandtotal_r", - "chorizon.silttotal_r","chorizon.claytotal_r", - "chorizon.om_r","chorizon.hzdept_r","chorizon.frag3to10_r", - "chorizon.dbovendry_r","chorizon.ph1to1h2o_r", - "chorizon.cokey","chorizon.chkey")) +PEcAn.data.land::gSSURGO.Query( + mukeys = 2747727, + fields = c( + "chorizon.cec7_r", "chorizon.sandtotal_r", + "chorizon.silttotal_r", "chorizon.claytotal_r", + "chorizon.om_r", "chorizon.hzdept_r", "chorizon.frag3to10_r", + "chorizon.dbovendry_r", "chorizon.ph1to1h2o_r", + "chorizon.cokey", "chorizon.chkey" + ) +) } } diff --git a/modules/data.land/man/match_species_id.Rd b/modules/data.land/man/match_species_id.Rd index daa9b19d977..51da9f6ccec 100644 --- a/modules/data.land/man/match_species_id.Rd +++ b/modules/data.land/man/match_species_id.Rd @@ -48,16 +48,18 @@ Parses species codes in input data and matches them with the BETY species ID. \dontrun{ con <- PEcAn.DB::db.open(list( driver = "Postgres", - dbname = 'bety', - user = 'bety', - password = 'bety', - host = 'localhost') + dbname = "bety", + user = "bety", + password = "bety", + host = "localhost" +)) +input_codes <- c("ACRU", "PIMA", "TSCA") +format_name <- "usda" +match_species_id( + input_codes = input_codes, + format_name = format_name, + bety = con ) -input_codes <- c('ACRU', 'PIMA', 'TSCA') -format_name <- 'usda' -match_species_id(input_codes = input_codes, - format_name = format_name, - bety = con) } } diff --git a/modules/data.land/man/sclass.Rd b/modules/data.land/man/sclass.Rd index 476c75e967e..def56ce8472 100644 --- a/modules/data.land/man/sclass.Rd +++ b/modules/data.land/man/sclass.Rd @@ -19,5 +19,5 @@ vector of integers identifying textural class of each input layer. 
This function determines the soil class number based on the fraction of sand, clay, and silt } \examples{ -sclass(0.3,0.3) +sclass(0.3, 0.3) } diff --git a/modules/data.land/man/shp2kml.Rd b/modules/data.land/man/shp2kml.Rd index afae5b612e8..748a64d69db 100644 --- a/modules/data.land/man/shp2kml.Rd +++ b/modules/data.land/man/shp2kml.Rd @@ -22,7 +22,7 @@ shp2kml( \item{kmz}{TRUE/FALSE. Option to write out file as a compressed kml. Requires zip utility} -\item{proj4}{OPTIONAL. Define output proj4 projection string. If set, input vector will be +\item{proj4}{OPTIONAL. Define output proj4 projection string. If set, input vector will be reprojected to desired projection. Not yet implemented.} \item{color}{OPTIONAL. Fill color for output kml/kmz file} diff --git a/modules/data.land/man/soil2netcdf.Rd b/modules/data.land/man/soil2netcdf.Rd index a21840e8fb5..39235b114e0 100644 --- a/modules/data.land/man/soil2netcdf.Rd +++ b/modules/data.land/man/soil2netcdf.Rd @@ -32,9 +32,11 @@ pain for storing strings. Conversion back can be done by and then soil.name[soil_n] } \examples{ -\dontrun{ soil.data <- list(fraction_of_sand_in_soil = c - (0.3,0.4,0.5), fraction_of_clay_in_soil = c(0.3,0.3,0.3), soil_depth = c - (0.2,0.5,1.0)) - -soil2netcdf(soil.data,"soil.nc") } +\dontrun{ +soil.data <- list(fraction_of_sand_in_soil = c +(0.3, 0.4, 0.5), fraction_of_clay_in_soil = c(0.3, 0.3, 0.3), soil_depth = c +(0.2, 0.5, 1.0)) + +soil2netcdf(soil.data, "soil.nc") +} } diff --git a/modules/data.land/man/soil_class.Rd b/modules/data.land/man/soil_class.Rd index 7dd2ff3eaf0..47822102c2a 100644 --- a/modules/data.land/man/soil_class.Rd +++ b/modules/data.land/man/soil_class.Rd @@ -9,31 +9,31 @@ A list with 26 entries: \describe{ \item{air.cond, h2o.cond, sand.cond, silt.cond, clay.cond}{ - thermal conductivity, W m^-1 K^-1} + thermal conductivity, W m^-1 K^-1} \item{air.hcap, sand.hcap, silt.hcap, clay.hcap}{heat capacity, - J m^-3 K^-1} + J m^-3 K^-1} \item{kair, ksand, ksilt, kclay}{relative conductivity factor} \item{fieldcp.K}{hydraulic conductance at field capacity, mm day^-1} \item{grav}{gravity acceleration, m s^-2} \item{soil.key}{Abbreviations for each of 18 soil texture classes, e.g. "SiL", "LSa"} \item{soil.name}{Names for 18 soil texture classes, e.g. 
"Sand", - "Silty clay"} + "Silty clay"} \item{soilcp.MPa}{soil water potential when air-dry, MPa} \item{soilld.MPa}{soil water potential at critical water content, MPa} \item{soilwp.MPa}{soil water potential at wilting point, MPa} \item{stext.lines}{list of 18 lists, each giving minimum and maximum - sand/silt/clay contents for a soil texture class} + sand/silt/clay contents for a soil texture class} \item{stext.polygon}{list of 18 lists, each giving corner points in the - soil texture triangle for a soil texture class} + soil texture triangle for a soil texture class} \item{texture}{data frame with 13 rows and 21 columns, giving default - parameter values for 13 named soil textures} + parameter values for 13 named soil textures} \item{theta.crit}{critical water content (fractional soil moisture at - which plants start dropping leaves), m^3 m^-3} + which plants start dropping leaves), m^3 m^-3} \item{xclay.def}{default volume fraction of sand in each of 18 soil - texture classes} + texture classes} \item{xsand.def}{default volume fraction of clay in each of 18 soil - texture classes} + texture classes} } } \source{ diff --git a/modules/data.land/man/soil_params.Rd b/modules/data.land/man/soil_params.Rd index 4fa1ae61e10..ac0db4a13e1 100644 --- a/modules/data.land/man/soil_params.Rd +++ b/modules/data.land/man/soil_params.Rd @@ -42,5 +42,5 @@ Estimate soil parameters from texture class or sand/silt/clay \examples{ sand <- c(0.3, 0.4, 0.5) clay <- c(0.3, 0.3, 0.3) -soil_params(sand=sand,clay=clay) +soil_params(sand = sand, clay = clay) } diff --git a/modules/data.land/man/soilgrids_soilC_extract.Rd b/modules/data.land/man/soilgrids_soilC_extract.Rd index 175dbc71ee5..506642c477a 100644 --- a/modules/data.land/man/soilgrids_soilC_extract.Rd +++ b/modules/data.land/man/soilgrids_soilC_extract.Rd @@ -7,25 +7,25 @@ soilgrids_soilC_extract(site_info, outdir = NULL, verbose = TRUE) } \arguments{ -\item{site_info}{A dataframe of site info containing the BETYdb site ID, -site name, latitude, and longitude, e.g. +\item{site_info}{A dataframe of site info containing the BETYdb site ID, +site name, latitude, and longitude, e.g. (site_id, site_name, lat, lon)} -\item{outdir}{Optional. Provide the results as a CSV file +\item{outdir}{Optional. Provide the results as a CSV file (soilgrids_soilC_data.csv)} \item{verbose}{Provide progress feedback to the terminal? 
TRUE/FALSE} } \value{ -a dataframe containing the total soil carbon values -and the corresponding standard deviation values (uncertainties) for each location +a dataframe containing the total soil carbon values +and the corresponding standard deviation values (uncertainties) for each location Output column names are c("Site_ID","Site_Name","Latitude","Longitude", "Total_soilC","Std_soilC") } \description{ soilgrids_soilC_extract function -A function to extract total soil organic carbon for a single or group of -lat/long locationsbased on user-defined site location from SoilGrids250m +A function to extract total soil organic carbon for a single or group of +lat/long locations based on user-defined site location from SoilGrids250m version 2.0 : https://soilgrids.org } \examples{ @@ -41,7 +41,7 @@ db_password <- 'bety' bety <- list(user='bety', password='bety', host=host_db, dbname='betydb', driver=RPostgres::Postgres(),write=FALSE) -con <- DBI::dbConnect(drv=bety$driver, dbname=bety$dbname, host=bety$host, +con <- DBI::dbConnect(drv=bety$driver, dbname=bety$dbname, host=bety$host, password=bety$password, user=bety$user) suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, @@ -55,7 +55,7 @@ DBI::dbDisconnect(con) site_info <- qry_results.2 verbose <- TRUE -system.time(result_soc <- PEcAn.data.land::soilgrids_soilC_extract(site_info=site_info, +system.time(result_soc <- PEcAn.data.land::soilgrids_soilC_extract(site_info=site_info, verbose=verbose)) result_soc diff --git a/modules/data.remote/man/GEDI_AGB_prep.Rd b/modules/data.remote/man/GEDI_AGB_prep.Rd index db472005724..4d5fa1a4d79 100644 --- a/modules/data.remote/man/GEDI_AGB_prep.Rd +++ b/modules/data.remote/man/GEDI_AGB_prep.Rd @@ -44,16 +44,16 @@ During the first use, users will be ask to enter their Earth Explore \examples{ \dontrun{ settings <- PEcAn.settings::read.settings("pecan.xml") -site_info <- settings \%>\% - purrr::map(~.x[['run']] ) \%>\% - purrr::map('site')\%>\% - purrr::map(function(site.list){ - #conversion from string to number +site_info <- settings \%>\% + purrr::map(~ .x[["run"]]) \%>\% + purrr::map("site") \%>\% + purrr::map(function(site.list) { + # conversion from string to number site.list$lat <- as.numeric(site.list$lat) site.list$lon <- as.numeric(site.list$lon) - list(site_id=site.list$id, lat=site.list$lat, lon=site.list$lon, site_name=site.list$name) - })\%>\% - dplyr::bind_rows() \%>\% + list(site_id = site.list$id, lat = site.list$lat, lon = site.list$lon, site_name = site.list$name) + }) \%>\% + dplyr::bind_rows() \%>\% as.list() time_points <- seq(start.date, end.date, by = time.step) buffer <- 0.01 diff --git a/modules/data.remote/man/MODIS_LC_prep.Rd b/modules/data.remote/man/MODIS_LC_prep.Rd index 7229ce6d83b..66748cc3099 100644 --- a/modules/data.remote/man/MODIS_LC_prep.Rd +++ b/modules/data.remote/man/MODIS_LC_prep.Rd @@ -27,7 +27,7 @@ A data frame containing MODIS land cover types for each site and each time step. Prepare MODIS land cover data for the SDA workflow. } \details{ -This function enables the feature of grabbing pre-extracted MODIS LC CSV files such that any site that +This function enables the feature of grabbing pre-extracted MODIS LC CSV files such that any site that has records will be skipped (See Line 33). In more detail, we will be loading the previous `LC.csv` file, which contains previous extracted land cover records and trying to match that with current requests (location, time).
Any requests that fail the match will be regarded as new extractions and combine with the previous `LC.csv` file. diff --git a/modules/data.remote/man/NASA_CMR_finder.Rd b/modules/data.remote/man/NASA_CMR_finder.Rd index 4359fb952b3..bf9c97a8f8e 100644 --- a/modules/data.remote/man/NASA_CMR_finder.Rd +++ b/modules/data.remote/man/NASA_CMR_finder.Rd @@ -7,12 +7,12 @@ NASA_CMR_finder(doi) } \arguments{ -\item{doi}{Character: data DOI on the NASA DAAC server, it can be obtained -directly from the NASA ORNL DAAC data portal (e.g., GEDI L4A through +\item{doi}{Character: data DOI on the NASA DAAC server, it can be obtained +directly from the NASA ORNL DAAC data portal (e.g., GEDI L4A through https://daac.ornl.gov/cgi-bin/dsviewer.pl?ds_id=2056).} } \value{ -A list with each containing corresponding provider and concept ids +A list with each containing corresponding provider and concept ids given the data doi. } \description{ diff --git a/modules/data.remote/man/NASA_DAAC_URL.Rd b/modules/data.remote/man/NASA_DAAC_URL.Rd index 7ecb41b3b43..fbb209bd75c 100644 --- a/modules/data.remote/man/NASA_DAAC_URL.Rd +++ b/modules/data.remote/man/NASA_DAAC_URL.Rd @@ -15,7 +15,7 @@ NASA_DAAC_URL( ) } \arguments{ -\item{base_url}{Character: base URL for the CMR search. +\item{base_url}{Character: base URL for the CMR search. default is "https://cmr.earthdata.nasa.gov/search/granules.json?pretty=true".} \item{provider}{Character: ID of data provider from NASA DAAC. See `NASA_CMR_finder` for more details.} @@ -42,10 +42,12 @@ provider <- "ORNL_CLOUD" concept_id <- "C2770099044-ORNL_CLOUD" bbox <- "-121,33,-117,35" daterange <- c("2022-02-23", "2022-05-30") -URL <- NASA_DAAC_URL(provider = provider, -concept_id = concept_id, -bbox = bbox, -daterange = daterange) +URL <- NASA_DAAC_URL( + provider = provider, + concept_id = concept_id, + bbox = bbox, + daterange = daterange +) } } \author{ diff --git a/modules/data.remote/man/NASA_DAAC_download.Rd b/modules/data.remote/man/NASA_DAAC_download.Rd index b41cfc920c9..85c2b64368c 100644 --- a/modules/data.remote/man/NASA_DAAC_download.Rd +++ b/modules/data.remote/man/NASA_DAAC_download.Rd @@ -38,8 +38,8 @@ NASA_DAAC_download( \item{outdir}{Character: path of the directory in which to save the downloaded files. 
Default is the current work directory(getwd()).} -\item{doi}{Character: data DOI on the NASA DAAC server, it can be obtained -directly from the NASA ORNL DAAC data portal (e.g., GEDI L4A through +\item{doi}{Character: data DOI on the NASA DAAC server, it can be obtained +directly from the NASA ORNL DAAC data portal (e.g., GEDI L4A through https://daac.ornl.gov/cgi-bin/dsviewer.pl?ds_id=2056).} \item{netrc_file}{Character: path to the credential file, default is NULL.} @@ -62,14 +62,16 @@ from <- "2022-02-23" to <- "2022-05-30" doi <- "10.3334/ORNLDAAC/2183" outdir <- "/projectnb/dietzelab/dongchen/SHIFT/test_download" -metadata <- NASA_DAAC_download(ul_lat = ul_lat, - ul_lon = ul_lon, - lr_lat = lr_lat, - lr_lon = lr_lon, - from = from, - to = to, - doi = doi, - just_path = T) +metadata <- NASA_DAAC_download( + ul_lat = ul_lat, + ul_lon = ul_lon, + lr_lat = lr_lat, + lr_lon = lr_lon, + from = from, + to = to, + doi = doi, + just_path = T +) } } \author{ diff --git a/modules/data.remote/man/call_MODIS.Rd b/modules/data.remote/man/call_MODIS.Rd index 52fc0422110..f0bd79dd7d6 100644 --- a/modules/data.remote/man/call_MODIS.Rd +++ b/modules/data.remote/man/call_MODIS.Rd @@ -25,30 +25,30 @@ call_MODIS( \item{band}{string value for which measurement to extract} -\item{site_info}{Bety list of site info for parsing MODIS data: list(site_id, site_name, lat, +\item{site_info}{Bety list of site info for parsing MODIS data: list(site_id, site_name, lat, lon, time_zone)} \item{product_dates}{a character vector of the start and end date of the data in YYYYJJJ} \item{outdir}{where the output file will be stored. Default is NULL and in this case only values are returned. When path is provided values are returned and written to disk.} -\item{run_parallel}{optional method to download data paralleize. Only works if more than 1 +\item{run_parallel}{optional method to download data in parallel. Only works if more than 1 site is needed and there are >1 CPUs available.} -\item{ncores}{number of cpus to use if run_parallel is set to TRUE. If you do not know the +\item{ncores}{number of cpus to use if run_parallel is set to TRUE. If you do not know the number of CPU's available, enter NULL.} -\item{package_method}{string value to inform function of which package method to use to download +\item{package_method}{string value to inform function of which package method to use to download modis data. Either "MODISTools" or "reticulate" (optional)} -\item{QC_filter}{Converts QC values of band and keeps only data values that are excellent or good -(as described by MODIS documentation), and removes all bad values. qc_band must be supplied for this +\item{QC_filter}{Converts QC values of band and keeps only data values that are excellent or good +(as described by MODIS documentation), and removes all bad values. qc_band must be supplied for this parameter to work. Default is False. Only MODISTools option.} -\item{progress}{TRUE reports the download progress bar of the dataset, FALSE omits the download +\item{progress}{TRUE reports the download progress bar of the dataset, FALSE omits the download progress bar. Default is TRUE. Only MODISTools option. -Requires Python3 for reticulate method option. There are a number of required python libraries. +Requires Python3 for reticulate method option. There are a number of required python libraries.
sudo -H pip install numpy suds netCDF4 json depends on the MODISTools package version 1.1.0} } diff --git a/modules/data.remote/man/download.LandTrendr.AGB.Rd b/modules/data.remote/man/download.LandTrendr.AGB.Rd index a1021f109b4..605a267145c 100644 --- a/modules/data.remote/man/download.LandTrendr.AGB.Rd +++ b/modules/data.remote/man/download.LandTrendr.AGB.Rd @@ -22,7 +22,7 @@ download.LandTrendr.AGB( \item{product_dates}{What data product dates to download} -\item{product_version}{Optional. LandTrend AGB is provided with two versions, +\item{product_version}{Optional. LandTrend AGB is provided with two versions, v0 and v1 (latest version)} \item{con}{Optional database connection. If specified then the code will check to see} @@ -46,12 +46,12 @@ product_dates <- c(1990, 1991, 1995) # using discontinous, or specific years product_dates2 <- seq(1992, 1995, 1) # using a date sequence for selection of years product_version = "v1" -results <- PEcAn.data.remote::download.LandTrendr.AGB(outdir=outdir, - product_dates = product_dates, +results <- PEcAn.data.remote::download.LandTrendr.AGB(outdir=outdir, + product_dates = product_dates, product_version = product_version) -results <- PEcAn.data.remote::download.LandTrendr.AGB(outdir=outdir, - product_dates = product_dates2, +results <- PEcAn.data.remote::download.LandTrendr.AGB(outdir=outdir, + product_dates = product_dates2, product_version = product_version) } diff --git a/modules/data.remote/man/download.thredds.AGB.Rd b/modules/data.remote/man/download.thredds.AGB.Rd index 79efcce9998..dc1dd1c3afb 100644 --- a/modules/data.remote/man/download.thredds.AGB.Rd +++ b/modules/data.remote/man/download.thredds.AGB.Rd @@ -29,8 +29,8 @@ download.thredds.AGB \examples{ \dontrun{ outdir <- "~/scratch/abg_data/" -results <- PEcAn.data.remote::download.thredds.AGB(outdir=outdir, - site_ids = c(676, 678, 679, 755, 767, 1000000030, 1000000145, 1000025731), +results <- PEcAn.data.remote::download.thredds.AGB(outdir=outdir, + site_ids = c(676, 678, 679, 755, 767, 1000000030, 1000000145, 1000025731), run_parallel = TRUE, ncores = 8) } } diff --git a/modules/data.remote/man/extract.LandTrendr.AGB.Rd b/modules/data.remote/man/extract.LandTrendr.AGB.Rd index b0b77de4de7..631729b6e8e 100644 --- a/modules/data.remote/man/extract.LandTrendr.AGB.Rd +++ b/modules/data.remote/man/extract.LandTrendr.AGB.Rd @@ -29,13 +29,13 @@ extract.LandTrendr.AGB( \item{product_dates}{Process and extract data only from selected years. 
Default behavior (product_dates = NULL) is to extract data from all availible years in BETYdb or data_dir} -\item{output_file}{Path to save LandTrendr_AGB_output.RData file containing the +\item{output_file}{Path to save LandTrendr_AGB_output.RData file containing the output extraction list (see return)} \item{...}{Additional arguments, currently ignored} } \value{ -list of two containing the median AGB values per pixel and the corresponding +list of two containing the median AGB values per pixel and the corresponding standard deviation values (uncertainties) } \description{ @@ -52,16 +52,16 @@ con <- PEcAn.DB::db.open( dbname='bety', driver='PostgreSQL',write=TRUE)) site_ID <- c(2000000023,1000025731,676,1000005149) # BETYdb site IDs -suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, -ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", +suppressWarnings(site_qry <- glue::glue_sql("SELECT *, ST_X(ST_CENTROID(geometry)) AS lon, +ST_Y(ST_CENTROID(geometry)) AS lat FROM sites WHERE id IN ({ids*})", ids = site_ID, .con = con)) suppressWarnings(qry_results <- DBI::dbSendQuery(con,site_qry)) suppressWarnings(qry_results <- DBI::dbFetch(qry_results)) -site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, +site_info <- list(site_id=qry_results$id, site_name=qry_results$sitename, lat=qry_results$lat, lon=qry_results$lon, time_zone=qry_results$time_zone) data_dir <- "~/scratch/agb_data/" -results <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean", +results <- extract.LandTrendr.AGB(site_info, "median", buffer = NULL, fun = "mean", data_dir, product_dates, output_file) } diff --git a/modules/data.remote/man/extract_phenology_MODIS.Rd b/modules/data.remote/man/extract_phenology_MODIS.Rd index 0a134d32069..d725ee14237 100644 --- a/modules/data.remote/man/extract_phenology_MODIS.Rd +++ b/modules/data.remote/man/extract_phenology_MODIS.Rd @@ -14,7 +14,7 @@ extract_phenology_MODIS( ) } \arguments{ -\item{site_info}{A dataframe of site info containing the BETYdb site ID, +\item{site_info}{A dataframe of site info containing the BETYdb site ID, site name, latitude, and longitude, e.g.} \item{start_date}{Start date to download data} @@ -23,14 +23,14 @@ site name, latitude, and longitude, e.g.} \item{outdir}{Path to store the outputs} -\item{run_parallel}{optional method to download data parallely. Only works if more than 1 +\item{run_parallel}{optional method to download data in parallel. Only works if more than 1 site is needed and there are >1 CPUs available.} -\item{ncores}{number of cpus to use if run_parallel is set to TRUE. If you do not know the +\item{ncores}{number of cpus to use if run_parallel is set to TRUE. If you do not know the number of CPU's available, enter NULL.} } \value{ -the path for output file +the path for output file The output file will be saved as a CSV file to the outdir.
Output column names are "year", "site_id", "lat", "lon", "leafonday","leafoffday","leafon_qa","leafoff_qa" } diff --git a/modules/data.remote/man/l4_download.Rd b/modules/data.remote/man/l4_download.Rd index d4f7cf1541b..3dd1b5f08bc 100644 --- a/modules/data.remote/man/l4_download.Rd +++ b/modules/data.remote/man/l4_download.Rd @@ -68,8 +68,8 @@ During the first use, users will be ask to enter their Earth Explore } \examples{ \dontrun{ -#retrive Italy bound -bound <- sf::st_as_sf(raster::getData('GADM', country='ITA', level=1)) +# retrieve Italy bound +bound <- sf::st_as_sf(raster::getData("GADM", country = "ITA", level = 1)) ex <- raster::extent(bound) ul_lat <- ex[4] lr_lat <- ex[3] @@ -77,27 +77,30 @@ ul_lon <- ex[2] lr_lon <- ex[1] from <- "2020-07-01" to <- "2020-07-02" -#get just files path available for the searched parameters -l4_download(ul_lat=ul_lat, - lr_lat=lr_lat, - ul_lon=ul_lon, - lr_lon=lr_lon, - from=from, - to=to, - just_path=T +# get just files path available for the searched parameters +l4_download( + ul_lat = ul_lat, + lr_lat = lr_lat, + ul_lon = ul_lon, + lr_lon = lr_lon, + from = from, + to = to, + just_path = T ) -#download the first 4 files +# download the first 4 files -l4_download(ul_lat=ul_lat, - lr_lat=lr_lat, - ul_lon=ul_lon, - lr_lon=lr_lon, - from=from, - to=to, - just_path=F, - outdir = tempdir(), - subset=1:4) +l4_download( + ul_lat = ul_lat, + lr_lat = lr_lat, + ul_lon = ul_lon, + lr_lon = lr_lon, + from = from, + to = to, + just_path = F, + outdir = tempdir(), + subset = 1:4 +) } } \author{ diff --git a/modules/meta.analysis/man/pecan.ma.Rd b/modules/meta.analysis/man/pecan.ma.Rd index 3029bd7d4e5..589dc7d70fb 100644 --- a/modules/meta.analysis/man/pecan.ma.Rd +++ b/modules/meta.analysis/man/pecan.ma.Rd @@ -69,12 +69,12 @@ function to modify the \code{ma.model.template.bug} generic model.
values = list(pft))[[1]] traits <- c("SLA", "Vcmax") trait_string <- paste(shQuote(traits), collapse = ",") - + # Load traits and priors from BETY species <- PEcAn.DB::query.pft_species(pft, con = con) trait.data <- PEcAn.DB::query.traits(species[["id"]], c("SLA", "Vcmax"), con = con) prior.distns <- PEcAn.DB::query.priors(pft_id, trait_string, con = con) - + # Pre-process data jagged.data <- lapply(trait.data, PEcAn.MA::jagify) taupriors <- list(tauA = 0.01, diff --git a/modules/priors/man/get.quantiles.from.density.Rd b/modules/priors/man/get.quantiles.from.density.Rd index fd7bea29be3..9f4cdc520e2 100644 --- a/modules/priors/man/get.quantiles.from.density.Rd +++ b/modules/priors/man/get.quantiles.from.density.Rd @@ -15,7 +15,7 @@ get.quantiles.from.density(density.df, quantiles = c(0.025, 0.5, 0.975)) Finds quantiles on a density data frame } \examples{ -prior.df <- create.density.df(distribution = list('norm',0,1)) +prior.df <- create.density.df(distribution = list("norm", 0, 1)) get.quantiles.from.density(prior.df) samp.df <- create.density.df(samps = rnorm(100)) get.quantiles.from.density(samp.df) diff --git a/modules/priors/man/get.sample.Rd b/modules/priors/man/get.sample.Rd index c3dc313f4ef..bf6145b352d 100644 --- a/modules/priors/man/get.sample.Rd +++ b/modules/priors/man/get.sample.Rd @@ -28,11 +28,11 @@ or list and it can return either a random sample of length n OR a sample from a \dontrun{ # return 1st through 99th quantile of standard normal distribution: PEcAn.priors::get.sample( - prior = data.frame(distn = 'norm', parama = 0, paramb = 1), + prior = data.frame(distn = 'norm', parama = 0, paramb = 1), p = 1:99/100) # return 100 random samples from standard normal distribution: PEcAn.priors::get.sample( - prior = data.frame(distn = 'norm', parama = 0, paramb = 1), + prior = data.frame(distn = 'norm', parama = 0, paramb = 1), n = 100) } } diff --git a/modules/priors/man/prior.fn.Rd b/modules/priors/man/prior.fn.Rd index ef74d3362f0..3ebb2c90939 100644 --- a/modules/priors/man/prior.fn.Rd +++ b/modules/priors/man/prior.fn.Rd @@ -26,8 +26,8 @@ parms Prior fitting function for optimization } \details{ -This function is used within `DEoptim` to parameterize a distribution to the -central tendency and confidence interval of a parameter. +This function is used within `DEoptim` to parameterize a distribution to the +central tendency and confidence interval of a parameter. This function is not very robust; currently it needs to be tweaked when distributions require starting values (e.g. 
beta, f) } diff --git a/modules/uncertainty/man/flux.uncertainty.Rd b/modules/uncertainty/man/flux.uncertainty.Rd index 77606cf14d1..6cf6461c7b6 100644 --- a/modules/uncertainty/man/flux.uncertainty.Rd +++ b/modules/uncertainty/man/flux.uncertainty.Rd @@ -19,7 +19,7 @@ flux.uncertainty( \item{QC}{= quality control flag time series (0 = best)} -\item{flags}{= additional flags on flux filtering of PAIRS (length = 1/2 that of the +\item{flags}{= additional flags on flux filtering of PAIRS (length = 1/2 that of the time series, TRUE = use).} \item{bin.num}{= number of bins (default = 10)} diff --git a/modules/uncertainty/man/get.ensemble.samples.Rd b/modules/uncertainty/man/get.ensemble.samples.Rd index ac981dcb82e..c71b4787bda 100644 --- a/modules/uncertainty/man/get.ensemble.samples.Rd +++ b/modules/uncertainty/man/get.ensemble.samples.Rd @@ -33,7 +33,7 @@ matrix of (quasi-)random samples from trait distributions Get parameter values used in ensemble } \details{ -Returns a matrix of randomly or quasi-randomly sampled trait values +Returns a matrix of randomly or quasi-randomly sampled trait values to be assigned to traits over several model runs. given the number of model runs and a list of sample distributions for traits The model run is indexed first by model run, then by trait diff --git a/modules/uncertainty/man/input.ens.gen.Rd b/modules/uncertainty/man/input.ens.gen.Rd index 13ecf373805..57e61fc3ce5 100644 --- a/modules/uncertainty/man/input.ens.gen.Rd +++ b/modules/uncertainty/man/input.ens.gen.Rd @@ -26,6 +26,8 @@ If for example met was a parent and it's sampling method resulted in choosing th parent_ids to this function. } \examples{ -\dontrun{input.ens.gen(settings,"met","sampling")} +\dontrun{ +input.ens.gen(settings, "met", "sampling") +} } diff --git a/modules/uncertainty/man/read.ensemble.output.Rd b/modules/uncertainty/man/read.ensemble.output.Rd index 2b6ad72cb69..8cd188470a9 100644 --- a/modules/uncertainty/man/read.ensemble.output.Rd +++ b/modules/uncertainty/man/read.ensemble.output.Rd @@ -37,7 +37,7 @@ a list of ensemble model output Reads output from model ensemble } \details{ -Reads output for an ensemble of length specified by \code{ensemble.size} and bounded by \code{start.year} +Reads output for an ensemble of length specified by \code{ensemble.size} and bounded by \code{start.year} and \code{end.year} } \author{ diff --git a/modules/uncertainty/man/write.ensemble.configs.Rd b/modules/uncertainty/man/write.ensemble.configs.Rd index 6a58d47ae4c..6b42afacd41 100644 --- a/modules/uncertainty/man/write.ensemble.configs.Rd +++ b/modules/uncertainty/man/write.ensemble.configs.Rd @@ -37,7 +37,7 @@ list, containing $runs = data frame of runids, $ensemble.id = the ensemble ID fo } \description{ Writes config files for use in meta-analysis and returns a list of run ids. -Given a pft.xml object, a list of lists as supplied by get.sa.samples, +Given a pft.xml object, a list of lists as supplied by get.sa.samples, a name to distinguish the output files, and the directory to place the files. 
} \details{ From 6c80de048e64bd78240deb0b7f3e1a628d2db439 Mon Sep 17 00:00:00 2001 From: Abhinav Pandey Date: Tue, 7 Jan 2025 02:02:26 +0530 Subject: [PATCH 4/5] Update roxygen docs --- models/dvmdostem/man/adjust.runmask.dvmdostem.Rd | 2 +- .../man/enforce.runmask.cmt.vegmap.harmony.Rd | 10 +++++----- models/dvmdostem/man/model2netcdf.dvmdostem.Rd | 1 - models/dvmdostem/man/requested_vars_string2list.Rd | 4 ++-- models/dvmdostem/man/vmap_reverse.Rd | 2 +- models/fates/man/model2netcdf.FATES.Rd | 1 - models/gday/man/met2model.GDAY.Rd | 6 +++--- models/maat/man/met2model.MAAT.Rd | 2 +- models/sibcasa/man/sibcasa_output_vars.Rd | 4 ++-- 9 files changed, 15 insertions(+), 17 deletions(-) diff --git a/models/dvmdostem/man/adjust.runmask.dvmdostem.Rd b/models/dvmdostem/man/adjust.runmask.dvmdostem.Rd index 85e34086253..b3e8fcb4a33 100644 --- a/models/dvmdostem/man/adjust.runmask.dvmdostem.Rd +++ b/models/dvmdostem/man/adjust.runmask.dvmdostem.Rd @@ -18,7 +18,7 @@ adjust.runmask.dvmdostem(siteDataPath, rundir, pixel_X, pixel_Y) \description{ Adjust the runmask for dvmdostem. This is necessary if you are using a mutisite dvmdostem dataset (more than one grid cell/pixel) -and you are not forcing the community (cmt or vegetation) type. In +and you are not forcing the community (cmt or vegetation) type. In other words you are using a vegetation map to determine the pixel's cmt type. In this case you must make sure that for the site and PFTs you have selected, the underlying veg map classifies the site as the diff --git a/models/dvmdostem/man/enforce.runmask.cmt.vegmap.harmony.Rd b/models/dvmdostem/man/enforce.runmask.cmt.vegmap.harmony.Rd index b81dc1bf71e..a6a67394549 100644 --- a/models/dvmdostem/man/enforce.runmask.cmt.vegmap.harmony.Rd +++ b/models/dvmdostem/man/enforce.runmask.cmt.vegmap.harmony.Rd @@ -3,8 +3,8 @@ \name{enforce.runmask.cmt.vegmap.harmony} \alias{enforce.runmask.cmt.vegmap.harmony} \title{Make sure that selected run mask pixel, veg map pixel value and CMT type are all copasetic. The -function calls stop() if there is anything inconsistent, for example more tha one pixel is -enabled in the run mask, or the enabled pixel's vegetation type does not match the +function calls stop() if there is anything inconsistent, for example more tha one pixel is +enabled in the run mask, or the enabled pixel's vegetation type does not match the vegetation/community type of the chosen PFTs.} \usage{ enforce.runmask.cmt.vegmap.harmony(siteDataPath, rundir, cmtnum) @@ -12,7 +12,7 @@ enforce.runmask.cmt.vegmap.harmony(siteDataPath, rundir, cmtnum) \arguments{ \item{siteDataPath}{is the path to the folder where we expect to find the dvmdostem input data files.} -\item{rundir}{is the path to the local running directory where customized files (config, parameters, +\item{rundir}{is the path to the local running directory where customized files (config, parameters, runmask etc) are copied to.} \item{cmtnum}{is the community type (vegetation type) that should be used for the run. Based on the @@ -23,8 +23,8 @@ none } \description{ Make sure that selected run mask pixel, veg map pixel value and CMT type are all copasetic. 
The -function calls stop() if there is anything inconsistent, for example more tha one pixel is -enabled in the run mask, or the enabled pixel's vegetation type does not match the +function calls stop() if there is anything inconsistent, for example more tha one pixel is +enabled in the run mask, or the enabled pixel's vegetation type does not match the vegetation/community type of the chosen PFTs. } \author{ diff --git a/models/dvmdostem/man/model2netcdf.dvmdostem.Rd b/models/dvmdostem/man/model2netcdf.dvmdostem.Rd index ef9e75305a3..d3cb4c0292d 100644 --- a/models/dvmdostem/man/model2netcdf.dvmdostem.Rd +++ b/models/dvmdostem/man/model2netcdf.dvmdostem.Rd @@ -19,7 +19,6 @@ model2netcdf.dvmdostem(outdir, runstart, runend, pecan_requested_vars) Code to convert dvmdostem netcdf output into into CF standard } \examples{ - \dontrun{ # example code here? } diff --git a/models/dvmdostem/man/requested_vars_string2list.Rd b/models/dvmdostem/man/requested_vars_string2list.Rd index c508937094b..db8de45b72c 100644 --- a/models/dvmdostem/man/requested_vars_string2list.Rd +++ b/models/dvmdostem/man/requested_vars_string2list.Rd @@ -15,8 +15,8 @@ requested_vars_string2list(req_v_str, outspec_path) a list of the requested variables } \description{ -Look up the "depends_on" in the output variable mapping, -accumulate a list of dvmdostem variables to turn on to support +Look up the "depends_on" in the output variable mapping, +accumulate a list of dvmdostem variables to turn on to support the requested variables in the pecan.xml tag } \author{ diff --git a/models/dvmdostem/man/vmap_reverse.Rd b/models/dvmdostem/man/vmap_reverse.Rd index f367dcf0ba3..1f5532fd865 100644 --- a/models/dvmdostem/man/vmap_reverse.Rd +++ b/models/dvmdostem/man/vmap_reverse.Rd @@ -4,7 +4,7 @@ \name{vmap_reverse} \alias{vmap_reverse} \title{Build a mapping from dvmdostem names to PEcAn names, units, etc. -The temunits should be (are) looked up from the dvmdostem output +The temunits should be (are) looked up from the dvmdostem output file's units attributes.} \format{ An object of class \code{list} of length 19. diff --git a/models/fates/man/model2netcdf.FATES.Rd b/models/fates/man/model2netcdf.FATES.Rd index 2fafb91db21..c427123291d 100644 --- a/models/fates/man/model2netcdf.FATES.Rd +++ b/models/fates/man/model2netcdf.FATES.Rd @@ -33,7 +33,6 @@ model2netcdf.FATES( Code to convert FATES netcdf output into into CF standard } \examples{ - \dontrun{ example.output <- system.file("case.clm2.h0.2004-01-01-00000.nc",package="PEcAn.FATES") model2netcdf.FATES(outdir="~/",sitelat, sitelon, start_date, end_date, vars_names, pfts) diff --git a/models/gday/man/met2model.GDAY.Rd b/models/gday/man/met2model.GDAY.Rd index 5a180d85eb5..4fc5e4ee45a 100644 --- a/models/gday/man/met2model.GDAY.Rd +++ b/models/gday/man/met2model.GDAY.Rd @@ -40,9 +40,9 @@ that will be inserted into database } \description{ Function to convert NetCDF met files in PEcAn-CF format into GDAY met driver files. -This function is an R wrapper to the python script "generate_forcing_data.py" -in the inst/ folder. The python script supports arguments to generate sub-daily (30 min) -weather data as well as soil temperature from 6 day running mean. These arguments are +This function is an R wrapper to the python script "generate_forcing_data.py" +in the inst/ folder. The python script supports arguments to generate sub-daily (30 min) +weather data as well as soil temperature from 6 day running mean. 
These arguments are hard-coded in this function to generate daily GDAY files without soil temperature. } \details{ diff --git a/models/maat/man/met2model.MAAT.Rd b/models/maat/man/met2model.MAAT.Rd index b6e4ce7f27e..bdb4be6937a 100644 --- a/models/maat/man/met2model.MAAT.Rd +++ b/models/maat/man/met2model.MAAT.Rd @@ -31,7 +31,7 @@ met2model.MAAT( \item{verbose}{should the function be very verbose} -\item{leap_year}{Enforce Leap-years? If set to TRUE, will require leap years to have 366 days. +\item{leap_year}{Enforce Leap-years? If set to TRUE, will require leap years to have 366 days. If set to false, will require all years to have 365 days. Default = TRUE.} \item{...}{additional arguments, currently ignored} diff --git a/models/sibcasa/man/sibcasa_output_vars.Rd b/models/sibcasa/man/sibcasa_output_vars.Rd index e85c2700a91..7748b619b1d 100644 --- a/models/sibcasa/man/sibcasa_output_vars.Rd +++ b/models/sibcasa/man/sibcasa_output_vars.Rd @@ -19,8 +19,8 @@ sibcasa_output_vars } \description{ TODO: Provide a complete description here of the dataset, -including its origin and purpose. + including its origin and purpose. The column descriptions below were written by someone looking at the file -with no insight into its usage + with no insight into its usage } \keyword{datasets} From 31e7e7986951a4a1b5ebdfcab7778561d287ece7 Mon Sep 17 00:00:00 2001 From: Abhinav Pandey Date: Thu, 9 Jan 2025 14:50:05 +0530 Subject: [PATCH 5/5] More roxygen changes Signed-off-by: Abhinav Pandey --- models/basgra/man/run_BASGRA.Rd | 2 +- modules/rtm/man/fortran_data_module.Rd | 34 ++++++++++++++------------ 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/models/basgra/man/run_BASGRA.Rd b/models/basgra/man/run_BASGRA.Rd index 8fc469087a9..d40673c03f0 100644 --- a/models/basgra/man/run_BASGRA.Rd +++ b/models/basgra/man/run_BASGRA.Rd @@ -47,7 +47,7 @@ BASGRA wrapper function. Runs and writes model outputs in PEcAn standard. \details{ BASGRA is written in fortran is run through R by wrapper functions written by Marcel Van Oijen. This function makes use of those wrappers but gives control of datastream in and out of the model to PEcAn. -With this function we skip model2netcdf, we can also skip met2model but keeping it for now. +With this function we skip model2netcdf, we can also skip met2model but keeping it for now. write.config.BASGRA modifies args of this function through template.job then job.sh runs calls this function to run the model } diff --git a/modules/rtm/man/fortran_data_module.Rd b/modules/rtm/man/fortran_data_module.Rd index 31b9abc88d7..8e952a91f22 100644 --- a/modules/rtm/man/fortran_data_module.Rd +++ b/modules/rtm/man/fortran_data_module.Rd @@ -30,22 +30,26 @@ arrays may be in the future) and splits long data into rows of 10. Currently, only numeric data are supported (i.e. no characters). 
} \examples{ - w <- 3.2 - x <- 1:5 - y <- 6:15 - z <- seq(exp(1), pi, length.out=42) - l <- list(x=x, y=y, z=z) ## NOTE that names must be explicitly declared - l.types <- c('real','integer', 'real*4', 'real*8') - fortran_data_module(l, l.types, 'testmod', - file.path(tempdir(), "testmod.f90")) +w <- 3.2 +x <- 1:5 +y <- 6:15 +z <- seq(exp(1), pi, length.out = 42) +l <- list(x = x, y = y, z = z) ## NOTE that names must be explicitly declared +l.types <- c("real", "integer", "real*4", "real*8") +fortran_data_module( + l, l.types, "testmod", + file.path(tempdir(), "testmod.f90") +) - x <- runif(10) - y <- rnorm(10) - z <- rgamma(10, 3) - d <- data.frame(x,y,z) ## NOTE that data.frames are just named lists - d.types <- rep('real*8', ncol(d)) - fortran_data_module(d, d.types, 'random', - file.path(tempdir(), "random.f90")) +x <- runif(10) +y <- rnorm(10) +z <- rgamma(10, 3) +d <- data.frame(x, y, z) ## NOTE that data.frames are just named lists +d.types <- rep("real*8", ncol(d)) +fortran_data_module( + d, d.types, "random", + file.path(tempdir(), "random.f90") +) } \author{ Alexey Shiklomanov